code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def div(self, other, maxlag=None):
'''padded division, pads numerator with zeros to maxlag
'''
if maxlag is None:
maxlag = max(len(self.coef), len(other.coef)) + 1
return (self.padflip(maxlag) / other.flip()).flip() | padded division, pads numerator with zeros to maxlag | div | python | statsmodels/statsmodels | statsmodels/examples/tsa/lagpolynomial.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/tsa/lagpolynomial.py | BSD-3-Clause |
def armaloop(arcoefs, macoefs, x):
'''get arma recursion in simple loop
for simplicity, assumes that the MA polynomial is not longer than the AR polynomial
Parameters
----------
arcoefs : array_like
autoregressive coefficients in right hand side parameterization
macoefs : array_like
moving average coefficients, without leading 1
x : array_like
observed data series used in the recursion
Returns
-------
y : ndarray
predicted values, initial values are the same as the observed values
e : ndarray
predicted residuals, zero for initial observations
Notes
-----
Except for the treatment of initial observations this is the same as using
scipy.signal.lfilter, which is much faster. Written for testing only
'''
arcoefs_r = np.asarray(arcoefs)
macoefs_r = np.asarray(macoefs)
x = np.asarray(x)
nobs = x.shape[0]
#assume ar longer than ma
arlag = arcoefs_r.shape[0]
malag = macoefs_r.shape[0]
maxlag = max(arlag, malag)
print(maxlag)
y = np.zeros(x.shape, float)
e = np.zeros(x.shape, float)
y[:maxlag] = x[:maxlag]
#if malag > arlag:
for t in range(arlag, maxlag):
y[t] = (x[t-arlag:t] * arcoefs_r).sum(0) + (e[:t] * macoefs_r[:t]).sum(0)
e[t] = x[t] - y[t]
for t in range(maxlag, nobs):
#wrong broadcasting, 1d only
y[t] = (x[t-arlag:t] * arcoefs_r).sum(0) + (e[t-malag:t] * macoefs_r).sum(0)
e[t] = x[t] - y[t]
return y, e | get arma recursion in simple loop | armaloop | python | statsmodels/statsmodels | statsmodels/examples/tsa/try_ar.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/tsa/try_ar.py | BSD-3-Clause |
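Since the armaloop docstring above describes a plain-loop ARMA recursion meant for testing, a small simulation makes its behaviour concrete. This is a hedged sketch: it assumes ``armaloop`` as defined above is available in the namespace, and the AR coefficient and sample size are illustrative (the function will also emit its internal ``print(maxlag)`` debug output).

```python
import numpy as np

# hedged sketch: assumes armaloop (defined above) is in the namespace
np.random.seed(0)
nobs = 200
e_true = np.random.randn(nobs)
x = np.zeros(nobs)
for t in range(1, nobs):
    x[t] = 0.8 * x[t - 1] + e_true[t]  # simulate an AR(1) process

# pure AR case: empty list of MA coefficients
y_pred, resid = armaloop([0.8], [], x)
# after the initial observation the residuals recover the simulated innovations
print(np.allclose(resid[1:], e_true[1:]))  # True
```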
def tiny2zero(x, eps = 1e-15):
'''replace abs values smaller than eps by zero, makes copy
'''
x = x.copy()  # copy first so the input array is not modified in place, as documented
mask = np.abs(x) < eps
x[mask] = 0
return x | replace abs values smaller than eps by zero, makes copy | tiny2zero | python | statsmodels/statsmodels | statsmodels/examples/tsa/ar1cholesky.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/examples/tsa/ar1cholesky.py | BSD-3-Clause |
def func(self, params, alpha=None):
"""evaluate penalization at params
Parameters
----------
params : ndarray
coefficients for the spline basis in the regression model
alpha : float
default penalty weight
Returns
-------
func : float
value of the penalty evaluated at params
"""
if alpha is None:
alpha = self.alpha
f = params.dot(self.univariate_smoother.cov_der2.dot(params))
return alpha * f / self.nobs | evaluate penalization at params | func | python | statsmodels/statsmodels | statsmodels/gam/gam_penalties.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/gam_penalties.py | BSD-3-Clause |
def deriv(self, params, alpha=None):
"""evaluate derivative of penalty with respect to params
Parameters
----------
params : ndarray
coefficients for the spline basis in the regression model
alpha : float
default penalty weight
Returns
-------
deriv : ndarray
derivative, gradient of the penalty with respect to params
"""
if alpha is None:
alpha = self.alpha
d = 2 * alpha * np.dot(self.univariate_smoother.cov_der2, params)
d /= self.nobs
return d | evaluate derivative of penalty with respect to params | deriv | python | statsmodels/statsmodels | statsmodels/gam/gam_penalties.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/gam_penalties.py | BSD-3-Clause |
def deriv2(self, params, alpha=None):
"""evaluate second derivative of penalty with respect to params
Parameters
----------
params : ndarray
coefficients for the spline basis in the regression model
alpha : float
default penalty weight
Returns
-------
deriv2 : ndarray, 2-Dim
second derivative, hessian of the penalty with respect to params
"""
if alpha is None:
alpha = self.alpha
d2 = 2 * alpha * self.univariate_smoother.cov_der2
d2 /= self.nobs
return d2 | evaluate second derivative of penalty with respect to params | deriv2 | python | statsmodels/statsmodels | statsmodels/gam/gam_penalties.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/gam_penalties.py | BSD-3-Clause |
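The three penalty methods above implement the quadratic form ``alpha * params' C params / nobs`` and its first two derivatives, with ``C`` the smoother's ``cov_der2``. The following standalone check does not use the statsmodels classes; ``C``, ``alpha`` and ``nobs`` are made-up values, and the analytic gradient is compared against a central finite difference.

```python
import numpy as np

# standalone check of the formulas above; C stands in for cov_der2 (made-up values)
rng = np.random.default_rng(0)
a = rng.standard_normal((4, 4))
C = a.T @ a                       # symmetric positive semi-definite
alpha, nobs = 2.0, 100
p = rng.standard_normal(4)

def penalty(q):
    return alpha * q @ C @ q / nobs          # same formula as func()

grad = 2 * alpha * C @ p / nobs              # same formula as deriv()

eps = 1e-6
num_grad = np.array([(penalty(p + eps * np.eye(4)[i]) -
                      penalty(p - eps * np.eye(4)[i])) / (2 * eps) for i in range(4)])
print(np.allclose(grad, num_grad, atol=1e-8))  # True
```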
def penalty_matrix(self, alpha=None):
"""penalty matrix for the smooth term of a GAM
Parameters
----------
alpha : list of floats or None
penalty weights
Returns
-------
penalty matrix
square penalty matrix for quadratic penalization. The number
of rows and columns are equal to the number of columns in the
smooth terms, i.e. the number of parameters for this smooth
term in the regression model
"""
if alpha is None:
alpha = self.alpha
return alpha * self.univariate_smoother.cov_der2 | penalty matrix for the smooth term of a GAM | penalty_matrix | python | statsmodels/statsmodels | statsmodels/gam/gam_penalties.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/gam_penalties.py | BSD-3-Clause |
def func(self, params, alpha=None):
"""evaluate penalization at params
Parameters
----------
params : ndarray
coefficients in the regression model
alpha : float or list of floats
penalty weights
Returns
-------
func : float
value of the penalty evaluated at params
"""
if alpha is None:
alpha = [None] * self.k_variables
cost = 0
for i in range(self.k_variables):
params_i = params[self.mask[i]]
cost += self.gp[i].func(params_i, alpha=alpha[i])
return cost | evaluate penalization at params | func | python | statsmodels/statsmodels | statsmodels/gam/gam_penalties.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/gam_penalties.py | BSD-3-Clause |
def deriv(self, params, alpha=None):
"""evaluate derivative of penalty with respect to params
Parameters
----------
params : ndarray
coefficients in the regression model
alpha : list of floats or None
penalty weights
Returns
-------
deriv : ndarray
derivative, gradient of the penalty with respect to params
"""
if alpha is None:
alpha = [None] * self.k_variables
grad = [np.zeros(self.start_idx)]
for i in range(self.k_variables):
params_i = params[self.mask[i]]
grad.append(self.gp[i].deriv(params_i, alpha=alpha[i]))
return np.concatenate(grad) | evaluate derivative of penalty with respect to params | deriv | python | statsmodels/statsmodels | statsmodels/gam/gam_penalties.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/gam_penalties.py | BSD-3-Clause |
def deriv2(self, params, alpha=None):
"""evaluate second derivative of penalty with respect to params
Parameters
----------
params : ndarray
coefficients in the regression model
alpha : list of floats or None
penalty weights
Returns
-------
deriv2 : ndarray, 2-Dim
second derivative, hessian of the penalty with respect to params
"""
if alpha is None:
alpha = [None] * self.k_variables
deriv2 = [np.zeros((self.start_idx, self.start_idx))]
for i in range(self.k_variables):
params_i = params[self.mask[i]]
deriv2.append(self.gp[i].deriv2(params_i, alpha=alpha[i]))
return block_diag(*deriv2) | evaluate second derivative of penalty with respect to params | deriv2 | python | statsmodels/statsmodels | statsmodels/gam/gam_penalties.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/gam_penalties.py | BSD-3-Clause |
def penalty_matrix(self, alpha=None):
"""penalty matrix for generalized additive model
Parameters
----------
alpha : list of floats or None
penalty weights
Returns
-------
penalty matrix
block diagonal, square penalty matrix for quadratic penalization.
The number of rows and columns are equal to the number of
parameters in the regression model ``k_params``.
Notes
-----
statsmodels does not support backwards compatibility when keywords are
used as positional arguments. The order of keywords might change.
We might need to add a ``params`` keyword if the need arises.
"""
if alpha is None:
alpha = self.alpha
s_all = [np.zeros((self.start_idx, self.start_idx))]
for i in range(self.k_variables):
s_all.append(self.gp[i].penalty_matrix(alpha=alpha[i]))
return block_diag(*s_all) | penalty matrix for generalized additive model | penalty_matrix | python | statsmodels/statsmodels | statsmodels/gam/gam_penalties.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/gam_penalties.py | BSD-3-Clause |
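The penalty_matrix method above stitches a zero block for the ``start_idx`` unpenalized linear columns together with one block per smooth term. A tiny hand-built illustration of that assembly (all values are made up):

```python
import numpy as np
from scipy.linalg import block_diag

# made-up per-term penalty blocks, standing in for alpha_i * cov_der2 of term i
start_idx = 2                                   # two unpenalized linear columns
s1 = 1.5 * np.array([[2.0, -1.0], [-1.0, 2.0]])
s2 = 0.5 * np.eye(3)
penalty = block_diag(np.zeros((start_idx, start_idx)), s1, s2)
print(penalty.shape)  # (7, 7): equals k_params = 2 + 2 + 3
```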
def make_bsplines_basis(x, df, degree):
''' make a spline basis for x '''
all_knots, _, _, _ = compute_all_knots(x, df, degree)
basis, der_basis, der2_basis = _eval_bspline_basis(x, all_knots, degree)
return basis, der_basis, der2_basis | make a spline basis for x | make_bsplines_basis | python | statsmodels/statsmodels | statsmodels/gam/smooth_basis.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/smooth_basis.py | BSD-3-Clause |
def get_knots_bsplines(x=None, df=None, knots=None, degree=3,
spacing='quantile', lower_bound=None,
upper_bound=None, all_knots=None):
"""knots for use in B-splines
There are two main options for the knot placement
- quantile spacing with multiplicity of boundary knots
- equal spacing extended to boundary or exterior knots
The first corresponds to splines as used by patsy. The second is the
knot spacing for P-Splines.
"""
# based on patsy memorize_finish
if all_knots is not None:
return all_knots
x_min = x.min()
x_max = x.max()
if degree < 0:
raise ValueError("degree must be non-negative (not %r)"
% (degree,))
if int(degree) != degree:
raise ValueError("degree must be an integer (not %r)"
% (degree,))
# These are guaranteed to all be 1d vectors by the code above
# x = np.concatenate(tmp["xs"])
if df is None and knots is None:
raise ValueError("must specify either df or knots")
order = degree + 1
if df is not None:
n_inner_knots = df - order
if n_inner_knots < 0:
raise ValueError("df=%r is too small for degree=%r; must be >= %s"
% (df, degree,
# We know that n_inner_knots is negative;
# if df were that much larger, it would
# have been zero, and things would work.
df - n_inner_knots))
if knots is not None:
if len(knots) != n_inner_knots:
raise ValueError("df=%s with degree=%r implies %s knots, "
"but %s knots were provided"
% (df, degree,
n_inner_knots, len(knots)))
elif spacing == 'quantile':
# Need to compute inner knots
knot_quantiles = np.linspace(0, 1, n_inner_knots + 2)[1:-1]
inner_knots = _R_compat_quantile(x, knot_quantiles)
elif spacing == 'equal':
# Need to compute inner knots
grid = np.linspace(0, 1, n_inner_knots + 2)[1:-1]
inner_knots = x_min + grid * (x_max - x_min)
diff_knots = inner_knots[1] - inner_knots[0]
else:
raise ValueError("incorrect option for spacing")
if knots is not None:
inner_knots = knots
if lower_bound is None:
lower_bound = np.min(x)
if upper_bound is None:
upper_bound = np.max(x)
if lower_bound > upper_bound:
raise ValueError("lower_bound > upper_bound (%r > %r)"
% (lower_bound, upper_bound))
inner_knots = np.asarray(inner_knots)
if inner_knots.ndim > 1:
raise ValueError("knots must be 1 dimensional")
if np.any(inner_knots < lower_bound):
raise ValueError("some knot values (%s) fall below lower bound "
"(%r)"
% (inner_knots[inner_knots < lower_bound],
lower_bound))
if np.any(inner_knots > upper_bound):
raise ValueError("some knot values (%s) fall above upper bound "
"(%r)"
% (inner_knots[inner_knots > upper_bound],
upper_bound))
if spacing == "equal":
diffs = np.arange(1, order + 1) * diff_knots
lower_knots = inner_knots[0] - diffs[::-1]
upper_knots = inner_knots[-1] + diffs
all_knots = np.concatenate((lower_knots, inner_knots, upper_knots))
else:
all_knots = np.concatenate(([lower_bound, upper_bound] * order,
inner_knots))
all_knots.sort()
return all_knots | knots for use in B-splines | get_knots_bsplines | python | statsmodels/statsmodels | statsmodels/gam/smooth_basis.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/smooth_basis.py | BSD-3-Clause |
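A short usage sketch of the two knot-placement options described above. The data and the ``df``/``degree`` values are arbitrary, and the import assumes the function lives in ``statsmodels.gam.smooth_basis`` as the path column indicates.

```python
import numpy as np
from statsmodels.gam.smooth_basis import get_knots_bsplines

x = np.random.default_rng(0).uniform(0.0, 10.0, size=500)
# patsy-style: inner knots at quantiles, boundary knots repeated `degree + 1` times
knots_q = get_knots_bsplines(x, df=6, degree=3, spacing='quantile')
# P-spline style: equally spaced knots extended beyond the data range
knots_e = get_knots_bsplines(x, df=6, degree=3, spacing='equal')
print(len(knots_q), len(knots_e))  # both give df + degree + 1 = 10 knots
```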
def _get_integration_points(knots, k_points=3):
"""add points to each subinterval defined by knots
inserts k_points between each two consecutive knots
"""
k_points = k_points + 1
knots = np.unique(knots)
dxi = np.arange(k_points) / k_points
dxk = np.diff(knots)
dx = dxk[:, None] * dxi
x = np.concatenate(((knots[:-1, None] + dx).ravel(), [knots[-1]]))
return x | add points to each subinterval defined by knots, inserts k_points between each two consecutive knots | _get_integration_points | python | statsmodels/statsmodels | statsmodels/gam/smooth_basis.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/smooth_basis.py | BSD-3-Clause |
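A quick illustration of the helper above (assuming ``_get_integration_points`` is defined or imported locally): with ``k_points=3``, three interior points are inserted into each knot interval.

```python
import numpy as np

# assumes _get_integration_points as defined above
knots = np.array([0.0, 1.0, 3.0])
print(_get_integration_points(knots, k_points=3))
# -> [0.   0.25 0.5  0.75 1.   1.5  2.   2.5  3.  ]
```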
def get_covder2(smoother, k_points=3, integration_points=None,
skip_ctransf=False, deriv=2):
"""
Approximate integral of cross product of second derivative of smoother
This uses scipy.integrate's Simpson rule to compute an approximation to the
integral of the smoother derivative cross-product at knots plus k_points
in between knots.
"""
try:
from scipy.integrate import simpson
except ImportError:
# Remove after SciPy 1.7 is the minimum version
from scipy.integrate import simps as simpson
knots = smoother.knots
if integration_points is None:
x = _get_integration_points(knots, k_points=k_points)
else:
x = integration_points
d2 = smoother.transform(x, deriv=deriv, skip_ctransf=skip_ctransf)
covd2 = simpson(d2[:, :, None] * d2[:, None, :], x=x, axis=0)
return covd2 | Approximate integral of cross product of second derivative of smoother | get_covder2 | python | statsmodels/statsmodels | statsmodels/gam/smooth_basis.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/smooth_basis.py | BSD-3-Clause |
def make_poly_basis(x, degree, intercept=True):
'''
given a vector x returns poly=(1, x, x^2, ..., x^degree)
and its first and second derivative
'''
if intercept:
start = 0
else:
start = 1
nobs = len(x)
basis = np.zeros(shape=(nobs, degree + 1 - start))
der_basis = np.zeros(shape=(nobs, degree + 1 - start))
der2_basis = np.zeros(shape=(nobs, degree + 1 - start))
for i in range(start, degree + 1):
basis[:, i - start] = x ** i
der_basis[:, i - start] = i * x ** (i - 1)
der2_basis[:, i - start] = i * (i - 1) * x ** (i - 2)
return basis, der_basis, der2_basis | given a vector x returns poly=(1, x, x^2, ..., x^degree) and its first and second derivative | make_poly_basis | python | statsmodels/statsmodels | statsmodels/gam/smooth_basis.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/smooth_basis.py | BSD-3-Clause |
def _smooth_basis_for_single_variable(self):
"""
given a vector x returns poly=(x, x^2, ..., x^degree), without a constant column,
its first and second derivatives, and the cross product of the second derivative basis
"""
basis = np.zeros(shape=(self.nobs, self.degree))
der_basis = np.zeros(shape=(self.nobs, self.degree))
der2_basis = np.zeros(shape=(self.nobs, self.degree))
for i in range(self.degree):
dg = i + 1
basis[:, i] = self.x ** dg
der_basis[:, i] = dg * self.x ** (dg - 1)
if dg > 1:
der2_basis[:, i] = dg * (dg - 1) * self.x ** (dg - 2)
else:
der2_basis[:, i] = 0
cov_der2 = np.dot(der2_basis.T, der2_basis)
return basis, der_basis, der2_basis, cov_der2 | given a vector x returns poly=(x, x^2, ..., x^degree), without a constant column, its first and second derivatives, and the cross product of the second derivative basis | _smooth_basis_for_single_variable | python | statsmodels/statsmodels | statsmodels/gam/smooth_basis.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/smooth_basis.py | BSD-3-Clause |
def transform(self, x_new, deriv=0, skip_ctransf=False):
"""create the spline basis for new observations
The main use of this stateful transformation is for prediction
using the same specification of the spline basis.
Parameters
----------
x_new : ndarray
observations of the underlying explanatory variable
deriv : int
which derivative of the spline basis to compute.
This is an option for internal computation.
skip_ctransf : bool
whether to skip the constraint transform.
This is an option for internal computation.
Returns
-------
basis : ndarray
design matrix for the spline basis for given ``x_new``
"""
if x_new is None:
x_new = self.x
exog = _eval_bspline_basis(x_new, self.knots, self.degree,
deriv=deriv,
include_intercept=self.include_intercept)
# ctransf does not exist yet when cov_der2 is computed
ctransf = getattr(self, 'ctransf', None)
if ctransf is not None and not skip_ctransf:
exog = exog.dot(self.ctransf)
return exog | create the spline basis for new observations | transform | python | statsmodels/statsmodels | statsmodels/gam/smooth_basis.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/smooth_basis.py | BSD-3-Clause |
def _get_b_and_d(self, knots):
"""Returns mapping of cyclic cubic spline values to 2nd derivatives.
.. note:: See 'Generalized Additive Models', Simon N. Wood, 2006,
pp 146-147
Parameters
----------
knots : ndarray
The 1-d array knots used for cubic spline parametrization,
must be sorted in ascending order.
Returns
-------
b : ndarray
Array for mapping cyclic cubic spline values at knots to
second derivatives.
d : ndarray
Array for mapping cyclic cubic spline values at knots to
second derivatives.
Notes
-----
The penalty matrix is equal to ``s = d.T.dot(b^-1).dot(d)``
"""
h = knots[1:] - knots[:-1]
n = knots.size - 1
# b and d are defined such that the penalty matrix is equivalent to:
# s = d.T.dot(b^-1).dot(d)
# reference in particular to page 146 of Wood's book
b = np.zeros((n, n)) # the b matrix on page 146 of Wood's book
d = np.zeros((n, n)) # the d matrix on page 146 of Wood's book
b[0, 0] = (h[n - 1] + h[0]) / 3.
b[0, n - 1] = h[n - 1] / 6.
b[n - 1, 0] = h[n - 1] / 6.
d[0, 0] = -1. / h[0] - 1. / h[n - 1]
d[0, n - 1] = 1. / h[n - 1]
d[n - 1, 0] = 1. / h[n - 1]
for i in range(1, n):
b[i, i] = (h[i - 1] + h[i]) / 3.
b[i, i - 1] = h[i - 1] / 6.
b[i - 1, i] = h[i - 1] / 6.
d[i, i] = -1. / h[i - 1] - 1. / h[i]
d[i, i - 1] = 1. / h[i - 1]
d[i - 1, i] = 1. / h[i - 1]
return b, d | Returns mapping of cyclic cubic spline values to 2nd derivatives. | _get_b_and_d | python | statsmodels/statsmodels | statsmodels/gam/smooth_basis.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/smooth_basis.py | BSD-3-Clause |
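The note above states that the cyclic cubic spline penalty equals ``s = d' b^{-1} d``. Below is a self-contained sketch for the special case of equally spaced knots, where the ``b`` and ``d`` matrices built above collapse to cyclic band matrices; it only checks a property that follows from the construction, namely that constant functions are not penalized.

```python
import numpy as np

# equally spaced cyclic knots: constant spacing h, n knot intervals
n, h = 6, 0.5
b = np.zeros((n, n))
d = np.zeros((n, n))
for i in range(n):
    b[i, i] = 2 * h / 3.0                     # (h[i-1] + h[i]) / 3 with constant h
    b[i, (i - 1) % n] = b[(i - 1) % n, i] = h / 6.0
    d[i, i] = -2.0 / h                        # -1/h[i-1] - 1/h[i]
    d[i, (i - 1) % n] = d[(i - 1) % n, i] = 1.0 / h
s = d.T @ np.linalg.solve(b, d)               # penalty matrix s = d' b^{-1} d
print(np.allclose(s @ np.ones(n), 0.0))       # True: constants have zero second derivative
```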
def transform(self, x_new):
"""create the spline basis for new observations
The main use of this stateful transformation is for prediction
using the same specification of the spline basis.
Parameters
----------
x_new: ndarray
observations of the underlying explanatory variable
Returns
-------
basis : ndarray
design matrix for the spline basis for given ``x_new``.
"""
if x_new.ndim == 1 and self.k_variables == 1:
x_new = x_new.reshape(-1, 1)
exog = np.hstack(list(self.smoothers[i].transform(x_new[:, i])
for i in range(self.k_variables)))
return exog | create the spline basis for new observations | transform | python | statsmodels/statsmodels | statsmodels/gam/smooth_basis.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/smooth_basis.py | BSD-3-Clause |
def _transform_predict_exog(model, exog, model_spec=None):
"""transform exog for predict using the formula's model_spec
Note: this is copied from base.model.Results.predict and converted to
standalone function with additional options.
"""
is_pandas = _is_using_pandas(exog, None)
exog_index = exog.index if is_pandas else None
if model_spec is None:
model_spec = getattr(model.data, 'model_spec', None)
if model_spec is not None and (exog is not None):
from statsmodels.formula._manager import FormulaManager
if isinstance(exog, pd.Series):
# we are guessing whether it should be column or row
if (hasattr(exog, 'name') and isinstance(exog.name, str) and
exog.name in model_spec.describe()):
# assume we need one column
exog = pd.DataFrame(exog)
else:
# assume we need a row
exog = pd.DataFrame(exog).T
orig_exog_len = len(exog)
is_dict = isinstance(exog, dict)
exog = FormulaManager().get_matrices(model_spec, exog, pandas=True)
if orig_exog_len > len(exog) and not is_dict:
import warnings
if exog_index is None:
warnings.warn('nan values have been dropped', ValueWarning)
else:
exog = exog.reindex(exog_index)
exog_index = exog.index
if exog is not None:
exog = np.asarray(exog)
if exog.ndim == 1 and (model.exog.ndim == 1 or
model.exog.shape[1] == 1):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
return exog, exog_index | transform exog for predict using the formula's model_spec | _transform_predict_exog | python | statsmodels/statsmodels | statsmodels/gam/generalized_additive_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/generalized_additive_model.py | BSD-3-Clause |
def _tranform_predict_exog(self, exog=None, exog_smooth=None,
transform=True):
"""Transform original explanatory variables for prediction
Parameters
----------
exog : array_like, optional
The values for the linear explanatory variables.
exog_smooth : array_like
values for the variables in the smooth terms
transform : bool, optional
If transform is False, then ``exog`` and ``exog_smooth`` are combined as
provided; it is assumed that together they already contain the full
design matrix for the predict observations.
If transform is True, then the basis representation of the smooth
term will be constructed from the provided ``exog_smooth``.
Returns
-------
exog_transformed : ndarray
design matrix for the prediction
"""
if exog_smooth is not None:
exog_smooth = np.asarray(exog_smooth)
exog_index = None
if transform is False:
# the following allows that either or both exog are not None
if exog_smooth is None:
# exog could be None or array
ex = exog
else:
if exog is None:
ex = exog_smooth
else:
ex = np.column_stack((exog, exog_smooth))
else:
# transform exog_linear if needed
if exog is not None and hasattr(self.model, 'model_spec_linear'):
exog, exog_index = _transform_predict_exog(
self.model, exog, self.model.model_spec_linear)
# create smooth basis
if exog_smooth is not None:
ex_smooth = self.model.smoother.transform(exog_smooth)
if exog is None:
ex = ex_smooth
else:
# TODO: there might be problems if exog_smooth is 1-D
ex = np.column_stack((exog, ex_smooth))
else:
ex = exog
return ex, exog_index | Transform original explanatory variables for prediction | _tranform_predict_exog | python | statsmodels/statsmodels | statsmodels/gam/generalized_additive_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/generalized_additive_model.py | BSD-3-Clause |
def get_prediction(self, exog=None, exog_smooth=None, transform=True,
**kwargs):
"""compute prediction results
Parameters
----------
exog : array_like, optional
The values for which you want to predict.
exog_smooth : array_like
values for the variables in the smooth terms
transform : bool, optional
If transform is True, then the basis representation of the smooth
term will be constructed from the provided ``exog_smooth``.
kwargs :
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction_results : generalized_linear_model.PredictionResults
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and
summary tables for the prediction of the mean and of new
observations.
"""
ex, exog_index = self._tranform_predict_exog(exog=exog,
exog_smooth=exog_smooth,
transform=transform)
return super().get_prediction(ex, transform=False, **kwargs) | compute prediction results | get_prediction | python | statsmodels/statsmodels | statsmodels/gam/generalized_additive_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/generalized_additive_model.py | BSD-3-Clause |
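A hedged end-to-end sketch tying the results methods in this file together: it builds a one-variable GAM on simulated data and then calls ``get_prediction`` at new values of the smooth variable. All data, variable names and the ``alpha`` value are illustrative, not taken from the source.

```python
import numpy as np
from statsmodels.gam.api import GLMGam, BSplines

# illustrative simulated data: one smooth variable carrying a sine signal
rng = np.random.default_rng(0)
n = 300
x = rng.uniform(0.0, 2 * np.pi, size=(n, 1))
y = np.sin(x[:, 0]) + 0.3 * rng.standard_normal(n)

bs = BSplines(x, df=[8], degree=[3])       # spline basis for the smooth term
gam = GLMGam(y, smoother=bs, alpha=1.0)    # Gaussian family by default
res = gam.fit()                            # penalized IRLS ("pirls") by default

# prediction of the mean at new values of the smooth variable
x_new = np.linspace(0.5, 6.0, 5)
pred = res.get_prediction(exog_smooth=x_new)
print(pred.summary_frame())
```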
def partial_values(self, smooth_index, include_constant=True):
"""contribution of a smooth term to the linear prediction
Warning: This will be replaced by a predict method
Parameters
----------
smooth_index : int
index of the smooth term within list of smooth terms
include_constant : bool
If true, then the estimated intercept is added to the prediction
and its standard errors. This avoids that the confidence interval
has zero width at the imposed identification constraint, e.g.
either at a reference point or at the mean.
Returns
-------
predicted : ndarray
predicted value of linear term.
This is not the expected response if the link function is not
linear.
se_pred : ndarray
standard error of linear prediction
"""
variable = smooth_index
smoother = self.model.smoother
mask = smoother.mask[variable]
start_idx = self.model.k_exog_linear
idx = start_idx + np.nonzero(mask)[0]
# smoother has only smooth parts, not exog_linear
exog_part = smoother.basis[:, mask]
const_idx = self.model.data.const_idx
if include_constant and const_idx is not None:
idx = np.concatenate(([const_idx], idx))
exog_part = self.model.exog[:, idx]
linpred = np.dot(exog_part, self.params[idx])
# select the submatrix corresponding to a single variable
partial_cov_params = self.cov_params(column=idx)
covb = partial_cov_params
var = (exog_part * np.dot(covb, exog_part.T).T).sum(1)
se = np.sqrt(var)
return linpred, se | contribution of a smooth term to the linear prediction | partial_values | python | statsmodels/statsmodels | statsmodels/gam/generalized_additive_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/generalized_additive_model.py | BSD-3-Clause |
def plot_partial(self, smooth_index, plot_se=True, cpr=False,
include_constant=True, ax=None):
"""plot the contribution of a smooth term to the linear prediction
Parameters
----------
smooth_index : int
index of the smooth term within list of smooth terms
plot_se : bool
If plot_se is true, then the confidence interval for the linear
prediction will be added to the plot.
cpr : bool
If cpr (component plus residual) is true, then a scatter plot of
the partial working residuals will be added to the plot.
include_constant : bool
If true, then the estimated intercept is added to the prediction
and its standard errors. This avoids that the confidence interval
has zero width at the imposed identification constraint, e.g.
either at a reference point or at the mean.
ax : None or matplotlib axis instance
If ax is not None, then the plot will be added to it.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise, the Figure to which
`ax` is connected.
"""
from statsmodels.graphics.utils import _import_mpl, create_mpl_ax
_import_mpl()
variable = smooth_index
y_est, se = self.partial_values(variable,
include_constant=include_constant)
smoother = self.model.smoother
x = smoother.smoothers[variable].x
sort_index = np.argsort(x)
x = x[sort_index]
y_est = y_est[sort_index]
se = se[sort_index]
fig, ax = create_mpl_ax(ax)
if cpr:
# TODO: resid_response does not make sense with nonlinear link
# use resid_working ?
residual = self.resid_working[sort_index]
cpr_ = y_est + residual
ax.scatter(x, cpr_, s=4)
ax.plot(x, y_est, c='blue', lw=2)
if plot_se:
ax.plot(x, y_est + 1.96 * se, '-', c='blue')
ax.plot(x, y_est - 1.96 * se, '-', c='blue')
ax.set_xlabel(smoother.smoothers[variable].variable_name)
return fig | plot the contribution of a smooth term to the linear prediction | plot_partial | python | statsmodels/statsmodels | statsmodels/gam/generalized_additive_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/generalized_additive_model.py | BSD-3-Clause |
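Continuing the hedged sketch shown after ``get_prediction`` above, the fitted smooth component can be plotted with pointwise confidence bands and partial residuals (requires matplotlib; the output filename is illustrative).

```python
# continuing the hedged sketch above: res is the fitted GLMGam result
fig = res.plot_partial(0, cpr=True, plot_se=True)
fig.savefig("smooth_term_0.png")  # illustrative output path
```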
def test_significance(self, smooth_index):
"""hypothesis test that a smooth component is zero.
This calls `wald_test` to compute the hypothesis test, but uses
effective degrees of freedom.
Parameters
----------
smooth_index : int
index of the smooth term within list of smooth terms
Returns
-------
wald_test : ContrastResults instance
the results instance created by `wald_test`
"""
variable = smooth_index
smoother = self.model.smoother
start_idx = self.model.k_exog_linear
k_params = len(self.params)
# a bit messy, we need first index plus length of smooth term
mask = smoother.mask[variable]
k_constraints = mask.sum()
idx = start_idx + np.nonzero(mask)[0][0]
constraints = np.eye(k_constraints, k_params, idx)
df_constraints = self.edf[idx: idx + k_constraints].sum()
return self.wald_test(constraints, df_constraints=df_constraints) | hypothesis test that a smooth component is zero. | test_significance | python | statsmodels/statsmodels | statsmodels/gam/generalized_additive_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/generalized_additive_model.py | BSD-3-Clause |
def get_hat_matrix_diag(self, observed=True, _axis=1):
"""
Compute the diagonal of the hat matrix
Parameters
----------
observed : bool
If true, then observed hessian is used in the hat matrix
computation. If false, then the expected hessian is used.
In the case of a canonical link function both are the same.
This is only relevant for models that implement both observed
and expected Hessian, which is currently only GLM. Other
models only use the observed Hessian.
_axis : int
This is mainly for internal use. By default it returns the usual
diagonal of the hat matrix. If _axis is zero, then the result
corresponds to the effective degrees of freedom, ``edf`` for each
column of exog.
Returns
-------
hat_matrix_diag : ndarray
The diagonal of the hat matrix computed from the observed
or expected hessian.
"""
weights = self.model.hessian_factor(self.params, scale=self.scale,
observed=observed)
wexog = np.sqrt(weights)[:, None] * self.model.exog
# we can use inverse hessian directly instead of computing it from
# WLS/IRLS as in GLM
# TODO: does `normalized_cov_params * scale` work in all cases?
# this avoids recomputing hessian, check when used for other models.
hess_inv = self.normalized_cov_params * self.scale
# this is in GLM equivalent to the more generic and direct
# hess_inv = np.linalg.inv(-self.model.hessian(self.params))
hd = (wexog * hess_inv.dot(wexog.T).T).sum(axis=_axis)
return hd | Compute the diagonal of the hat matrix | get_hat_matrix_diag | python | statsmodels/statsmodels | statsmodels/gam/generalized_additive_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/generalized_additive_model.py | BSD-3-Clause |
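For an unpenalized weighted least squares fit the expression above reduces to the classical hat-matrix diagonal, whose sum equals the number of parameters. A standalone numeric check with made-up data, where ``hess_inv`` plays the role of ``normalized_cov_params * scale``:

```python
import numpy as np

# standalone check with made-up data; no penalty, so the trace equals k_params
rng = np.random.default_rng(0)
X = np.column_stack([np.ones(50), rng.standard_normal(50)])
w = rng.uniform(0.5, 2.0, size=50)               # observation weights
hess_inv = np.linalg.inv((X * w[:, None]).T @ X)
wexog = np.sqrt(w)[:, None] * X
hd = (wexog * hess_inv.dot(wexog.T).T).sum(axis=1)
print(np.isclose(hd.sum(), X.shape[1]))          # True: trace of hat matrix = 2
```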
def _check_alpha(self, alpha):
"""check and convert alpha to required list format
Parameters
----------
alpha : scalar, list or array_like
penalization weight
Returns
-------
alpha : list
penalization weight, list with length equal to the number of
smooth terms
"""
if not isinstance(alpha, Iterable):
alpha = [alpha] * len(self.smoother.smoothers)
elif not isinstance(alpha, list):
# we want alpha to be a list
alpha = list(alpha)
return alpha | check and convert alpha to required list format | _check_alpha | python | statsmodels/statsmodels | statsmodels/gam/generalized_additive_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/generalized_additive_model.py | BSD-3-Clause |
def fit(self, start_params=None, maxiter=1000, method='pirls', tol=1e-8,
scale=None, cov_type='nonrobust', cov_kwds=None, use_t=None,
full_output=True, disp=False, max_start_irls=3, **kwargs):
"""estimate parameters and create instance of GLMGamResults class
Parameters
----------
most parameters are the same as for GLM
method : optimization method
The special optimization method is "pirls" which uses a penalized
version of IRLS. Other methods are gradient optimizers as used in
base.model.LikelihoodModel.
Returns
-------
res : instance of wrapped GLMGamResults
"""
# TODO: temporary hack to remove attribute
# formula also might be attached which in inherited from_formula
# causes problems in predict
if hasattr(self, 'formula'):
self.formula_linear = self.formula
del self.formula
# TODO: alpha not allowed yet, but is in `_fit_pirls`
# alpha = self._check_alpha()
if method.lower() in ['pirls', 'irls']:
res = self._fit_pirls(self.alpha, start_params=start_params,
maxiter=maxiter, tol=tol, scale=scale,
cov_type=cov_type, cov_kwds=cov_kwds,
use_t=use_t, **kwargs)
else:
if max_start_irls > 0 and (start_params is None):
res = self._fit_pirls(self.alpha, start_params=start_params,
maxiter=max_start_irls, tol=tol,
scale=scale,
cov_type=cov_type, cov_kwds=cov_kwds,
use_t=use_t, **kwargs)
start_params = res.params
del res
res = super().fit(start_params=start_params,
maxiter=maxiter, method=method,
tol=tol, scale=scale,
cov_type=cov_type, cov_kwds=cov_kwds,
use_t=use_t,
full_output=full_output, disp=disp,
max_start_irls=0,
**kwargs)
return res | estimate parameters and create instance of GLMGamResults class | fit | python | statsmodels/statsmodels | statsmodels/gam/generalized_additive_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/generalized_additive_model.py | BSD-3-Clause |
def _fit_pirls(self, alpha, start_params=None, maxiter=100, tol=1e-8,
scale=None, cov_type='nonrobust', cov_kwds=None, use_t=None,
weights=None):
"""fit model with penalized reweighted least squares
"""
# TODO: this currently modifies several attributes
# self.scale, self.scaletype, self.mu, self.weights
# self.data_weights,
# and possibly self._offset_exposure
# several of those might not be necessary, e.g. mu and weights
# alpha = alpha * len(y) * self.scale / 100
# TODO: we need to rescale alpha
endog = self.endog
wlsexog = self.exog # smoother.basis
spl_s = self.penal.penalty_matrix(alpha=alpha)
nobs, n_columns = wlsexog.shape
# TODO what are these values?
if weights is None:
self.data_weights = np.array([1.] * nobs)
else:
self.data_weights = weights
if not hasattr(self, '_offset_exposure'):
self._offset_exposure = 0
self.scaletype = scale
# TODO: check default scale types
# self.scaletype = 'dev'
# during iteration
self.scale = 1
if start_params is None:
mu = self.family.starting_mu(endog)
lin_pred = self.family.predict(mu)
else:
lin_pred = np.dot(wlsexog, start_params) + self._offset_exposure
mu = self.family.fitted(lin_pred)
dev = self.family.deviance(endog, mu)
history = dict(params=[None, start_params], deviance=[np.inf, dev])
converged = False
criterion = history['deviance']
# This special case is used to get the likelihood for a specific
# params vector.
if maxiter == 0:
mu = self.family.fitted(lin_pred)
self.scale = self.estimate_scale(mu)
wls_results = lm.RegressionResults(self, start_params, None)
iteration = 0
for iteration in range(maxiter):
# TODO: is this equivalent to point 1 of page 136:
# w = 1 / (V(mu) * g'(mu)) ?
self.weights = self.data_weights * self.family.weights(mu)
# TODO: is this equivalent to point 1 of page 136:
# z = g(mu)(y - mu) + X beta ?
wlsendog = (lin_pred + self.family.link.deriv(mu) * (endog - mu)
- self._offset_exposure)
# this defines the augmented matrix point 2a on page 136
wls_results = penalized_wls(wlsendog, wlsexog, spl_s, self.weights)
lin_pred = np.dot(wlsexog, wls_results.params).ravel()
lin_pred += self._offset_exposure
mu = self.family.fitted(lin_pred)
# We do not need to update scale in GLM/LEF models
# We might need it in dispersion models.
# self.scale = self.estimate_scale(mu)
history = self._update_history(wls_results, mu, history)
if endog.squeeze().ndim == 1 and np.allclose(mu - endog, 0):
msg = "Perfect separation detected, results not available"
raise PerfectSeparationError(msg)
# TODO need atol, rtol
# args of _check_convergence: (criterion, iteration, atol, rtol)
converged = _check_convergence(criterion, iteration, tol, 0)
if converged:
break
self.mu = mu
self.scale = self.estimate_scale(mu)
glm_results = GLMGamResults(self, wls_results.params,
wls_results.normalized_cov_params,
self.scale,
cov_type=cov_type, cov_kwds=cov_kwds,
use_t=use_t)
glm_results.method = "PIRLS"
history['iteration'] = iteration + 1
glm_results.fit_history = history
glm_results.converged = converged
return GLMGamResultsWrapper(glm_results) | fit model with penalized reweighted least squares | _fit_pirls | python | statsmodels/statsmodels | statsmodels/gam/generalized_additive_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/generalized_additive_model.py | BSD-3-Clause |
def select_penweight(self, criterion='aic', start_params=None,
start_model_params=None,
method='basinhopping', **fit_kwds):
"""find alpha by minimizing results criterion
The objective for the minimization can be results attributes like
``gcv``, ``aic`` or ``bic`` where the latter are based on effective
degrees of freedom.
Warning: In many cases the optimization might converge to a local
optimum or near optimum. Different start_params or using a global
optimizer is recommended; the default is basinhopping.
Parameters
----------
criterion='aic'
name of results attribute to be minimized.
Default is 'aic', other options are 'gcv', 'cv' or 'bic'.
start_params : None or array
starting parameters for alpha in the penalization weight
minimization. The parameters are internally exponentiated and
the minimization is with respect to ``exp(alpha)``
start_model_params : None or array
starting parameter for the ``model._fit_pirls``.
method : 'basinhopping', 'nm' or 'minimize'
'basinhopping' and 'nm' directly use the underlying scipy.optimize
functions `basinhopping` and `fmin`. 'minimize' provides access
to the high level interface, `scipy.optimize.minimize`.
fit_kwds : keyword arguments
additional keyword arguments will be used in the call to the
scipy optimizer. Which keywords are supported depends on the
scipy optimization function.
Returns
-------
alpha : ndarray
penalization parameter found by minimizing the criterion.
Note that this can be only a local (near) optimum.
fit_res : tuple
results returned by the scipy optimization routine. The
parameters in the optimization problem are `log(alpha)`
history : dict
history of calls to pirls and contains alpha, the fit
criterion and the parameters to which pirls converged to for the
given alpha.
Notes
-----
In the test cases Nelder-Mead and bfgs often converge to local optima,
see also https://github.com/statsmodels/statsmodels/issues/5381.
This does not use any analytical derivatives for the criterion
minimization.
Status: experimental, It is possible that defaults change if there
is a better way to find a global optimum. API (e.g. type of return)
might also change.
"""
# copy attributes that are changed, so we can reset them
scale_keep = self.scale
scaletype_keep = self.scaletype
# TODO: use .copy() method when available for all types
alpha_keep = copy.copy(self.alpha)
if start_params is None:
start_params = np.zeros(self.k_smooths)
else:
start_params = np.log(1e-20 + start_params)
history = {}
history['alpha'] = []
history['params'] = [start_model_params]
history['criterion'] = []
def fun(p):
a = np.exp(p)
res_ = self._fit_pirls(start_params=history['params'][-1],
alpha=a)
history['alpha'].append(a)
history['params'].append(np.asarray(res_.params))
return getattr(res_, criterion)
if method == 'nm':
kwds = dict(full_output=True, maxiter=1000, maxfun=2000)
kwds.update(fit_kwds)
fit_res = optimize.fmin(fun, start_params, **kwds)
opt = fit_res[0]
elif method == 'basinhopping':
kwds = dict(minimizer_kwargs={'method': 'Nelder-Mead',
'options': {'maxiter': 100, 'maxfev': 500}},
niter=10)
kwds.update(fit_kwds)
fit_res = optimize.basinhopping(fun, start_params, **kwds)
opt = fit_res.x
elif method == 'minimize':
fit_res = optimize.minimize(fun, start_params, **fit_kwds)
opt = fit_res.x
else:
raise ValueError('method not recognized')
del history['params'][0] # remove the model start_params
alpha = np.exp(opt)
# reset attributes that have or might have changed
self.scale = scale_keep
self.scaletype = scaletype_keep
self.alpha = alpha_keep
return alpha, fit_res, history | find alpha by minimizing results criterion | select_penweight | python | statsmodels/statsmodels | statsmodels/gam/generalized_additive_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/generalized_additive_model.py | BSD-3-Clause |
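Continuing the hedged GLMGam sketch from earlier (``gam``, ``y`` and ``bs`` as defined there), the penalty weight can be searched for automatically. The criterion and optimizer below are the documented defaults; the search can take a while and may stop at a local optimum, as the notes warn.

```python
# continuing the hedged sketch above: gam is the GLMGam instance built earlier
alpha_opt, opt_res, history = gam.select_penweight(criterion='aic')
res_opt = GLMGam(y, smoother=bs, alpha=alpha_opt).fit()
print(alpha_opt)
```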
def select_penweight_kfold(self, alphas=None, cv_iterator=None, cost=None,
k_folds=5, k_grid=11):
"""find alphas by k-fold cross-validation
Warning: This estimates ``k_folds`` models for each point in the
grid of alphas.
Parameters
----------
alphas : None or list of arrays
cv_iterator : instance
instance of a cross-validation iterator, by default this is a
KFold instance
cost : function
default is mean squared error. The cost function to evaluate the
prediction error for the left out sample. This should take two
arrays as argument and return one float.
k_folds : int
number of folds if default Kfold iterator is used.
This is ignored if ``cv_iterator`` is not None.
Returns
-------
alpha_cv : list of float
Best alpha in grid according to cross-validation
res_cv : instance of MultivariateGAMCVPath
The instance was used for cross-validation and holds the results
Notes
-----
The default alphas are defined as
``alphas = [np.logspace(0, 7, k_grid) for _ in range(k_smooths)]``
"""
if cost is None:
def cost(x1, x2):
return np.linalg.norm(x1 - x2) / len(x1)
if alphas is None:
alphas = [np.logspace(0, 7, k_grid) for _ in range(self.k_smooths)]
if cv_iterator is None:
cv_iterator = KFold(k_folds=k_folds, shuffle=True)
gam_cv = MultivariateGAMCVPath(smoother=self.smoother, alphas=alphas,
gam=GLMGam, cost=cost, endog=self.endog,
exog=self.exog_linear,
cv_iterator=cv_iterator)
gam_cv_res = gam_cv.fit()
return gam_cv_res.alpha_cv, gam_cv_res | find alphas by k-fold cross-validation | select_penweight_kfold | python | statsmodels/statsmodels | statsmodels/gam/generalized_additive_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/generalized_additive_model.py | BSD-3-Clause |
def penalized_wls(endog, exog, penalty_matrix, weights):
"""weighted least squares with quadratic penalty
Parameters
----------
endog : ndarray
response or endogenous variable
exog : ndarray
design matrix, matrix of exogenous or explanatory variables
penalty_matrix : ndarray, 2-Dim square
penalty matrix for quadratic penalization. Note, the penalty_matrix
is multiplied by two to match non-pirls fitting methods.
weights : ndarray
weights for WLS
Returns
-------
results : Results instance of WLS
"""
y, x, s = endog, exog, penalty_matrix
# TODO: I do not understand why I need 2 * s
aug_y, aug_x, aug_weights = make_augmented_matrix(y, x, 2 * s, weights)
wls_results = lm.WLS(aug_y, aug_x, aug_weights).fit()
# TODO: use MinimalWLS during iterations, less overhead
# However, MinimalWLS does not return normalized_cov_params
# which we need at the end of the iterations
# call would be
# wls_results = reg_tools._MinimalWLS(aug_y, aug_x, aug_weights).fit()
wls_results.params = wls_results.params.ravel()
return wls_results | weighted least squares with quadratic penalty | penalized_wls | python | statsmodels/statsmodels | statsmodels/gam/generalized_additive_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/generalized_additive_model.py | BSD-3-Clause |
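The augmented-matrix trick above is equivalent to solving the penalized normal equations ``(X'WX + 2S) beta = X'Wy`` (the factor 2 reflects the doubling noted in the docstring). A numeric check with made-up data, assuming ``penalized_wls`` is importable from the module in the path column:

```python
import numpy as np
from statsmodels.gam.generalized_additive_model import penalized_wls

# made-up data for the equivalence check
rng = np.random.default_rng(0)
X = rng.standard_normal((40, 3))
y = X @ np.array([1.0, -2.0, 0.5]) + rng.standard_normal(40)
w = rng.uniform(0.5, 2.0, size=40)
S = 0.3 * np.eye(3)

res = penalized_wls(y, X, S, w)
beta_direct = np.linalg.solve((X * w[:, None]).T @ X + 2 * S,
                              (X * w[:, None]).T @ y)
print(np.allclose(res.params, beta_direct))  # True
```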
def make_augmented_matrix(endog, exog, penalty_matrix, weights):
"""augment endog, exog and weights with stochastic restriction matrix
Parameters
----------
endog : ndarray
response or endogenous variable
exog : ndarray
design matrix, matrix of exogenous or explanatory variables
penalty_matrix : ndarray, 2-Dim square
penalty matrix for quadratic penalization
weights : ndarray
weights for WLS
Returns
-------
endog_aug : ndarray
augmented response variable
exog_aug : ndarray
augmented design matrix
weights_aug : ndarray
augmented weights for WLS
"""
y, x, s = endog, exog, penalty_matrix
nobs = x.shape[0]
# TODO: needs full because of broadcasting with weights
# check what weights should be doing
rs = matrix_sqrt(s)
x1 = np.vstack([x, rs]) # augmented x
n_samples_x1 = x1.shape[0]
y1 = np.array([0.] * n_samples_x1) # augmented y
y1[:nobs] = y
id1 = np.array([1.] * rs.shape[0])
w1 = np.concatenate([weights, id1])
return y1, x1, w1 | augment endog, exog and weights with stochastic restriction matrix | make_augmented_matrix | python | statsmodels/statsmodels | statsmodels/gam/generalized_additive_model.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/generalized_additive_model.py | BSD-3-Clause |
def polynomial_sample_data():
"""A polynomial of degree 4
poly = ax^4 + bx^3 + cx^2 + dx + e
second der = 12ax^2 + 6bx + 2c
integral from -1 to 1 of second der^2 is
(288 a^2)/5 + 32 a c + 8 (3 b^2 + c^2)
the gradient of the integral is der
[576*a/5 + 32 * c, 48*b, 32*a + 16*c, 0, 0]
Returns
-------
poly : smoother instance
y : ndarray
generated function values, demeaned
"""
n = 10000
x = np.linspace(-1, 1, n)
y = 2 * x ** 3 - x
y -= y.mean()
degree = [4]
pol = PolynomialSmoother(x, degree)
return pol, y | A polynomial of degree 4
poly = ax^4 + bx^3 + cx^2 + dx + e
second der = 12ax^2 + 6bx + 2c
integral from -1 to 1 of second der^2 is
(288 a^2)/5 + 32 a c + 8 (3 b^2 + c^2)
the gradient of the integral is der
[576*a/5 + 32 * c, 48*b, 32*a + 16*c, 0, 0]
Returns
-------
poly : smoother instance
y : ndarray
generated function values, demeaned | polynomial_sample_data | python | statsmodels/statsmodels | statsmodels/gam/tests/test_gam.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/tests/test_gam.py | BSD-3-Clause |
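The closed-form integral quoted in the docstring can be sanity-checked numerically; this sketch uses arbitrary coefficients and a crude Riemann sum.

import numpy as np

a, b, c = 0.3, -0.2, 0.5
xs = np.linspace(-1, 1, 400001)
second_der = 12 * a * xs**2 + 6 * b * xs + 2 * c
numeric = (second_der**2).mean() * 2.0   # interval [-1, 1] has length 2
closed_form = 288 * a**2 / 5 + 32 * a * c + 8 * (3 * b**2 + c**2)
print(numeric, closed_form)   # both ~12.944, agreeing to about 1e-4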
def test_gam_penalty():
"""
test the func method of the gam penalty
:return:
"""
pol, y = polynomial_sample_data()
univ_pol = pol.smoothers[0]
alpha = 1
gp = UnivariateGamPenalty(alpha=alpha, univariate_smoother=univ_pol)
for _ in range(10):
params = np.random.randint(-2, 2, 4)
gp_score = gp.func(params)
itg = integral(params)
        assert_allclose(gp_score, itg, atol=1.e-1) | Test the func method of the gam penalty. | test_gam_penalty | python | statsmodels/statsmodels | statsmodels/gam/tests/test_gam.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/tests/test_gam.py | BSD-3-Clause
def _split_train_test_smoothers(x, smoother, train_index, test_index):
"""split smoothers in test and train sets and create GenericSmoothers
Note: this does not take exog_linear into account
"""
train_smoothers = []
test_smoothers = []
for smoother in smoother.smoothers:
train_basis = smoother.basis[train_index]
train_der_basis = smoother.der_basis[train_index]
train_der2_basis = smoother.der2_basis[train_index]
train_cov_der2 = smoother.cov_der2
# TODO: Double check this part. cov_der2 is calculated with all data
train_x = smoother.x[train_index]
train_smoothers.append(
UnivariateGenericSmoother(
train_x, train_basis, train_der_basis, train_der2_basis,
train_cov_der2, smoother.variable_name + ' train'))
        test_basis = smoother.basis[test_index]
        test_der_basis = smoother.der_basis[test_index]
        test_der2_basis = smoother.der2_basis[test_index]
        test_cov_der2 = smoother.cov_der2
        # TODO: Double check this part. cov_der2 is calculated with all data
        test_x = smoother.x[test_index]
        test_smoothers.append(
            UnivariateGenericSmoother(
                test_x, test_basis, test_der_basis, test_der2_basis,
                test_cov_der2, smoother.variable_name + ' test'))
train_multivariate_smoothers = GenericSmoothers(x[train_index],
train_smoothers)
test_multivariate_smoothers = GenericSmoothers(x[test_index],
test_smoothers)
return train_multivariate_smoothers, test_multivariate_smoothers | split smoothers in test and train sets and create GenericSmoothers
Note: this does not take exog_linear into account | _split_train_test_smoothers | python | statsmodels/statsmodels | statsmodels/gam/gam_cross_validation/gam_cross_validation.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/gam_cross_validation/gam_cross_validation.py | BSD-3-Clause |
def split(self, X, y=None, label=None):
"""yield index split into train and test sets
"""
# TODO: X and y are redundant, we only need nobs
nobs = X.shape[0]
index = np.array(range(nobs))
if self.shuffle:
np.random.shuffle(index)
folds = np.array_split(index, self.k_folds)
for fold in folds:
test_index = np.zeros(nobs, dtype=bool)
test_index[fold] = True
train_index = np.logical_not(test_index)
yield train_index, test_index | yield index split into train and test sets | split | python | statsmodels/statsmodels | statsmodels/gam/gam_cross_validation/cross_validators.py | https://github.com/statsmodels/statsmodels/blob/master/statsmodels/gam/gam_cross_validation/cross_validators.py | BSD-3-Clause |
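Minimal usage sketch of the fold generator; the constructor signature (k_folds, shuffle) is assumed from the class shown here.

import numpy as np
from statsmodels.gam.gam_cross_validation.cross_validators import KFold

X = np.arange(20).reshape(10, 2)
cv = KFold(k_folds=5, shuffle=False)
for train_idx, test_idx in cv.split(X):
    print(int(test_idx.sum()), "test observations in this fold")   # 2 each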
def execute_nb(src, dst, allow_errors=False, timeout=1000, kernel_name=None):
"""
Execute notebook in `src` and write the output to `dst`
Parameters
----------
src, dst: str
path to notebook
allow_errors: bool
timeout: int
kernel_name: str
        defaults to the value set in notebook metadata
Returns
-------
dst: str
"""
with open(src, encoding="utf-8") as f:
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(
        allow_errors=allow_errors, timeout=timeout, kernel_name=kernel_name
)
ep.preprocess(nb, {"metadata": {"path": SOURCE_DIR}})
with open(dst, "w", encoding="utf-8") as f:
nbformat.write(nb, f)
return dst | Execute notebook in `src` and write the output to `dst`
Parameters
----------
src, dst: str
path to notebook
allow_errors: bool
timeout: int
kernel_name: str
        defaults to the value set in notebook metadata
Returns
-------
dst: str | execute_nb | python | statsmodels/statsmodels | tools/nbgenerate.py | https://github.com/statsmodels/statsmodels/blob/master/tools/nbgenerate.py | BSD-3-Clause |
def convert(src, dst, to="rst"):
"""
Convert a notebook `src`.
Parameters
----------
src, dst: str
filepaths
to: {'rst', 'html'}
format to export to
"""
dispatch = {"rst": RSTExporter, "html": HTMLExporter}
exporter = dispatch[to.lower()]()
(body, resources) = exporter.from_filename(src)
with open(dst, "w", encoding="utf-8") as f:
f.write(body)
return dst | Convert a notebook `src`.
Parameters
----------
src, dst: str
filepaths
to: {'rst', 'html'}
format to export to | convert | python | statsmodels/statsmodels | tools/nbgenerate.py | https://github.com/statsmodels/statsmodels/blob/master/tools/nbgenerate.py | BSD-3-Clause |
def is_newer(file1, file2, strict=True):
"""
Determine if file1 has been modified after file2
Parameters
----------
file1 : str
File path. May not exist, in which case False is returned.
    file2 : str
        File path. Must exist.
strict : bool
Use strict inequality test (>). If False, then returns True for files
with the same modified time.
Returns
-------
newer : bool
        True if file1 is strictly newer than file2
"""
try:
t1 = os.path.getmtime(file1)
t2 = os.path.getmtime(file2)
except FileNotFoundError:
return False
if strict:
return t1 > t2
return t1 >= t2 | Determine if file1 has been modified after file2
Parameters
----------
file1 : str
File path. May not exist, in which case False is returned.
    file2 : str
        File path. Must exist.
strict : bool
Use strict inequality test (>). If False, then returns True for files
with the same modified time.
Returns
-------
newer : bool
        True if file1 is strictly newer than file2 | is_newer | python | statsmodels/statsmodels | tools/export_notebooks_to_python.py | https://github.com/statsmodels/statsmodels/blob/master/tools/export_notebooks_to_python.py | BSD-3-Clause
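A small illustration with two throwaway files (hypothetical names), using the function defined above.

import time

with open("older.txt", "w") as f:
    f.write("first")
time.sleep(0.01)   # pause so the second file gets a later mtime
with open("newer.txt", "w") as f:
    f.write("second")
print(is_newer("newer.txt", "older.txt"))     # True
print(is_newer("missing.txt", "older.txt"))   # False: a missing file is never newer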
def import_submodules(module: ModuleType):
"""Import all submodules of a module, recursively."""
for loader, module_name, is_pkg in pkgutil.walk_packages(
module.__path__, module.__name__ + "."
):
blacklisted = any([f".{bl}." in module_name for bl in BLACKLIST])
if blacklisted:
continue
mod = importlib.import_module(module_name)
if mod.__name__.endswith(".api"):
api_modules.append(mod) | Import all submodules of a module, recursively. | import_submodules | python | statsmodels/statsmodels | tools/check_api.py | https://github.com/statsmodels/statsmodels/blob/master/tools/check_api.py | BSD-3-Clause |
def iter_subclasses(cls, _seen=None, template_classes=[]):
"""
Generator to iterate over all the subclasses of Model. Based on
http://code.activestate.com/recipes/576949-find-all-subclasses-of-a-given-class/
Yields class
"""
if not isinstance(cls, type):
raise TypeError(
"itersubclasses must be called with "
"new-style classes, not %.100r" % cls
)
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in subs:
if sub not in _seen and sub.__name__ not in template_classes:
_seen.add(sub)
# we do not want to yield the templates, but we do want to
# recurse on them
yield sub
for sub in iter_subclasses(sub, _seen, template_classes):
yield sub | Generator to iterate over all the subclasses of Model. Based on
http://code.activestate.com/recipes/576949-find-all-subclasses-of-a-given-class/
Yields class | iter_subclasses | python | statsmodels/statsmodels | tools/generate_formula_api.py | https://github.com/statsmodels/statsmodels/blob/master/tools/generate_formula_api.py | BSD-3-Clause |
def error(code, **kwargs):
"""
Return a tuple with the error code and the message with variables replaced.
This is syntactic sugar so instead of:
- `('EX02', ERROR_MSGS['EX02'].format(doctest_log=log))`
We can simply use:
- `error('EX02', doctest_log=log)`
Parameters
----------
code : str
Error code.
**kwargs
Values for the variables in the error messages
Returns
-------
code : str
Error code.
message : str
Error message with variables replaced.
"""
return (code, ERROR_MSGS[code].format(**kwargs)) | Return a tuple with the error code and the message with variables replaced.
This is syntactic sugar so instead of:
- `('EX02', ERROR_MSGS['EX02'].format(doctest_log=log))`
We can simply use:
- `error('EX02', doctest_log=log)`
Parameters
----------
code : str
Error code.
**kwargs
Values for the variables in the error messages
Returns
-------
code : str
Error code.
message : str
Error message with variables replaced. | error | python | statsmodels/statsmodels | tools/validate_docstrings.py | https://github.com/statsmodels/statsmodels/blob/master/tools/validate_docstrings.py | BSD-3-Clause |
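Hypothetical call, assuming ERROR_MSGS contains an 'EX02' template with a {doctest_log} placeholder as the docstring describes.

code, msg = error("EX02", doctest_log="2 items had failures")
print(code)   # 'EX02'
print(msg)    # the EX02 template with the doctest log substituted in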
def get_api_items(api_doc_fd):
"""
Yield information about all public API items.
Parse api.rst file from the documentation, and extract all the functions,
methods, classes, attributes... This should include all pandas public API.
Parameters
----------
api_doc_fd : file descriptor
A file descriptor of the API documentation page, containing the table
of contents with all the public API.
Yields
------
name : str
        The name of the object (e.g. 'pandas.Series.str.upper').
func : function
The object itself. In most cases this will be a function or method,
but it can also be classes, properties, cython objects...
section : str
The name of the section in the API page where the object item is
located.
subsection : str
The name of the subsection in the API page where the object item is
located.
"""
current_module = "statsmodels"
previous_line = current_section = current_subsection = ""
position = None
for line in api_doc_fd:
line = line.strip()
if len(line) == len(previous_line):
if set(line) == set("-"):
current_section = previous_line
continue
if set(line) == set("~"):
current_subsection = previous_line
continue
if line.startswith(".. currentmodule::"):
current_module = line.replace(".. currentmodule::", "").strip()
continue
if line == ".. autosummary::":
position = "autosummary"
continue
if position == "autosummary":
if line == "":
position = "items"
continue
if position == "items":
if line == "":
position = None
continue
item = line.strip()
if item.startswith("~statsmodels."):
path = item.replace("~", "").split(".")
item = path[-1]
current_module = ".".join(path[:-1])
func = importlib.import_module(current_module)
for part in item.split("."):
func = getattr(func, part)
yield (
".".join([current_module, item]),
func,
current_section,
current_subsection,
)
previous_line = line | Yield information about all public API items.
Parse api.rst file from the documentation, and extract all the functions,
methods, classes, attributes... This should include all pandas public API.
Parameters
----------
api_doc_fd : file descriptor
A file descriptor of the API documentation page, containing the table
of contents with all the public API.
Yields
------
name : str
        The name of the object (e.g. 'pandas.Series.str.upper').
func : function
The object itself. In most cases this will be a function or method,
but it can also be classes, properties, cython objects...
section : str
The name of the section in the API page where the object item is
located.
subsection : str
The name of the subsection in the API page where the object item is
located. | get_api_items | python | statsmodels/statsmodels | tools/validate_docstrings.py | https://github.com/statsmodels/statsmodels/blob/master/tools/validate_docstrings.py | BSD-3-Clause |
def _load_obj(name):
"""
Import Python object from its name as string.
Parameters
----------
name : str
Object name to import (e.g. pandas.Series.str.upper)
Returns
-------
object
Python object that can be a class, method, function...
Examples
--------
>>> Docstring._load_obj('pandas.Series')
<class 'pandas.core.series.Series'>
"""
for maxsplit in range(1, name.count(".") + 1):
func_name_split = name.rsplit(".", maxsplit)
module, *func_parts = func_name_split
try:
obj = importlib.import_module(module)
except ImportError:
pass
else:
continue
if "obj" not in locals():
raise ImportError(
"No module can be imported " 'from "{}"'.format(name)
)
for part in func_parts:
obj = getattr(obj, part)
return obj | Import Python object from its name as string.
Parameters
----------
name : str
Object name to import (e.g. pandas.Series.str.upper)
Returns
-------
object
Python object that can be a class, method, function...
Examples
--------
>>> Docstring._load_obj('pandas.Series')
<class 'pandas.core.series.Series'> | _load_obj | python | statsmodels/statsmodels | tools/validate_docstrings.py | https://github.com/statsmodels/statsmodels/blob/master/tools/validate_docstrings.py | BSD-3-Clause |
def _to_original_callable(obj):
"""
Find the Python object that contains the source code of the object.
This is useful to find the place in the source code (file and line
number) where a docstring is defined. It does not currently work for
all cases, but it should help find some (properties...).
"""
while True:
if inspect.isfunction(obj) or inspect.isclass(obj):
f = inspect.getfile(obj)
if f.startswith("<") and f.endswith(">"):
return None
return obj
if inspect.ismethod(obj):
obj = obj.__func__
elif isinstance(obj, functools.partial):
obj = obj.func
elif isinstance(obj, property):
obj = obj.fget
else:
return None | Find the Python object that contains the source code of the object.
This is useful to find the place in the source code (file and line
number) where a docstring is defined. It does not currently work for
all cases, but it should help find some (properties...). | _to_original_callable | python | statsmodels/statsmodels | tools/validate_docstrings.py | https://github.com/statsmodels/statsmodels/blob/master/tools/validate_docstrings.py | BSD-3-Clause |
def source_file_name(self):
"""
File name where the object is implemented (e.g. pandas/core/frame.py).
"""
try:
fname = inspect.getsourcefile(self.code_obj)
except TypeError:
# In some cases the object is something complex like a cython
            # object that can't be easily introspected. And it's better to
            # return the source code file of the object as None than to crash
pass
else:
if fname:
fname = os.path.relpath(fname, BASE_PATH)
return fname | File name where the object is implemented (e.g. pandas/core/frame.py). | source_file_name | python | statsmodels/statsmodels | tools/validate_docstrings.py | https://github.com/statsmodels/statsmodels/blob/master/tools/validate_docstrings.py | BSD-3-Clause |
def source_file_def_line(self):
"""
Number of line where the object is defined in its file.
"""
try:
return inspect.getsourcelines(self.code_obj)[-1]
except (OSError, TypeError):
# In some cases the object is something complex like a cython
            # object that can't be easily introspected. And it's better to
            # return the line number as None than to crash
pass | Number of line where the object is defined in its file. | source_file_def_line | python | statsmodels/statsmodels | tools/validate_docstrings.py | https://github.com/statsmodels/statsmodels/blob/master/tools/validate_docstrings.py | BSD-3-Clause |
def method_returns_something(self):
"""
Check if the docstrings method can return something.
        Bare returns, returns of None, and returns from nested functions are
        disregarded.
Returns
-------
bool
Whether the docstrings method can return something.
"""
def get_returns_not_on_nested_functions(node):
returns = [node] if isinstance(node, ast.Return) else []
for child in ast.iter_child_nodes(node):
# Ignore nested functions and its subtrees.
if not isinstance(child, ast.FunctionDef):
child_returns = get_returns_not_on_nested_functions(child)
returns.extend(child_returns)
return returns
tree = ast.parse(self.method_source).body
if tree:
returns = get_returns_not_on_nested_functions(tree[0])
return_values = [r.value for r in returns]
# Replace NameConstant nodes valued None for None.
for i, v in enumerate(return_values):
if isinstance(v, ast.NameConstant) and v.value is None:
return_values[i] = None
return any(return_values)
else:
return False | Check if the docstrings method can return something.
        Bare returns, returns of None, and returns from nested functions are
        disregarded.
Returns
-------
bool
Whether the docstrings method can return something. | method_returns_something | python | statsmodels/statsmodels | tools/validate_docstrings.py | https://github.com/statsmodels/statsmodels/blob/master/tools/validate_docstrings.py | BSD-3-Clause |
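A stand-alone sketch of the same AST walk: collect `return` nodes that are not inside nested functions (the method above then additionally drops returns of None).

import ast

src = """
def f(x):
    def helper():
        return 1      # ignored: inside a nested function
    if x:
        return None   # collected, later treated as returning nothing
    return x + 1      # collected
"""
func_node = ast.parse(src).body[0]

def collect_returns(node):
    found = [node] if isinstance(node, ast.Return) else []
    for child in ast.iter_child_nodes(node):
        if not isinstance(child, ast.FunctionDef):
            found.extend(collect_returns(child))
    return found

print(len(collect_returns(func_node)))   # 2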
def get_validation_data(doc):
"""
Validate the docstring.
Parameters
----------
doc : Docstring
A Docstring object with the given function name.
Returns
-------
tuple
errors : list of tuple
Errors occurred during validation.
warnings : list of tuple
Warnings occurred during validation.
examples_errs : str
Examples usage displayed along the error, otherwise empty string.
Notes
-----
    The error codes are defined as:
- First two characters: Section where the error happens:
* GL: Global (no section, like section ordering errors)
* SS: Short summary
* ES: Extended summary
* PR: Parameters
* RT: Returns
* YD: Yields
* RS: Raises
* WN: Warns
* SA: See Also
* NT: Notes
* RF: References
* EX: Examples
- Last two characters: Numeric error code inside the section
For example, EX02 is the second codified error in the Examples section
(which in this case is assigned to examples that do not pass the tests).
The error codes, their corresponding error messages, and the details on how
they are validated, are not documented more than in the source code of this
function.
"""
errs = []
wrns = []
if not doc.raw_doc:
errs.append(error("GL08"))
return errs, wrns, ""
if doc.start_blank_lines != 1 and not doc.single_line_docstring:
errs.append(error("GL01"))
if doc.end_blank_lines != 1 and not doc.single_line_docstring:
errs.append(error("GL02"))
if doc.double_blank_lines:
errs.append(error("GL03"))
mentioned_errs = doc.mentioned_private_classes
if mentioned_errs:
errs.append(
error("GL04", mentioned_private_classes=", ".join(mentioned_errs))
)
for line in doc.raw_doc.splitlines():
if re.match("^ *\t", line):
errs.append(error("GL05", line_with_tabs=line.lstrip()))
unexpected_sections = [
section
for section in doc.section_titles
if section not in ALLOWED_SECTIONS
]
for section in unexpected_sections:
errs.append(
error(
"GL06",
section=section,
allowed_sections=", ".join(ALLOWED_SECTIONS),
)
)
correct_order = [
section
for section in ALLOWED_SECTIONS
if section in doc.section_titles
]
if correct_order != doc.section_titles:
errs.append(error("GL07", correct_sections=", ".join(correct_order)))
if doc.deprecated and not doc.extended_summary.startswith(
".. deprecated:: "
):
errs.append(error("GL09"))
if not doc.summary:
errs.append(error("SS01"))
else:
if not doc.summary[0].isupper():
errs.append(error("SS02"))
if (
doc.summary[-1] != "."
and not doc.single_line_docstring
and ALLOW_SINGLE_LINE_DOCSTRINGS
):
errs.append(error("SS03"))
if doc.summary != doc.summary.lstrip():
errs.append(error("SS04"))
elif (
doc.is_function_or_method and doc.summary.split(" ")[0][-1] == "s"
):
errs.append(error("SS05"))
if doc.num_summary_lines > 1:
errs.append(error("SS06"))
if not doc.extended_summary:
wrns.append(("ES01", "No extended summary found"))
# PR01: Parameters not documented
# PR02: Unknown parameters
# PR03: Wrong parameters order
errs += doc.parameter_mismatches
for param in doc.doc_parameters:
if not param.startswith("*"): # Check can ignore var / kwargs
if not doc.parameter_type(param):
if ":" in param:
errs.append(error("PR10", param_name=param.split(":")[0]))
else:
errs.append(error("PR04", param_name=param))
else:
if doc.parameter_type(param)[-1] == ".":
errs.append(error("PR05", param_name=param))
common_type_errors = [
("integer", "int"),
("boolean", "bool"),
("string", "str"),
]
for wrong_type, right_type in common_type_errors:
if wrong_type in doc.parameter_type(param):
errs.append(
error(
"PR06",
param_name=param,
right_type=right_type,
wrong_type=wrong_type,
)
)
if not doc.parameter_desc(param):
errs.append(error("PR07", param_name=param))
else:
if not doc.parameter_desc(param)[0].isupper():
errs.append(error("PR08", param_name=param))
if doc.parameter_desc(param)[-1] != ".":
errs.append(error("PR09", param_name=param))
if doc.is_function_or_method:
if not doc.returns:
if doc.method_returns_something and not doc.no_return_whitelisted:
errs.append(error("RT01"))
else:
if len(doc.returns) == 1 and doc.returns[0].name:
errs.append(error("RT02"))
for name_or_type, type_, desc in doc.returns:
if not desc:
errs.append(error("RT03"))
else:
desc = " ".join(desc)
if not desc[0].isupper():
errs.append(error("RT04"))
if not desc.endswith("."):
errs.append(error("RT05"))
if not doc.yields and "yield" in doc.method_source:
errs.append(error("YD01"))
if not doc.see_also:
wrns.append(error("SA01"))
else:
for rel_name, rel_desc in doc.see_also.items():
if rel_desc:
if not rel_desc.endswith("."):
errs.append(error("SA02", reference_name=rel_name))
if not rel_desc[0].isupper():
errs.append(error("SA03", reference_name=rel_name))
else:
errs.append(error("SA04", reference_name=rel_name))
# TODO: Change to statsmodels
if rel_name.startswith("pandas."):
errs.append(
error(
"SA05",
reference_name=rel_name,
right_reference=rel_name[len("pandas.") :],
)
)
examples_errs = ""
if not doc.examples:
wrns.append(error("EX01"))
elif RUN_DOCTESTS:
examples_errs = doc.examples_errors
if examples_errs:
errs.append(error("EX02", doctest_log=examples_errs))
for err in doc.validate_pep8():
errs.append(
error(
"EX03",
error_code=err.error_code,
error_message=err.message,
times_happening=f" ({err.count} times)"
if err.count > 1
else "",
)
)
examples_source_code = "".join(doc.examples_source_code)
for wrong_import in ("numpy", "pandas"):
if f"import {wrong_import}" in examples_source_code:
errs.append(error("EX04", imported_library=wrong_import))
return errs, wrns, examples_errs | Validate the docstring.
Parameters
----------
doc : Docstring
A Docstring object with the given function name.
Returns
-------
tuple
errors : list of tuple
Errors occurred during validation.
warnings : list of tuple
Warnings occurred during validation.
examples_errs : str
Examples usage displayed along the error, otherwise empty string.
Notes
-----
    The error codes are defined as:
- First two characters: Section where the error happens:
* GL: Global (no section, like section ordering errors)
* SS: Short summary
* ES: Extended summary
* PR: Parameters
* RT: Returns
* YD: Yields
* RS: Raises
* WN: Warns
* SA: See Also
* NT: Notes
* RF: References
* EX: Examples
- Last two characters: Numeric error code inside the section
For example, EX02 is the second codified error in the Examples section
(which in this case is assigned to examples that do not pass the tests).
The error codes, their corresponding error messages, and the details on how
they are validated, are not documented more than in the source code of this
function. | get_validation_data | python | statsmodels/statsmodels | tools/validate_docstrings.py | https://github.com/statsmodels/statsmodels/blob/master/tools/validate_docstrings.py | BSD-3-Clause |
def validate_one(func_name):
"""
Validate the docstring for the given func_name
Parameters
----------
func_name : function
Function whose docstring will be evaluated (e.g. pandas.read_csv).
Returns
-------
dict
A dictionary containing all the information obtained from validating
the docstring.
"""
doc = Docstring(func_name)
errs, wrns, examples_errs = get_validation_data(doc)
return {
"type": doc.type,
"docstring": doc.clean_doc,
"deprecated": doc.deprecated,
"file": doc.source_file_name,
"file_line": doc.source_file_def_line,
"github_link": doc.github_url,
"errors": errs,
"warnings": wrns,
"examples_errors": examples_errs,
} | Validate the docstring for the given func_name
Parameters
----------
func_name : function
Function whose docstring will be evaluated (e.g. pandas.read_csv).
Returns
-------
dict
A dictionary containing all the information obtained from validating
the docstring. | validate_one | python | statsmodels/statsmodels | tools/validate_docstrings.py | https://github.com/statsmodels/statsmodels/blob/master/tools/validate_docstrings.py | BSD-3-Clause |
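Hypothetical invocation inside the statsmodels docs tooling; the dictionary keys come from the return statement above.

result = validate_one("statsmodels.regression.linear_model.OLS")
print(result["type"])           # e.g. 'type' for a class
print(len(result["errors"]))    # number of docstring errors found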
def validate_all(prefix, ignore_deprecated=False):
"""
Execute the validation of all docstrings, and return a dict with the
results.
Parameters
----------
prefix : str or None
If provided, only the docstrings that start with this pattern will be
validated. If None, all docstrings will be validated.
ignore_deprecated: bool, default False
If True, deprecated objects are ignored when validating docstrings.
Returns
-------
dict
A dictionary with an item for every function/method... containing
all the validation information.
"""
result = {}
seen = {}
# functions from the API docs
api_doc_fnames = os.path.join(BASE_PATH, "docs", "source", "*.rst")
api_items = []
for api_doc_fname in glob.glob(api_doc_fnames):
if "sandbox" in api_doc_fname:
continue
with open(api_doc_fname, encoding="utf8") as f:
api_items += list(get_api_items(f))
for func_name, func_obj, section, subsection in api_items:
if prefix and not func_name.startswith(prefix):
continue
doc_info = validate_one(func_name)
if ignore_deprecated and doc_info["deprecated"]:
continue
result[func_name] = doc_info
shared_code_key = doc_info["file"], doc_info["file_line"]
shared_code = seen.get(shared_code_key, "")
result[func_name].update(
{
"in_api": True,
"section": section,
"subsection": subsection,
"shared_code_with": shared_code,
}
)
seen[shared_code_key] = func_name
# functions from introspecting Series and DataFrame
api_item_names = set(list(zip(*api_items))[0])
for class_name, class_ in API_CLASSES:
for member in inspect.getmembers(class_):
func_name = class_name + "." + member[0]
if (
not member[0].startswith("_")
and func_name not in api_item_names
):
if prefix and not func_name.startswith(prefix):
continue
doc_info = validate_one(func_name)
if ignore_deprecated and doc_info["deprecated"]:
continue
result[func_name] = doc_info
result[func_name]["in_api"] = False
return result | Execute the validation of all docstrings, and return a dict with the
results.
Parameters
----------
prefix : str or None
If provided, only the docstrings that start with this pattern will be
validated. If None, all docstrings will be validated.
ignore_deprecated: bool, default False
If True, deprecated objects are ignored when validating docstrings.
Returns
-------
dict
A dictionary with an item for every function/method... containing
all the validation information. | validate_all | python | statsmodels/statsmodels | tools/validate_docstrings.py | https://github.com/statsmodels/statsmodels/blob/master/tools/validate_docstrings.py | BSD-3-Clause |
def rstjinja(app, docname, source):
"""
Render our pages as a jinja template for fancy templating goodness.
"""
# http://ericholscher.com/blog/2016/jul/25/integrating-jinja-rst-sphinx/
# Make sure we're outputting HTML
if app.builder.format != "html":
return
src = source[0]
# Skip converted notebooks
if 'nbconvert_exporter' in src:
return
try:
rendered = app.builder.templates.render_string(src,
app.config.html_context)
source[0] = rendered
except Exception as exc:
from sphinx.util import logging
logger = logging.getLogger(__name__)
logger.warning(exc)
logger.warning(source[0]) | Render our pages as a jinja template for fancy templating goodness. | rstjinja | python | statsmodels/statsmodels | docs/source/conf.py | https://github.com/statsmodels/statsmodels/blob/master/docs/source/conf.py | BSD-3-Clause |
def getrsq(fitresult):
    '''calculates rsquared, residual, total and explained sums of squares
Parameters
----------
fitresult : instance of Regression Result class, or tuple of (resid, endog)
arrays regression residuals and endogenous variable
Returns
-------
rsquared
residual sum of squares
(centered) total sum of squares
explained sum of squares (for centered)
'''
if hasattr(fitresult, 'resid') and hasattr(fitresult, 'model'):
resid = fitresult.resid
endog = fitresult.model.endog
nobs = fitresult.nobs
else:
resid = fitresult[0]
endog = fitresult[1]
nobs = resid.shape[0]
rss = np.dot(resid, resid)
tss = np.var(endog)*nobs
    return 1-rss/tss, rss, tss, tss-rss | calculates rsquared, residual, total and explained sums of squares
Parameters
----------
fitresult : instance of Regression Result class, or tuple of (resid, endog)
arrays regression residuals and endogenous variable
Returns
-------
rsquared
residual sum of squares
(centered) total sum of squares
explained sum of squares (for centered) | getrsq | python | statsmodels/statsmodels | examples/incomplete/wls_extended.py | https://github.com/statsmodels/statsmodels/blob/master/examples/incomplete/wls_extended.py | BSD-3-Clause |
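A quick consistency check against OLS's own rsquared (a sketch with simulated data).

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
exog = sm.add_constant(rng.normal(size=(100, 2)))
endog = exog @ np.array([1.0, 0.5, -0.3]) + rng.normal(size=100)
res = sm.OLS(endog, exog).fit()
rsq, rss, tss, ess = getrsq(res)
print(np.allclose(rsq, res.rsquared))   # True
print(np.allclose(tss, rss + ess))      # True by construction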
def index_trim_outlier(resid, k):
'''returns indices to residual array with k outliers removed
Parameters
----------
resid : array_like, 1d
data vector, usually residuals of a regression
k : int
number of outliers to remove
Returns
-------
trimmed_index : ndarray, 1d
index array with k outliers removed
outlier_index : ndarray, 1d
index array of k outliers
Notes
-----
Outliers are defined as the k observations with the largest
absolute values.
'''
sort_index = np.argsort(np.abs(resid))
# index of non-outlier
trimmed_index = np.sort(sort_index[:-k])
outlier_index = np.sort(sort_index[-k:])
return trimmed_index, outlier_index | returns indices to residual array with k outliers removed
Parameters
----------
resid : array_like, 1d
data vector, usually residuals of a regression
k : int
number of outliers to remove
Returns
-------
trimmed_index : ndarray, 1d
index array with k outliers removed
outlier_index : ndarray, 1d
index array of k outliers
Notes
-----
Outliers are defined as the k observations with the largest
absolute values. | index_trim_outlier | python | statsmodels/statsmodels | examples/incomplete/wls_extended.py | https://github.com/statsmodels/statsmodels/blob/master/examples/incomplete/wls_extended.py | BSD-3-Clause |
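Toy example trimming the two largest absolute values, using the function defined above.

import numpy as np

resid = np.array([0.1, -5.0, 0.3, 4.0, -0.2])
keep, out = index_trim_outlier(resid, 2)
print(keep)   # [0 2 4]
print(out)    # [1 3]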
def __init__(
        self,
        results: statsmodels.regression.linear_model.RegressionResultsWrapper,
) -> None:
"""
For a linear regression model, generates following diagnostic plots:
a. residual
b. qq
c. scale location and
d. leverage
and a table
e. vif
Args:
            results (statsmodels.regression.linear_model.RegressionResultsWrapper):
                must be an instance of a statsmodels linear regression results wrapper
Raises:
TypeError: if instance does not belong to above object
Example:
>>> import numpy as np
>>> import pandas as pd
>>> import statsmodels.formula.api as smf
>>> x = np.linspace(-np.pi, np.pi, 100)
>>> y = 3*x + 8 + np.random.normal(0,1, 100)
>>> df = pd.DataFrame({'x':x, 'y':y})
>>> res = smf.ols(formula= "y ~ x", data=df).fit()
>>> cls = Linear_Reg_Diagnostic(res)
>>> cls(plot_context="seaborn-v0_8")
In case you do not need all plots you can also independently make an individual plot/table
in following ways
>>> cls = Linear_Reg_Diagnostic(res)
>>> cls.residual_plot()
>>> cls.qq_plot()
>>> cls.scale_location_plot()
>>> cls.leverage_plot()
>>> cls.vif_table()
"""
        if not isinstance(
                results,
                statsmodels.regression.linear_model.RegressionResultsWrapper):
            raise TypeError(
                "results must be an instance of statsmodels.regression.linear_model.RegressionResultsWrapper"
)
self.results = maybe_unwrap_results(results)
self.y_true = self.results.model.endog
self.y_predict = self.results.fittedvalues
self.xvar = self.results.model.exog
self.xvar_names = self.results.model.exog_names
self.residual = np.array(self.results.resid)
influence = self.results.get_influence()
self.residual_norm = influence.resid_studentized_internal
self.leverage = influence.hat_matrix_diag
self.cooks_distance = influence.cooks_distance[0]
self.nparams = len(self.results.params) | For a linear regression model, generates following diagnostic plots:
a. residual
b. qq
c. scale location and
d. leverage
and a table
e. vif
Args:
            results (statsmodels.regression.linear_model.RegressionResultsWrapper):
                must be an instance of a statsmodels linear regression results wrapper
Raises:
TypeError: if instance does not belong to above object
Example:
>>> import numpy as np
>>> import pandas as pd
>>> import statsmodels.formula.api as smf
>>> x = np.linspace(-np.pi, np.pi, 100)
>>> y = 3*x + 8 + np.random.normal(0,1, 100)
>>> df = pd.DataFrame({'x':x, 'y':y})
>>> res = smf.ols(formula= "y ~ x", data=df).fit()
>>> cls = Linear_Reg_Diagnostic(res)
>>> cls(plot_context="seaborn-v0_8")
In case you do not need all plots you can also independently make an individual plot/table
in following ways
>>> cls = Linear_Reg_Diagnostic(res)
>>> cls.residual_plot()
>>> cls.qq_plot()
>>> cls.scale_location_plot()
>>> cls.leverage_plot()
>>> cls.vif_table() | __init__ | python | statsmodels/statsmodels | examples/python/linear_regression_diagnostics_plots.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/linear_regression_diagnostics_plots.py | BSD-3-Clause |
def residual_plot(self, ax=None):
"""
Residual vs Fitted Plot
        Graphical tool to identify non-linearity.
        A (roughly) horizontal red line indicates that the residuals show no
        systematic pattern, i.e. the linearity assumption is reasonable.
"""
if ax is None:
fig, ax = plt.subplots()
sns.residplot(x=self.y_predict,
y=self.residual,
lowess=True,
scatter_kws={'alpha': 0.5},
line_kws={
'color': 'red',
'lw': 1,
'alpha': 0.8
},
ax=ax)
# annotations
        residual_abs = np.abs(self.residual)
        # indices of the three largest absolute residuals
        abs_resid_top_3 = np.flip(np.argsort(residual_abs))[:3]
        for i in abs_resid_top_3:
            ax.annotate(i,
                        xy=(self.y_predict[i], self.residual[i]),
                        color='C3')
ax.set_title('Residuals vs Fitted', fontweight="bold")
ax.set_xlabel('Fitted values')
ax.set_ylabel('Residuals')
return ax | Residual vs Fitted Plot
Graphical tool to identify non-linearity.
        A (roughly) horizontal red line indicates that the residuals show no systematic pattern | residual_plot | python | statsmodels/statsmodels | examples/python/linear_regression_diagnostics_plots.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/linear_regression_diagnostics_plots.py | BSD-3-Clause
def qq_plot(self, ax=None):
"""
        Standardized Residual vs Theoretical Quantile plot
Used to visually check if residuals are normally distributed.
Points spread along the diagonal line will suggest so.
"""
if ax is None:
fig, ax = plt.subplots()
QQ = ProbPlot(self.residual_norm)
QQ.qqplot(line='45', alpha=0.5, lw=1, ax=ax)
# annotations
abs_norm_resid = np.flip(np.argsort(np.abs(self.residual_norm)), 0)
abs_norm_resid_top_3 = abs_norm_resid[:3]
for r, i in enumerate(abs_norm_resid_top_3):
ax.annotate(i,
xy=(np.flip(QQ.theoretical_quantiles,
0)[r], self.residual_norm[i]),
ha='right',
color='C3')
ax.set_title('Normal Q-Q', fontweight="bold")
ax.set_xlabel('Theoretical Quantiles')
ax.set_ylabel('Standardized Residuals')
        return ax | Standardized Residual vs Theoretical Quantile plot
Used to visually check if residuals are normally distributed.
Points spread along the diagonal line will suggest so. | qq_plot | python | statsmodels/statsmodels | examples/python/linear_regression_diagnostics_plots.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/linear_regression_diagnostics_plots.py | BSD-3-Clause |
def scale_location_plot(self, ax=None):
"""
        Sqrt(Standardized Residual) vs Fitted values plot
Used to check homoscedasticity of the residuals.
Horizontal line will suggest so.
"""
if ax is None:
fig, ax = plt.subplots()
residual_norm_abs_sqrt = np.sqrt(np.abs(self.residual_norm))
ax.scatter(self.y_predict, residual_norm_abs_sqrt, alpha=0.5)
sns.regplot(x=self.y_predict,
y=residual_norm_abs_sqrt,
scatter=False,
ci=False,
lowess=True,
line_kws={
'color': 'red',
'lw': 1,
'alpha': 0.8
},
ax=ax)
# annotations
abs_sq_norm_resid = np.flip(np.argsort(residual_norm_abs_sqrt), 0)
abs_sq_norm_resid_top_3 = abs_sq_norm_resid[:3]
for i in abs_sq_norm_resid_top_3:
ax.annotate(i,
xy=(self.y_predict[i], residual_norm_abs_sqrt[i]),
color='C3')
ax.set_title('Scale-Location', fontweight="bold")
ax.set_xlabel('Fitted values')
ax.set_ylabel(r'$\sqrt{|\mathrm{Standardized\ Residuals}|}$')
        return ax | Sqrt(Standardized Residual) vs Fitted values plot
Used to check homoscedasticity of the residuals.
Horizontal line will suggest so. | scale_location_plot | python | statsmodels/statsmodels | examples/python/linear_regression_diagnostics_plots.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/linear_regression_diagnostics_plots.py | BSD-3-Clause |
def leverage_plot(self, ax=None):
"""
Residual vs Leverage plot
        Points falling outside the Cook's distance curves are observations that
        can sway the fit, i.e. they are influential.
Good to have none outside the curves.
"""
if ax is None:
fig, ax = plt.subplots()
ax.scatter(self.leverage, self.residual_norm, alpha=0.5)
sns.regplot(x=self.leverage,
y=self.residual_norm,
scatter=False,
ci=False,
lowess=True,
line_kws={
'color': 'red',
'lw': 1,
'alpha': 0.8
},
ax=ax)
# annotations
leverage_top_3 = np.flip(np.argsort(self.cooks_distance), 0)[:3]
for i in leverage_top_3:
ax.annotate(i,
xy=(self.leverage[i], self.residual_norm[i]),
color='C3')
xtemp, ytemp = self.__cooks_dist_line(0.5) # 0.5 line
ax.plot(xtemp,
ytemp,
label="Cook's distance",
lw=1,
ls='--',
color='red')
xtemp, ytemp = self.__cooks_dist_line(1) # 1 line
ax.plot(xtemp, ytemp, lw=1, ls='--', color='red')
ax.set_xlim(0, max(self.leverage) + 0.01)
ax.set_title('Residuals vs Leverage', fontweight="bold")
ax.set_xlabel('Leverage')
ax.set_ylabel('Standardized Residuals')
ax.legend(loc='upper right')
return ax | Residual vs Leverage plot
        Points falling outside the Cook's distance curves are observations that
        can sway the fit, i.e. they are influential.
Good to have none outside the curves. | leverage_plot | python | statsmodels/statsmodels | examples/python/linear_regression_diagnostics_plots.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/linear_regression_diagnostics_plots.py | BSD-3-Clause |
def vif_table(self):
"""
VIF table
VIF, the variance inflation factor, is a measure of multicollinearity.
VIF > 5 for a variable indicates that it is highly collinear with the
other input variables.
"""
vif_df = pd.DataFrame()
vif_df["Features"] = self.xvar_names
vif_df["VIF Factor"] = [
variance_inflation_factor(self.xvar, i)
for i in range(self.xvar.shape[1])
]
print(vif_df.sort_values("VIF Factor").round(2)) | VIF table
VIF, the variance inflation factor, is a measure of multicollinearity.
VIF > 5 for a variable indicates that it is highly collinear with the
other input variables. | vif_table | python | statsmodels/statsmodels | examples/python/linear_regression_diagnostics_plots.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/linear_regression_diagnostics_plots.py | BSD-3-Clause |
def __cooks_dist_line(self, factor):
"""
Helper function for plotting Cook's distance curves
"""
p = self.nparams
formula = lambda x: np.sqrt((factor * p * (1 - x)) / x)
x = np.linspace(0.001, max(self.leverage), 50)
y = formula(x)
return x, y | Helper function for plotting Cook's distance curves | __cooks_dist_line | python | statsmodels/statsmodels | examples/python/linear_regression_diagnostics_plots.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/linear_regression_diagnostics_plots.py | BSD-3-Clause |
def lowess_with_confidence_bounds(x,
y,
eval_x,
N=200,
conf_interval=0.95,
lowess_kw=None):
"""
Perform Lowess regression and determine a confidence interval by bootstrap resampling
"""
    if lowess_kw is None:
        lowess_kw = {}
    # Lowess smoothing
    smoothed = sm.nonparametric.lowess(exog=x,
endog=y,
xvals=eval_x,
**lowess_kw)
# Perform bootstrap resamplings of the data
# and evaluate the smoothing at a fixed set of points
smoothed_values = np.empty((N, len(eval_x)))
for i in range(N):
sample = np.random.choice(len(x), len(x), replace=True)
sampled_x = x[sample]
sampled_y = y[sample]
smoothed_values[i] = sm.nonparametric.lowess(exog=sampled_x,
endog=sampled_y,
xvals=eval_x,
**lowess_kw)
# Get the confidence interval
sorted_values = np.sort(smoothed_values, axis=0)
bound = int(N * (1 - conf_interval) / 2)
bottom = sorted_values[bound - 1]
top = sorted_values[-bound]
return smoothed, bottom, top | Perform Lowess regression and determine a confidence interval by bootstrap resampling | lowess_with_confidence_bounds | python | statsmodels/statsmodels | examples/python/lowess.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/lowess.py | BSD-3-Clause |
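Sketch of calling the helper on noisy data; it relies on the xvals argument of statsmodels' lowess, as the function above does.

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(1)
x = np.sort(rng.uniform(0, 10, 200))
y = np.sin(x) + rng.normal(scale=0.3, size=200)
eval_x = np.linspace(0, 10, 50)
smoothed, bottom, top = lowess_with_confidence_bounds(
    x, y, eval_x, N=50, lowess_kw={"frac": 0.3})
print(smoothed.shape, bottom.shape, top.shape)   # (50,) (50,) (50,)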
def beanplot(data, plot_opts={}, jitter=False):
"""helper function to try out different plot options"""
fig = plt.figure()
ax = fig.add_subplot(111)
plot_opts_ = {
"cutoff_val": 5,
"cutoff_type": "abs",
"label_fontsize": "small",
"label_rotation": 30,
}
plot_opts_.update(plot_opts)
sm.graphics.beanplot(data,
ax=ax,
labels=labels,
jitter=jitter,
plot_opts=plot_opts_)
ax.set_xlabel("Party identification of respondent.")
ax.set_ylabel("Age") | helper function to try out different plot options | beanplot | python | statsmodels/statsmodels | examples/python/plots_boxplots.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/plots_boxplots.py | BSD-3-Clause |
def start_params(self):
"""
Defines the starting values for the parameters
The linear regression gives us reasonable starting values for the constant
d and the variance of the epsilon error
"""
exog = sm.add_constant(self.exog)
res = sm.OLS(self.endog, exog).fit()
params = np.r_[res.params[0], res.scale, 0.001, 0.001]
return params | Defines the starting values for the parameters
The linear regression gives us reasonable starting values for the constant
d and the variance of the epsilon error | start_params | python | statsmodels/statsmodels | examples/python/statespace_custom_models.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/statespace_custom_models.py | BSD-3-Clause |
def transform_params(self, unconstrained):
"""
        We constrain the last three parameters
('var.e', 'var.x.coeff', 'var.w.coeff') to be positive,
because they are variances
"""
constrained = unconstrained.copy()
constrained[self.positive_parameters] = (
constrained[self.positive_parameters]**2)
        return constrained | We constrain the last three parameters
('var.e', 'var.x.coeff', 'var.w.coeff') to be positive,
because they are variances | transform_params | python | statsmodels/statsmodels | examples/python/statespace_custom_models.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/statespace_custom_models.py | BSD-3-Clause |
def untransform_params(self, constrained):
"""
        Need to untransform all the parameters you transformed
in the `transform_params` function
"""
unconstrained = constrained.copy()
unconstrained[self.positive_parameters] = (
unconstrained[self.positive_parameters]**0.5)
        return unconstrained | Need to untransform all the parameters you transformed
in the `transform_params` function | untransform_params | python | statsmodels/statsmodels | examples/python/statespace_custom_models.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/statespace_custom_models.py | BSD-3-Clause |
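The square/sqrt mapping used above can be checked in isolation; note that squaring discards the sign, so only magnitudes survive the round trip (the parameter indices below are illustrative).

import numpy as np

positive_parameters = np.array([1, 2, 3])          # assumed variance positions
unconstrained = np.array([0.5, -0.3, 0.8, 1.2])
constrained = unconstrained.copy()
constrained[positive_parameters] = constrained[positive_parameters] ** 2
back = constrained.copy()
back[positive_parameters] = back[positive_parameters] ** 0.5
print(back)   # [0.5 0.3 0.8 1.2]: magnitudes preserved, signs lost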
def start_params(self):
"""
Defines the starting values for the parameters
The linear regression gives us reasonable starting values for the constant
d and the variance of the epsilon error
"""
exog = sm.add_constant(self.exog)
res = sm.OLS(self.endog, exog).fit()
params = np.r_[res.params[0], res.scale, 0.001, 0.001, 0.7, 0.8]
return params | Defines the starting values for the parameters
The linear regression gives us reasonable starting values for the constant
d and the variance of the epsilon error | start_params | python | statsmodels/statsmodels | examples/python/statespace_custom_models.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/statespace_custom_models.py | BSD-3-Clause |
def transform_params(self, unconstrained):
"""
        We constrain the last three parameters
('var.e', 'var.x.coeff', 'var.w.coeff') to be positive,
because they are variances
"""
constrained = unconstrained.copy()
constrained[self.positive_parameters] = (
constrained[self.positive_parameters]**2)
        return constrained | We constrain the last three parameters
('var.e', 'var.x.coeff', 'var.w.coeff') to be positive,
because they are variances | transform_params | python | statsmodels/statsmodels | examples/python/statespace_custom_models.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/statespace_custom_models.py | BSD-3-Clause |
def untransform_params(self, constrained):
"""
        Need to untransform all the parameters you transformed
in the `transform_params` function
"""
unconstrained = constrained.copy()
unconstrained[self.positive_parameters] = (
unconstrained[self.positive_parameters]**0.5)
        return unconstrained | Need to untransform all the parameters you transformed
in the `transform_params` function | untransform_params | python | statsmodels/statsmodels | examples/python/statespace_custom_models.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/statespace_custom_models.py | BSD-3-Clause |
def start_params(self):
"""
Initial values
"""
# (optional) Use scale for var_e1 and var_e2 starting values
params = np.r_[self.initial_values["var_e1"],
self.initial_values["var_e2"],
self.initial_values["var_w1"],
self.initial_values["var_w2"],
self.initial_values["delta1"],
self.initial_values["delta2"],
self.initial_values["delta3"], ]
return params | Initial values | start_params | python | statsmodels/statsmodels | examples/python/statespace_custom_models.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/statespace_custom_models.py | BSD-3-Clause |
def transform_params(self, unconstrained):
"""
        If you need to restrict parameters.
        For example, variances should be > 0.
        Parameters may have to be within -1 and 1.
"""
constrained = unconstrained.copy()
constrained[self.positive_parameters] = (
constrained[self.positive_parameters]**2)
return constrained | If you need to restrict parameters
For example, variances should be > 0
        Parameters may have to be within -1 and 1 | transform_params | python | statsmodels/statsmodels | examples/python/statespace_custom_models.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/statespace_custom_models.py | BSD-3-Clause
def untransform_params(self, constrained):
"""
Need to reverse what you did in transform_params()
"""
unconstrained = constrained.copy()
unconstrained[self.positive_parameters] = (
unconstrained[self.positive_parameters]**0.5)
return unconstrained | Need to reverse what you did in transform_params() | untransform_params | python | statsmodels/statsmodels | examples/python/statespace_custom_models.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/statespace_custom_models.py | BSD-3-Clause |
def add_stl_plot(fig, res, legend):
"""Add 3 plots from a second STL fit"""
axs = fig.get_axes()
comps = ["trend", "seasonal", "resid"]
for ax, comp in zip(axs[1:], comps):
series = getattr(res, comp)
if comp == "resid":
ax.plot(series, marker="o", linestyle="none")
else:
ax.plot(series)
if comp == "trend":
ax.legend(legend, frameon=False) | Add 3 plots from a second STL fit | add_stl_plot | python | statsmodels/statsmodels | examples/python/stl_decomposition.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/stl_decomposition.py | BSD-3-Clause |
def chi2(pars):
"""Cost function."""
y_model = pars[0] * data["x"] + pars[1]
chi = (data["y"] - y_model) / data["y_err"]
return np.sum(chi**2) | Cost function. | chi2 | python | statsmodels/statsmodels | examples/python/chi2_fitting.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/chi2_fitting.py | BSD-3-Clause |
def _exog_gen(exog, partitions):
"""partitions exog data"""
n_exog = exog.shape[0]
n_part = np.ceil(n_exog / partitions)
ii = 0
while ii < n_exog:
jj = int(min(ii + n_part, n_exog))
yield exog[ii:jj, :]
ii += int(n_part) | partitions exog data | _exog_gen | python | statsmodels/statsmodels | examples/python/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/distributed_estimation.py | BSD-3-Clause |
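Toy partitioning with the generator above: 10 rows split into 3 chunks of 4, 4 and 2.

import numpy as np

exog = np.arange(20).reshape(10, 2)
print([part.shape[0] for part in _exog_gen(exog, 3)])   # [4, 4, 2]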
def _endog_gen(endog, partitions):
"""partitions endog data"""
n_endog = endog.shape[0]
n_part = np.ceil(n_endog / partitions)
ii = 0
while ii < n_endog:
jj = int(min(ii + n_part, n_endog))
yield endog[ii:jj]
ii += int(n_part) | partitions endog data | _endog_gen | python | statsmodels/statsmodels | examples/python/distributed_estimation.py | https://github.com/statsmodels/statsmodels/blob/master/examples/python/distributed_estimation.py | BSD-3-Clause |
def test_book_dir() -> str:
"""Return test book dir"""
# TODO: Can move this to conftest.py if there will be more unittests
return str(Path(__file__).parent.parent / "test_books") | Return test book dir | test_book_dir | python | yihong0618/bilingual_book_maker | tests/test_integration.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/tests/test_integration.py | MIT |
def test_google_translate_epub(test_book_dir, tmpdir):
"""Test google translate epub"""
shutil.copyfile(
os.path.join(test_book_dir, "Liber_Esther.epub"),
os.path.join(tmpdir, "Liber_Esther.epub"),
)
subprocess.run(
[
sys.executable,
"make_book.py",
"--book_name",
os.path.join(tmpdir, "Liber_Esther.epub"),
"--test",
"--test_num",
"20",
"--model",
"google",
],
env=os.environ.copy(),
)
assert os.path.isfile(os.path.join(tmpdir, "Liber_Esther_bilingual.epub"))
assert os.path.getsize(os.path.join(tmpdir, "Liber_Esther_bilingual.epub")) != 0 | Test google translate epub | test_google_translate_epub | python | yihong0618/bilingual_book_maker | tests/test_integration.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/tests/test_integration.py | MIT |
def test_deepl_free_translate_epub(test_book_dir, tmpdir):
"""Test deepl free translate epub"""
shutil.copyfile(
os.path.join(test_book_dir, "Liber_Esther.epub"),
os.path.join(tmpdir, "Liber_Esther.epub"),
)
subprocess.run(
[
sys.executable,
"make_book.py",
"--book_name",
os.path.join(tmpdir, "Liber_Esther.epub"),
"--test",
"--test_num",
"20",
"--model",
"deeplfree",
],
env=os.environ.copy(),
)
assert os.path.isfile(os.path.join(tmpdir, "Liber_Esther_bilingual.epub"))
assert os.path.getsize(os.path.join(tmpdir, "Liber_Esther_bilingual.epub")) != 0 | Test deepl free translate epub | test_deepl_free_translate_epub | python | yihong0618/bilingual_book_maker | tests/test_integration.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/tests/test_integration.py | MIT |
def test_google_translate_txt(test_book_dir, tmpdir):
"""Test google translate txt"""
shutil.copyfile(
os.path.join(test_book_dir, "the_little_prince.txt"),
os.path.join(tmpdir, "the_little_prince.txt"),
)
subprocess.run(
[
sys.executable,
"make_book.py",
"--book_name",
os.path.join(tmpdir, "the_little_prince.txt"),
"--test",
"--test_num",
"20",
"--model",
"google",
],
env=os.environ.copy(),
)
assert os.path.isfile(os.path.join(tmpdir, "the_little_prince_bilingual.txt"))
assert os.path.getsize(os.path.join(tmpdir, "the_little_prince_bilingual.txt")) != 0 | Test google translate txt | test_google_translate_txt | python | yihong0618/bilingual_book_maker | tests/test_integration.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/tests/test_integration.py | MIT |
def test_google_translate_txt_batch_size(test_book_dir, tmpdir):
"""Test google translate txt with batch_size"""
shutil.copyfile(
os.path.join(test_book_dir, "the_little_prince.txt"),
os.path.join(tmpdir, "the_little_prince.txt"),
)
subprocess.run(
[
sys.executable,
"make_book.py",
"--book_name",
os.path.join(tmpdir, "the_little_prince.txt"),
"--test",
"--batch_size",
"30",
"--test_num",
"20",
"--model",
"google",
],
env=os.environ.copy(),
)
assert os.path.isfile(os.path.join(tmpdir, "the_little_prince_bilingual.txt"))
assert os.path.getsize(os.path.join(tmpdir, "the_little_prince_bilingual.txt")) != 0 | Test google translate txt with batch_size | test_google_translate_txt_batch_size | python | yihong0618/bilingual_book_maker | tests/test_integration.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/tests/test_integration.py | MIT |
def test_caiyun_translate_txt(test_book_dir, tmpdir):
"""Test caiyun translate txt"""
shutil.copyfile(
os.path.join(test_book_dir, "the_little_prince.txt"),
os.path.join(tmpdir, "the_little_prince.txt"),
)
subprocess.run(
[
sys.executable,
"make_book.py",
"--book_name",
os.path.join(tmpdir, "the_little_prince.txt"),
"--test",
"--batch_size",
"10",
"--test_num",
"100",
"--model",
"caiyun",
],
env=os.environ.copy(),
)
assert os.path.isfile(os.path.join(tmpdir, "the_little_prince_bilingual.txt"))
assert os.path.getsize(os.path.join(tmpdir, "the_little_prince_bilingual.txt")) != 0 | Test caiyun translate txt | test_caiyun_translate_txt | python | yihong0618/bilingual_book_maker | tests/test_integration.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/tests/test_integration.py | MIT |
def num_tokens_from_text(text, model="gpt-3.5-turbo-0301"):
    """Returns the number of tokens used by a list of messages."""
    messages = (
        {
            "role": "user",
            "content": text,
        },
    )
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
encoding = tiktoken.get_encoding("cl100k_base")
if model == "gpt-3.5-turbo-0301": # note: future models may deviate from this
num_tokens = 0
for message in messages:
num_tokens += (
4 # every message follows <im_start>{role/name}\n{content}<im_end>\n
)
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name": # if there's a name, the role is omitted
num_tokens += -1 # role is always required and always 1 token
num_tokens += 2 # every reply is primed with <im_start>assistant
return num_tokens
else:
raise NotImplementedError(
f"""num_tokens_from_messages() is not presently implemented for model {model}.
See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
) | Returns the number of tokens used by a list of messages. | num_tokens_from_text | python | yihong0618/bilingual_book_maker | book_maker/utils.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/book_maker/utils.py | MIT |
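Usage sketch; it requires the tiktoken package, and the exact count depends on the tokenizer version shipped with it.

n = num_tokens_from_text("Hello, world!", model="gpt-3.5-turbo-0301")
print(n)   # a small integer, e.g. around 10 including the chat framing tokens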
def __init__(self, serials=None, device_path=None, desktopkobodir="") -> None:
if serials is None:
serials = []
print(__about__)
self.kobodir = ""
kobodb = ""
# Order of checks
# 1. first check if a device_path has been passed in, and whether
# we can find the sqlite db in the respective place
# 2. if 1., and we got some serials passed in (from saved
# settings in calibre), just use it
# 3. if 1. worked, but we didn't get serials, try to parse them
# from the device, if this didn't work, unset everything
# 4. if by now we don't have kobodir set, give up on device and
# try to use the Desktop app.
# step 1. check whether this looks like a real device
if device_path:
# we got a device path
self.kobodir = os.path.join(device_path, ".kobo")
# devices use KoboReader.sqlite
kobodb = os.path.join(self.kobodir, "KoboReader.sqlite")
if not os.path.isfile(kobodb):
# device path seems to be wrong, unset it
device_path = ""
self.kobodir = ""
kobodb = ""
# step 3. we found a device but didn't get serials, try to get them
#
# we got a device path but no saved serial
# try to get the serial from the device
# get serial from device_path/.adobe-digital-editions/device.xml
if self.kobodir and len(serials) == 0 and can_parse_xml:
# print "get_device_settings - device_path = {0}".format(device_path)
devicexml = os.path.join(
device_path,
".adobe-digital-editions",
"device.xml",
)
# print "trying to load {0}".format(devicexml)
if os.path.exists(devicexml):
# print "trying to parse {0}".format(devicexml)
xmltree = ET.parse(devicexml)
for node in xmltree.iter():
if "deviceSerial" in node.tag:
serial = node.text
# print "found serial {0}".format(serial)
serials.append(serial)
break
else:
# print "cannot get serials from device."
device_path = ""
self.kobodir = ""
kobodb = ""
if self.kobodir == "":
# step 4. we haven't found a device with serials, so try desktop apps
if desktopkobodir != "":
self.kobodir = desktopkobodir
if self.kobodir == "":
if sys.platform.startswith("win"):
import winreg
if (
sys.getwindowsversion().major > 5
and "LOCALAPPDATA" in os.environ
):
# Python 2.x does not return unicode env. Use Python 3.x
self.kobodir = winreg.ExpandEnvironmentStrings("%LOCALAPPDATA%")
if self.kobodir == "" and "USERPROFILE" in os.environ:
# Python 2.x does not return unicode env. Use Python 3.x
self.kobodir = os.path.join(
winreg.ExpandEnvironmentStrings("%USERPROFILE%"),
"Local Settings",
"Application Data",
)
self.kobodir = os.path.join(
self.kobodir,
"Kobo",
"Kobo Desktop Edition",
)
elif sys.platform.startswith("darwin"):
self.kobodir = os.path.join(
os.environ["HOME"],
"Library",
"Application Support",
"Kobo",
"Kobo Desktop Edition",
)
elif sys.platform.startswith("linux"):
# sets ~/.config/calibre as the location to store the kobodir location info file and creates this directory if necessary
kobodir_cache_dir = os.path.join(
os.environ["HOME"],
".config",
"calibre",
)
if not os.path.isdir(kobodir_cache_dir):
os.mkdir(kobodir_cache_dir)
# appends the name of the file we're storing the kobodir location info to the above path
kobodir_cache_file = f"{str(kobodir_cache_dir)}/kobo_location"
"""if the above file does not exist, recursively searches from the root
of the filesystem until kobodir is found and stores the location of kobodir
in that file so this loop can be skipped in the future"""
                    if not os.path.isfile(kobodir_cache_file):
                        for root, _dirs, files in os.walk("/"):
                            for file in files:
                                if file == "Kobo.sqlite":
                                    kobo_linux_path = str(root)
                                    with open(
                                        kobodir_cache_file,
                                        "w",
                                        encoding="utf-8",
                                    ) as f:
                                        f.write(kobo_linux_path)
                    with open(kobodir_cache_file, encoding="utf-8") as f:
                        self.kobodir = f.read()
# desktop versions use Kobo.sqlite
kobodb = os.path.join(self.kobodir, "Kobo.sqlite")
# check for existence of file
if not os.path.isfile(kobodb):
# give up here, we haven't found anything useful
self.kobodir = ""
kobodb = ""
if self.kobodir != "":
self.bookdir = os.path.join(self.kobodir, "kepub")
# make a copy of the database in a temporary file
# so we can ensure it's not using WAL logging which sqlite3 can't do.
self.newdb = tempfile.NamedTemporaryFile(mode="wb", delete=False)
print(self.newdb.name)
with open(kobodb, "rb") as olddb:
self.newdb.write(olddb.read(18))
self.newdb.write(b"\x01\x01")
olddb.read(2)
self.newdb.write(olddb.read())
self.newdb.close()
self.__sqlite = sqlite3.connect(self.newdb.name)
self.__cursor = self.__sqlite.cursor()
self._userkeys = []
self._books = []
self._volumeID = []
self._serials = serials | if the above file does not exist, recursively searches from the root
of the filesystem until kobodir is found and stores the location of kobodir
in that file so this loop can be skipped in the future | __init__ | python | yihong0618/bilingual_book_maker | book_maker/obok.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/book_maker/obok.py | MIT |
def close(self):
"""Closes the database used by the library."""
self.__cursor.close()
self.__sqlite.close()
# delete the temporary copy of the database
os.remove(self.newdb.name) | Closes the database used by the library. | close | python | yihong0618/bilingual_book_maker | book_maker/obok.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/book_maker/obok.py | MIT |
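A hedged usage sketch for the constructor and close() above; the mount point is a made-up example and the snippet assumes the device's KoboReader.sqlite was found.
lib = KoboLibrary(device_path="/media/user/KOBOeReader")  # hypothetical mount point
print(lib.kobodir)  # points at the device's .kobo folder when the database was found
lib.close()         # removes the temporary copy of the database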
def userkeys(self):
"""The list of potential userkeys being used by this library.
Only one of these will be valid.
"""
if len(self._userkeys) != 0:
return self._userkeys
for macaddr in self.__getmacaddrs():
self._userkeys.extend(self.__getuserkeys(macaddr))
return self._userkeys | The list of potential userkeys being used by this library.
Only one of these will be valid. | userkeys | python | yihong0618/bilingual_book_maker | book_maker/obok.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/book_maker/obok.py | MIT |
def books(self):
"""The list of KoboBook objects in the library."""
if len(self._books) != 0:
return self._books
"""Drm-ed kepub"""
for row in self.__cursor.execute(
"SELECT DISTINCT volumeid, Title, Attribution, Series FROM content_keys, content WHERE contentid = volumeid",
):
self._books.append(
KoboBook(
row[0],
row[1],
self.__bookfile(row[0]),
"kepub",
self.__cursor,
author=row[2],
series=row[3],
),
)
self._volumeID.append(row[0])
"""Drm-free"""
for f in os.listdir(self.bookdir):
if f not in self._volumeID:
                row = self.__cursor.execute(
                    "SELECT Title, Attribution, Series FROM content WHERE ContentID = ?",
                    (f,),
                ).fetchone()
if row is not None:
fTitle = row[0]
self._books.append(
KoboBook(
f,
fTitle,
self.__bookfile(f),
"drm-free",
self.__cursor,
author=row[1],
series=row[2],
),
)
self._volumeID.append(f)
"""Sort"""
self._books.sort(key=lambda x: x.title)
return self._books | The list of KoboBook objects in the library. | books | python | yihong0618/bilingual_book_maker | book_maker/obok.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/book_maker/obok.py | MIT |
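An illustrative follow-on to the property above, assuming `lib` is an open KoboLibrary; the list is already sorted by title.
for book in lib.books:
    print(book.title)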
def __bookfile(self, volumeid):
"""The filename needed to open a given book."""
return os.path.join(self.kobodir, "kepub", volumeid) | The filename needed to open a given book. | __bookfile | python | yihong0618/bilingual_book_maker | book_maker/obok.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/book_maker/obok.py | MIT |
def __getmacaddrs(self):
"""The list of all MAC addresses on this machine."""
macaddrs = []
if sys.platform.startswith("win"):
c = re.compile(
"\\s?(" + "[0-9a-f]{2}[:\\-]" * 5 + "[0-9a-f]{2})(\\s|$)",
re.IGNORECASE,
)
output = subprocess.Popen(
"wmic nic where PhysicalAdapter=True get MACAddress",
shell=True,
stdout=subprocess.PIPE,
text=True,
).stdout
for line in output:
if m := c.search(line):
macaddrs.append(re.sub("-", ":", m[1]).upper())
elif sys.platform.startswith("darwin"):
c = re.compile(
"\\s(" + "[0-9a-f]{2}:" * 5 + "[0-9a-f]{2})(\\s|$)",
re.IGNORECASE,
)
output = subprocess.check_output(
"/sbin/ifconfig -a",
shell=True,
encoding="utf-8",
)
matches = c.findall(output)
macaddrs.extend(m[0].upper() for m in matches)
else:
# probably linux
# let's try ip
c = re.compile(
"\\s(" + "[0-9a-f]{2}:" * 5 + "[0-9a-f]{2})(\\s|$)",
re.IGNORECASE,
)
for line in os.popen("ip -br link"):
if m := c.search(line):
macaddrs.append(m[1].upper())
# let's try ipconfig under wine
c = re.compile(
"\\s(" + "[0-9a-f]{2}-" * 5 + "[0-9a-f]{2})(\\s|$)",
re.IGNORECASE,
)
for line in os.popen("ipconfig /all"):
if m := c.search(line):
macaddrs.append(re.sub("-", ":", m[1]).upper())
# extend the list of macaddrs in any case with the serials
# cannot hurt ;-)
macaddrs.extend(self._serials)
return macaddrs | The list of all MAC addresses on this machine. | __getmacaddrs | python | yihong0618/bilingual_book_maker | book_maker/obok.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/book_maker/obok.py | MIT |
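A quick, self-contained illustration of the MAC-address pattern used for `ip -br link` output above; the interface line and address are made up.
import re

c = re.compile(
    "\\s(" + "[0-9a-f]{2}:" * 5 + "[0-9a-f]{2})(\\s|$)",
    re.IGNORECASE,
)
line = "eth0             UP             aa:bb:cc:dd:ee:ff <BROADCAST,MULTICAST,UP>"
m = c.search(line)
if m:
    print(m[1].upper())  # AA:BB:CC:DD:EE:FF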
def encryptedfiles(self):
"""A dictionary of KoboFiles inside the book.
The dictionary keys are the relative pathnames, which are
the same as the pathnames inside the book 'zip' file."""
if self.type == "drm-free":
return self._encryptedfiles
if len(self._encryptedfiles) != 0:
return self._encryptedfiles
# Read the list of encrypted files from the DB
for row in self.__cursor.execute(
"SELECT elementid,elementkey FROM content_keys,content WHERE volumeid = ? AND volumeid = contentid",
(self.volumeid,),
):
self._encryptedfiles[row[0]] = KoboFile(
row[0],
None,
base64.b64decode(row[1]),
)
# Read the list of files from the kepub OPF manifest so that
# we can get their proper MIME type.
# NOTE: this requires that the OPF file is unencrypted!
zin = zipfile.ZipFile(self.filename, "r")
xmlns = {
"ocf": "urn:oasis:names:tc:opendocument:xmlns:container",
"opf": "http://www.idpf.org/2007/opf",
}
ocf = ET.fromstring(zin.read("META-INF/container.xml"))
opffile = ocf.find(".//ocf:rootfile", xmlns).attrib["full-path"]
basedir = re.sub("[^/]+$", "", opffile)
opf = ET.fromstring(zin.read(opffile))
zin.close()
c = re.compile("/")
for item in opf.findall(".//opf:item", xmlns):
# Convert relative URIs
href = item.attrib["href"]
if not c.match(href):
href = "".join((basedir, href))
# Update books we've found from the DB.
if href in self._encryptedfiles:
mimetype = item.attrib["media-type"]
self._encryptedfiles[href].mimetype = mimetype
return self._encryptedfiles | A dictionary of KoboFiles inside the book.
The dictionary keys are the relative pathnames, which are
the same as the pathnames inside the book 'zip' file. | encryptedfiles | python | yihong0618/bilingual_book_maker | book_maker/obok.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/book_maker/obok.py | MIT |
def decrypt(self, userkey, contents):
"""
Decrypt the contents using the provided user key and the
file page key. The caller must determine if the decrypted
data is correct."""
# The userkey decrypts the page key (self.key)
keyenc = AES(userkey)
decryptedkey = keyenc.decrypt(self.key)
# The decrypted page key decrypts the content
pageenc = AES(decryptedkey)
return self.__removeaespadding(pageenc.decrypt(contents)) | Decrypt the contents using the provided user key and the
file page key. The caller must determine if the decrypted
data is correct. | decrypt | python | yihong0618/bilingual_book_maker | book_maker/obok.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/book_maker/obok.py | MIT |
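A hedged sketch of how the two-stage decryption above is typically driven: encrypted bytes come out of the kepub archive, the user key unwraps the per-file page key, and check() validates the result. `book` and `userkey` are placeholders, and `encryptedfiles` is assumed to be exposed as a property as in the upstream project.
import zipfile

with zipfile.ZipFile(book.filename, "r") as zin:
    for name, kfile in book.encryptedfiles.items():
        raw = zin.read(name)                 # still-encrypted bytes from the archive
        clear = kfile.decrypt(userkey, raw)  # user key unwraps the page key, page key decrypts
        kfile.check(clear)                   # raises ValueError if the key was wrong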
def check(self, contents):
"""
        If the contents use a known MIME type, check that they
        conform to the type. Throw a ValueError exception if not.
        If the contents use an uncheckable MIME type, don't check
        them and don't throw an exception.
Returns True if the content was checked, False if it was not
checked."""
if self.mimetype == "application/xhtml+xml":
# assume utf-8 with no BOM
textoffset = 0
stride = 1
print(f"Checking text:{contents[:10]}:")
# check for byte order mark
if contents[:3] == b"\xef\xbb\xbf":
# seems to be utf-8 with BOM
print("Could be utf-8 with BOM")
textoffset = 3
elif contents[:2] == b"\xfe\xff":
# seems to be utf-16BE
print("Could be utf-16BE")
textoffset = 3
stride = 2
elif contents[:2] == b"\xff\xfe":
# seems to be utf-16LE
print("Could be utf-16LE")
textoffset = 2
stride = 2
else:
print("Perhaps utf-8 without BOM")
# now check that the first few characters are in the ASCII range
for i in range(textoffset, textoffset + 5 * stride, stride):
if contents[i] < 32 or contents[i] > 127:
# Non-ascii, so decryption probably failed
print(f"Bad character at {i}, value {contents[i]}")
raise ValueError
print("Seems to be good text")
return True
if self.mimetype == "image/jpeg":
if contents[:3] == b"\xff\xd8\xff":
return True
print(f"Bad JPEG: {contents[:3].hex()}")
raise ValueError
        return False | If the contents use a known MIME type, check that they
conform to the type. Throw a ValueError exception if not.
If the contents use an uncheckable MIME type, don't check
them and don't throw an exception.
Returns True if the content was checked, False if it was not
checked. | check | python | yihong0618/bilingual_book_maker | book_maker/obok.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/book_maker/obok.py | MIT |
def __removeaespadding(self, contents):
"""
Remove the trailing padding, using what appears to be the CMS
algorithm from RFC 5652 6.3"""
lastchar = binascii.b2a_hex(contents[-1:])
strlen = int(lastchar, 16)
padding = strlen
if strlen == 1:
return contents[:-1]
if strlen < 16:
for _ in range(strlen):
testchar = binascii.b2a_hex(contents[-strlen : -(strlen - 1)])
if testchar != lastchar:
padding = 0
if padding > 0:
contents = contents[:-padding]
return contents | Remove the trailing padding, using what appears to be the CMS
algorithm from RFC 5652 6.3 | __removeaespadding | python | yihong0618/bilingual_book_maker | book_maker/obok.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/book_maker/obok.py | MIT |
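A tiny worked example of the padding rule handled above, using made-up bytes: a 12-byte plaintext padded to one 16-byte AES block with four 0x04 bytes.
plain = b"hello world!"            # 12 bytes
padded = plain + b"\x04" * 4       # CMS/PKCS#7-style padding to a full block
pad_len = padded[-1]               # last byte gives the padding length, here 4
assert padded[:-pad_len] == plain  # stripping that many bytes restores the text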
def process_markdown_content(self):
    """Split the raw content into markdown paragraphs."""
    current_paragraph = []
    for line in self.origin_book:
        # A blank line ends the current paragraph, if there is one
        if not line.strip() and current_paragraph:
            self.md_paragraphs.append("\n".join(current_paragraph))
            current_paragraph = []
        # A heading line becomes a paragraph of its own
        elif line.strip().startswith("#"):
            if current_paragraph:
                self.md_paragraphs.append("\n".join(current_paragraph))
                current_paragraph = []
            self.md_paragraphs.append(line)
        # Anything else is added to the current paragraph
        else:
            current_paragraph.append(line)
    # Flush the last paragraph
    if current_paragraph:
        self.md_paragraphs.append("\n".join(current_paragraph)) | Split the raw content into markdown paragraphs. | process_markdown_content | python | yihong0618/bilingual_book_maker | book_maker/loader/md_loader.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/book_maker/loader/md_loader.py | MIT
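A standalone re-trace of the grouping rule above with made-up input lines: blank lines close a paragraph, heading lines stand alone, and everything else accumulates.
origin_book = ["# Chapter 1", "First sentence.", "Second sentence.", "", "Third paragraph."]
md_paragraphs, current = [], []
for line in origin_book:
    if not line.strip() and current:
        md_paragraphs.append("\n".join(current))
        current = []
    elif line.strip().startswith("#"):
        if current:
            md_paragraphs.append("\n".join(current))
            current = []
        md_paragraphs.append(line)
    else:
        current.append(line)
if current:
    md_paragraphs.append("\n".join(current))
assert md_paragraphs == ["# Chapter 1", "First sentence.\nSecond sentence.", "Third paragraph."]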
def _check_blocks(self, translate_blocks, origin_blocks):
"""
Check if the translated blocks match the original text, with only a simple check of the beginning numbers.
"""
if len(translate_blocks) != len(origin_blocks):
return False
for t in zip(translate_blocks, origin_blocks):
i = 0
try:
i = int(t[0].get("number", 0))
except ValueError:
m = re.search(r"\s*\d+", t[0].get("number"))
if m:
i = int(m.group())
j = int(t[1].get("number", -1))
if i != j:
print(f"check failed: {i}!={j}")
return False
return True | Check if the translated blocks match the original text, with only a simple check of the beginning numbers. | _check_blocks | python | yihong0618/bilingual_book_maker | book_maker/loader/srt_loader.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/book_maker/loader/srt_loader.py | MIT |
def translate(self, text):
print(text)
"""r = self.session.post(
self.api_url,
headers=self.headers,
data=f"q={requests.utils.quote(text)}",
)
if not r.ok:
return text
t_text = "".join(
[sentence.get("trans", "") for sentence in r.json()["sentences"]],
)"""
t_text = self._retry_translate(text)
print("[bold green]" + re.sub("\n{3,}", "\n\n", t_text) + "[/bold green]")
return t_text | r = self.session.post(
self.api_url,
headers=self.headers,
data=f"q={requests.utils.quote(text)}",
)
if not r.ok:
return text
t_text = "".join(
[sentence.get("trans", "") for sentence in r.json()["sentences"]],
) | translate | python | yihong0618/bilingual_book_maker | book_maker/translator/google_translator.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/book_maker/translator/google_translator.py | MIT |
def create_messages(self, text, intermediate_messages=None):
"""Create messages for the current translation request"""
current_msg = {
"role": "user",
"content": self.prompt_template.format(
text=text,
language=self.language,
),
}
messages = []
if intermediate_messages:
messages.extend(intermediate_messages)
messages.append(current_msg)
return messages | Create messages for the current translation request | create_messages | python | yihong0618/bilingual_book_maker | book_maker/translator/claude_translator.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/book_maker/translator/claude_translator.py | MIT |
def create_context_messages(self):
"""Create a message pair containing all context paragraphs"""
if not self.context_flag or not self.context_list:
return []
# Create a single message pair for all previous context
return [
{
"role": "user",
"content": self.prompt_template.format(
text="\n\n".join(self.context_list),
language=self.language,
),
},
{"role": "assistant", "content": "\n\n".join(self.context_translated_list)},
] | Create a message pair containing all context paragraphs | create_context_messages | python | yihong0618/bilingual_book_maker | book_maker/translator/claude_translator.py | https://github.com/yihong0618/bilingual_book_maker/blob/master/book_maker/translator/claude_translator.py | MIT |
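A hedged sketch of how the two helpers above combine into a single request: earlier paragraphs and their translations form one user/assistant pair, and the new paragraph becomes the next user turn. The prompt wording and paragraphs below are made up.
context = [
    {"role": "user", "content": "Please translate into English:\n\nBonjour."},
    {"role": "assistant", "content": "Hello."},
]
current = {
    "role": "user",
    "content": "Please translate into English:\n\nComment ça va ?",
}
messages = [*context, current]  # shape of the list handed to the Claude API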