| Column | Type | Length / range |
|---|---|---|
| id | int32 | 0 – 252k |
| repo | string | 7 – 55 characters |
| path | string | 4 – 127 characters |
| func_name | string | 1 – 88 characters |
| original_string | string | 75 – 19.8k characters |
| language | string (1 class) | single value: `python` |
| code | string | 51 – 19.8k characters |
| code_tokens | sequence of strings | — |
| docstring | string | 3 – 17.3k characters |
| docstring_tokens | sequence of strings | — |
| sha | string | 40 characters |
| url | string | 87 – 242 characters |
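Each record that follows pairs a Python function with its docstring, pre-tokenized views of both, and provenance (repository, file path, commit sha, URL). As a minimal sketch of how records with this schema might be loaded and inspected — assuming the table corresponds to a CodeSearchNet-style dataset published on the Hugging Face Hub with these column names preserved; the dataset identifier below is a placeholder, not the actual one:

```python
from datasets import load_dataset

# Placeholder dataset identifier -- substitute the real Hub path or local data files.
ds = load_dataset("user/code-docstring-pairs", split="train")

record = ds[0]
# Provenance fields from the schema above.
print(record["repo"], record["path"], record["func_name"])
# Natural-language description and the code stripped of its docstring.
print(record["docstring"])
print(record["code"])
# Pre-tokenized views, usable by sequence models without re-tokenizing.
print(len(record["code_tokens"]), len(record["docstring_tokens"]))
```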
246,600 | sebp/scikit-survival | sksurv/ensemble/survival_loss.py | CoxPH.update_terminal_regions | def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel() | python | def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel() | [
"def",
"update_terminal_regions",
"(",
"self",
",",
"tree",
",",
"X",
",",
"y",
",",
"residual",
",",
"y_pred",
",",
"sample_weight",
",",
"sample_mask",
",",
"learning_rate",
"=",
"1.0",
",",
"k",
"=",
"0",
")",
":",
"# update predictions",
"y_pred",
"[",
":",
",",
"k",
"]",
"+=",
"learning_rate",
"*",
"tree",
".",
"predict",
"(",
"X",
")",
".",
"ravel",
"(",
")"
] | Least squares does not need to update terminal regions.
But it has to update the predictions. | [
"Least",
"squares",
"does",
"not",
"need",
"to",
"update",
"terminal",
"regions",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/ensemble/survival_loss.py#L55-L63 |
246,601 | sebp/scikit-survival | sksurv/setup.py | build_from_c_and_cpp_files | def build_from_c_and_cpp_files(extensions):
"""Modify the extensions to build from the .c and .cpp files.
This is useful for releases, this way cython is not required to
run python setup.py install.
"""
for extension in extensions:
sources = []
for sfile in extension.sources:
path, ext = os.path.splitext(sfile)
if ext in ('.pyx', '.py'):
if extension.language == 'c++':
ext = '.cpp'
else:
ext = '.c'
sfile = path + ext
sources.append(sfile)
extension.sources = sources | python | def build_from_c_and_cpp_files(extensions):
for extension in extensions:
sources = []
for sfile in extension.sources:
path, ext = os.path.splitext(sfile)
if ext in ('.pyx', '.py'):
if extension.language == 'c++':
ext = '.cpp'
else:
ext = '.c'
sfile = path + ext
sources.append(sfile)
extension.sources = sources | [
"def",
"build_from_c_and_cpp_files",
"(",
"extensions",
")",
":",
"for",
"extension",
"in",
"extensions",
":",
"sources",
"=",
"[",
"]",
"for",
"sfile",
"in",
"extension",
".",
"sources",
":",
"path",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"sfile",
")",
"if",
"ext",
"in",
"(",
"'.pyx'",
",",
"'.py'",
")",
":",
"if",
"extension",
".",
"language",
"==",
"'c++'",
":",
"ext",
"=",
"'.cpp'",
"else",
":",
"ext",
"=",
"'.c'",
"sfile",
"=",
"path",
"+",
"ext",
"sources",
".",
"append",
"(",
"sfile",
")",
"extension",
".",
"sources",
"=",
"sources"
] | Modify the extensions to build from the .c and .cpp files.
This is useful for releases, this way cython is not required to
run python setup.py install. | [
"Modify",
"the",
"extensions",
"to",
"build",
"from",
"the",
".",
"c",
"and",
".",
"cpp",
"files",
".",
"This",
"is",
"useful",
"for",
"releases",
"this",
"way",
"cython",
"is",
"not",
"required",
"to",
"run",
"python",
"setup",
".",
"py",
"install",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/setup.py#L20-L36 |
246,602 | sebp/scikit-survival | sksurv/svm/survival_svm.py | SurvivalCounter._count_values | def _count_values(self):
"""Return dict mapping relevance level to sample index"""
indices = {yi: [i] for i, yi in enumerate(self.y) if self.status[i]}
return indices | python | def _count_values(self):
indices = {yi: [i] for i, yi in enumerate(self.y) if self.status[i]}
return indices | [
"def",
"_count_values",
"(",
"self",
")",
":",
"indices",
"=",
"{",
"yi",
":",
"[",
"i",
"]",
"for",
"i",
",",
"yi",
"in",
"enumerate",
"(",
"self",
".",
"y",
")",
"if",
"self",
".",
"status",
"[",
"i",
"]",
"}",
"return",
"indices"
] | Return dict mapping relevance level to sample index | [
"Return",
"dict",
"mapping",
"relevance",
"level",
"to",
"sample",
"index"
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/svm/survival_svm.py#L134-L138 |
246,603 | sebp/scikit-survival | sksurv/svm/survival_svm.py | BaseSurvivalSVM._create_optimizer | def _create_optimizer(self, X, y, status):
"""Samples are ordered by relevance"""
if self.optimizer is None:
self.optimizer = 'avltree'
times, ranks = y
if self.optimizer == 'simple':
optimizer = SimpleOptimizer(X, status, self.alpha, self.rank_ratio, timeit=self.timeit)
elif self.optimizer == 'PRSVM':
optimizer = PRSVMOptimizer(X, status, self.alpha, self.rank_ratio, timeit=self.timeit)
elif self.optimizer == 'direct-count':
optimizer = LargeScaleOptimizer(self.alpha, self.rank_ratio, self.fit_intercept,
SurvivalCounter(X, ranks, status, len(ranks), times), timeit=self.timeit)
elif self.optimizer == 'rbtree':
optimizer = LargeScaleOptimizer(self.alpha, self.rank_ratio, self.fit_intercept,
OrderStatisticTreeSurvivalCounter(X, ranks, status, RBTree, times),
timeit=self.timeit)
elif self.optimizer == 'avltree':
optimizer = LargeScaleOptimizer(self.alpha, self.rank_ratio, self.fit_intercept,
OrderStatisticTreeSurvivalCounter(X, ranks, status, AVLTree, times),
timeit=self.timeit)
else:
raise ValueError('unknown optimizer: {0}'.format(self.optimizer))
return optimizer | python | def _create_optimizer(self, X, y, status):
if self.optimizer is None:
self.optimizer = 'avltree'
times, ranks = y
if self.optimizer == 'simple':
optimizer = SimpleOptimizer(X, status, self.alpha, self.rank_ratio, timeit=self.timeit)
elif self.optimizer == 'PRSVM':
optimizer = PRSVMOptimizer(X, status, self.alpha, self.rank_ratio, timeit=self.timeit)
elif self.optimizer == 'direct-count':
optimizer = LargeScaleOptimizer(self.alpha, self.rank_ratio, self.fit_intercept,
SurvivalCounter(X, ranks, status, len(ranks), times), timeit=self.timeit)
elif self.optimizer == 'rbtree':
optimizer = LargeScaleOptimizer(self.alpha, self.rank_ratio, self.fit_intercept,
OrderStatisticTreeSurvivalCounter(X, ranks, status, RBTree, times),
timeit=self.timeit)
elif self.optimizer == 'avltree':
optimizer = LargeScaleOptimizer(self.alpha, self.rank_ratio, self.fit_intercept,
OrderStatisticTreeSurvivalCounter(X, ranks, status, AVLTree, times),
timeit=self.timeit)
else:
raise ValueError('unknown optimizer: {0}'.format(self.optimizer))
return optimizer | [
"def",
"_create_optimizer",
"(",
"self",
",",
"X",
",",
"y",
",",
"status",
")",
":",
"if",
"self",
".",
"optimizer",
"is",
"None",
":",
"self",
".",
"optimizer",
"=",
"'avltree'",
"times",
",",
"ranks",
"=",
"y",
"if",
"self",
".",
"optimizer",
"==",
"'simple'",
":",
"optimizer",
"=",
"SimpleOptimizer",
"(",
"X",
",",
"status",
",",
"self",
".",
"alpha",
",",
"self",
".",
"rank_ratio",
",",
"timeit",
"=",
"self",
".",
"timeit",
")",
"elif",
"self",
".",
"optimizer",
"==",
"'PRSVM'",
":",
"optimizer",
"=",
"PRSVMOptimizer",
"(",
"X",
",",
"status",
",",
"self",
".",
"alpha",
",",
"self",
".",
"rank_ratio",
",",
"timeit",
"=",
"self",
".",
"timeit",
")",
"elif",
"self",
".",
"optimizer",
"==",
"'direct-count'",
":",
"optimizer",
"=",
"LargeScaleOptimizer",
"(",
"self",
".",
"alpha",
",",
"self",
".",
"rank_ratio",
",",
"self",
".",
"fit_intercept",
",",
"SurvivalCounter",
"(",
"X",
",",
"ranks",
",",
"status",
",",
"len",
"(",
"ranks",
")",
",",
"times",
")",
",",
"timeit",
"=",
"self",
".",
"timeit",
")",
"elif",
"self",
".",
"optimizer",
"==",
"'rbtree'",
":",
"optimizer",
"=",
"LargeScaleOptimizer",
"(",
"self",
".",
"alpha",
",",
"self",
".",
"rank_ratio",
",",
"self",
".",
"fit_intercept",
",",
"OrderStatisticTreeSurvivalCounter",
"(",
"X",
",",
"ranks",
",",
"status",
",",
"RBTree",
",",
"times",
")",
",",
"timeit",
"=",
"self",
".",
"timeit",
")",
"elif",
"self",
".",
"optimizer",
"==",
"'avltree'",
":",
"optimizer",
"=",
"LargeScaleOptimizer",
"(",
"self",
".",
"alpha",
",",
"self",
".",
"rank_ratio",
",",
"self",
".",
"fit_intercept",
",",
"OrderStatisticTreeSurvivalCounter",
"(",
"X",
",",
"ranks",
",",
"status",
",",
"AVLTree",
",",
"times",
")",
",",
"timeit",
"=",
"self",
".",
"timeit",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'unknown optimizer: {0}'",
".",
"format",
"(",
"self",
".",
"optimizer",
")",
")",
"return",
"optimizer"
] | Samples are ordered by relevance | [
"Samples",
"are",
"ordered",
"by",
"relevance"
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/svm/survival_svm.py#L608-L633 |
246,604 | sebp/scikit-survival | sksurv/svm/survival_svm.py | BaseSurvivalSVM._argsort_and_resolve_ties | def _argsort_and_resolve_ties(time, random_state):
"""Like numpy.argsort, but resolves ties uniformly at random"""
n_samples = len(time)
order = numpy.argsort(time, kind="mergesort")
i = 0
while i < n_samples - 1:
inext = i + 1
while inext < n_samples and time[order[i]] == time[order[inext]]:
inext += 1
if i + 1 != inext:
# resolve ties randomly
random_state.shuffle(order[i:inext])
i = inext
return order | python | def _argsort_and_resolve_ties(time, random_state):
n_samples = len(time)
order = numpy.argsort(time, kind="mergesort")
i = 0
while i < n_samples - 1:
inext = i + 1
while inext < n_samples and time[order[i]] == time[order[inext]]:
inext += 1
if i + 1 != inext:
# resolve ties randomly
random_state.shuffle(order[i:inext])
i = inext
return order | [
"def",
"_argsort_and_resolve_ties",
"(",
"time",
",",
"random_state",
")",
":",
"n_samples",
"=",
"len",
"(",
"time",
")",
"order",
"=",
"numpy",
".",
"argsort",
"(",
"time",
",",
"kind",
"=",
"\"mergesort\"",
")",
"i",
"=",
"0",
"while",
"i",
"<",
"n_samples",
"-",
"1",
":",
"inext",
"=",
"i",
"+",
"1",
"while",
"inext",
"<",
"n_samples",
"and",
"time",
"[",
"order",
"[",
"i",
"]",
"]",
"==",
"time",
"[",
"order",
"[",
"inext",
"]",
"]",
":",
"inext",
"+=",
"1",
"if",
"i",
"+",
"1",
"!=",
"inext",
":",
"# resolve ties randomly",
"random_state",
".",
"shuffle",
"(",
"order",
"[",
"i",
":",
"inext",
"]",
")",
"i",
"=",
"inext",
"return",
"order"
] | Like numpy.argsort, but resolves ties uniformly at random | [
"Like",
"numpy",
".",
"argsort",
"but",
"resolves",
"ties",
"uniformly",
"at",
"random"
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/svm/survival_svm.py#L702-L717 |
246,605 | sebp/scikit-survival | sksurv/linear_model/aft.py | IPCRidge.fit | def fit(self, X, y):
"""Build an accelerated failure time model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
"""
X, event, time = check_arrays_survival(X, y)
weights = ipc_weights(event, time)
super().fit(X, numpy.log(time), sample_weight=weights)
return self | python | def fit(self, X, y):
X, event, time = check_arrays_survival(X, y)
weights = ipc_weights(event, time)
super().fit(X, numpy.log(time), sample_weight=weights)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"X",
",",
"event",
",",
"time",
"=",
"check_arrays_survival",
"(",
"X",
",",
"y",
")",
"weights",
"=",
"ipc_weights",
"(",
"event",
",",
"time",
")",
"super",
"(",
")",
".",
"fit",
"(",
"X",
",",
"numpy",
".",
"log",
"(",
"time",
")",
",",
"sample_weight",
"=",
"weights",
")",
"return",
"self"
] | Build an accelerated failure time model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self | [
"Build",
"an",
"accelerated",
"failure",
"time",
"model",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/linear_model/aft.py#L52-L74 |
246,606 | sebp/scikit-survival | sksurv/linear_model/coxph.py | BreslowEstimator.fit | def fit(self, linear_predictor, event, time):
"""Compute baseline cumulative hazard function.
Parameters
----------
linear_predictor : array-like, shape = (n_samples,)
Linear predictor of risk: `X @ coef`.
event : array-like, shape = (n_samples,)
Contains binary event indicators.
time : array-like, shape = (n_samples,)
Contains event/censoring times.
Returns
-------
self
"""
risk_score = numpy.exp(linear_predictor)
order = numpy.argsort(time, kind="mergesort")
risk_score = risk_score[order]
uniq_times, n_events, n_at_risk = _compute_counts(event, time, order)
divisor = numpy.empty(n_at_risk.shape, dtype=numpy.float_)
value = numpy.sum(risk_score)
divisor[0] = value
k = 0
for i in range(1, len(n_at_risk)):
d = n_at_risk[i - 1] - n_at_risk[i]
value -= risk_score[k:(k + d)].sum()
k += d
divisor[i] = value
assert k == n_at_risk[0] - n_at_risk[-1]
y = numpy.cumsum(n_events / divisor)
self.cum_baseline_hazard_ = StepFunction(uniq_times, y)
self.baseline_survival_ = StepFunction(self.cum_baseline_hazard_.x,
numpy.exp(- self.cum_baseline_hazard_.y))
return self | python | def fit(self, linear_predictor, event, time):
risk_score = numpy.exp(linear_predictor)
order = numpy.argsort(time, kind="mergesort")
risk_score = risk_score[order]
uniq_times, n_events, n_at_risk = _compute_counts(event, time, order)
divisor = numpy.empty(n_at_risk.shape, dtype=numpy.float_)
value = numpy.sum(risk_score)
divisor[0] = value
k = 0
for i in range(1, len(n_at_risk)):
d = n_at_risk[i - 1] - n_at_risk[i]
value -= risk_score[k:(k + d)].sum()
k += d
divisor[i] = value
assert k == n_at_risk[0] - n_at_risk[-1]
y = numpy.cumsum(n_events / divisor)
self.cum_baseline_hazard_ = StepFunction(uniq_times, y)
self.baseline_survival_ = StepFunction(self.cum_baseline_hazard_.x,
numpy.exp(- self.cum_baseline_hazard_.y))
return self | [
"def",
"fit",
"(",
"self",
",",
"linear_predictor",
",",
"event",
",",
"time",
")",
":",
"risk_score",
"=",
"numpy",
".",
"exp",
"(",
"linear_predictor",
")",
"order",
"=",
"numpy",
".",
"argsort",
"(",
"time",
",",
"kind",
"=",
"\"mergesort\"",
")",
"risk_score",
"=",
"risk_score",
"[",
"order",
"]",
"uniq_times",
",",
"n_events",
",",
"n_at_risk",
"=",
"_compute_counts",
"(",
"event",
",",
"time",
",",
"order",
")",
"divisor",
"=",
"numpy",
".",
"empty",
"(",
"n_at_risk",
".",
"shape",
",",
"dtype",
"=",
"numpy",
".",
"float_",
")",
"value",
"=",
"numpy",
".",
"sum",
"(",
"risk_score",
")",
"divisor",
"[",
"0",
"]",
"=",
"value",
"k",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"n_at_risk",
")",
")",
":",
"d",
"=",
"n_at_risk",
"[",
"i",
"-",
"1",
"]",
"-",
"n_at_risk",
"[",
"i",
"]",
"value",
"-=",
"risk_score",
"[",
"k",
":",
"(",
"k",
"+",
"d",
")",
"]",
".",
"sum",
"(",
")",
"k",
"+=",
"d",
"divisor",
"[",
"i",
"]",
"=",
"value",
"assert",
"k",
"==",
"n_at_risk",
"[",
"0",
"]",
"-",
"n_at_risk",
"[",
"-",
"1",
"]",
"y",
"=",
"numpy",
".",
"cumsum",
"(",
"n_events",
"/",
"divisor",
")",
"self",
".",
"cum_baseline_hazard_",
"=",
"StepFunction",
"(",
"uniq_times",
",",
"y",
")",
"self",
".",
"baseline_survival_",
"=",
"StepFunction",
"(",
"self",
".",
"cum_baseline_hazard_",
".",
"x",
",",
"numpy",
".",
"exp",
"(",
"-",
"self",
".",
"cum_baseline_hazard_",
".",
"y",
")",
")",
"return",
"self"
] | Compute baseline cumulative hazard function.
Parameters
----------
linear_predictor : array-like, shape = (n_samples,)
Linear predictor of risk: `X @ coef`.
event : array-like, shape = (n_samples,)
Contains binary event indicators.
time : array-like, shape = (n_samples,)
Contains event/censoring times.
Returns
-------
self | [
"Compute",
"baseline",
"cumulative",
"hazard",
"function",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/linear_model/coxph.py#L42-L81 |
246,607 | sebp/scikit-survival | sksurv/linear_model/coxph.py | CoxPHOptimizer.nlog_likelihood | def nlog_likelihood(self, w):
"""Compute negative partial log-likelihood
Parameters
----------
w : array, shape = (n_features,)
Estimate of coefficients
Returns
-------
loss : float
Average negative partial log-likelihood
"""
time = self.time
n_samples = self.x.shape[0]
xw = numpy.dot(self.x, w)
loss = 0
risk_set = 0
k = 0
for i in range(n_samples):
ti = time[i]
while k < n_samples and ti == time[k]:
risk_set += numpy.exp(xw[k])
k += 1
if self.event[i]:
loss -= (xw[i] - numpy.log(risk_set)) / n_samples
# add regularization term to log-likelihood
return loss + self.alpha * squared_norm(w) / (2. * n_samples) | python | def nlog_likelihood(self, w):
time = self.time
n_samples = self.x.shape[0]
xw = numpy.dot(self.x, w)
loss = 0
risk_set = 0
k = 0
for i in range(n_samples):
ti = time[i]
while k < n_samples and ti == time[k]:
risk_set += numpy.exp(xw[k])
k += 1
if self.event[i]:
loss -= (xw[i] - numpy.log(risk_set)) / n_samples
# add regularization term to log-likelihood
return loss + self.alpha * squared_norm(w) / (2. * n_samples) | [
"def",
"nlog_likelihood",
"(",
"self",
",",
"w",
")",
":",
"time",
"=",
"self",
".",
"time",
"n_samples",
"=",
"self",
".",
"x",
".",
"shape",
"[",
"0",
"]",
"xw",
"=",
"numpy",
".",
"dot",
"(",
"self",
".",
"x",
",",
"w",
")",
"loss",
"=",
"0",
"risk_set",
"=",
"0",
"k",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"n_samples",
")",
":",
"ti",
"=",
"time",
"[",
"i",
"]",
"while",
"k",
"<",
"n_samples",
"and",
"ti",
"==",
"time",
"[",
"k",
"]",
":",
"risk_set",
"+=",
"numpy",
".",
"exp",
"(",
"xw",
"[",
"k",
"]",
")",
"k",
"+=",
"1",
"if",
"self",
".",
"event",
"[",
"i",
"]",
":",
"loss",
"-=",
"(",
"xw",
"[",
"i",
"]",
"-",
"numpy",
".",
"log",
"(",
"risk_set",
")",
")",
"/",
"n_samples",
"# add regularization term to log-likelihood",
"return",
"loss",
"+",
"self",
".",
"alpha",
"*",
"squared_norm",
"(",
"w",
")",
"/",
"(",
"2.",
"*",
"n_samples",
")"
] | Compute negative partial log-likelihood
Parameters
----------
w : array, shape = (n_features,)
Estimate of coefficients
Returns
-------
loss : float
Average negative partial log-likelihood | [
"Compute",
"negative",
"partial",
"log",
"-",
"likelihood"
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/linear_model/coxph.py#L138-L168 |
246,608 | sebp/scikit-survival | sksurv/linear_model/coxph.py | CoxPHOptimizer.update | def update(self, w, offset=0):
"""Compute gradient and Hessian matrix with respect to `w`."""
time = self.time
x = self.x
exp_xw = numpy.exp(offset + numpy.dot(x, w))
n_samples, n_features = x.shape
gradient = numpy.zeros((1, n_features), dtype=float)
hessian = numpy.zeros((n_features, n_features), dtype=float)
inv_n_samples = 1. / n_samples
risk_set = 0
risk_set_x = 0
risk_set_xx = 0
k = 0
# iterate time in descending order
for i in range(n_samples):
ti = time[i]
while k < n_samples and ti == time[k]:
risk_set += exp_xw[k]
# preserve 2D shape of row vector
xk = x[k:k + 1]
risk_set_x += exp_xw[k] * xk
# outer product
xx = numpy.dot(xk.T, xk)
risk_set_xx += exp_xw[k] * xx
k += 1
if self.event[i]:
gradient -= (x[i:i + 1] - risk_set_x / risk_set) * inv_n_samples
a = risk_set_xx / risk_set
z = risk_set_x / risk_set
# outer product
b = numpy.dot(z.T, z)
hessian += (a - b) * inv_n_samples
if self.alpha > 0:
gradient += self.alpha * inv_n_samples * w
diag_idx = numpy.diag_indices(n_features)
hessian[diag_idx] += self.alpha * inv_n_samples
self.gradient = gradient.ravel()
self.hessian = hessian | python | def update(self, w, offset=0):
time = self.time
x = self.x
exp_xw = numpy.exp(offset + numpy.dot(x, w))
n_samples, n_features = x.shape
gradient = numpy.zeros((1, n_features), dtype=float)
hessian = numpy.zeros((n_features, n_features), dtype=float)
inv_n_samples = 1. / n_samples
risk_set = 0
risk_set_x = 0
risk_set_xx = 0
k = 0
# iterate time in descending order
for i in range(n_samples):
ti = time[i]
while k < n_samples and ti == time[k]:
risk_set += exp_xw[k]
# preserve 2D shape of row vector
xk = x[k:k + 1]
risk_set_x += exp_xw[k] * xk
# outer product
xx = numpy.dot(xk.T, xk)
risk_set_xx += exp_xw[k] * xx
k += 1
if self.event[i]:
gradient -= (x[i:i + 1] - risk_set_x / risk_set) * inv_n_samples
a = risk_set_xx / risk_set
z = risk_set_x / risk_set
# outer product
b = numpy.dot(z.T, z)
hessian += (a - b) * inv_n_samples
if self.alpha > 0:
gradient += self.alpha * inv_n_samples * w
diag_idx = numpy.diag_indices(n_features)
hessian[diag_idx] += self.alpha * inv_n_samples
self.gradient = gradient.ravel()
self.hessian = hessian | [
"def",
"update",
"(",
"self",
",",
"w",
",",
"offset",
"=",
"0",
")",
":",
"time",
"=",
"self",
".",
"time",
"x",
"=",
"self",
".",
"x",
"exp_xw",
"=",
"numpy",
".",
"exp",
"(",
"offset",
"+",
"numpy",
".",
"dot",
"(",
"x",
",",
"w",
")",
")",
"n_samples",
",",
"n_features",
"=",
"x",
".",
"shape",
"gradient",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"1",
",",
"n_features",
")",
",",
"dtype",
"=",
"float",
")",
"hessian",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"n_features",
",",
"n_features",
")",
",",
"dtype",
"=",
"float",
")",
"inv_n_samples",
"=",
"1.",
"/",
"n_samples",
"risk_set",
"=",
"0",
"risk_set_x",
"=",
"0",
"risk_set_xx",
"=",
"0",
"k",
"=",
"0",
"# iterate time in descending order",
"for",
"i",
"in",
"range",
"(",
"n_samples",
")",
":",
"ti",
"=",
"time",
"[",
"i",
"]",
"while",
"k",
"<",
"n_samples",
"and",
"ti",
"==",
"time",
"[",
"k",
"]",
":",
"risk_set",
"+=",
"exp_xw",
"[",
"k",
"]",
"# preserve 2D shape of row vector",
"xk",
"=",
"x",
"[",
"k",
":",
"k",
"+",
"1",
"]",
"risk_set_x",
"+=",
"exp_xw",
"[",
"k",
"]",
"*",
"xk",
"# outer product",
"xx",
"=",
"numpy",
".",
"dot",
"(",
"xk",
".",
"T",
",",
"xk",
")",
"risk_set_xx",
"+=",
"exp_xw",
"[",
"k",
"]",
"*",
"xx",
"k",
"+=",
"1",
"if",
"self",
".",
"event",
"[",
"i",
"]",
":",
"gradient",
"-=",
"(",
"x",
"[",
"i",
":",
"i",
"+",
"1",
"]",
"-",
"risk_set_x",
"/",
"risk_set",
")",
"*",
"inv_n_samples",
"a",
"=",
"risk_set_xx",
"/",
"risk_set",
"z",
"=",
"risk_set_x",
"/",
"risk_set",
"# outer product",
"b",
"=",
"numpy",
".",
"dot",
"(",
"z",
".",
"T",
",",
"z",
")",
"hessian",
"+=",
"(",
"a",
"-",
"b",
")",
"*",
"inv_n_samples",
"if",
"self",
".",
"alpha",
">",
"0",
":",
"gradient",
"+=",
"self",
".",
"alpha",
"*",
"inv_n_samples",
"*",
"w",
"diag_idx",
"=",
"numpy",
".",
"diag_indices",
"(",
"n_features",
")",
"hessian",
"[",
"diag_idx",
"]",
"+=",
"self",
".",
"alpha",
"*",
"inv_n_samples",
"self",
".",
"gradient",
"=",
"gradient",
".",
"ravel",
"(",
")",
"self",
".",
"hessian",
"=",
"hessian"
] | Compute gradient and Hessian matrix with respect to `w`. | [
"Compute",
"gradient",
"and",
"Hessian",
"matrix",
"with",
"respect",
"to",
"w",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/linear_model/coxph.py#L170-L218 |
246,609 | sebp/scikit-survival | sksurv/linear_model/coxph.py | CoxPHSurvivalAnalysis.fit | def fit(self, X, y):
"""Minimize negative partial log-likelihood for provided data.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
"""
X, event, time = check_arrays_survival(X, y)
if self.alpha < 0:
raise ValueError("alpha must be positive, but was %r" % self.alpha)
optimizer = CoxPHOptimizer(X, event, time, self.alpha)
verbose_reporter = VerboseReporter(self.verbose)
w = numpy.zeros(X.shape[1])
w_prev = w
i = 0
loss = float('inf')
while True:
if i >= self.n_iter:
verbose_reporter.end_max_iter(i)
warnings.warn(('Optimization did not converge: Maximum number of iterations has been exceeded.'),
stacklevel=2, category=ConvergenceWarning)
break
optimizer.update(w)
delta = solve(optimizer.hessian, optimizer.gradient,
overwrite_a=False, overwrite_b=False, check_finite=False)
if not numpy.all(numpy.isfinite(delta)):
raise ValueError("search direction contains NaN or infinite values")
w_new = w - delta
loss_new = optimizer.nlog_likelihood(w_new)
verbose_reporter.update(i, delta, loss_new)
if loss_new > loss:
# perform step-halving if negative log-likelihood does not decrease
w = (w_prev + w) / 2
loss = optimizer.nlog_likelihood(w)
verbose_reporter.step_halving(i, loss)
i += 1
continue
w_prev = w
w = w_new
res = numpy.abs(1 - (loss_new / loss))
if res < self.tol:
verbose_reporter.end_converged(i)
break
loss = loss_new
i += 1
self.coef_ = w
self._baseline_model.fit(numpy.dot(X, self.coef_), event, time)
return self | python | def fit(self, X, y):
X, event, time = check_arrays_survival(X, y)
if self.alpha < 0:
raise ValueError("alpha must be positive, but was %r" % self.alpha)
optimizer = CoxPHOptimizer(X, event, time, self.alpha)
verbose_reporter = VerboseReporter(self.verbose)
w = numpy.zeros(X.shape[1])
w_prev = w
i = 0
loss = float('inf')
while True:
if i >= self.n_iter:
verbose_reporter.end_max_iter(i)
warnings.warn(('Optimization did not converge: Maximum number of iterations has been exceeded.'),
stacklevel=2, category=ConvergenceWarning)
break
optimizer.update(w)
delta = solve(optimizer.hessian, optimizer.gradient,
overwrite_a=False, overwrite_b=False, check_finite=False)
if not numpy.all(numpy.isfinite(delta)):
raise ValueError("search direction contains NaN or infinite values")
w_new = w - delta
loss_new = optimizer.nlog_likelihood(w_new)
verbose_reporter.update(i, delta, loss_new)
if loss_new > loss:
# perform step-halving if negative log-likelihood does not decrease
w = (w_prev + w) / 2
loss = optimizer.nlog_likelihood(w)
verbose_reporter.step_halving(i, loss)
i += 1
continue
w_prev = w
w = w_new
res = numpy.abs(1 - (loss_new / loss))
if res < self.tol:
verbose_reporter.end_converged(i)
break
loss = loss_new
i += 1
self.coef_ = w
self._baseline_model.fit(numpy.dot(X, self.coef_), event, time)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"X",
",",
"event",
",",
"time",
"=",
"check_arrays_survival",
"(",
"X",
",",
"y",
")",
"if",
"self",
".",
"alpha",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"alpha must be positive, but was %r\"",
"%",
"self",
".",
"alpha",
")",
"optimizer",
"=",
"CoxPHOptimizer",
"(",
"X",
",",
"event",
",",
"time",
",",
"self",
".",
"alpha",
")",
"verbose_reporter",
"=",
"VerboseReporter",
"(",
"self",
".",
"verbose",
")",
"w",
"=",
"numpy",
".",
"zeros",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
")",
"w_prev",
"=",
"w",
"i",
"=",
"0",
"loss",
"=",
"float",
"(",
"'inf'",
")",
"while",
"True",
":",
"if",
"i",
">=",
"self",
".",
"n_iter",
":",
"verbose_reporter",
".",
"end_max_iter",
"(",
"i",
")",
"warnings",
".",
"warn",
"(",
"(",
"'Optimization did not converge: Maximum number of iterations has been exceeded.'",
")",
",",
"stacklevel",
"=",
"2",
",",
"category",
"=",
"ConvergenceWarning",
")",
"break",
"optimizer",
".",
"update",
"(",
"w",
")",
"delta",
"=",
"solve",
"(",
"optimizer",
".",
"hessian",
",",
"optimizer",
".",
"gradient",
",",
"overwrite_a",
"=",
"False",
",",
"overwrite_b",
"=",
"False",
",",
"check_finite",
"=",
"False",
")",
"if",
"not",
"numpy",
".",
"all",
"(",
"numpy",
".",
"isfinite",
"(",
"delta",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"search direction contains NaN or infinite values\"",
")",
"w_new",
"=",
"w",
"-",
"delta",
"loss_new",
"=",
"optimizer",
".",
"nlog_likelihood",
"(",
"w_new",
")",
"verbose_reporter",
".",
"update",
"(",
"i",
",",
"delta",
",",
"loss_new",
")",
"if",
"loss_new",
">",
"loss",
":",
"# perform step-halving if negative log-likelihood does not decrease",
"w",
"=",
"(",
"w_prev",
"+",
"w",
")",
"/",
"2",
"loss",
"=",
"optimizer",
".",
"nlog_likelihood",
"(",
"w",
")",
"verbose_reporter",
".",
"step_halving",
"(",
"i",
",",
"loss",
")",
"i",
"+=",
"1",
"continue",
"w_prev",
"=",
"w",
"w",
"=",
"w_new",
"res",
"=",
"numpy",
".",
"abs",
"(",
"1",
"-",
"(",
"loss_new",
"/",
"loss",
")",
")",
"if",
"res",
"<",
"self",
".",
"tol",
":",
"verbose_reporter",
".",
"end_converged",
"(",
"i",
")",
"break",
"loss",
"=",
"loss_new",
"i",
"+=",
"1",
"self",
".",
"coef_",
"=",
"w",
"self",
".",
"_baseline_model",
".",
"fit",
"(",
"numpy",
".",
"dot",
"(",
"X",
",",
"self",
".",
"coef_",
")",
",",
"event",
",",
"time",
")",
"return",
"self"
] | Minimize negative partial log-likelihood for provided data.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self | [
"Minimize",
"negative",
"partial",
"log",
"-",
"likelihood",
"for",
"provided",
"data",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/linear_model/coxph.py#L292-L359 |
246,610 | sebp/scikit-survival | sksurv/nonparametric.py | _compute_counts | def _compute_counts(event, time, order=None):
"""Count right censored and uncensored samples at each unique time point.
Parameters
----------
event : array
Boolean event indicator.
time : array
Survival time or time of censoring.
order : array or None
Indices to order time in ascending order.
If None, order will be computed.
Returns
-------
times : array
Unique time points.
n_events : array
Number of events at each time point.
n_at_risk : array
Number of samples that are censored or have an event at each time point.
"""
n_samples = event.shape[0]
if order is None:
order = numpy.argsort(time, kind="mergesort")
uniq_times = numpy.empty(n_samples, dtype=time.dtype)
uniq_events = numpy.empty(n_samples, dtype=numpy.int_)
uniq_counts = numpy.empty(n_samples, dtype=numpy.int_)
i = 0
prev_val = time[order[0]]
j = 0
while True:
count_event = 0
count = 0
while i < n_samples and prev_val == time[order[i]]:
if event[order[i]]:
count_event += 1
count += 1
i += 1
uniq_times[j] = prev_val
uniq_events[j] = count_event
uniq_counts[j] = count
j += 1
if i == n_samples:
break
prev_val = time[order[i]]
times = numpy.resize(uniq_times, j)
n_events = numpy.resize(uniq_events, j)
total_count = numpy.resize(uniq_counts, j)
# offset cumulative sum by one
total_count = numpy.concatenate(([0], total_count))
n_at_risk = n_samples - numpy.cumsum(total_count)
return times, n_events, n_at_risk[:-1] | python | def _compute_counts(event, time, order=None):
n_samples = event.shape[0]
if order is None:
order = numpy.argsort(time, kind="mergesort")
uniq_times = numpy.empty(n_samples, dtype=time.dtype)
uniq_events = numpy.empty(n_samples, dtype=numpy.int_)
uniq_counts = numpy.empty(n_samples, dtype=numpy.int_)
i = 0
prev_val = time[order[0]]
j = 0
while True:
count_event = 0
count = 0
while i < n_samples and prev_val == time[order[i]]:
if event[order[i]]:
count_event += 1
count += 1
i += 1
uniq_times[j] = prev_val
uniq_events[j] = count_event
uniq_counts[j] = count
j += 1
if i == n_samples:
break
prev_val = time[order[i]]
times = numpy.resize(uniq_times, j)
n_events = numpy.resize(uniq_events, j)
total_count = numpy.resize(uniq_counts, j)
# offset cumulative sum by one
total_count = numpy.concatenate(([0], total_count))
n_at_risk = n_samples - numpy.cumsum(total_count)
return times, n_events, n_at_risk[:-1] | [
"def",
"_compute_counts",
"(",
"event",
",",
"time",
",",
"order",
"=",
"None",
")",
":",
"n_samples",
"=",
"event",
".",
"shape",
"[",
"0",
"]",
"if",
"order",
"is",
"None",
":",
"order",
"=",
"numpy",
".",
"argsort",
"(",
"time",
",",
"kind",
"=",
"\"mergesort\"",
")",
"uniq_times",
"=",
"numpy",
".",
"empty",
"(",
"n_samples",
",",
"dtype",
"=",
"time",
".",
"dtype",
")",
"uniq_events",
"=",
"numpy",
".",
"empty",
"(",
"n_samples",
",",
"dtype",
"=",
"numpy",
".",
"int_",
")",
"uniq_counts",
"=",
"numpy",
".",
"empty",
"(",
"n_samples",
",",
"dtype",
"=",
"numpy",
".",
"int_",
")",
"i",
"=",
"0",
"prev_val",
"=",
"time",
"[",
"order",
"[",
"0",
"]",
"]",
"j",
"=",
"0",
"while",
"True",
":",
"count_event",
"=",
"0",
"count",
"=",
"0",
"while",
"i",
"<",
"n_samples",
"and",
"prev_val",
"==",
"time",
"[",
"order",
"[",
"i",
"]",
"]",
":",
"if",
"event",
"[",
"order",
"[",
"i",
"]",
"]",
":",
"count_event",
"+=",
"1",
"count",
"+=",
"1",
"i",
"+=",
"1",
"uniq_times",
"[",
"j",
"]",
"=",
"prev_val",
"uniq_events",
"[",
"j",
"]",
"=",
"count_event",
"uniq_counts",
"[",
"j",
"]",
"=",
"count",
"j",
"+=",
"1",
"if",
"i",
"==",
"n_samples",
":",
"break",
"prev_val",
"=",
"time",
"[",
"order",
"[",
"i",
"]",
"]",
"times",
"=",
"numpy",
".",
"resize",
"(",
"uniq_times",
",",
"j",
")",
"n_events",
"=",
"numpy",
".",
"resize",
"(",
"uniq_events",
",",
"j",
")",
"total_count",
"=",
"numpy",
".",
"resize",
"(",
"uniq_counts",
",",
"j",
")",
"# offset cumulative sum by one",
"total_count",
"=",
"numpy",
".",
"concatenate",
"(",
"(",
"[",
"0",
"]",
",",
"total_count",
")",
")",
"n_at_risk",
"=",
"n_samples",
"-",
"numpy",
".",
"cumsum",
"(",
"total_count",
")",
"return",
"times",
",",
"n_events",
",",
"n_at_risk",
"[",
":",
"-",
"1",
"]"
] | Count right censored and uncensored samples at each unique time point.
Parameters
----------
event : array
Boolean event indicator.
time : array
Survival time or time of censoring.
order : array or None
Indices to order time in ascending order.
If None, order will be computed.
Returns
-------
times : array
Unique time points.
n_events : array
Number of events at each time point.
n_at_risk : array
Number of samples that are censored or have an event at each time point. | [
"Count",
"right",
"censored",
"and",
"uncensored",
"samples",
"at",
"each",
"unique",
"time",
"point",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/nonparametric.py#L28-L94 |
246,611 | sebp/scikit-survival | sksurv/nonparametric.py | _compute_counts_truncated | def _compute_counts_truncated(event, time_enter, time_exit):
"""Compute counts for left truncated and right censored survival data.
Parameters
----------
event : array
Boolean event indicator.
time_start : array
Time when a subject entered the study.
time_exit : array
Time when a subject left the study due to an
event or censoring.
Returns
-------
times : array
Unique time points.
n_events : array
Number of events at each time point.
n_at_risk : array
Number of samples that are censored or have an event at each time point.
"""
if (time_enter > time_exit).any():
raise ValueError("exit time must be larger start time for all samples")
n_samples = event.shape[0]
uniq_times = numpy.sort(numpy.unique(numpy.concatenate((time_enter, time_exit))), kind="mergesort")
total_counts = numpy.empty(len(uniq_times), dtype=numpy.int_)
event_counts = numpy.empty(len(uniq_times), dtype=numpy.int_)
order_enter = numpy.argsort(time_enter, kind="mergesort")
order_exit = numpy.argsort(time_exit, kind="mergesort")
s_time_enter = time_enter[order_enter]
s_time_exit = time_exit[order_exit]
t0 = uniq_times[0]
# everything larger is included
idx_enter = numpy.searchsorted(s_time_enter, t0, side="right")
# everything smaller is excluded
idx_exit = numpy.searchsorted(s_time_exit, t0, side="left")
total_counts[0] = idx_enter
# except people die on the day they enter
event_counts[0] = 0
for i in range(1, len(uniq_times)):
ti = uniq_times[i]
while idx_enter < n_samples and s_time_enter[idx_enter] <= ti:
idx_enter += 1
while idx_exit < n_samples and s_time_exit[idx_exit] < ti:
idx_exit += 1
risk_set = numpy.setdiff1d(order_enter[:idx_enter], order_exit[:idx_exit], assume_unique=True)
total_counts[i] = len(risk_set)
count_event = 0
k = idx_exit
while k < n_samples and s_time_exit[k] == ti:
if event[order_exit[k]]:
count_event += 1
k += 1
event_counts[i] = count_event
return uniq_times, event_counts, total_counts | python | def _compute_counts_truncated(event, time_enter, time_exit):
if (time_enter > time_exit).any():
raise ValueError("exit time must be larger start time for all samples")
n_samples = event.shape[0]
uniq_times = numpy.sort(numpy.unique(numpy.concatenate((time_enter, time_exit))), kind="mergesort")
total_counts = numpy.empty(len(uniq_times), dtype=numpy.int_)
event_counts = numpy.empty(len(uniq_times), dtype=numpy.int_)
order_enter = numpy.argsort(time_enter, kind="mergesort")
order_exit = numpy.argsort(time_exit, kind="mergesort")
s_time_enter = time_enter[order_enter]
s_time_exit = time_exit[order_exit]
t0 = uniq_times[0]
# everything larger is included
idx_enter = numpy.searchsorted(s_time_enter, t0, side="right")
# everything smaller is excluded
idx_exit = numpy.searchsorted(s_time_exit, t0, side="left")
total_counts[0] = idx_enter
# except people die on the day they enter
event_counts[0] = 0
for i in range(1, len(uniq_times)):
ti = uniq_times[i]
while idx_enter < n_samples and s_time_enter[idx_enter] <= ti:
idx_enter += 1
while idx_exit < n_samples and s_time_exit[idx_exit] < ti:
idx_exit += 1
risk_set = numpy.setdiff1d(order_enter[:idx_enter], order_exit[:idx_exit], assume_unique=True)
total_counts[i] = len(risk_set)
count_event = 0
k = idx_exit
while k < n_samples and s_time_exit[k] == ti:
if event[order_exit[k]]:
count_event += 1
k += 1
event_counts[i] = count_event
return uniq_times, event_counts, total_counts | [
"def",
"_compute_counts_truncated",
"(",
"event",
",",
"time_enter",
",",
"time_exit",
")",
":",
"if",
"(",
"time_enter",
">",
"time_exit",
")",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"exit time must be larger start time for all samples\"",
")",
"n_samples",
"=",
"event",
".",
"shape",
"[",
"0",
"]",
"uniq_times",
"=",
"numpy",
".",
"sort",
"(",
"numpy",
".",
"unique",
"(",
"numpy",
".",
"concatenate",
"(",
"(",
"time_enter",
",",
"time_exit",
")",
")",
")",
",",
"kind",
"=",
"\"mergesort\"",
")",
"total_counts",
"=",
"numpy",
".",
"empty",
"(",
"len",
"(",
"uniq_times",
")",
",",
"dtype",
"=",
"numpy",
".",
"int_",
")",
"event_counts",
"=",
"numpy",
".",
"empty",
"(",
"len",
"(",
"uniq_times",
")",
",",
"dtype",
"=",
"numpy",
".",
"int_",
")",
"order_enter",
"=",
"numpy",
".",
"argsort",
"(",
"time_enter",
",",
"kind",
"=",
"\"mergesort\"",
")",
"order_exit",
"=",
"numpy",
".",
"argsort",
"(",
"time_exit",
",",
"kind",
"=",
"\"mergesort\"",
")",
"s_time_enter",
"=",
"time_enter",
"[",
"order_enter",
"]",
"s_time_exit",
"=",
"time_exit",
"[",
"order_exit",
"]",
"t0",
"=",
"uniq_times",
"[",
"0",
"]",
"# everything larger is included",
"idx_enter",
"=",
"numpy",
".",
"searchsorted",
"(",
"s_time_enter",
",",
"t0",
",",
"side",
"=",
"\"right\"",
")",
"# everything smaller is excluded",
"idx_exit",
"=",
"numpy",
".",
"searchsorted",
"(",
"s_time_exit",
",",
"t0",
",",
"side",
"=",
"\"left\"",
")",
"total_counts",
"[",
"0",
"]",
"=",
"idx_enter",
"# except people die on the day they enter",
"event_counts",
"[",
"0",
"]",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"uniq_times",
")",
")",
":",
"ti",
"=",
"uniq_times",
"[",
"i",
"]",
"while",
"idx_enter",
"<",
"n_samples",
"and",
"s_time_enter",
"[",
"idx_enter",
"]",
"<=",
"ti",
":",
"idx_enter",
"+=",
"1",
"while",
"idx_exit",
"<",
"n_samples",
"and",
"s_time_exit",
"[",
"idx_exit",
"]",
"<",
"ti",
":",
"idx_exit",
"+=",
"1",
"risk_set",
"=",
"numpy",
".",
"setdiff1d",
"(",
"order_enter",
"[",
":",
"idx_enter",
"]",
",",
"order_exit",
"[",
":",
"idx_exit",
"]",
",",
"assume_unique",
"=",
"True",
")",
"total_counts",
"[",
"i",
"]",
"=",
"len",
"(",
"risk_set",
")",
"count_event",
"=",
"0",
"k",
"=",
"idx_exit",
"while",
"k",
"<",
"n_samples",
"and",
"s_time_exit",
"[",
"k",
"]",
"==",
"ti",
":",
"if",
"event",
"[",
"order_exit",
"[",
"k",
"]",
"]",
":",
"count_event",
"+=",
"1",
"k",
"+=",
"1",
"event_counts",
"[",
"i",
"]",
"=",
"count_event",
"return",
"uniq_times",
",",
"event_counts",
",",
"total_counts"
] | Compute counts for left truncated and right censored survival data.
Parameters
----------
event : array
Boolean event indicator.
time_start : array
Time when a subject entered the study.
time_exit : array
Time when a subject left the study due to an
event or censoring.
Returns
-------
times : array
Unique time points.
n_events : array
Number of events at each time point.
n_at_risk : array
Number of samples that are censored or have an event at each time point. | [
"Compute",
"counts",
"for",
"left",
"truncated",
"and",
"right",
"censored",
"survival",
"data",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/nonparametric.py#L97-L167 |
246,612 | sebp/scikit-survival | sksurv/nonparametric.py | kaplan_meier_estimator | def kaplan_meier_estimator(event, time_exit, time_enter=None, time_min=None):
"""Kaplan-Meier estimator of survival function.
Parameters
----------
event : array-like, shape = (n_samples,)
Contains binary event indicators.
time_exit : array-like, shape = (n_samples,)
Contains event/censoring times.
time_enter : array-like, shape = (n_samples,), optional
Contains time when each individual entered the study for
left truncated survival data.
time_min : float, optional
Compute estimator conditional on survival at least up to
the specified time.
Returns
-------
time : array, shape = (n_times,)
Unique times.
prob_survival : array, shape = (n_times,)
Survival probability at each unique time point.
If `time_enter` is provided, estimates are conditional probabilities.
Examples
--------
Creating a Kaplan-Meier curve:
>>> x, y = kaplan_meier_estimator(event, time)
>>> plt.step(x, y, where="post")
>>> plt.ylim(0, 1)
>>> plt.show()
References
----------
.. [1] Kaplan, E. L. and Meier, P., "Nonparametric estimation from incomplete observations",
Journal of The American Statistical Association, vol. 53, pp. 457-481, 1958.
"""
event, time_enter, time_exit = check_y_survival(event, time_enter, time_exit, allow_all_censored=True)
check_consistent_length(event, time_enter, time_exit)
if time_enter is None:
uniq_times, n_events, n_at_risk = _compute_counts(event, time_exit)
else:
uniq_times, n_events, n_at_risk = _compute_counts_truncated(event, time_enter, time_exit)
values = 1 - n_events / n_at_risk
if time_min is not None:
mask = uniq_times >= time_min
uniq_times = numpy.compress(mask, uniq_times)
values = numpy.compress(mask, values)
y = numpy.cumprod(values)
return uniq_times, y | python | def kaplan_meier_estimator(event, time_exit, time_enter=None, time_min=None):
event, time_enter, time_exit = check_y_survival(event, time_enter, time_exit, allow_all_censored=True)
check_consistent_length(event, time_enter, time_exit)
if time_enter is None:
uniq_times, n_events, n_at_risk = _compute_counts(event, time_exit)
else:
uniq_times, n_events, n_at_risk = _compute_counts_truncated(event, time_enter, time_exit)
values = 1 - n_events / n_at_risk
if time_min is not None:
mask = uniq_times >= time_min
uniq_times = numpy.compress(mask, uniq_times)
values = numpy.compress(mask, values)
y = numpy.cumprod(values)
return uniq_times, y | [
"def",
"kaplan_meier_estimator",
"(",
"event",
",",
"time_exit",
",",
"time_enter",
"=",
"None",
",",
"time_min",
"=",
"None",
")",
":",
"event",
",",
"time_enter",
",",
"time_exit",
"=",
"check_y_survival",
"(",
"event",
",",
"time_enter",
",",
"time_exit",
",",
"allow_all_censored",
"=",
"True",
")",
"check_consistent_length",
"(",
"event",
",",
"time_enter",
",",
"time_exit",
")",
"if",
"time_enter",
"is",
"None",
":",
"uniq_times",
",",
"n_events",
",",
"n_at_risk",
"=",
"_compute_counts",
"(",
"event",
",",
"time_exit",
")",
"else",
":",
"uniq_times",
",",
"n_events",
",",
"n_at_risk",
"=",
"_compute_counts_truncated",
"(",
"event",
",",
"time_enter",
",",
"time_exit",
")",
"values",
"=",
"1",
"-",
"n_events",
"/",
"n_at_risk",
"if",
"time_min",
"is",
"not",
"None",
":",
"mask",
"=",
"uniq_times",
">=",
"time_min",
"uniq_times",
"=",
"numpy",
".",
"compress",
"(",
"mask",
",",
"uniq_times",
")",
"values",
"=",
"numpy",
".",
"compress",
"(",
"mask",
",",
"values",
")",
"y",
"=",
"numpy",
".",
"cumprod",
"(",
"values",
")",
"return",
"uniq_times",
",",
"y"
] | Kaplan-Meier estimator of survival function.
Parameters
----------
event : array-like, shape = (n_samples,)
Contains binary event indicators.
time_exit : array-like, shape = (n_samples,)
Contains event/censoring times.
time_enter : array-like, shape = (n_samples,), optional
Contains time when each individual entered the study for
left truncated survival data.
time_min : float, optional
Compute estimator conditional on survival at least up to
the specified time.
Returns
-------
time : array, shape = (n_times,)
Unique times.
prob_survival : array, shape = (n_times,)
Survival probability at each unique time point.
If `time_enter` is provided, estimates are conditional probabilities.
Examples
--------
Creating a Kaplan-Meier curve:
>>> x, y = kaplan_meier_estimator(event, time)
>>> plt.step(x, y, where="post")
>>> plt.ylim(0, 1)
>>> plt.show()
References
----------
.. [1] Kaplan, E. L. and Meier, P., "Nonparametric estimation from incomplete observations",
Journal of The American Statistical Association, vol. 53, pp. 457-481, 1958. | [
"Kaplan",
"-",
"Meier",
"estimator",
"of",
"survival",
"function",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/nonparametric.py#L170-L228 |
246,613 | sebp/scikit-survival | sksurv/nonparametric.py | nelson_aalen_estimator | def nelson_aalen_estimator(event, time):
"""Nelson-Aalen estimator of cumulative hazard function.
Parameters
----------
event : array-like, shape = (n_samples,)
Contains binary event indicators.
time : array-like, shape = (n_samples,)
Contains event/censoring times.
Returns
-------
time : array, shape = (n_times,)
Unique times.
cum_hazard : array, shape = (n_times,)
Cumulative hazard at each unique time point.
References
----------
.. [1] Nelson, W., "Theory and applications of hazard plotting for censored failure data",
Technometrics, vol. 14, pp. 945-965, 1972.
.. [2] Aalen, O. O., "Nonparametric inference for a family of counting processes",
Annals of Statistics, vol. 6, pp. 701–726, 1978.
"""
event, time = check_y_survival(event, time)
check_consistent_length(event, time)
uniq_times, n_events, n_at_risk = _compute_counts(event, time)
y = numpy.cumsum(n_events / n_at_risk)
return uniq_times, y | python | def nelson_aalen_estimator(event, time):
event, time = check_y_survival(event, time)
check_consistent_length(event, time)
uniq_times, n_events, n_at_risk = _compute_counts(event, time)
y = numpy.cumsum(n_events / n_at_risk)
return uniq_times, y | [
"def",
"nelson_aalen_estimator",
"(",
"event",
",",
"time",
")",
":",
"event",
",",
"time",
"=",
"check_y_survival",
"(",
"event",
",",
"time",
")",
"check_consistent_length",
"(",
"event",
",",
"time",
")",
"uniq_times",
",",
"n_events",
",",
"n_at_risk",
"=",
"_compute_counts",
"(",
"event",
",",
"time",
")",
"y",
"=",
"numpy",
".",
"cumsum",
"(",
"n_events",
"/",
"n_at_risk",
")",
"return",
"uniq_times",
",",
"y"
] | Nelson-Aalen estimator of cumulative hazard function.
Parameters
----------
event : array-like, shape = (n_samples,)
Contains binary event indicators.
time : array-like, shape = (n_samples,)
Contains event/censoring times.
Returns
-------
time : array, shape = (n_times,)
Unique times.
cum_hazard : array, shape = (n_times,)
Cumulative hazard at each unique time point.
References
----------
.. [1] Nelson, W., "Theory and applications of hazard plotting for censored failure data",
Technometrics, vol. 14, pp. 945-965, 1972.
.. [2] Aalen, O. O., "Nonparametric inference for a family of counting processes",
Annals of Statistics, vol. 6, pp. 701–726, 1978. | [
"Nelson",
"-",
"Aalen",
"estimator",
"of",
"cumulative",
"hazard",
"function",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/nonparametric.py#L231-L264 |
246,614 | sebp/scikit-survival | sksurv/nonparametric.py | ipc_weights | def ipc_weights(event, time):
"""Compute inverse probability of censoring weights
Parameters
----------
event : array, shape = (n_samples,)
Boolean event indicator.
time : array, shape = (n_samples,)
Time when a subject experienced an event or was censored.
Returns
-------
weights : array, shape = (n_samples,)
inverse probability of censoring weights
"""
if event.all():
return numpy.ones(time.shape[0])
unique_time, p = kaplan_meier_estimator(~event, time)
idx = numpy.searchsorted(unique_time, time[event])
Ghat = p[idx]
assert (Ghat > 0).all()
weights = numpy.zeros(time.shape[0])
weights[event] = 1.0 / Ghat
return weights | python | def ipc_weights(event, time):
if event.all():
return numpy.ones(time.shape[0])
unique_time, p = kaplan_meier_estimator(~event, time)
idx = numpy.searchsorted(unique_time, time[event])
Ghat = p[idx]
assert (Ghat > 0).all()
weights = numpy.zeros(time.shape[0])
weights[event] = 1.0 / Ghat
return weights | [
"def",
"ipc_weights",
"(",
"event",
",",
"time",
")",
":",
"if",
"event",
".",
"all",
"(",
")",
":",
"return",
"numpy",
".",
"ones",
"(",
"time",
".",
"shape",
"[",
"0",
"]",
")",
"unique_time",
",",
"p",
"=",
"kaplan_meier_estimator",
"(",
"~",
"event",
",",
"time",
")",
"idx",
"=",
"numpy",
".",
"searchsorted",
"(",
"unique_time",
",",
"time",
"[",
"event",
"]",
")",
"Ghat",
"=",
"p",
"[",
"idx",
"]",
"assert",
"(",
"Ghat",
">",
"0",
")",
".",
"all",
"(",
")",
"weights",
"=",
"numpy",
".",
"zeros",
"(",
"time",
".",
"shape",
"[",
"0",
"]",
")",
"weights",
"[",
"event",
"]",
"=",
"1.0",
"/",
"Ghat",
"return",
"weights"
] | Compute inverse probability of censoring weights
Parameters
----------
event : array, shape = (n_samples,)
Boolean event indicator.
time : array, shape = (n_samples,)
Time when a subject experienced an event or was censored.
Returns
-------
weights : array, shape = (n_samples,)
inverse probability of censoring weights | [
"Compute",
"inverse",
"probability",
"of",
"censoring",
"weights"
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/nonparametric.py#L267-L296 |
246,615 | sebp/scikit-survival | sksurv/nonparametric.py | SurvivalFunctionEstimator.fit | def fit(self, y):
"""Estimate survival distribution from training data.
Parameters
----------
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
"""
event, time = check_y_survival(y, allow_all_censored=True)
unique_time, prob = kaplan_meier_estimator(event, time)
self.unique_time_ = numpy.concatenate(([-numpy.infty], unique_time))
self.prob_ = numpy.concatenate(([1.], prob))
return self | python | def fit(self, y):
event, time = check_y_survival(y, allow_all_censored=True)
unique_time, prob = kaplan_meier_estimator(event, time)
self.unique_time_ = numpy.concatenate(([-numpy.infty], unique_time))
self.prob_ = numpy.concatenate(([1.], prob))
return self | [
"def",
"fit",
"(",
"self",
",",
"y",
")",
":",
"event",
",",
"time",
"=",
"check_y_survival",
"(",
"y",
",",
"allow_all_censored",
"=",
"True",
")",
"unique_time",
",",
"prob",
"=",
"kaplan_meier_estimator",
"(",
"event",
",",
"time",
")",
"self",
".",
"unique_time_",
"=",
"numpy",
".",
"concatenate",
"(",
"(",
"[",
"-",
"numpy",
".",
"infty",
"]",
",",
"unique_time",
")",
")",
"self",
".",
"prob_",
"=",
"numpy",
".",
"concatenate",
"(",
"(",
"[",
"1.",
"]",
",",
"prob",
")",
")",
"return",
"self"
] | Estimate survival distribution from training data.
Parameters
----------
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self | [
"Estimate",
"survival",
"distribution",
"from",
"training",
"data",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/nonparametric.py#L305-L325 |
246,616 | sebp/scikit-survival | sksurv/nonparametric.py | SurvivalFunctionEstimator.predict_proba | def predict_proba(self, time):
"""Return probability of an event after given time point.
:math:`\\hat{S}(t) = P(T > t)`
Parameters
----------
time : array, shape = (n_samples,)
Time to estimate probability at.
Returns
-------
prob : array, shape = (n_samples,)
Probability of an event.
"""
check_is_fitted(self, "unique_time_")
time = check_array(time, ensure_2d=False)
# K-M is undefined if estimate at last time point is non-zero
extends = time > self.unique_time_[-1]
if self.prob_[-1] > 0 and extends.any():
raise ValueError("time must be smaller than largest "
"observed time point: {}".format(self.unique_time_[-1]))
# beyond last time point is zero probability
Shat = numpy.empty(time.shape, dtype=float)
Shat[extends] = 0.0
valid = ~extends
time = time[valid]
idx = numpy.searchsorted(self.unique_time_, time)
# for non-exact matches, we need to shift the index to left
eps = numpy.finfo(self.unique_time_.dtype).eps
exact = numpy.absolute(self.unique_time_[idx] - time) < eps
idx[~exact] -= 1
Shat[valid] = self.prob_[idx]
return Shat | python | def predict_proba(self, time):
check_is_fitted(self, "unique_time_")
time = check_array(time, ensure_2d=False)
# K-M is undefined if estimate at last time point is non-zero
extends = time > self.unique_time_[-1]
if self.prob_[-1] > 0 and extends.any():
raise ValueError("time must be smaller than largest "
"observed time point: {}".format(self.unique_time_[-1]))
# beyond last time point is zero probability
Shat = numpy.empty(time.shape, dtype=float)
Shat[extends] = 0.0
valid = ~extends
time = time[valid]
idx = numpy.searchsorted(self.unique_time_, time)
# for non-exact matches, we need to shift the index to left
eps = numpy.finfo(self.unique_time_.dtype).eps
exact = numpy.absolute(self.unique_time_[idx] - time) < eps
idx[~exact] -= 1
Shat[valid] = self.prob_[idx]
return Shat | [
"def",
"predict_proba",
"(",
"self",
",",
"time",
")",
":",
"check_is_fitted",
"(",
"self",
",",
"\"unique_time_\"",
")",
"time",
"=",
"check_array",
"(",
"time",
",",
"ensure_2d",
"=",
"False",
")",
"# K-M is undefined if estimate at last time point is non-zero",
"extends",
"=",
"time",
">",
"self",
".",
"unique_time_",
"[",
"-",
"1",
"]",
"if",
"self",
".",
"prob_",
"[",
"-",
"1",
"]",
">",
"0",
"and",
"extends",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"time must be smaller than largest \"",
"\"observed time point: {}\"",
".",
"format",
"(",
"self",
".",
"unique_time_",
"[",
"-",
"1",
"]",
")",
")",
"# beyond last time point is zero probability",
"Shat",
"=",
"numpy",
".",
"empty",
"(",
"time",
".",
"shape",
",",
"dtype",
"=",
"float",
")",
"Shat",
"[",
"extends",
"]",
"=",
"0.0",
"valid",
"=",
"~",
"extends",
"time",
"=",
"time",
"[",
"valid",
"]",
"idx",
"=",
"numpy",
".",
"searchsorted",
"(",
"self",
".",
"unique_time_",
",",
"time",
")",
"# for non-exact matches, we need to shift the index to left",
"eps",
"=",
"numpy",
".",
"finfo",
"(",
"self",
".",
"unique_time_",
".",
"dtype",
")",
".",
"eps",
"exact",
"=",
"numpy",
".",
"absolute",
"(",
"self",
".",
"unique_time_",
"[",
"idx",
"]",
"-",
"time",
")",
"<",
"eps",
"idx",
"[",
"~",
"exact",
"]",
"-=",
"1",
"Shat",
"[",
"valid",
"]",
"=",
"self",
".",
"prob_",
"[",
"idx",
"]",
"return",
"Shat"
] | Return probability of an event after given time point.
:math:`\\hat{S}(t) = P(T > t)`
Parameters
----------
time : array, shape = (n_samples,)
Time to estimate probability at.
Returns
-------
prob : array, shape = (n_samples,)
Probability of an event. | [
"Return",
"probability",
"of",
"an",
"event",
"after",
"given",
"time",
"point",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/nonparametric.py#L327-L364 |
246,617 | sebp/scikit-survival | sksurv/nonparametric.py | CensoringDistributionEstimator.fit | def fit(self, y):
"""Estimate censoring distribution from training data.
Parameters
----------
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
"""
event, time = check_y_survival(y)
if event.all():
self.unique_time_ = numpy.unique(time)
self.prob_ = numpy.ones(self.unique_time_.shape[0])
else:
unique_time, prob = kaplan_meier_estimator(~event, time)
self.unique_time_ = numpy.concatenate(([-numpy.infty], unique_time))
self.prob_ = numpy.concatenate(([1.], prob))
return self | python | def fit(self, y):
event, time = check_y_survival(y)
if event.all():
self.unique_time_ = numpy.unique(time)
self.prob_ = numpy.ones(self.unique_time_.shape[0])
else:
unique_time, prob = kaplan_meier_estimator(~event, time)
self.unique_time_ = numpy.concatenate(([-numpy.infty], unique_time))
self.prob_ = numpy.concatenate(([1.], prob))
return self | [
"def",
"fit",
"(",
"self",
",",
"y",
")",
":",
"event",
",",
"time",
"=",
"check_y_survival",
"(",
"y",
")",
"if",
"event",
".",
"all",
"(",
")",
":",
"self",
".",
"unique_time_",
"=",
"numpy",
".",
"unique",
"(",
"time",
")",
"self",
".",
"prob_",
"=",
"numpy",
".",
"ones",
"(",
"self",
".",
"unique_time_",
".",
"shape",
"[",
"0",
"]",
")",
"else",
":",
"unique_time",
",",
"prob",
"=",
"kaplan_meier_estimator",
"(",
"~",
"event",
",",
"time",
")",
"self",
".",
"unique_time_",
"=",
"numpy",
".",
"concatenate",
"(",
"(",
"[",
"-",
"numpy",
".",
"infty",
"]",
",",
"unique_time",
")",
")",
"self",
".",
"prob_",
"=",
"numpy",
".",
"concatenate",
"(",
"(",
"[",
"1.",
"]",
",",
"prob",
")",
")",
"return",
"self"
] | Estimate censoring distribution from training data.
Parameters
----------
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self | [
"Estimate",
"censoring",
"distribution",
"from",
"training",
"data",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/nonparametric.py#L370-L393 |
246,618 | sebp/scikit-survival | sksurv/nonparametric.py | CensoringDistributionEstimator.predict_ipcw | def predict_ipcw(self, y):
"""Return inverse probability of censoring weights at given time points.
:math:`\\omega_i = \\delta_i / \\hat{G}(y_i)`
Parameters
----------
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
ipcw : array, shape = (n_samples,)
Inverse probability of censoring weights.
"""
event, time = check_y_survival(y)
Ghat = self.predict_proba(time[event])
if (Ghat == 0.0).any():
raise ValueError("censoring survival function is zero at one or more time points")
weights = numpy.zeros(time.shape[0])
weights[event] = 1.0 / Ghat
return weights | python | def predict_ipcw(self, y):
event, time = check_y_survival(y)
Ghat = self.predict_proba(time[event])
if (Ghat == 0.0).any():
raise ValueError("censoring survival function is zero at one or more time points")
weights = numpy.zeros(time.shape[0])
weights[event] = 1.0 / Ghat
return weights | [
"def",
"predict_ipcw",
"(",
"self",
",",
"y",
")",
":",
"event",
",",
"time",
"=",
"check_y_survival",
"(",
"y",
")",
"Ghat",
"=",
"self",
".",
"predict_proba",
"(",
"time",
"[",
"event",
"]",
")",
"if",
"(",
"Ghat",
"==",
"0.0",
")",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"censoring survival function is zero at one or more time points\"",
")",
"weights",
"=",
"numpy",
".",
"zeros",
"(",
"time",
".",
"shape",
"[",
"0",
"]",
")",
"weights",
"[",
"event",
"]",
"=",
"1.0",
"/",
"Ghat",
"return",
"weights"
] | Return inverse probability of censoring weights at given time points.
:math:`\\omega_i = \\delta_i / \\hat{G}(y_i)`
Parameters
----------
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
ipcw : array, shape = (n_samples,)
Inverse probability of censoring weights. | [
"Return",
"inverse",
"probability",
"of",
"censoring",
"weights",
"at",
"given",
"time",
"points",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/nonparametric.py#L395-L421 |
246,619 | sebp/scikit-survival | sksurv/metrics.py | concordance_index_censored | def concordance_index_censored(event_indicator, event_time, estimate, tied_tol=1e-8):
"""Concordance index for right-censored data
The concordance index is defined as the proportion of all comparable pairs
in which the predictions and outcomes are concordant.
Samples are comparable if for at least one of them an event occurred.
If the estimated risk is larger for the sample with a higher time of
event/censoring, the predictions of that pair are said to be concordant.
If an event occurred for one sample and the other is known to be
event-free at least until the time of event of the first, the second
sample is assumed to *outlive* the first.
When predicted risks are identical for a pair, 0.5 rather than 1 is added
to the count of concordant pairs.
A pair is not comparable if an event occurred for both of them at the same
time or an event occurred for one of them but the time of censoring is
smaller than the time of event of the first one.
Parameters
----------
event_indicator : array-like, shape = (n_samples,)
Boolean array denotes whether an event occurred
event_time : array-like, shape = (n_samples,)
Array containing the time of an event or time of censoring
estimate : array-like, shape = (n_samples,)
Estimated risk of experiencing an event
tied_tol : float, optional, default: 1e-8
The tolerance value for considering ties.
If the absolute difference between risk scores is smaller
than or equal to `tied_tol`, risk scores are considered tied.
Returns
-------
cindex : float
Concordance index
concordant : int
Number of concordant pairs
discordant : int
Number of discordant pairs
tied_risk : int
Number of pairs having tied estimated risks
tied_time : int
Number of comparable pairs sharing the same time
References
----------
.. [1] Harrell, F.E., Califf, R.M., Pryor, D.B., Lee, K.L., Rosati, R.A,
"Multivariable prognostic models: issues in developing models,
evaluating assumptions and adequacy, and measuring and reducing errors",
Statistics in Medicine, 15(4), 361-87, 1996.
"""
event_indicator, event_time, estimate = _check_inputs(
event_indicator, event_time, estimate)
w = numpy.ones_like(estimate)
return _estimate_concordance_index(event_indicator, event_time, estimate, w, tied_tol) | python | def concordance_index_censored(event_indicator, event_time, estimate, tied_tol=1e-8):
event_indicator, event_time, estimate = _check_inputs(
event_indicator, event_time, estimate)
w = numpy.ones_like(estimate)
return _estimate_concordance_index(event_indicator, event_time, estimate, w, tied_tol) | [
"def",
"concordance_index_censored",
"(",
"event_indicator",
",",
"event_time",
",",
"estimate",
",",
"tied_tol",
"=",
"1e-8",
")",
":",
"event_indicator",
",",
"event_time",
",",
"estimate",
"=",
"_check_inputs",
"(",
"event_indicator",
",",
"event_time",
",",
"estimate",
")",
"w",
"=",
"numpy",
".",
"ones_like",
"(",
"estimate",
")",
"return",
"_estimate_concordance_index",
"(",
"event_indicator",
",",
"event_time",
",",
"estimate",
",",
"w",
",",
"tied_tol",
")"
] | Concordance index for right-censored data
The concordance index is defined as the proportion of all comparable pairs
in which the predictions and outcomes are concordant.
Samples are comparable if for at least one of them an event occurred.
If the estimated risk is larger for the sample with a higher time of
event/censoring, the predictions of that pair are said to be concordant.
If an event occurred for one sample and the other is known to be
event-free at least until the time of event of the first, the second
sample is assumed to *outlive* the first.
When predicted risks are identical for a pair, 0.5 rather than 1 is added
to the count of concordant pairs.
A pair is not comparable if an event occurred for both of them at the same
time or an event occurred for one of them but the time of censoring is
smaller than the time of event of the first one.
Parameters
----------
event_indicator : array-like, shape = (n_samples,)
Boolean array denotes whether an event occurred
event_time : array-like, shape = (n_samples,)
Array containing the time of an event or time of censoring
estimate : array-like, shape = (n_samples,)
Estimated risk of experiencing an event
tied_tol : float, optional, default: 1e-8
The tolerance value for considering ties.
If the absolute difference between risk scores is smaller
than or equal to `tied_tol`, risk scores are considered tied.
Returns
-------
cindex : float
Concordance index
concordant : int
Number of concordant pairs
discordant : int
Number of discordant pairs
tied_risk : int
Number of pairs having tied estimated risks
tied_time : int
Number of comparable pairs sharing the same time
References
----------
.. [1] Harrell, F.E., Califf, R.M., Pryor, D.B., Lee, K.L., Rosati, R.A,
"Multivariable prognostic models: issues in developing models,
evaluating assumptions and adequacy, and measuring and reducing errors",
Statistics in Medicine, 15(4), 361-87, 1996. | [
"Concordance",
"index",
"for",
"right",
"-",
"censored",
"data"
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/metrics.py#L111-L174 |
246,620 | sebp/scikit-survival | sksurv/metrics.py | concordance_index_ipcw | def concordance_index_ipcw(survival_train, survival_test, estimate, tau=None, tied_tol=1e-8):
"""Concordance index for right-censored data based on inverse probability of censoring weights.
This is an alternative to the estimator in :func:`concordance_index_censored`
that does not depend on the distribution of censoring times in the test data.
Therefore, the estimate is unbiased and consistent for a population concordance
measure that is free of censoring.
It is based on inverse probability of censoring weights, thus requires
access to survival times from the training data to estimate the censoring
distribution. Note that this requires that survival times `survival_test`
lie within the range of survival times `survival_train`. This can be
achieved by specifying the truncation time `tau`.
The resulting `cindex` tells how well the given prediction model works in
predicting events that occur in the time range from 0 to `tau`.
The estimator uses the Kaplan-Meier estimator to estimate the
censoring survivor function. Therefore, it is restricted to
situations where the random censoring assumption holds and
censoring is independent of the features.
Parameters
----------
survival_train : structured array, shape = (n_train_samples,)
Survival times for training data to estimate the censoring
distribution from.
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
survival_test : structured array, shape = (n_samples,)
Survival times of test data.
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
estimate : array-like, shape = (n_samples,)
Estimated risk of experiencing an event of test data.
tau : float, optional
Truncation time. The survival function for the underlying
censoring time distribution :math:`D` needs to be positive
at `tau`, i.e., `tau` should be chosen such that the
probability of being censored after time `tau` is non-zero:
:math:`P(D > \\tau) > 0`. If `None`, no truncation is performed.
tied_tol : float, optional, default: 1e-8
The tolerance value for considering ties.
If the absolute difference between risk scores is smaller
than or equal to `tied_tol`, risk scores are considered tied.
Returns
-------
cindex : float
Concordance index
concordant : int
Number of concordant pairs
discordant : int
Number of discordant pairs
tied_risk : int
Number of pairs having tied estimated risks
tied_time : int
Number of comparable pairs sharing the same time
References
----------
.. [1] Uno, H., Cai, T., Pencina, M. J., D’Agostino, R. B., & Wei, L. J. (2011).
"On the C-statistics for evaluating overall adequacy of risk prediction
procedures with censored survival data".
Statistics in Medicine, 30(10), 1105–1117.
"""
test_event, test_time = check_y_survival(survival_test)
if tau is not None:
survival_test = survival_test[test_time < tau]
estimate = check_array(estimate, ensure_2d=False)
check_consistent_length(test_event, test_time, estimate)
cens = CensoringDistributionEstimator()
cens.fit(survival_train)
ipcw = cens.predict_ipcw(survival_test)
w = numpy.square(ipcw)
return _estimate_concordance_index(test_event, test_time, estimate, w, tied_tol) | python | def concordance_index_ipcw(survival_train, survival_test, estimate, tau=None, tied_tol=1e-8):
test_event, test_time = check_y_survival(survival_test)
if tau is not None:
survival_test = survival_test[test_time < tau]
estimate = check_array(estimate, ensure_2d=False)
check_consistent_length(test_event, test_time, estimate)
cens = CensoringDistributionEstimator()
cens.fit(survival_train)
ipcw = cens.predict_ipcw(survival_test)
w = numpy.square(ipcw)
return _estimate_concordance_index(test_event, test_time, estimate, w, tied_tol) | [
"def",
"concordance_index_ipcw",
"(",
"survival_train",
",",
"survival_test",
",",
"estimate",
",",
"tau",
"=",
"None",
",",
"tied_tol",
"=",
"1e-8",
")",
":",
"test_event",
",",
"test_time",
"=",
"check_y_survival",
"(",
"survival_test",
")",
"if",
"tau",
"is",
"not",
"None",
":",
"survival_test",
"=",
"survival_test",
"[",
"test_time",
"<",
"tau",
"]",
"estimate",
"=",
"check_array",
"(",
"estimate",
",",
"ensure_2d",
"=",
"False",
")",
"check_consistent_length",
"(",
"test_event",
",",
"test_time",
",",
"estimate",
")",
"cens",
"=",
"CensoringDistributionEstimator",
"(",
")",
"cens",
".",
"fit",
"(",
"survival_train",
")",
"ipcw",
"=",
"cens",
".",
"predict_ipcw",
"(",
"survival_test",
")",
"w",
"=",
"numpy",
".",
"square",
"(",
"ipcw",
")",
"return",
"_estimate_concordance_index",
"(",
"test_event",
",",
"test_time",
",",
"estimate",
",",
"w",
",",
"tied_tol",
")"
] | Concordance index for right-censored data based on inverse probability of censoring weights.
This is an alternative to the estimator in :func:`concordance_index_censored`
that does not depend on the distribution of censoring times in the test data.
Therefore, the estimate is unbiased and consistent for a population concordance
measure that is free of censoring.
It is based on inverse probability of censoring weights, thus requires
access to survival times from the training data to estimate the censoring
distribution. Note that this requires that survival times `survival_test`
lie within the range of survival times `survival_train`. This can be
achieved by specifying the truncation time `tau`.
The resulting `cindex` tells how well the given prediction model works in
predicting events that occur in the time range from 0 to `tau`.
The estimator uses the Kaplan-Meier estimator to estimate the
censoring survivor function. Therefore, it is restricted to
situations where the random censoring assumption holds and
censoring is independent of the features.
Parameters
----------
survival_train : structured array, shape = (n_train_samples,)
Survival times for training data to estimate the censoring
distribution from.
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
survival_test : structured array, shape = (n_samples,)
Survival times of test data.
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
estimate : array-like, shape = (n_samples,)
Estimated risk of experiencing an event of test data.
tau : float, optional
Truncation time. The survival function for the underlying
censoring time distribution :math:`D` needs to be positive
at `tau`, i.e., `tau` should be chosen such that the
probability of being censored after time `tau` is non-zero:
:math:`P(D > \\tau) > 0`. If `None`, no truncation is performed.
tied_tol : float, optional, default: 1e-8
The tolerance value for considering ties.
If the absolute difference between risk scores is smaller
or equal than `tied_tol`, risk scores are considered tied.
Returns
-------
cindex : float
Concordance index
concordant : int
Number of concordant pairs
discordant : int
Number of discordant pairs
tied_risk : int
Number of pairs having tied estimated risks
tied_time : int
Number of comparable pairs sharing the same time
References
----------
.. [1] Uno, H., Cai, T., Pencina, M. J., D’Agostino, R. B., & Wei, L. J. (2011).
"On the C-statistics for evaluating overall adequacy of risk prediction
procedures with censored survival data".
Statistics in Medicine, 30(10), 1105–1117. | [
"Concordance",
"index",
"for",
"right",
"-",
"censored",
"data",
"based",
"on",
"inverse",
"probability",
"of",
"censoring",
"weights",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/metrics.py#L177-L266 |
246,621 | sebp/scikit-survival | sksurv/kernels/clinical.py | _nominal_kernel | def _nominal_kernel(x, y, out):
"""Number of features that match exactly"""
for i in range(x.shape[0]):
for j in range(y.shape[0]):
out[i, j] += (x[i, :] == y[j, :]).sum()
return out | python | def _nominal_kernel(x, y, out):
for i in range(x.shape[0]):
for j in range(y.shape[0]):
out[i, j] += (x[i, :] == y[j, :]).sum()
return out | [
"def",
"_nominal_kernel",
"(",
"x",
",",
"y",
",",
"out",
")",
":",
"for",
"i",
"in",
"range",
"(",
"x",
".",
"shape",
"[",
"0",
"]",
")",
":",
"for",
"j",
"in",
"range",
"(",
"y",
".",
"shape",
"[",
"0",
"]",
")",
":",
"out",
"[",
"i",
",",
"j",
"]",
"+=",
"(",
"x",
"[",
"i",
",",
":",
"]",
"==",
"y",
"[",
"j",
",",
":",
"]",
")",
".",
"sum",
"(",
")",
"return",
"out"
] | Number of features that match exactly | [
"Number",
"of",
"features",
"that",
"match",
"exactly"
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/kernels/clinical.py#L26-L32 |
246,622 | sebp/scikit-survival | sksurv/kernels/clinical.py | _get_continuous_and_ordinal_array | def _get_continuous_and_ordinal_array(x):
"""Convert array from continuous and ordered categorical columns"""
nominal_columns = x.select_dtypes(include=['object', 'category']).columns
ordinal_columns = pandas.Index([v for v in nominal_columns if x[v].cat.ordered])
continuous_columns = x.select_dtypes(include=[numpy.number]).columns
x_num = x.loc[:, continuous_columns].astype(numpy.float64).values
if len(ordinal_columns) > 0:
x = _ordinal_as_numeric(x, ordinal_columns)
nominal_columns = nominal_columns.difference(ordinal_columns)
x_out = numpy.column_stack((x_num, x))
else:
x_out = x_num
return x_out, nominal_columns | python | def _get_continuous_and_ordinal_array(x):
nominal_columns = x.select_dtypes(include=['object', 'category']).columns
ordinal_columns = pandas.Index([v for v in nominal_columns if x[v].cat.ordered])
continuous_columns = x.select_dtypes(include=[numpy.number]).columns
x_num = x.loc[:, continuous_columns].astype(numpy.float64).values
if len(ordinal_columns) > 0:
x = _ordinal_as_numeric(x, ordinal_columns)
nominal_columns = nominal_columns.difference(ordinal_columns)
x_out = numpy.column_stack((x_num, x))
else:
x_out = x_num
return x_out, nominal_columns | [
"def",
"_get_continuous_and_ordinal_array",
"(",
"x",
")",
":",
"nominal_columns",
"=",
"x",
".",
"select_dtypes",
"(",
"include",
"=",
"[",
"'object'",
",",
"'category'",
"]",
")",
".",
"columns",
"ordinal_columns",
"=",
"pandas",
".",
"Index",
"(",
"[",
"v",
"for",
"v",
"in",
"nominal_columns",
"if",
"x",
"[",
"v",
"]",
".",
"cat",
".",
"ordered",
"]",
")",
"continuous_columns",
"=",
"x",
".",
"select_dtypes",
"(",
"include",
"=",
"[",
"numpy",
".",
"number",
"]",
")",
".",
"columns",
"x_num",
"=",
"x",
".",
"loc",
"[",
":",
",",
"continuous_columns",
"]",
".",
"astype",
"(",
"numpy",
".",
"float64",
")",
".",
"values",
"if",
"len",
"(",
"ordinal_columns",
")",
">",
"0",
":",
"x",
"=",
"_ordinal_as_numeric",
"(",
"x",
",",
"ordinal_columns",
")",
"nominal_columns",
"=",
"nominal_columns",
".",
"difference",
"(",
"ordinal_columns",
")",
"x_out",
"=",
"numpy",
".",
"column_stack",
"(",
"(",
"x_num",
",",
"x",
")",
")",
"else",
":",
"x_out",
"=",
"x_num",
"return",
"x_out",
",",
"nominal_columns"
] | Convert array from continuous and ordered categorical columns | [
"Convert",
"array",
"from",
"continuous",
"and",
"ordered",
"categorical",
"columns"
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/kernels/clinical.py#L35-L50 |
246,623 | sebp/scikit-survival | sksurv/kernels/clinical.py | clinical_kernel | def clinical_kernel(x, y=None):
"""Computes clinical kernel
The clinical kernel distinguishes between continuous,
ordinal, and nominal variables.
Parameters
----------
x : pandas.DataFrame, shape = (n_samples_x, n_features)
Training data
y : pandas.DataFrame, shape = (n_samples_y, n_features)
Testing data
Returns
-------
kernel : array, shape = (n_samples_x, n_samples_y)
Kernel matrix. Values are normalized to lie within [0, 1].
References
----------
.. [1] Daemen, A., De Moor, B.,
"Development of a kernel function for clinical data".
Annual International Conference of the IEEE Engineering in Medicine and Biology Society, 5913-7, 2009
"""
if y is not None:
if x.shape[1] != y.shape[1]:
raise ValueError('x and y have different number of features')
if not x.columns.equals(y.columns):
raise ValueError('columns do not match')
else:
y = x
mat = numpy.zeros((x.shape[0], y.shape[0]), dtype=float)
x_numeric, nominal_columns = _get_continuous_and_ordinal_array(x)
if id(x) != id(y):
y_numeric, _ = _get_continuous_and_ordinal_array(y)
else:
y_numeric = x_numeric
continuous_ordinal_kernel(x_numeric, y_numeric, mat)
_nominal_kernel(x.loc[:, nominal_columns].values,
y.loc[:, nominal_columns].values,
mat)
mat /= x.shape[1]
return mat | python | def clinical_kernel(x, y=None):
if y is not None:
if x.shape[1] != y.shape[1]:
raise ValueError('x and y have different number of features')
if not x.columns.equals(y.columns):
raise ValueError('columns do not match')
else:
y = x
mat = numpy.zeros((x.shape[0], y.shape[0]), dtype=float)
x_numeric, nominal_columns = _get_continuous_and_ordinal_array(x)
if id(x) != id(y):
y_numeric, _ = _get_continuous_and_ordinal_array(y)
else:
y_numeric = x_numeric
continuous_ordinal_kernel(x_numeric, y_numeric, mat)
_nominal_kernel(x.loc[:, nominal_columns].values,
y.loc[:, nominal_columns].values,
mat)
mat /= x.shape[1]
return mat | [
"def",
"clinical_kernel",
"(",
"x",
",",
"y",
"=",
"None",
")",
":",
"if",
"y",
"is",
"not",
"None",
":",
"if",
"x",
".",
"shape",
"[",
"1",
"]",
"!=",
"y",
".",
"shape",
"[",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"'x and y have different number of features'",
")",
"if",
"not",
"x",
".",
"columns",
".",
"equals",
"(",
"y",
".",
"columns",
")",
":",
"raise",
"ValueError",
"(",
"'columns do not match'",
")",
"else",
":",
"y",
"=",
"x",
"mat",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"x",
".",
"shape",
"[",
"0",
"]",
",",
"y",
".",
"shape",
"[",
"0",
"]",
")",
",",
"dtype",
"=",
"float",
")",
"x_numeric",
",",
"nominal_columns",
"=",
"_get_continuous_and_ordinal_array",
"(",
"x",
")",
"if",
"id",
"(",
"x",
")",
"!=",
"id",
"(",
"y",
")",
":",
"y_numeric",
",",
"_",
"=",
"_get_continuous_and_ordinal_array",
"(",
"y",
")",
"else",
":",
"y_numeric",
"=",
"x_numeric",
"continuous_ordinal_kernel",
"(",
"x_numeric",
",",
"y_numeric",
",",
"mat",
")",
"_nominal_kernel",
"(",
"x",
".",
"loc",
"[",
":",
",",
"nominal_columns",
"]",
".",
"values",
",",
"y",
".",
"loc",
"[",
":",
",",
"nominal_columns",
"]",
".",
"values",
",",
"mat",
")",
"mat",
"/=",
"x",
".",
"shape",
"[",
"1",
"]",
"return",
"mat"
] | Computes clinical kernel
The clinical kernel distinguishes between continuous,
ordinal, and nominal variables.
Parameters
----------
x : pandas.DataFrame, shape = (n_samples_x, n_features)
Training data
y : pandas.DataFrame, shape = (n_samples_y, n_features)
Testing data
Returns
-------
kernel : array, shape = (n_samples_x, n_samples_y)
Kernel matrix. Values are normalized to lie within [0, 1].
References
----------
.. [1] Daemen, A., De Moor, B.,
"Development of a kernel function for clinical data".
Annual International Conference of the IEEE Engineering in Medicine and Biology Society, 5913-7, 2009 | [
"Computes",
"clinical",
"kernel"
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/kernels/clinical.py#L61-L107 |
246,624 | sebp/scikit-survival | sksurv/kernels/clinical.py | ClinicalKernelTransform._prepare_by_column_dtype | def _prepare_by_column_dtype(self, X):
"""Get distance functions for each column's dtype"""
if not isinstance(X, pandas.DataFrame):
raise TypeError('X must be a pandas DataFrame')
numeric_columns = []
nominal_columns = []
numeric_ranges = []
fit_data = numpy.empty_like(X)
for i, dt in enumerate(X.dtypes):
col = X.iloc[:, i]
if is_categorical_dtype(dt):
if col.cat.ordered:
numeric_ranges.append(col.cat.codes.max() - col.cat.codes.min())
numeric_columns.append(i)
else:
nominal_columns.append(i)
col = col.cat.codes
elif is_numeric_dtype(dt):
numeric_ranges.append(col.max() - col.min())
numeric_columns.append(i)
else:
raise TypeError('unsupported dtype: %r' % dt)
fit_data[:, i] = col.values
self._numeric_columns = numpy.asarray(numeric_columns)
self._nominal_columns = numpy.asarray(nominal_columns)
self._numeric_ranges = numpy.asarray(numeric_ranges, dtype=float)
self.X_fit_ = fit_data | python | def _prepare_by_column_dtype(self, X):
if not isinstance(X, pandas.DataFrame):
raise TypeError('X must be a pandas DataFrame')
numeric_columns = []
nominal_columns = []
numeric_ranges = []
fit_data = numpy.empty_like(X)
for i, dt in enumerate(X.dtypes):
col = X.iloc[:, i]
if is_categorical_dtype(dt):
if col.cat.ordered:
numeric_ranges.append(col.cat.codes.max() - col.cat.codes.min())
numeric_columns.append(i)
else:
nominal_columns.append(i)
col = col.cat.codes
elif is_numeric_dtype(dt):
numeric_ranges.append(col.max() - col.min())
numeric_columns.append(i)
else:
raise TypeError('unsupported dtype: %r' % dt)
fit_data[:, i] = col.values
self._numeric_columns = numpy.asarray(numeric_columns)
self._nominal_columns = numpy.asarray(nominal_columns)
self._numeric_ranges = numpy.asarray(numeric_ranges, dtype=float)
self.X_fit_ = fit_data | [
"def",
"_prepare_by_column_dtype",
"(",
"self",
",",
"X",
")",
":",
"if",
"not",
"isinstance",
"(",
"X",
",",
"pandas",
".",
"DataFrame",
")",
":",
"raise",
"TypeError",
"(",
"'X must be a pandas DataFrame'",
")",
"numeric_columns",
"=",
"[",
"]",
"nominal_columns",
"=",
"[",
"]",
"numeric_ranges",
"=",
"[",
"]",
"fit_data",
"=",
"numpy",
".",
"empty_like",
"(",
"X",
")",
"for",
"i",
",",
"dt",
"in",
"enumerate",
"(",
"X",
".",
"dtypes",
")",
":",
"col",
"=",
"X",
".",
"iloc",
"[",
":",
",",
"i",
"]",
"if",
"is_categorical_dtype",
"(",
"dt",
")",
":",
"if",
"col",
".",
"cat",
".",
"ordered",
":",
"numeric_ranges",
".",
"append",
"(",
"col",
".",
"cat",
".",
"codes",
".",
"max",
"(",
")",
"-",
"col",
".",
"cat",
".",
"codes",
".",
"min",
"(",
")",
")",
"numeric_columns",
".",
"append",
"(",
"i",
")",
"else",
":",
"nominal_columns",
".",
"append",
"(",
"i",
")",
"col",
"=",
"col",
".",
"cat",
".",
"codes",
"elif",
"is_numeric_dtype",
"(",
"dt",
")",
":",
"numeric_ranges",
".",
"append",
"(",
"col",
".",
"max",
"(",
")",
"-",
"col",
".",
"min",
"(",
")",
")",
"numeric_columns",
".",
"append",
"(",
"i",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'unsupported dtype: %r'",
"%",
"dt",
")",
"fit_data",
"[",
":",
",",
"i",
"]",
"=",
"col",
".",
"values",
"self",
".",
"_numeric_columns",
"=",
"numpy",
".",
"asarray",
"(",
"numeric_columns",
")",
"self",
".",
"_nominal_columns",
"=",
"numpy",
".",
"asarray",
"(",
"nominal_columns",
")",
"self",
".",
"_numeric_ranges",
"=",
"numpy",
".",
"asarray",
"(",
"numeric_ranges",
",",
"dtype",
"=",
"float",
")",
"self",
".",
"X_fit_",
"=",
"fit_data"
] | Get distance functions for each column's dtype | [
"Get",
"distance",
"functions",
"for",
"each",
"column",
"s",
"dtype"
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/kernels/clinical.py#L153-L185 |
246,625 | sebp/scikit-survival | sksurv/kernels/clinical.py | ClinicalKernelTransform.fit | def fit(self, X, y=None, **kwargs):
"""Determine transformation parameters from data in X.
Subsequent calls to `transform(Y)` compute the pairwise
distance to `X`.
Parameters of the clinical kernel are only updated
if `fit_once` is `False`, otherwise you have to
explicitly call `prepare()` once.
Parameters
----------
X: pandas.DataFrame, shape = (n_samples, n_features)
Data to estimate parameters from.
y : None
Argument is ignored (included for compatibility reasons).
kwargs : dict
Argument is ignored (included for compatibility reasons).
Returns
-------
self : object
Returns the instance itself.
"""
if X.ndim != 2:
raise ValueError("expected 2d array, but got %d" % X.ndim)
if self.fit_once:
self.X_fit_ = X
else:
self._prepare_by_column_dtype(X)
return self | python | def fit(self, X, y=None, **kwargs):
if X.ndim != 2:
raise ValueError("expected 2d array, but got %d" % X.ndim)
if self.fit_once:
self.X_fit_ = X
else:
self._prepare_by_column_dtype(X)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"X",
".",
"ndim",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"\"expected 2d array, but got %d\"",
"%",
"X",
".",
"ndim",
")",
"if",
"self",
".",
"fit_once",
":",
"self",
".",
"X_fit_",
"=",
"X",
"else",
":",
"self",
".",
"_prepare_by_column_dtype",
"(",
"X",
")",
"return",
"self"
] | Determine transformation parameters from data in X.
Subsequent calls to `transform(Y)` compute the pairwise
distance to `X`.
Parameters of the clinical kernel are only updated
if `fit_once` is `False`, otherwise you have to
explicitly call `prepare()` once.
Parameters
----------
X: pandas.DataFrame, shape = (n_samples, n_features)
Data to estimate parameters from.
y : None
Argument is ignored (included for compatibility reasons).
kwargs : dict
Argument is ignored (included for compatibility reasons).
Returns
-------
self : object
Returns the instance itself. | [
"Determine",
"transformation",
"parameters",
"from",
"data",
"in",
"X",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/kernels/clinical.py#L187-L220 |
246,626 | sebp/scikit-survival | sksurv/kernels/clinical.py | ClinicalKernelTransform.transform | def transform(self, Y):
r"""Compute all pairwise distances between `self.X_fit_` and `Y`.
Parameters
----------
y : array-like, shape = (n_samples_y, n_features)
Returns
-------
kernel : ndarray, shape = (n_samples_y, n_samples_X_fit\_)
Kernel matrix. Values are normalized to lie within [0, 1].
"""
check_is_fitted(self, 'X_fit_')
n_samples_x, n_features = self.X_fit_.shape
Y = numpy.asarray(Y)
if Y.shape[1] != n_features:
raise ValueError('expected array with %d features, but got %d' % (n_features, Y.shape[1]))
n_samples_y = Y.shape[0]
mat = numpy.zeros((n_samples_y, n_samples_x), dtype=float)
continuous_ordinal_kernel_with_ranges(Y[:, self._numeric_columns].astype(numpy.float64),
self.X_fit_[:, self._numeric_columns].astype(numpy.float64),
self._numeric_ranges, mat)
if len(self._nominal_columns) > 0:
_nominal_kernel(Y[:, self._nominal_columns],
self.X_fit_[:, self._nominal_columns],
mat)
mat /= n_features
return mat | python | def transform(self, Y):
r"""Compute all pairwise distances between `self.X_fit_` and `Y`.
Parameters
----------
y : array-like, shape = (n_samples_y, n_features)
Returns
-------
kernel : ndarray, shape = (n_samples_y, n_samples_X_fit\_)
Kernel matrix. Values are normalized to lie within [0, 1].
"""
check_is_fitted(self, 'X_fit_')
n_samples_x, n_features = self.X_fit_.shape
Y = numpy.asarray(Y)
if Y.shape[1] != n_features:
raise ValueError('expected array with %d features, but got %d' % (n_features, Y.shape[1]))
n_samples_y = Y.shape[0]
mat = numpy.zeros((n_samples_y, n_samples_x), dtype=float)
continuous_ordinal_kernel_with_ranges(Y[:, self._numeric_columns].astype(numpy.float64),
self.X_fit_[:, self._numeric_columns].astype(numpy.float64),
self._numeric_ranges, mat)
if len(self._nominal_columns) > 0:
_nominal_kernel(Y[:, self._nominal_columns],
self.X_fit_[:, self._nominal_columns],
mat)
mat /= n_features
return mat | [
"def",
"transform",
"(",
"self",
",",
"Y",
")",
":",
"check_is_fitted",
"(",
"self",
",",
"'X_fit_'",
")",
"n_samples_x",
",",
"n_features",
"=",
"self",
".",
"X_fit_",
".",
"shape",
"Y",
"=",
"numpy",
".",
"asarray",
"(",
"Y",
")",
"if",
"Y",
".",
"shape",
"[",
"1",
"]",
"!=",
"n_features",
":",
"raise",
"ValueError",
"(",
"'expected array with %d features, but got %d'",
"%",
"(",
"n_features",
",",
"Y",
".",
"shape",
"[",
"1",
"]",
")",
")",
"n_samples_y",
"=",
"Y",
".",
"shape",
"[",
"0",
"]",
"mat",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"n_samples_y",
",",
"n_samples_x",
")",
",",
"dtype",
"=",
"float",
")",
"continuous_ordinal_kernel_with_ranges",
"(",
"Y",
"[",
":",
",",
"self",
".",
"_numeric_columns",
"]",
".",
"astype",
"(",
"numpy",
".",
"float64",
")",
",",
"self",
".",
"X_fit_",
"[",
":",
",",
"self",
".",
"_numeric_columns",
"]",
".",
"astype",
"(",
"numpy",
".",
"float64",
")",
",",
"self",
".",
"_numeric_ranges",
",",
"mat",
")",
"if",
"len",
"(",
"self",
".",
"_nominal_columns",
")",
">",
"0",
":",
"_nominal_kernel",
"(",
"Y",
"[",
":",
",",
"self",
".",
"_nominal_columns",
"]",
",",
"self",
".",
"X_fit_",
"[",
":",
",",
"self",
".",
"_nominal_columns",
"]",
",",
"mat",
")",
"mat",
"/=",
"n_features",
"return",
"mat"
] | r"""Compute all pairwise distances between `self.X_fit_` and `Y`.
Parameters
----------
y : array-like, shape = (n_samples_y, n_features)
Returns
-------
kernel : ndarray, shape = (n_samples_y, n_samples_X_fit\_)
Kernel matrix. Values are normalized to lie within [0, 1]. | [
"r",
"Compute",
"all",
"pairwise",
"distances",
"between",
"self",
".",
"X_fit_",
"and",
"Y",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/kernels/clinical.py#L222-L257 |
246,627 | sebp/scikit-survival | sksurv/ensemble/boosting.py | _fit_stage_componentwise | def _fit_stage_componentwise(X, residuals, sample_weight, **fit_params):
"""Fit component-wise weighted least squares model"""
n_features = X.shape[1]
base_learners = []
error = numpy.empty(n_features)
for component in range(n_features):
learner = ComponentwiseLeastSquares(component).fit(X, residuals, sample_weight)
l_pred = learner.predict(X)
error[component] = squared_norm(residuals - l_pred)
base_learners.append(learner)
# TODO: could use bottleneck.nanargmin for speed
best_component = numpy.nanargmin(error)
best_learner = base_learners[best_component]
return best_learner | python | def _fit_stage_componentwise(X, residuals, sample_weight, **fit_params):
n_features = X.shape[1]
base_learners = []
error = numpy.empty(n_features)
for component in range(n_features):
learner = ComponentwiseLeastSquares(component).fit(X, residuals, sample_weight)
l_pred = learner.predict(X)
error[component] = squared_norm(residuals - l_pred)
base_learners.append(learner)
# TODO: could use bottleneck.nanargmin for speed
best_component = numpy.nanargmin(error)
best_learner = base_learners[best_component]
return best_learner | [
"def",
"_fit_stage_componentwise",
"(",
"X",
",",
"residuals",
",",
"sample_weight",
",",
"*",
"*",
"fit_params",
")",
":",
"n_features",
"=",
"X",
".",
"shape",
"[",
"1",
"]",
"base_learners",
"=",
"[",
"]",
"error",
"=",
"numpy",
".",
"empty",
"(",
"n_features",
")",
"for",
"component",
"in",
"range",
"(",
"n_features",
")",
":",
"learner",
"=",
"ComponentwiseLeastSquares",
"(",
"component",
")",
".",
"fit",
"(",
"X",
",",
"residuals",
",",
"sample_weight",
")",
"l_pred",
"=",
"learner",
".",
"predict",
"(",
"X",
")",
"error",
"[",
"component",
"]",
"=",
"squared_norm",
"(",
"residuals",
"-",
"l_pred",
")",
"base_learners",
".",
"append",
"(",
"learner",
")",
"# TODO: could use bottleneck.nanargmin for speed",
"best_component",
"=",
"numpy",
".",
"nanargmin",
"(",
"error",
")",
"best_learner",
"=",
"base_learners",
"[",
"best_component",
"]",
"return",
"best_learner"
] | Fit component-wise weighted least squares model | [
"Fit",
"component",
"-",
"wise",
"weighted",
"least",
"squares",
"model"
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/ensemble/boosting.py#L72-L87 |
246,628 | sebp/scikit-survival | sksurv/ensemble/boosting.py | ComponentwiseGradientBoostingSurvivalAnalysis.coef_ | def coef_(self):
"""Return the aggregated coefficients.
Returns
-------
coef_ : ndarray, shape = (n_features + 1,)
Coefficients of features. The first element denotes the intercept.
"""
coef = numpy.zeros(self.n_features_ + 1, dtype=float)
for estimator in self.estimators_:
coef[estimator.component] += self.learning_rate * estimator.coef_
return coef | python | def coef_(self):
coef = numpy.zeros(self.n_features_ + 1, dtype=float)
for estimator in self.estimators_:
coef[estimator.component] += self.learning_rate * estimator.coef_
return coef | [
"def",
"coef_",
"(",
"self",
")",
":",
"coef",
"=",
"numpy",
".",
"zeros",
"(",
"self",
".",
"n_features_",
"+",
"1",
",",
"dtype",
"=",
"float",
")",
"for",
"estimator",
"in",
"self",
".",
"estimators_",
":",
"coef",
"[",
"estimator",
".",
"component",
"]",
"+=",
"self",
".",
"learning_rate",
"*",
"estimator",
".",
"coef_",
"return",
"coef"
] | Return the aggregated coefficients.
Returns
-------
coef_ : ndarray, shape = (n_features + 1,)
Coefficients of features. The first element denotes the intercept. | [
"Return",
"the",
"aggregated",
"coefficients",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/ensemble/boosting.py#L338-L351 |
246,629 | sebp/scikit-survival | sksurv/ensemble/boosting.py | GradientBoostingSurvivalAnalysis._fit_stage | def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
random_state, scale, X_idx_sorted, X_csc=None, X_csr=None):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == numpy.bool
loss = self.loss_
# whether to use dropout in next iteration
do_dropout = self.dropout_rate > 0. and 0 < i < len(scale) - 1
for k in range(loss.K):
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=self.criterion,
splitter='best',
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
min_impurity_split=self.min_impurity_split,
min_impurity_decrease=self.min_impurity_decrease,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state,
presort=self.presort)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(numpy.float64)
X = X_csr if X_csr is not None else X
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
# add tree to ensemble
self.estimators_[i, k] = tree
# update tree leaves
if do_dropout:
# select base learners to be dropped for next iteration
drop_model, n_dropped = _sample_binomial_plus_one(self.dropout_rate, i + 1, random_state)
# adjust scaling factor of tree that is going to be trained in next iteration
scale[i + 1] = 1. / (n_dropped + 1.)
y_pred[:, k] = 0
for m in range(i + 1):
if drop_model[m] == 1:
# adjust scaling factor of dropped trees
scale[m] *= n_dropped / (n_dropped + 1.)
else:
# pseudoresponse of next iteration (without contribution of dropped trees)
y_pred[:, k] += self.learning_rate * scale[m] * self.estimators_[m, k].predict(X).ravel()
else:
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
return y_pred | python | def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
random_state, scale, X_idx_sorted, X_csc=None, X_csr=None):
assert sample_mask.dtype == numpy.bool
loss = self.loss_
# whether to use dropout in next iteration
do_dropout = self.dropout_rate > 0. and 0 < i < len(scale) - 1
for k in range(loss.K):
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=self.criterion,
splitter='best',
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
min_impurity_split=self.min_impurity_split,
min_impurity_decrease=self.min_impurity_decrease,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state,
presort=self.presort)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(numpy.float64)
X = X_csr if X_csr is not None else X
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
# add tree to ensemble
self.estimators_[i, k] = tree
# update tree leaves
if do_dropout:
# select base learners to be dropped for next iteration
drop_model, n_dropped = _sample_binomial_plus_one(self.dropout_rate, i + 1, random_state)
# adjust scaling factor of tree that is going to be trained in next iteration
scale[i + 1] = 1. / (n_dropped + 1.)
y_pred[:, k] = 0
for m in range(i + 1):
if drop_model[m] == 1:
# adjust scaling factor of dropped trees
scale[m] *= n_dropped / (n_dropped + 1.)
else:
# pseudoresponse of next iteration (without contribution of dropped trees)
y_pred[:, k] += self.learning_rate * scale[m] * self.estimators_[m, k].predict(X).ravel()
else:
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
return y_pred | [
"def",
"_fit_stage",
"(",
"self",
",",
"i",
",",
"X",
",",
"y",
",",
"y_pred",
",",
"sample_weight",
",",
"sample_mask",
",",
"random_state",
",",
"scale",
",",
"X_idx_sorted",
",",
"X_csc",
"=",
"None",
",",
"X_csr",
"=",
"None",
")",
":",
"assert",
"sample_mask",
".",
"dtype",
"==",
"numpy",
".",
"bool",
"loss",
"=",
"self",
".",
"loss_",
"# whether to use dropout in next iteration",
"do_dropout",
"=",
"self",
".",
"dropout_rate",
">",
"0.",
"and",
"0",
"<",
"i",
"<",
"len",
"(",
"scale",
")",
"-",
"1",
"for",
"k",
"in",
"range",
"(",
"loss",
".",
"K",
")",
":",
"residual",
"=",
"loss",
".",
"negative_gradient",
"(",
"y",
",",
"y_pred",
",",
"k",
"=",
"k",
",",
"sample_weight",
"=",
"sample_weight",
")",
"# induce regression tree on residuals",
"tree",
"=",
"DecisionTreeRegressor",
"(",
"criterion",
"=",
"self",
".",
"criterion",
",",
"splitter",
"=",
"'best'",
",",
"max_depth",
"=",
"self",
".",
"max_depth",
",",
"min_samples_split",
"=",
"self",
".",
"min_samples_split",
",",
"min_samples_leaf",
"=",
"self",
".",
"min_samples_leaf",
",",
"min_weight_fraction_leaf",
"=",
"self",
".",
"min_weight_fraction_leaf",
",",
"min_impurity_split",
"=",
"self",
".",
"min_impurity_split",
",",
"min_impurity_decrease",
"=",
"self",
".",
"min_impurity_decrease",
",",
"max_features",
"=",
"self",
".",
"max_features",
",",
"max_leaf_nodes",
"=",
"self",
".",
"max_leaf_nodes",
",",
"random_state",
"=",
"random_state",
",",
"presort",
"=",
"self",
".",
"presort",
")",
"if",
"self",
".",
"subsample",
"<",
"1.0",
":",
"# no inplace multiplication!",
"sample_weight",
"=",
"sample_weight",
"*",
"sample_mask",
".",
"astype",
"(",
"numpy",
".",
"float64",
")",
"X",
"=",
"X_csr",
"if",
"X_csr",
"is",
"not",
"None",
"else",
"X",
"tree",
".",
"fit",
"(",
"X",
",",
"residual",
",",
"sample_weight",
"=",
"sample_weight",
",",
"check_input",
"=",
"False",
",",
"X_idx_sorted",
"=",
"X_idx_sorted",
")",
"# add tree to ensemble",
"self",
".",
"estimators_",
"[",
"i",
",",
"k",
"]",
"=",
"tree",
"# update tree leaves",
"if",
"do_dropout",
":",
"# select base learners to be dropped for next iteration",
"drop_model",
",",
"n_dropped",
"=",
"_sample_binomial_plus_one",
"(",
"self",
".",
"dropout_rate",
",",
"i",
"+",
"1",
",",
"random_state",
")",
"# adjust scaling factor of tree that is going to be trained in next iteration",
"scale",
"[",
"i",
"+",
"1",
"]",
"=",
"1.",
"/",
"(",
"n_dropped",
"+",
"1.",
")",
"y_pred",
"[",
":",
",",
"k",
"]",
"=",
"0",
"for",
"m",
"in",
"range",
"(",
"i",
"+",
"1",
")",
":",
"if",
"drop_model",
"[",
"m",
"]",
"==",
"1",
":",
"# adjust scaling factor of dropped trees",
"scale",
"[",
"m",
"]",
"*=",
"n_dropped",
"/",
"(",
"n_dropped",
"+",
"1.",
")",
"else",
":",
"# pseudoresponse of next iteration (without contribution of dropped trees)",
"y_pred",
"[",
":",
",",
"k",
"]",
"+=",
"self",
".",
"learning_rate",
"*",
"scale",
"[",
"m",
"]",
"*",
"self",
".",
"estimators_",
"[",
"m",
",",
"k",
"]",
".",
"predict",
"(",
"X",
")",
".",
"ravel",
"(",
")",
"else",
":",
"# update tree leaves",
"loss",
".",
"update_terminal_regions",
"(",
"tree",
".",
"tree_",
",",
"X",
",",
"y",
",",
"residual",
",",
"y_pred",
",",
"sample_weight",
",",
"sample_mask",
",",
"self",
".",
"learning_rate",
",",
"k",
"=",
"k",
")",
"return",
"y_pred"
] | Fit another stage of ``n_classes_`` trees to the boosting model. | [
"Fit",
"another",
"stage",
"of",
"n_classes_",
"trees",
"to",
"the",
"boosting",
"model",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/ensemble/boosting.py#L609-L671 |
246,630 | sebp/scikit-survival | sksurv/ensemble/boosting.py | GradientBoostingSurvivalAnalysis._fit_stages | def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None, X_idx_sorted=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = numpy.ones((n_samples, ), dtype=numpy.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = csc_matrix(X) if issparse(X) else None
X_csr = csr_matrix(X) if issparse(X) else None
if self.dropout_rate > 0.:
scale = numpy.ones(self.n_estimators, dtype=float)
else:
scale = None
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
y_oob_sample = y[~sample_mask]
old_oob_score = loss_(y_oob_sample,
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, random_state, scale, X_idx_sorted,
X_csc, X_csr)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (old_oob_score - loss_(y_oob_sample, y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
if self.dropout_rate > 0.:
self.scale_ = scale
return i + 1 | python | def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None, X_idx_sorted=None):
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = numpy.ones((n_samples, ), dtype=numpy.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = csc_matrix(X) if issparse(X) else None
X_csr = csr_matrix(X) if issparse(X) else None
if self.dropout_rate > 0.:
scale = numpy.ones(self.n_estimators, dtype=float)
else:
scale = None
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
y_oob_sample = y[~sample_mask]
old_oob_score = loss_(y_oob_sample,
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, random_state, scale, X_idx_sorted,
X_csc, X_csr)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (old_oob_score - loss_(y_oob_sample, y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
if self.dropout_rate > 0.:
self.scale_ = scale
return i + 1 | [
"def",
"_fit_stages",
"(",
"self",
",",
"X",
",",
"y",
",",
"y_pred",
",",
"sample_weight",
",",
"random_state",
",",
"begin_at_stage",
"=",
"0",
",",
"monitor",
"=",
"None",
",",
"X_idx_sorted",
"=",
"None",
")",
":",
"n_samples",
"=",
"X",
".",
"shape",
"[",
"0",
"]",
"do_oob",
"=",
"self",
".",
"subsample",
"<",
"1.0",
"sample_mask",
"=",
"numpy",
".",
"ones",
"(",
"(",
"n_samples",
",",
")",
",",
"dtype",
"=",
"numpy",
".",
"bool",
")",
"n_inbag",
"=",
"max",
"(",
"1",
",",
"int",
"(",
"self",
".",
"subsample",
"*",
"n_samples",
")",
")",
"loss_",
"=",
"self",
".",
"loss_",
"if",
"self",
".",
"verbose",
":",
"verbose_reporter",
"=",
"VerboseReporter",
"(",
"self",
".",
"verbose",
")",
"verbose_reporter",
".",
"init",
"(",
"self",
",",
"begin_at_stage",
")",
"X_csc",
"=",
"csc_matrix",
"(",
"X",
")",
"if",
"issparse",
"(",
"X",
")",
"else",
"None",
"X_csr",
"=",
"csr_matrix",
"(",
"X",
")",
"if",
"issparse",
"(",
"X",
")",
"else",
"None",
"if",
"self",
".",
"dropout_rate",
">",
"0.",
":",
"scale",
"=",
"numpy",
".",
"ones",
"(",
"self",
".",
"n_estimators",
",",
"dtype",
"=",
"float",
")",
"else",
":",
"scale",
"=",
"None",
"# perform boosting iterations",
"i",
"=",
"begin_at_stage",
"for",
"i",
"in",
"range",
"(",
"begin_at_stage",
",",
"self",
".",
"n_estimators",
")",
":",
"# subsampling",
"if",
"do_oob",
":",
"sample_mask",
"=",
"_random_sample_mask",
"(",
"n_samples",
",",
"n_inbag",
",",
"random_state",
")",
"# OOB score before adding this stage",
"y_oob_sample",
"=",
"y",
"[",
"~",
"sample_mask",
"]",
"old_oob_score",
"=",
"loss_",
"(",
"y_oob_sample",
",",
"y_pred",
"[",
"~",
"sample_mask",
"]",
",",
"sample_weight",
"[",
"~",
"sample_mask",
"]",
")",
"# fit next stage of trees",
"y_pred",
"=",
"self",
".",
"_fit_stage",
"(",
"i",
",",
"X",
",",
"y",
",",
"y_pred",
",",
"sample_weight",
",",
"sample_mask",
",",
"random_state",
",",
"scale",
",",
"X_idx_sorted",
",",
"X_csc",
",",
"X_csr",
")",
"# track deviance (= loss)",
"if",
"do_oob",
":",
"self",
".",
"train_score_",
"[",
"i",
"]",
"=",
"loss_",
"(",
"y",
"[",
"sample_mask",
"]",
",",
"y_pred",
"[",
"sample_mask",
"]",
",",
"sample_weight",
"[",
"sample_mask",
"]",
")",
"self",
".",
"oob_improvement_",
"[",
"i",
"]",
"=",
"(",
"old_oob_score",
"-",
"loss_",
"(",
"y_oob_sample",
",",
"y_pred",
"[",
"~",
"sample_mask",
"]",
",",
"sample_weight",
"[",
"~",
"sample_mask",
"]",
")",
")",
"else",
":",
"# no need to fancy index w/ no subsampling",
"self",
".",
"train_score_",
"[",
"i",
"]",
"=",
"loss_",
"(",
"y",
",",
"y_pred",
",",
"sample_weight",
")",
"if",
"self",
".",
"verbose",
">",
"0",
":",
"verbose_reporter",
".",
"update",
"(",
"i",
",",
"self",
")",
"if",
"monitor",
"is",
"not",
"None",
":",
"early_stopping",
"=",
"monitor",
"(",
"i",
",",
"self",
",",
"locals",
"(",
")",
")",
"if",
"early_stopping",
":",
"break",
"if",
"self",
".",
"dropout_rate",
">",
"0.",
":",
"self",
".",
"scale_",
"=",
"scale",
"return",
"i",
"+",
"1"
] | Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping. | [
"Iteratively",
"fits",
"the",
"stages",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/ensemble/boosting.py#L673-L741 |
246,631 | sebp/scikit-survival | sksurv/ensemble/boosting.py | GradientBoostingSurvivalAnalysis.fit | def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
sample_weight : array-like, shape = (n_samples,), optional
Weights given to each sample. If omitted, all samples have weight 1.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
computing held-out estimates, early stopping, model introspection, and
snapshotting.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
X, event, time = check_arrays_survival(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE)
n_samples, self.n_features_ = X.shape
X = X.astype(DTYPE)
if sample_weight is None:
sample_weight = numpy.ones(n_samples, dtype=numpy.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, sample_weight)
self._check_params()
self.loss_ = LOSS_FUNCTIONS[self.loss](1)
if isinstance(self.loss_, (CensoredSquaredLoss, IPCWLeastSquaresError)):
time = numpy.log(time)
self._init_state()
self.init_.fit(X, (event, time), sample_weight)
y_pred = self.init_.predict(X)
begin_at_stage = 0
if self.presort is True and issparse(X):
raise ValueError(
"Presorting is not supported for sparse matrices.")
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if presort == 'auto':
presort = not issparse(X)
X_idx_sorted = None
if presort:
X_idx_sorted = numpy.asfortranarray(numpy.argsort(X, axis=0),
dtype=numpy.int32)
# fit the boosting stages
y = numpy.fromiter(zip(event, time), dtype=[('event', numpy.bool), ('time', numpy.float64)])
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor, X_idx_sorted)
# change shape of arrays after fit (early-stopping or additional tests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
self.n_estimators_ = n_stages
return self | python | def fit(self, X, y, sample_weight=None, monitor=None):
random_state = check_random_state(self.random_state)
X, event, time = check_arrays_survival(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE)
n_samples, self.n_features_ = X.shape
X = X.astype(DTYPE)
if sample_weight is None:
sample_weight = numpy.ones(n_samples, dtype=numpy.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, sample_weight)
self._check_params()
self.loss_ = LOSS_FUNCTIONS[self.loss](1)
if isinstance(self.loss_, (CensoredSquaredLoss, IPCWLeastSquaresError)):
time = numpy.log(time)
self._init_state()
self.init_.fit(X, (event, time), sample_weight)
y_pred = self.init_.predict(X)
begin_at_stage = 0
if self.presort is True and issparse(X):
raise ValueError(
"Presorting is not supported for sparse matrices.")
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if presort == 'auto':
presort = not issparse(X)
X_idx_sorted = None
if presort:
X_idx_sorted = numpy.asfortranarray(numpy.argsort(X, axis=0),
dtype=numpy.int32)
# fit the boosting stages
y = numpy.fromiter(zip(event, time), dtype=[('event', numpy.bool), ('time', numpy.float64)])
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor, X_idx_sorted)
# change shape of arrays after fit (early-stopping or additional tests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
self.n_estimators_ = n_stages
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
",",
"sample_weight",
"=",
"None",
",",
"monitor",
"=",
"None",
")",
":",
"random_state",
"=",
"check_random_state",
"(",
"self",
".",
"random_state",
")",
"X",
",",
"event",
",",
"time",
"=",
"check_arrays_survival",
"(",
"X",
",",
"y",
",",
"accept_sparse",
"=",
"[",
"'csr'",
",",
"'csc'",
",",
"'coo'",
"]",
",",
"dtype",
"=",
"DTYPE",
")",
"n_samples",
",",
"self",
".",
"n_features_",
"=",
"X",
".",
"shape",
"X",
"=",
"X",
".",
"astype",
"(",
"DTYPE",
")",
"if",
"sample_weight",
"is",
"None",
":",
"sample_weight",
"=",
"numpy",
".",
"ones",
"(",
"n_samples",
",",
"dtype",
"=",
"numpy",
".",
"float32",
")",
"else",
":",
"sample_weight",
"=",
"column_or_1d",
"(",
"sample_weight",
",",
"warn",
"=",
"True",
")",
"check_consistent_length",
"(",
"X",
",",
"sample_weight",
")",
"self",
".",
"_check_params",
"(",
")",
"self",
".",
"loss_",
"=",
"LOSS_FUNCTIONS",
"[",
"self",
".",
"loss",
"]",
"(",
"1",
")",
"if",
"isinstance",
"(",
"self",
".",
"loss_",
",",
"(",
"CensoredSquaredLoss",
",",
"IPCWLeastSquaresError",
")",
")",
":",
"time",
"=",
"numpy",
".",
"log",
"(",
"time",
")",
"self",
".",
"_init_state",
"(",
")",
"self",
".",
"init_",
".",
"fit",
"(",
"X",
",",
"(",
"event",
",",
"time",
")",
",",
"sample_weight",
")",
"y_pred",
"=",
"self",
".",
"init_",
".",
"predict",
"(",
"X",
")",
"begin_at_stage",
"=",
"0",
"if",
"self",
".",
"presort",
"is",
"True",
"and",
"issparse",
"(",
"X",
")",
":",
"raise",
"ValueError",
"(",
"\"Presorting is not supported for sparse matrices.\"",
")",
"presort",
"=",
"self",
".",
"presort",
"# Allow presort to be 'auto', which means True if the dataset is dense,",
"# otherwise it will be False.",
"if",
"presort",
"==",
"'auto'",
":",
"presort",
"=",
"not",
"issparse",
"(",
"X",
")",
"X_idx_sorted",
"=",
"None",
"if",
"presort",
":",
"X_idx_sorted",
"=",
"numpy",
".",
"asfortranarray",
"(",
"numpy",
".",
"argsort",
"(",
"X",
",",
"axis",
"=",
"0",
")",
",",
"dtype",
"=",
"numpy",
".",
"int32",
")",
"# fit the boosting stages",
"y",
"=",
"numpy",
".",
"fromiter",
"(",
"zip",
"(",
"event",
",",
"time",
")",
",",
"dtype",
"=",
"[",
"(",
"'event'",
",",
"numpy",
".",
"bool",
")",
",",
"(",
"'time'",
",",
"numpy",
".",
"float64",
")",
"]",
")",
"n_stages",
"=",
"self",
".",
"_fit_stages",
"(",
"X",
",",
"y",
",",
"y_pred",
",",
"sample_weight",
",",
"random_state",
",",
"begin_at_stage",
",",
"monitor",
",",
"X_idx_sorted",
")",
"# change shape of arrays after fit (early-stopping or additional tests)",
"if",
"n_stages",
"!=",
"self",
".",
"estimators_",
".",
"shape",
"[",
"0",
"]",
":",
"self",
".",
"estimators_",
"=",
"self",
".",
"estimators_",
"[",
":",
"n_stages",
"]",
"self",
".",
"train_score_",
"=",
"self",
".",
"train_score_",
"[",
":",
"n_stages",
"]",
"if",
"hasattr",
"(",
"self",
",",
"'oob_improvement_'",
")",
":",
"self",
".",
"oob_improvement_",
"=",
"self",
".",
"oob_improvement_",
"[",
":",
"n_stages",
"]",
"self",
".",
"n_estimators_",
"=",
"n_stages",
"return",
"self"
] | Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
sample_weight : array-like, shape = (n_samples,), optional
Weights given to each sample. If omitted, all samples have weight 1.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection, and
            snapshotting.
Returns
-------
self : object
Returns self. | [
"Fit",
"the",
"gradient",
"boosting",
"model",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/ensemble/boosting.py#L743-L823 |
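A minimal usage sketch for the ``fit`` method documented above. It is an assumption-laden illustration, not part of the record: the synthetic data, the structured-array field names ('event', 'time') and the hyperparameter values are all made up for demonstration.

import numpy
from sksurv.ensemble import GradientBoostingSurvivalAnalysis

# Toy survival data in the layout the docstring describes:
# first field is the boolean event indicator, second the observed time.
rng = numpy.random.RandomState(0)
X = rng.normal(size=(100, 5))
y = numpy.empty(100, dtype=[('event', bool), ('time', float)])
y['event'] = rng.binomial(1, 0.7, size=100).astype(bool)
y['time'] = rng.exponential(scale=10.0, size=100) + 1.0

est = GradientBoostingSurvivalAnalysis(n_estimators=50, random_state=0)
est.fit(X, y)
risk_scores = est.predict(X)  # higher score = higher predicted risk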
246,632 | sebp/scikit-survival | sksurv/ensemble/boosting.py | GradientBoostingSurvivalAnalysis.staged_predict | def staged_predict(self, X):
"""Predict hazard at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : generator of array of shape = (n_samples,)
The predicted value of the input samples.
"""
check_is_fitted(self, 'estimators_')
# if dropout wasn't used during training, proceed as usual,
# otherwise consider scaling factor of individual trees
if not hasattr(self, "scale_"):
for y in self._staged_decision_function(X):
yield self._scale_prediction(y.ravel())
else:
for y in self._dropout_staged_decision_function(X):
yield self._scale_prediction(y.ravel()) | python | def staged_predict(self, X):
check_is_fitted(self, 'estimators_')
# if dropout wasn't used during training, proceed as usual,
# otherwise consider scaling factor of individual trees
if not hasattr(self, "scale_"):
for y in self._staged_decision_function(X):
yield self._scale_prediction(y.ravel())
else:
for y in self._dropout_staged_decision_function(X):
yield self._scale_prediction(y.ravel()) | [
"def",
"staged_predict",
"(",
"self",
",",
"X",
")",
":",
"check_is_fitted",
"(",
"self",
",",
"'estimators_'",
")",
"# if dropout wasn't used during training, proceed as usual,",
"# otherwise consider scaling factor of individual trees",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"scale_\"",
")",
":",
"for",
"y",
"in",
"self",
".",
"_staged_decision_function",
"(",
"X",
")",
":",
"yield",
"self",
".",
"_scale_prediction",
"(",
"y",
".",
"ravel",
"(",
")",
")",
"else",
":",
"for",
"y",
"in",
"self",
".",
"_dropout_staged_decision_function",
"(",
"X",
")",
":",
"yield",
"self",
".",
"_scale_prediction",
"(",
"y",
".",
"ravel",
"(",
")",
")"
] | Predict hazard at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : generator of array of shape = (n_samples,)
The predicted value of the input samples. | [
"Predict",
"hazard",
"at",
"each",
"stage",
"for",
"X",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/ensemble/boosting.py#L886-L911 |
246,633 | sebp/scikit-survival | sksurv/svm/minlip.py | MinlipSurvivalAnalysis.fit | def fit(self, X, y):
"""Build a MINLIP survival model from training data.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
"""
X, event, time = check_arrays_survival(X, y)
self._fit(X, event, time)
return self | python | def fit(self, X, y):
X, event, time = check_arrays_survival(X, y)
self._fit(X, event, time)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"X",
",",
"event",
",",
"time",
"=",
"check_arrays_survival",
"(",
"X",
",",
"y",
")",
"self",
".",
"_fit",
"(",
"X",
",",
"event",
",",
"time",
")",
"return",
"self"
] | Build a MINLIP survival model from training data.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self | [
"Build",
"a",
"MINLIP",
"survival",
"model",
"from",
"training",
"data",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/svm/minlip.py#L227-L247 |
246,634 | sebp/scikit-survival | sksurv/svm/minlip.py | MinlipSurvivalAnalysis.predict | def predict(self, X):
"""Predict risk score of experiencing an event.
Higher scores indicate shorter survival (high risk),
lower scores longer survival (low risk).
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted risk.
"""
K = self._get_kernel(X, self.X_fit_)
pred = -numpy.dot(self.coef_, K.T)
return pred.ravel() | python | def predict(self, X):
K = self._get_kernel(X, self.X_fit_)
pred = -numpy.dot(self.coef_, K.T)
return pred.ravel() | [
"def",
"predict",
"(",
"self",
",",
"X",
")",
":",
"K",
"=",
"self",
".",
"_get_kernel",
"(",
"X",
",",
"self",
".",
"X_fit_",
")",
"pred",
"=",
"-",
"numpy",
".",
"dot",
"(",
"self",
".",
"coef_",
",",
"K",
".",
"T",
")",
"return",
"pred",
".",
"ravel",
"(",
")"
] | Predict risk score of experiencing an event.
Higher scores indicate shorter survival (high risk),
lower scores longer survival (low risk).
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted risk. | [
"Predict",
"risk",
"score",
"of",
"experiencing",
"an",
"event",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/svm/minlip.py#L249-L267 |
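A hedged usage sketch for the MINLIP ``fit`` and ``predict`` methods documented in the two records above; the toy data are an illustrative assumption, default constructor arguments are used, and the optional convex solver dependency is assumed to be installed.

import numpy
from sksurv.svm import MinlipSurvivalAnalysis

rng = numpy.random.RandomState(0)
X = rng.normal(size=(60, 4))
y = numpy.empty(60, dtype=[('event', bool), ('time', float)])
y['event'] = rng.binomial(1, 0.6, size=60).astype(bool)
y['time'] = rng.exponential(scale=5.0, size=60) + 1.0

model = MinlipSurvivalAnalysis()   # defaults only; tuning is out of scope here
model.fit(X, y)
risk = model.predict(X)            # higher values indicate shorter expected survival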
246,635 | sebp/scikit-survival | sksurv/datasets/base.py | get_x_y | def get_x_y(data_frame, attr_labels, pos_label=None, survival=True):
"""Split data frame into features and labels.
Parameters
----------
data_frame : pandas.DataFrame, shape = (n_samples, n_columns)
A data frame.
attr_labels : sequence of str or None
A list of one or more columns that are considered the label.
If `survival` is `True`, then attr_labels has two elements:
1) the name of the column denoting the event indicator, and
2) the name of the column denoting the survival time.
If the sequence contains `None`, then labels are not retrieved
and only a data frame with features is returned.
pos_label : any, optional
Which value of the event indicator column denotes that a
patient experienced an event. This value is ignored if
`survival` is `False`.
survival : bool, optional, default: True
Whether to return `y` that can be used for survival analysis.
Returns
-------
X : pandas.DataFrame, shape = (n_samples, n_columns - len(attr_labels))
Data frame containing features.
y : None or pandas.DataFrame, shape = (n_samples, len(attr_labels))
Data frame containing columns with supervised information.
If `survival` was `True`, then the column denoting the event
indicator will be boolean and survival times will be float.
If `attr_labels` contains `None`, y is set to `None`.
"""
if survival:
if len(attr_labels) != 2:
raise ValueError("expected sequence of length two for attr_labels, but got %d" % len(attr_labels))
if pos_label is None:
raise ValueError("pos_label needs to be specified if survival=True")
return _get_x_y_survival(data_frame, attr_labels[0], attr_labels[1], pos_label)
return _get_x_y_other(data_frame, attr_labels) | python | def get_x_y(data_frame, attr_labels, pos_label=None, survival=True):
if survival:
if len(attr_labels) != 2:
raise ValueError("expected sequence of length two for attr_labels, but got %d" % len(attr_labels))
if pos_label is None:
raise ValueError("pos_label needs to be specified if survival=True")
return _get_x_y_survival(data_frame, attr_labels[0], attr_labels[1], pos_label)
return _get_x_y_other(data_frame, attr_labels) | [
"def",
"get_x_y",
"(",
"data_frame",
",",
"attr_labels",
",",
"pos_label",
"=",
"None",
",",
"survival",
"=",
"True",
")",
":",
"if",
"survival",
":",
"if",
"len",
"(",
"attr_labels",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"\"expected sequence of length two for attr_labels, but got %d\"",
"%",
"len",
"(",
"attr_labels",
")",
")",
"if",
"pos_label",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"pos_label needs to be specified if survival=True\"",
")",
"return",
"_get_x_y_survival",
"(",
"data_frame",
",",
"attr_labels",
"[",
"0",
"]",
",",
"attr_labels",
"[",
"1",
"]",
",",
"pos_label",
")",
"return",
"_get_x_y_other",
"(",
"data_frame",
",",
"attr_labels",
")"
] | Split data frame into features and labels.
Parameters
----------
data_frame : pandas.DataFrame, shape = (n_samples, n_columns)
A data frame.
attr_labels : sequence of str or None
A list of one or more columns that are considered the label.
If `survival` is `True`, then attr_labels has two elements:
1) the name of the column denoting the event indicator, and
2) the name of the column denoting the survival time.
If the sequence contains `None`, then labels are not retrieved
and only a data frame with features is returned.
pos_label : any, optional
Which value of the event indicator column denotes that a
patient experienced an event. This value is ignored if
`survival` is `False`.
survival : bool, optional, default: True
Whether to return `y` that can be used for survival analysis.
Returns
-------
X : pandas.DataFrame, shape = (n_samples, n_columns - len(attr_labels))
Data frame containing features.
y : None or pandas.DataFrame, shape = (n_samples, len(attr_labels))
Data frame containing columns with supervised information.
If `survival` was `True`, then the column denoting the event
indicator will be boolean and survival times will be float.
If `attr_labels` contains `None`, y is set to `None`. | [
"Split",
"data",
"frame",
"into",
"features",
"and",
"labels",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/datasets/base.py#L46-L88 |
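A small, self-contained sketch of ``get_x_y`` as documented above; the column names and values are invented for illustration.

import pandas
from sksurv.datasets import get_x_y

df = pandas.DataFrame({
    'age': [61, 52, 70],
    'treatment': [1, 0, 1],
    'status': [True, False, True],   # event indicator column
    'days': [120.0, 300.0, 45.0],    # survival / censoring time column
})
X, y = get_x_y(df, attr_labels=['status', 'days'], pos_label=True)
# X keeps only 'age' and 'treatment'; y is built from the two label columns,
# with the event indicator cast to bool and the time to float.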
246,636 | sebp/scikit-survival | sksurv/datasets/base.py | load_arff_files_standardized | def load_arff_files_standardized(path_training, attr_labels, pos_label=None, path_testing=None, survival=True,
standardize_numeric=True, to_numeric=True):
"""Load dataset in ARFF format.
Parameters
----------
path_training : str
Path to ARFF file containing data.
attr_labels : sequence of str
Names of attributes denoting dependent variables.
If ``survival`` is set, it must be a sequence with two items:
the name of the event indicator and the name of the survival/censoring time.
pos_label : any type, optional
Value corresponding to an event in survival analysis.
Only considered if ``survival`` is ``True``.
path_testing : str, optional
Path to ARFF file containing hold-out data. Only columns that are available in both
training and testing are considered (excluding dependent variables).
If ``standardize_numeric`` is set, data is normalized by considering both training
and testing data.
survival : bool, optional, default: True
Whether the dependent variables denote event indicator and survival/censoring time.
standardize_numeric : bool, optional, default: True
Whether to standardize data to zero mean and unit variance.
See :func:`sksurv.column.standardize`.
    to_numeric : bool, optional, default: True
Whether to convert categorical variables to numeric values.
See :func:`sksurv.column.categorical_to_numeric`.
Returns
-------
x_train : pandas.DataFrame, shape = (n_train, n_features)
Training data.
y_train : pandas.DataFrame, shape = (n_train, n_labels)
Dependent variables of training data.
    x_test : None or pandas.DataFrame, shape = (n_test, n_features)
        Testing data if `path_testing` was provided.
    y_test : None or pandas.DataFrame, shape = (n_test, n_labels)
        Dependent variables of testing data if `path_testing` was provided.
"""
dataset = loadarff(path_training)
if "index" in dataset.columns:
dataset.index = dataset["index"].astype(object)
dataset.drop("index", axis=1, inplace=True)
x_train, y_train = get_x_y(dataset, attr_labels, pos_label, survival)
if path_testing is not None:
x_test, y_test = _load_arff_testing(path_testing, attr_labels, pos_label, survival)
if len(x_train.columns.symmetric_difference(x_test.columns)) > 0:
warnings.warn("Restricting columns to intersection between training and testing data",
stacklevel=2)
cols = x_train.columns.intersection(x_test.columns)
if len(cols) == 0:
raise ValueError("columns of training and test data do not intersect")
x_train = x_train.loc[:, cols]
x_test = x_test.loc[:, cols]
x = safe_concat((x_train, x_test), axis=0)
if standardize_numeric:
x = standardize(x)
if to_numeric:
x = categorical_to_numeric(x)
n_train = x_train.shape[0]
x_train = x.iloc[:n_train, :]
x_test = x.iloc[n_train:, :]
else:
if standardize_numeric:
x_train = standardize(x_train)
if to_numeric:
x_train = categorical_to_numeric(x_train)
x_test = None
y_test = None
return x_train, y_train, x_test, y_test | python | def load_arff_files_standardized(path_training, attr_labels, pos_label=None, path_testing=None, survival=True,
standardize_numeric=True, to_numeric=True):
dataset = loadarff(path_training)
if "index" in dataset.columns:
dataset.index = dataset["index"].astype(object)
dataset.drop("index", axis=1, inplace=True)
x_train, y_train = get_x_y(dataset, attr_labels, pos_label, survival)
if path_testing is not None:
x_test, y_test = _load_arff_testing(path_testing, attr_labels, pos_label, survival)
if len(x_train.columns.symmetric_difference(x_test.columns)) > 0:
warnings.warn("Restricting columns to intersection between training and testing data",
stacklevel=2)
cols = x_train.columns.intersection(x_test.columns)
if len(cols) == 0:
raise ValueError("columns of training and test data do not intersect")
x_train = x_train.loc[:, cols]
x_test = x_test.loc[:, cols]
x = safe_concat((x_train, x_test), axis=0)
if standardize_numeric:
x = standardize(x)
if to_numeric:
x = categorical_to_numeric(x)
n_train = x_train.shape[0]
x_train = x.iloc[:n_train, :]
x_test = x.iloc[n_train:, :]
else:
if standardize_numeric:
x_train = standardize(x_train)
if to_numeric:
x_train = categorical_to_numeric(x_train)
x_test = None
y_test = None
return x_train, y_train, x_test, y_test | [
"def",
"load_arff_files_standardized",
"(",
"path_training",
",",
"attr_labels",
",",
"pos_label",
"=",
"None",
",",
"path_testing",
"=",
"None",
",",
"survival",
"=",
"True",
",",
"standardize_numeric",
"=",
"True",
",",
"to_numeric",
"=",
"True",
")",
":",
"dataset",
"=",
"loadarff",
"(",
"path_training",
")",
"if",
"\"index\"",
"in",
"dataset",
".",
"columns",
":",
"dataset",
".",
"index",
"=",
"dataset",
"[",
"\"index\"",
"]",
".",
"astype",
"(",
"object",
")",
"dataset",
".",
"drop",
"(",
"\"index\"",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"x_train",
",",
"y_train",
"=",
"get_x_y",
"(",
"dataset",
",",
"attr_labels",
",",
"pos_label",
",",
"survival",
")",
"if",
"path_testing",
"is",
"not",
"None",
":",
"x_test",
",",
"y_test",
"=",
"_load_arff_testing",
"(",
"path_testing",
",",
"attr_labels",
",",
"pos_label",
",",
"survival",
")",
"if",
"len",
"(",
"x_train",
".",
"columns",
".",
"symmetric_difference",
"(",
"x_test",
".",
"columns",
")",
")",
">",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Restricting columns to intersection between training and testing data\"",
",",
"stacklevel",
"=",
"2",
")",
"cols",
"=",
"x_train",
".",
"columns",
".",
"intersection",
"(",
"x_test",
".",
"columns",
")",
"if",
"len",
"(",
"cols",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"columns of training and test data do not intersect\"",
")",
"x_train",
"=",
"x_train",
".",
"loc",
"[",
":",
",",
"cols",
"]",
"x_test",
"=",
"x_test",
".",
"loc",
"[",
":",
",",
"cols",
"]",
"x",
"=",
"safe_concat",
"(",
"(",
"x_train",
",",
"x_test",
")",
",",
"axis",
"=",
"0",
")",
"if",
"standardize_numeric",
":",
"x",
"=",
"standardize",
"(",
"x",
")",
"if",
"to_numeric",
":",
"x",
"=",
"categorical_to_numeric",
"(",
"x",
")",
"n_train",
"=",
"x_train",
".",
"shape",
"[",
"0",
"]",
"x_train",
"=",
"x",
".",
"iloc",
"[",
":",
"n_train",
",",
":",
"]",
"x_test",
"=",
"x",
".",
"iloc",
"[",
"n_train",
":",
",",
":",
"]",
"else",
":",
"if",
"standardize_numeric",
":",
"x_train",
"=",
"standardize",
"(",
"x_train",
")",
"if",
"to_numeric",
":",
"x_train",
"=",
"categorical_to_numeric",
"(",
"x_train",
")",
"x_test",
"=",
"None",
"y_test",
"=",
"None",
"return",
"x_train",
",",
"y_train",
",",
"x_test",
",",
"y_test"
] | Load dataset in ARFF format.
Parameters
----------
path_training : str
Path to ARFF file containing data.
attr_labels : sequence of str
Names of attributes denoting dependent variables.
If ``survival`` is set, it must be a sequence with two items:
the name of the event indicator and the name of the survival/censoring time.
pos_label : any type, optional
Value corresponding to an event in survival analysis.
Only considered if ``survival`` is ``True``.
path_testing : str, optional
Path to ARFF file containing hold-out data. Only columns that are available in both
training and testing are considered (excluding dependent variables).
If ``standardize_numeric`` is set, data is normalized by considering both training
and testing data.
survival : bool, optional, default: True
Whether the dependent variables denote event indicator and survival/censoring time.
standardize_numeric : bool, optional, default: True
Whether to standardize data to zero mean and unit variance.
See :func:`sksurv.column.standardize`.
    to_numeric : bool, optional, default: True
Whether to convert categorical variables to numeric values.
See :func:`sksurv.column.categorical_to_numeric`.
Returns
-------
x_train : pandas.DataFrame, shape = (n_train, n_features)
Training data.
y_train : pandas.DataFrame, shape = (n_train, n_labels)
Dependent variables of training data.
    x_test : None or pandas.DataFrame, shape = (n_test, n_features)
        Testing data if `path_testing` was provided.
    y_test : None or pandas.DataFrame, shape = (n_test, n_labels)
        Dependent variables of testing data if `path_testing` was provided.
"Load",
"dataset",
"in",
"ARFF",
"format",
"."
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/datasets/base.py#L91-L179 |
246,637 | sebp/scikit-survival | sksurv/datasets/base.py | load_aids | def load_aids(endpoint="aids"):
"""Load and return the AIDS Clinical Trial dataset
The dataset has 1,151 samples and 11 features.
The dataset has 2 endpoints:
1. AIDS defining event, which occurred for 96 patients (8.3%)
2. Death, which occurred for 26 patients (2.3%)
Parameters
----------
endpoint : aids|death
The endpoint
Returns
-------
x : pandas.DataFrame
The measurements for each patient.
y : structured array with 2 fields
*censor*: boolean indicating whether the endpoint has been reached
or the event time is right censored.
*time*: total length of follow-up
If ``endpoint`` is death, the fields are named *censor_d* and *time_d*.
References
----------
.. [1] http://www.umass.edu/statdata/statdata/data/
.. [2] Hosmer, D., Lemeshow, S., May, S.:
"Applied Survival Analysis: Regression Modeling of Time to Event Data."
John Wiley & Sons, Inc. (2008)
"""
labels_aids = ['censor', 'time']
labels_death = ['censor_d', 'time_d']
if endpoint == "aids":
attr_labels = labels_aids
drop_columns = labels_death
elif endpoint == "death":
attr_labels = labels_death
drop_columns = labels_aids
else:
raise ValueError("endpoint must be 'aids' or 'death'")
fn = resource_filename(__name__, 'data/actg320.arff')
x, y = get_x_y(loadarff(fn), attr_labels=attr_labels, pos_label='1')
x.drop(drop_columns, axis=1, inplace=True)
return x, y | python | def load_aids(endpoint="aids"):
labels_aids = ['censor', 'time']
labels_death = ['censor_d', 'time_d']
if endpoint == "aids":
attr_labels = labels_aids
drop_columns = labels_death
elif endpoint == "death":
attr_labels = labels_death
drop_columns = labels_aids
else:
raise ValueError("endpoint must be 'aids' or 'death'")
fn = resource_filename(__name__, 'data/actg320.arff')
x, y = get_x_y(loadarff(fn), attr_labels=attr_labels, pos_label='1')
x.drop(drop_columns, axis=1, inplace=True)
return x, y | [
"def",
"load_aids",
"(",
"endpoint",
"=",
"\"aids\"",
")",
":",
"labels_aids",
"=",
"[",
"'censor'",
",",
"'time'",
"]",
"labels_death",
"=",
"[",
"'censor_d'",
",",
"'time_d'",
"]",
"if",
"endpoint",
"==",
"\"aids\"",
":",
"attr_labels",
"=",
"labels_aids",
"drop_columns",
"=",
"labels_death",
"elif",
"endpoint",
"==",
"\"death\"",
":",
"attr_labels",
"=",
"labels_death",
"drop_columns",
"=",
"labels_aids",
"else",
":",
"raise",
"ValueError",
"(",
"\"endpoint must be 'aids' or 'death'\"",
")",
"fn",
"=",
"resource_filename",
"(",
"__name__",
",",
"'data/actg320.arff'",
")",
"x",
",",
"y",
"=",
"get_x_y",
"(",
"loadarff",
"(",
"fn",
")",
",",
"attr_labels",
"=",
"attr_labels",
",",
"pos_label",
"=",
"'1'",
")",
"x",
".",
"drop",
"(",
"drop_columns",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"return",
"x",
",",
"y"
] | Load and return the AIDS Clinical Trial dataset
The dataset has 1,151 samples and 11 features.
The dataset has 2 endpoints:
1. AIDS defining event, which occurred for 96 patients (8.3%)
2. Death, which occurred for 26 patients (2.3%)
Parameters
----------
endpoint : aids|death
The endpoint
Returns
-------
x : pandas.DataFrame
The measurements for each patient.
y : structured array with 2 fields
*censor*: boolean indicating whether the endpoint has been reached
or the event time is right censored.
*time*: total length of follow-up
If ``endpoint`` is death, the fields are named *censor_d* and *time_d*.
References
----------
.. [1] http://www.umass.edu/statdata/statdata/data/
.. [2] Hosmer, D., Lemeshow, S., May, S.:
"Applied Survival Analysis: Regression Modeling of Time to Event Data."
John Wiley & Sons, Inc. (2008) | [
"Load",
"and",
"return",
"the",
"AIDS",
"Clinical",
"Trial",
"dataset"
] | cfc99fd20454cdd6f4f20fe331b39f2191ccaabc | https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/datasets/base.py#L284-L333 |
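A quick usage sketch for ``load_aids``; the expected shapes follow the docstring above and assume the bundled ARFF file ships with the installed package.

from sksurv.datasets import load_aids

x, y = load_aids(endpoint='aids')
print(x.shape)         # (1151, 11) according to the docstring
print(y.dtype.names)   # ('censor', 'time')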
246,638 | seemethere/nba_py | nba_py/__init__.py | _api_scrape | def _api_scrape(json_inp, ndx):
"""
Internal method to streamline the getting of data from the json
Args:
json_inp (json): json input from our caller
ndx (int): index where the data is located in the api
Returns:
If pandas is present:
DataFrame (pandas.DataFrame): data set from ndx within the
API's json
else:
A dictionary of both headers and values from the page
"""
try:
headers = json_inp['resultSets'][ndx]['headers']
values = json_inp['resultSets'][ndx]['rowSet']
except KeyError:
# This is so ugly but this is what you get when your data comes out
# in not a standard format
try:
headers = json_inp['resultSet'][ndx]['headers']
values = json_inp['resultSet'][ndx]['rowSet']
except KeyError:
# Added for results that only include one set (ex. LeagueLeaders)
headers = json_inp['resultSet']['headers']
values = json_inp['resultSet']['rowSet']
if HAS_PANDAS:
return DataFrame(values, columns=headers)
else:
# Taken from www.github.com/bradleyfay/py-goldsberry
return [dict(zip(headers, value)) for value in values] | python | def _api_scrape(json_inp, ndx):
try:
headers = json_inp['resultSets'][ndx]['headers']
values = json_inp['resultSets'][ndx]['rowSet']
except KeyError:
# This is so ugly but this is what you get when your data comes out
# in not a standard format
try:
headers = json_inp['resultSet'][ndx]['headers']
values = json_inp['resultSet'][ndx]['rowSet']
except KeyError:
# Added for results that only include one set (ex. LeagueLeaders)
headers = json_inp['resultSet']['headers']
values = json_inp['resultSet']['rowSet']
if HAS_PANDAS:
return DataFrame(values, columns=headers)
else:
# Taken from www.github.com/bradleyfay/py-goldsberry
return [dict(zip(headers, value)) for value in values] | [
"def",
"_api_scrape",
"(",
"json_inp",
",",
"ndx",
")",
":",
"try",
":",
"headers",
"=",
"json_inp",
"[",
"'resultSets'",
"]",
"[",
"ndx",
"]",
"[",
"'headers'",
"]",
"values",
"=",
"json_inp",
"[",
"'resultSets'",
"]",
"[",
"ndx",
"]",
"[",
"'rowSet'",
"]",
"except",
"KeyError",
":",
"# This is so ugly but this is what you get when your data comes out",
"# in not a standard format",
"try",
":",
"headers",
"=",
"json_inp",
"[",
"'resultSet'",
"]",
"[",
"ndx",
"]",
"[",
"'headers'",
"]",
"values",
"=",
"json_inp",
"[",
"'resultSet'",
"]",
"[",
"ndx",
"]",
"[",
"'rowSet'",
"]",
"except",
"KeyError",
":",
"# Added for results that only include one set (ex. LeagueLeaders)",
"headers",
"=",
"json_inp",
"[",
"'resultSet'",
"]",
"[",
"'headers'",
"]",
"values",
"=",
"json_inp",
"[",
"'resultSet'",
"]",
"[",
"'rowSet'",
"]",
"if",
"HAS_PANDAS",
":",
"return",
"DataFrame",
"(",
"values",
",",
"columns",
"=",
"headers",
")",
"else",
":",
"# Taken from www.github.com/bradleyfay/py-goldsberry",
"return",
"[",
"dict",
"(",
"zip",
"(",
"headers",
",",
"value",
")",
")",
"for",
"value",
"in",
"values",
"]"
] | Internal method to streamline the getting of data from the json
Args:
json_inp (json): json input from our caller
ndx (int): index where the data is located in the api
Returns:
If pandas is present:
DataFrame (pandas.DataFrame): data set from ndx within the
API's json
else:
A dictionary of both headers and values from the page | [
"Internal",
"method",
"to",
"streamline",
"the",
"getting",
"of",
"data",
"from",
"the",
"json"
] | ffeaf4251d796ff9313367a752a45a0d7b16489e | https://github.com/seemethere/nba_py/blob/ffeaf4251d796ff9313367a752a45a0d7b16489e/nba_py/__init__.py#L34-L67 |
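``_api_scrape`` is an internal helper, so the sketch below only illustrates the JSON layout it expects; the headers, player IDs and values are fabricated.

from nba_py import _api_scrape

json_inp = {
    'resultSets': [
        {'headers': ['PLAYER_ID', 'PTS'],
         'rowSet': [[2544, 27.5], [201939, 25.1]]},
    ]
}
table = _api_scrape(json_inp, 0)   # DataFrame if pandas is installed, else a list of dicts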
246,639 | seemethere/nba_py | nba_py/player.py | get_player | def get_player(first_name,
last_name=None,
season=constants.CURRENT_SEASON,
only_current=0,
just_id=True):
"""
Calls our PlayerList class to get a full list of players and then returns
just an id if specified or the full row of player information
Args:
:first_name: First name of the player
:last_name: Last name of the player
(this is None if the player only has first name [Nene])
:only_current: Only wants the current list of players
:just_id: Only wants the id of the player
Returns:
Either the ID or full row of information of the player inputted
Raises:
:PlayerNotFoundException::
"""
if last_name is None:
name = first_name.lower()
else:
name = '{}, {}'.format(last_name, first_name).lower()
pl = PlayerList(season=season, only_current=only_current).info()
hdr = 'DISPLAY_LAST_COMMA_FIRST'
if HAS_PANDAS:
item = pl[pl.DISPLAY_LAST_COMMA_FIRST.str.lower() == name]
else:
item = next(plyr for plyr in pl if str(plyr[hdr]).lower() == name)
if len(item) == 0:
raise PlayerNotFoundException
elif just_id:
return item['PERSON_ID']
else:
return item | python | def get_player(first_name,
last_name=None,
season=constants.CURRENT_SEASON,
only_current=0,
just_id=True):
if last_name is None:
name = first_name.lower()
else:
name = '{}, {}'.format(last_name, first_name).lower()
pl = PlayerList(season=season, only_current=only_current).info()
hdr = 'DISPLAY_LAST_COMMA_FIRST'
if HAS_PANDAS:
item = pl[pl.DISPLAY_LAST_COMMA_FIRST.str.lower() == name]
else:
item = next(plyr for plyr in pl if str(plyr[hdr]).lower() == name)
if len(item) == 0:
raise PlayerNotFoundException
elif just_id:
return item['PERSON_ID']
else:
return item | [
"def",
"get_player",
"(",
"first_name",
",",
"last_name",
"=",
"None",
",",
"season",
"=",
"constants",
".",
"CURRENT_SEASON",
",",
"only_current",
"=",
"0",
",",
"just_id",
"=",
"True",
")",
":",
"if",
"last_name",
"is",
"None",
":",
"name",
"=",
"first_name",
".",
"lower",
"(",
")",
"else",
":",
"name",
"=",
"'{}, {}'",
".",
"format",
"(",
"last_name",
",",
"first_name",
")",
".",
"lower",
"(",
")",
"pl",
"=",
"PlayerList",
"(",
"season",
"=",
"season",
",",
"only_current",
"=",
"only_current",
")",
".",
"info",
"(",
")",
"hdr",
"=",
"'DISPLAY_LAST_COMMA_FIRST'",
"if",
"HAS_PANDAS",
":",
"item",
"=",
"pl",
"[",
"pl",
".",
"DISPLAY_LAST_COMMA_FIRST",
".",
"str",
".",
"lower",
"(",
")",
"==",
"name",
"]",
"else",
":",
"item",
"=",
"next",
"(",
"plyr",
"for",
"plyr",
"in",
"pl",
"if",
"str",
"(",
"plyr",
"[",
"hdr",
"]",
")",
".",
"lower",
"(",
")",
"==",
"name",
")",
"if",
"len",
"(",
"item",
")",
"==",
"0",
":",
"raise",
"PlayerNotFoundException",
"elif",
"just_id",
":",
"return",
"item",
"[",
"'PERSON_ID'",
"]",
"else",
":",
"return",
"item"
] | Calls our PlayerList class to get a full list of players and then returns
just an id if specified or the full row of player information
Args:
:first_name: First name of the player
:last_name: Last name of the player
(this is None if the player only has first name [Nene])
:only_current: Only wants the current list of players
:just_id: Only wants the id of the player
Returns:
Either the ID or full row of information of the player inputted
Raises:
:PlayerNotFoundException:: | [
"Calls",
"our",
"PlayerList",
"class",
"to",
"get",
"a",
"full",
"list",
"of",
"players",
"and",
"then",
"returns",
"just",
"an",
"id",
"if",
"specified",
"or",
"the",
"full",
"row",
"of",
"player",
"information"
] | ffeaf4251d796ff9313367a752a45a0d7b16489e | https://github.com/seemethere/nba_py/blob/ffeaf4251d796ff9313367a752a45a0d7b16489e/nba_py/player.py#L9-L46 |
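A hedged sketch of ``get_player``: it issues a live request to stats.nba.com through ``PlayerList``, so it needs network access, and it raises ``PlayerNotFoundException`` when the name does not match. The player names below are only examples.

from nba_py.player import get_player

player_id = get_player('Stephen', 'Curry', just_id=True)
row = get_player('Nene', just_id=False)   # single-name players omit last_name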
246,640 | ishikota/PyPokerEngine | pypokerengine/players.py | BasePokerPlayer.respond_to_ask | def respond_to_ask(self, message):
"""Called from Dealer when ask message received from RoundManager"""
valid_actions, hole_card, round_state = self.__parse_ask_message(message)
return self.declare_action(valid_actions, hole_card, round_state) | python | def respond_to_ask(self, message):
valid_actions, hole_card, round_state = self.__parse_ask_message(message)
return self.declare_action(valid_actions, hole_card, round_state) | [
"def",
"respond_to_ask",
"(",
"self",
",",
"message",
")",
":",
"valid_actions",
",",
"hole_card",
",",
"round_state",
"=",
"self",
".",
"__parse_ask_message",
"(",
"message",
")",
"return",
"self",
".",
"declare_action",
"(",
"valid_actions",
",",
"hole_card",
",",
"round_state",
")"
] | Called from Dealer when ask message received from RoundManager | [
"Called",
"from",
"Dealer",
"when",
"ask",
"message",
"received",
"from",
"RoundManager"
] | a52a048a15da276005eca4acae96fb6eeb4dc034 | https://github.com/ishikota/PyPokerEngine/blob/a52a048a15da276005eca4acae96fb6eeb4dc034/pypokerengine/players.py#L45-L48 |
246,641 | ishikota/PyPokerEngine | pypokerengine/players.py | BasePokerPlayer.receive_notification | def receive_notification(self, message):
"""Called from Dealer when notification received from RoundManager"""
msg_type = message["message_type"]
if msg_type == "game_start_message":
info = self.__parse_game_start_message(message)
self.receive_game_start_message(info)
elif msg_type == "round_start_message":
round_count, hole, seats = self.__parse_round_start_message(message)
self.receive_round_start_message(round_count, hole, seats)
elif msg_type == "street_start_message":
street, state = self.__parse_street_start_message(message)
self.receive_street_start_message(street, state)
elif msg_type == "game_update_message":
new_action, round_state = self.__parse_game_update_message(message)
self.receive_game_update_message(new_action, round_state)
elif msg_type == "round_result_message":
winners, hand_info, state = self.__parse_round_result_message(message)
self.receive_round_result_message(winners, hand_info, state) | python | def receive_notification(self, message):
msg_type = message["message_type"]
if msg_type == "game_start_message":
info = self.__parse_game_start_message(message)
self.receive_game_start_message(info)
elif msg_type == "round_start_message":
round_count, hole, seats = self.__parse_round_start_message(message)
self.receive_round_start_message(round_count, hole, seats)
elif msg_type == "street_start_message":
street, state = self.__parse_street_start_message(message)
self.receive_street_start_message(street, state)
elif msg_type == "game_update_message":
new_action, round_state = self.__parse_game_update_message(message)
self.receive_game_update_message(new_action, round_state)
elif msg_type == "round_result_message":
winners, hand_info, state = self.__parse_round_result_message(message)
self.receive_round_result_message(winners, hand_info, state) | [
"def",
"receive_notification",
"(",
"self",
",",
"message",
")",
":",
"msg_type",
"=",
"message",
"[",
"\"message_type\"",
"]",
"if",
"msg_type",
"==",
"\"game_start_message\"",
":",
"info",
"=",
"self",
".",
"__parse_game_start_message",
"(",
"message",
")",
"self",
".",
"receive_game_start_message",
"(",
"info",
")",
"elif",
"msg_type",
"==",
"\"round_start_message\"",
":",
"round_count",
",",
"hole",
",",
"seats",
"=",
"self",
".",
"__parse_round_start_message",
"(",
"message",
")",
"self",
".",
"receive_round_start_message",
"(",
"round_count",
",",
"hole",
",",
"seats",
")",
"elif",
"msg_type",
"==",
"\"street_start_message\"",
":",
"street",
",",
"state",
"=",
"self",
".",
"__parse_street_start_message",
"(",
"message",
")",
"self",
".",
"receive_street_start_message",
"(",
"street",
",",
"state",
")",
"elif",
"msg_type",
"==",
"\"game_update_message\"",
":",
"new_action",
",",
"round_state",
"=",
"self",
".",
"__parse_game_update_message",
"(",
"message",
")",
"self",
".",
"receive_game_update_message",
"(",
"new_action",
",",
"round_state",
")",
"elif",
"msg_type",
"==",
"\"round_result_message\"",
":",
"winners",
",",
"hand_info",
",",
"state",
"=",
"self",
".",
"__parse_round_result_message",
"(",
"message",
")",
"self",
".",
"receive_round_result_message",
"(",
"winners",
",",
"hand_info",
",",
"state",
")"
] | Called from Dealer when notification received from RoundManager | [
"Called",
"from",
"Dealer",
"when",
"notification",
"received",
"from",
"RoundManager"
] | a52a048a15da276005eca4acae96fb6eeb4dc034 | https://github.com/ishikota/PyPokerEngine/blob/a52a048a15da276005eca4acae96fb6eeb4dc034/pypokerengine/players.py#L50-L72 |
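A minimal player subclass that the ``respond_to_ask``/``receive_notification`` dispatchers above would drive. The always-fold strategy and the class name are purely illustrative; the overridden method names and signatures follow the records above.

from pypokerengine.players import BasePokerPlayer

class FoldBot(BasePokerPlayer):

    def declare_action(self, valid_actions, hole_card, round_state):
        fold_action = valid_actions[0]   # index 0 is conventionally 'fold'
        return fold_action['action'], fold_action['amount']

    def receive_game_start_message(self, game_info):
        pass

    def receive_round_start_message(self, round_count, hole_card, seats):
        pass

    def receive_street_start_message(self, street, round_state):
        pass

    def receive_game_update_message(self, new_action, round_state):
        pass

    def receive_round_result_message(self, winners, hand_info, round_state):
        pass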
246,642 | alex-sherman/unsync | examples/mixing_methods.py | result_continuation | async def result_continuation(task):
"""A preliminary result processor we'll chain on to the original task
This will get executed wherever the source task was executed, in this
case one of the threads in the ThreadPoolExecutor"""
await asyncio.sleep(0.1)
num, res = task.result()
return num, res * 2 | python | async def result_continuation(task):
await asyncio.sleep(0.1)
num, res = task.result()
return num, res * 2 | [
"async",
"def",
"result_continuation",
"(",
"task",
")",
":",
"await",
"asyncio",
".",
"sleep",
"(",
"0.1",
")",
"num",
",",
"res",
"=",
"task",
".",
"result",
"(",
")",
"return",
"num",
",",
"res",
"*",
"2"
] | A preliminary result processor we'll chain on to the original task
This will get executed wherever the source task was executed, in this
case one of the threads in the ThreadPoolExecutor | [
"A",
"preliminary",
"result",
"processor",
"we",
"ll",
"chain",
"on",
"to",
"the",
"original",
"task",
"This",
"will",
"get",
"executed",
"wherever",
"the",
"source",
"task",
"was",
"executed",
"in",
"this",
"case",
"one",
"of",
"the",
"threads",
"in",
"the",
"ThreadPoolExecutor"
] | a52a0b04980dcaf6dc2fd734aa9d7be9d8960bbe | https://github.com/alex-sherman/unsync/blob/a52a0b04980dcaf6dc2fd734aa9d7be9d8960bbe/examples/mixing_methods.py#L16-L22 |
246,643 | alex-sherman/unsync | examples/mixing_methods.py | result_processor | async def result_processor(tasks):
"""An async result aggregator that combines all the results
This gets executed in unsync.loop and unsync.thread"""
output = {}
for task in tasks:
num, res = await task
output[num] = res
return output | python | async def result_processor(tasks):
output = {}
for task in tasks:
num, res = await task
output[num] = res
return output | [
"async",
"def",
"result_processor",
"(",
"tasks",
")",
":",
"output",
"=",
"{",
"}",
"for",
"task",
"in",
"tasks",
":",
"num",
",",
"res",
"=",
"await",
"task",
"output",
"[",
"num",
"]",
"=",
"res",
"return",
"output"
] | An async result aggregator that combines all the results
This gets executed in unsync.loop and unsync.thread | [
"An",
"async",
"result",
"aggregator",
"that",
"combines",
"all",
"the",
"results",
"This",
"gets",
"executed",
"in",
"unsync",
".",
"loop",
"and",
"unsync",
".",
"thread"
] | a52a0b04980dcaf6dc2fd734aa9d7be9d8960bbe | https://github.com/alex-sherman/unsync/blob/a52a0b04980dcaf6dc2fd734aa9d7be9d8960bbe/examples/mixing_methods.py#L25-L32 |
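A condensed sketch of the mixing pattern the two records above come from: a synchronous function decorated with ``@unsync`` runs in a thread pool, while an async ``@unsync`` function awaits the resulting Unfutures and aggregates them. Sleep times and return values are arbitrary.

import time
from unsync import unsync

@unsync
def blocking_work(num):
    time.sleep(0.1)            # stands in for blocking I/O or CPU work
    return num, num * num

@unsync
async def aggregate(tasks):
    results = [await t for t in tasks]
    return {num: res for num, res in results}

tasks = [blocking_work(i) for i in range(5)]
print(aggregate(tasks).result())   # {0: 0, 1: 1, 2: 4, 3: 9, 4: 16}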
246,644 | fastavro/fastavro | fastavro/_read_py.py | read_union | def read_union(fo, writer_schema, reader_schema=None):
"""A union is encoded by first writing a long value indicating the
zero-based position within the union of the schema of its value.
The value is then encoded per the indicated schema within the union.
"""
# schema resolution
index = read_long(fo)
if reader_schema:
# Handle case where the reader schema is just a single type (not union)
if not isinstance(reader_schema, list):
if match_types(writer_schema[index], reader_schema):
return read_data(fo, writer_schema[index], reader_schema)
else:
for schema in reader_schema:
if match_types(writer_schema[index], schema):
return read_data(fo, writer_schema[index], schema)
msg = 'schema mismatch: %s not found in %s' % \
(writer_schema, reader_schema)
raise SchemaResolutionError(msg)
else:
return read_data(fo, writer_schema[index]) | python | def read_union(fo, writer_schema, reader_schema=None):
# schema resolution
index = read_long(fo)
if reader_schema:
# Handle case where the reader schema is just a single type (not union)
if not isinstance(reader_schema, list):
if match_types(writer_schema[index], reader_schema):
return read_data(fo, writer_schema[index], reader_schema)
else:
for schema in reader_schema:
if match_types(writer_schema[index], schema):
return read_data(fo, writer_schema[index], schema)
msg = 'schema mismatch: %s not found in %s' % \
(writer_schema, reader_schema)
raise SchemaResolutionError(msg)
else:
return read_data(fo, writer_schema[index]) | [
"def",
"read_union",
"(",
"fo",
",",
"writer_schema",
",",
"reader_schema",
"=",
"None",
")",
":",
"# schema resolution",
"index",
"=",
"read_long",
"(",
"fo",
")",
"if",
"reader_schema",
":",
"# Handle case where the reader schema is just a single type (not union)",
"if",
"not",
"isinstance",
"(",
"reader_schema",
",",
"list",
")",
":",
"if",
"match_types",
"(",
"writer_schema",
"[",
"index",
"]",
",",
"reader_schema",
")",
":",
"return",
"read_data",
"(",
"fo",
",",
"writer_schema",
"[",
"index",
"]",
",",
"reader_schema",
")",
"else",
":",
"for",
"schema",
"in",
"reader_schema",
":",
"if",
"match_types",
"(",
"writer_schema",
"[",
"index",
"]",
",",
"schema",
")",
":",
"return",
"read_data",
"(",
"fo",
",",
"writer_schema",
"[",
"index",
"]",
",",
"schema",
")",
"msg",
"=",
"'schema mismatch: %s not found in %s'",
"%",
"(",
"writer_schema",
",",
"reader_schema",
")",
"raise",
"SchemaResolutionError",
"(",
"msg",
")",
"else",
":",
"return",
"read_data",
"(",
"fo",
",",
"writer_schema",
"[",
"index",
"]",
")"
] | A union is encoded by first writing a long value indicating the
zero-based position within the union of the schema of its value.
The value is then encoded per the indicated schema within the union. | [
"A",
"union",
"is",
"encoded",
"by",
"first",
"writing",
"a",
"long",
"value",
"indicating",
"the",
"zero",
"-",
"based",
"position",
"within",
"the",
"union",
"of",
"the",
"schema",
"of",
"its",
"value",
"."
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_read_py.py#L345-L366 |
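A short round-trip sketch of the union encoding described by ``read_union`` above, using fastavro's public schemaless helpers; the record schema and values are invented.

import io
from fastavro import schemaless_writer, schemaless_reader

schema = {
    'type': 'record', 'name': 'Row',
    'fields': [{'name': 'value', 'type': ['null', 'string']}],  # a two-branch union
}
buf = io.BytesIO()
schemaless_writer(buf, schema, {'value': 'hello'})   # writes the union index, then the string
buf.seek(0)
print(schemaless_reader(buf, schema))                # {'value': 'hello'}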
246,645 | fastavro/fastavro | fastavro/_read_py.py | read_data | def read_data(fo, writer_schema, reader_schema=None):
"""Read data from file object according to schema."""
record_type = extract_record_type(writer_schema)
logical_type = extract_logical_type(writer_schema)
if reader_schema and record_type in AVRO_TYPES:
# If the schemas are the same, set the reader schema to None so that no
# schema resolution is done for this call or future recursive calls
if writer_schema == reader_schema:
reader_schema = None
else:
match_schemas(writer_schema, reader_schema)
reader_fn = READERS.get(record_type)
if reader_fn:
try:
data = reader_fn(fo, writer_schema, reader_schema)
except StructError:
raise EOFError('cannot read %s from %s' % (record_type, fo))
if 'logicalType' in writer_schema:
fn = LOGICAL_READERS.get(logical_type)
if fn:
return fn(data, writer_schema, reader_schema)
if reader_schema is not None:
return maybe_promote(
data,
record_type,
extract_record_type(reader_schema)
)
else:
return data
else:
return read_data(
fo,
SCHEMA_DEFS[record_type],
SCHEMA_DEFS.get(reader_schema)
) | python | def read_data(fo, writer_schema, reader_schema=None):
record_type = extract_record_type(writer_schema)
logical_type = extract_logical_type(writer_schema)
if reader_schema and record_type in AVRO_TYPES:
# If the schemas are the same, set the reader schema to None so that no
# schema resolution is done for this call or future recursive calls
if writer_schema == reader_schema:
reader_schema = None
else:
match_schemas(writer_schema, reader_schema)
reader_fn = READERS.get(record_type)
if reader_fn:
try:
data = reader_fn(fo, writer_schema, reader_schema)
except StructError:
raise EOFError('cannot read %s from %s' % (record_type, fo))
if 'logicalType' in writer_schema:
fn = LOGICAL_READERS.get(logical_type)
if fn:
return fn(data, writer_schema, reader_schema)
if reader_schema is not None:
return maybe_promote(
data,
record_type,
extract_record_type(reader_schema)
)
else:
return data
else:
return read_data(
fo,
SCHEMA_DEFS[record_type],
SCHEMA_DEFS.get(reader_schema)
) | [
"def",
"read_data",
"(",
"fo",
",",
"writer_schema",
",",
"reader_schema",
"=",
"None",
")",
":",
"record_type",
"=",
"extract_record_type",
"(",
"writer_schema",
")",
"logical_type",
"=",
"extract_logical_type",
"(",
"writer_schema",
")",
"if",
"reader_schema",
"and",
"record_type",
"in",
"AVRO_TYPES",
":",
"# If the schemas are the same, set the reader schema to None so that no",
"# schema resolution is done for this call or future recursive calls",
"if",
"writer_schema",
"==",
"reader_schema",
":",
"reader_schema",
"=",
"None",
"else",
":",
"match_schemas",
"(",
"writer_schema",
",",
"reader_schema",
")",
"reader_fn",
"=",
"READERS",
".",
"get",
"(",
"record_type",
")",
"if",
"reader_fn",
":",
"try",
":",
"data",
"=",
"reader_fn",
"(",
"fo",
",",
"writer_schema",
",",
"reader_schema",
")",
"except",
"StructError",
":",
"raise",
"EOFError",
"(",
"'cannot read %s from %s'",
"%",
"(",
"record_type",
",",
"fo",
")",
")",
"if",
"'logicalType'",
"in",
"writer_schema",
":",
"fn",
"=",
"LOGICAL_READERS",
".",
"get",
"(",
"logical_type",
")",
"if",
"fn",
":",
"return",
"fn",
"(",
"data",
",",
"writer_schema",
",",
"reader_schema",
")",
"if",
"reader_schema",
"is",
"not",
"None",
":",
"return",
"maybe_promote",
"(",
"data",
",",
"record_type",
",",
"extract_record_type",
"(",
"reader_schema",
")",
")",
"else",
":",
"return",
"data",
"else",
":",
"return",
"read_data",
"(",
"fo",
",",
"SCHEMA_DEFS",
"[",
"record_type",
"]",
",",
"SCHEMA_DEFS",
".",
"get",
"(",
"reader_schema",
")",
")"
] | Read data from file object according to schema. | [
"Read",
"data",
"from",
"file",
"object",
"according",
"to",
"schema",
"."
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_read_py.py#L477-L516 |
246,646 | fastavro/fastavro | fastavro/_read_py.py | _iter_avro_records | def _iter_avro_records(fo, header, codec, writer_schema, reader_schema):
"""Return iterator over avro records."""
sync_marker = header['sync']
read_block = BLOCK_READERS.get(codec)
if not read_block:
raise ValueError('Unrecognized codec: %r' % codec)
block_count = 0
while True:
try:
block_count = read_long(fo)
except StopIteration:
return
block_fo = read_block(fo)
for i in xrange(block_count):
yield read_data(block_fo, writer_schema, reader_schema)
skip_sync(fo, sync_marker) | python | def _iter_avro_records(fo, header, codec, writer_schema, reader_schema):
sync_marker = header['sync']
read_block = BLOCK_READERS.get(codec)
if not read_block:
raise ValueError('Unrecognized codec: %r' % codec)
block_count = 0
while True:
try:
block_count = read_long(fo)
except StopIteration:
return
block_fo = read_block(fo)
for i in xrange(block_count):
yield read_data(block_fo, writer_schema, reader_schema)
skip_sync(fo, sync_marker) | [
"def",
"_iter_avro_records",
"(",
"fo",
",",
"header",
",",
"codec",
",",
"writer_schema",
",",
"reader_schema",
")",
":",
"sync_marker",
"=",
"header",
"[",
"'sync'",
"]",
"read_block",
"=",
"BLOCK_READERS",
".",
"get",
"(",
"codec",
")",
"if",
"not",
"read_block",
":",
"raise",
"ValueError",
"(",
"'Unrecognized codec: %r'",
"%",
"codec",
")",
"block_count",
"=",
"0",
"while",
"True",
":",
"try",
":",
"block_count",
"=",
"read_long",
"(",
"fo",
")",
"except",
"StopIteration",
":",
"return",
"block_fo",
"=",
"read_block",
"(",
"fo",
")",
"for",
"i",
"in",
"xrange",
"(",
"block_count",
")",
":",
"yield",
"read_data",
"(",
"block_fo",
",",
"writer_schema",
",",
"reader_schema",
")",
"skip_sync",
"(",
"fo",
",",
"sync_marker",
")"
] | Return iterator over avro records. | [
"Return",
"iterator",
"over",
"avro",
"records",
"."
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_read_py.py#L559-L579 |
246,647 | fastavro/fastavro | fastavro/_read_py.py | _iter_avro_blocks | def _iter_avro_blocks(fo, header, codec, writer_schema, reader_schema):
"""Return iterator over avro blocks."""
sync_marker = header['sync']
read_block = BLOCK_READERS.get(codec)
if not read_block:
raise ValueError('Unrecognized codec: %r' % codec)
while True:
offset = fo.tell()
try:
num_block_records = read_long(fo)
except StopIteration:
return
block_bytes = read_block(fo)
skip_sync(fo, sync_marker)
size = fo.tell() - offset
yield Block(
block_bytes, num_block_records, codec, reader_schema,
writer_schema, offset, size
) | python | def _iter_avro_blocks(fo, header, codec, writer_schema, reader_schema):
sync_marker = header['sync']
read_block = BLOCK_READERS.get(codec)
if not read_block:
raise ValueError('Unrecognized codec: %r' % codec)
while True:
offset = fo.tell()
try:
num_block_records = read_long(fo)
except StopIteration:
return
block_bytes = read_block(fo)
skip_sync(fo, sync_marker)
size = fo.tell() - offset
yield Block(
block_bytes, num_block_records, codec, reader_schema,
writer_schema, offset, size
) | [
"def",
"_iter_avro_blocks",
"(",
"fo",
",",
"header",
",",
"codec",
",",
"writer_schema",
",",
"reader_schema",
")",
":",
"sync_marker",
"=",
"header",
"[",
"'sync'",
"]",
"read_block",
"=",
"BLOCK_READERS",
".",
"get",
"(",
"codec",
")",
"if",
"not",
"read_block",
":",
"raise",
"ValueError",
"(",
"'Unrecognized codec: %r'",
"%",
"codec",
")",
"while",
"True",
":",
"offset",
"=",
"fo",
".",
"tell",
"(",
")",
"try",
":",
"num_block_records",
"=",
"read_long",
"(",
"fo",
")",
"except",
"StopIteration",
":",
"return",
"block_bytes",
"=",
"read_block",
"(",
"fo",
")",
"skip_sync",
"(",
"fo",
",",
"sync_marker",
")",
"size",
"=",
"fo",
".",
"tell",
"(",
")",
"-",
"offset",
"yield",
"Block",
"(",
"block_bytes",
",",
"num_block_records",
",",
"codec",
",",
"reader_schema",
",",
"writer_schema",
",",
"offset",
",",
"size",
")"
] | Return iterator over avro blocks. | [
"Return",
"iterator",
"over",
"avro",
"blocks",
"."
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_read_py.py#L582-L606 |
246,648 | fastavro/fastavro | fastavro/_write_py.py | prepare_timestamp_millis | def prepare_timestamp_millis(data, schema):
"""Converts datetime.datetime object to int timestamp with milliseconds
"""
if isinstance(data, datetime.datetime):
if data.tzinfo is not None:
delta = (data - epoch)
return int(delta.total_seconds() * MLS_PER_SECOND)
t = int(time.mktime(data.timetuple())) * MLS_PER_SECOND + int(
data.microsecond / 1000)
return t
else:
return data | python | def prepare_timestamp_millis(data, schema):
if isinstance(data, datetime.datetime):
if data.tzinfo is not None:
delta = (data - epoch)
return int(delta.total_seconds() * MLS_PER_SECOND)
t = int(time.mktime(data.timetuple())) * MLS_PER_SECOND + int(
data.microsecond / 1000)
return t
else:
return data | [
"def",
"prepare_timestamp_millis",
"(",
"data",
",",
"schema",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"datetime",
".",
"datetime",
")",
":",
"if",
"data",
".",
"tzinfo",
"is",
"not",
"None",
":",
"delta",
"=",
"(",
"data",
"-",
"epoch",
")",
"return",
"int",
"(",
"delta",
".",
"total_seconds",
"(",
")",
"*",
"MLS_PER_SECOND",
")",
"t",
"=",
"int",
"(",
"time",
".",
"mktime",
"(",
"data",
".",
"timetuple",
"(",
")",
")",
")",
"*",
"MLS_PER_SECOND",
"+",
"int",
"(",
"data",
".",
"microsecond",
"/",
"1000",
")",
"return",
"t",
"else",
":",
"return",
"data"
] | Converts datetime.datetime object to int timestamp with milliseconds | [
"Converts",
"datetime",
".",
"datetime",
"object",
"to",
"int",
"timestamp",
"with",
"milliseconds"
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L43-L54 |
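A tiny check of the arithmetic in ``prepare_timestamp_millis`` above, calling the private helper directly purely for illustration; in normal use fastavro applies it automatically to fields with the ``timestamp-millis`` logical type. The schema argument is unused on the timezone-aware path, so ``None`` is passed here.

import datetime
from fastavro._write_py import prepare_timestamp_millis

aware = datetime.datetime(1970, 1, 2, tzinfo=datetime.timezone.utc)
print(prepare_timestamp_millis(aware, None))   # 86400000, i.e. one day in milliseconds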
246,649 | fastavro/fastavro | fastavro/_write_py.py | prepare_timestamp_micros | def prepare_timestamp_micros(data, schema):
"""Converts datetime.datetime to int timestamp with microseconds"""
if isinstance(data, datetime.datetime):
if data.tzinfo is not None:
delta = (data - epoch)
return int(delta.total_seconds() * MCS_PER_SECOND)
t = int(time.mktime(data.timetuple())) * MCS_PER_SECOND + \
data.microsecond
return t
else:
return data | python | def prepare_timestamp_micros(data, schema):
if isinstance(data, datetime.datetime):
if data.tzinfo is not None:
delta = (data - epoch)
return int(delta.total_seconds() * MCS_PER_SECOND)
t = int(time.mktime(data.timetuple())) * MCS_PER_SECOND + \
data.microsecond
return t
else:
return data | [
"def",
"prepare_timestamp_micros",
"(",
"data",
",",
"schema",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"datetime",
".",
"datetime",
")",
":",
"if",
"data",
".",
"tzinfo",
"is",
"not",
"None",
":",
"delta",
"=",
"(",
"data",
"-",
"epoch",
")",
"return",
"int",
"(",
"delta",
".",
"total_seconds",
"(",
")",
"*",
"MCS_PER_SECOND",
")",
"t",
"=",
"int",
"(",
"time",
".",
"mktime",
"(",
"data",
".",
"timetuple",
"(",
")",
")",
")",
"*",
"MCS_PER_SECOND",
"+",
"data",
".",
"microsecond",
"return",
"t",
"else",
":",
"return",
"data"
] | Converts datetime.datetime to int timestamp with microseconds | [
"Converts",
"datetime",
".",
"datetime",
"to",
"int",
"timestamp",
"with",
"microseconds"
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L57-L67 |
246,650 | fastavro/fastavro | fastavro/_write_py.py | prepare_date | def prepare_date(data, schema):
"""Converts datetime.date to int timestamp"""
if isinstance(data, datetime.date):
return data.toordinal() - DAYS_SHIFT
else:
return data | python | def prepare_date(data, schema):
if isinstance(data, datetime.date):
return data.toordinal() - DAYS_SHIFT
else:
return data | [
"def",
"prepare_date",
"(",
"data",
",",
"schema",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"datetime",
".",
"date",
")",
":",
"return",
"data",
".",
"toordinal",
"(",
")",
"-",
"DAYS_SHIFT",
"else",
":",
"return",
"data"
] | Converts datetime.date to int timestamp | [
"Converts",
"datetime",
".",
"date",
"to",
"int",
"timestamp"
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L70-L75 |
246,651 | fastavro/fastavro | fastavro/_write_py.py | prepare_uuid | def prepare_uuid(data, schema):
"""Converts uuid.UUID to
string formatted UUID xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
"""
if isinstance(data, uuid.UUID):
return str(data)
else:
return data | python | def prepare_uuid(data, schema):
if isinstance(data, uuid.UUID):
return str(data)
else:
return data | [
"def",
"prepare_uuid",
"(",
"data",
",",
"schema",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"uuid",
".",
"UUID",
")",
":",
"return",
"str",
"(",
"data",
")",
"else",
":",
"return",
"data"
] | Converts uuid.UUID to
string formatted UUID xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx | [
"Converts",
"uuid",
".",
"UUID",
"to",
"string",
"formatted",
"UUID",
"xxxxxxxx",
"-",
"xxxx",
"-",
"xxxx",
"-",
"xxxx",
"-",
"xxxxxxxxxxxx"
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L78-L85 |
246,652 | fastavro/fastavro | fastavro/_write_py.py | prepare_time_millis | def prepare_time_millis(data, schema):
"""Convert datetime.time to int timestamp with milliseconds"""
if isinstance(data, datetime.time):
return int(
data.hour * MLS_PER_HOUR + data.minute * MLS_PER_MINUTE
+ data.second * MLS_PER_SECOND + int(data.microsecond / 1000))
else:
return data | python | def prepare_time_millis(data, schema):
if isinstance(data, datetime.time):
return int(
data.hour * MLS_PER_HOUR + data.minute * MLS_PER_MINUTE
+ data.second * MLS_PER_SECOND + int(data.microsecond / 1000))
else:
return data | [
"def",
"prepare_time_millis",
"(",
"data",
",",
"schema",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"datetime",
".",
"time",
")",
":",
"return",
"int",
"(",
"data",
".",
"hour",
"*",
"MLS_PER_HOUR",
"+",
"data",
".",
"minute",
"*",
"MLS_PER_MINUTE",
"+",
"data",
".",
"second",
"*",
"MLS_PER_SECOND",
"+",
"int",
"(",
"data",
".",
"microsecond",
"/",
"1000",
")",
")",
"else",
":",
"return",
"data"
] | Convert datetime.time to int timestamp with milliseconds | [
"Convert",
"datetime",
".",
"time",
"to",
"int",
"timestamp",
"with",
"milliseconds"
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L88-L95 |
246,653 | fastavro/fastavro | fastavro/_write_py.py | prepare_time_micros | def prepare_time_micros(data, schema):
"""Convert datetime.time to int timestamp with microseconds"""
if isinstance(data, datetime.time):
return long(data.hour * MCS_PER_HOUR + data.minute * MCS_PER_MINUTE
+ data.second * MCS_PER_SECOND + data.microsecond)
else:
return data | python | def prepare_time_micros(data, schema):
if isinstance(data, datetime.time):
return long(data.hour * MCS_PER_HOUR + data.minute * MCS_PER_MINUTE
+ data.second * MCS_PER_SECOND + data.microsecond)
else:
return data | [
"def",
"prepare_time_micros",
"(",
"data",
",",
"schema",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"datetime",
".",
"time",
")",
":",
"return",
"long",
"(",
"data",
".",
"hour",
"*",
"MCS_PER_HOUR",
"+",
"data",
".",
"minute",
"*",
"MCS_PER_MINUTE",
"+",
"data",
".",
"second",
"*",
"MCS_PER_SECOND",
"+",
"data",
".",
"microsecond",
")",
"else",
":",
"return",
"data"
] | Convert datetime.time to int timestamp with microseconds | [
"Convert",
"datetime",
".",
"time",
"to",
"int",
"timestamp",
"with",
"microseconds"
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L98-L104 |
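The two time-of-day records above just rescale hours, minutes and seconds onto one integer. A hedged sketch of the micros variant, with the scale factors defined locally (they mirror, but are not imported from, the module constants):

import datetime

MCS_PER_SECOND = 10 ** 6
MCS_PER_MINUTE = 60 * MCS_PER_SECOND
MCS_PER_HOUR = 60 * MCS_PER_MINUTE

def to_time_micros(t):
    # Microseconds since midnight, as the Avro 'time-micros' logical type expects.
    return (t.hour * MCS_PER_HOUR + t.minute * MCS_PER_MINUTE
            + t.second * MCS_PER_SECOND + t.microsecond)

print(to_time_micros(datetime.time(1, 2, 3, 4)))  # 3723000004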
246,654 | fastavro/fastavro | fastavro/_write_py.py | prepare_bytes_decimal | def prepare_bytes_decimal(data, schema):
"""Convert decimal.Decimal to bytes"""
if not isinstance(data, decimal.Decimal):
return data
scale = schema.get('scale', 0)
# based on https://github.com/apache/avro/pull/82/
sign, digits, exp = data.as_tuple()
if -exp > scale:
raise ValueError(
'Scale provided in schema does not match the decimal')
delta = exp + scale
if delta > 0:
digits = digits + (0,) * delta
unscaled_datum = 0
for digit in digits:
unscaled_datum = (unscaled_datum * 10) + digit
bits_req = unscaled_datum.bit_length() + 1
if sign:
unscaled_datum = (1 << bits_req) - unscaled_datum
bytes_req = bits_req // 8
padding_bits = ~((1 << bits_req) - 1) if sign else 0
packed_bits = padding_bits | unscaled_datum
bytes_req += 1 if (bytes_req << 3) < bits_req else 0
tmp = MemoryIO()
for index in range(bytes_req - 1, -1, -1):
bits_to_write = packed_bits >> (8 * index)
tmp.write(mk_bits(bits_to_write & 0xff))
return tmp.getvalue() | python | def prepare_bytes_decimal(data, schema):
if not isinstance(data, decimal.Decimal):
return data
scale = schema.get('scale', 0)
# based on https://github.com/apache/avro/pull/82/
sign, digits, exp = data.as_tuple()
if -exp > scale:
raise ValueError(
'Scale provided in schema does not match the decimal')
delta = exp + scale
if delta > 0:
digits = digits + (0,) * delta
unscaled_datum = 0
for digit in digits:
unscaled_datum = (unscaled_datum * 10) + digit
bits_req = unscaled_datum.bit_length() + 1
if sign:
unscaled_datum = (1 << bits_req) - unscaled_datum
bytes_req = bits_req // 8
padding_bits = ~((1 << bits_req) - 1) if sign else 0
packed_bits = padding_bits | unscaled_datum
bytes_req += 1 if (bytes_req << 3) < bits_req else 0
tmp = MemoryIO()
for index in range(bytes_req - 1, -1, -1):
bits_to_write = packed_bits >> (8 * index)
tmp.write(mk_bits(bits_to_write & 0xff))
return tmp.getvalue() | [
"def",
"prepare_bytes_decimal",
"(",
"data",
",",
"schema",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"decimal",
".",
"Decimal",
")",
":",
"return",
"data",
"scale",
"=",
"schema",
".",
"get",
"(",
"'scale'",
",",
"0",
")",
"# based on https://github.com/apache/avro/pull/82/",
"sign",
",",
"digits",
",",
"exp",
"=",
"data",
".",
"as_tuple",
"(",
")",
"if",
"-",
"exp",
">",
"scale",
":",
"raise",
"ValueError",
"(",
"'Scale provided in schema does not match the decimal'",
")",
"delta",
"=",
"exp",
"+",
"scale",
"if",
"delta",
">",
"0",
":",
"digits",
"=",
"digits",
"+",
"(",
"0",
",",
")",
"*",
"delta",
"unscaled_datum",
"=",
"0",
"for",
"digit",
"in",
"digits",
":",
"unscaled_datum",
"=",
"(",
"unscaled_datum",
"*",
"10",
")",
"+",
"digit",
"bits_req",
"=",
"unscaled_datum",
".",
"bit_length",
"(",
")",
"+",
"1",
"if",
"sign",
":",
"unscaled_datum",
"=",
"(",
"1",
"<<",
"bits_req",
")",
"-",
"unscaled_datum",
"bytes_req",
"=",
"bits_req",
"//",
"8",
"padding_bits",
"=",
"~",
"(",
"(",
"1",
"<<",
"bits_req",
")",
"-",
"1",
")",
"if",
"sign",
"else",
"0",
"packed_bits",
"=",
"padding_bits",
"|",
"unscaled_datum",
"bytes_req",
"+=",
"1",
"if",
"(",
"bytes_req",
"<<",
"3",
")",
"<",
"bits_req",
"else",
"0",
"tmp",
"=",
"MemoryIO",
"(",
")",
"for",
"index",
"in",
"range",
"(",
"bytes_req",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"bits_to_write",
"=",
"packed_bits",
">>",
"(",
"8",
"*",
"index",
")",
"tmp",
".",
"write",
"(",
"mk_bits",
"(",
"bits_to_write",
"&",
"0xff",
")",
")",
"return",
"tmp",
".",
"getvalue",
"(",
")"
] | Convert decimal.Decimal to bytes | [
"Convert",
"decimal",
".",
"Decimal",
"to",
"bytes"
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L107-L145 |
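The loop in prepare_bytes_decimal builds a big-endian two's-complement encoding of the unscaled integer. On Python 3 the same byte layout can be sketched with int.to_bytes; this is an illustration of the encoding only, not fastavro's code path (which also has to run on Python 2).

import decimal

def decimal_to_avro_bytes(value, scale):
    # Big-endian two's-complement bytes of the unscaled integer (a sketch).
    unscaled = int(value.scaleb(scale))
    nbytes = max(1, (unscaled.bit_length() + 8) // 8)
    return unscaled.to_bytes(nbytes, byteorder='big', signed=True)

print(decimal_to_avro_bytes(decimal.Decimal('3.14'), 2).hex())   # 013a
print(decimal_to_avro_bytes(decimal.Decimal('-3.14'), 2).hex())  # fec6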
246,655 | fastavro/fastavro | fastavro/_write_py.py | prepare_fixed_decimal | def prepare_fixed_decimal(data, schema):
"""Converts decimal.Decimal to fixed length bytes array"""
if not isinstance(data, decimal.Decimal):
return data
scale = schema.get('scale', 0)
size = schema['size']
# based on https://github.com/apache/avro/pull/82/
sign, digits, exp = data.as_tuple()
if -exp > scale:
raise ValueError(
'Scale provided in schema does not match the decimal')
delta = exp + scale
if delta > 0:
digits = digits + (0,) * delta
unscaled_datum = 0
for digit in digits:
unscaled_datum = (unscaled_datum * 10) + digit
bits_req = unscaled_datum.bit_length() + 1
size_in_bits = size * 8
offset_bits = size_in_bits - bits_req
mask = 2 ** size_in_bits - 1
bit = 1
for i in range(bits_req):
mask ^= bit
bit <<= 1
if bits_req < 8:
bytes_req = 1
else:
bytes_req = bits_req // 8
if bits_req % 8 != 0:
bytes_req += 1
tmp = MemoryIO()
if sign:
unscaled_datum = (1 << bits_req) - unscaled_datum
unscaled_datum = mask | unscaled_datum
for index in range(size - 1, -1, -1):
bits_to_write = unscaled_datum >> (8 * index)
tmp.write(mk_bits(bits_to_write & 0xff))
else:
for i in range(offset_bits // 8):
tmp.write(mk_bits(0))
for index in range(bytes_req - 1, -1, -1):
bits_to_write = unscaled_datum >> (8 * index)
tmp.write(mk_bits(bits_to_write & 0xff))
return tmp.getvalue() | python | def prepare_fixed_decimal(data, schema):
if not isinstance(data, decimal.Decimal):
return data
scale = schema.get('scale', 0)
size = schema['size']
# based on https://github.com/apache/avro/pull/82/
sign, digits, exp = data.as_tuple()
if -exp > scale:
raise ValueError(
'Scale provided in schema does not match the decimal')
delta = exp + scale
if delta > 0:
digits = digits + (0,) * delta
unscaled_datum = 0
for digit in digits:
unscaled_datum = (unscaled_datum * 10) + digit
bits_req = unscaled_datum.bit_length() + 1
size_in_bits = size * 8
offset_bits = size_in_bits - bits_req
mask = 2 ** size_in_bits - 1
bit = 1
for i in range(bits_req):
mask ^= bit
bit <<= 1
if bits_req < 8:
bytes_req = 1
else:
bytes_req = bits_req // 8
if bits_req % 8 != 0:
bytes_req += 1
tmp = MemoryIO()
if sign:
unscaled_datum = (1 << bits_req) - unscaled_datum
unscaled_datum = mask | unscaled_datum
for index in range(size - 1, -1, -1):
bits_to_write = unscaled_datum >> (8 * index)
tmp.write(mk_bits(bits_to_write & 0xff))
else:
for i in range(offset_bits // 8):
tmp.write(mk_bits(0))
for index in range(bytes_req - 1, -1, -1):
bits_to_write = unscaled_datum >> (8 * index)
tmp.write(mk_bits(bits_to_write & 0xff))
return tmp.getvalue() | [
"def",
"prepare_fixed_decimal",
"(",
"data",
",",
"schema",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"decimal",
".",
"Decimal",
")",
":",
"return",
"data",
"scale",
"=",
"schema",
".",
"get",
"(",
"'scale'",
",",
"0",
")",
"size",
"=",
"schema",
"[",
"'size'",
"]",
"# based on https://github.com/apache/avro/pull/82/",
"sign",
",",
"digits",
",",
"exp",
"=",
"data",
".",
"as_tuple",
"(",
")",
"if",
"-",
"exp",
">",
"scale",
":",
"raise",
"ValueError",
"(",
"'Scale provided in schema does not match the decimal'",
")",
"delta",
"=",
"exp",
"+",
"scale",
"if",
"delta",
">",
"0",
":",
"digits",
"=",
"digits",
"+",
"(",
"0",
",",
")",
"*",
"delta",
"unscaled_datum",
"=",
"0",
"for",
"digit",
"in",
"digits",
":",
"unscaled_datum",
"=",
"(",
"unscaled_datum",
"*",
"10",
")",
"+",
"digit",
"bits_req",
"=",
"unscaled_datum",
".",
"bit_length",
"(",
")",
"+",
"1",
"size_in_bits",
"=",
"size",
"*",
"8",
"offset_bits",
"=",
"size_in_bits",
"-",
"bits_req",
"mask",
"=",
"2",
"**",
"size_in_bits",
"-",
"1",
"bit",
"=",
"1",
"for",
"i",
"in",
"range",
"(",
"bits_req",
")",
":",
"mask",
"^=",
"bit",
"bit",
"<<=",
"1",
"if",
"bits_req",
"<",
"8",
":",
"bytes_req",
"=",
"1",
"else",
":",
"bytes_req",
"=",
"bits_req",
"//",
"8",
"if",
"bits_req",
"%",
"8",
"!=",
"0",
":",
"bytes_req",
"+=",
"1",
"tmp",
"=",
"MemoryIO",
"(",
")",
"if",
"sign",
":",
"unscaled_datum",
"=",
"(",
"1",
"<<",
"bits_req",
")",
"-",
"unscaled_datum",
"unscaled_datum",
"=",
"mask",
"|",
"unscaled_datum",
"for",
"index",
"in",
"range",
"(",
"size",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"bits_to_write",
"=",
"unscaled_datum",
">>",
"(",
"8",
"*",
"index",
")",
"tmp",
".",
"write",
"(",
"mk_bits",
"(",
"bits_to_write",
"&",
"0xff",
")",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"offset_bits",
"//",
"8",
")",
":",
"tmp",
".",
"write",
"(",
"mk_bits",
"(",
"0",
")",
")",
"for",
"index",
"in",
"range",
"(",
"bytes_req",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"bits_to_write",
"=",
"unscaled_datum",
">>",
"(",
"8",
"*",
"index",
")",
"tmp",
".",
"write",
"(",
"mk_bits",
"(",
"bits_to_write",
"&",
"0xff",
")",
")",
"return",
"tmp",
".",
"getvalue",
"(",
")"
] | Converts decimal.Decimal to fixed length bytes array | [
"Converts",
"decimal",
".",
"Decimal",
"to",
"fixed",
"length",
"bytes",
"array"
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L148-L203 |
246,656 | fastavro/fastavro | fastavro/_write_py.py | write_crc32 | def write_crc32(fo, bytes):
"""A 4-byte, big-endian CRC32 checksum"""
data = crc32(bytes) & 0xFFFFFFFF
fo.write(pack('>I', data)) | python | def write_crc32(fo, bytes):
data = crc32(bytes) & 0xFFFFFFFF
fo.write(pack('>I', data)) | [
"def",
"write_crc32",
"(",
"fo",
",",
"bytes",
")",
":",
"data",
"=",
"crc32",
"(",
"bytes",
")",
"&",
"0xFFFFFFFF",
"fo",
".",
"write",
"(",
"pack",
"(",
"'>I'",
",",
"data",
")",
")"
] | A 4-byte, big-endian CRC32 checksum | [
"A",
"4",
"-",
"byte",
"big",
"-",
"endian",
"CRC32",
"checksum"
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L245-L248 |
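The checksum above is plain zlib.crc32 masked to an unsigned 32-bit value and packed big-endian (it is the per-block framing used alongside compressed codecs). A tiny round-trip check:

from struct import pack, unpack
from zlib import crc32

payload = b'compressed avro block'
framed = pack('>I', crc32(payload) & 0xFFFFFFFF)

# A reader recomputes the checksum over the payload and compares.
assert unpack('>I', framed)[0] == crc32(payload) & 0xFFFFFFFF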
246,657 | fastavro/fastavro | fastavro/_write_py.py | write_union | def write_union(fo, datum, schema):
"""A union is encoded by first writing a long value indicating the
zero-based position within the union of the schema of its value. The value
is then encoded per the indicated schema within the union."""
if isinstance(datum, tuple):
(name, datum) = datum
for index, candidate in enumerate(schema):
if extract_record_type(candidate) == 'record':
schema_name = candidate['name']
else:
schema_name = candidate
if name == schema_name:
break
else:
msg = 'provided union type name %s not found in schema %s' \
% (name, schema)
raise ValueError(msg)
else:
pytype = type(datum)
best_match_index = -1
most_fields = -1
for index, candidate in enumerate(schema):
if validate(datum, candidate, raise_errors=False):
if extract_record_type(candidate) == 'record':
fields = len(candidate['fields'])
if fields > most_fields:
best_match_index = index
most_fields = fields
else:
best_match_index = index
break
if best_match_index < 0:
msg = '%r (type %s) do not match %s' % (datum, pytype, schema)
raise ValueError(msg)
index = best_match_index
# write data
write_long(fo, index)
write_data(fo, datum, schema[index]) | python | def write_union(fo, datum, schema):
if isinstance(datum, tuple):
(name, datum) = datum
for index, candidate in enumerate(schema):
if extract_record_type(candidate) == 'record':
schema_name = candidate['name']
else:
schema_name = candidate
if name == schema_name:
break
else:
msg = 'provided union type name %s not found in schema %s' \
% (name, schema)
raise ValueError(msg)
else:
pytype = type(datum)
best_match_index = -1
most_fields = -1
for index, candidate in enumerate(schema):
if validate(datum, candidate, raise_errors=False):
if extract_record_type(candidate) == 'record':
fields = len(candidate['fields'])
if fields > most_fields:
best_match_index = index
most_fields = fields
else:
best_match_index = index
break
if best_match_index < 0:
msg = '%r (type %s) do not match %s' % (datum, pytype, schema)
raise ValueError(msg)
index = best_match_index
# write data
write_long(fo, index)
write_data(fo, datum, schema[index]) | [
"def",
"write_union",
"(",
"fo",
",",
"datum",
",",
"schema",
")",
":",
"if",
"isinstance",
"(",
"datum",
",",
"tuple",
")",
":",
"(",
"name",
",",
"datum",
")",
"=",
"datum",
"for",
"index",
",",
"candidate",
"in",
"enumerate",
"(",
"schema",
")",
":",
"if",
"extract_record_type",
"(",
"candidate",
")",
"==",
"'record'",
":",
"schema_name",
"=",
"candidate",
"[",
"'name'",
"]",
"else",
":",
"schema_name",
"=",
"candidate",
"if",
"name",
"==",
"schema_name",
":",
"break",
"else",
":",
"msg",
"=",
"'provided union type name %s not found in schema %s'",
"%",
"(",
"name",
",",
"schema",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"else",
":",
"pytype",
"=",
"type",
"(",
"datum",
")",
"best_match_index",
"=",
"-",
"1",
"most_fields",
"=",
"-",
"1",
"for",
"index",
",",
"candidate",
"in",
"enumerate",
"(",
"schema",
")",
":",
"if",
"validate",
"(",
"datum",
",",
"candidate",
",",
"raise_errors",
"=",
"False",
")",
":",
"if",
"extract_record_type",
"(",
"candidate",
")",
"==",
"'record'",
":",
"fields",
"=",
"len",
"(",
"candidate",
"[",
"'fields'",
"]",
")",
"if",
"fields",
">",
"most_fields",
":",
"best_match_index",
"=",
"index",
"most_fields",
"=",
"fields",
"else",
":",
"best_match_index",
"=",
"index",
"break",
"if",
"best_match_index",
"<",
"0",
":",
"msg",
"=",
"'%r (type %s) do not match %s'",
"%",
"(",
"datum",
",",
"pytype",
",",
"schema",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"index",
"=",
"best_match_index",
"# write data",
"write_long",
"(",
"fo",
",",
"index",
")",
"write_data",
"(",
"fo",
",",
"datum",
",",
"schema",
"[",
"index",
"]",
")"
] | A union is encoded by first writing a long value indicating the
zero-based position within the union of the schema of its value. The value
is then encoded per the indicated schema within the union. | [
"A",
"union",
"is",
"encoded",
"by",
"first",
"writing",
"a",
"long",
"value",
"indicating",
"the",
"zero",
"-",
"based",
"position",
"within",
"the",
"union",
"of",
"the",
"schema",
"of",
"its",
"value",
".",
"The",
"value",
"is",
"then",
"encoded",
"per",
"the",
"indicated",
"schema",
"within",
"the",
"union",
"."
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L302-L341 |
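Because the else-branch above picks the best-matching candidate, a datum that validates against several record branches can be disambiguated by passing a (branch_name, value) tuple, which the first branch of write_union handles. A hedged sketch through the public single-record writer; the schema and record names below are invented for the illustration.

import io
import fastavro

schema = fastavro.parse_schema({
    'name': 'Event', 'type': 'record',
    'fields': [{'name': 'payload', 'type': [
        {'name': 'Click', 'type': 'record',
         'fields': [{'name': 'x', 'type': 'int'}]},
        {'name': 'Scroll', 'type': 'record',
         'fields': [{'name': 'x', 'type': 'int'}]},
    ]}],
})

buf = io.BytesIO()
# Without the ('Scroll', ...) hint the writer would take the first branch that validates.
fastavro.schemaless_writer(buf, schema, {'payload': ('Scroll', {'x': 7})})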
246,658 | fastavro/fastavro | fastavro/_write_py.py | write_data | def write_data(fo, datum, schema):
"""Write a datum of data to output stream.
Parameters
----------
fo: file-like
Output file
datum: object
Data to write
schema: dict
Schema to use
"""
record_type = extract_record_type(schema)
logical_type = extract_logical_type(schema)
fn = WRITERS.get(record_type)
if fn:
if logical_type:
prepare = LOGICAL_WRITERS.get(logical_type)
if prepare:
datum = prepare(datum, schema)
return fn(fo, datum, schema)
else:
return write_data(fo, datum, SCHEMA_DEFS[record_type]) | python | def write_data(fo, datum, schema):
record_type = extract_record_type(schema)
logical_type = extract_logical_type(schema)
fn = WRITERS.get(record_type)
if fn:
if logical_type:
prepare = LOGICAL_WRITERS.get(logical_type)
if prepare:
datum = prepare(datum, schema)
return fn(fo, datum, schema)
else:
return write_data(fo, datum, SCHEMA_DEFS[record_type]) | [
"def",
"write_data",
"(",
"fo",
",",
"datum",
",",
"schema",
")",
":",
"record_type",
"=",
"extract_record_type",
"(",
"schema",
")",
"logical_type",
"=",
"extract_logical_type",
"(",
"schema",
")",
"fn",
"=",
"WRITERS",
".",
"get",
"(",
"record_type",
")",
"if",
"fn",
":",
"if",
"logical_type",
":",
"prepare",
"=",
"LOGICAL_WRITERS",
".",
"get",
"(",
"logical_type",
")",
"if",
"prepare",
":",
"datum",
"=",
"prepare",
"(",
"datum",
",",
"schema",
")",
"return",
"fn",
"(",
"fo",
",",
"datum",
",",
"schema",
")",
"else",
":",
"return",
"write_data",
"(",
"fo",
",",
"datum",
",",
"SCHEMA_DEFS",
"[",
"record_type",
"]",
")"
] | Write a datum of data to output stream.
Parameters
----------
fo: file-like
Output file
datum: object
Data to write
schema: dict
Schema to use | [
"Write",
"a",
"datum",
"of",
"data",
"to",
"output",
"stream",
"."
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L390-L414 |
246,659 | fastavro/fastavro | fastavro/_write_py.py | null_write_block | def null_write_block(fo, block_bytes):
"""Write block in "null" codec."""
write_long(fo, len(block_bytes))
fo.write(block_bytes) | python | def null_write_block(fo, block_bytes):
write_long(fo, len(block_bytes))
fo.write(block_bytes) | [
"def",
"null_write_block",
"(",
"fo",
",",
"block_bytes",
")",
":",
"write_long",
"(",
"fo",
",",
"len",
"(",
"block_bytes",
")",
")",
"fo",
".",
"write",
"(",
"block_bytes",
")"
] | Write block in "null" codec. | [
"Write",
"block",
"in",
"null",
"codec",
"."
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L426-L429 |
246,660 | fastavro/fastavro | fastavro/_write_py.py | deflate_write_block | def deflate_write_block(fo, block_bytes):
"""Write block in "deflate" codec."""
# The first two characters and last character are zlib
# wrappers around deflate data.
data = compress(block_bytes)[2:-1]
write_long(fo, len(data))
fo.write(data) | python | def deflate_write_block(fo, block_bytes):
# The first two characters and last character are zlib
# wrappers around deflate data.
data = compress(block_bytes)[2:-1]
write_long(fo, len(data))
fo.write(data) | [
"def",
"deflate_write_block",
"(",
"fo",
",",
"block_bytes",
")",
":",
"# The first two characters and last character are zlib",
"# wrappers around deflate data.",
"data",
"=",
"compress",
"(",
"block_bytes",
")",
"[",
"2",
":",
"-",
"1",
"]",
"write_long",
"(",
"fo",
",",
"len",
"(",
"data",
")",
")",
"fo",
".",
"write",
"(",
"data",
")"
] | Write block in "deflate" codec. | [
"Write",
"block",
"in",
"deflate",
"codec",
"."
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L432-L439 |
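The [2:-1] slice in deflate_write_block drops the zlib wrapper bytes so that what lands in the file is essentially the raw DEFLATE stream the Avro spec calls for. A small round-trip sketch of that framing, independent of fastavro:

from zlib import MAX_WBITS, compress, decompressobj

block = b'serialized records for one block'
data = compress(block)[2:-1]          # strip zlib wrapper bytes, as above

# A reader inflates with a raw-deflate window (negative wbits); the few
# leftover checksum bytes end up in unused_data and are ignored.
assert decompressobj(-MAX_WBITS).decompress(data) == block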
246,661 | fastavro/fastavro | fastavro/_write_py.py | schemaless_writer | def schemaless_writer(fo, schema, record):
"""Write a single record without the schema or header information
Parameters
----------
fo: file-like
Output file
schema: dict
Schema
record: dict
Record to write
Example::
parsed_schema = fastavro.parse_schema(schema)
with open('file.avro', 'wb') as fp:
fastavro.schemaless_writer(fp, parsed_schema, record)
Note: The ``schemaless_writer`` can only write a single record.
"""
schema = parse_schema(schema)
write_data(fo, record, schema) | python | def schemaless_writer(fo, schema, record):
schema = parse_schema(schema)
write_data(fo, record, schema) | [
"def",
"schemaless_writer",
"(",
"fo",
",",
"schema",
",",
"record",
")",
":",
"schema",
"=",
"parse_schema",
"(",
"schema",
")",
"write_data",
"(",
"fo",
",",
"record",
",",
"schema",
")"
] | Write a single record without the schema or header information
Parameters
----------
fo: file-like
Output file
schema: dict
Schema
record: dict
Record to write
Example::
parsed_schema = fastavro.parse_schema(schema)
with open('file.avro', 'wb') as fp:
fastavro.schemaless_writer(fp, parsed_schema, record)
Note: The ``schemaless_writer`` can only write a single record. | [
"Write",
"a",
"single",
"record",
"without",
"the",
"schema",
"or",
"header",
"information"
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L636-L658 |
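A hedged round trip for the single-record path: schemaless_writer produces just the encoded datum (no header, no embedded schema), so the matching schemaless_reader needs the same parsed schema to decode it.

import io
import fastavro

schema = fastavro.parse_schema({
    'name': 'Ping', 'type': 'record',
    'fields': [{'name': 'seq', 'type': 'long'}],
})

buf = io.BytesIO()
fastavro.schemaless_writer(buf, schema, {'seq': 42})

buf.seek(0)
print(fastavro.schemaless_reader(buf, schema))  # {'seq': 42}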
246,662 | fastavro/fastavro | fastavro/_validation_py.py | validate_int | def validate_int(datum, **kwargs):
"""
Check that the data value is a non floating
point number with size less than Int32.
Also support for logicalType timestamp validation with datetime.
Int32 = -2147483648<=datum<=2147483647
conditional python types
(int, long, numbers.Integral,
datetime.time, datetime.datetime, datetime.date)
Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs
"""
return (
(isinstance(datum, (int, long, numbers.Integral))
and INT_MIN_VALUE <= datum <= INT_MAX_VALUE
and not isinstance(datum, bool))
or isinstance(
datum, (datetime.time, datetime.datetime, datetime.date)
)
) | python | def validate_int(datum, **kwargs):
return (
(isinstance(datum, (int, long, numbers.Integral))
and INT_MIN_VALUE <= datum <= INT_MAX_VALUE
and not isinstance(datum, bool))
or isinstance(
datum, (datetime.time, datetime.datetime, datetime.date)
)
) | [
"def",
"validate_int",
"(",
"datum",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"(",
"(",
"isinstance",
"(",
"datum",
",",
"(",
"int",
",",
"long",
",",
"numbers",
".",
"Integral",
")",
")",
"and",
"INT_MIN_VALUE",
"<=",
"datum",
"<=",
"INT_MAX_VALUE",
"and",
"not",
"isinstance",
"(",
"datum",
",",
"bool",
")",
")",
"or",
"isinstance",
"(",
"datum",
",",
"(",
"datetime",
".",
"time",
",",
"datetime",
".",
"datetime",
",",
"datetime",
".",
"date",
")",
")",
")"
] | Check that the data value is a non floating
point number with size less than Int32.
Also support for logicalType timestamp validation with datetime.
Int32 = -2147483648<=datum<=2147483647
conditional python types
(int, long, numbers.Integral,
datetime.time, datetime.datetime, datetime.date)
Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs | [
"Check",
"that",
"the",
"data",
"value",
"is",
"a",
"non",
"floating",
"point",
"number",
"with",
"size",
"less",
"that",
"Int32",
".",
"Also",
"support",
"for",
"logicalType",
"timestamp",
"validation",
"with",
"datetime",
"."
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_validation_py.py#L79-L105 |
246,663 | fastavro/fastavro | fastavro/_validation_py.py | validate_float | def validate_float(datum, **kwargs):
"""
Check that the data value is a floating
point number or double precision.
conditional python types
(int, long, float, numbers.Real)
Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs
"""
return (
isinstance(datum, (int, long, float, numbers.Real))
and not isinstance(datum, bool)
) | python | def validate_float(datum, **kwargs):
return (
isinstance(datum, (int, long, float, numbers.Real))
and not isinstance(datum, bool)
) | [
"def",
"validate_float",
"(",
"datum",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"(",
"isinstance",
"(",
"datum",
",",
"(",
"int",
",",
"long",
",",
"float",
",",
"numbers",
".",
"Real",
")",
")",
"and",
"not",
"isinstance",
"(",
"datum",
",",
"bool",
")",
")"
] | Check that the data value is a floating
point number or double precision.
conditional python types
(int, long, float, numbers.Real)
Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs | [
"Check",
"that",
"the",
"data",
"value",
"is",
"a",
"floating",
"point",
"number",
"or",
"double",
"precision",
"."
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_validation_py.py#L137-L155 |
246,664 | fastavro/fastavro | fastavro/_validation_py.py | validate_record | def validate_record(datum, schema, parent_ns=None, raise_errors=True):
"""
Check that the data is a Mapping type with all schema defined fields
validated as True.
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
parent_ns: str
parent namespace
raise_errors: bool
If true, raises ValidationError on invalid data
"""
_, namespace = schema_name(schema, parent_ns)
return (
isinstance(datum, Mapping) and
all(validate(datum=datum.get(f['name'], f.get('default', no_value)),
schema=f['type'],
field='{}.{}'.format(namespace, f['name']),
raise_errors=raise_errors)
for f in schema['fields']
)
) | python | def validate_record(datum, schema, parent_ns=None, raise_errors=True):
_, namespace = schema_name(schema, parent_ns)
return (
isinstance(datum, Mapping) and
all(validate(datum=datum.get(f['name'], f.get('default', no_value)),
schema=f['type'],
field='{}.{}'.format(namespace, f['name']),
raise_errors=raise_errors)
for f in schema['fields']
)
) | [
"def",
"validate_record",
"(",
"datum",
",",
"schema",
",",
"parent_ns",
"=",
"None",
",",
"raise_errors",
"=",
"True",
")",
":",
"_",
",",
"namespace",
"=",
"schema_name",
"(",
"schema",
",",
"parent_ns",
")",
"return",
"(",
"isinstance",
"(",
"datum",
",",
"Mapping",
")",
"and",
"all",
"(",
"validate",
"(",
"datum",
"=",
"datum",
".",
"get",
"(",
"f",
"[",
"'name'",
"]",
",",
"f",
".",
"get",
"(",
"'default'",
",",
"no_value",
")",
")",
",",
"schema",
"=",
"f",
"[",
"'type'",
"]",
",",
"field",
"=",
"'{}.{}'",
".",
"format",
"(",
"namespace",
",",
"f",
"[",
"'name'",
"]",
")",
",",
"raise_errors",
"=",
"raise_errors",
")",
"for",
"f",
"in",
"schema",
"[",
"'fields'",
"]",
")",
")"
] | Check that the data is a Mapping type with all schema defined fields
validated as True.
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
parent_ns: str
parent namespace
raise_errors: bool
If true, raises ValidationError on invalid data | [
"Check",
"that",
"the",
"data",
"is",
"a",
"Mapping",
"type",
"with",
"all",
"schema",
"defined",
"fields",
"validated",
"as",
"True",
"."
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_validation_py.py#L245-L270 |
246,665 | fastavro/fastavro | fastavro/_validation_py.py | validate_union | def validate_union(datum, schema, parent_ns=None, raise_errors=True):
"""
Check that the data is a list type with possible options to
validate as True.
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
parent_ns: str
parent namespace
raise_errors: bool
If true, raises ValidationError on invalid data
"""
if isinstance(datum, tuple):
(name, datum) = datum
for candidate in schema:
if extract_record_type(candidate) == 'record':
if name == candidate["name"]:
return validate(datum, schema=candidate,
field=parent_ns,
raise_errors=raise_errors)
else:
return False
errors = []
for s in schema:
try:
ret = validate(datum, schema=s,
field=parent_ns,
raise_errors=raise_errors)
if ret:
# We exit on the first passing type in Unions
return True
except ValidationError as e:
errors.extend(e.errors)
if raise_errors:
raise ValidationError(*errors)
return False | python | def validate_union(datum, schema, parent_ns=None, raise_errors=True):
if isinstance(datum, tuple):
(name, datum) = datum
for candidate in schema:
if extract_record_type(candidate) == 'record':
if name == candidate["name"]:
return validate(datum, schema=candidate,
field=parent_ns,
raise_errors=raise_errors)
else:
return False
errors = []
for s in schema:
try:
ret = validate(datum, schema=s,
field=parent_ns,
raise_errors=raise_errors)
if ret:
# We exit on the first passing type in Unions
return True
except ValidationError as e:
errors.extend(e.errors)
if raise_errors:
raise ValidationError(*errors)
return False | [
"def",
"validate_union",
"(",
"datum",
",",
"schema",
",",
"parent_ns",
"=",
"None",
",",
"raise_errors",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"datum",
",",
"tuple",
")",
":",
"(",
"name",
",",
"datum",
")",
"=",
"datum",
"for",
"candidate",
"in",
"schema",
":",
"if",
"extract_record_type",
"(",
"candidate",
")",
"==",
"'record'",
":",
"if",
"name",
"==",
"candidate",
"[",
"\"name\"",
"]",
":",
"return",
"validate",
"(",
"datum",
",",
"schema",
"=",
"candidate",
",",
"field",
"=",
"parent_ns",
",",
"raise_errors",
"=",
"raise_errors",
")",
"else",
":",
"return",
"False",
"errors",
"=",
"[",
"]",
"for",
"s",
"in",
"schema",
":",
"try",
":",
"ret",
"=",
"validate",
"(",
"datum",
",",
"schema",
"=",
"s",
",",
"field",
"=",
"parent_ns",
",",
"raise_errors",
"=",
"raise_errors",
")",
"if",
"ret",
":",
"# We exit on the first passing type in Unions",
"return",
"True",
"except",
"ValidationError",
"as",
"e",
":",
"errors",
".",
"extend",
"(",
"e",
".",
"errors",
")",
"if",
"raise_errors",
":",
"raise",
"ValidationError",
"(",
"*",
"errors",
")",
"return",
"False"
] | Check that the data is a list type with possible options to
validate as True.
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
parent_ns: str
parent namespace
raise_errors: bool
If true, raises ValidationError on invalid data | [
"Check",
"that",
"the",
"data",
"is",
"a",
"list",
"type",
"with",
"possible",
"options",
"to",
"validate",
"as",
"True",
"."
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_validation_py.py#L273-L313 |
246,666 | fastavro/fastavro | fastavro/_validation_py.py | validate_many | def validate_many(records, schema, raise_errors=True):
"""
Validate a list of data!
Parameters
----------
records: iterable
List of records to validate
schema: dict
Schema
raise_errors: bool, optional
If true, errors are raised for invalid data. If false, a simple
True (valid) or False (invalid) result is returned
Example::
from fastavro.validation import validate_many
schema = {...}
records = [{...}, {...}, ...]
validate_many(records, schema)
"""
errors = []
results = []
for record in records:
try:
results.append(validate(record, schema, raise_errors=raise_errors))
except ValidationError as e:
errors.extend(e.errors)
if raise_errors and errors:
raise ValidationError(*errors)
return all(results) | python | def validate_many(records, schema, raise_errors=True):
errors = []
results = []
for record in records:
try:
results.append(validate(record, schema, raise_errors=raise_errors))
except ValidationError as e:
errors.extend(e.errors)
if raise_errors and errors:
raise ValidationError(*errors)
return all(results) | [
"def",
"validate_many",
"(",
"records",
",",
"schema",
",",
"raise_errors",
"=",
"True",
")",
":",
"errors",
"=",
"[",
"]",
"results",
"=",
"[",
"]",
"for",
"record",
"in",
"records",
":",
"try",
":",
"results",
".",
"append",
"(",
"validate",
"(",
"record",
",",
"schema",
",",
"raise_errors",
"=",
"raise_errors",
")",
")",
"except",
"ValidationError",
"as",
"e",
":",
"errors",
".",
"extend",
"(",
"e",
".",
"errors",
")",
"if",
"raise_errors",
"and",
"errors",
":",
"raise",
"ValidationError",
"(",
"*",
"errors",
")",
"return",
"all",
"(",
"results",
")"
] | Validate a list of data!
Parameters
----------
records: iterable
List of records to validate
schema: dict
Schema
raise_errors: bool, optional
If true, errors are raised for invalid data. If false, a simple
True (valid) or False (invalid) result is returned
Example::
from fastavro.validation import validate_many
schema = {...}
records = [{...}, {...}, ...]
validate_many(records, schema) | [
"Validate",
"a",
"list",
"of",
"data!"
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_validation_py.py#L383-L414 |
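A usage sketch for batch validation; with raise_errors=False the call degrades to a plain boolean instead of raising with the collected errors.

import fastavro
from fastavro.validation import validate_many

schema = fastavro.parse_schema({
    'name': 'Point', 'type': 'record',
    'fields': [{'name': 'x', 'type': 'int'}, {'name': 'y', 'type': 'int'}],
})

good = [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}]
bad = [{'x': 1, 'y': 'oops'}]

print(validate_many(good, schema))                     # True
print(validate_many(bad, schema, raise_errors=False))  # False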
246,667 | fastavro/fastavro | fastavro/_schema_py.py | parse_schema | def parse_schema(schema, _write_hint=True, _force=False):
"""Returns a parsed avro schema
It is not necessary to call parse_schema but doing so and saving the parsed
schema for use later will make future operations faster as the schema will
not need to be reparsed.
Parameters
----------
schema: dict
Input schema
_write_hint: bool
Internal API argument specifying whether or not the __fastavro_parsed
marker should be added to the schema
_force: bool
Internal API argument. If True, the schema will always be parsed even
if it has been parsed and has the __fastavro_parsed marker
Example::
from fastavro import parse_schema
from fastavro import writer
parsed_schema = parse_schema(original_schema)
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
"""
if _force:
return _parse_schema(schema, "", _write_hint)
elif isinstance(schema, dict) and "__fastavro_parsed" in schema:
return schema
else:
return _parse_schema(schema, "", _write_hint) | python | def parse_schema(schema, _write_hint=True, _force=False):
if _force:
return _parse_schema(schema, "", _write_hint)
elif isinstance(schema, dict) and "__fastavro_parsed" in schema:
return schema
else:
return _parse_schema(schema, "", _write_hint) | [
"def",
"parse_schema",
"(",
"schema",
",",
"_write_hint",
"=",
"True",
",",
"_force",
"=",
"False",
")",
":",
"if",
"_force",
":",
"return",
"_parse_schema",
"(",
"schema",
",",
"\"\"",
",",
"_write_hint",
")",
"elif",
"isinstance",
"(",
"schema",
",",
"dict",
")",
"and",
"\"__fastavro_parsed\"",
"in",
"schema",
":",
"return",
"schema",
"else",
":",
"return",
"_parse_schema",
"(",
"schema",
",",
"\"\"",
",",
"_write_hint",
")"
] | Returns a parsed avro schema
It is not necessary to call parse_schema but doing so and saving the parsed
schema for use later will make future operations faster as the schema will
not need to be reparsed.
Parameters
----------
schema: dict
Input schema
_write_hint: bool
Internal API argument specifying whether or not the __fastavro_parsed
marker should be added to the schema
_force: bool
Internal API argument. If True, the schema will always be parsed even
if it has been parsed and has the __fastavro_parsed marker
Example::
from fastavro import parse_schema
from fastavro import writer
parsed_schema = parse_schema(original_schema)
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records) | [
"Returns",
"a",
"parsed",
"avro",
"schema"
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_schema_py.py#L53-L86 |
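Parsing once and reusing the result is the point of the __fastavro_parsed marker above; a short container-file round trip using fastavro's public writer and reader:

import io
import fastavro

schema = fastavro.parse_schema({
    'name': 'Weather', 'type': 'record',
    'fields': [{'name': 'station', 'type': 'string'},
               {'name': 'temp', 'type': 'int'}],
})

records = [{'station': '011990-99999', 'temp': 0},
           {'station': '011990-99999', 'temp': 22}]

buf = io.BytesIO()
fastavro.writer(buf, schema, records)    # the parsed schema is reused as-is

buf.seek(0)
print(list(fastavro.reader(buf)))        # both records round-trip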
246,668 | fastavro/fastavro | fastavro/_schema_py.py | load_schema | def load_schema(schema_path):
'''
Returns a schema loaded from the file at `schema_path`.
Will recursively load referenced schemas assuming they can be found in
files in the same directory and named with the convention
`<type_name>.avsc`.
'''
with open(schema_path) as fd:
schema = json.load(fd)
schema_dir, schema_file = path.split(schema_path)
return _load_schema(schema, schema_dir) | python | def load_schema(schema_path):
'''
Returns a schema loaded from the file at `schema_path`.
Will recursively load referenced schemas assuming they can be found in
files in the same directory and named with the convention
`<type_name>.avsc`.
'''
with open(schema_path) as fd:
schema = json.load(fd)
schema_dir, schema_file = path.split(schema_path)
return _load_schema(schema, schema_dir) | [
"def",
"load_schema",
"(",
"schema_path",
")",
":",
"with",
"open",
"(",
"schema_path",
")",
"as",
"fd",
":",
"schema",
"=",
"json",
".",
"load",
"(",
"fd",
")",
"schema_dir",
",",
"schema_file",
"=",
"path",
".",
"split",
"(",
"schema_path",
")",
"return",
"_load_schema",
"(",
"schema",
",",
"schema_dir",
")"
] | Returns a schema loaded from the file at `schema_path`.
Will recursively load referenced schemas assuming they can be found in
files in the same directory and named with the convention
`<type_name>.avsc`. | [
"Returns",
"a",
"schema",
"loaded",
"from",
"the",
"file",
"at",
"schema_path",
"."
] | bafe826293e19eb93e77bbb0f6adfa059c7884b2 | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_schema_py.py#L212-L223 |
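The sibling-file resolution means a record can refer to a named type kept in its own .avsc next to it. The layout below is a hypothetical illustration following the <type_name>.avsc convention the docstring describes, and the import path assumes load_schema is exposed under fastavro.schema.

# Hypothetical layout, one named type per file:
#   schemas/Address.avsc   defines {"name": "Address", "type": "record", ...}
#   schemas/Customer.avsc  has a field whose type is simply "Address"
from fastavro.schema import load_schema

customer_schema = load_schema('schemas/Customer.avsc')
# "Address" was resolved from schemas/Address.avsc while loading Customer.avsc.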
246,669 | alejandroautalan/pygubu | pygubu/widgets/simpletooltip.py | ToolTip.showtip | def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y, cx, cy = self.widget.bbox("insert")
x = x + self.widget.winfo_rootx() + 27
y = y + cy + self.widget.winfo_rooty() + 27
self.tipwindow = tw = tk.Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry("+%d+%d" % (x, y))
try:
# For Mac OS
tw.tk.call("::tk::unsupported::MacWindowStyle",
"style", tw._w,
"help", "noActivates")
except tk.TclError:
pass
label = tk.Label(tw, text=self.text, justify=tk.LEFT,
background="#ffffe0", foreground="black",
relief=tk.SOLID, borderwidth=1,
font=("tahoma", "8", "normal"))
label.pack(ipadx=1) | python | def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y, cx, cy = self.widget.bbox("insert")
x = x + self.widget.winfo_rootx() + 27
y = y + cy + self.widget.winfo_rooty() + 27
self.tipwindow = tw = tk.Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry("+%d+%d" % (x, y))
try:
# For Mac OS
tw.tk.call("::tk::unsupported::MacWindowStyle",
"style", tw._w,
"help", "noActivates")
except tk.TclError:
pass
label = tk.Label(tw, text=self.text, justify=tk.LEFT,
background="#ffffe0", foreground="black",
relief=tk.SOLID, borderwidth=1,
font=("tahoma", "8", "normal"))
label.pack(ipadx=1) | [
"def",
"showtip",
"(",
"self",
",",
"text",
")",
":",
"self",
".",
"text",
"=",
"text",
"if",
"self",
".",
"tipwindow",
"or",
"not",
"self",
".",
"text",
":",
"return",
"x",
",",
"y",
",",
"cx",
",",
"cy",
"=",
"self",
".",
"widget",
".",
"bbox",
"(",
"\"insert\"",
")",
"x",
"=",
"x",
"+",
"self",
".",
"widget",
".",
"winfo_rootx",
"(",
")",
"+",
"27",
"y",
"=",
"y",
"+",
"cy",
"+",
"self",
".",
"widget",
".",
"winfo_rooty",
"(",
")",
"+",
"27",
"self",
".",
"tipwindow",
"=",
"tw",
"=",
"tk",
".",
"Toplevel",
"(",
"self",
".",
"widget",
")",
"tw",
".",
"wm_overrideredirect",
"(",
"1",
")",
"tw",
".",
"wm_geometry",
"(",
"\"+%d+%d\"",
"%",
"(",
"x",
",",
"y",
")",
")",
"try",
":",
"# For Mac OS",
"tw",
".",
"tk",
".",
"call",
"(",
"\"::tk::unsupported::MacWindowStyle\"",
",",
"\"style\"",
",",
"tw",
".",
"_w",
",",
"\"help\"",
",",
"\"noActivates\"",
")",
"except",
"tk",
".",
"TclError",
":",
"pass",
"label",
"=",
"tk",
".",
"Label",
"(",
"tw",
",",
"text",
"=",
"self",
".",
"text",
",",
"justify",
"=",
"tk",
".",
"LEFT",
",",
"background",
"=",
"\"#ffffe0\"",
",",
"foreground",
"=",
"\"black\"",
",",
"relief",
"=",
"tk",
".",
"SOLID",
",",
"borderwidth",
"=",
"1",
",",
"font",
"=",
"(",
"\"tahoma\"",
",",
"\"8\"",
",",
"\"normal\"",
")",
")",
"label",
".",
"pack",
"(",
"ipadx",
"=",
"1",
")"
] | Display text in tooltip window | [
"Display",
"text",
"in",
"tooltip",
"window"
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/widgets/simpletooltip.py#L20-L42 |
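A typical way to use the class above is to create the tip lazily on <Enter> and dismiss it on <Leave>. The sketch below assumes the conventional recipe this class follows: a constructor taking the widget and a hidetip() counterpart, neither of which is shown in this record.

import tkinter as tk
from pygubu.widgets.simpletooltip import ToolTip  # module shown above

root = tk.Tk()
button = tk.Button(root, text='Save')
button.pack(padx=20, pady=20)

tip = ToolTip(button)                               # constructor signature assumed
button.bind('<Enter>', lambda e: tip.showtip('Write changes to disk'))
button.bind('<Leave>', lambda e: tip.hidetip())     # hidetip() assumed to exist

root.mainloop()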
246,670 | alejandroautalan/pygubu | pygubu/__init__.py | TkApplication.run | def run(self):
"""Ejecute the main loop."""
self.toplevel.protocol("WM_DELETE_WINDOW", self.__on_window_close)
self.toplevel.mainloop() | python | def run(self):
self.toplevel.protocol("WM_DELETE_WINDOW", self.__on_window_close)
self.toplevel.mainloop() | [
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"toplevel",
".",
"protocol",
"(",
"\"WM_DELETE_WINDOW\"",
",",
"self",
".",
"__on_window_close",
")",
"self",
".",
"toplevel",
".",
"mainloop",
"(",
")"
] | Execute the main loop. | [
"Ejecute",
"the",
"main",
"loop",
"."
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/__init__.py#L41-L45 |
246,671 | alejandroautalan/pygubu | examples/py2exe/myapp.py | MyApplication.create_regpoly | def create_regpoly(self, x0, y0, x1, y1, sides=0, start=90, extent=360, **kw):
"""Create a regular polygon"""
coords = self.__regpoly_coords(x0, y0, x1, y1, sides, start, extent)
return self.canvas.create_polygon(*coords, **kw) | python | def create_regpoly(self, x0, y0, x1, y1, sides=0, start=90, extent=360, **kw):
coords = self.__regpoly_coords(x0, y0, x1, y1, sides, start, extent)
return self.canvas.create_polygon(*coords, **kw) | [
"def",
"create_regpoly",
"(",
"self",
",",
"x0",
",",
"y0",
",",
"x1",
",",
"y1",
",",
"sides",
"=",
"0",
",",
"start",
"=",
"90",
",",
"extent",
"=",
"360",
",",
"*",
"*",
"kw",
")",
":",
"coords",
"=",
"self",
".",
"__regpoly_coords",
"(",
"x0",
",",
"y0",
",",
"x1",
",",
"y1",
",",
"sides",
",",
"start",
",",
"extent",
")",
"return",
"self",
".",
"canvas",
".",
"create_polygon",
"(",
"*",
"coords",
",",
"*",
"*",
"kw",
")"
] | Create a regular polygon | [
"Create",
"a",
"regular",
"polygon"
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/examples/py2exe/myapp.py#L131-L134 |
246,672 | alejandroautalan/pygubu | examples/py2exe/myapp.py | MyApplication.__regpoly_coords | def __regpoly_coords(self, x0, y0, x1, y1, sides, start, extent):
"""Create the coordinates of the regular polygon specified"""
coords = []
if extent == 0:
return coords
xm = (x0 + x1) / 2.
ym = (y0 + y1) / 2.
rx = xm - x0
ry = ym - y0
n = sides
if n == 0: # 0 sides => circle
n = round((rx + ry) * .5)
if n < 2:
n = 4
# Extent can be negative
dirv = 1 if extent > 0 else -1
if abs(extent) > 360:
extent = dirv * abs(extent) % 360
step = dirv * 360 / n
numsteps = 1 + extent / float(step)
numsteps_int = int(numsteps)
i = 0
while i < numsteps_int:
rad = (start - i * step) * DEG2RAD
x = rx * math.cos(rad)
y = ry * math.sin(rad)
coords.append((xm+x, ym-y))
i += 1
# Figure out where last segment should end
if numsteps != numsteps_int:
# Vector V1 is last drawn vertex (x,y) from above
# Vector V2 is the edge of the polygon
rad2 = (start - numsteps_int * step) * DEG2RAD
x2 = rx * math.cos(rad2) - x
y2 = ry * math.sin(rad2) - y
# Vector V3 is unit vector in direction we end at
rad3 = (start - extent) * DEG2RAD
x3 = math.cos(rad3)
y3 = math.sin(rad3)
# Find where V3 crosses V1+V2 => find j s.t. V1 + kV2 = jV3
j = (x*y2 - x2*y) / (x3*y2 - x2*y3)
coords.append((xm + j * x3, ym - j * y3))
return coords | python | def __regpoly_coords(self, x0, y0, x1, y1, sides, start, extent):
coords = []
if extent == 0:
return coords
xm = (x0 + x1) / 2.
ym = (y0 + y1) / 2.
rx = xm - x0
ry = ym - y0
n = sides
if n == 0: # 0 sides => circle
n = round((rx + ry) * .5)
if n < 2:
n = 4
# Extent can be negative
dirv = 1 if extent > 0 else -1
if abs(extent) > 360:
extent = dirv * abs(extent) % 360
step = dirv * 360 / n
numsteps = 1 + extent / float(step)
numsteps_int = int(numsteps)
i = 0
while i < numsteps_int:
rad = (start - i * step) * DEG2RAD
x = rx * math.cos(rad)
y = ry * math.sin(rad)
coords.append((xm+x, ym-y))
i += 1
# Figure out where last segment should end
if numsteps != numsteps_int:
# Vector V1 is last drawn vertex (x,y) from above
# Vector V2 is the edge of the polygon
rad2 = (start - numsteps_int * step) * DEG2RAD
x2 = rx * math.cos(rad2) - x
y2 = ry * math.sin(rad2) - y
# Vector V3 is unit vector in direction we end at
rad3 = (start - extent) * DEG2RAD
x3 = math.cos(rad3)
y3 = math.sin(rad3)
# Find where V3 crosses V1+V2 => find j s.t. V1 + kV2 = jV3
j = (x*y2 - x2*y) / (x3*y2 - x2*y3)
coords.append((xm + j * x3, ym - j * y3))
return coords | [
"def",
"__regpoly_coords",
"(",
"self",
",",
"x0",
",",
"y0",
",",
"x1",
",",
"y1",
",",
"sides",
",",
"start",
",",
"extent",
")",
":",
"coords",
"=",
"[",
"]",
"if",
"extent",
"==",
"0",
":",
"return",
"coords",
"xm",
"=",
"(",
"x0",
"+",
"x1",
")",
"/",
"2.",
"ym",
"=",
"(",
"y0",
"+",
"y1",
")",
"/",
"2.",
"rx",
"=",
"xm",
"-",
"x0",
"ry",
"=",
"ym",
"-",
"y0",
"n",
"=",
"sides",
"if",
"n",
"==",
"0",
":",
"# 0 sides => circle",
"n",
"=",
"round",
"(",
"(",
"rx",
"+",
"ry",
")",
"*",
".5",
")",
"if",
"n",
"<",
"2",
":",
"n",
"=",
"4",
"# Extent can be negative",
"dirv",
"=",
"1",
"if",
"extent",
">",
"0",
"else",
"-",
"1",
"if",
"abs",
"(",
"extent",
")",
">",
"360",
":",
"extent",
"=",
"dirv",
"*",
"abs",
"(",
"extent",
")",
"%",
"360",
"step",
"=",
"dirv",
"*",
"360",
"/",
"n",
"numsteps",
"=",
"1",
"+",
"extent",
"/",
"float",
"(",
"step",
")",
"numsteps_int",
"=",
"int",
"(",
"numsteps",
")",
"i",
"=",
"0",
"while",
"i",
"<",
"numsteps_int",
":",
"rad",
"=",
"(",
"start",
"-",
"i",
"*",
"step",
")",
"*",
"DEG2RAD",
"x",
"=",
"rx",
"*",
"math",
".",
"cos",
"(",
"rad",
")",
"y",
"=",
"ry",
"*",
"math",
".",
"sin",
"(",
"rad",
")",
"coords",
".",
"append",
"(",
"(",
"xm",
"+",
"x",
",",
"ym",
"-",
"y",
")",
")",
"i",
"+=",
"1",
"# Figure out where last segment should end",
"if",
"numsteps",
"!=",
"numsteps_int",
":",
"# Vecter V1 is last drawn vertext (x,y) from above",
"# Vector V2 is the edge of the polygon",
"rad2",
"=",
"(",
"start",
"-",
"numsteps_int",
"*",
"step",
")",
"*",
"DEG2RAD",
"x2",
"=",
"rx",
"*",
"math",
".",
"cos",
"(",
"rad2",
")",
"-",
"x",
"y2",
"=",
"ry",
"*",
"math",
".",
"sin",
"(",
"rad2",
")",
"-",
"y",
"# Vector V3 is unit vector in direction we end at",
"rad3",
"=",
"(",
"start",
"-",
"extent",
")",
"*",
"DEG2RAD",
"x3",
"=",
"math",
".",
"cos",
"(",
"rad3",
")",
"y3",
"=",
"math",
".",
"sin",
"(",
"rad3",
")",
"# Find where V3 crosses V1+V2 => find j s.t. V1 + kV2 = jV3",
"j",
"=",
"(",
"x",
"*",
"y2",
"-",
"x2",
"*",
"y",
")",
"/",
"(",
"x3",
"*",
"y2",
"-",
"x2",
"*",
"y3",
")",
"coords",
".",
"append",
"(",
"(",
"xm",
"+",
"j",
"*",
"x3",
",",
"ym",
"-",
"j",
"*",
"y3",
")",
")",
"return",
"coords"
] | Create the coordinates of the regular polygon specified | [
"Create",
"the",
"coordinates",
"of",
"the",
"regular",
"polygon",
"specified"
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/examples/py2exe/myapp.py#L136-L189 |
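For the common full-extent case the vertex math above collapses to stepping around an ellipse inscribed in the bounding box. A standalone check of that simple case; this mirrors, but does not call, the private helper.

import math

def regpoly_coords(x0, y0, x1, y1, sides, start=90):
    # Vertices of a regular polygon inscribed in the bounding box (full extent).
    xm, ym = (x0 + x1) / 2.0, (y0 + y1) / 2.0
    rx, ry = xm - x0, ym - y0
    step = 360.0 / sides
    coords = []
    for i in range(sides):
        rad = math.radians(start - i * step)
        coords.append((xm + rx * math.cos(rad), ym - ry * math.sin(rad)))
    return coords

# A square inscribed in a 100x100 box; y grows downward in canvas coordinates,
# so the vertices come out as (50, 0), (100, 50), (50, 100), (0, 50).
for x, y in regpoly_coords(0, 0, 100, 100, sides=4):
    print(round(x, 1), round(y, 1))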
246,673 | alejandroautalan/pygubu | pygubu/builder/__init__.py | Builder.get_image | def get_image(self, path):
"""Return tk image corresponding to name which is taken form path."""
image = ''
name = os.path.basename(path)
if not StockImage.is_registered(name):
ipath = self.__find_image(path)
if ipath is not None:
StockImage.register(name, ipath)
else:
msg = "Image '{0}' not found in resource paths.".format(name)
logger.warning(msg)
try:
image = StockImage.get(name)
except StockImageException:
# TODO: notify something here.
pass
return image | python | def get_image(self, path):
image = ''
name = os.path.basename(path)
if not StockImage.is_registered(name):
ipath = self.__find_image(path)
if ipath is not None:
StockImage.register(name, ipath)
else:
msg = "Image '{0}' not found in resource paths.".format(name)
logger.warning(msg)
try:
image = StockImage.get(name)
except StockImageException:
# TODO: notify something here.
pass
return image | [
"def",
"get_image",
"(",
"self",
",",
"path",
")",
":",
"image",
"=",
"''",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
"if",
"not",
"StockImage",
".",
"is_registered",
"(",
"name",
")",
":",
"ipath",
"=",
"self",
".",
"__find_image",
"(",
"path",
")",
"if",
"ipath",
"is",
"not",
"None",
":",
"StockImage",
".",
"register",
"(",
"name",
",",
"ipath",
")",
"else",
":",
"msg",
"=",
"\"Image '{0}' not found in resource paths.\"",
".",
"format",
"(",
"name",
")",
"logger",
".",
"warning",
"(",
"msg",
")",
"try",
":",
"image",
"=",
"StockImage",
".",
"get",
"(",
"name",
")",
"except",
"StockImageException",
":",
"# TODO: notify something here.",
"pass",
"return",
"image"
] | Return tk image corresponding to name which is taken form path. | [
"Return",
"tk",
"image",
"corresponding",
"to",
"name",
"which",
"is",
"taken",
"form",
"path",
"."
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L195-L211 |
246,674 | alejandroautalan/pygubu | pygubu/builder/__init__.py | Builder.import_variables | def import_variables(self, container, varnames=None):
"""Helper method to avoid call get_variable for every variable."""
if varnames is None:
for keyword in self.tkvariables:
setattr(container, keyword, self.tkvariables[keyword])
else:
for keyword in varnames:
if keyword in self.tkvariables:
setattr(container, keyword, self.tkvariables[keyword]) | python | def import_variables(self, container, varnames=None):
if varnames is None:
for keyword in self.tkvariables:
setattr(container, keyword, self.tkvariables[keyword])
else:
for keyword in varnames:
if keyword in self.tkvariables:
setattr(container, keyword, self.tkvariables[keyword]) | [
"def",
"import_variables",
"(",
"self",
",",
"container",
",",
"varnames",
"=",
"None",
")",
":",
"if",
"varnames",
"is",
"None",
":",
"for",
"keyword",
"in",
"self",
".",
"tkvariables",
":",
"setattr",
"(",
"container",
",",
"keyword",
",",
"self",
".",
"tkvariables",
"[",
"keyword",
"]",
")",
"else",
":",
"for",
"keyword",
"in",
"varnames",
":",
"if",
"keyword",
"in",
"self",
".",
"tkvariables",
":",
"setattr",
"(",
"container",
",",
"keyword",
",",
"self",
".",
"tkvariables",
"[",
"keyword",
"]",
")"
] | Helper method to avoid calling get_variable for every variable. | [
"Helper",
"method",
"to",
"avoid",
"call",
"get_variable",
"for",
"every",
"variable",
"."
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L228-L236 |
246,675 | alejandroautalan/pygubu | pygubu/builder/__init__.py | Builder.create_variable | def create_variable(self, varname, vtype=None):
"""Create a tk variable.
If the variable was created previously return that instance.
"""
var_types = ('string', 'int', 'boolean', 'double')
vname = varname
var = None
type_from_name = 'string' # default type
if ':' in varname:
type_from_name, vname = varname.split(':')
# Fix incorrect order bug #33
if type_from_name not in (var_types):
# Swap order
type_from_name, vname = vname, type_from_name
if type_from_name not in (var_types):
raise Exception('Undefined variable type in "{0}"'.format(varname))
if vname in self.tkvariables:
var = self.tkvariables[vname]
else:
if vtype is None:
# get type from name
if type_from_name == 'int':
var = tkinter.IntVar()
elif type_from_name == 'boolean':
var = tkinter.BooleanVar()
elif type_from_name == 'double':
var = tkinter.DoubleVar()
else:
var = tkinter.StringVar()
else:
var = vtype()
self.tkvariables[vname] = var
return var | python | def create_variable(self, varname, vtype=None):
var_types = ('string', 'int', 'boolean', 'double')
vname = varname
var = None
type_from_name = 'string' # default type
if ':' in varname:
type_from_name, vname = varname.split(':')
# Fix incorrect order bug #33
if type_from_name not in (var_types):
# Swap order
type_from_name, vname = vname, type_from_name
if type_from_name not in (var_types):
raise Exception('Undefined variable type in "{0}"'.format(varname))
if vname in self.tkvariables:
var = self.tkvariables[vname]
else:
if vtype is None:
# get type from name
if type_from_name == 'int':
var = tkinter.IntVar()
elif type_from_name == 'boolean':
var = tkinter.BooleanVar()
elif type_from_name == 'double':
var = tkinter.DoubleVar()
else:
var = tkinter.StringVar()
else:
var = vtype()
self.tkvariables[vname] = var
return var | [
"def",
"create_variable",
"(",
"self",
",",
"varname",
",",
"vtype",
"=",
"None",
")",
":",
"var_types",
"=",
"(",
"'string'",
",",
"'int'",
",",
"'boolean'",
",",
"'double'",
")",
"vname",
"=",
"varname",
"var",
"=",
"None",
"type_from_name",
"=",
"'string'",
"# default type",
"if",
"':'",
"in",
"varname",
":",
"type_from_name",
",",
"vname",
"=",
"varname",
".",
"split",
"(",
"':'",
")",
"# Fix incorrect order bug #33",
"if",
"type_from_name",
"not",
"in",
"(",
"var_types",
")",
":",
"# Swap order",
"type_from_name",
",",
"vname",
"=",
"vname",
",",
"type_from_name",
"if",
"type_from_name",
"not",
"in",
"(",
"var_types",
")",
":",
"raise",
"Exception",
"(",
"'Undefined variable type in \"{0}\"'",
".",
"format",
"(",
"varname",
")",
")",
"if",
"vname",
"in",
"self",
".",
"tkvariables",
":",
"var",
"=",
"self",
".",
"tkvariables",
"[",
"vname",
"]",
"else",
":",
"if",
"vtype",
"is",
"None",
":",
"# get type from name",
"if",
"type_from_name",
"==",
"'int'",
":",
"var",
"=",
"tkinter",
".",
"IntVar",
"(",
")",
"elif",
"type_from_name",
"==",
"'boolean'",
":",
"var",
"=",
"tkinter",
".",
"BooleanVar",
"(",
")",
"elif",
"type_from_name",
"==",
"'double'",
":",
"var",
"=",
"tkinter",
".",
"DoubleVar",
"(",
")",
"else",
":",
"var",
"=",
"tkinter",
".",
"StringVar",
"(",
")",
"else",
":",
"var",
"=",
"vtype",
"(",
")",
"self",
".",
"tkvariables",
"[",
"vname",
"]",
"=",
"var",
"return",
"var"
] | Create a tk variable.
If the variable was created previously return that instance. | [
"Create",
"a",
"tk",
"variable",
".",
"If",
"the",
"variable",
"was",
"created",
"previously",
"return",
"that",
"instance",
"."
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L238-L273 |
246,676 | alejandroautalan/pygubu | pygubu/builder/__init__.py | Builder.add_from_file | def add_from_file(self, fpath):
"""Load ui definition from file."""
if self.tree is None:
base, name = os.path.split(fpath)
self.add_resource_path(base)
self.tree = tree = ET.parse(fpath)
self.root = tree.getroot()
self.objects = {}
else:
# TODO: append to current tree
pass | python | def add_from_file(self, fpath):
if self.tree is None:
base, name = os.path.split(fpath)
self.add_resource_path(base)
self.tree = tree = ET.parse(fpath)
self.root = tree.getroot()
self.objects = {}
else:
# TODO: append to current tree
pass | [
"def",
"add_from_file",
"(",
"self",
",",
"fpath",
")",
":",
"if",
"self",
".",
"tree",
"is",
"None",
":",
"base",
",",
"name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"fpath",
")",
"self",
".",
"add_resource_path",
"(",
"base",
")",
"self",
".",
"tree",
"=",
"tree",
"=",
"ET",
".",
"parse",
"(",
"fpath",
")",
"self",
".",
"root",
"=",
"tree",
".",
"getroot",
"(",
")",
"self",
".",
"objects",
"=",
"{",
"}",
"else",
":",
"# TODO: append to current tree",
"pass"
] | Load ui definition from file. | [
"Load",
"ui",
"definition",
"from",
"file",
"."
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L275-L285 |
246,677 | alejandroautalan/pygubu | pygubu/builder/__init__.py | Builder.add_from_string | def add_from_string(self, strdata):
"""Load ui definition from string."""
if self.tree is None:
self.tree = tree = ET.ElementTree(ET.fromstring(strdata))
self.root = tree.getroot()
self.objects = {}
else:
# TODO: append to current tree
pass | python | def add_from_string(self, strdata):
if self.tree is None:
self.tree = tree = ET.ElementTree(ET.fromstring(strdata))
self.root = tree.getroot()
self.objects = {}
else:
# TODO: append to current tree
pass | [
"def",
"add_from_string",
"(",
"self",
",",
"strdata",
")",
":",
"if",
"self",
".",
"tree",
"is",
"None",
":",
"self",
".",
"tree",
"=",
"tree",
"=",
"ET",
".",
"ElementTree",
"(",
"ET",
".",
"fromstring",
"(",
"strdata",
")",
")",
"self",
".",
"root",
"=",
"tree",
".",
"getroot",
"(",
")",
"self",
".",
"objects",
"=",
"{",
"}",
"else",
":",
"# TODO: append to current tree",
"pass"
] | Load ui definition from string. | [
"Load",
"ui",
"definition",
"from",
"string",
"."
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L287-L295 |
246,678 | alejandroautalan/pygubu | pygubu/builder/__init__.py | Builder.add_from_xmlnode | def add_from_xmlnode(self, element):
"""Load ui definition from xml.etree.Element node."""
if self.tree is None:
root = ET.Element('interface')
root.append(element)
self.tree = tree = ET.ElementTree(root)
self.root = tree.getroot()
self.objects = {}
# ET.dump(tree)
else:
# TODO: append to current tree
pass | python | def add_from_xmlnode(self, element):
if self.tree is None:
root = ET.Element('interface')
root.append(element)
self.tree = tree = ET.ElementTree(root)
self.root = tree.getroot()
self.objects = {}
# ET.dump(tree)
else:
# TODO: append to current tree
pass | [
"def",
"add_from_xmlnode",
"(",
"self",
",",
"element",
")",
":",
"if",
"self",
".",
"tree",
"is",
"None",
":",
"root",
"=",
"ET",
".",
"Element",
"(",
"'interface'",
")",
"root",
".",
"append",
"(",
"element",
")",
"self",
".",
"tree",
"=",
"tree",
"=",
"ET",
".",
"ElementTree",
"(",
"root",
")",
"self",
".",
"root",
"=",
"tree",
".",
"getroot",
"(",
")",
"self",
".",
"objects",
"=",
"{",
"}",
"# ET.dump(tree)",
"else",
":",
"# TODO: append to current tree",
"pass"
] | Load ui definition from xml.etree.Element node. | [
"Load",
"ui",
"definition",
"from",
"xml",
".",
"etree",
".",
"Element",
"node",
"."
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L297-L308 |
246,679 | alejandroautalan/pygubu | pygubu/builder/__init__.py | Builder.get_object | def get_object(self, name, master=None):
"""Find and create the widget named name.
Use master as parent. If widget was already created, return
that instance."""
widget = None
if name in self.objects:
widget = self.objects[name].widget
else:
xpath = ".//object[@id='{0}']".format(name)
node = self.tree.find(xpath)
if node is not None:
root = BuilderObject(self, dict())
root.widget = master
bobject = self._realize(root, node)
widget = bobject.widget
if widget is None:
msg = 'Widget "{0}" not defined.'.format(name)
raise Exception(msg)
return widget | python | def get_object(self, name, master=None):
widget = None
if name in self.objects:
widget = self.objects[name].widget
else:
xpath = ".//object[@id='{0}']".format(name)
node = self.tree.find(xpath)
if node is not None:
root = BuilderObject(self, dict())
root.widget = master
bobject = self._realize(root, node)
widget = bobject.widget
if widget is None:
msg = 'Widget "{0}" not defined.'.format(name)
raise Exception(msg)
return widget | [
"def",
"get_object",
"(",
"self",
",",
"name",
",",
"master",
"=",
"None",
")",
":",
"widget",
"=",
"None",
"if",
"name",
"in",
"self",
".",
"objects",
":",
"widget",
"=",
"self",
".",
"objects",
"[",
"name",
"]",
".",
"widget",
"else",
":",
"xpath",
"=",
"\".//object[@id='{0}']\"",
".",
"format",
"(",
"name",
")",
"node",
"=",
"self",
".",
"tree",
".",
"find",
"(",
"xpath",
")",
"if",
"node",
"is",
"not",
"None",
":",
"root",
"=",
"BuilderObject",
"(",
"self",
",",
"dict",
"(",
")",
")",
"root",
".",
"widget",
"=",
"master",
"bobject",
"=",
"self",
".",
"_realize",
"(",
"root",
",",
"node",
")",
"widget",
"=",
"bobject",
".",
"widget",
"if",
"widget",
"is",
"None",
":",
"msg",
"=",
"'Widget \"{0}\" not defined.'",
".",
"format",
"(",
"name",
")",
"raise",
"Exception",
"(",
"msg",
")",
"return",
"widget"
] | Find and create the widget named name.
Use master as parent. If widget was already created, return
that instance. | [
"Find",
"and",
"create",
"the",
"widget",
"named",
"name",
".",
"Use",
"master",
"as",
"parent",
".",
"If",
"widget",
"was",
"already",
"created",
"return",
"that",
"instance",
"."
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L310-L328 |
246,680 | alejandroautalan/pygubu | pygubu/builder/__init__.py | Builder._realize | def _realize(self, master, element):
"""Builds a widget from xml element using master as parent."""
data = data_xmlnode_to_dict(element, self.translator)
cname = data['class']
uniqueid = data['id']
if cname not in CLASS_MAP:
self._import_class(cname)
if cname in CLASS_MAP:
self._pre_process_data(data)
parent = CLASS_MAP[cname].builder.factory(self, data)
widget = parent.realize(master)
self.objects[uniqueid] = parent
xpath = "./child"
children = element.findall(xpath)
for child in children:
child_xml = child.find('./object')
child = self._realize(parent, child_xml)
parent.add_child(child)
parent.configure()
parent.layout()
return parent
else:
raise Exception('Class "{0}" not mapped'.format(cname)) | python | def _realize(self, master, element):
data = data_xmlnode_to_dict(element, self.translator)
cname = data['class']
uniqueid = data['id']
if cname not in CLASS_MAP:
self._import_class(cname)
if cname in CLASS_MAP:
self._pre_process_data(data)
parent = CLASS_MAP[cname].builder.factory(self, data)
widget = parent.realize(master)
self.objects[uniqueid] = parent
xpath = "./child"
children = element.findall(xpath)
for child in children:
child_xml = child.find('./object')
child = self._realize(parent, child_xml)
parent.add_child(child)
parent.configure()
parent.layout()
return parent
else:
raise Exception('Class "{0}" not mapped'.format(cname)) | [
"def",
"_realize",
"(",
"self",
",",
"master",
",",
"element",
")",
":",
"data",
"=",
"data_xmlnode_to_dict",
"(",
"element",
",",
"self",
".",
"translator",
")",
"cname",
"=",
"data",
"[",
"'class'",
"]",
"uniqueid",
"=",
"data",
"[",
"'id'",
"]",
"if",
"cname",
"not",
"in",
"CLASS_MAP",
":",
"self",
".",
"_import_class",
"(",
"cname",
")",
"if",
"cname",
"in",
"CLASS_MAP",
":",
"self",
".",
"_pre_process_data",
"(",
"data",
")",
"parent",
"=",
"CLASS_MAP",
"[",
"cname",
"]",
".",
"builder",
".",
"factory",
"(",
"self",
",",
"data",
")",
"widget",
"=",
"parent",
".",
"realize",
"(",
"master",
")",
"self",
".",
"objects",
"[",
"uniqueid",
"]",
"=",
"parent",
"xpath",
"=",
"\"./child\"",
"children",
"=",
"element",
".",
"findall",
"(",
"xpath",
")",
"for",
"child",
"in",
"children",
":",
"child_xml",
"=",
"child",
".",
"find",
"(",
"'./object'",
")",
"child",
"=",
"self",
".",
"_realize",
"(",
"parent",
",",
"child_xml",
")",
"parent",
".",
"add_child",
"(",
"child",
")",
"parent",
".",
"configure",
"(",
")",
"parent",
".",
"layout",
"(",
")",
"return",
"parent",
"else",
":",
"raise",
"Exception",
"(",
"'Class \"{0}\" not mapped'",
".",
"format",
"(",
"cname",
")",
")"
] | Builds a widget from xml element using master as parent. | [
"Builds",
"a",
"widget",
"from",
"xml",
"element",
"using",
"master",
"as",
"parent",
"."
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L349-L377 |
246,681 | alejandroautalan/pygubu | pygubu/builder/__init__.py | Builder.connect_callbacks | def connect_callbacks(self, callbacks_bag):
"""Connect callbacks specified in callbacks_bag with callbacks
defined in the ui definition.
Return a list with the name of the callbacks not connected.
"""
notconnected = []
for wname, builderobj in self.objects.items():
missing = builderobj.connect_commands(callbacks_bag)
if missing is not None:
notconnected.extend(missing)
missing = builderobj.connect_bindings(callbacks_bag)
if missing is not None:
notconnected.extend(missing)
if notconnected:
notconnected = list(set(notconnected))
msg = 'Missing callbacks for commands: {}'.format(notconnected)
logger.warning(msg)
return notconnected
else:
return None | python | def connect_callbacks(self, callbacks_bag):
notconnected = []
for wname, builderobj in self.objects.items():
missing = builderobj.connect_commands(callbacks_bag)
if missing is not None:
notconnected.extend(missing)
missing = builderobj.connect_bindings(callbacks_bag)
if missing is not None:
notconnected.extend(missing)
if notconnected:
notconnected = list(set(notconnected))
msg = 'Missing callbacks for commands: {}'.format(notconnected)
logger.warning(msg)
return notconnected
else:
return None | [
"def",
"connect_callbacks",
"(",
"self",
",",
"callbacks_bag",
")",
":",
"notconnected",
"=",
"[",
"]",
"for",
"wname",
",",
"builderobj",
"in",
"self",
".",
"objects",
".",
"items",
"(",
")",
":",
"missing",
"=",
"builderobj",
".",
"connect_commands",
"(",
"callbacks_bag",
")",
"if",
"missing",
"is",
"not",
"None",
":",
"notconnected",
".",
"extend",
"(",
"missing",
")",
"missing",
"=",
"builderobj",
".",
"connect_bindings",
"(",
"callbacks_bag",
")",
"if",
"missing",
"is",
"not",
"None",
":",
"notconnected",
".",
"extend",
"(",
"missing",
")",
"if",
"notconnected",
":",
"notconnected",
"=",
"list",
"(",
"set",
"(",
"notconnected",
")",
")",
"msg",
"=",
"'Missing callbacks for commands: {}'",
".",
"format",
"(",
"notconnected",
")",
"logger",
".",
"warning",
"(",
"msg",
")",
"return",
"notconnected",
"else",
":",
"return",
"None"
] | Connect callbacks specified in callbacks_bag with callbacks
defined in the ui definition.
Return a list with the name of the callbacks not connected. | [
"Connect",
"callbacks",
"specified",
"in",
"callbacks_bag",
"with",
"callbacks",
"defined",
"in",
"the",
"ui",
"definition",
".",
"Return",
"a",
"list",
"with",
"the",
"name",
"of",
"the",
"callbacks",
"not",
"connected",
"."
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L388-L407 |
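Taken together, the Builder records above (add_from_file, get_object, connect_callbacks) cover the usual load-and-wire workflow. A minimal sketch, assuming a UI file named app.ui that defines a widget with id 'mainwindow' and references an 'on_quit' command (file name, widget id, and callback name are hypothetical):

import tkinter as tk
import pygubu

root = tk.Tk()
builder = pygubu.Builder()
builder.add_from_file('app.ui')                       # load the XML UI definition
mainwindow = builder.get_object('mainwindow', root)   # build (or fetch) the widget tree under root

def on_quit():
    root.destroy()

# Returns the list of command names it could not connect, or None if all were wired.
builder.connect_callbacks({'on_quit': on_quit})
root.mainloop()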
246,682 | alejandroautalan/pygubu | pygubudesigner/util/selecttool.py | SelectTool._start_selecting | def _start_selecting(self, event):
"""Comienza con el proceso de seleccion."""
self._selecting = True
canvas = self._canvas
x = canvas.canvasx(event.x)
y = canvas.canvasy(event.y)
self._sstart = (x, y)
if not self._sobject:
self._sobject = canvas.create_rectangle(
self._sstart[0], self._sstart[1], x, y,
dash=(3,5), outline='#0000ff'
)
canvas.itemconfigure(self._sobject, state=tk.NORMAL) | python | def _start_selecting(self, event):
self._selecting = True
canvas = self._canvas
x = canvas.canvasx(event.x)
y = canvas.canvasy(event.y)
self._sstart = (x, y)
if not self._sobject:
self._sobject = canvas.create_rectangle(
self._sstart[0], self._sstart[1], x, y,
dash=(3,5), outline='#0000ff'
)
canvas.itemconfigure(self._sobject, state=tk.NORMAL) | [
"def",
"_start_selecting",
"(",
"self",
",",
"event",
")",
":",
"self",
".",
"_selecting",
"=",
"True",
"canvas",
"=",
"self",
".",
"_canvas",
"x",
"=",
"canvas",
".",
"canvasx",
"(",
"event",
".",
"x",
")",
"y",
"=",
"canvas",
".",
"canvasy",
"(",
"event",
".",
"y",
")",
"self",
".",
"_sstart",
"=",
"(",
"x",
",",
"y",
")",
"if",
"not",
"self",
".",
"_sobject",
":",
"self",
".",
"_sobject",
"=",
"canvas",
".",
"create_rectangle",
"(",
"self",
".",
"_sstart",
"[",
"0",
"]",
",",
"self",
".",
"_sstart",
"[",
"1",
"]",
",",
"x",
",",
"y",
",",
"dash",
"=",
"(",
"3",
",",
"5",
")",
",",
"outline",
"=",
"'#0000ff'",
")",
"canvas",
".",
"itemconfigure",
"(",
"self",
".",
"_sobject",
",",
"state",
"=",
"tk",
".",
"NORMAL",
")"
] | Comienza con el proceso de seleccion. | [
"Comienza",
"con",
"el",
"proceso",
"de",
"seleccion",
"."
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/util/selecttool.py#L51-L63 |
246,683 | alejandroautalan/pygubu | pygubudesigner/util/selecttool.py | SelectTool._keep_selecting | def _keep_selecting(self, event):
"""Continua con el proceso de seleccion.
Crea o redimensiona el cuadro de seleccion de acuerdo con
la posicion del raton."""
canvas = self._canvas
x = canvas.canvasx(event.x)
y = canvas.canvasy(event.y)
canvas.coords(self._sobject,
self._sstart[0], self._sstart[1], x, y) | python | def _keep_selecting(self, event):
canvas = self._canvas
x = canvas.canvasx(event.x)
y = canvas.canvasy(event.y)
canvas.coords(self._sobject,
self._sstart[0], self._sstart[1], x, y) | [
"def",
"_keep_selecting",
"(",
"self",
",",
"event",
")",
":",
"canvas",
"=",
"self",
".",
"_canvas",
"x",
"=",
"canvas",
".",
"canvasx",
"(",
"event",
".",
"x",
")",
"y",
"=",
"canvas",
".",
"canvasy",
"(",
"event",
".",
"y",
")",
"canvas",
".",
"coords",
"(",
"self",
".",
"_sobject",
",",
"self",
".",
"_sstart",
"[",
"0",
"]",
",",
"self",
".",
"_sstart",
"[",
"1",
"]",
",",
"x",
",",
"y",
")"
] | Continua con el proceso de seleccion.
Crea o redimensiona el cuadro de seleccion de acuerdo con
la posicion del raton. | [
"Continua",
"con",
"el",
"proceso",
"de",
"seleccion",
".",
"Crea",
"o",
"redimensiona",
"el",
"cuadro",
"de",
"seleccion",
"de",
"acuerdo",
"con",
"la",
"posicion",
"del",
"raton",
"."
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/util/selecttool.py#L65-L73 |
246,684 | alejandroautalan/pygubu | pygubudesigner/util/selecttool.py | SelectTool._finish_selecting | def _finish_selecting(self, event):
"""Finaliza la seleccion.
Marca como seleccionados todos los objetos que se encuentran
dentro del recuadro de seleccion."""
self._selecting = False
canvas = self._canvas
x = canvas.canvasx(event.x)
y = canvas.canvasy(event.y)
canvas.coords(self._sobject, -1, -1, -1, -1)
canvas.itemconfigure(self._sobject, state=tk.HIDDEN)
sel_region = self._sstart[0], self._sstart[1], x, y
canvas.region_selected = sel_region
canvas.event_generate('<<RegionSelected>>') | python | def _finish_selecting(self, event):
self._selecting = False
canvas = self._canvas
x = canvas.canvasx(event.x)
y = canvas.canvasy(event.y)
canvas.coords(self._sobject, -1, -1, -1, -1)
canvas.itemconfigure(self._sobject, state=tk.HIDDEN)
sel_region = self._sstart[0], self._sstart[1], x, y
canvas.region_selected = sel_region
canvas.event_generate('<<RegionSelected>>') | [
"def",
"_finish_selecting",
"(",
"self",
",",
"event",
")",
":",
"self",
".",
"_selecting",
"=",
"False",
"canvas",
"=",
"self",
".",
"_canvas",
"x",
"=",
"canvas",
".",
"canvasx",
"(",
"event",
".",
"x",
")",
"y",
"=",
"canvas",
".",
"canvasy",
"(",
"event",
".",
"y",
")",
"canvas",
".",
"coords",
"(",
"self",
".",
"_sobject",
",",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
"canvas",
".",
"itemconfigure",
"(",
"self",
".",
"_sobject",
",",
"state",
"=",
"tk",
".",
"HIDDEN",
")",
"sel_region",
"=",
"self",
".",
"_sstart",
"[",
"0",
"]",
",",
"self",
".",
"_sstart",
"[",
"1",
"]",
",",
"x",
",",
"y",
"canvas",
".",
"region_selected",
"=",
"sel_region",
"canvas",
".",
"event_generate",
"(",
"'<<RegionSelected>>'",
")"
] | Finaliza la seleccion.
Marca como seleccionados todos los objetos que se encuentran
dentro del recuadro de seleccion. | [
"Finaliza",
"la",
"seleccion",
".",
"Marca",
"como",
"seleccionados",
"todos",
"los",
"objetos",
"que",
"se",
"encuentran",
"dentro",
"del",
"recuadro",
"de",
"seleccion",
"."
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/util/selecttool.py#L75-L89 |
246,685 | alejandroautalan/pygubu | pygubu/widgets/calendarframe.py | matrix_coords | def matrix_coords(rows, cols, rowh, colw, ox=0, oy=0):
"Generate coords for a matrix of rects"
for i, f, c in rowmajor(rows, cols):
x = ox + c * colw
y = oy + f * rowh
x1 = x + colw
y1 = y + rowh
yield (i, x, y, x1, y1) | python | def matrix_coords(rows, cols, rowh, colw, ox=0, oy=0):
"Generate coords for a matrix of rects"
for i, f, c in rowmajor(rows, cols):
x = ox + c * colw
y = oy + f * rowh
x1 = x + colw
y1 = y + rowh
yield (i, x, y, x1, y1) | [
"def",
"matrix_coords",
"(",
"rows",
",",
"cols",
",",
"rowh",
",",
"colw",
",",
"ox",
"=",
"0",
",",
"oy",
"=",
"0",
")",
":",
"for",
"i",
",",
"f",
",",
"c",
"in",
"rowmajor",
"(",
"rows",
",",
"cols",
")",
":",
"x",
"=",
"ox",
"+",
"c",
"*",
"colw",
"y",
"=",
"oy",
"+",
"f",
"*",
"rowh",
"x1",
"=",
"x",
"+",
"colw",
"y1",
"=",
"y",
"+",
"rowh",
"yield",
"(",
"i",
",",
"x",
",",
"y",
",",
"x1",
",",
"y1",
")"
] | Generate coords for a matrix of rects | [
"Generate",
"coords",
"for",
"a",
"matrix",
"of",
"rects"
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/widgets/calendarframe.py#L40-L47 |
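A small worked example for matrix_coords, assuming the rowmajor helper (not shown in this record) yields (index, row, column) tuples in row-major order; the import path is taken from this record's source file:

from pygubu.widgets.calendarframe import matrix_coords

# rows=2, cols=2, row height=10, column width=20, origin (0, 0):
cells = list(matrix_coords(2, 2, 10, 20))
# Expected under that assumption:
# [(0,  0,  0, 20, 10),   # index 0: row 0, col 0
#  (1, 20,  0, 40, 10),   # index 1: row 0, col 1
#  (2,  0, 10, 20, 20),   # index 2: row 1, col 0
#  (3, 20, 10, 40, 20)]   # index 3: row 1, col 1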
246,686 | alejandroautalan/pygubu | pygubudesigner/util/__init__.py | ArrayVar.get | def get(self):
'''Return a dictionary that represents the Tcl array'''
value = {}
for (elementname, elementvar) in self._elementvars.items():
value[elementname] = elementvar.get()
return value | python | def get(self):
'''Return a dictionary that represents the Tcl array'''
value = {}
for (elementname, elementvar) in self._elementvars.items():
value[elementname] = elementvar.get()
return value | [
"def",
"get",
"(",
"self",
")",
":",
"value",
"=",
"{",
"}",
"for",
"(",
"elementname",
",",
"elementvar",
")",
"in",
"self",
".",
"_elementvars",
".",
"items",
"(",
")",
":",
"value",
"[",
"elementname",
"]",
"=",
"elementvar",
".",
"get",
"(",
")",
"return",
"value"
] | Return a dictionary that represents the Tcl array | [
"Return",
"a",
"dictionary",
"that",
"represents",
"the",
"Tcl",
"array"
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/util/__init__.py#L96-L101 |
246,687 | alejandroautalan/pygubu | pygubu/widgets/editabletreeview.py | EditableTreeview.yview | def yview(self, *args):
"""Update inplace widgets position when doing vertical scroll"""
self.after_idle(self.__updateWnds)
ttk.Treeview.yview(self, *args) | python | def yview(self, *args):
self.after_idle(self.__updateWnds)
ttk.Treeview.yview(self, *args) | [
"def",
"yview",
"(",
"self",
",",
"*",
"args",
")",
":",
"self",
".",
"after_idle",
"(",
"self",
".",
"__updateWnds",
")",
"ttk",
".",
"Treeview",
".",
"yview",
"(",
"self",
",",
"*",
"args",
")"
] | Update inplace widgets position when doing vertical scroll | [
"Update",
"inplace",
"widgets",
"position",
"when",
"doing",
"vertical",
"scroll"
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/widgets/editabletreeview.py#L106-L109 |
246,688 | alejandroautalan/pygubu | pygubu/widgets/editabletreeview.py | EditableTreeview.xview | def xview(self, *args):
"""Update inplace widgets position when doing horizontal scroll"""
self.after_idle(self.__updateWnds)
ttk.Treeview.xview(self, *args) | python | def xview(self, *args):
self.after_idle(self.__updateWnds)
ttk.Treeview.xview(self, *args) | [
"def",
"xview",
"(",
"self",
",",
"*",
"args",
")",
":",
"self",
".",
"after_idle",
"(",
"self",
".",
"__updateWnds",
")",
"ttk",
".",
"Treeview",
".",
"xview",
"(",
"self",
",",
"*",
"args",
")"
] | Update inplace widgets position when doing horizontal scroll | [
"Update",
"inplace",
"widgets",
"position",
"when",
"doing",
"horizontal",
"scroll"
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/widgets/editabletreeview.py#L119-L122 |
246,689 | alejandroautalan/pygubu | pygubu/widgets/editabletreeview.py | EditableTreeview.__check_focus | def __check_focus(self, event):
"""Checks if the focus has changed"""
#print('Event:', event.type, event.x, event.y)
changed = False
if not self._curfocus:
changed = True
elif self._curfocus != self.focus():
self.__clear_inplace_widgets()
changed = True
newfocus = self.focus()
if changed:
if newfocus:
#print('Focus changed to:', newfocus)
self._curfocus= newfocus
self.__focus(newfocus)
self.__updateWnds() | python | def __check_focus(self, event):
#print('Event:', event.type, event.x, event.y)
changed = False
if not self._curfocus:
changed = True
elif self._curfocus != self.focus():
self.__clear_inplace_widgets()
changed = True
newfocus = self.focus()
if changed:
if newfocus:
#print('Focus changed to:', newfocus)
self._curfocus= newfocus
self.__focus(newfocus)
self.__updateWnds() | [
"def",
"__check_focus",
"(",
"self",
",",
"event",
")",
":",
"#print('Event:', event.type, event.x, event.y)",
"changed",
"=",
"False",
"if",
"not",
"self",
".",
"_curfocus",
":",
"changed",
"=",
"True",
"elif",
"self",
".",
"_curfocus",
"!=",
"self",
".",
"focus",
"(",
")",
":",
"self",
".",
"__clear_inplace_widgets",
"(",
")",
"changed",
"=",
"True",
"newfocus",
"=",
"self",
".",
"focus",
"(",
")",
"if",
"changed",
":",
"if",
"newfocus",
":",
"#print('Focus changed to:', newfocus)",
"self",
".",
"_curfocus",
"=",
"newfocus",
"self",
".",
"__focus",
"(",
"newfocus",
")",
"self",
".",
"__updateWnds",
"(",
")"
] | Checks if the focus has changed | [
"Checks",
"if",
"the",
"focus",
"has",
"changed"
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/widgets/editabletreeview.py#L132-L147 |
246,690 | alejandroautalan/pygubu | pygubu/widgets/editabletreeview.py | EditableTreeview.__focus | def __focus(self, item):
"""Called when focus item has changed"""
cols = self.__get_display_columns()
for col in cols:
self.__event_info =(col,item)
self.event_generate('<<TreeviewInplaceEdit>>')
if col in self._inplace_widgets:
w = self._inplace_widgets[col]
w.bind('<Key-Tab>',
lambda e: w.tk_focusNext().focus_set())
w.bind('<Shift-Key-Tab>',
lambda e: w.tk_focusPrev().focus_set()) | python | def __focus(self, item):
cols = self.__get_display_columns()
for col in cols:
self.__event_info =(col,item)
self.event_generate('<<TreeviewInplaceEdit>>')
if col in self._inplace_widgets:
w = self._inplace_widgets[col]
w.bind('<Key-Tab>',
lambda e: w.tk_focusNext().focus_set())
w.bind('<Shift-Key-Tab>',
lambda e: w.tk_focusPrev().focus_set()) | [
"def",
"__focus",
"(",
"self",
",",
"item",
")",
":",
"cols",
"=",
"self",
".",
"__get_display_columns",
"(",
")",
"for",
"col",
"in",
"cols",
":",
"self",
".",
"__event_info",
"=",
"(",
"col",
",",
"item",
")",
"self",
".",
"event_generate",
"(",
"'<<TreeviewInplaceEdit>>'",
")",
"if",
"col",
"in",
"self",
".",
"_inplace_widgets",
":",
"w",
"=",
"self",
".",
"_inplace_widgets",
"[",
"col",
"]",
"w",
".",
"bind",
"(",
"'<Key-Tab>'",
",",
"lambda",
"e",
":",
"w",
".",
"tk_focusNext",
"(",
")",
".",
"focus_set",
"(",
")",
")",
"w",
".",
"bind",
"(",
"'<Shift-Key-Tab>'",
",",
"lambda",
"e",
":",
"w",
".",
"tk_focusPrev",
"(",
")",
".",
"focus_set",
"(",
")",
")"
] | Called when focus item has changed | [
"Called",
"when",
"focus",
"item",
"has",
"changed"
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/widgets/editabletreeview.py#L149-L160 |
246,691 | alejandroautalan/pygubu | pygubu/widgets/editabletreeview.py | EditableTreeview.__clear_inplace_widgets | def __clear_inplace_widgets(self):
"""Remove all inplace edit widgets."""
cols = self.__get_display_columns()
#print('Clear:', cols)
for c in cols:
if c in self._inplace_widgets:
widget = self._inplace_widgets[c]
widget.place_forget()
self._inplace_widgets_show.pop(c, None) | python | def __clear_inplace_widgets(self):
cols = self.__get_display_columns()
#print('Clear:', cols)
for c in cols:
if c in self._inplace_widgets:
widget = self._inplace_widgets[c]
widget.place_forget()
self._inplace_widgets_show.pop(c, None) | [
"def",
"__clear_inplace_widgets",
"(",
"self",
")",
":",
"cols",
"=",
"self",
".",
"__get_display_columns",
"(",
")",
"#print('Clear:', cols)",
"for",
"c",
"in",
"cols",
":",
"if",
"c",
"in",
"self",
".",
"_inplace_widgets",
":",
"widget",
"=",
"self",
".",
"_inplace_widgets",
"[",
"c",
"]",
"widget",
".",
"place_forget",
"(",
")",
"self",
".",
"_inplace_widgets_show",
".",
"pop",
"(",
"c",
",",
"None",
")"
] | Remove all inplace edit widgets. | [
"Remove",
"all",
"inplace",
"edit",
"widgets",
"."
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/widgets/editabletreeview.py#L179-L187 |
246,692 | alejandroautalan/pygubu | setup.py | CustomInstall.run | def run(self):
"""Run parent install, and then save the install dir in the script."""
install.run(self)
#
# Remove old pygubu.py from scripts path if exists
spath = os.path.join(self.install_scripts, 'pygubu')
for ext in ('.py', '.pyw'):
filename = spath + ext
if os.path.exists(filename):
os.remove(filename)
#
# Remove old pygubu-designer.bat
if platform.system() == 'Windows':
spath = os.path.join(self.install_scripts, 'pygubu-designer.bat')
if os.path.exists(spath):
os.remove(spath) | python | def run(self):
install.run(self)
#
# Remove old pygubu.py from scripts path if exists
spath = os.path.join(self.install_scripts, 'pygubu')
for ext in ('.py', '.pyw'):
filename = spath + ext
if os.path.exists(filename):
os.remove(filename)
#
# Remove old pygubu-designer.bat
if platform.system() == 'Windows':
spath = os.path.join(self.install_scripts, 'pygubu-designer.bat')
if os.path.exists(spath):
os.remove(spath) | [
"def",
"run",
"(",
"self",
")",
":",
"install",
".",
"run",
"(",
"self",
")",
"#",
"# Remove old pygubu.py from scripts path if exists",
"spath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"install_scripts",
",",
"'pygubu'",
")",
"for",
"ext",
"in",
"(",
"'.py'",
",",
"'.pyw'",
")",
":",
"filename",
"=",
"spath",
"+",
"ext",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"os",
".",
"remove",
"(",
"filename",
")",
"#",
"# Remove old pygubu-designer.bat",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"'Windows'",
":",
"spath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"install_scripts",
",",
"'pygubu-designer.bat'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"spath",
")",
":",
"os",
".",
"remove",
"(",
"spath",
")"
] | Run parent install, and then save the install dir in the script. | [
"Run",
"parent",
"install",
"and",
"then",
"save",
"the",
"install",
"dir",
"in",
"the",
"script",
"."
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/setup.py#L30-L46 |
246,693 | alejandroautalan/pygubu | pygubudesigner/propertieseditor.py | PropertiesEditor.hide_all | def hide_all(self):
"""Hide all properties from property editor."""
self.current = None
for _v, (label, widget) in self._propbag.items():
label.grid_remove()
widget.grid_remove() | python | def hide_all(self):
self.current = None
for _v, (label, widget) in self._propbag.items():
label.grid_remove()
widget.grid_remove() | [
"def",
"hide_all",
"(",
"self",
")",
":",
"self",
".",
"current",
"=",
"None",
"for",
"_v",
",",
"(",
"label",
",",
"widget",
")",
"in",
"self",
".",
"_propbag",
".",
"items",
"(",
")",
":",
"label",
".",
"grid_remove",
"(",
")",
"widget",
".",
"grid_remove",
"(",
")"
] | Hide all properties from property editor. | [
"Hide",
"all",
"properties",
"from",
"property",
"editor",
"."
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/propertieseditor.py#L150-L156 |
246,694 | alejandroautalan/pygubu | pygubu/builder/builderobject.py | BuilderObject._get_init_args | def _get_init_args(self):
"""Creates dict with properties marked as readonly"""
args = {}
for rop in self.ro_properties:
if rop in self.properties:
args[rop] = self.properties[rop]
return args | python | def _get_init_args(self):
args = {}
for rop in self.ro_properties:
if rop in self.properties:
args[rop] = self.properties[rop]
return args | [
"def",
"_get_init_args",
"(",
"self",
")",
":",
"args",
"=",
"{",
"}",
"for",
"rop",
"in",
"self",
".",
"ro_properties",
":",
"if",
"rop",
"in",
"self",
".",
"properties",
":",
"args",
"[",
"rop",
"]",
"=",
"self",
".",
"properties",
"[",
"rop",
"]",
"return",
"args"
] | Creates dict with properties marked as readonly | [
"Creates",
"dict",
"with",
"properties",
"marked",
"as",
"readonly"
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/builderobject.py#L86-L93 |
246,695 | alejandroautalan/pygubu | pygubudesigner/previewer.py | OnCanvasMenuPreview._calculate_menu_wh | def _calculate_menu_wh(self):
""" Calculate menu widht and height."""
w = iw = 50
h = ih = 0
# menu.index returns None if there are no choices
index = self._menu.index(tk.END)
index = index if index is not None else 0
count = index + 1
# First calculate using the font paramters of root menu:
font = self._menu.cget('font')
font = self._get_font(font)
for i in range(0, count):
mtype = self._menu.type(i)
if mtype == 'tearoff':
continue
label = 'default'
ifont = 'TkMenuFont'
if mtype != 'separator':
label = self._menu.entrycget(i, 'label')
ifont = self._menu.entrycget(i, 'font')
wpx = font.measure(label)
hpx = font.metrics('linespace')
w += wpx
if hpx > h:
h = hpx * 2
# Calculate using font configured for each subitem
ifont = self._get_font(ifont)
wpx = ifont.measure(label)
hpx = ifont.metrics('linespace')
iw += wpx
if hpx > ih:
ih = hpx * 2
# Then compare 2 sizes and use the greatest
w = max(w, iw, 100)
h = max(h, ih, 25)
self._cwidth = w + int(w * 0.25)
self._cheight = h + int(h * 0.25) | python | def _calculate_menu_wh(self):
w = iw = 50
h = ih = 0
# menu.index returns None if there are no choices
index = self._menu.index(tk.END)
index = index if index is not None else 0
count = index + 1
# First calculate using the font paramters of root menu:
font = self._menu.cget('font')
font = self._get_font(font)
for i in range(0, count):
mtype = self._menu.type(i)
if mtype == 'tearoff':
continue
label = 'default'
ifont = 'TkMenuFont'
if mtype != 'separator':
label = self._menu.entrycget(i, 'label')
ifont = self._menu.entrycget(i, 'font')
wpx = font.measure(label)
hpx = font.metrics('linespace')
w += wpx
if hpx > h:
h = hpx * 2
# Calculate using font configured for each subitem
ifont = self._get_font(ifont)
wpx = ifont.measure(label)
hpx = ifont.metrics('linespace')
iw += wpx
if hpx > ih:
ih = hpx * 2
# Then compare 2 sizes and use the greatest
w = max(w, iw, 100)
h = max(h, ih, 25)
self._cwidth = w + int(w * 0.25)
self._cheight = h + int(h * 0.25) | [
"def",
"_calculate_menu_wh",
"(",
"self",
")",
":",
"w",
"=",
"iw",
"=",
"50",
"h",
"=",
"ih",
"=",
"0",
"# menu.index returns None if there are no choices",
"index",
"=",
"self",
".",
"_menu",
".",
"index",
"(",
"tk",
".",
"END",
")",
"index",
"=",
"index",
"if",
"index",
"is",
"not",
"None",
"else",
"0",
"count",
"=",
"index",
"+",
"1",
"# First calculate using the font paramters of root menu:",
"font",
"=",
"self",
".",
"_menu",
".",
"cget",
"(",
"'font'",
")",
"font",
"=",
"self",
".",
"_get_font",
"(",
"font",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"count",
")",
":",
"mtype",
"=",
"self",
".",
"_menu",
".",
"type",
"(",
"i",
")",
"if",
"mtype",
"==",
"'tearoff'",
":",
"continue",
"label",
"=",
"'default'",
"ifont",
"=",
"'TkMenuFont'",
"if",
"mtype",
"!=",
"'separator'",
":",
"label",
"=",
"self",
".",
"_menu",
".",
"entrycget",
"(",
"i",
",",
"'label'",
")",
"ifont",
"=",
"self",
".",
"_menu",
".",
"entrycget",
"(",
"i",
",",
"'font'",
")",
"wpx",
"=",
"font",
".",
"measure",
"(",
"label",
")",
"hpx",
"=",
"font",
".",
"metrics",
"(",
"'linespace'",
")",
"w",
"+=",
"wpx",
"if",
"hpx",
">",
"h",
":",
"h",
"=",
"hpx",
"*",
"2",
"# Calculate using font configured for each subitem",
"ifont",
"=",
"self",
".",
"_get_font",
"(",
"ifont",
")",
"wpx",
"=",
"ifont",
".",
"measure",
"(",
"label",
")",
"hpx",
"=",
"ifont",
".",
"metrics",
"(",
"'linespace'",
")",
"iw",
"+=",
"wpx",
"if",
"hpx",
">",
"ih",
":",
"ih",
"=",
"hpx",
"*",
"2",
"# Then compare 2 sizes and use the greatest",
"w",
"=",
"max",
"(",
"w",
",",
"iw",
",",
"100",
")",
"h",
"=",
"max",
"(",
"h",
",",
"ih",
",",
"25",
")",
"self",
".",
"_cwidth",
"=",
"w",
"+",
"int",
"(",
"w",
"*",
"0.25",
")",
"self",
".",
"_cheight",
"=",
"h",
"+",
"int",
"(",
"h",
"*",
"0.25",
")"
] | Calculate menu widht and height. | [
"Calculate",
"menu",
"widht",
"and",
"height",
"."
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/previewer.py#L283-L320 |
246,696 | alejandroautalan/pygubu | pygubudesigner/previewer.py | PreviewHelper._over_resizer | def _over_resizer(self, x, y):
"Returns True if mouse is over a resizer"
over_resizer = False
c = self.canvas
ids = c.find_overlapping(x, y, x, y)
if ids:
o = ids[0]
tags = c.gettags(o)
if 'resizer' in tags:
over_resizer = True
return over_resizer | python | def _over_resizer(self, x, y):
"Returns True if mouse is over a resizer"
over_resizer = False
c = self.canvas
ids = c.find_overlapping(x, y, x, y)
if ids:
o = ids[0]
tags = c.gettags(o)
if 'resizer' in tags:
over_resizer = True
return over_resizer | [
"def",
"_over_resizer",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"over_resizer",
"=",
"False",
"c",
"=",
"self",
".",
"canvas",
"ids",
"=",
"c",
".",
"find_overlapping",
"(",
"x",
",",
"y",
",",
"x",
",",
"y",
")",
"if",
"ids",
":",
"o",
"=",
"ids",
"[",
"0",
"]",
"tags",
"=",
"c",
".",
"gettags",
"(",
"o",
")",
"if",
"'resizer'",
"in",
"tags",
":",
"over_resizer",
"=",
"True",
"return",
"over_resizer"
] | Returns True if mouse is over a resizer | [
"Returns",
"True",
"if",
"mouse",
"is",
"over",
"a",
"resizer"
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/previewer.py#L453-L464 |
246,697 | alejandroautalan/pygubu | pygubudesigner/previewer.py | PreviewHelper.resize_preview | def resize_preview(self, dw, dh):
"Resizes preview that is currently dragged"
# identify preview
if self._objects_moving:
id_ = self._objects_moving[0]
tags = self.canvas.gettags(id_)
for tag in tags:
if tag.startswith('preview_'):
_, ident = tag.split('preview_')
preview = self.previews[ident]
preview.resize_by(dw, dh)
self.move_previews()
break
self._update_cregion() | python | def resize_preview(self, dw, dh):
"Resizes preview that is currently dragged"
# identify preview
if self._objects_moving:
id_ = self._objects_moving[0]
tags = self.canvas.gettags(id_)
for tag in tags:
if tag.startswith('preview_'):
_, ident = tag.split('preview_')
preview = self.previews[ident]
preview.resize_by(dw, dh)
self.move_previews()
break
self._update_cregion() | [
"def",
"resize_preview",
"(",
"self",
",",
"dw",
",",
"dh",
")",
":",
"# identify preview",
"if",
"self",
".",
"_objects_moving",
":",
"id_",
"=",
"self",
".",
"_objects_moving",
"[",
"0",
"]",
"tags",
"=",
"self",
".",
"canvas",
".",
"gettags",
"(",
"id_",
")",
"for",
"tag",
"in",
"tags",
":",
"if",
"tag",
".",
"startswith",
"(",
"'preview_'",
")",
":",
"_",
",",
"ident",
"=",
"tag",
".",
"split",
"(",
"'preview_'",
")",
"preview",
"=",
"self",
".",
"previews",
"[",
"ident",
"]",
"preview",
".",
"resize_by",
"(",
"dw",
",",
"dh",
")",
"self",
".",
"move_previews",
"(",
")",
"break",
"self",
".",
"_update_cregion",
"(",
")"
] | Resizes preview that is currently dragged | [
"Resizes",
"preview",
"that",
"is",
"currently",
"dragged"
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/previewer.py#L466-L480 |
246,698 | alejandroautalan/pygubu | pygubudesigner/previewer.py | PreviewHelper.move_previews | def move_previews(self):
"Move previews after a resize event"
# calculate new positions
min_y = self._calc_preview_ypos()
for idx, (key, p) in enumerate(self.previews.items()):
new_dy = min_y[idx] - p.y
self.previews[key].move_by(0, new_dy)
self._update_cregion()
self.show_selected(self._sel_id, self._sel_widget) | python | def move_previews(self):
"Move previews after a resize event"
# calculate new positions
min_y = self._calc_preview_ypos()
for idx, (key, p) in enumerate(self.previews.items()):
new_dy = min_y[idx] - p.y
self.previews[key].move_by(0, new_dy)
self._update_cregion()
self.show_selected(self._sel_id, self._sel_widget) | [
"def",
"move_previews",
"(",
"self",
")",
":",
"# calculate new positions",
"min_y",
"=",
"self",
".",
"_calc_preview_ypos",
"(",
")",
"for",
"idx",
",",
"(",
"key",
",",
"p",
")",
"in",
"enumerate",
"(",
"self",
".",
"previews",
".",
"items",
"(",
")",
")",
":",
"new_dy",
"=",
"min_y",
"[",
"idx",
"]",
"-",
"p",
".",
"y",
"self",
".",
"previews",
"[",
"key",
"]",
".",
"move_by",
"(",
"0",
",",
"new_dy",
")",
"self",
".",
"_update_cregion",
"(",
")",
"self",
".",
"show_selected",
"(",
"self",
".",
"_sel_id",
",",
"self",
".",
"_sel_widget",
")"
] | Move previews after a resize event | [
"Move",
"previews",
"after",
"a",
"resize",
"event"
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/previewer.py#L490-L499 |
246,699 | alejandroautalan/pygubu | pygubudesigner/previewer.py | PreviewHelper._calc_preview_ypos | def _calc_preview_ypos(self):
"Calculates the previews positions on canvas"
y = 10
min_y = [y]
for k, p in self.previews.items():
y += p.height() + self.padding
min_y.append(y)
return min_y | python | def _calc_preview_ypos(self):
"Calculates the previews positions on canvas"
y = 10
min_y = [y]
for k, p in self.previews.items():
y += p.height() + self.padding
min_y.append(y)
return min_y | [
"def",
"_calc_preview_ypos",
"(",
"self",
")",
":",
"y",
"=",
"10",
"min_y",
"=",
"[",
"y",
"]",
"for",
"k",
",",
"p",
"in",
"self",
".",
"previews",
".",
"items",
"(",
")",
":",
"y",
"+=",
"p",
".",
"height",
"(",
")",
"+",
"self",
".",
"padding",
"min_y",
".",
"append",
"(",
"y",
")",
"return",
"min_y"
] | Calculates the previews positions on canvas | [
"Calculates",
"the",
"previews",
"positions",
"on",
"canvas"
] | 41c8fb37ef973736ec5d68cbe1cd4ecb78712e40 | https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/previewer.py#L501-L509 |