nwo (string, 5-106 chars) | sha (string, 40 chars) | path (string, 4-174 chars) | language (string, 1 distinct value) | identifier (string, 1-140 chars) | parameters (string, 0-87.7k chars) | argument_list (string, 1 distinct value) | return_statement (string, 0-426k chars) | docstring (string, 0-64.3k chars) | docstring_summary (string, 0-26.3k chars) | docstring_tokens (list) | function (string, 18-4.83M chars) | function_tokens (list) | url (string, 83-304 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
natashamjaques/neural_chat
|
ddb977bb4602a67c460d02231e7bbf7b2cb49a97
|
torchMoji/torchmoji/class_avg_finetuning.py
|
python
|
class_avg_tune_trainable
|
(model, nb_classes, loss_op, optim_op, train, val, test,
epoch_size, nb_epochs, batch_size,
init_weight_path, checkpoint_weight_path, patience=5,
verbose=True)
|
return total_f1 / nb_iter
|
Finetunes the given model using the F1 measure.
# Arguments:
model: Model to be finetuned.
nb_classes: Number of classes in the given dataset.
train: Training data, given as a tuple of (inputs, outputs)
val: Validation data, given as a tuple of (inputs, outputs)
test: Testing data, given as a tuple of (inputs, outputs)
epoch_size: Number of samples in an epoch.
nb_epochs: Number of epochs.
batch_size: Batch size.
init_weight_path: Filepath where weights will be initially saved before
training each class. This file will be rewritten by the function.
checkpoint_weight_path: Filepath where weights will be checkpointed to
during training. This file will be rewritten by the function.
verbose: Verbosity flag.
# Returns:
F1 score of the trained model
|
Finetunes the given model using the F1 measure.
|
[
"Finetunes",
"the",
"given",
"model",
"using",
"the",
"F1",
"measure",
"."
] |
def class_avg_tune_trainable(model, nb_classes, loss_op, optim_op, train, val, test,
epoch_size, nb_epochs, batch_size,
init_weight_path, checkpoint_weight_path, patience=5,
verbose=True):
""" Finetunes the given model using the F1 measure.
# Arguments:
model: Model to be finetuned.
nb_classes: Number of classes in the given dataset.
train: Training data, given as a tuple of (inputs, outputs)
val: Validation data, given as a tuple of (inputs, outputs)
test: Testing data, given as a tuple of (inputs, outputs)
epoch_size: Number of samples in an epoch.
nb_epochs: Number of epochs.
batch_size: Batch size.
init_weight_path: Filepath where weights will be initially saved before
training each class. This file will be rewritten by the function.
checkpoint_weight_path: Filepath where weights will be checkpointed to
during training. This file will be rewritten by the function.
verbose: Verbosity flag.
# Returns:
F1 score of the trained model
"""
total_f1 = 0
nb_iter = nb_classes if nb_classes > 2 else 1
# Unpack args
X_train, y_train = train
X_val, y_val = val
X_test, y_test = test
# Save and reload initial weights after running for
# each class to avoid learning across classes
torch.save(model.state_dict(), init_weight_path)
for i in range(nb_iter):
if verbose:
print('Iteration number {}/{}'.format(i+1, nb_iter))
model.load_state_dict(torch.load(init_weight_path))
y_train_new, y_val_new, y_test_new = prepare_labels(y_train, y_val,
y_test, i, nb_classes)
train_gen, X_val_resamp, y_val_resamp = \
prepare_generators(X_train, y_train_new, X_val, y_val_new,
batch_size, epoch_size)
if verbose:
print("Training..")
fit_model(model, loss_op, optim_op, train_gen, [(X_val_resamp, y_val_resamp)],
nb_epochs, checkpoint_weight_path, patience, verbose=0)
# Reload the best weights found to avoid overfitting
# Wait a bit to allow proper closing of weights file
sleep(1)
model.load_state_dict(torch.load(checkpoint_weight_path))
# Evaluate
y_pred_val = model(X_val).cpu().numpy()
y_pred_test = model(X_test).cpu().numpy()
f1_test, best_t = find_f1_threshold(y_val_new, y_pred_val,
y_test_new, y_pred_test)
if verbose:
print('f1_test: {}'.format(f1_test))
print('best_t: {}'.format(best_t))
total_f1 += f1_test
return total_f1 / nb_iter
|
[
"def",
"class_avg_tune_trainable",
"(",
"model",
",",
"nb_classes",
",",
"loss_op",
",",
"optim_op",
",",
"train",
",",
"val",
",",
"test",
",",
"epoch_size",
",",
"nb_epochs",
",",
"batch_size",
",",
"init_weight_path",
",",
"checkpoint_weight_path",
",",
"patience",
"=",
"5",
",",
"verbose",
"=",
"True",
")",
":",
"total_f1",
"=",
"0",
"nb_iter",
"=",
"nb_classes",
"if",
"nb_classes",
">",
"2",
"else",
"1",
"# Unpack args",
"X_train",
",",
"y_train",
"=",
"train",
"X_val",
",",
"y_val",
"=",
"val",
"X_test",
",",
"y_test",
"=",
"test",
"# Save and reload initial weights after running for",
"# each class to avoid learning across classes",
"torch",
".",
"save",
"(",
"model",
".",
"state_dict",
"(",
")",
",",
"init_weight_path",
")",
"for",
"i",
"in",
"range",
"(",
"nb_iter",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"'Iteration number {}/{}'",
".",
"format",
"(",
"i",
"+",
"1",
",",
"nb_iter",
")",
")",
"model",
".",
"load_state_dict",
"(",
"torch",
".",
"load",
"(",
"init_weight_path",
")",
")",
"y_train_new",
",",
"y_val_new",
",",
"y_test_new",
"=",
"prepare_labels",
"(",
"y_train",
",",
"y_val",
",",
"y_test",
",",
"i",
",",
"nb_classes",
")",
"train_gen",
",",
"X_val_resamp",
",",
"y_val_resamp",
"=",
"prepare_generators",
"(",
"X_train",
",",
"y_train_new",
",",
"X_val",
",",
"y_val_new",
",",
"batch_size",
",",
"epoch_size",
")",
"if",
"verbose",
":",
"print",
"(",
"\"Training..\"",
")",
"fit_model",
"(",
"model",
",",
"loss_op",
",",
"optim_op",
",",
"train_gen",
",",
"[",
"(",
"X_val_resamp",
",",
"y_val_resamp",
")",
"]",
",",
"nb_epochs",
",",
"checkpoint_weight_path",
",",
"patience",
",",
"verbose",
"=",
"0",
")",
"# Reload the best weights found to avoid overfitting",
"# Wait a bit to allow proper closing of weights file",
"sleep",
"(",
"1",
")",
"model",
".",
"load_state_dict",
"(",
"torch",
".",
"load",
"(",
"checkpoint_weight_path",
")",
")",
"# Evaluate",
"y_pred_val",
"=",
"model",
"(",
"X_val",
")",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
"y_pred_test",
"=",
"model",
"(",
"X_test",
")",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
"f1_test",
",",
"best_t",
"=",
"find_f1_threshold",
"(",
"y_val_new",
",",
"y_pred_val",
",",
"y_test_new",
",",
"y_pred_test",
")",
"if",
"verbose",
":",
"print",
"(",
"'f1_test: {}'",
".",
"format",
"(",
"f1_test",
")",
")",
"print",
"(",
"'best_t: {}'",
".",
"format",
"(",
"best_t",
")",
")",
"total_f1",
"+=",
"f1_test",
"return",
"total_f1",
"/",
"nb_iter"
] |
https://github.com/natashamjaques/neural_chat/blob/ddb977bb4602a67c460d02231e7bbf7b2cb49a97/torchMoji/torchmoji/class_avg_finetuning.py#L166-L233
|
|
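The loop above hinges on one pattern: the initial weights are saved once, then reloaded at the top of every per-class iteration so that finetuning on one class does not leak into the next. A minimal, self-contained sketch of that save/reload pattern (PyTorch only; the toy model and file name are illustrative, not part of torchMoji):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)                       # stand-in for the torchMoji model
init_weight_path = "init_weights.pt"          # hypothetical path
torch.save(model.state_dict(), init_weight_path)

for class_idx in range(3):                    # stands in for range(nb_iter)
    # restore the shared starting point before finetuning on this class
    model.load_state_dict(torch.load(init_weight_path))
    # ... per-class finetuning and evaluation would happen here ...
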
xonsh/xonsh
|
b76d6f994f22a4078f602f8b386f4ec280c8461f
|
xonsh/procs/proxies.py
|
python
|
proxy_four
|
(f, args, stdin, stdout, stderr, spec, stack)
|
return f(args, stdin, stdout, stderr)
|
Calls a proxy function which takes four parameters: args, stdin, stdout,
and stderr.
|
Calls a proxy function which takes four parameters: args, stdin, stdout,
and stderr.
|
[
"Calls",
"a",
"proxy",
"function",
"which",
"takes",
"four",
"parameter",
":",
"args",
"stdin",
"stdout",
"and",
"stderr",
"."
] |
def proxy_four(f, args, stdin, stdout, stderr, spec, stack):
"""Calls a proxy function which takes four parameter: args, stdin, stdout,
and stderr.
"""
return f(args, stdin, stdout, stderr)
|
[
"def",
"proxy_four",
"(",
"f",
",",
"args",
",",
"stdin",
",",
"stdout",
",",
"stderr",
",",
"spec",
",",
"stack",
")",
":",
"return",
"f",
"(",
"args",
",",
"stdin",
",",
"stdout",
",",
"stderr",
")"
] |
https://github.com/xonsh/xonsh/blob/b76d6f994f22a4078f602f8b386f4ec280c8461f/xonsh/procs/proxies.py#L308-L312
|
|
TencentCloud/tencentcloud-sdk-python
|
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
|
tencentcloud/cwp/v20180228/cwp_client.py
|
python
|
CwpClient.DescribeBaselineAnalysisData
|
(self, request)
|
Queries the baseline policy data overview statistics by baseline policy id.
:param request: Request instance for DescribeBaselineAnalysisData.
:type request: :class:`tencentcloud.cwp.v20180228.models.DescribeBaselineAnalysisDataRequest`
:rtype: :class:`tencentcloud.cwp.v20180228.models.DescribeBaselineAnalysisDataResponse`
|
Queries the baseline policy data overview statistics by baseline policy id.
|
[
"根据基线策略id查询基线策略数据概览统计"
] |
def DescribeBaselineAnalysisData(self, request):
"""根据基线策略id查询基线策略数据概览统计
:param request: Request instance for DescribeBaselineAnalysisData.
:type request: :class:`tencentcloud.cwp.v20180228.models.DescribeBaselineAnalysisDataRequest`
:rtype: :class:`tencentcloud.cwp.v20180228.models.DescribeBaselineAnalysisDataResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeBaselineAnalysisData", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeBaselineAnalysisDataResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
|
[
"def",
"DescribeBaselineAnalysisData",
"(",
"self",
",",
"request",
")",
":",
"try",
":",
"params",
"=",
"request",
".",
"_serialize",
"(",
")",
"body",
"=",
"self",
".",
"call",
"(",
"\"DescribeBaselineAnalysisData\"",
",",
"params",
")",
"response",
"=",
"json",
".",
"loads",
"(",
"body",
")",
"if",
"\"Error\"",
"not",
"in",
"response",
"[",
"\"Response\"",
"]",
":",
"model",
"=",
"models",
".",
"DescribeBaselineAnalysisDataResponse",
"(",
")",
"model",
".",
"_deserialize",
"(",
"response",
"[",
"\"Response\"",
"]",
")",
"return",
"model",
"else",
":",
"code",
"=",
"response",
"[",
"\"Response\"",
"]",
"[",
"\"Error\"",
"]",
"[",
"\"Code\"",
"]",
"message",
"=",
"response",
"[",
"\"Response\"",
"]",
"[",
"\"Error\"",
"]",
"[",
"\"Message\"",
"]",
"reqid",
"=",
"response",
"[",
"\"Response\"",
"]",
"[",
"\"RequestId\"",
"]",
"raise",
"TencentCloudSDKException",
"(",
"code",
",",
"message",
",",
"reqid",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"isinstance",
"(",
"e",
",",
"TencentCloudSDKException",
")",
":",
"raise",
"else",
":",
"raise",
"TencentCloudSDKException",
"(",
"e",
".",
"message",
",",
"e",
".",
"message",
")"
] |
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/cwp/v20180228/cwp_client.py#L1877-L1902
|
||
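A hedged usage sketch for the client method above, following the usual tencentcloud-sdk-python pattern (credential values and region are placeholders; the request is left empty here, so a real call would still need the baseline policy id filled in):

from tencentcloud.common import credential
from tencentcloud.cwp.v20180228 import cwp_client, models

cred = credential.Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY")  # placeholders
client = cwp_client.CwpClient(cred, "ap-guangzhou")

req = models.DescribeBaselineAnalysisDataRequest()
resp = client.DescribeBaselineAnalysisData(req)
print(resp.to_json_string())
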
IronLanguages/main
|
a949455434b1fda8c783289e897e78a9a0caabb5
|
External.LCA_RESTRICTED/Languages/IronPython/repackage/pip/pip/utils/logging.py
|
python
|
indent_log
|
(num=2)
|
A context manager which will cause the log output to be indented for any
log messages emitted inside it.
|
A context manager which will cause the log output to be indented for any
log messages emitted inside it.
|
[
"A",
"context",
"manager",
"which",
"will",
"cause",
"the",
"log",
"output",
"to",
"be",
"indented",
"for",
"any",
"log",
"messages",
"emitted",
"inside",
"it",
"."
] |
def indent_log(num=2):
"""
A context manager which will cause the log output to be indented for any
log messages emitted inside it.
"""
_log_state.indentation += num
try:
yield
finally:
_log_state.indentation -= num
|
[
"def",
"indent_log",
"(",
"num",
"=",
"2",
")",
":",
"_log_state",
".",
"indentation",
"+=",
"num",
"try",
":",
"yield",
"finally",
":",
"_log_state",
".",
"indentation",
"-=",
"num"
] |
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/IronPython/repackage/pip/pip/utils/logging.py#L29-L38
|
||
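In pip this helper is a context manager (the @contextlib.contextmanager decorator is not included in the extracted function above). A self-contained sketch of the same indentation bookkeeping, with the module-level state re-created locally for illustration:

import contextlib

class _LogState:
    indentation = 0

_log_state = _LogState()

@contextlib.contextmanager
def indent_log(num=2):
    _log_state.indentation += num
    try:
        yield
    finally:
        _log_state.indentation -= num

with indent_log(4):
    print(" " * _log_state.indentation + "indented message")
print(" " * _log_state.indentation + "back to normal")
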
supernotman/RetinaFace_Pytorch
|
8369b9304e19923c1a02c049df69628890bf30b5
|
anchors.py
|
python
|
generate_anchors
|
(base_size=16, ratios=None, scales=None)
|
return anchors
|
Generate anchor (reference) windows by enumerating aspect ratios X
scales w.r.t. a reference window.
|
Generate anchor (reference) windows by enumerating aspect ratios X
scales w.r.t. a reference window.
|
[
"Generate",
"anchor",
"(",
"reference",
")",
"windows",
"by",
"enumerating",
"aspect",
"ratios",
"X",
"scales",
"w",
".",
"r",
".",
"t",
".",
"a",
"reference",
"window",
"."
] |
def generate_anchors(base_size=16, ratios=None, scales=None):
"""
Generate anchor (reference) windows by enumerating aspect ratios X
scales w.r.t. a reference window.
"""
if ratios is None:
ratios = np.array([1, 1, 1])
if scales is None:
scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])
num_anchors = len(scales)
# initialize output anchors
anchors = np.zeros((num_anchors, 4))
# scale base_size
anchors[:, 2:] = base_size * np.tile(scales, (2, 1)).T
# transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)
anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T
anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T
return anchors
|
[
"def",
"generate_anchors",
"(",
"base_size",
"=",
"16",
",",
"ratios",
"=",
"None",
",",
"scales",
"=",
"None",
")",
":",
"if",
"ratios",
"is",
"None",
":",
"ratios",
"=",
"np",
".",
"array",
"(",
"[",
"1",
",",
"1",
",",
"1",
"]",
")",
"if",
"scales",
"is",
"None",
":",
"scales",
"=",
"np",
".",
"array",
"(",
"[",
"2",
"**",
"0",
",",
"2",
"**",
"(",
"1.0",
"/",
"3.0",
")",
",",
"2",
"**",
"(",
"2.0",
"/",
"3.0",
")",
"]",
")",
"num_anchors",
"=",
"len",
"(",
"scales",
")",
"# initialize output anchors",
"anchors",
"=",
"np",
".",
"zeros",
"(",
"(",
"num_anchors",
",",
"4",
")",
")",
"# scale base_size",
"anchors",
"[",
":",
",",
"2",
":",
"]",
"=",
"base_size",
"*",
"np",
".",
"tile",
"(",
"scales",
",",
"(",
"2",
",",
"1",
")",
")",
".",
"T",
"# transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)",
"anchors",
"[",
":",
",",
"0",
":",
":",
"2",
"]",
"-=",
"np",
".",
"tile",
"(",
"anchors",
"[",
":",
",",
"2",
"]",
"*",
"0.5",
",",
"(",
"2",
",",
"1",
")",
")",
".",
"T",
"anchors",
"[",
":",
",",
"1",
":",
":",
"2",
"]",
"-=",
"np",
".",
"tile",
"(",
"anchors",
"[",
":",
",",
"3",
"]",
"*",
"0.5",
",",
"(",
"2",
",",
"1",
")",
")",
".",
"T",
"return",
"anchors"
] |
https://github.com/supernotman/RetinaFace_Pytorch/blob/8369b9304e19923c1a02c049df69628890bf30b5/anchors.py#L42-L66
|
|
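With its defaults (base_size=16 and three scales), generate_anchors returns three square boxes centred at the origin in (x1, y1, x2, y2) form. The NumPy-only sketch below re-does that default computation inline so it can be run without the repo:

import numpy as np

base_size = 16
scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])

anchors = np.zeros((len(scales), 4))
anchors[:, 2:] = base_size * np.tile(scales, (2, 1)).T        # (w, h) per scale
anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T    # centre x -> x1, x2
anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T    # centre y -> y1, y2

print(anchors)  # three square anchors centred at the origin
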
mesonbuild/meson
|
a22d0f9a0a787df70ce79b05d0c45de90a970048
|
docs/refman/generatormd.py
|
python
|
GeneratorMD._write_file
|
(self, data: str, file_id: str)
|
Write the data to disk and store the id for the generated data
|
Write the data to disk and store the id for the generated data
|
[
"Write",
"the",
"data",
"to",
"disk",
"ans",
"store",
"the",
"id",
"for",
"the",
"generated",
"data"
] |
def _write_file(self, data: str, file_id: str) -> None:#
''' Write the data to disk ans store the id for the generated data '''
self.generated_files[file_id] = self._gen_filename(file_id)
out_file = self.out_dir / self.generated_files[file_id]
out_file.write_text(data, encoding='ascii')
mlog.log('Generated', mlog.bold(out_file.name))
|
[
"def",
"_write_file",
"(",
"self",
",",
"data",
":",
"str",
",",
"file_id",
":",
"str",
")",
"->",
"None",
":",
"#",
"self",
".",
"generated_files",
"[",
"file_id",
"]",
"=",
"self",
".",
"_gen_filename",
"(",
"file_id",
")",
"out_file",
"=",
"self",
".",
"out_dir",
"/",
"self",
".",
"generated_files",
"[",
"file_id",
"]",
"out_file",
".",
"write_text",
"(",
"data",
",",
"encoding",
"=",
"'ascii'",
")",
"mlog",
".",
"log",
"(",
"'Generated'",
",",
"mlog",
".",
"bold",
"(",
"out_file",
".",
"name",
")",
")"
] |
https://github.com/mesonbuild/meson/blob/a22d0f9a0a787df70ce79b05d0c45de90a970048/docs/refman/generatormd.py#L121-L127
|
||
ncullen93/torchsample
|
1f328d1ea3ef533c8c0c4097ed4a3fa16d784ba4
|
torchsample/datasets.py
|
python
|
TensorDataset.__init__
|
(self,
inputs,
targets=None,
input_transform=None,
target_transform=None,
co_transform=None)
|
Dataset class for loading in-memory data.
Arguments
---------
inputs: numpy array
targets : numpy array
input_transform : class with __call__ function implemented
transform to apply to input sample individually
target_transform : class with __call__ function implemented
transform to apply to target sample individually
co_transform : class with __call__ function implemented
transform to apply to both input and target sample simultaneously
|
Dataset class for loading in-memory data.
|
[
"Dataset",
"class",
"for",
"loading",
"in",
"-",
"memory",
"data",
"."
] |
def __init__(self,
inputs,
targets=None,
input_transform=None,
target_transform=None,
co_transform=None):
"""
Dataset class for loading in-memory data.
Arguments
---------
inputs: numpy array
targets : numpy array
input_transform : class with __call__ function implemented
transform to apply to input sample individually
target_transform : class with __call__ function implemented
transform to apply to target sample individually
co_transform : class with __call__ function implemented
transform to apply to both input and target sample simultaneously
"""
self.inputs = _process_array_argument(inputs)
self.num_inputs = len(self.inputs)
self.input_return_processor = _return_first_element_of_list if self.num_inputs==1 else _pass_through
if targets is None:
self.has_target = False
else:
self.targets = _process_array_argument(targets)
self.num_targets = len(self.targets)
self.target_return_processor = _return_first_element_of_list if self.num_targets==1 else _pass_through
self.min_inputs_or_targets = min(self.num_inputs, self.num_targets)
self.has_target = True
self.input_transform = _process_transform_argument(input_transform, self.num_inputs)
if self.has_target:
self.target_transform = _process_transform_argument(target_transform, self.num_targets)
self.co_transform = _process_co_transform_argument(co_transform, self.num_inputs, self.num_targets)
|
[
"def",
"__init__",
"(",
"self",
",",
"inputs",
",",
"targets",
"=",
"None",
",",
"input_transform",
"=",
"None",
",",
"target_transform",
"=",
"None",
",",
"co_transform",
"=",
"None",
")",
":",
"self",
".",
"inputs",
"=",
"_process_array_argument",
"(",
"inputs",
")",
"self",
".",
"num_inputs",
"=",
"len",
"(",
"self",
".",
"inputs",
")",
"self",
".",
"input_return_processor",
"=",
"_return_first_element_of_list",
"if",
"self",
".",
"num_inputs",
"==",
"1",
"else",
"_pass_through",
"if",
"targets",
"is",
"None",
":",
"self",
".",
"has_target",
"=",
"False",
"else",
":",
"self",
".",
"targets",
"=",
"_process_array_argument",
"(",
"targets",
")",
"self",
".",
"num_targets",
"=",
"len",
"(",
"self",
".",
"targets",
")",
"self",
".",
"target_return_processor",
"=",
"_return_first_element_of_list",
"if",
"self",
".",
"num_targets",
"==",
"1",
"else",
"_pass_through",
"self",
".",
"min_inputs_or_targets",
"=",
"min",
"(",
"self",
".",
"num_inputs",
",",
"self",
".",
"num_targets",
")",
"self",
".",
"has_target",
"=",
"True",
"self",
".",
"input_transform",
"=",
"_process_transform_argument",
"(",
"input_transform",
",",
"self",
".",
"num_inputs",
")",
"if",
"self",
".",
"has_target",
":",
"self",
".",
"target_transform",
"=",
"_process_transform_argument",
"(",
"target_transform",
",",
"self",
".",
"num_targets",
")",
"self",
".",
"co_transform",
"=",
"_process_co_transform_argument",
"(",
"co_transform",
",",
"self",
".",
"num_inputs",
",",
"self",
".",
"num_targets",
")"
] |
https://github.com/ncullen93/torchsample/blob/1f328d1ea3ef533c8c0c4097ed4a3fa16d784ba4/torchsample/datasets.py#L203-L244
|
||
LMFDB/lmfdb
|
6cf48a4c18a96e6298da6ae43f587f96845bcb43
|
lmfdb/siegel_modular_forms/sample.py
|
python
|
Sample
|
(collection, name)
|
return Sample_class(doc) if doc else None
|
Return a light instance of Sample_class, where 'light' means 'without eigenvalues, Fourier coefficients or explicit formula'.
|
Return a light instance of Sample_class, where 'light' means 'without eigenvalues, Fourier coefficients or explicit formula'.
|
[
"Return",
"a",
"light",
"instance",
"of",
"Sample_class",
"where",
"light",
"means",
"without",
"eigenvalues",
"Fourier",
"coefficients",
"or",
"explicit",
"formula",
"."
] |
def Sample(collection, name):
"""
Return a light instance of Sample_class, where 'light' means 'without eigenvalues, Fourier coefficients or explicit formula'.
"""
query = {'collection': {'$contains': [collection]}, 'name': name}
doc = db.smf_samples.lucky(query, {'Fourier_coefficients': False, 'eigenvalues': False, 'explicit_formula': False})
return Sample_class(doc) if doc else None
|
[
"def",
"Sample",
"(",
"collection",
",",
"name",
")",
":",
"query",
"=",
"{",
"'collection'",
":",
"{",
"'$contains'",
":",
"[",
"collection",
"]",
"}",
",",
"'name'",
":",
"name",
"}",
"doc",
"=",
"db",
".",
"smf_samples",
".",
"lucky",
"(",
"query",
",",
"{",
"'Fourier_coefficients'",
":",
"False",
",",
"'eigenvalues'",
":",
"False",
",",
"'explicit_formula'",
":",
"False",
"}",
")",
"return",
"Sample_class",
"(",
"doc",
")",
"if",
"doc",
"else",
"None"
] |
https://github.com/LMFDB/lmfdb/blob/6cf48a4c18a96e6298da6ae43f587f96845bcb43/lmfdb/siegel_modular_forms/sample.py#L103-L109
|
|
bruderstein/PythonScript
|
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
|
PythonLib/min/tempfile.py
|
python
|
SpooledTemporaryFile.tell
|
(self)
|
return self._file.tell()
|
[] |
def tell(self):
return self._file.tell()
|
[
"def",
"tell",
"(",
"self",
")",
":",
"return",
"self",
".",
"_file",
".",
"tell",
"(",
")"
] |
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/min/tempfile.py#L758-L759
|
|||
jansel/opentuner
|
070c5cef6d933eb760a2f9cd5cd08c95f27aee75
|
opentuner/search/manipulator.py
|
python
|
ParameterArray.op1_randomize
|
(self, config)
|
randomly selects a sub-parameter and randomizes it
:param config: the configuration to be changed
|
randomly selects a sub-parameter and randomizes it
|
[
"randomly",
"selects",
"a",
"sub",
"-",
"parameter",
"and",
"randomizes",
"it"
] |
def op1_randomize(self, config):
"""
randomly selects a sub-parameter and randomizes it
:param config: the configuration to be changed
"""
random.choice(self.sub_parameters()).op1_randomize(config)
|
[
"def",
"op1_randomize",
"(",
"self",
",",
"config",
")",
":",
"random",
".",
"choice",
"(",
"self",
".",
"sub_parameters",
"(",
")",
")",
".",
"op1_randomize",
"(",
"config",
")"
] |
https://github.com/jansel/opentuner/blob/070c5cef6d933eb760a2f9cd5cd08c95f27aee75/opentuner/search/manipulator.py#L1507-L1513
|
||
Jakobovski/aws-spot-bot
|
1a84c498df8b98b8fd2439a6c520e7a9b16e4a0d
|
utils/pricing_util.py
|
python
|
get_best_az
|
()
|
return sorted_azs[-1]
|
[] |
def get_best_az():
azs = get_initialized_azs()
for az in azs:
az.calculate_score(uconf.INSTANCE_TYPES, 0.65)
# Sort the AZs by score and return the best one
sorted_azs = sorted(azs, key=attrgetter('score'))
for az in sorted_azs:
print az.name
print '>> price:', az.current_price
print '>> mean:', az.spot_price_mean
print '>> variance:', az.spot_price_variance
print '>> score:', az.score
return sorted_azs[-1]
|
[
"def",
"get_best_az",
"(",
")",
":",
"azs",
"=",
"get_initialized_azs",
"(",
")",
"for",
"az",
"in",
"azs",
":",
"az",
".",
"calculate_score",
"(",
"uconf",
".",
"INSTANCE_TYPES",
",",
"0.65",
")",
"# Sort the AZs by score and return the best one",
"sorted_azs",
"=",
"sorted",
"(",
"azs",
",",
"key",
"=",
"attrgetter",
"(",
"'score'",
")",
")",
"for",
"az",
"in",
"sorted_azs",
":",
"print",
"az",
".",
"name",
"print",
"'>> price:'",
",",
"az",
".",
"current_price",
"print",
"'>> mean:'",
",",
"az",
".",
"spot_price_mean",
"print",
"'>> variance:'",
",",
"az",
".",
"spot_price_variance",
"print",
"'>> score:'",
",",
"az",
".",
"score",
"return",
"sorted_azs",
"[",
"-",
"1",
"]"
] |
https://github.com/Jakobovski/aws-spot-bot/blob/1a84c498df8b98b8fd2439a6c520e7a9b16e4a0d/utils/pricing_util.py#L64-L80
|
|||
Instagram/LibCST
|
13370227703fe3171e94c57bdd7977f3af696b73
|
libcst/_parser/conversions/expression.py
|
python
|
convert_fstring_format_spec
|
(
config: ParserConfig, children: typing.Sequence[typing.Any]
)
|
return FormattedStringFormatSpecPartial(tuple(content), colon.whitespace_before)
|
[] |
def convert_fstring_format_spec(
config: ParserConfig, children: typing.Sequence[typing.Any]
) -> typing.Any:
colon, *content = children
return FormattedStringFormatSpecPartial(tuple(content), colon.whitespace_before)
|
[
"def",
"convert_fstring_format_spec",
"(",
"config",
":",
"ParserConfig",
",",
"children",
":",
"typing",
".",
"Sequence",
"[",
"typing",
".",
"Any",
"]",
")",
"->",
"typing",
".",
"Any",
":",
"colon",
",",
"",
"*",
"content",
"=",
"children",
"return",
"FormattedStringFormatSpecPartial",
"(",
"tuple",
"(",
"content",
")",
",",
"colon",
".",
"whitespace_before",
")"
] |
https://github.com/Instagram/LibCST/blob/13370227703fe3171e94c57bdd7977f3af696b73/libcst/_parser/conversions/expression.py#L1100-L1104
|
|||
ambujraj/hacktoberfest2018
|
53df2cac8b3404261131a873352ec4f2ffa3544d
|
MAC_changer/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_internal/commands/list.py
|
python
|
format_for_columns
|
(pkgs, options)
|
return data, header
|
Convert the package data into something usable
by output_package_listing_columns.
|
Convert the package data into something usable
by output_package_listing_columns.
|
[
"Convert",
"the",
"package",
"data",
"into",
"something",
"usable",
"by",
"output_package_listing_columns",
"."
] |
def format_for_columns(pkgs, options):
"""
Convert the package data into something usable
by output_package_listing_columns.
"""
running_outdated = options.outdated
# Adjust the header for the `pip list --outdated` case.
if running_outdated:
header = ["Package", "Version", "Latest", "Type"]
else:
header = ["Package", "Version"]
data = []
if options.verbose >= 1 or any(dist_is_editable(x) for x in pkgs):
header.append("Location")
if options.verbose >= 1:
header.append("Installer")
for proj in pkgs:
# if we're working on the 'outdated' list, separate out the
# latest_version and type
row = [proj.project_name, proj.version]
if running_outdated:
row.append(proj.latest_version)
row.append(proj.latest_filetype)
if options.verbose >= 1 or dist_is_editable(proj):
row.append(proj.location)
if options.verbose >= 1:
row.append(get_installer(proj))
data.append(row)
return data, header
|
[
"def",
"format_for_columns",
"(",
"pkgs",
",",
"options",
")",
":",
"running_outdated",
"=",
"options",
".",
"outdated",
"# Adjust the header for the `pip list --outdated` case.",
"if",
"running_outdated",
":",
"header",
"=",
"[",
"\"Package\"",
",",
"\"Version\"",
",",
"\"Latest\"",
",",
"\"Type\"",
"]",
"else",
":",
"header",
"=",
"[",
"\"Package\"",
",",
"\"Version\"",
"]",
"data",
"=",
"[",
"]",
"if",
"options",
".",
"verbose",
">=",
"1",
"or",
"any",
"(",
"dist_is_editable",
"(",
"x",
")",
"for",
"x",
"in",
"pkgs",
")",
":",
"header",
".",
"append",
"(",
"\"Location\"",
")",
"if",
"options",
".",
"verbose",
">=",
"1",
":",
"header",
".",
"append",
"(",
"\"Installer\"",
")",
"for",
"proj",
"in",
"pkgs",
":",
"# if we're working on the 'outdated' list, separate out the",
"# latest_version and type",
"row",
"=",
"[",
"proj",
".",
"project_name",
",",
"proj",
".",
"version",
"]",
"if",
"running_outdated",
":",
"row",
".",
"append",
"(",
"proj",
".",
"latest_version",
")",
"row",
".",
"append",
"(",
"proj",
".",
"latest_filetype",
")",
"if",
"options",
".",
"verbose",
">=",
"1",
"or",
"dist_is_editable",
"(",
"proj",
")",
":",
"row",
".",
"append",
"(",
"proj",
".",
"location",
")",
"if",
"options",
".",
"verbose",
">=",
"1",
":",
"row",
".",
"append",
"(",
"get_installer",
"(",
"proj",
")",
")",
"data",
".",
"append",
"(",
"row",
")",
"return",
"data",
",",
"header"
] |
https://github.com/ambujraj/hacktoberfest2018/blob/53df2cac8b3404261131a873352ec4f2ffa3544d/MAC_changer/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_internal/commands/list.py#L292-L326
|
|
PySimpleGUI/PySimpleGUI
|
6c0d1fb54f493d45e90180b322fbbe70f7a5af3c
|
PySimpleGUIWx/PySimpleGUIWx.py
|
python
|
RealtimeButton
|
(button_text, image_filename=None, image_data=None, image_size=(None, None), image_subsample=None,
border_width=None, tooltip=None, size=(None, None), auto_size_button=None, button_color=None,
font=None, disabled=False, bind_return_key=False, focus=False, pad=None, key=None)
|
return Button(button_text=button_text, button_type=BUTTON_TYPE_REALTIME, image_filename=image_filename,
image_data=image_data, image_size=image_size, image_subsample=image_subsample,
border_width=border_width, tooltip=tooltip, disabled=disabled, size=size,
auto_size_button=auto_size_button, button_color=button_color, font=font,
bind_return_key=bind_return_key, focus=focus, pad=pad, key=key)
|
[] |
def RealtimeButton(button_text, image_filename=None, image_data=None, image_size=(None, None), image_subsample=None,
border_width=None, tooltip=None, size=(None, None), auto_size_button=None, button_color=None,
font=None, disabled=False, bind_return_key=False, focus=False, pad=None, key=None):
return Button(button_text=button_text, button_type=BUTTON_TYPE_REALTIME, image_filename=image_filename,
image_data=image_data, image_size=image_size, image_subsample=image_subsample,
border_width=border_width, tooltip=tooltip, disabled=disabled, size=size,
auto_size_button=auto_size_button, button_color=button_color, font=font,
bind_return_key=bind_return_key, focus=focus, pad=pad, key=key)
|
[
"def",
"RealtimeButton",
"(",
"button_text",
",",
"image_filename",
"=",
"None",
",",
"image_data",
"=",
"None",
",",
"image_size",
"=",
"(",
"None",
",",
"None",
")",
",",
"image_subsample",
"=",
"None",
",",
"border_width",
"=",
"None",
",",
"tooltip",
"=",
"None",
",",
"size",
"=",
"(",
"None",
",",
"None",
")",
",",
"auto_size_button",
"=",
"None",
",",
"button_color",
"=",
"None",
",",
"font",
"=",
"None",
",",
"disabled",
"=",
"False",
",",
"bind_return_key",
"=",
"False",
",",
"focus",
"=",
"False",
",",
"pad",
"=",
"None",
",",
"key",
"=",
"None",
")",
":",
"return",
"Button",
"(",
"button_text",
"=",
"button_text",
",",
"button_type",
"=",
"BUTTON_TYPE_REALTIME",
",",
"image_filename",
"=",
"image_filename",
",",
"image_data",
"=",
"image_data",
",",
"image_size",
"=",
"image_size",
",",
"image_subsample",
"=",
"image_subsample",
",",
"border_width",
"=",
"border_width",
",",
"tooltip",
"=",
"tooltip",
",",
"disabled",
"=",
"disabled",
",",
"size",
"=",
"size",
",",
"auto_size_button",
"=",
"auto_size_button",
",",
"button_color",
"=",
"button_color",
",",
"font",
"=",
"font",
",",
"bind_return_key",
"=",
"bind_return_key",
",",
"focus",
"=",
"focus",
",",
"pad",
"=",
"pad",
",",
"key",
"=",
"key",
")"
] |
https://github.com/PySimpleGUI/PySimpleGUI/blob/6c0d1fb54f493d45e90180b322fbbe70f7a5af3c/PySimpleGUIWx/PySimpleGUIWx.py#L3797-L3804
|
|||
ucsb-seclab/karonte
|
427ac313e596f723e40768b95d13bd7a9fc92fd8
|
karonte-viz/viz-results.py
|
python
|
main
|
()
|
[] |
def main():
if len(sys.argv) != 2:
print('Use: python viz-results.py <PATH_TO_LOG_FILE>')
exit()
try:
raw_data = open(sys.argv[1]).read()
except:
print('Error reading file')
exit()
global res
res = parse_json_log(raw_data)
set_layout()
# Timer(1, open_browser).start();
app.run_server(debug=False, port=PORT)
|
[
"def",
"main",
"(",
")",
":",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
"!=",
"2",
":",
"print",
"(",
"'Use: python viz-results.py <PATH_TO_LOG_FILE>'",
")",
"exit",
"(",
")",
"try",
":",
"raw_data",
"=",
"open",
"(",
"sys",
".",
"argv",
"[",
"1",
"]",
")",
".",
"read",
"(",
")",
"except",
":",
"print",
"(",
"'Error reading file'",
")",
"exit",
"(",
")",
"global",
"res",
"res",
"=",
"parse_json_log",
"(",
"raw_data",
")",
"set_layout",
"(",
")",
"# Timer(1, open_browser).start();",
"app",
".",
"run_server",
"(",
"debug",
"=",
"False",
",",
"port",
"=",
"PORT",
")"
] |
https://github.com/ucsb-seclab/karonte/blob/427ac313e596f723e40768b95d13bd7a9fc92fd8/karonte-viz/viz-results.py#L359-L376
|
||||
foremast/foremast
|
e8eb9bd24e975772532d90efa8a9ba1850e968cc
|
src/foremast/iam/destroy_iam/__main__.py
|
python
|
main
|
()
|
Destroy any IAM related Resources.
|
Destroy any IAM related Resources.
|
[
"Destroy",
"any",
"IAM",
"related",
"Resources",
"."
] |
def main():
"""Destroy any IAM related Resources."""
logging.basicConfig(format=LOGGING_FORMAT)
parser = argparse.ArgumentParser(description=main.__doc__)
add_debug(parser)
add_app(parser)
add_env(parser)
args = parser.parse_args()
logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
destroy_iam(**vars(args))
|
[
"def",
"main",
"(",
")",
":",
"logging",
".",
"basicConfig",
"(",
"format",
"=",
"LOGGING_FORMAT",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"main",
".",
"__doc__",
")",
"add_debug",
"(",
"parser",
")",
"add_app",
"(",
"parser",
")",
"add_env",
"(",
"parser",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"logging",
".",
"getLogger",
"(",
"__package__",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
")",
".",
"setLevel",
"(",
"args",
".",
"debug",
")",
"destroy_iam",
"(",
"*",
"*",
"vars",
"(",
"args",
")",
")"
] |
https://github.com/foremast/foremast/blob/e8eb9bd24e975772532d90efa8a9ba1850e968cc/src/foremast/iam/destroy_iam/__main__.py#L27-L39
|
||
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/lib/python2.7/site-packages/pip/_internal/configuration.py
|
python
|
Configuration.__init__
|
(self, isolated, load_only=None)
|
[] |
def __init__(self, isolated, load_only=None):
# type: (bool, Kind) -> None
super(Configuration, self).__init__()
_valid_load_only = [kinds.USER, kinds.GLOBAL, kinds.VENV, None]
if load_only not in _valid_load_only:
raise ConfigurationError(
"Got invalid value for load_only - should be one of {}".format(
", ".join(map(repr, _valid_load_only[:-1]))
)
)
self.isolated = isolated # type: bool
self.load_only = load_only # type: Optional[Kind]
# The order here determines the override order.
self._override_order = [
kinds.GLOBAL, kinds.USER, kinds.VENV, kinds.ENV, kinds.ENV_VAR
]
self._ignore_env_names = ["version", "help"]
# Because we keep track of where we got the data from
self._parsers = {
variant: [] for variant in self._override_order
} # type: Dict[Kind, List[Tuple[str, RawConfigParser]]]
self._config = {
variant: {} for variant in self._override_order
} # type: Dict[Kind, Dict[str, Any]]
self._modified_parsers = []
|
[
"def",
"__init__",
"(",
"self",
",",
"isolated",
",",
"load_only",
"=",
"None",
")",
":",
"# type: (bool, Kind) -> None",
"super",
"(",
"Configuration",
",",
"self",
")",
".",
"__init__",
"(",
")",
"_valid_load_only",
"=",
"[",
"kinds",
".",
"USER",
",",
"kinds",
".",
"GLOBAL",
",",
"kinds",
".",
"VENV",
",",
"None",
"]",
"if",
"load_only",
"not",
"in",
"_valid_load_only",
":",
"raise",
"ConfigurationError",
"(",
"\"Got invalid value for load_only - should be one of {}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"map",
"(",
"repr",
",",
"_valid_load_only",
"[",
":",
"-",
"1",
"]",
")",
")",
")",
")",
"self",
".",
"isolated",
"=",
"isolated",
"# type: bool",
"self",
".",
"load_only",
"=",
"load_only",
"# type: Optional[Kind]",
"# The order here determines the override order.",
"self",
".",
"_override_order",
"=",
"[",
"kinds",
".",
"GLOBAL",
",",
"kinds",
".",
"USER",
",",
"kinds",
".",
"VENV",
",",
"kinds",
".",
"ENV",
",",
"kinds",
".",
"ENV_VAR",
"]",
"self",
".",
"_ignore_env_names",
"=",
"[",
"\"version\"",
",",
"\"help\"",
"]",
"# Because we keep track of where we got the data from",
"self",
".",
"_parsers",
"=",
"{",
"variant",
":",
"[",
"]",
"for",
"variant",
"in",
"self",
".",
"_override_order",
"}",
"# type: Dict[Kind, List[Tuple[str, RawConfigParser]]]",
"self",
".",
"_config",
"=",
"{",
"variant",
":",
"{",
"}",
"for",
"variant",
"in",
"self",
".",
"_override_order",
"}",
"# type: Dict[Kind, Dict[str, Any]]",
"self",
".",
"_modified_parsers",
"=",
"[",
"]"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/pip/_internal/configuration.py#L80-L108
|
||||
mapbox/robosat
|
cbb1c73328183afd2d6351b7bfa3f430b73103ea
|
robosat/transforms.py
|
python
|
JointRandomVerticalFlip.__call__
|
(self, images, mask)
|
Randomly flips images and their mask top to bottom.
Args:
images: the PIL.Image image to transform.
mask: the PIL.Image mask to transform.
Returns:
The PIL.Image (images, mask) tuple with either images and mask flipped or none of them flipped.
|
Randomly flips images and their mask top to bottom.
|
[
"Randomly",
"flips",
"images",
"and",
"their",
"mask",
"top",
"to",
"bottom",
"."
] |
def __call__(self, images, mask):
"""Randomly flips images and their mask top to bottom.
Args:
images: the PIL.Image image to transform.
mask: the PIL.Image mask to transform.
Returns:
The PIL.Image (images, mask) tuple with either images and mask flipped or none of them flipped.
"""
if random.random() < self.p:
return [v.transpose(Image.FLIP_TOP_BOTTOM) for v in images], mask.transpose(Image.FLIP_TOP_BOTTOM)
else:
return images, mask
|
[
"def",
"__call__",
"(",
"self",
",",
"images",
",",
"mask",
")",
":",
"if",
"random",
".",
"random",
"(",
")",
"<",
"self",
".",
"p",
":",
"return",
"[",
"v",
".",
"transpose",
"(",
"Image",
".",
"FLIP_TOP_BOTTOM",
")",
"for",
"v",
"in",
"images",
"]",
",",
"mask",
".",
"transpose",
"(",
"Image",
".",
"FLIP_TOP_BOTTOM",
")",
"else",
":",
"return",
"images",
",",
"mask"
] |
https://github.com/mapbox/robosat/blob/cbb1c73328183afd2d6351b7bfa3f430b73103ea/robosat/transforms.py#L140-L154
|
||
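The joint transform simply applies the same PIL vertical flip to every image and to the mask whenever the random draw fires. A Pillow-only sketch of that operation with toy inputs (sizes and probability chosen arbitrarily):

import random
from PIL import Image

images = [Image.new("RGB", (8, 8)), Image.new("RGB", (8, 8))]
mask = Image.new("L", (8, 8))

p = 0.5  # stands in for self.p
if random.random() < p:
    images = [v.transpose(Image.FLIP_TOP_BOTTOM) for v in images]
    mask = mask.transpose(Image.FLIP_TOP_BOTTOM)
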
quantumlib/Cirq
|
89f88b01d69222d3f1ec14d649b7b3a85ed9211f
|
cirq-core/cirq/work/observable_settings.py
|
python
|
_max_weight_observable
|
(observables: Iterable[ops.PauliString])
|
return ops.PauliString(qubit_pauli_map)
|
Create a new observable that is compatible with all input observables
and has the maximum non-identity elements.
The returned PauliString is constructed by taking the non-identity
single-qubit Pauli at each qubit position.
This function will return `None` if the input observables do not share a
tensor product basis.
For example, the _max_weight_observable of ["XI", "IZ"] is "XZ". Asking for
the max weight observable of something like ["XI", "ZI"] will return None.
The returned value need not actually be present in the input observables.
Coefficients from input observables will be dropped.
|
Create a new observable that is compatible with all input observables
and has the maximum non-identity elements.
|
[
"Create",
"a",
"new",
"observable",
"that",
"is",
"compatible",
"with",
"all",
"input",
"observables",
"and",
"has",
"the",
"maximum",
"non",
"-",
"identity",
"elements",
"."
] |
def _max_weight_observable(observables: Iterable[ops.PauliString]) -> Union[None, ops.PauliString]:
"""Create a new observable that is compatible with all input observables
and has the maximum non-identity elements.
The returned PauliString is constructed by taking the non-identity
single-qubit Pauli at each qubit position.
This function will return `None` if the input observables do not share a
tensor product basis.
For example, the _max_weight_observable of ["XI", "IZ"] is "XZ". Asking for
the max weight observable of something like ["XI", "ZI"] will return None.
The returned value need not actually be present in the input observables.
Coefficients from input observables will be dropped.
"""
qubit_pauli_map: Dict[ops.Qid, ops.Pauli] = {}
for observable in observables:
for qubit, pauli in observable.items():
if qubit in qubit_pauli_map:
if qubit_pauli_map[qubit] != pauli:
return None
else:
qubit_pauli_map[qubit] = pauli
return ops.PauliString(qubit_pauli_map)
|
[
"def",
"_max_weight_observable",
"(",
"observables",
":",
"Iterable",
"[",
"ops",
".",
"PauliString",
"]",
")",
"->",
"Union",
"[",
"None",
",",
"ops",
".",
"PauliString",
"]",
":",
"qubit_pauli_map",
":",
"Dict",
"[",
"ops",
".",
"Qid",
",",
"ops",
".",
"Pauli",
"]",
"=",
"{",
"}",
"for",
"observable",
"in",
"observables",
":",
"for",
"qubit",
",",
"pauli",
"in",
"observable",
".",
"items",
"(",
")",
":",
"if",
"qubit",
"in",
"qubit_pauli_map",
":",
"if",
"qubit_pauli_map",
"[",
"qubit",
"]",
"!=",
"pauli",
":",
"return",
"None",
"else",
":",
"qubit_pauli_map",
"[",
"qubit",
"]",
"=",
"pauli",
"return",
"ops",
".",
"PauliString",
"(",
"qubit_pauli_map",
")"
] |
https://github.com/quantumlib/Cirq/blob/89f88b01d69222d3f1ec14d649b7b3a85ed9211f/cirq-core/cirq/work/observable_settings.py#L62-L86
|
|
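The merge performed by _max_weight_observable can be pictured with plain dictionaries: each observable contributes its qubit-to-Pauli assignments, and any conflict means the observables share no tensor product basis. A dependency-free sketch (strings stand in for cirq's Qid and Pauli objects):

def max_weight(observables):
    merged = {}
    for obs in observables:
        for qubit, pauli in obs.items():
            if qubit in merged and merged[qubit] != pauli:
                return None        # conflicting Paulis: no shared basis
            merged[qubit] = pauli
    return merged

print(max_weight([{"q0": "X"}, {"q1": "Z"}]))  # {'q0': 'X', 'q1': 'Z'}, i.e. "XZ"
print(max_weight([{"q0": "X"}, {"q0": "Z"}]))  # None, as for ["XI", "ZI"]
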
kubernetes-client/python
|
47b9da9de2d02b2b7a34fbe05afb44afd130d73a
|
kubernetes/client/models/v1beta1_endpoint_slice.py
|
python
|
V1beta1EndpointSlice.__ne__
|
(self, other)
|
return self.to_dict() != other.to_dict()
|
Returns true if both objects are not equal
|
Returns true if both objects are not equal
|
[
"Returns",
"true",
"if",
"both",
"objects",
"are",
"not",
"equal"
] |
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1EndpointSlice):
return True
return self.to_dict() != other.to_dict()
|
[
"def",
"__ne__",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"V1beta1EndpointSlice",
")",
":",
"return",
"True",
"return",
"self",
".",
"to_dict",
"(",
")",
"!=",
"other",
".",
"to_dict",
"(",
")"
] |
https://github.com/kubernetes-client/python/blob/47b9da9de2d02b2b7a34fbe05afb44afd130d73a/kubernetes/client/models/v1beta1_endpoint_slice.py#L257-L262
|
|
ramses-tech/ramses
|
ea2e1e896325b7256cdf5902309e05fd98e0c14c
|
ramses/acl.py
|
python
|
parse_acl
|
(acl_string)
|
return result_acl
|
Parse raw string :acl_string: of RAML-defined ACLs.
If :acl_string: is blank or None, all permissions are given.
Values of ACL action and principal are parsed using `actions` and
`special_principals` maps and are looked up after `strip()` and
`lower()`.
ACEs in :acl_string: may be separated by newlines or semicolons.
Action, principal and permission lists must be separated by spaces.
Permissions must be comma-separated.
E.g. 'allow everyone view,create,update' and 'deny authenticated delete'
:param acl_string: Raw RAML string containing defined ACEs.
|
Parse raw string :acl_string: of RAML-defined ACLs.
|
[
"Parse",
"raw",
"string",
":",
"acl_string",
":",
"of",
"RAML",
"-",
"defined",
"ACLs",
"."
] |
def parse_acl(acl_string):
""" Parse raw string :acl_string: of RAML-defined ACLs.
If :acl_string: is blank or None, all permissions are given.
Values of ACL action and principal are parsed using `actions` and
`special_principals` maps and are looked up after `strip()` and
`lower()`.
ACEs in :acl_string: may be separated by newlines or semicolons.
Action, principal and permission lists must be separated by spaces.
Permissions must be comma-separated.
E.g. 'allow everyone view,create,update' and 'deny authenticated delete'
:param acl_string: Raw RAML string containing defined ACEs.
"""
if not acl_string:
return [ALLOW_ALL]
aces_list = acl_string.replace('\n', ';').split(';')
aces_list = [ace.strip().split(' ', 2) for ace in aces_list if ace]
aces_list = [(a, b, c.split(',')) for a, b, c in aces_list]
result_acl = []
for action_str, princ_str, perms in aces_list:
# Process action
action_str = action_str.strip().lower()
action = actions.get(action_str)
if action is None:
raise ValueError(
'Unknown ACL action: {}. Valid actions: {}'.format(
action_str, list(actions.keys())))
# Process principal
princ_str = princ_str.strip().lower()
if princ_str in special_principals:
principal = special_principals[princ_str]
elif is_callable_tag(princ_str):
principal = resolve_to_callable(princ_str)
else:
principal = princ_str
# Process permissions
permissions = parse_permissions(perms)
result_acl.append((action, principal, permissions))
return result_acl
|
[
"def",
"parse_acl",
"(",
"acl_string",
")",
":",
"if",
"not",
"acl_string",
":",
"return",
"[",
"ALLOW_ALL",
"]",
"aces_list",
"=",
"acl_string",
".",
"replace",
"(",
"'\\n'",
",",
"';'",
")",
".",
"split",
"(",
"';'",
")",
"aces_list",
"=",
"[",
"ace",
".",
"strip",
"(",
")",
".",
"split",
"(",
"' '",
",",
"2",
")",
"for",
"ace",
"in",
"aces_list",
"if",
"ace",
"]",
"aces_list",
"=",
"[",
"(",
"a",
",",
"b",
",",
"c",
".",
"split",
"(",
"','",
")",
")",
"for",
"a",
",",
"b",
",",
"c",
"in",
"aces_list",
"]",
"result_acl",
"=",
"[",
"]",
"for",
"action_str",
",",
"princ_str",
",",
"perms",
"in",
"aces_list",
":",
"# Process action",
"action_str",
"=",
"action_str",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"action",
"=",
"actions",
".",
"get",
"(",
"action_str",
")",
"if",
"action",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Unknown ACL action: {}. Valid actions: {}'",
".",
"format",
"(",
"action_str",
",",
"list",
"(",
"actions",
".",
"keys",
"(",
")",
")",
")",
")",
"# Process principal",
"princ_str",
"=",
"princ_str",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"princ_str",
"in",
"special_principals",
":",
"principal",
"=",
"special_principals",
"[",
"princ_str",
"]",
"elif",
"is_callable_tag",
"(",
"princ_str",
")",
":",
"principal",
"=",
"resolve_to_callable",
"(",
"princ_str",
")",
"else",
":",
"principal",
"=",
"princ_str",
"# Process permissions",
"permissions",
"=",
"parse_permissions",
"(",
"perms",
")",
"result_acl",
".",
"append",
"(",
"(",
"action",
",",
"principal",
",",
"permissions",
")",
")",
"return",
"result_acl"
] |
https://github.com/ramses-tech/ramses/blob/ea2e1e896325b7256cdf5902309e05fd98e0c14c/ramses/acl.py#L61-L107
|
|
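The first half of parse_acl is pure string handling: split the RAML ACL string on newlines/semicolons, split each ACE into action, principal and permissions, then split the permissions on commas. A standalone sketch of just that step, using the example ACEs from the docstring (action/principal lookup and permission parsing are omitted):

acl_string = "allow everyone view,create,update\ndeny authenticated delete"

aces = acl_string.replace('\n', ';').split(';')
aces = [ace.strip().split(' ', 2) for ace in aces if ace]
aces = [(a, b, c.split(',')) for a, b, c in aces]

print(aces)
# [('allow', 'everyone', ['view', 'create', 'update']),
#  ('deny', 'authenticated', ['delete'])]
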
thinkle/gourmet
|
8af29c8ded24528030e5ae2ea3461f61c1e5a575
|
gourmet/plugins/nutritional_information/reccard_plugin.py
|
python
|
NutritionDisplayModule.nutrition_highlighting_label_changed
|
(self, *args)
|
[] |
def nutrition_highlighting_label_changed (self, *args):
self.nutritional_highlighting = True
self.recipe_display.prefs['nutrition_to_highlight'] = self.nutritionLabel.active_name
self.recipe_display.ingredientDisplay.display_ingredients()
|
[
"def",
"nutrition_highlighting_label_changed",
"(",
"self",
",",
"*",
"args",
")",
":",
"self",
".",
"nutritional_highlighting",
"=",
"True",
"self",
".",
"recipe_display",
".",
"prefs",
"[",
"'nutrition_to_highlight'",
"]",
"=",
"self",
".",
"nutritionLabel",
".",
"active_name",
"self",
".",
"recipe_display",
".",
"ingredientDisplay",
".",
"display_ingredients",
"(",
")"
] |
https://github.com/thinkle/gourmet/blob/8af29c8ded24528030e5ae2ea3461f61c1e5a575/gourmet/plugins/nutritional_information/reccard_plugin.py#L59-L62
|
||||
plotly/plotly.py
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
packages/python/plotly/plotly/graph_objs/heatmapgl/_stream.py
|
python
|
Stream.__init__
|
(self, arg=None, maxpoints=None, token=None, **kwargs)
|
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.heatmapgl.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
|
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.heatmapgl.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
|
[
"Construct",
"a",
"new",
"Stream",
"object",
"Parameters",
"----------",
"arg",
"dict",
"of",
"properties",
"compatible",
"with",
"this",
"constructor",
"or",
"an",
"instance",
"of",
":",
"class",
":",
"plotly",
".",
"graph_objs",
".",
"heatmapgl",
".",
"Stream",
"maxpoints",
"Sets",
"the",
"maximum",
"number",
"of",
"points",
"to",
"keep",
"on",
"the",
"plots",
"from",
"an",
"incoming",
"stream",
".",
"If",
"maxpoints",
"is",
"set",
"to",
"50",
"only",
"the",
"newest",
"50",
"points",
"will",
"be",
"displayed",
"on",
"the",
"plot",
".",
"token",
"The",
"stream",
"id",
"number",
"links",
"a",
"data",
"trace",
"on",
"a",
"plot",
"with",
"a",
"stream",
".",
"See",
"https",
":",
"//",
"chart",
"-",
"studio",
".",
"plotly",
".",
"com",
"/",
"settings",
"for",
"more",
"details",
"."
] |
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.heatmapgl.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super(Stream, self).__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.heatmapgl.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.heatmapgl.Stream`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("maxpoints", None)
_v = maxpoints if maxpoints is not None else _v
if _v is not None:
self["maxpoints"] = _v
_v = arg.pop("token", None)
_v = token if token is not None else _v
if _v is not None:
self["token"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
[
"def",
"__init__",
"(",
"self",
",",
"arg",
"=",
"None",
",",
"maxpoints",
"=",
"None",
",",
"token",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"Stream",
",",
"self",
")",
".",
"__init__",
"(",
"\"stream\"",
")",
"if",
"\"_parent\"",
"in",
"kwargs",
":",
"self",
".",
"_parent",
"=",
"kwargs",
"[",
"\"_parent\"",
"]",
"return",
"# Validate arg",
"# ------------",
"if",
"arg",
"is",
"None",
":",
"arg",
"=",
"{",
"}",
"elif",
"isinstance",
"(",
"arg",
",",
"self",
".",
"__class__",
")",
":",
"arg",
"=",
"arg",
".",
"to_plotly_json",
"(",
")",
"elif",
"isinstance",
"(",
"arg",
",",
"dict",
")",
":",
"arg",
"=",
"_copy",
".",
"copy",
"(",
"arg",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"\"\"\\\nThe first argument to the plotly.graph_objs.heatmapgl.Stream \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.heatmapgl.Stream`\"\"\"",
")",
"# Handle skip_invalid",
"# -------------------",
"self",
".",
"_skip_invalid",
"=",
"kwargs",
".",
"pop",
"(",
"\"skip_invalid\"",
",",
"False",
")",
"self",
".",
"_validate",
"=",
"kwargs",
".",
"pop",
"(",
"\"_validate\"",
",",
"True",
")",
"# Populate data dict with properties",
"# ----------------------------------",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"maxpoints\"",
",",
"None",
")",
"_v",
"=",
"maxpoints",
"if",
"maxpoints",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"maxpoints\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"token\"",
",",
"None",
")",
"_v",
"=",
"token",
"if",
"token",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"token\"",
"]",
"=",
"_v",
"# Process unknown kwargs",
"# ----------------------",
"self",
".",
"_process_kwargs",
"(",
"*",
"*",
"dict",
"(",
"arg",
",",
"*",
"*",
"kwargs",
")",
")",
"# Reset skip_invalid",
"# ------------------",
"self",
".",
"_skip_invalid",
"=",
"False"
] |
https://github.com/plotly/plotly.py/blob/cfad7862594b35965c0e000813bd7805e8494a5b/packages/python/plotly/plotly/graph_objs/heatmapgl/_stream.py#L73-L141
|
||
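A brief construction sketch for the Stream object above, assuming a plotly release that still ships the heatmapgl trace family (it has since been deprecated); the token value is a placeholder:

import plotly.graph_objs as go

stream = go.heatmapgl.Stream(maxpoints=50, token="YOUR_STREAM_TOKEN")
trace = go.Heatmapgl(z=[[1, 2], [3, 4]], stream=stream)
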
aleju/imgaug
|
0101108d4fed06bc5056c4a03e2bcb0216dac326
|
imgaug/augmentables/kps.py
|
python
|
KeypointsOnImage.clip_out_of_image_
|
(self)
|
return self.remove_out_of_image_fraction_(0.5)
|
Remove all KPs that are outside of the image plane.
This method exists for consistency with other augmentables, e.g.
bounding boxes.
Added in 0.4.0.
Returns
-------
imgaug.augmentables.kps.KeypointsOnImage
Keypoints that are inside the image plane.
The object may have been modified in-place.
|
Remove all KPs that are outside of the image plane.
|
[
"Remove",
"all",
"KPs",
"that",
"are",
"outside",
"of",
"the",
"image",
"plane",
"."
] |
def clip_out_of_image_(self):
"""Remove all KPs that are outside of the image plane.
This method exists for consistency with other augmentables, e.g.
bounding boxes.
Added in 0.4.0.
Returns
-------
imgaug.augmentables.kps.KeypointsOnImage
Keypoints that are inside the image plane.
The object may have been modified in-place.
"""
# we could use anything >0 here as the fraction
return self.remove_out_of_image_fraction_(0.5)
|
[
"def",
"clip_out_of_image_",
"(",
"self",
")",
":",
"# we could use anything >0 here as the fraction",
"return",
"self",
".",
"remove_out_of_image_fraction_",
"(",
"0.5",
")"
] |
https://github.com/aleju/imgaug/blob/0101108d4fed06bc5056c4a03e2bcb0216dac326/imgaug/augmentables/kps.py#L851-L867
|
|
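A usage sketch for the in-place clipping method, assuming imgaug >= 0.4.0 (the docstring notes the method was added in that release); coordinates are arbitrary examples:

from imgaug.augmentables.kps import Keypoint, KeypointsOnImage

kps = KeypointsOnImage(
    [Keypoint(x=10, y=20), Keypoint(x=-5, y=3)],  # second point lies outside
    shape=(32, 32, 3),
)
kps = kps.clip_out_of_image_()
print(len(kps.keypoints))  # 1
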
mlflow/mlflow
|
364aca7daf0fcee3ec407ae0b1b16d9cb3085081
|
mlflow/store/tracking/sqlalchemy_store.py
|
python
|
SqlAlchemyStore._list_experiments
|
(
self,
ids=None,
names=None,
view_type=ViewType.ACTIVE_ONLY,
max_results=None,
page_token=None,
eager=False,
)
|
:param eager: If ``True``, eagerly loads each experiment's tags. If ``False``, these tags
are not eagerly loaded and will be loaded if/when their corresponding
object properties are accessed from a resulting ``SqlExperiment`` object.
|
:param eager: If ``True``, eagerly loads each experiment's tags. If ``False``, these tags
are not eagerly loaded and will be loaded if/when their corresponding
object properties are accessed from a resulting ``SqlExperiment`` object.
|
[
":",
"param",
"eager",
":",
"If",
"True",
"eagerly",
"loads",
"each",
"experiments",
"s",
"tags",
".",
"If",
"False",
"these",
"tags",
"are",
"not",
"eagerly",
"loaded",
"and",
"will",
"be",
"loaded",
"if",
"/",
"when",
"their",
"corresponding",
"object",
"properties",
"are",
"accessed",
"from",
"a",
"resulting",
"SqlExperiment",
"object",
"."
] |
def _list_experiments(
self,
ids=None,
names=None,
view_type=ViewType.ACTIVE_ONLY,
max_results=None,
page_token=None,
eager=False,
):
"""
:param eager: If ``True``, eagerly loads each experiments's tags. If ``False``, these tags
are not eagerly loaded and will be loaded if/when their corresponding
object properties are accessed from a resulting ``SqlExperiment`` object.
"""
stages = LifecycleStage.view_type_to_stages(view_type)
conditions = [SqlExperiment.lifecycle_stage.in_(stages)]
if ids and len(ids) > 0:
int_ids = [int(eid) for eid in ids]
conditions.append(SqlExperiment.experiment_id.in_(int_ids))
if names and len(names) > 0:
conditions.append(SqlExperiment.name.in_(names))
max_results_for_query = None
if max_results is not None:
max_results_for_query = max_results + 1
def compute_next_token(current_size):
next_token = None
if max_results_for_query == current_size:
final_offset = offset + max_results
next_token = SearchUtils.create_page_token(final_offset)
return next_token
with self.ManagedSessionMaker() as session:
query_options = self._get_eager_experiment_query_options() if eager else []
if max_results is not None:
offset = SearchUtils.parse_start_offset_from_page_token(page_token)
queried_experiments = (
session.query(SqlExperiment)
.options(*query_options)
.filter(*conditions)
.offset(offset)
.limit(max_results_for_query)
.all()
)
else:
queried_experiments = (
session.query(SqlExperiment).options(*query_options).filter(*conditions).all()
)
experiments = [exp.to_mlflow_entity() for exp in queried_experiments]
if max_results is not None:
return PagedList(experiments[:max_results], compute_next_token(len(experiments)))
else:
return PagedList(experiments, None)
|
[
"def",
"_list_experiments",
"(",
"self",
",",
"ids",
"=",
"None",
",",
"names",
"=",
"None",
",",
"view_type",
"=",
"ViewType",
".",
"ACTIVE_ONLY",
",",
"max_results",
"=",
"None",
",",
"page_token",
"=",
"None",
",",
"eager",
"=",
"False",
",",
")",
":",
"stages",
"=",
"LifecycleStage",
".",
"view_type_to_stages",
"(",
"view_type",
")",
"conditions",
"=",
"[",
"SqlExperiment",
".",
"lifecycle_stage",
".",
"in_",
"(",
"stages",
")",
"]",
"if",
"ids",
"and",
"len",
"(",
"ids",
")",
">",
"0",
":",
"int_ids",
"=",
"[",
"int",
"(",
"eid",
")",
"for",
"eid",
"in",
"ids",
"]",
"conditions",
".",
"append",
"(",
"SqlExperiment",
".",
"experiment_id",
".",
"in_",
"(",
"int_ids",
")",
")",
"if",
"names",
"and",
"len",
"(",
"names",
")",
">",
"0",
":",
"conditions",
".",
"append",
"(",
"SqlExperiment",
".",
"name",
".",
"in_",
"(",
"names",
")",
")",
"max_results_for_query",
"=",
"None",
"if",
"max_results",
"is",
"not",
"None",
":",
"max_results_for_query",
"=",
"max_results",
"+",
"1",
"def",
"compute_next_token",
"(",
"current_size",
")",
":",
"next_token",
"=",
"None",
"if",
"max_results_for_query",
"==",
"current_size",
":",
"final_offset",
"=",
"offset",
"+",
"max_results",
"next_token",
"=",
"SearchUtils",
".",
"create_page_token",
"(",
"final_offset",
")",
"return",
"next_token",
"with",
"self",
".",
"ManagedSessionMaker",
"(",
")",
"as",
"session",
":",
"query_options",
"=",
"self",
".",
"_get_eager_experiment_query_options",
"(",
")",
"if",
"eager",
"else",
"[",
"]",
"if",
"max_results",
"is",
"not",
"None",
":",
"offset",
"=",
"SearchUtils",
".",
"parse_start_offset_from_page_token",
"(",
"page_token",
")",
"queried_experiments",
"=",
"(",
"session",
".",
"query",
"(",
"SqlExperiment",
")",
".",
"options",
"(",
"*",
"query_options",
")",
".",
"filter",
"(",
"*",
"conditions",
")",
".",
"offset",
"(",
"offset",
")",
".",
"limit",
"(",
"max_results_for_query",
")",
".",
"all",
"(",
")",
")",
"else",
":",
"queried_experiments",
"=",
"(",
"session",
".",
"query",
"(",
"SqlExperiment",
")",
".",
"options",
"(",
"*",
"query_options",
")",
".",
"filter",
"(",
"*",
"conditions",
")",
".",
"all",
"(",
")",
")",
"experiments",
"=",
"[",
"exp",
".",
"to_mlflow_entity",
"(",
")",
"for",
"exp",
"in",
"queried_experiments",
"]",
"if",
"max_results",
"is",
"not",
"None",
":",
"return",
"PagedList",
"(",
"experiments",
"[",
":",
"max_results",
"]",
",",
"compute_next_token",
"(",
"len",
"(",
"experiments",
")",
")",
")",
"else",
":",
"return",
"PagedList",
"(",
"experiments",
",",
"None",
")"
] |
https://github.com/mlflow/mlflow/blob/364aca7daf0fcee3ec407ae0b1b16d9cb3085081/mlflow/store/tracking/sqlalchemy_store.py#L255-L310
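The store above pages results by asking the query for one extra row beyond max_results: if that extra row comes back, another page exists and a token for its offset is issued. A minimal, dependency-free sketch of that pattern; the names list_page and fake_rows are illustrative and not part of MLflow.
def list_page(rows, max_results, offset=0):
    window = rows[offset:offset + max_results + 1]  # ask for one extra row
    page = window[:max_results]
    # The extra row coming back means at least one more page exists.
    next_offset = offset + max_results if len(window) > max_results else None
    return page, next_offset

fake_rows = list(range(7))
print(list_page(fake_rows, max_results=3))             # ([0, 1, 2], 3)
print(list_page(fake_rows, max_results=3, offset=3))   # ([3, 4, 5], 6)
print(list_page(fake_rows, max_results=3, offset=6))   # ([6], None)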
|
||
transcranial/jupyter-themer
|
c12a953315734b90147a078750cfbe323eda340d
|
jupythemer/jupythemer.py
|
python
|
run
|
(args=None)
|
[] |
def run(args=None):
if args is None:
parser = argparse.ArgumentParser(description='Jupyter notebook themer.')
parser.add_argument('-c', '--color', required=False, dest='color', default=None, help='color style')
parser.add_argument('-l', '--layout', required=False, dest='layout', default=None, help='layout style')
parser.add_argument('-t', '--typography', required=False, dest='typography',
default=None, help='typography style')
parser.add_argument('-f', '--font', required=False, dest='font', default=None, help='code font family')
parser.add_argument('-b', '--background', required=False, dest='background',
default=None, help='background theme styling')
parser.add_argument('-s', '--show', required=False, dest='show',
default=None, help='show available choices')
parser.add_argument('-p', '--css_path', required=False, dest='css_path',
default=custom_css_filepath, help='custom css path.(default:%s)' % custom_css_filepath)
args = parser.parse_args()
if (args.color is None
and args.layout is None
and args.typography is None
and args.font is None
and args.background is None
and args.show is None):
print('Jupyter notebook reverted to default style.')
write_to_css('', args.css_path)
sys.exit()
if args.show in ['color', 'layout', 'typography', 'font', 'background']:
if args.show == 'font':
args.show = 'code_font'
options = glob.glob('{}/styles/{}/*.css'.format(current_dir, args.show))
for option in sorted(options):
print(os.path.basename(option).split('.')[0])
sys.exit()
content_all = ''
if args.typography is not None:
try:
with open('{}/styles/typography/{}.import'.format(current_dir, args.typography), 'r') as f_color:
content_all += f_color.read() + '\n'
except:
print('Bad argument passed to --typography')
sys.exit(1)
if args.font is not None:
try:
with open('{}/styles/code_font/{}.import'.format(current_dir, args.font), 'r') as f_font:
content_all += f_font.read() + '\n'
except:
print('Bad argument passed to --font')
sys.exit(1)
if args.color is not None:
try:
with open('{}/styles/color/{}.css'.format(current_dir, args.color), 'r') as f_color:
content_all += f_color.read() + '\n'
except:
print('Bad argument passed to --color')
sys.exit(1)
if args.layout is not None:
try:
with open('{}/styles/layout/{}.css'.format(current_dir, args.layout), 'r') as f_layout:
content_all += f_layout.read() + '\n'
except:
print('Bad argument passed to --layout')
sys.exit(1)
if args.typography is not None:
try:
with open('{}/styles/typography/{}.css'.format(current_dir, args.typography), 'r') as f_typography:
content_all += f_typography.read() + '\n'
except:
print('Bad argument passed to --typography')
sys.exit(1)
if args.font is not None:
try:
with open('{}/styles/code_font/{}.css'.format(current_dir, args.font), 'r') as f_font:
content_all += f_font.read() + '\n'
except:
print('Bad argument passed to --font')
sys.exit(1)
if args.background is not None:
try:
with open('{}/styles/background/{}.css'.format(current_dir, args.background), 'r') as f_background:
content_all += f_background.read() + '\n'
except:
print('Bad argument passed to --background')
sys.exit(1)
write_to_css(content_all, args.css_path)
print('Custom jupyter notebook theme created - refresh any open jupyter notebooks to apply theme.')
|
[
"def",
"run",
"(",
"args",
"=",
"None",
")",
":",
"if",
"args",
"is",
"None",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Jupyter notebook themer.'",
")",
"parser",
".",
"add_argument",
"(",
"'-c'",
",",
"'--color'",
",",
"required",
"=",
"False",
",",
"dest",
"=",
"'color'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"'color style'",
")",
"parser",
".",
"add_argument",
"(",
"'-l'",
",",
"'--layout'",
",",
"required",
"=",
"False",
",",
"dest",
"=",
"'layout'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"'layout style'",
")",
"parser",
".",
"add_argument",
"(",
"'-t'",
",",
"'--typography'",
",",
"required",
"=",
"False",
",",
"dest",
"=",
"'typography'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"'typography style'",
")",
"parser",
".",
"add_argument",
"(",
"'-f'",
",",
"'--font'",
",",
"required",
"=",
"False",
",",
"dest",
"=",
"'font'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"'code font family'",
")",
"parser",
".",
"add_argument",
"(",
"'-b'",
",",
"'--background'",
",",
"required",
"=",
"False",
",",
"dest",
"=",
"'background'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"'background theme styling'",
")",
"parser",
".",
"add_argument",
"(",
"'-s'",
",",
"'--show'",
",",
"required",
"=",
"False",
",",
"dest",
"=",
"'show'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"'show available choices'",
")",
"parser",
".",
"add_argument",
"(",
"'-p'",
",",
"'--css_path'",
",",
"required",
"=",
"False",
",",
"dest",
"=",
"'css_path'",
",",
"default",
"=",
"custom_css_filepath",
",",
"help",
"=",
"'custom css path.(default:%s)'",
"%",
"custom_css_filepath",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"(",
"args",
".",
"color",
"is",
"None",
"and",
"args",
".",
"layout",
"is",
"None",
"and",
"args",
".",
"typography",
"is",
"None",
"and",
"args",
".",
"font",
"is",
"None",
"and",
"args",
".",
"background",
"is",
"None",
"and",
"args",
".",
"show",
"is",
"None",
")",
":",
"print",
"(",
"'Jupyter notebook reverted to default style.'",
")",
"write_to_css",
"(",
"''",
",",
"args",
".",
"css_path",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"args",
".",
"show",
"in",
"[",
"'color'",
",",
"'layout'",
",",
"'typography'",
",",
"'font'",
",",
"'background'",
"]",
":",
"if",
"args",
".",
"show",
"==",
"'font'",
":",
"args",
".",
"show",
"=",
"'code_font'",
"options",
"=",
"glob",
".",
"glob",
"(",
"'{}/styles/{}/*.css'",
".",
"format",
"(",
"current_dir",
",",
"args",
".",
"show",
")",
")",
"for",
"option",
"in",
"sorted",
"(",
"options",
")",
":",
"print",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"option",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
")",
"sys",
".",
"exit",
"(",
")",
"content_all",
"=",
"''",
"if",
"args",
".",
"typography",
"is",
"not",
"None",
":",
"try",
":",
"with",
"open",
"(",
"'{}/styles/typography/{}.import'",
".",
"format",
"(",
"current_dir",
",",
"args",
".",
"typography",
")",
",",
"'r'",
")",
"as",
"f_color",
":",
"content_all",
"+=",
"f_color",
".",
"read",
"(",
")",
"+",
"'\\n'",
"except",
":",
"print",
"(",
"'Bad argument passed to --typography'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"args",
".",
"font",
"is",
"not",
"None",
":",
"try",
":",
"with",
"open",
"(",
"'{}/styles/code_font/{}.import'",
".",
"format",
"(",
"current_dir",
",",
"args",
".",
"font",
")",
",",
"'r'",
")",
"as",
"f_font",
":",
"content_all",
"+=",
"f_font",
".",
"read",
"(",
")",
"+",
"'\\n'",
"except",
":",
"print",
"(",
"'Bad argument passed to --font'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"args",
".",
"color",
"is",
"not",
"None",
":",
"try",
":",
"with",
"open",
"(",
"'{}/styles/color/{}.css'",
".",
"format",
"(",
"current_dir",
",",
"args",
".",
"color",
")",
",",
"'r'",
")",
"as",
"f_color",
":",
"content_all",
"+=",
"f_color",
".",
"read",
"(",
")",
"+",
"'\\n'",
"except",
":",
"print",
"(",
"'Bad argument passed to --color'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"args",
".",
"layout",
"is",
"not",
"None",
":",
"try",
":",
"with",
"open",
"(",
"'{}/styles/layout/{}.css'",
".",
"format",
"(",
"current_dir",
",",
"args",
".",
"layout",
")",
",",
"'r'",
")",
"as",
"f_layout",
":",
"content_all",
"+=",
"f_layout",
".",
"read",
"(",
")",
"+",
"'\\n'",
"except",
":",
"print",
"(",
"'Bad argument passed to --layout'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"args",
".",
"typography",
"is",
"not",
"None",
":",
"try",
":",
"with",
"open",
"(",
"'{}/styles/typography/{}.css'",
".",
"format",
"(",
"current_dir",
",",
"args",
".",
"typography",
")",
",",
"'r'",
")",
"as",
"f_typography",
":",
"content_all",
"+=",
"f_typography",
".",
"read",
"(",
")",
"+",
"'\\n'",
"except",
":",
"print",
"(",
"'Bad argument passed to --typography'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"args",
".",
"font",
"is",
"not",
"None",
":",
"try",
":",
"with",
"open",
"(",
"'{}/styles/code_font/{}.css'",
".",
"format",
"(",
"current_dir",
",",
"args",
".",
"font",
")",
",",
"'r'",
")",
"as",
"f_font",
":",
"content_all",
"+=",
"f_font",
".",
"read",
"(",
")",
"+",
"'\\n'",
"except",
":",
"print",
"(",
"'Bad argument passed to --font'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"args",
".",
"background",
"is",
"not",
"None",
":",
"try",
":",
"with",
"open",
"(",
"'{}/styles/background/{}.css'",
".",
"format",
"(",
"current_dir",
",",
"args",
".",
"background",
")",
",",
"'r'",
")",
"as",
"f_background",
":",
"content_all",
"+=",
"f_background",
".",
"read",
"(",
")",
"+",
"'\\n'",
"except",
":",
"print",
"(",
"'Bad argument passed to --background'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"write_to_css",
"(",
"content_all",
",",
"args",
".",
"css_path",
")",
"print",
"(",
"'Custom jupyter notebook theme created - refresh any open jupyter notebooks to apply theme.'",
")"
] |
https://github.com/transcranial/jupyter-themer/blob/c12a953315734b90147a078750cfbe323eda340d/jupythemer/jupythemer.py#L24-L117
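The body of run() repeats the same open-read-append-or-exit block once per style kind. A hedged sketch of how those blocks collapse into a data-driven loop; build_css and the (subdir, name, extension) tuples are assumptions made for illustration, not helpers from the package.
import os
import sys

def build_css(current_dir, selections):
    # selections: iterable of (subdir, name, extension) tuples, e.g.
    # [('typography', 'serif', 'import'), ('color', 'grade3', 'css')]
    content = ''
    for subdir, name, ext in selections:
        if name is None:
            continue
        path = os.path.join(current_dir, 'styles', subdir, '{}.{}'.format(name, ext))
        try:
            with open(path) as fragment:
                content += fragment.read() + '\n'
        except IOError:
            print('Bad argument passed to --{}'.format(subdir))
            sys.exit(1)
    return content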
|
||||
NervanaSystems/neon
|
8c3fb8a93b4a89303467b25817c60536542d08bd
|
examples/ssd/datasets/ingest_pascalvoc.py
|
python
|
get_tag_list
|
(index_file)
|
return tag_list
|
[] |
def get_tag_list(index_file):
with open(index_file) as f:
tag_list = [tag.rstrip(os.linesep) for tag in f]
return tag_list
|
[
"def",
"get_tag_list",
"(",
"index_file",
")",
":",
"with",
"open",
"(",
"index_file",
")",
"as",
"f",
":",
"tag_list",
"=",
"[",
"tag",
".",
"rstrip",
"(",
"os",
".",
"linesep",
")",
"for",
"tag",
"in",
"f",
"]",
"return",
"tag_list"
] |
https://github.com/NervanaSystems/neon/blob/8c3fb8a93b4a89303467b25817c60536542d08bd/examples/ssd/datasets/ingest_pascalvoc.py#L153-L157
|
|||
SHI-Labs/Decoupled-Classification-Refinement
|
16202b48eb9cbf79a9b130a98e8c209d4f24693e
|
faster_rcnn/train_end2end.py
|
python
|
train_net
|
(args, ctx, pretrained, epoch, prefix, begin_epoch, end_epoch, lr, lr_step)
|
[] |
def train_net(args, ctx, pretrained, epoch, prefix, begin_epoch, end_epoch, lr, lr_step):
logger, final_output_path = create_logger(config.output_path, args.cfg, config.dataset.image_set)
prefix = os.path.join(final_output_path, prefix)
# load symbol
shutil.copy2(os.path.join(curr_path, 'symbols', config.symbol + '.py'), final_output_path)
sym_instance = eval(config.symbol + '.' + config.symbol)()
sym = sym_instance.get_symbol(config, is_train=True)
feat_sym = sym.get_internals()['rpn_cls_score_output']
# setup multi-gpu
batch_size = len(ctx)
input_batch_size = config.TRAIN.BATCH_IMAGES * batch_size
# print config
pprint.pprint(config)
logger.info('training config:{}\n'.format(pprint.pformat(config)))
# load dataset and prepare imdb for training
image_sets = [iset for iset in config.dataset.image_set.split('+')]
roidbs = [load_gt_roidb(config.dataset.dataset, image_set, config.dataset.root_path, config.dataset.dataset_path,
flip=config.TRAIN.FLIP)
for image_set in image_sets]
roidb = merge_roidb(roidbs)
roidb = filter_roidb(roidb, config)
# load training data
train_data = AnchorLoader(feat_sym, roidb, config, batch_size=input_batch_size, shuffle=config.TRAIN.SHUFFLE, ctx=ctx,
feat_stride=config.network.RPN_FEAT_STRIDE, anchor_scales=config.network.ANCHOR_SCALES,
anchor_ratios=config.network.ANCHOR_RATIOS, aspect_grouping=config.TRAIN.ASPECT_GROUPING)
# infer max shape
max_data_shape = [('data', (config.TRAIN.BATCH_IMAGES, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]
max_data_shape, max_label_shape = train_data.infer_shape(max_data_shape)
max_data_shape.append(('gt_boxes', (config.TRAIN.BATCH_IMAGES, 100, 5)))
print('providing maximum shape', max_data_shape, max_label_shape)
data_shape_dict = dict(train_data.provide_data_single + train_data.provide_label_single)
pprint.pprint(data_shape_dict)
sym_instance.infer_shape(data_shape_dict)
# load and initialize params
if config.TRAIN.RESUME:
print('continue training from ', begin_epoch)
arg_params, aux_params = load_param(prefix, begin_epoch, convert=True)
else:
arg_params, aux_params = load_param(pretrained, epoch, convert=True)
sym_instance.init_weight(config, arg_params, aux_params)
# check parameter shapes
sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict)
# create solver
fixed_param_prefix = config.network.FIXED_PARAMS
data_names = [k[0] for k in train_data.provide_data_single]
label_names = [k[0] for k in train_data.provide_label_single]
mod = MutableModule(sym, data_names=data_names, label_names=label_names,
logger=logger, context=ctx, max_data_shapes=[max_data_shape for _ in range(batch_size)],
max_label_shapes=[max_label_shape for _ in range(batch_size)], fixed_param_prefix=fixed_param_prefix)
if config.TRAIN.RESUME:
mod._preload_opt_states = '%s-%04d.states'%(prefix, begin_epoch)
# decide training params
# metric
rpn_eval_metric = metric.RPNAccMetric()
rpn_cls_metric = metric.RPNLogLossMetric()
rpn_bbox_metric = metric.RPNL1LossMetric()
eval_metric = metric.RCNNAccMetric(config)
cls_metric = metric.RCNNLogLossMetric(config)
bbox_metric = metric.RCNNL1LossMetric(config)
eval_metrics = mx.metric.CompositeEvalMetric()
# rpn_eval_metric, rpn_cls_metric, rpn_bbox_metric, eval_metric, cls_metric, bbox_metric
for child_metric in [rpn_eval_metric, rpn_cls_metric, rpn_bbox_metric, eval_metric, cls_metric, bbox_metric]:
eval_metrics.add(child_metric)
# callback
batch_end_callback = callback.Speedometer(train_data.batch_size, frequent=args.frequent)
means = np.tile(np.array(config.TRAIN.BBOX_MEANS), 2 if config.CLASS_AGNOSTIC else config.dataset.NUM_CLASSES)
stds = np.tile(np.array(config.TRAIN.BBOX_STDS), 2 if config.CLASS_AGNOSTIC else config.dataset.NUM_CLASSES)
epoch_end_callback = [mx.callback.module_checkpoint(mod, prefix, period=1, save_optimizer_states=True), callback.do_checkpoint(prefix, means, stds)]
# decide learning rate
base_lr = lr
lr_factor = config.TRAIN.lr_factor
lr_epoch = [float(epoch) for epoch in lr_step.split(',')]
lr_epoch_diff = [epoch - begin_epoch for epoch in lr_epoch if epoch > begin_epoch]
lr = base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff)))
lr_iters = [int(epoch * len(roidb) / batch_size) for epoch in lr_epoch_diff]
print('lr', lr, 'lr_epoch_diff', lr_epoch_diff, 'lr_iters', lr_iters)
lr_scheduler = WarmupMultiFactorScheduler(lr_iters, lr_factor, config.TRAIN.warmup, config.TRAIN.warmup_lr, config.TRAIN.warmup_step)
# optimizer
optimizer_params = {'momentum': config.TRAIN.momentum,
'wd': config.TRAIN.wd,
'learning_rate': lr,
'lr_scheduler': lr_scheduler,
'rescale_grad': 1.0,
'clip_gradient': None}
if not isinstance(train_data, PrefetchingIter):
train_data = PrefetchingIter(train_data)
# train
mod.fit(train_data, eval_metric=eval_metrics, epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback, kvstore=config.default.kvstore,
optimizer='sgd', optimizer_params=optimizer_params,
arg_params=arg_params, aux_params=aux_params, begin_epoch=begin_epoch, num_epoch=end_epoch)
|
[
"def",
"train_net",
"(",
"args",
",",
"ctx",
",",
"pretrained",
",",
"epoch",
",",
"prefix",
",",
"begin_epoch",
",",
"end_epoch",
",",
"lr",
",",
"lr_step",
")",
":",
"logger",
",",
"final_output_path",
"=",
"create_logger",
"(",
"config",
".",
"output_path",
",",
"args",
".",
"cfg",
",",
"config",
".",
"dataset",
".",
"image_set",
")",
"prefix",
"=",
"os",
".",
"path",
".",
"join",
"(",
"final_output_path",
",",
"prefix",
")",
"# load symbol",
"shutil",
".",
"copy2",
"(",
"os",
".",
"path",
".",
"join",
"(",
"curr_path",
",",
"'symbols'",
",",
"config",
".",
"symbol",
"+",
"'.py'",
")",
",",
"final_output_path",
")",
"sym_instance",
"=",
"eval",
"(",
"config",
".",
"symbol",
"+",
"'.'",
"+",
"config",
".",
"symbol",
")",
"(",
")",
"sym",
"=",
"sym_instance",
".",
"get_symbol",
"(",
"config",
",",
"is_train",
"=",
"True",
")",
"feat_sym",
"=",
"sym",
".",
"get_internals",
"(",
")",
"[",
"'rpn_cls_score_output'",
"]",
"# setup multi-gpu",
"batch_size",
"=",
"len",
"(",
"ctx",
")",
"input_batch_size",
"=",
"config",
".",
"TRAIN",
".",
"BATCH_IMAGES",
"*",
"batch_size",
"# print config",
"pprint",
".",
"pprint",
"(",
"config",
")",
"logger",
".",
"info",
"(",
"'training config:{}\\n'",
".",
"format",
"(",
"pprint",
".",
"pformat",
"(",
"config",
")",
")",
")",
"# load dataset and prepare imdb for training",
"image_sets",
"=",
"[",
"iset",
"for",
"iset",
"in",
"config",
".",
"dataset",
".",
"image_set",
".",
"split",
"(",
"'+'",
")",
"]",
"roidbs",
"=",
"[",
"load_gt_roidb",
"(",
"config",
".",
"dataset",
".",
"dataset",
",",
"image_set",
",",
"config",
".",
"dataset",
".",
"root_path",
",",
"config",
".",
"dataset",
".",
"dataset_path",
",",
"flip",
"=",
"config",
".",
"TRAIN",
".",
"FLIP",
")",
"for",
"image_set",
"in",
"image_sets",
"]",
"roidb",
"=",
"merge_roidb",
"(",
"roidbs",
")",
"roidb",
"=",
"filter_roidb",
"(",
"roidb",
",",
"config",
")",
"# load training data",
"train_data",
"=",
"AnchorLoader",
"(",
"feat_sym",
",",
"roidb",
",",
"config",
",",
"batch_size",
"=",
"input_batch_size",
",",
"shuffle",
"=",
"config",
".",
"TRAIN",
".",
"SHUFFLE",
",",
"ctx",
"=",
"ctx",
",",
"feat_stride",
"=",
"config",
".",
"network",
".",
"RPN_FEAT_STRIDE",
",",
"anchor_scales",
"=",
"config",
".",
"network",
".",
"ANCHOR_SCALES",
",",
"anchor_ratios",
"=",
"config",
".",
"network",
".",
"ANCHOR_RATIOS",
",",
"aspect_grouping",
"=",
"config",
".",
"TRAIN",
".",
"ASPECT_GROUPING",
")",
"# infer max shape",
"max_data_shape",
"=",
"[",
"(",
"'data'",
",",
"(",
"config",
".",
"TRAIN",
".",
"BATCH_IMAGES",
",",
"3",
",",
"max",
"(",
"[",
"v",
"[",
"0",
"]",
"for",
"v",
"in",
"config",
".",
"SCALES",
"]",
")",
",",
"max",
"(",
"[",
"v",
"[",
"1",
"]",
"for",
"v",
"in",
"config",
".",
"SCALES",
"]",
")",
")",
")",
"]",
"max_data_shape",
",",
"max_label_shape",
"=",
"train_data",
".",
"infer_shape",
"(",
"max_data_shape",
")",
"max_data_shape",
".",
"append",
"(",
"(",
"'gt_boxes'",
",",
"(",
"config",
".",
"TRAIN",
".",
"BATCH_IMAGES",
",",
"100",
",",
"5",
")",
")",
")",
"print",
"'providing maximum shape'",
",",
"max_data_shape",
",",
"max_label_shape",
"data_shape_dict",
"=",
"dict",
"(",
"train_data",
".",
"provide_data_single",
"+",
"train_data",
".",
"provide_label_single",
")",
"pprint",
".",
"pprint",
"(",
"data_shape_dict",
")",
"sym_instance",
".",
"infer_shape",
"(",
"data_shape_dict",
")",
"# load and initialize params",
"if",
"config",
".",
"TRAIN",
".",
"RESUME",
":",
"print",
"(",
"'continue training from '",
",",
"begin_epoch",
")",
"arg_params",
",",
"aux_params",
"=",
"load_param",
"(",
"prefix",
",",
"begin_epoch",
",",
"convert",
"=",
"True",
")",
"else",
":",
"arg_params",
",",
"aux_params",
"=",
"load_param",
"(",
"pretrained",
",",
"epoch",
",",
"convert",
"=",
"True",
")",
"sym_instance",
".",
"init_weight",
"(",
"config",
",",
"arg_params",
",",
"aux_params",
")",
"# check parameter shapes",
"sym_instance",
".",
"check_parameter_shapes",
"(",
"arg_params",
",",
"aux_params",
",",
"data_shape_dict",
")",
"# create solver",
"fixed_param_prefix",
"=",
"config",
".",
"network",
".",
"FIXED_PARAMS",
"data_names",
"=",
"[",
"k",
"[",
"0",
"]",
"for",
"k",
"in",
"train_data",
".",
"provide_data_single",
"]",
"label_names",
"=",
"[",
"k",
"[",
"0",
"]",
"for",
"k",
"in",
"train_data",
".",
"provide_label_single",
"]",
"mod",
"=",
"MutableModule",
"(",
"sym",
",",
"data_names",
"=",
"data_names",
",",
"label_names",
"=",
"label_names",
",",
"logger",
"=",
"logger",
",",
"context",
"=",
"ctx",
",",
"max_data_shapes",
"=",
"[",
"max_data_shape",
"for",
"_",
"in",
"range",
"(",
"batch_size",
")",
"]",
",",
"max_label_shapes",
"=",
"[",
"max_label_shape",
"for",
"_",
"in",
"range",
"(",
"batch_size",
")",
"]",
",",
"fixed_param_prefix",
"=",
"fixed_param_prefix",
")",
"if",
"config",
".",
"TRAIN",
".",
"RESUME",
":",
"mod",
".",
"_preload_opt_states",
"=",
"'%s-%04d.states'",
"%",
"(",
"prefix",
",",
"begin_epoch",
")",
"# decide training params",
"# metric",
"rpn_eval_metric",
"=",
"metric",
".",
"RPNAccMetric",
"(",
")",
"rpn_cls_metric",
"=",
"metric",
".",
"RPNLogLossMetric",
"(",
")",
"rpn_bbox_metric",
"=",
"metric",
".",
"RPNL1LossMetric",
"(",
")",
"eval_metric",
"=",
"metric",
".",
"RCNNAccMetric",
"(",
"config",
")",
"cls_metric",
"=",
"metric",
".",
"RCNNLogLossMetric",
"(",
"config",
")",
"bbox_metric",
"=",
"metric",
".",
"RCNNL1LossMetric",
"(",
"config",
")",
"eval_metrics",
"=",
"mx",
".",
"metric",
".",
"CompositeEvalMetric",
"(",
")",
"# rpn_eval_metric, rpn_cls_metric, rpn_bbox_metric, eval_metric, cls_metric, bbox_metric",
"for",
"child_metric",
"in",
"[",
"rpn_eval_metric",
",",
"rpn_cls_metric",
",",
"rpn_bbox_metric",
",",
"eval_metric",
",",
"cls_metric",
",",
"bbox_metric",
"]",
":",
"eval_metrics",
".",
"add",
"(",
"child_metric",
")",
"# callback",
"batch_end_callback",
"=",
"callback",
".",
"Speedometer",
"(",
"train_data",
".",
"batch_size",
",",
"frequent",
"=",
"args",
".",
"frequent",
")",
"means",
"=",
"np",
".",
"tile",
"(",
"np",
".",
"array",
"(",
"config",
".",
"TRAIN",
".",
"BBOX_MEANS",
")",
",",
"2",
"if",
"config",
".",
"CLASS_AGNOSTIC",
"else",
"config",
".",
"dataset",
".",
"NUM_CLASSES",
")",
"stds",
"=",
"np",
".",
"tile",
"(",
"np",
".",
"array",
"(",
"config",
".",
"TRAIN",
".",
"BBOX_STDS",
")",
",",
"2",
"if",
"config",
".",
"CLASS_AGNOSTIC",
"else",
"config",
".",
"dataset",
".",
"NUM_CLASSES",
")",
"epoch_end_callback",
"=",
"[",
"mx",
".",
"callback",
".",
"module_checkpoint",
"(",
"mod",
",",
"prefix",
",",
"period",
"=",
"1",
",",
"save_optimizer_states",
"=",
"True",
")",
",",
"callback",
".",
"do_checkpoint",
"(",
"prefix",
",",
"means",
",",
"stds",
")",
"]",
"# decide learning rate",
"base_lr",
"=",
"lr",
"lr_factor",
"=",
"config",
".",
"TRAIN",
".",
"lr_factor",
"lr_epoch",
"=",
"[",
"float",
"(",
"epoch",
")",
"for",
"epoch",
"in",
"lr_step",
".",
"split",
"(",
"','",
")",
"]",
"lr_epoch_diff",
"=",
"[",
"epoch",
"-",
"begin_epoch",
"for",
"epoch",
"in",
"lr_epoch",
"if",
"epoch",
">",
"begin_epoch",
"]",
"lr",
"=",
"base_lr",
"*",
"(",
"lr_factor",
"**",
"(",
"len",
"(",
"lr_epoch",
")",
"-",
"len",
"(",
"lr_epoch_diff",
")",
")",
")",
"lr_iters",
"=",
"[",
"int",
"(",
"epoch",
"*",
"len",
"(",
"roidb",
")",
"/",
"batch_size",
")",
"for",
"epoch",
"in",
"lr_epoch_diff",
"]",
"print",
"(",
"'lr'",
",",
"lr",
",",
"'lr_epoch_diff'",
",",
"lr_epoch_diff",
",",
"'lr_iters'",
",",
"lr_iters",
")",
"lr_scheduler",
"=",
"WarmupMultiFactorScheduler",
"(",
"lr_iters",
",",
"lr_factor",
",",
"config",
".",
"TRAIN",
".",
"warmup",
",",
"config",
".",
"TRAIN",
".",
"warmup_lr",
",",
"config",
".",
"TRAIN",
".",
"warmup_step",
")",
"# optimizer",
"optimizer_params",
"=",
"{",
"'momentum'",
":",
"config",
".",
"TRAIN",
".",
"momentum",
",",
"'wd'",
":",
"config",
".",
"TRAIN",
".",
"wd",
",",
"'learning_rate'",
":",
"lr",
",",
"'lr_scheduler'",
":",
"lr_scheduler",
",",
"'rescale_grad'",
":",
"1.0",
",",
"'clip_gradient'",
":",
"None",
"}",
"if",
"not",
"isinstance",
"(",
"train_data",
",",
"PrefetchingIter",
")",
":",
"train_data",
"=",
"PrefetchingIter",
"(",
"train_data",
")",
"# train",
"mod",
".",
"fit",
"(",
"train_data",
",",
"eval_metric",
"=",
"eval_metrics",
",",
"epoch_end_callback",
"=",
"epoch_end_callback",
",",
"batch_end_callback",
"=",
"batch_end_callback",
",",
"kvstore",
"=",
"config",
".",
"default",
".",
"kvstore",
",",
"optimizer",
"=",
"'sgd'",
",",
"optimizer_params",
"=",
"optimizer_params",
",",
"arg_params",
"=",
"arg_params",
",",
"aux_params",
"=",
"aux_params",
",",
"begin_epoch",
"=",
"begin_epoch",
",",
"num_epoch",
"=",
"end_epoch",
")"
] |
https://github.com/SHI-Labs/Decoupled-Classification-Refinement/blob/16202b48eb9cbf79a9b130a98e8c209d4f24693e/faster_rcnn/train_end2end.py#L57-L162
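The learning-rate bookkeeping above converts epoch boundaries into iteration counts and pre-applies any decay steps that were already passed before begin_epoch. A self-contained sketch with made-up numbers; lr_schedule is an illustrative helper, not a function from this repository.
def lr_schedule(base_lr, lr_factor, lr_step, begin_epoch, num_images, batch_size):
    lr_epoch = [float(e) for e in lr_step.split(',')]
    lr_epoch_diff = [e - begin_epoch for e in lr_epoch if e > begin_epoch]
    lr = base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff)))
    lr_iters = [int(e * num_images / batch_size) for e in lr_epoch_diff]
    return lr, lr_iters

# Resuming at epoch 5 with decay steps at epochs 4 and 7: the step at 4 has
# already been applied, so the returned rate is roughly 0.001 * 0.1 = 1e-4,
# and the remaining step at epoch 7 maps to iteration 2500.
print(lr_schedule(0.001, 0.1, '4,7', 5, num_images=10000, batch_size=8))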
|
||||
ZZUTK/SRNTT
|
c9a2cf95534e2d3c2c2210718c9903c9f389d67d
|
SRNTT/tensorlayer/db.py
|
python
|
TensorDB._print_dict
|
(self, args)
|
return string
|
[] |
def _print_dict(self, args):
# return " / ".join(str(key) + ": "+ str(value) for key, value in args.items())
string = ''
for key, value in args.items():
if key != '_id':
string += str(key) + ": " + str(value) + " / "
return string
|
[
"def",
"_print_dict",
"(",
"self",
",",
"args",
")",
":",
"# return \" / \".join(str(key) + \": \"+ str(value) for key, value in args.items())",
"string",
"=",
"''",
"for",
"key",
",",
"value",
"in",
"args",
".",
"items",
"(",
")",
":",
"if",
"key",
"is",
"not",
"'_id'",
":",
"string",
"+=",
"str",
"(",
"key",
")",
"+",
"\": \"",
"+",
"str",
"(",
"value",
")",
"+",
"\" / \"",
"return",
"string"
] |
https://github.com/ZZUTK/SRNTT/blob/c9a2cf95534e2d3c2c2210718c9903c9f389d67d/SRNTT/tensorlayer/db.py#L216-L223
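For reference, the commented-out join form hinted at inside _print_dict can be written as a one-liner once the '_id' key is filtered out; the dict literal below is only an example.
args = {'_id': 1, 'loss': 0.25, 'step': 10}
print(" / ".join("{}: {}".format(k, v) for k, v in args.items() if k != '_id'))
# -> loss: 0.25 / step: 10   (insertion order on Python 3.7+)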
|
|||
IronLanguages/main
|
a949455434b1fda8c783289e897e78a9a0caabb5
|
External.LCA_RESTRICTED/Languages/IronPython/27/Lib/codecs.py
|
python
|
StreamRecoder.next
|
(self)
|
return data
|
Return the next decoded line from the input stream.
|
Return the next decoded line from the input stream.
|
[
"Return",
"the",
"next",
"decoded",
"line",
"from",
"the",
"input",
"stream",
"."
] |
def next(self):
""" Return the next decoded line from the input stream."""
data = self.reader.next()
data, bytesencoded = self.encode(data, self.errors)
return data
|
[
"def",
"next",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"reader",
".",
"next",
"(",
")",
"data",
",",
"bytesencoded",
"=",
"self",
".",
"encode",
"(",
"data",
",",
"self",
".",
"errors",
")",
"return",
"data"
] |
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/IronPython/27/Lib/codecs.py#L815-L820
|
|
carnal0wnage/weirdAAL
|
c14e36d7bb82447f38a43da203f4bc29429f4cf4
|
libs/aws/brute.py
|
python
|
brute_ssm_permissions
|
()
|
return generic_permission_bruteforcer('ssm', tests)
|
http://boto3.readthedocs.io/en/latest/reference/services/ssm.html
|
http://boto3.readthedocs.io/en/latest/reference/services/ssm.html
|
[
"http",
":",
"//",
"boto3",
".",
"readthedocs",
".",
"io",
"/",
"en",
"/",
"latest",
"/",
"reference",
"/",
"services",
"/",
"ssm",
".",
"html"
] |
def brute_ssm_permissions():
'''
http://boto3.readthedocs.io/en/latest/reference/services/ssm.html
'''
print("### Enumerating Amazon Simple Systems Manager (SSM) Permissions ###")
tests = [('DescribeActivations', 'describe_activations', (), {}),
# ('DescribeAssociation', 'describe_association', (), {}),
('ListDocuments', 'list_documents', (), {}),
('ListResourceComplianceSummaries', 'list_resource_compliance_summaries', (), {}), ]
return generic_permission_bruteforcer('ssm', tests)
|
[
"def",
"brute_ssm_permissions",
"(",
")",
":",
"print",
"(",
"\"### Enumerating Amazon Simple Systems Manager (SSM) Permissions ###\"",
")",
"tests",
"=",
"[",
"(",
"'DescribeActivations'",
",",
"'describe_activations'",
",",
"(",
")",
",",
"{",
"}",
")",
",",
"# ('DescribeAssociation', 'describe_association', (), {}),",
"(",
"'ListDocuments'",
",",
"'list_documents'",
",",
"(",
")",
",",
"{",
"}",
")",
",",
"(",
"'ListResourceComplianceSummaries'",
",",
"'list_resource_compliance_summaries'",
",",
"(",
")",
",",
"{",
"}",
")",
",",
"]",
"return",
"generic_permission_bruteforcer",
"(",
"'ssm'",
",",
"tests",
")"
] |
https://github.com/carnal0wnage/weirdAAL/blob/c14e36d7bb82447f38a43da203f4bc29429f4cf4/libs/aws/brute.py#L2254-L2263
|
|
ProjectQ-Framework/ProjectQ
|
0d32c1610ba4e9aefd7f19eb52dadb4fbe5f9005
|
projectq/meta/_compute.py
|
python
|
ComputeTag.__eq__
|
(self, other)
|
return isinstance(other, ComputeTag)
|
Equal operator.
|
Equal operator.
|
[
"Equal",
"operator",
"."
] |
def __eq__(self, other):
"""Equal operator."""
return isinstance(other, ComputeTag)
|
[
"def",
"__eq__",
"(",
"self",
",",
"other",
")",
":",
"return",
"isinstance",
"(",
"other",
",",
"ComputeTag",
")"
] |
https://github.com/ProjectQ-Framework/ProjectQ/blob/0d32c1610ba4e9aefd7f19eb52dadb4fbe5f9005/projectq/meta/_compute.py#L39-L41
|
|
AndroidHooker/hooker
|
1f73d741195f6d57c12e0d36bfd8a0a22f573e6c
|
hooker_xp/hooker_xp/analysis/Analysis.py
|
python
|
Analysis.reporter
|
(self)
|
return self.__reporter
|
The reporter
|
The reporter
|
[
"The",
"reporter"
] |
def reporter(self):
"""The reporter
"""
return self.__reporter
|
[
"def",
"reporter",
"(",
"self",
")",
":",
"return",
"self",
".",
"__reporter"
] |
https://github.com/AndroidHooker/hooker/blob/1f73d741195f6d57c12e0d36bfd8a0a22f573e6c/hooker_xp/hooker_xp/analysis/Analysis.py#L217-L220
|
|
leo-editor/leo-editor
|
383d6776d135ef17d73d935a2f0ecb3ac0e99494
|
leo/plugins/backlink.py
|
python
|
backlinkController.loadLinksInt
|
(self)
|
load links after file opened or reload on request from UI
|
load links after file opened or reload on request from UI
|
[
"load",
"links",
"after",
"file",
"opened",
"or",
"reload",
"on",
"request",
"from",
"UI"
] |
def loadLinksInt(self):
"""load links after file opened or reload on request from UI"""
c = self.c # checked in loadLinks()
self.initIvars() # clears self.vnode
idsSeen = set() # just the vnodes with link info.
# make map from linked node's ids to their vnodes
for p in c.all_positions():
v = p.v
self.vnode[v.gnx] = v
if v.u and '_bklnk' in v.u:
idsSeen.add(v.gnx)
for vnode in idsSeen: # just the vnodes with link info.
if 'links' not in self.vnode[vnode].u['_bklnk']:
g.trace(self.vnode[vnode].u)
# graphcanvas.py will only init x and y keys
self.vnode[vnode].u['_bklnk']['links'] = []
links = self.vnode[vnode].u['_bklnk']['links']
newlinks = [] # start with empty list and include only good links
for link in links:
if link[1] not in self.vnode:
# other end is missing
lt = ('to', 'from')
if link[0] == 'S':
lt = ('from', 'to')
# use g.es rather than showMessage here
g.error('backlink: link %s %s %s ??? lost' % (
lt[0], self.vnode[vnode].h, lt[1]))
continue
# check other end has link
other = self.vnode[link[1]]
if '_bklnk' not in other.u or 'links' not in other.u['_bklnk']:
self.initBacklink(other)
if not [
i for i in other.u['_bklnk']['links']
if i[1] == vnode
]:
# we are not in the other's list
direc = {'U': 'U', 'S': 'D', 'D': 'S'}[link[0]]
other.u['_bklnk']['links'].append((direc, vnode))
newlinks.append((link[0], link[1]))
self.vnode[vnode].u['_bklnk']['links'] = newlinks
self.showMessage('Link info. loaded on %d nodes' % len(idsSeen))
|
[
"def",
"loadLinksInt",
"(",
"self",
")",
":",
"c",
"=",
"self",
".",
"c",
"# checked in loadLinks()",
"self",
".",
"initIvars",
"(",
")",
"# clears self.vnode",
"idsSeen",
"=",
"set",
"(",
")",
"# just the vnodes with link info.",
"# make map from linked node's ids to their vnodes",
"for",
"p",
"in",
"c",
".",
"all_positions",
"(",
")",
":",
"v",
"=",
"p",
".",
"v",
"self",
".",
"vnode",
"[",
"v",
".",
"gnx",
"]",
"=",
"v",
"if",
"v",
".",
"u",
"and",
"'_bklnk'",
"in",
"v",
".",
"u",
":",
"idsSeen",
".",
"add",
"(",
"v",
".",
"gnx",
")",
"for",
"vnode",
"in",
"idsSeen",
":",
"# just the vnodes with link info.",
"if",
"'links'",
"not",
"in",
"self",
".",
"vnode",
"[",
"vnode",
"]",
".",
"u",
"[",
"'_bklnk'",
"]",
":",
"g",
".",
"trace",
"(",
"self",
".",
"vnode",
"[",
"vnode",
"]",
".",
"u",
")",
"# graphcanvas.py will only init x and y keys",
"self",
".",
"vnode",
"[",
"vnode",
"]",
".",
"u",
"[",
"'_bklnk'",
"]",
"[",
"'links'",
"]",
"=",
"[",
"]",
"links",
"=",
"self",
".",
"vnode",
"[",
"vnode",
"]",
".",
"u",
"[",
"'_bklnk'",
"]",
"[",
"'links'",
"]",
"newlinks",
"=",
"[",
"]",
"# start with empty list and include only good links",
"for",
"link",
"in",
"links",
":",
"if",
"link",
"[",
"1",
"]",
"not",
"in",
"self",
".",
"vnode",
":",
"# other end if missing",
"lt",
"=",
"(",
"'to'",
",",
"'from'",
")",
"if",
"link",
"[",
"0",
"]",
"==",
"'S'",
":",
"lt",
"=",
"(",
"'from'",
",",
"'to'",
")",
"# use g.es rather than showMessage here",
"g",
".",
"error",
"(",
"'backlink: link %s %s %s ??? lost'",
"%",
"(",
"lt",
"[",
"0",
"]",
",",
"self",
".",
"vnode",
"[",
"vnode",
"]",
".",
"h",
",",
"lt",
"[",
"1",
"]",
")",
")",
"continue",
"# check other end has link",
"other",
"=",
"self",
".",
"vnode",
"[",
"link",
"[",
"1",
"]",
"]",
"if",
"'_bklnk'",
"not",
"in",
"other",
".",
"u",
"or",
"'links'",
"not",
"in",
"other",
".",
"u",
"[",
"'_bklnk'",
"]",
":",
"self",
".",
"initBacklink",
"(",
"other",
")",
"if",
"not",
"[",
"i",
"for",
"i",
"in",
"other",
".",
"u",
"[",
"'_bklnk'",
"]",
"[",
"'links'",
"]",
"if",
"i",
"[",
"1",
"]",
"==",
"vnode",
"]",
":",
"# we are not in the other's list",
"direc",
"=",
"{",
"'U'",
":",
"'U'",
",",
"'S'",
":",
"'D'",
",",
"'D'",
":",
"'S'",
"}",
"[",
"link",
"[",
"0",
"]",
"]",
"other",
".",
"u",
"[",
"'_bklnk'",
"]",
"[",
"'links'",
"]",
".",
"append",
"(",
"(",
"direc",
",",
"vnode",
")",
")",
"newlinks",
".",
"append",
"(",
"(",
"link",
"[",
"0",
"]",
",",
"link",
"[",
"1",
"]",
")",
")",
"self",
".",
"vnode",
"[",
"vnode",
"]",
".",
"u",
"[",
"'_bklnk'",
"]",
"[",
"'links'",
"]",
"=",
"newlinks",
"self",
".",
"showMessage",
"(",
"'Link info. loaded on %d nodes'",
"%",
"len",
"(",
"idsSeen",
")",
")"
] |
https://github.com/leo-editor/leo-editor/blob/383d6776d135ef17d73d935a2f0ecb3ac0e99494/leo/plugins/backlink.py#L424-L475
|
||
golismero/golismero
|
7d605b937e241f51c1ca4f47b20f755eeefb9d76
|
thirdparty_libs/django/dispatch/dispatcher.py
|
python
|
receiver
|
(signal, **kwargs)
|
return _decorator
|
A decorator for connecting receivers to signals. Used by passing in the
signal (or list of signals) and keyword arguments to connect::
@receiver(post_save, sender=MyModel)
def signal_receiver(sender, **kwargs):
...
@receiver([post_save, post_delete], sender=MyModel)
def signals_receiver(sender, **kwargs):
...
|
A decorator for connecting receivers to signals. Used by passing in the
signal (or list of signals) and keyword arguments to connect::
|
[
"A",
"decorator",
"for",
"connecting",
"receivers",
"to",
"signals",
".",
"Used",
"by",
"passing",
"in",
"the",
"signal",
"(",
"or",
"list",
"of",
"signals",
")",
"and",
"keyword",
"arguments",
"to",
"connect",
"::"
] |
def receiver(signal, **kwargs):
"""
A decorator for connecting receivers to signals. Used by passing in the
signal (or list of signals) and keyword arguments to connect::
@receiver(post_save, sender=MyModel)
def signal_receiver(sender, **kwargs):
...
@receiver([post_save, post_delete], sender=MyModel)
def signals_receiver(sender, **kwargs):
...
"""
def _decorator(func):
if isinstance(signal, (list, tuple)):
for s in signal:
s.connect(func, **kwargs)
else:
signal.connect(func, **kwargs)
return func
return _decorator
|
[
"def",
"receiver",
"(",
"signal",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"_decorator",
"(",
"func",
")",
":",
"if",
"isinstance",
"(",
"signal",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"s",
"in",
"signal",
":",
"s",
".",
"connect",
"(",
"func",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"signal",
".",
"connect",
"(",
"func",
",",
"*",
"*",
"kwargs",
")",
"return",
"func",
"return",
"_decorator"
] |
https://github.com/golismero/golismero/blob/7d605b937e241f51c1ca4f47b20f755eeefb9d76/thirdparty_libs/django/dispatch/dispatcher.py#L252-L273
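A short usage sketch for the decorator above, assuming Django is installed; the signal name and handler are placeholders rather than code from this repository.
import django.dispatch
from django.dispatch import receiver

ping = django.dispatch.Signal()

@receiver(ping)
def on_ping(sender, **kwargs):
    print('ping received from', sender)

ping.send(sender='example')   # prints: ping received from example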
|
|
Abjad/abjad
|
d0646dfbe83db3dc5ab268f76a0950712b87b7fd
|
abjad/bind.py
|
python
|
Wrapper.indicator
|
(self)
|
return self._indicator
|
Gets indicator.
|
Gets indicator.
|
[
"Gets",
"indicator",
"."
] |
def indicator(self) -> typing.Any:
"""
Gets indicator.
"""
return self._indicator
|
[
"def",
"indicator",
"(",
"self",
")",
"->",
"typing",
".",
"Any",
":",
"return",
"self",
".",
"_indicator"
] |
https://github.com/Abjad/abjad/blob/d0646dfbe83db3dc5ab268f76a0950712b87b7fd/abjad/bind.py#L543-L547
|
|
edfungus/Crouton
|
ada98b3930192938a48909072b45cb84b945f875
|
clients/python_clients/cf_demo_client/cf_env/lib/python2.7/site-packages/werkzeug/utils.py
|
python
|
find_modules
|
(import_path, include_packages=False, recursive=False)
|
Finds all the modules below a package. This can be useful to
automatically import all views / controllers so that their metaclasses /
function decorators have a chance to register themselves on the
application.
Packages are not returned unless `include_packages` is `True`. This can
also recursively list modules but in that case it will import all the
packages to get the correct load path of that module.
:param import_path: the dotted name for the package to find child modules.
:param include_packages: set to `True` if packages should be returned, too.
:param recursive: set to `True` if recursion should happen.
:return: generator
|
Finds all the modules below a package. This can be useful to
automatically import all views / controllers so that their metaclasses /
function decorators have a chance to register themselves on the
application.
|
[
"Finds",
"all",
"the",
"modules",
"below",
"a",
"package",
".",
"This",
"can",
"be",
"useful",
"to",
"automatically",
"import",
"all",
"views",
"/",
"controllers",
"so",
"that",
"their",
"metaclasses",
"/",
"function",
"decorators",
"have",
"a",
"chance",
"to",
"register",
"themselves",
"on",
"the",
"application",
"."
] |
def find_modules(import_path, include_packages=False, recursive=False):
"""Finds all the modules below a package. This can be useful to
automatically import all views / controllers so that their metaclasses /
function decorators have a chance to register themselves on the
application.
Packages are not returned unless `include_packages` is `True`. This can
also recursively list modules but in that case it will import all the
packages to get the correct load path of that module.
:param import_path: the dotted name for the package to find child modules.
:param include_packages: set to `True` if packages should be returned, too.
:param recursive: set to `True` if recursion should happen.
:return: generator
"""
module = import_string(import_path)
path = getattr(module, '__path__', None)
if path is None:
raise ValueError('%r is not a package' % import_path)
basename = module.__name__ + '.'
for importer, modname, ispkg in pkgutil.iter_modules(path):
modname = basename + modname
if ispkg:
if include_packages:
yield modname
if recursive:
for item in find_modules(modname, include_packages, True):
yield item
else:
yield modname
|
[
"def",
"find_modules",
"(",
"import_path",
",",
"include_packages",
"=",
"False",
",",
"recursive",
"=",
"False",
")",
":",
"module",
"=",
"import_string",
"(",
"import_path",
")",
"path",
"=",
"getattr",
"(",
"module",
",",
"'__path__'",
",",
"None",
")",
"if",
"path",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'%r is not a package'",
"%",
"import_path",
")",
"basename",
"=",
"module",
".",
"__name__",
"+",
"'.'",
"for",
"importer",
",",
"modname",
",",
"ispkg",
"in",
"pkgutil",
".",
"iter_modules",
"(",
"path",
")",
":",
"modname",
"=",
"basename",
"+",
"modname",
"if",
"ispkg",
":",
"if",
"include_packages",
":",
"yield",
"modname",
"if",
"recursive",
":",
"for",
"item",
"in",
"find_modules",
"(",
"modname",
",",
"include_packages",
",",
"True",
")",
":",
"yield",
"item",
"else",
":",
"yield",
"modname"
] |
https://github.com/edfungus/Crouton/blob/ada98b3930192938a48909072b45cb84b945f875/clients/python_clients/cf_demo_client/cf_env/lib/python2.7/site-packages/werkzeug/utils.py#L446-L475
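A quick usage sketch, assuming the packaged werkzeug still exposes find_modules from werkzeug.utils; the standard-library json package is used only because it is always importable.
from werkzeug.utils import find_modules

for name in find_modules('json'):
    print(name)   # e.g. json.decoder, json.encoder, json.scanner, json.tool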
|
||
NVIDIA/Megatron-LM
|
9a8b89acd8f6ba096860170d0e30ddc0bc2bacd4
|
megatron/text_generation/tokenization.py
|
python
|
detokenize_generations
|
(tokens_gpu_tensor,
lengths_gpu_tensor,
return_segments)
|
return tokens, prompts_plus_generations
|
Detokenize the generated tokens.
|
Detokenize the generated tokens.
|
[
"Detokenize",
"the",
"generated",
"tokens",
"."
] |
def detokenize_generations(tokens_gpu_tensor,
lengths_gpu_tensor,
return_segments):
"""Detokenize the generated tokens."""
tokenizer = get_tokenizer()
prompts_plus_generations = []
if return_segments:
prompts_plus_generations_segments = []
tokens = tokens_gpu_tensor.cpu().numpy().tolist()
lengths = lengths_gpu_tensor.cpu().numpy().tolist()
for sequence_tokens, length in zip(tokens, lengths):
sequence_tokens = sequence_tokens[:length]
prompts_plus_generations.append(
tokenizer.detokenize(sequence_tokens))
if return_segments:
words = []
for token in sequence_tokens:
word = tokenizer.tokenizer.decoder[token]
word = bytearray(
[tokenizer.tokenizer.byte_decoder[c] for c in word]).decode(
'utf-8', errors='replace')
words.append(word)
prompts_plus_generations_segments.append(words)
if return_segments:
return tokens, prompts_plus_generations, \
prompts_plus_generations_segments
return tokens, prompts_plus_generations
|
[
"def",
"detokenize_generations",
"(",
"tokens_gpu_tensor",
",",
"lengths_gpu_tensor",
",",
"return_segments",
")",
":",
"tokenizer",
"=",
"get_tokenizer",
"(",
")",
"prompts_plus_generations",
"=",
"[",
"]",
"if",
"return_segments",
":",
"prompts_plus_generations_segments",
"=",
"[",
"]",
"tokens",
"=",
"tokens_gpu_tensor",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
".",
"tolist",
"(",
")",
"lengths",
"=",
"lengths_gpu_tensor",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
".",
"tolist",
"(",
")",
"for",
"sequence_tokens",
",",
"length",
"in",
"zip",
"(",
"tokens",
",",
"lengths",
")",
":",
"sequence_tokens",
"=",
"sequence_tokens",
"[",
":",
"length",
"]",
"prompts_plus_generations",
".",
"append",
"(",
"tokenizer",
".",
"detokenize",
"(",
"sequence_tokens",
")",
")",
"if",
"return_segments",
":",
"words",
"=",
"[",
"]",
"for",
"token",
"in",
"sequence_tokens",
":",
"word",
"=",
"tokenizer",
".",
"tokenizer",
".",
"decoder",
"[",
"token",
"]",
"word",
"=",
"bytearray",
"(",
"[",
"tokenizer",
".",
"tokenizer",
".",
"byte_decoder",
"[",
"c",
"]",
"for",
"c",
"in",
"word",
"]",
")",
".",
"decode",
"(",
"'utf-8'",
",",
"errors",
"=",
"'replace'",
")",
"words",
".",
"append",
"(",
"word",
")",
"prompts_plus_generations_segments",
".",
"append",
"(",
"words",
")",
"if",
"return_segments",
":",
"return",
"tokens",
",",
"prompts_plus_generations",
",",
"prompts_plus_generations_segments",
"return",
"tokens",
",",
"prompts_plus_generations"
] |
https://github.com/NVIDIA/Megatron-LM/blob/9a8b89acd8f6ba096860170d0e30ddc0bc2bacd4/megatron/text_generation/tokenization.py#L26-L57
|
|
sdispater/tomlkit
|
7b450661e02d161cbf9a3bec3b3955cbcb64efef
|
tomlkit/container.py
|
python
|
Container._previous_item_with_index
|
(
self, idx: Optional[int] = None, ignore=(Null,)
)
|
return None
|
Find the immediate previous item before index ``idx``
|
Find the immediate previous item before index ``idx``
|
[
"Find",
"the",
"immediate",
"previous",
"item",
"before",
"index",
"idx"
] |
def _previous_item_with_index(
self, idx: Optional[int] = None, ignore=(Null,)
) -> Optional[Tuple[int, Item]]:
"""Find the immediate previous item before index ``idx``"""
if idx is None or idx > len(self._body):
idx = len(self._body)
for i in range(idx - 1, -1, -1):
v = self._body[i][-1]
if not isinstance(v, ignore):
return i, v
return None
|
[
"def",
"_previous_item_with_index",
"(",
"self",
",",
"idx",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"ignore",
"=",
"(",
"Null",
",",
")",
")",
"->",
"Optional",
"[",
"Tuple",
"[",
"int",
",",
"Item",
"]",
"]",
":",
"if",
"idx",
"is",
"None",
"or",
"idx",
">",
"len",
"(",
"self",
".",
"_body",
")",
":",
"idx",
"=",
"len",
"(",
"self",
".",
"_body",
")",
"for",
"i",
"in",
"range",
"(",
"idx",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"v",
"=",
"self",
".",
"_body",
"[",
"i",
"]",
"[",
"-",
"1",
"]",
"if",
"not",
"isinstance",
"(",
"v",
",",
"ignore",
")",
":",
"return",
"i",
",",
"v",
"return",
"None"
] |
https://github.com/sdispater/tomlkit/blob/7b450661e02d161cbf9a3bec3b3955cbcb64efef/tomlkit/container.py#L755-L765
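The method above is a reverse scan that skips ignored node types. A generic, self-contained version of the same pattern; previous_with_index is an illustrative name, not tomlkit API.
def previous_with_index(items, idx=None, ignore=(type(None),)):
    if idx is None or idx > len(items):
        idx = len(items)
    for i in range(idx - 1, -1, -1):
        if not isinstance(items[i], ignore):
            return i, items[i]
    return None

print(previous_with_index(['a', None, 'b', None]))   # (2, 'b')
print(previous_with_index([None, None]))             # None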
|
|
ales-tsurko/cells
|
4cf7e395cd433762bea70cdc863a346f3a6fe1d0
|
packaging/macos/python/lib/python3.7/idlelib/searchbase.py
|
python
|
SearchDialogBase.create_option_buttons
|
(self)
|
return frame, options
|
Return (filled frame, options) for testing.
Options is a list of searchengine booleanvar, label pairs.
A gridded frame from make_frame is filled with a Checkbutton
for each pair, bound to the var, with the corresponding label.
|
Return (filled frame, options) for testing.
|
[
"Return",
"(",
"filled",
"frame",
"options",
")",
"for",
"testing",
"."
] |
def create_option_buttons(self):
'''Return (filled frame, options) for testing.
Options is a list of searchengine booleanvar, label pairs.
A gridded frame from make_frame is filled with a Checkbutton
for each pair, bound to the var, with the corresponding label.
'''
frame = self.make_frame("Options")[0]
engine = self.engine
options = [(engine.revar, "Regular expression"),
(engine.casevar, "Match case"),
(engine.wordvar, "Whole word")]
if self.needwrapbutton:
options.append((engine.wrapvar, "Wrap around"))
for var, label in options:
btn = Checkbutton(frame, variable=var, text=label)
btn.pack(side="left", fill="both")
return frame, options
|
[
"def",
"create_option_buttons",
"(",
"self",
")",
":",
"frame",
"=",
"self",
".",
"make_frame",
"(",
"\"Options\"",
")",
"[",
"0",
"]",
"engine",
"=",
"self",
".",
"engine",
"options",
"=",
"[",
"(",
"engine",
".",
"revar",
",",
"\"Regular expression\"",
")",
",",
"(",
"engine",
".",
"casevar",
",",
"\"Match case\"",
")",
",",
"(",
"engine",
".",
"wordvar",
",",
"\"Whole word\"",
")",
"]",
"if",
"self",
".",
"needwrapbutton",
":",
"options",
".",
"append",
"(",
"(",
"engine",
".",
"wrapvar",
",",
"\"Wrap around\"",
")",
")",
"for",
"var",
",",
"label",
"in",
"options",
":",
"btn",
"=",
"Checkbutton",
"(",
"frame",
",",
"variable",
"=",
"var",
",",
"text",
"=",
"label",
")",
"btn",
".",
"pack",
"(",
"side",
"=",
"\"left\"",
",",
"fill",
"=",
"\"both\"",
")",
"return",
"frame",
",",
"options"
] |
https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/idlelib/searchbase.py#L129-L146
|
|
IBM/lale
|
b4d6829c143a4735b06083a0e6c70d2cca244162
|
lale/lib/rasl/_eval_spark_df.py
|
python
|
day_of_year
|
(call: ast.Call)
|
return time_functions(call, dayofyear)
|
[] |
def day_of_year(call: ast.Call):
return time_functions(call, dayofyear)
|
[
"def",
"day_of_year",
"(",
"call",
":",
"ast",
".",
"Call",
")",
":",
"return",
"time_functions",
"(",
"call",
",",
"dayofyear",
")"
] |
https://github.com/IBM/lale/blob/b4d6829c143a4735b06083a0e6c70d2cca244162/lale/lib/rasl/_eval_spark_df.py#L147-L148
|
|||
deepmind/bsuite
|
f305972cf05042f6ce23d638477ea9b33918ba17
|
bsuite/utils/gym_wrapper.py
|
python
|
DMEnvFromGym.close
|
(self)
|
[] |
def close(self):
self.gym_env.close()
|
[
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"gym_env",
".",
"close",
"(",
")"
] |
https://github.com/deepmind/bsuite/blob/f305972cf05042f6ce23d638477ea9b33918ba17/bsuite/utils/gym_wrapper.py#L178-L179
|
||||
esafak/mca
|
f2b79ecbf37629902ccdbad2e1a556977c53d370
|
src/mca.py
|
python
|
MCA.fs_c_sup
|
(self, DF, N=None)
|
return _mul((DF/DF.sum()).T, self.F, S_inv)[:, :N]
|
Find the supplementary column factor scores.
ncols: The number of singular vectors to retain.
If both are passed, cols is given preference.
|
Find the supplementary column factor scores.
|
[
"Find",
"the",
"supplementary",
"column",
"factor",
"scores",
"."
] |
def fs_c_sup(self, DF, N=None):
"""Find the supplementary column factor scores.
ncols: The number of singular vectors to retain.
If both are passed, cols is given preference.
"""
if not hasattr(self, 'F'):
self.fs_r(N=self.rank) # generate F
if N and (not isinstance(N, int) or N <= 0):
raise ValueError("ncols should be a positive integer.")
s = -sqrt(self.E) if self.cor else self.s
N = min(N, self.rank) if N else self.rank
S_inv = diagsvd(-1/s[:N], len(self.F.T), N)
# S = diagsvd(s[:N], len(self.tau), N)
return _mul((DF/DF.sum()).T, self.F, S_inv)[:, :N]
|
[
"def",
"fs_c_sup",
"(",
"self",
",",
"DF",
",",
"N",
"=",
"None",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'F'",
")",
":",
"self",
".",
"fs_r",
"(",
"N",
"=",
"self",
".",
"rank",
")",
"# generate F",
"if",
"N",
"and",
"(",
"not",
"isinstance",
"(",
"N",
",",
"int",
")",
"or",
"N",
"<=",
"0",
")",
":",
"raise",
"ValueError",
"(",
"\"ncols should be a positive integer.\"",
")",
"s",
"=",
"-",
"sqrt",
"(",
"self",
".",
"E",
")",
"if",
"self",
".",
"cor",
"else",
"self",
".",
"s",
"N",
"=",
"min",
"(",
"N",
",",
"self",
".",
"rank",
")",
"if",
"N",
"else",
"self",
".",
"rank",
"S_inv",
"=",
"diagsvd",
"(",
"-",
"1",
"/",
"s",
"[",
":",
"N",
"]",
",",
"len",
"(",
"self",
".",
"F",
".",
"T",
")",
",",
"N",
")",
"# S = diagsvd(s[:N], len(self.tau), N)",
"return",
"_mul",
"(",
"(",
"DF",
"/",
"DF",
".",
"sum",
"(",
")",
")",
".",
"T",
",",
"self",
".",
"F",
",",
"S_inv",
")",
"[",
":",
",",
":",
"N",
"]"
] |
https://github.com/esafak/mca/blob/f2b79ecbf37629902ccdbad2e1a556977c53d370/src/mca.py#L216-L231
|
|
AppScale/gts
|
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
|
AppServer/lib/cherrypy/cherrypy/lib/caching.py
|
python
|
Cache.delete
|
(self)
|
Remove ALL cached variants of the current resource.
|
Remove ALL cached variants of the current resource.
|
[
"Remove",
"ALL",
"cached",
"variants",
"of",
"the",
"current",
"resource",
"."
] |
def delete(self):
"""Remove ALL cached variants of the current resource."""
raise NotImplementedError
|
[
"def",
"delete",
"(",
"self",
")",
":",
"raise",
"NotImplemented"
] |
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/lib/cherrypy/cherrypy/lib/caching.py#L56-L58
|
||
deanishe/alfred-vpn-manager
|
f5d0dd1433ea69b1517d4866a12b1118097057b9
|
src/workflow/web.py
|
python
|
Response._get_encoding
|
(self)
|
return encoding
|
Get encoding from HTTP headers or content.
:returns: encoding or `None`
:rtype: unicode or ``None``
|
Get encoding from HTTP headers or content.
|
[
"Get",
"encoding",
"from",
"HTTP",
"headers",
"or",
"content",
"."
] |
def _get_encoding(self):
"""Get encoding from HTTP headers or content.
:returns: encoding or `None`
:rtype: unicode or ``None``
"""
headers = self.raw.info()
encoding = None
if headers.getparam('charset'):
encoding = headers.getparam('charset')
# HTTP Content-Type header
for param in headers.getplist():
if param.startswith('charset='):
encoding = param[8:]
break
if not self.stream: # Try sniffing response content
# Encoding declared in document should override HTTP headers
if self.mimetype == 'text/html': # sniff HTML headers
m = re.search(r"""<meta.+charset=["']{0,1}(.+?)["'].*>""",
self.content)
if m:
encoding = m.group(1)
elif ((self.mimetype.startswith('application/')
or self.mimetype.startswith('text/'))
and 'xml' in self.mimetype):
m = re.search(r"""<?xml.+encoding=["'](.+?)["'][^>]*\?>""",
self.content)
if m:
encoding = m.group(1)
# Format defaults
if self.mimetype == 'application/json' and not encoding:
# The default encoding for JSON
encoding = 'utf-8'
elif self.mimetype == 'application/xml' and not encoding:
# The default for 'application/xml'
encoding = 'utf-8'
if encoding:
encoding = encoding.lower()
return encoding
|
[
"def",
"_get_encoding",
"(",
"self",
")",
":",
"headers",
"=",
"self",
".",
"raw",
".",
"info",
"(",
")",
"encoding",
"=",
"None",
"if",
"headers",
".",
"getparam",
"(",
"'charset'",
")",
":",
"encoding",
"=",
"headers",
".",
"getparam",
"(",
"'charset'",
")",
"# HTTP Content-Type header",
"for",
"param",
"in",
"headers",
".",
"getplist",
"(",
")",
":",
"if",
"param",
".",
"startswith",
"(",
"'charset='",
")",
":",
"encoding",
"=",
"param",
"[",
"8",
":",
"]",
"break",
"if",
"not",
"self",
".",
"stream",
":",
"# Try sniffing response content",
"# Encoding declared in document should override HTTP headers",
"if",
"self",
".",
"mimetype",
"==",
"'text/html'",
":",
"# sniff HTML headers",
"m",
"=",
"re",
".",
"search",
"(",
"r\"\"\"<meta.+charset=[\"']{0,1}(.+?)[\"'].*>\"\"\"",
",",
"self",
".",
"content",
")",
"if",
"m",
":",
"encoding",
"=",
"m",
".",
"group",
"(",
"1",
")",
"elif",
"(",
"(",
"self",
".",
"mimetype",
".",
"startswith",
"(",
"'application/'",
")",
"or",
"self",
".",
"mimetype",
".",
"startswith",
"(",
"'text/'",
")",
")",
"and",
"'xml'",
"in",
"self",
".",
"mimetype",
")",
":",
"m",
"=",
"re",
".",
"search",
"(",
"r\"\"\"<?xml.+encoding=[\"'](.+?)[\"'][^>]*\\?>\"\"\"",
",",
"self",
".",
"content",
")",
"if",
"m",
":",
"encoding",
"=",
"m",
".",
"group",
"(",
"1",
")",
"# Format defaults",
"if",
"self",
".",
"mimetype",
"==",
"'application/json'",
"and",
"not",
"encoding",
":",
"# The default encoding for JSON",
"encoding",
"=",
"'utf-8'",
"elif",
"self",
".",
"mimetype",
"==",
"'application/xml'",
"and",
"not",
"encoding",
":",
"# The default for 'application/xml'",
"encoding",
"=",
"'utf-8'",
"if",
"encoding",
":",
"encoding",
"=",
"encoding",
".",
"lower",
"(",
")",
"return",
"encoding"
] |
https://github.com/deanishe/alfred-vpn-manager/blob/f5d0dd1433ea69b1517d4866a12b1118097057b9/src/workflow/web.py#L416-L463
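A small illustration of the HTML charset sniffing used above; the sample document is made up, and the lowercasing mirrors the method's final step.
import re

html = '<html><head><meta charset="ISO-8859-1"></head><body>hi</body></html>'
m = re.search(r"""<meta.+charset=["']{0,1}(.+?)["'].*>""", html)
print(m.group(1).lower() if m else None)   # iso-8859-1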
|
|
privacyidea/privacyidea
|
9490c12ddbf77a34ac935b082d09eb583dfafa2c
|
privacyidea/lib/importotp.py
|
python
|
_create_static_password
|
(key_hex)
|
return password
|
According to yubikey manual 5.5.5 the static-ticket is the same
algorithm with no moving factors.
The msg_hex that is encoded with the AES key is
'000000000000ffffffffffffffff0f2e'
|
According to yubikey manual 5.5.5 the static-ticket is the same
algorithm with no moving factors.
The msg_hex that is encoded with the AES key is
'000000000000ffffffffffffffff0f2e'
|
[
"According",
"to",
"yubikey",
"manual",
"5",
".",
"5",
".",
"5",
"the",
"static",
"-",
"ticket",
"is",
"the",
"same",
"algorithm",
"with",
"no",
"moving",
"factors",
".",
"The",
"msg_hex",
"that",
"is",
"encoded",
"with",
"the",
"AES",
"key",
"is",
"000000000000ffffffffffffffff0f2e"
] |
def _create_static_password(key_hex):
'''
According to yubikey manual 5.5.5 the static-ticket is the same
algorithm with no moving factors.
The msg_hex that is encoded with the AES key is
'000000000000ffffffffffffffff0f2e'
'''
msg_hex = "000000000000ffffffffffffffff0f2e"
msg_bin = binascii.unhexlify(msg_hex)
cipher = Cipher(algorithms.AES(binascii.unhexlify(key_hex)),
modes.ECB(), default_backend())
encryptor = cipher.encryptor()
password_bin = encryptor.update(msg_bin) + encryptor.finalize()
password = modhex_encode(password_bin)
return password
|
[
"def",
"_create_static_password",
"(",
"key_hex",
")",
":",
"msg_hex",
"=",
"\"000000000000ffffffffffffffff0f2e\"",
"msg_bin",
"=",
"binascii",
".",
"unhexlify",
"(",
"msg_hex",
")",
"cipher",
"=",
"Cipher",
"(",
"algorithms",
".",
"AES",
"(",
"binascii",
".",
"unhexlify",
"(",
"key_hex",
")",
")",
",",
"modes",
".",
"ECB",
"(",
")",
",",
"default_backend",
"(",
")",
")",
"encryptor",
"=",
"cipher",
".",
"encryptor",
"(",
")",
"password_bin",
"=",
"encryptor",
".",
"update",
"(",
"msg_bin",
")",
"+",
"encryptor",
".",
"finalize",
"(",
")",
"password",
"=",
"modhex_encode",
"(",
"password_bin",
")",
"return",
"password"
] |
https://github.com/privacyidea/privacyidea/blob/9490c12ddbf77a34ac935b082d09eb583dfafa2c/privacyidea/lib/importotp.py#L77-L92
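A minimal sketch of the cryptography-library calls used above, encrypting the fixed 16-byte message under AES-ECB; the key below is a throwaway example value, not a real Yubikey secret.
import binascii

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

key = binascii.unhexlify('000102030405060708090a0b0c0d0e0f')   # example key only
msg = binascii.unhexlify('000000000000ffffffffffffffff0f2e')   # fixed static-ticket block
encryptor = Cipher(algorithms.AES(key), modes.ECB(), default_backend()).encryptor()
block = encryptor.update(msg) + encryptor.finalize()
print(binascii.hexlify(block).decode())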
|
|
deepfakes/faceswap
|
09c7d8aca3c608d1afad941ea78e9fd9b64d9219
|
lib/gui/popup_session.py
|
python
|
SessionPopUp._opts_buttons
|
(self, frame)
|
Add the option buttons.
Parameters
----------
frame: `tkinter.ttk.Frame`
The frame that the options reside in
|
Add the option buttons.
|
[
"Add",
"the",
"option",
"buttons",
"."
] |
def _opts_buttons(self, frame):
""" Add the option buttons.
Parameters
----------
frame: `tkinter.ttk.Frame`
The frame that the options reside in
"""
logger.debug("Building Buttons")
btnframe = ttk.Frame(frame)
lblstatus = ttk.Label(btnframe,
width=40,
textvariable=self._vars["status"],
anchor=tk.W)
for btntype in ("reload", "save"):
cmd = getattr(self, "_option_button_{}".format(btntype))
btn = ttk.Button(btnframe,
image=get_images().icons[btntype],
command=cmd)
hlp = self._set_help(btntype)
Tooltip(btn, text=hlp, wrap_length=200)
btn.pack(padx=2, side=tk.RIGHT)
lblstatus.pack(side=tk.LEFT, anchor=tk.W, fill=tk.X, expand=True)
btnframe.pack(fill=tk.X, pady=5, padx=5, side=tk.BOTTOM)
logger.debug("Built Buttons")
|
[
"def",
"_opts_buttons",
"(",
"self",
",",
"frame",
")",
":",
"logger",
".",
"debug",
"(",
"\"Building Buttons\"",
")",
"btnframe",
"=",
"ttk",
".",
"Frame",
"(",
"frame",
")",
"lblstatus",
"=",
"ttk",
".",
"Label",
"(",
"btnframe",
",",
"width",
"=",
"40",
",",
"textvariable",
"=",
"self",
".",
"_vars",
"[",
"\"status\"",
"]",
",",
"anchor",
"=",
"tk",
".",
"W",
")",
"for",
"btntype",
"in",
"(",
"\"reload\"",
",",
"\"save\"",
")",
":",
"cmd",
"=",
"getattr",
"(",
"self",
",",
"\"_option_button_{}\"",
".",
"format",
"(",
"btntype",
")",
")",
"btn",
"=",
"ttk",
".",
"Button",
"(",
"btnframe",
",",
"image",
"=",
"get_images",
"(",
")",
".",
"icons",
"[",
"btntype",
"]",
",",
"command",
"=",
"cmd",
")",
"hlp",
"=",
"self",
".",
"_set_help",
"(",
"btntype",
")",
"Tooltip",
"(",
"btn",
",",
"text",
"=",
"hlp",
",",
"wrap_length",
"=",
"200",
")",
"btn",
".",
"pack",
"(",
"padx",
"=",
"2",
",",
"side",
"=",
"tk",
".",
"RIGHT",
")",
"lblstatus",
".",
"pack",
"(",
"side",
"=",
"tk",
".",
"LEFT",
",",
"anchor",
"=",
"tk",
".",
"W",
",",
"fill",
"=",
"tk",
".",
"X",
",",
"expand",
"=",
"True",
")",
"btnframe",
".",
"pack",
"(",
"fill",
"=",
"tk",
".",
"X",
",",
"pady",
"=",
"5",
",",
"padx",
"=",
"5",
",",
"side",
"=",
"tk",
".",
"BOTTOM",
")",
"logger",
".",
"debug",
"(",
"\"Built Buttons\"",
")"
] |
https://github.com/deepfakes/faceswap/blob/09c7d8aca3c608d1afad941ea78e9fd9b64d9219/lib/gui/popup_session.py#L246-L272
|
||
saltstack/salt
|
fae5bc757ad0f1716483ce7ae180b451545c2058
|
salt/states/mount.py
|
python
|
_convert_to
|
(maybe_device, convert_to)
|
return result
|
Convert a device name, UUID or LABEL to a device name, UUID or
LABEL.
Return the fs_spec required for fstab.
|
Convert a device name, UUID or LABEL to a device name, UUID or
LABEL.
|
[
"Convert",
"a",
"device",
"name",
"UUID",
"or",
"LABEL",
"to",
"a",
"device",
"name",
"UUID",
"or",
"LABEL",
"."
] |
def _convert_to(maybe_device, convert_to):
"""
Convert a device name, UUID or LABEL to a device name, UUID or
LABEL.
Return the fs_spec required for fstab.
"""
# Fast path. If we already have the information required, we can
# save one blkid call
if (
not convert_to
or (convert_to == "device" and maybe_device.startswith("/"))
or maybe_device.startswith("{}=".format(convert_to.upper()))
):
return maybe_device
# Get the device information
if maybe_device.startswith("/"):
blkid = __salt__["disk.blkid"](maybe_device)
else:
blkid = __salt__["disk.blkid"](token=maybe_device)
result = None
if len(blkid) == 1:
if convert_to == "device":
result = next(iter(blkid))
else:
key = convert_to.upper()
result = "{}={}".format(key, next(iter(blkid.values()))[key])
return result
|
[
"def",
"_convert_to",
"(",
"maybe_device",
",",
"convert_to",
")",
":",
"# Fast path. If we already have the information required, we can",
"# save one blkid call",
"if",
"(",
"not",
"convert_to",
"or",
"(",
"convert_to",
"==",
"\"device\"",
"and",
"maybe_device",
".",
"startswith",
"(",
"\"/\"",
")",
")",
"or",
"maybe_device",
".",
"startswith",
"(",
"\"{}=\"",
".",
"format",
"(",
"convert_to",
".",
"upper",
"(",
")",
")",
")",
")",
":",
"return",
"maybe_device",
"# Get the device information",
"if",
"maybe_device",
".",
"startswith",
"(",
"\"/\"",
")",
":",
"blkid",
"=",
"__salt__",
"[",
"\"disk.blkid\"",
"]",
"(",
"maybe_device",
")",
"else",
":",
"blkid",
"=",
"__salt__",
"[",
"\"disk.blkid\"",
"]",
"(",
"token",
"=",
"maybe_device",
")",
"result",
"=",
"None",
"if",
"len",
"(",
"blkid",
")",
"==",
"1",
":",
"if",
"convert_to",
"==",
"\"device\"",
":",
"result",
"=",
"next",
"(",
"iter",
"(",
"blkid",
")",
")",
"else",
":",
"key",
"=",
"convert_to",
".",
"upper",
"(",
")",
"result",
"=",
"\"{}={}\"",
".",
"format",
"(",
"key",
",",
"next",
"(",
"iter",
"(",
"blkid",
".",
"values",
"(",
")",
")",
")",
"[",
"key",
"]",
")",
"return",
"result"
] |
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/states/mount.py#L1038-L1070
|
|
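The fast path in _convert_to avoids a blkid call whenever the requested form is already present. Below is a minimal sketch of just that branch; convert_to_fast_path is a hypothetical stand-in so it can run outside the Salt loader, where __salt__ is not available.

def convert_to_fast_path(maybe_device, convert_to):
    # Mirrors only the early-return branch of _convert_to above.
    if (
        not convert_to
        or (convert_to == "device" and maybe_device.startswith("/"))
        or maybe_device.startswith("{}=".format(convert_to.upper()))
    ):
        return maybe_device
    return None  # the real function would now query __salt__["disk.blkid"]

assert convert_to_fast_path("/dev/sda1", "device") == "/dev/sda1"
assert convert_to_fast_path("UUID=1234-ABCD", "uuid") == "UUID=1234-ABCD"
assert convert_to_fast_path("/dev/sda1", "uuid") is None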
google-research/disentanglement_lib
|
86a644d4ed35c771560dc3360756363d35477357
|
disentanglement_lib/evaluation/abstract_reasoning/reason.py
|
python
|
reason
|
(
input_dir,
output_dir,
overwrite=False,
model=gin.REQUIRED,
num_iterations=gin.REQUIRED,
training_steps_per_iteration=gin.REQUIRED,
eval_steps_per_iteration=gin.REQUIRED,
random_seed=gin.REQUIRED,
batch_size=gin.REQUIRED,
name="",
)
|
Trains the estimator and exports the snapshot and the gin config.
The use of this function requires the gin binding 'dataset.name' to be
specified if a model is trained from scratch as that determines the data set
used for training.
Args:
input_dir: String with path to directory where the representation function
is saved.
output_dir: String with the path where the results should be saved.
overwrite: Boolean indicating whether to overwrite output directory.
model: GaussianEncoderModel that should be trained and exported.
num_iterations: Integer with number of training steps.
training_steps_per_iteration: Integer with number of training steps per
iteration.
eval_steps_per_iteration: Integer with number of validation and test steps
per iteration.
random_seed: Integer with random seed used for training.
batch_size: Integer with the batch size.
name: Optional string with name of the model (can be used to name models).
|
Trains the estimator and exports the snapshot and the gin config.
|
[
"Trains",
"the",
"estimator",
"and",
"exports",
"the",
"snapshot",
"and",
"the",
"gin",
"config",
"."
] |
def reason(
input_dir,
output_dir,
overwrite=False,
model=gin.REQUIRED,
num_iterations=gin.REQUIRED,
training_steps_per_iteration=gin.REQUIRED,
eval_steps_per_iteration=gin.REQUIRED,
random_seed=gin.REQUIRED,
batch_size=gin.REQUIRED,
name="",
):
"""Trains the estimator and exports the snapshot and the gin config.
The use of this function requires the gin binding 'dataset.name' to be
specified if a model is trained from scratch as that determines the data set
used for training.
Args:
input_dir: String with path to directory where the representation function
is saved.
output_dir: String with the path where the results should be saved.
overwrite: Boolean indicating whether to overwrite output directory.
model: GaussianEncoderModel that should be trained and exported.
num_iterations: Integer with number of training steps.
training_steps_per_iteration: Integer with number of training steps per
iteration.
eval_steps_per_iteration: Integer with number of validation and test steps
per iteration.
random_seed: Integer with random seed used for training.
batch_size: Integer with the batch size.
name: Optional string with name of the model (can be used to name models).
"""
# We do not use the variable 'name'. Instead, it can be used to name results
# as it will be part of the saved gin config.
del name
# Delete the output directory if it already exists.
if tf.gfile.IsDirectory(output_dir):
if overwrite:
tf.gfile.DeleteRecursively(output_dir)
else:
raise ValueError("Directory already exists and overwrite is False.")
# Create a numpy random state. We will sample the random seeds for training
# and evaluation from this.
random_state = np.random.RandomState(random_seed)
# Automatically set the proper data set if necessary. We replace the active
# gin config as this will lead to a valid gin config file where the data set
# is present.
if gin.query_parameter("dataset.name") == "auto":
if input_dir is None:
raise ValueError("Cannot automatically infer data set for methods with"
" no prior model directory.")
# Obtain the dataset name from the gin config of the previous step.
gin_config_file = os.path.join(input_dir, "results", "gin",
"postprocess.gin")
gin_dict = results.gin_dict(gin_config_file)
with gin.unlock_config():
gin.bind_parameter("dataset.name",
gin_dict["dataset.name"].replace("'", ""))
dataset = pgm_data.get_pgm_dataset()
# Set the path to the TFHub embedding if we are training based on a
# pre-trained embedding..
if input_dir is not None:
tfhub_dir = os.path.join(input_dir, "tfhub")
with gin.unlock_config():
gin.bind_parameter("HubEmbedding.hub_path", tfhub_dir)
# We create a TPUEstimator based on the provided model. This is primarily so
# that we could switch to TPU training in the future. For now, we train
# locally on GPUs.
run_config = contrib_tpu.RunConfig(
tf_random_seed=random_seed,
keep_checkpoint_max=1,
tpu_config=contrib_tpu.TPUConfig(iterations_per_loop=500))
tpu_estimator = contrib_tpu.TPUEstimator(
use_tpu=False,
model_fn=model.model_fn,
model_dir=os.path.join(output_dir, "tf_checkpoint"),
train_batch_size=batch_size,
eval_batch_size=batch_size,
config=run_config)
# Set up time to keep track of elapsed time in results.
experiment_timer = time.time()
# Create a dictionary to keep track of all relevant information.
results_dict_of_dicts = {}
validation_scores = []
all_dicts = []
for i in range(num_iterations):
steps_so_far = i * training_steps_per_iteration
tf.logging.info("Training to %d steps.", steps_so_far)
# Train the model for the specified steps.
tpu_estimator.train(
input_fn=dataset.make_input_fn(random_state.randint(2**32)),
steps=training_steps_per_iteration)
# Compute validation scores used for model selection.
validation_results = tpu_estimator.evaluate(
input_fn=dataset.make_input_fn(
random_state.randint(2**32), num_batches=eval_steps_per_iteration))
validation_scores.append(validation_results["accuracy"])
tf.logging.info("Validation results %s", validation_results)
# Compute test scores for final results.
test_results = tpu_estimator.evaluate(
input_fn=dataset.make_input_fn(
random_state.randint(2**32), num_batches=eval_steps_per_iteration),
name="test")
dict_at_iteration = results.namespaced_dict(
val=validation_results, test=test_results)
results_dict_of_dicts["step{}".format(steps_so_far)] = dict_at_iteration
all_dicts.append(dict_at_iteration)
# Select the best number of steps based on the validation scores and add it as
# as a special key to the dictionary.
best_index = np.argmax(validation_scores)
results_dict_of_dicts["best"] = all_dicts[best_index]
# Save the results. The result dir will contain all the results and config
# files that we copied along, as we progress in the pipeline. The idea is that
# these files will be available for analysis at the end.
if input_dir is not None:
original_results_dir = os.path.join(input_dir, "results")
else:
original_results_dir = None
results_dict = results.namespaced_dict(**results_dict_of_dicts)
results_dir = os.path.join(output_dir, "results")
results_dict["elapsed_time"] = time.time() - experiment_timer
results.update_result_directory(results_dir, "abstract_reasoning",
results_dict, original_results_dir)
|
[
"def",
"reason",
"(",
"input_dir",
",",
"output_dir",
",",
"overwrite",
"=",
"False",
",",
"model",
"=",
"gin",
".",
"REQUIRED",
",",
"num_iterations",
"=",
"gin",
".",
"REQUIRED",
",",
"training_steps_per_iteration",
"=",
"gin",
".",
"REQUIRED",
",",
"eval_steps_per_iteration",
"=",
"gin",
".",
"REQUIRED",
",",
"random_seed",
"=",
"gin",
".",
"REQUIRED",
",",
"batch_size",
"=",
"gin",
".",
"REQUIRED",
",",
"name",
"=",
"\"\"",
",",
")",
":",
"# We do not use the variable 'name'. Instead, it can be used to name results",
"# as it will be part of the saved gin config.",
"del",
"name",
"# Delete the output directory if it already exists.",
"if",
"tf",
".",
"gfile",
".",
"IsDirectory",
"(",
"output_dir",
")",
":",
"if",
"overwrite",
":",
"tf",
".",
"gfile",
".",
"DeleteRecursively",
"(",
"output_dir",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Directory already exists and overwrite is False.\"",
")",
"# Create a numpy random state. We will sample the random seeds for training",
"# and evaluation from this.",
"random_state",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"random_seed",
")",
"# Automatically set the proper data set if necessary. We replace the active",
"# gin config as this will lead to a valid gin config file where the data set",
"# is present.",
"if",
"gin",
".",
"query_parameter",
"(",
"\"dataset.name\"",
")",
"==",
"\"auto\"",
":",
"if",
"input_dir",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cannot automatically infer data set for methods with\"",
"\" no prior model directory.\"",
")",
"# Obtain the dataset name from the gin config of the previous step.",
"gin_config_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"input_dir",
",",
"\"results\"",
",",
"\"gin\"",
",",
"\"postprocess.gin\"",
")",
"gin_dict",
"=",
"results",
".",
"gin_dict",
"(",
"gin_config_file",
")",
"with",
"gin",
".",
"unlock_config",
"(",
")",
":",
"gin",
".",
"bind_parameter",
"(",
"\"dataset.name\"",
",",
"gin_dict",
"[",
"\"dataset.name\"",
"]",
".",
"replace",
"(",
"\"'\"",
",",
"\"\"",
")",
")",
"dataset",
"=",
"pgm_data",
".",
"get_pgm_dataset",
"(",
")",
"# Set the path to the TFHub embedding if we are training based on a",
"# pre-trained embedding..",
"if",
"input_dir",
"is",
"not",
"None",
":",
"tfhub_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"input_dir",
",",
"\"tfhub\"",
")",
"with",
"gin",
".",
"unlock_config",
"(",
")",
":",
"gin",
".",
"bind_parameter",
"(",
"\"HubEmbedding.hub_path\"",
",",
"tfhub_dir",
")",
"# We create a TPUEstimator based on the provided model. This is primarily so",
"# that we could switch to TPU training in the future. For now, we train",
"# locally on GPUs.",
"run_config",
"=",
"contrib_tpu",
".",
"RunConfig",
"(",
"tf_random_seed",
"=",
"random_seed",
",",
"keep_checkpoint_max",
"=",
"1",
",",
"tpu_config",
"=",
"contrib_tpu",
".",
"TPUConfig",
"(",
"iterations_per_loop",
"=",
"500",
")",
")",
"tpu_estimator",
"=",
"contrib_tpu",
".",
"TPUEstimator",
"(",
"use_tpu",
"=",
"False",
",",
"model_fn",
"=",
"model",
".",
"model_fn",
",",
"model_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"\"tf_checkpoint\"",
")",
",",
"train_batch_size",
"=",
"batch_size",
",",
"eval_batch_size",
"=",
"batch_size",
",",
"config",
"=",
"run_config",
")",
"# Set up time to keep track of elapsed time in results.",
"experiment_timer",
"=",
"time",
".",
"time",
"(",
")",
"# Create a dictionary to keep track of all relevant information.",
"results_dict_of_dicts",
"=",
"{",
"}",
"validation_scores",
"=",
"[",
"]",
"all_dicts",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"num_iterations",
")",
":",
"steps_so_far",
"=",
"i",
"*",
"training_steps_per_iteration",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Training to %d steps.\"",
",",
"steps_so_far",
")",
"# Train the model for the specified steps.",
"tpu_estimator",
".",
"train",
"(",
"input_fn",
"=",
"dataset",
".",
"make_input_fn",
"(",
"random_state",
".",
"randint",
"(",
"2",
"**",
"32",
")",
")",
",",
"steps",
"=",
"training_steps_per_iteration",
")",
"# Compute validation scores used for model selection.",
"validation_results",
"=",
"tpu_estimator",
".",
"evaluate",
"(",
"input_fn",
"=",
"dataset",
".",
"make_input_fn",
"(",
"random_state",
".",
"randint",
"(",
"2",
"**",
"32",
")",
",",
"num_batches",
"=",
"eval_steps_per_iteration",
")",
")",
"validation_scores",
".",
"append",
"(",
"validation_results",
"[",
"\"accuracy\"",
"]",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Validation results %s\"",
",",
"validation_results",
")",
"# Compute test scores for final results.",
"test_results",
"=",
"tpu_estimator",
".",
"evaluate",
"(",
"input_fn",
"=",
"dataset",
".",
"make_input_fn",
"(",
"random_state",
".",
"randint",
"(",
"2",
"**",
"32",
")",
",",
"num_batches",
"=",
"eval_steps_per_iteration",
")",
",",
"name",
"=",
"\"test\"",
")",
"dict_at_iteration",
"=",
"results",
".",
"namespaced_dict",
"(",
"val",
"=",
"validation_results",
",",
"test",
"=",
"test_results",
")",
"results_dict_of_dicts",
"[",
"\"step{}\"",
".",
"format",
"(",
"steps_so_far",
")",
"]",
"=",
"dict_at_iteration",
"all_dicts",
".",
"append",
"(",
"dict_at_iteration",
")",
"# Select the best number of steps based on the validation scores and add it as",
"# as a special key to the dictionary.",
"best_index",
"=",
"np",
".",
"argmax",
"(",
"validation_scores",
")",
"results_dict_of_dicts",
"[",
"\"best\"",
"]",
"=",
"all_dicts",
"[",
"best_index",
"]",
"# Save the results. The result dir will contain all the results and config",
"# files that we copied along, as we progress in the pipeline. The idea is that",
"# these files will be available for analysis at the end.",
"if",
"input_dir",
"is",
"not",
"None",
":",
"original_results_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"input_dir",
",",
"\"results\"",
")",
"else",
":",
"original_results_dir",
"=",
"None",
"results_dict",
"=",
"results",
".",
"namespaced_dict",
"(",
"*",
"*",
"results_dict_of_dicts",
")",
"results_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"\"results\"",
")",
"results_dict",
"[",
"\"elapsed_time\"",
"]",
"=",
"time",
".",
"time",
"(",
")",
"-",
"experiment_timer",
"results",
".",
"update_result_directory",
"(",
"results_dir",
",",
"\"abstract_reasoning\"",
",",
"results_dict",
",",
"original_results_dir",
")"
] |
https://github.com/google-research/disentanglement_lib/blob/86a644d4ed35c771560dc3360756363d35477357/disentanglement_lib/evaluation/abstract_reasoning/reason.py#L67-L200
|
||
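The model-selection step at the end of reason() is easy to miss in the long body above: a validation accuracy is collected each iteration and the best iteration's result dict is re-exported under a special "best" key. A tiny hedged sketch of just that bookkeeping, with made-up scores and an illustrative step size:

import numpy as np

training_steps_per_iteration = 1000          # illustrative value
validation_scores = [0.31, 0.46, 0.44]       # made-up accuracies, one per iteration
all_dicts = [{"val.accuracy": s} for s in validation_scores]

results_dict_of_dicts = {
    "step{}".format(i * training_steps_per_iteration): d
    for i, d in enumerate(all_dicts)
}
best_index = np.argmax(validation_scores)
results_dict_of_dicts["best"] = all_dicts[best_index]
print(results_dict_of_dicts["best"])  # {'val.accuracy': 0.46}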
fooof-tools/fooof
|
14d6196e0b60c7e6da95b5cf858b20adcc5fc0ac
|
fooof/objs/fit.py
|
python
|
FOOOF.add_settings
|
(self, fooof_settings)
|
Add settings into object from a FOOOFSettings object.
Parameters
----------
fooof_settings : FOOOFSettings
A data object containing the settings for a FOOOF model.
|
Add settings into object from a FOOOFSettings object.
|
[
"Add",
"settings",
"into",
"object",
"from",
"a",
"FOOOFSettings",
"object",
"."
] |
def add_settings(self, fooof_settings):
"""Add settings into object from a FOOOFSettings object.
Parameters
----------
fooof_settings : FOOOFSettings
A data object containing the settings for a FOOOF model.
"""
for setting in OBJ_DESC['settings']:
setattr(self, setting, getattr(fooof_settings, setting))
self._check_loaded_settings(fooof_settings._asdict())
|
[
"def",
"add_settings",
"(",
"self",
",",
"fooof_settings",
")",
":",
"for",
"setting",
"in",
"OBJ_DESC",
"[",
"'settings'",
"]",
":",
"setattr",
"(",
"self",
",",
"setting",
",",
"getattr",
"(",
"fooof_settings",
",",
"setting",
")",
")",
"self",
".",
"_check_loaded_settings",
"(",
"fooof_settings",
".",
"_asdict",
"(",
")",
")"
] |
https://github.com/fooof-tools/fooof/blob/14d6196e0b60c7e6da95b5cf858b20adcc5fc0ac/fooof/objs/fit.py#L327-L339
|
||
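A hedged usage sketch for FOOOF.add_settings: a FOOOFSettings object is typically obtained from another FOOOF model via get_settings() and then applied to a fresh model, which copies each field listed in OBJ_DESC['settings']. This assumes the public fooof API; the parameter values are only examples.

from fooof import FOOOF

fm_template = FOOOF(peak_width_limits=[1, 8], max_n_peaks=6, min_peak_height=0.05)
settings = fm_template.get_settings()   # FOOOFSettings object

fm_new = FOOOF()
fm_new.add_settings(settings)           # copies each setting onto the new model
print(fm_new.max_n_peaks)               # 6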
arizvisa/ida-minsc
|
8627a60f047b5e55d3efeecde332039cd1a16eea
|
custom/tags.py
|
python
|
read.frame
|
(cls, ea)
|
return
|
Iterate through each field within the frame belonging to the function `ea`.
|
Iterate through each field within the frame belonging to the function `ea`.
|
[
"Iterate",
"through",
"each",
"field",
"within",
"the",
"frame",
"belonging",
"to",
"the",
"function",
"ea",
"."
] |
def frame(cls, ea):
'''Iterate through each field within the frame belonging to the function `ea`.'''
F = func.by(ea)
# iterate through all of the frame's members
try:
res = func.frame(F)
except internal.exceptions.MissingTypeOrAttribute:
logging.info(u"{:s}.frame({:#x}) : Skipping function at {:#x} due to a missing frame.".format('.'.join([__name__, cls.__name__]), ea, ea))
return
for member in res.members:
# if ida has named it and there's no comment, then skip
if lvarNameQ(member.name) and not member.comment:
continue
# if it's a structure, then the type is the structure name
if isinstance(member.type, struc.structure_t):
logging.debug(u"{:s}.frame({:#x}) : Storing structure-based type as name for field {:+#x} with the type {!s}.".format('.'.join([__name__, cls.__name__]), ea, member.offset, internal.utils.string.repr(member.type)))
type = member.type.name
# otherwise, the type is a tuple that we can serialize
else:
type = member.type
# otherwise, it's just a regular field. so we can just save what's important.
yield member.offset, (member.name, type, member.comment)
return
|
[
"def",
"frame",
"(",
"cls",
",",
"ea",
")",
":",
"F",
"=",
"func",
".",
"by",
"(",
"ea",
")",
"# iterate through all of the frame's members",
"try",
":",
"res",
"=",
"func",
".",
"frame",
"(",
"F",
")",
"except",
"internal",
".",
"exceptions",
".",
"MissingTypeOrAttribute",
":",
"logging",
".",
"info",
"(",
"u\"{:s}.frame({:#x}) : Skipping function at {:#x} due to a missing frame.\"",
".",
"format",
"(",
"'.'",
".",
"join",
"(",
"[",
"__name__",
",",
"cls",
".",
"__name__",
"]",
")",
",",
"ea",
",",
"ea",
")",
")",
"return",
"for",
"member",
"in",
"res",
".",
"members",
":",
"# if ida has named it and there's no comment, then skip",
"if",
"lvarNameQ",
"(",
"member",
".",
"name",
")",
"and",
"not",
"member",
".",
"comment",
":",
"continue",
"# if it's a structure, then the type is the structure name",
"if",
"isinstance",
"(",
"member",
".",
"type",
",",
"struc",
".",
"structure_t",
")",
":",
"logging",
".",
"debug",
"(",
"u\"{:s}.frame({:#x}) : Storing structure-based type as name for field {:+#x} with tne type {!s}.\"",
".",
"format",
"(",
"'.'",
".",
"join",
"(",
"[",
"__name__",
",",
"cls",
".",
"__name__",
"]",
")",
",",
"ea",
",",
"member",
".",
"offset",
",",
"internal",
".",
"utils",
".",
"string",
".",
"repr",
"(",
"member",
".",
"type",
")",
")",
")",
"type",
"=",
"member",
".",
"type",
".",
"name",
"# otherwise, the type is a tuple that we can serializer",
"else",
":",
"type",
"=",
"member",
".",
"type",
"# otherwise, it's just a regular field. so we can just save what's important.",
"yield",
"member",
".",
"offset",
",",
"(",
"member",
".",
"name",
",",
"type",
",",
"member",
".",
"comment",
")",
"return"
] |
https://github.com/arizvisa/ida-minsc/blob/8627a60f047b5e55d3efeecde332039cd1a16eea/custom/tags.py#L117-L144
|
|
flask-admin/flask-admin
|
7cff9c742d44d42a8d3495c73a6d71381c796396
|
flask_admin/contrib/sqla/fields.py
|
python
|
InlineModelFormList.__init__
|
(self, form, session, model, prop, inline_view, **kwargs)
|
Default constructor.
:param form:
Form for the related model
:param session:
SQLAlchemy session
:param model:
Related model
:param prop:
Related property name
:param inline_view:
Inline view
|
Default constructor.
|
[
"Default",
"constructor",
"."
] |
def __init__(self, form, session, model, prop, inline_view, **kwargs):
"""
Default constructor.
:param form:
Form for the related model
:param session:
SQLAlchemy session
:param model:
Related model
:param prop:
Related property name
:param inline_view:
Inline view
"""
self.form = form
self.session = session
self.model = model
self.prop = prop
self.inline_view = inline_view
self._pk = get_primary_key(model)
# Generate inline form field
form_opts = FormOpts(widget_args=getattr(inline_view, 'form_widget_args', None),
form_rules=inline_view._form_rules)
form_field = self.form_field_type(form, self._pk, form_opts=form_opts)
super(InlineModelFormList, self).__init__(form_field, **kwargs)
|
[
"def",
"__init__",
"(",
"self",
",",
"form",
",",
"session",
",",
"model",
",",
"prop",
",",
"inline_view",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"form",
"=",
"form",
"self",
".",
"session",
"=",
"session",
"self",
".",
"model",
"=",
"model",
"self",
".",
"prop",
"=",
"prop",
"self",
".",
"inline_view",
"=",
"inline_view",
"self",
".",
"_pk",
"=",
"get_primary_key",
"(",
"model",
")",
"# Generate inline form field",
"form_opts",
"=",
"FormOpts",
"(",
"widget_args",
"=",
"getattr",
"(",
"inline_view",
",",
"'form_widget_args'",
",",
"None",
")",
",",
"form_rules",
"=",
"inline_view",
".",
"_form_rules",
")",
"form_field",
"=",
"self",
".",
"form_field_type",
"(",
"form",
",",
"self",
".",
"_pk",
",",
"form_opts",
"=",
"form_opts",
")",
"super",
"(",
"InlineModelFormList",
",",
"self",
")",
".",
"__init__",
"(",
"form_field",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/flask-admin/flask-admin/blob/7cff9c742d44d42a8d3495c73a6d71381c796396/flask_admin/contrib/sqla/fields.py#L259-L288
|
||
achael/eht-imaging
|
bbd3aeb06bef52bf89fa1c06de71e5509a5b0015
|
ehtim/imager.py
|
python
|
Imager.check_params
|
(self)
|
Check parameter consistency.
|
Check parameter consistency.
|
[
"Check",
"parameter",
"consistency",
"."
] |
def check_params(self):
"""Check parameter consistency.
"""
if ((self.prior_next.psize != self.init_next.psize) or
(self.prior_next.xdim != self.init_next.xdim) or
(self.prior_next.ydim != self.init_next.ydim)):
raise Exception("Initial image does not match dimensions of the prior image!")
if (self.prior_next.polrep != self.init_next.polrep):
raise Exception(
"Initial image polrep does not match prior polrep!")
if (self.prior_next.polrep == 'circ' and not(self.pol_next in ['P', 'RR', 'LL'])):
raise Exception("Initial image polrep is 'circ': pol_next must be 'RR' or 'LL' or 'P'!")
if (self.prior_next.polrep == 'stokes' and not(self.pol_next in ['I', 'Q', 'U', 'V', 'P','IP','IQU'])):
raise Exception(
"Initial image polrep is 'stokes': pol_next must be in 'I','Q','U','V','P','IP','IQU'!")
if ('log' in self.transform_next and self.pol_next in ['Q', 'U', 'V']):
raise Exception("Cannot image Stokes Q, U, V with log image transformation!")
if self._ttype not in ['fast', 'direct', 'nfft']:
raise Exception("Possible ttype values are 'fast', 'direct','nfft'!")
if(self.pol_next in ['Q', 'U', 'V'] and
('gs' in self.reg_term_next.keys() or 'simple' in self.reg_term_next.keys())):
raise Exception(
"'simple' and 'gs' methods do not work with Stokes Q, U, or V images!")
# Catch errors in multifrequency imaging setup
if self.mf_next and len(set(self.freq_list)) < 2:
raise Exception(
"must have observations at at least two frequencies for multifrequency imaging!")
# Catch errors for polarimetric imaging setup
if (self.pol_next == 'P'):
if 'mcv' not in self.transform_next:
raise Exception("P imaging needs 'mcv' transform!")
if (self._ttype not in ["direct", "nfft"]):
raise Exception("FFT not yet implemented in polarimetric imaging -- use NFFT!")
dt_here = False
dt_type = True
for term in sorted(self.dat_term_next.keys()):
if (term is not None) and (term is not False):
dt_here = True
if not ((term in DATATERMS_POL) or (term is False)):
dt_type = False
st_here = False
st_type = True
for term in sorted(self.reg_term_next.keys()):
if (term is not None) and (term is not False):
st_here = True
if not ((term in REGULARIZERS_POL) or (term is False)):
st_type = False
if not dt_here:
raise Exception("Must have at least one data term!")
if not st_here:
raise Exception("Must have at least one regularizer term!")
if not dt_type:
raise Exception(
"Invalid data term for P imaging: " +
"valid data terms are: " + ','.join(DATATERMS_POL))
if not st_type:
raise Exception(
"Invalid regularizer for P imaging: " +
"valid regularizers are: " + ','.join(REGULARIZERS_POL))
# Catch errors for simultaneous I + polarimetric imaging setup
elif (self.pol_next == 'IP' or self.pol_next == 'IQU'):
if 'mcv' not in self.transform_next:
raise Exception("P imaging needs 'mcv' transform!")
if (self._ttype not in ["direct", "nfft"]):
raise Exception("FFT not yet implemented in polarimetric imaging -- use NFFT!")
dt_here = False
dt_type = True
dt_pol = False
for term in sorted(self.dat_term_next.keys()):
if (term is not None) and (term is not False):
dt_here = True
if not ((term in DATATERMS_POL) or (term in DATATERMS) or (term is False)):
dt_type = False
if term in DATATERMS_POL:
dt_pol = True
st_here = False
st_type = True
for term in sorted(self.reg_term_next.keys()):
if (term is not None) and (term is not False):
st_here = True
if not ((term in REGULARIZERS_POL) or (term in REGULARIZERS) or (term is False)):
st_type = False
if not dt_here:
raise Exception("Must have at least one data term!")
if not st_here:
raise Exception("Must have at least one regularizer term!")
if not dt_type:
raise Exception(
"Invalid data term for IP imaging: " +
"valid data terms are: " + ','.join(DATATERMS_POL + DATATERMS))
if not st_type:
raise Exception(
"Invalid regularizer for IP imaging: " +
"valid regularizers are: " + ','.join(REGULARIZERS_POL + REGULARIZERS))
if not dt_pol:
raise Exception(
"IP imaging must have at least one pol data term from: " +
','.join(DATATERMS_POL))
# Catch errors in single pol imaging setup
else:
dt_here = False
dt_type = True
for term in sorted(self.dat_term_next.keys()):
if (term is not None) and (term is not False):
dt_here = True
if not ((term in DATATERMS) or (term is False)):
dt_type = False
st_here = False
st_type = True
for term in sorted(self.reg_term_next.keys()):
if (term is not None) and (term is not False):
st_here = True
if not ((term in REGULARIZERS or
term in REGULARIZERS_SPECIND or
term in REGULARIZERS_CURV) or
term is False):
st_type = False
if not dt_here:
raise Exception("Must have at least one data term!")
if not st_here:
raise Exception("Must have at least one regularizer term!")
if not dt_type:
raise Exception("Invalid data term: valid data terms are: " + ','.join(DATATERMS))
if not st_type:
raise Exception("Invalid regularizer: valid regularizers are: " +
','.join(REGULARIZERS))
# Determine if we need to recompute the saved imager parameters on the next imager run
if self.nruns == 0:
return
if self.pol_next != self.pol_last():
print("changed polarization!")
self._change_imgr_params = True
return
if self.obslist_next != self.obslist_last():
print("changed observation!")
self._change_imgr_params = True
return
if len(self.reg_term_next) != len(self.reg_terms_last()):
print("changed number of regularizer terms!")
self._change_imgr_params = True
return
if len(self.dat_term_next) != len(self.dat_terms_last()):
print("changed number of data terms!")
self._change_imgr_params = True
return
for term in sorted(self.dat_term_next.keys()):
if term not in self.dat_terms_last().keys():
print("added %s to data terms" % term)
self._change_imgr_params = True
return
for term in sorted(self.reg_term_next.keys()):
if term not in self.reg_terms_last().keys():
print("added %s to regularizers!" % term)
self._change_imgr_params = True
return
if ((self.prior_next.psize != self.prior_last().psize) or
(self.prior_next.xdim != self.prior_last().xdim) or
(self.prior_next.ydim != self.prior_last().ydim)):
print("changed prior dimensions!")
self._change_imgr_params = True
if self.debias_next != self.debias_last():
print("changed debiasing!")
self._change_imgr_params = True
return
if self.snrcut_next != self.snrcut_last():
print("changed snrcut!")
self._change_imgr_params = True
return
if self.weighting_next != self.weighting_last():
print("changed data weighting!")
self._change_imgr_params = True
return
if self.systematic_noise_next != self.systematic_noise_last():
print("changed systematic noise!")
self._change_imgr_params = True
return
if self.systematic_cphase_noise_next != self.systematic_cphase_noise_last():
print("changed systematic cphase noise!")
self._change_imgr_params = True
return
|
[
"def",
"check_params",
"(",
"self",
")",
":",
"if",
"(",
"(",
"self",
".",
"prior_next",
".",
"psize",
"!=",
"self",
".",
"init_next",
".",
"psize",
")",
"or",
"(",
"self",
".",
"prior_next",
".",
"xdim",
"!=",
"self",
".",
"init_next",
".",
"xdim",
")",
"or",
"(",
"self",
".",
"prior_next",
".",
"ydim",
"!=",
"self",
".",
"prior_next",
".",
"ydim",
")",
")",
":",
"raise",
"Exception",
"(",
"\"Initial image does not match dimensions of the prior image!\"",
")",
"if",
"(",
"self",
".",
"prior_next",
".",
"polrep",
"!=",
"self",
".",
"init_next",
".",
"polrep",
")",
":",
"raise",
"Exception",
"(",
"\"Initial image polrep does not match prior polrep!\"",
")",
"if",
"(",
"self",
".",
"prior_next",
".",
"polrep",
"==",
"'circ'",
"and",
"not",
"(",
"self",
".",
"pol_next",
"in",
"[",
"'P'",
",",
"'RR'",
",",
"'LL'",
"]",
")",
")",
":",
"raise",
"Exception",
"(",
"\"Initial image polrep is 'circ': pol_next must be 'RR' or 'LL' or 'P'!\"",
")",
"if",
"(",
"self",
".",
"prior_next",
".",
"polrep",
"==",
"'stokes'",
"and",
"not",
"(",
"self",
".",
"pol_next",
"in",
"[",
"'I'",
",",
"'Q'",
",",
"'U'",
",",
"'V'",
",",
"'P'",
",",
"'IP'",
",",
"'IQU'",
"]",
")",
")",
":",
"raise",
"Exception",
"(",
"\"Initial image polrep is 'stokes': pol_next must be in 'I','Q','U','V','P','IP','IQU'!\"",
")",
"if",
"(",
"'log'",
"in",
"self",
".",
"transform_next",
"and",
"self",
".",
"pol_next",
"in",
"[",
"'Q'",
",",
"'U'",
",",
"'V'",
"]",
")",
":",
"raise",
"Exception",
"(",
"\"Cannot image Stokes Q, U, V with log image transformation!\"",
")",
"if",
"self",
".",
"_ttype",
"not",
"in",
"[",
"'fast'",
",",
"'direct'",
",",
"'nfft'",
"]",
":",
"raise",
"Exception",
"(",
"\"Possible ttype values are 'fast', 'direct','nfft'!\"",
")",
"if",
"(",
"self",
".",
"pol_next",
"in",
"[",
"'Q'",
",",
"'U'",
",",
"'V'",
"]",
"and",
"(",
"'gs'",
"in",
"self",
".",
"reg_term_next",
".",
"keys",
"(",
")",
"or",
"'simple'",
"in",
"self",
".",
"reg_term_next",
".",
"keys",
"(",
")",
")",
")",
":",
"raise",
"Exception",
"(",
"\"'simple' and 'gs' methods do not work with Stokes Q, U, or V images!\"",
")",
"# Catch errors in multifrequency imaging setup",
"if",
"self",
".",
"mf_next",
"and",
"len",
"(",
"set",
"(",
"self",
".",
"freq_list",
")",
")",
"<",
"2",
":",
"raise",
"Exception",
"(",
"\"must have observations at at least two frequencies for multifrequency imaging!\"",
")",
"# Catch errors for polarimetric imaging setup",
"if",
"(",
"self",
".",
"pol_next",
"==",
"'P'",
")",
":",
"if",
"'mcv'",
"not",
"in",
"self",
".",
"transform_next",
":",
"raise",
"Exception",
"(",
"\"P imaging needs 'mcv' transform!\"",
")",
"if",
"(",
"self",
".",
"_ttype",
"not",
"in",
"[",
"\"direct\"",
",",
"\"nfft\"",
"]",
")",
":",
"raise",
"Exception",
"(",
"\"FFT no yet implemented in polarimetric imaging -- use NFFT!\"",
")",
"dt_here",
"=",
"False",
"dt_type",
"=",
"True",
"for",
"term",
"in",
"sorted",
"(",
"self",
".",
"dat_term_next",
".",
"keys",
"(",
")",
")",
":",
"if",
"(",
"term",
"is",
"not",
"None",
")",
"and",
"(",
"term",
"is",
"not",
"False",
")",
":",
"dt_here",
"=",
"True",
"if",
"not",
"(",
"(",
"term",
"in",
"DATATERMS_POL",
")",
"or",
"(",
"term",
"is",
"False",
")",
")",
":",
"dt_type",
"=",
"False",
"st_here",
"=",
"False",
"st_type",
"=",
"True",
"for",
"term",
"in",
"sorted",
"(",
"self",
".",
"reg_term_next",
".",
"keys",
"(",
")",
")",
":",
"if",
"(",
"term",
"is",
"not",
"None",
")",
"and",
"(",
"term",
"is",
"not",
"False",
")",
":",
"st_here",
"=",
"True",
"if",
"not",
"(",
"(",
"term",
"in",
"REGULARIZERS_POL",
")",
"or",
"(",
"term",
"is",
"False",
")",
")",
":",
"st_type",
"=",
"False",
"if",
"not",
"dt_here",
":",
"raise",
"Exception",
"(",
"\"Must have at least one data term!\"",
")",
"if",
"not",
"st_here",
":",
"raise",
"Exception",
"(",
"\"Must have at least one regularizer term!\"",
")",
"if",
"not",
"dt_type",
":",
"raise",
"Exception",
"(",
"\"Invalid data term for P imaging: \"",
"+",
"\"valid data terms are: \"",
"+",
"','",
".",
"join",
"(",
"DATATERMS_POL",
")",
")",
"if",
"not",
"st_type",
":",
"raise",
"Exception",
"(",
"\"Invalid regularizer for P imaging: \"",
"+",
"\"valid regularizers are: \"",
"+",
"','",
".",
"join",
"(",
"REGULARIZERS_POL",
")",
")",
"# Catch errors for simultaneous I + polarimetric imaging setup",
"elif",
"(",
"self",
".",
"pol_next",
"==",
"'IP'",
"or",
"self",
".",
"pol_next",
"==",
"'IQU'",
")",
":",
"if",
"'mcv'",
"not",
"in",
"self",
".",
"transform_next",
":",
"raise",
"Exception",
"(",
"\"P imaging needs 'mcv' transform!\"",
")",
"if",
"(",
"self",
".",
"_ttype",
"not",
"in",
"[",
"\"direct\"",
",",
"\"nfft\"",
"]",
")",
":",
"raise",
"Exception",
"(",
"\"FFT no yet implemented in polarimetric imaging -- use NFFT!\"",
")",
"dt_here",
"=",
"False",
"dt_type",
"=",
"True",
"dt_pol",
"=",
"False",
"for",
"term",
"in",
"sorted",
"(",
"self",
".",
"dat_term_next",
".",
"keys",
"(",
")",
")",
":",
"if",
"(",
"term",
"is",
"not",
"None",
")",
"and",
"(",
"term",
"is",
"not",
"False",
")",
":",
"dt_here",
"=",
"True",
"if",
"not",
"(",
"(",
"term",
"in",
"DATATERMS_POL",
")",
"or",
"(",
"term",
"in",
"DATATERMS",
")",
"or",
"(",
"term",
"is",
"False",
")",
")",
":",
"dt_type",
"=",
"False",
"if",
"term",
"in",
"DATATERMS_POL",
":",
"dt_pol",
"=",
"True",
"st_here",
"=",
"False",
"st_type",
"=",
"True",
"for",
"term",
"in",
"sorted",
"(",
"self",
".",
"reg_term_next",
".",
"keys",
"(",
")",
")",
":",
"if",
"(",
"term",
"is",
"not",
"None",
")",
"and",
"(",
"term",
"is",
"not",
"False",
")",
":",
"st_here",
"=",
"True",
"if",
"not",
"(",
"(",
"term",
"in",
"REGULARIZERS_POL",
")",
"or",
"(",
"term",
"in",
"REGULARIZERS",
")",
"or",
"(",
"term",
"is",
"False",
")",
")",
":",
"st_type",
"=",
"False",
"if",
"not",
"dt_here",
":",
"raise",
"Exception",
"(",
"\"Must have at least one data term!\"",
")",
"if",
"not",
"st_here",
":",
"raise",
"Exception",
"(",
"\"Must have at least one regularizer term!\"",
")",
"if",
"not",
"dt_type",
":",
"raise",
"Exception",
"(",
"\"Invalid data term for IP imaging: \"",
"+",
"\"valid data terms are: \"",
"+",
"','",
".",
"join",
"(",
"DATATERMS_POL",
"+",
"DATATERMS",
")",
")",
"if",
"not",
"st_type",
":",
"raise",
"Exception",
"(",
"\"Invalid regularizer for IP imaging: \"",
"+",
"\"valid regularizers are: \"",
"+",
"','",
".",
"join",
"(",
"REGULARIZERS_POL",
"+",
"REGULARIZERS",
")",
")",
"if",
"not",
"dt_pol",
":",
"raise",
"Exception",
"(",
"\"IP imaging must have at least one pol data term from: \"",
"+",
"','",
".",
"join",
"(",
"DATATERMS_POL",
")",
")",
"# Catch errors in single pol imaging setup",
"else",
":",
"dt_here",
"=",
"False",
"dt_type",
"=",
"True",
"for",
"term",
"in",
"sorted",
"(",
"self",
".",
"dat_term_next",
".",
"keys",
"(",
")",
")",
":",
"if",
"(",
"term",
"is",
"not",
"None",
")",
"and",
"(",
"term",
"is",
"not",
"False",
")",
":",
"dt_here",
"=",
"True",
"if",
"not",
"(",
"(",
"term",
"in",
"DATATERMS",
")",
"or",
"(",
"term",
"is",
"False",
")",
")",
":",
"dt_type",
"=",
"False",
"st_here",
"=",
"False",
"st_type",
"=",
"True",
"for",
"term",
"in",
"sorted",
"(",
"self",
".",
"reg_term_next",
".",
"keys",
"(",
")",
")",
":",
"if",
"(",
"term",
"is",
"not",
"None",
")",
"and",
"(",
"term",
"is",
"not",
"False",
")",
":",
"st_here",
"=",
"True",
"if",
"not",
"(",
"(",
"term",
"in",
"REGULARIZERS",
"or",
"term",
"in",
"REGULARIZERS_SPECIND",
"or",
"term",
"in",
"REGULARIZERS_CURV",
")",
"or",
"term",
"is",
"False",
")",
":",
"st_type",
"=",
"False",
"if",
"not",
"dt_here",
":",
"raise",
"Exception",
"(",
"\"Must have at least one data term!\"",
")",
"if",
"not",
"st_here",
":",
"raise",
"Exception",
"(",
"\"Must have at least one regularizer term!\"",
")",
"if",
"not",
"dt_type",
":",
"raise",
"Exception",
"(",
"\"Invalid data term: valid data terms are: \"",
"+",
"','",
".",
"join",
"(",
"DATATERMS",
")",
")",
"if",
"not",
"st_type",
":",
"raise",
"Exception",
"(",
"\"Invalid regularizer: valid regularizers are: \"",
"+",
"','",
".",
"join",
"(",
"REGULARIZERS",
")",
")",
"# Determine if we need to recompute the saved imager parameters on the next imager run",
"if",
"self",
".",
"nruns",
"==",
"0",
":",
"return",
"if",
"self",
".",
"pol_next",
"!=",
"self",
".",
"pol_last",
"(",
")",
":",
"print",
"(",
"\"changed polarization!\"",
")",
"self",
".",
"_change_imgr_params",
"=",
"True",
"return",
"if",
"self",
".",
"obslist_next",
"!=",
"self",
".",
"obslist_last",
"(",
")",
":",
"print",
"(",
"\"changed observation!\"",
")",
"self",
".",
"_change_imgr_params",
"=",
"True",
"return",
"if",
"len",
"(",
"self",
".",
"reg_term_next",
")",
"!=",
"len",
"(",
"self",
".",
"reg_terms_last",
"(",
")",
")",
":",
"print",
"(",
"\"changed number of regularizer terms!\"",
")",
"self",
".",
"_change_imgr_params",
"=",
"True",
"return",
"if",
"len",
"(",
"self",
".",
"dat_term_next",
")",
"!=",
"len",
"(",
"self",
".",
"dat_terms_last",
"(",
")",
")",
":",
"print",
"(",
"\"changed number of data terms!\"",
")",
"self",
".",
"_change_imgr_params",
"=",
"True",
"return",
"for",
"term",
"in",
"sorted",
"(",
"self",
".",
"dat_term_next",
".",
"keys",
"(",
")",
")",
":",
"if",
"term",
"not",
"in",
"self",
".",
"dat_terms_last",
"(",
")",
".",
"keys",
"(",
")",
":",
"print",
"(",
"\"added %s to data terms\"",
"%",
"term",
")",
"self",
".",
"_change_imgr_params",
"=",
"True",
"return",
"for",
"term",
"in",
"sorted",
"(",
"self",
".",
"reg_term_next",
".",
"keys",
"(",
")",
")",
":",
"if",
"term",
"not",
"in",
"self",
".",
"reg_terms_last",
"(",
")",
".",
"keys",
"(",
")",
":",
"print",
"(",
"\"added %s to regularizers!\"",
"%",
"term",
")",
"self",
".",
"_change_imgr_params",
"=",
"True",
"return",
"if",
"(",
"(",
"self",
".",
"prior_next",
".",
"psize",
"!=",
"self",
".",
"prior_last",
"(",
")",
".",
"psize",
")",
"or",
"(",
"self",
".",
"prior_next",
".",
"xdim",
"!=",
"self",
".",
"prior_last",
"(",
")",
".",
"xdim",
")",
"or",
"(",
"self",
".",
"prior_next",
".",
"ydim",
"!=",
"self",
".",
"prior_last",
"(",
")",
".",
"ydim",
")",
")",
":",
"print",
"(",
"\"changed prior dimensions!\"",
")",
"self",
".",
"_change_imgr_params",
"=",
"True",
"if",
"self",
".",
"debias_next",
"!=",
"self",
".",
"debias_last",
"(",
")",
":",
"print",
"(",
"\"changed debiasing!\"",
")",
"self",
".",
"_change_imgr_params",
"=",
"True",
"return",
"if",
"self",
".",
"snrcut_next",
"!=",
"self",
".",
"snrcut_last",
"(",
")",
":",
"print",
"(",
"\"changed snrcut!\"",
")",
"self",
".",
"_change_imgr_params",
"=",
"True",
"return",
"if",
"self",
".",
"weighting_next",
"!=",
"self",
".",
"weighting_last",
"(",
")",
":",
"print",
"(",
"\"changed data weighting!\"",
")",
"self",
".",
"_change_imgr_params",
"=",
"True",
"return",
"if",
"self",
".",
"systematic_noise_next",
"!=",
"self",
".",
"systematic_noise_last",
"(",
")",
":",
"print",
"(",
"\"changed systematic noise!\"",
")",
"self",
".",
"_change_imgr_params",
"=",
"True",
"return",
"if",
"self",
".",
"systematic_cphase_noise_next",
"!=",
"self",
".",
"systematic_cphase_noise_last",
"(",
")",
":",
"print",
"(",
"\"changed systematic cphase noise!\"",
")",
"self",
".",
"_change_imgr_params",
"=",
"True",
"return"
] |
https://github.com/achael/eht-imaging/blob/bbd3aeb06bef52bf89fa1c06de71e5509a5b0015/ehtim/imager.py#L424-L630
|
||
rembo10/headphones
|
b3199605be1ebc83a7a8feab6b1e99b64014187c
|
lib/beets/dbcore/db.py
|
python
|
Model._getters
|
(cls)
|
Return a mapping from field names to getter functions.
|
Return a mapping from field names to getter functions.
|
[
"Return",
"a",
"mapping",
"from",
"field",
"names",
"to",
"getter",
"functions",
"."
] |
def _getters(cls):
"""Return a mapping from field names to getter functions.
"""
# We could cache this if it becomes a performance problem to
# gather the getter mapping every time.
raise NotImplementedError()
|
[
"def",
"_getters",
"(",
"cls",
")",
":",
"# We could cache this if it becomes a performance problem to",
"# gather the getter mapping every time.",
"raise",
"NotImplementedError",
"(",
")"
] |
https://github.com/rembo10/headphones/blob/b3199605be1ebc83a7a8feab6b1e99b64014187c/lib/beets/dbcore/db.py#L144-L149
|
||
keiffster/program-y
|
8c99b56f8c32f01a7b9887b5daae9465619d0385
|
src/programy/parser/template/nodes/rand.py
|
python
|
TemplateRandomNode.parse_expression
|
(self, graph, expression)
|
[] |
def parse_expression(self, graph, expression):
li_found = False
for child in expression:
tag_name = TextUtils.tag_from_text(child.tag)
if tag_name == 'li':
li_found = True
li_node = graph.get_base_node()
self.children.append(li_node)
li_node.parse_template_node(graph, child)
else:
raise ParserException("Unsupported random child tag: %s" % (tag_name), xml_element=expression)
if li_found is False:
raise ParserException("No li children of random element!", xml_element=expression)
|
[
"def",
"parse_expression",
"(",
"self",
",",
"graph",
",",
"expression",
")",
":",
"li_found",
"=",
"False",
"for",
"child",
"in",
"expression",
":",
"tag_name",
"=",
"TextUtils",
".",
"tag_from_text",
"(",
"child",
".",
"tag",
")",
"if",
"tag_name",
"==",
"'li'",
":",
"li_found",
"=",
"True",
"li_node",
"=",
"graph",
".",
"get_base_node",
"(",
")",
"self",
".",
"children",
".",
"append",
"(",
"li_node",
")",
"li_node",
".",
"parse_template_node",
"(",
"graph",
",",
"child",
")",
"else",
":",
"raise",
"ParserException",
"(",
"\"Unsupported random child tag: %s\"",
"%",
"(",
"tag_name",
")",
",",
"xml_element",
"=",
"expression",
")",
"if",
"li_found",
"is",
"False",
":",
"raise",
"ParserException",
"(",
"\"No li children of random element!\"",
",",
"xml_element",
"=",
"expression",
")"
] |
https://github.com/keiffster/program-y/blob/8c99b56f8c32f01a7b9887b5daae9465619d0385/src/programy/parser/template/nodes/rand.py#L50-L64
|
||||
enzienaudio/hvcc
|
30e47328958d600c54889e2a254c3f17f2b2fd06
|
interpreters/max2hv/MaxUnopObject.py
|
python
|
MaxUnopObject.get_supported_objects
|
(clazz)
|
return MaxUnopObject.__MAX_HEAVY_DICT.keys()
|
[] |
def get_supported_objects(clazz):
return MaxUnopObject.__MAX_HEAVY_DICT.keys()
|
[
"def",
"get_supported_objects",
"(",
"clazz",
")",
":",
"return",
"MaxUnopObject",
".",
"__MAX_HEAVY_DICT",
".",
"keys",
"(",
")"
] |
https://github.com/enzienaudio/hvcc/blob/30e47328958d600c54889e2a254c3f17f2b2fd06/interpreters/max2hv/MaxUnopObject.py#L23-L24
|
|||
saltstack/salt
|
fae5bc757ad0f1716483ce7ae180b451545c2058
|
salt/beacons/memusage.py
|
python
|
beacon
|
(config)
|
return ret
|
Monitor the memory usage of the minion
Specify thresholds for percent used and only emit a beacon
if it is exceeded.
.. code-block:: yaml
beacons:
memusage:
- percent: 63%
|
Monitor the memory usage of the minion
|
[
"Monitor",
"the",
"memory",
"usage",
"of",
"the",
"minion"
] |
def beacon(config):
"""
Monitor the memory usage of the minion
Specify thresholds for percent used and only emit a beacon
if it is exceeded.
.. code-block:: yaml
beacons:
memusage:
- percent: 63%
"""
ret = []
config = salt.utils.beacons.list_to_dict(config)
_current_usage = psutil.virtual_memory()
current_usage = _current_usage.percent
monitor_usage = config["percent"]
if isinstance(monitor_usage, str) and "%" in monitor_usage:
monitor_usage = re.sub("%", "", monitor_usage)
monitor_usage = float(monitor_usage)
if current_usage >= monitor_usage:
ret.append({"memusage": current_usage})
return ret
|
[
"def",
"beacon",
"(",
"config",
")",
":",
"ret",
"=",
"[",
"]",
"config",
"=",
"salt",
".",
"utils",
".",
"beacons",
".",
"list_to_dict",
"(",
"config",
")",
"_current_usage",
"=",
"psutil",
".",
"virtual_memory",
"(",
")",
"current_usage",
"=",
"_current_usage",
".",
"percent",
"monitor_usage",
"=",
"config",
"[",
"\"percent\"",
"]",
"if",
"isinstance",
"(",
"monitor_usage",
",",
"str",
")",
"and",
"\"%\"",
"in",
"monitor_usage",
":",
"monitor_usage",
"=",
"re",
".",
"sub",
"(",
"\"%\"",
",",
"\"\"",
",",
"monitor_usage",
")",
"monitor_usage",
"=",
"float",
"(",
"monitor_usage",
")",
"if",
"current_usage",
">=",
"monitor_usage",
":",
"ret",
".",
"append",
"(",
"{",
"\"memusage\"",
":",
"current_usage",
"}",
")",
"return",
"ret"
] |
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/beacons/memusage.py#L47-L73
|
|
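The memusage beacon's only Salt-specific dependency is list_to_dict on the config; the threshold check itself runs on plain psutil data. A hedged standalone sketch of the same percent-string handling and comparison (memusage_check is a hypothetical name, not part of Salt):

import re
import psutil

def memusage_check(config):
    # config mirrors the dict form of the beacon config, e.g. {"percent": "63%"}
    monitor_usage = config["percent"]
    if isinstance(monitor_usage, str) and "%" in monitor_usage:
        monitor_usage = re.sub("%", "", monitor_usage)
    monitor_usage = float(monitor_usage)
    current_usage = psutil.virtual_memory().percent
    return [{"memusage": current_usage}] if current_usage >= monitor_usage else []

print(memusage_check({"percent": "63%"}))  # [] unless usage is at or above 63%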
runawayhorse001/LearningApacheSpark
|
67f3879dce17553195f094f5728b94a01badcf24
|
pyspark/sql/catalog.py
|
python
|
Catalog.listDatabases
|
(self)
|
return databases
|
Returns a list of databases available across all sessions.
|
Returns a list of databases available across all sessions.
|
[
"Returns",
"a",
"list",
"of",
"databases",
"available",
"across",
"all",
"sessions",
"."
] |
def listDatabases(self):
"""Returns a list of databases available across all sessions."""
iter = self._jcatalog.listDatabases().toLocalIterator()
databases = []
while iter.hasNext():
jdb = iter.next()
databases.append(Database(
name=jdb.name(),
description=jdb.description(),
locationUri=jdb.locationUri()))
return databases
|
[
"def",
"listDatabases",
"(",
"self",
")",
":",
"iter",
"=",
"self",
".",
"_jcatalog",
".",
"listDatabases",
"(",
")",
".",
"toLocalIterator",
"(",
")",
"databases",
"=",
"[",
"]",
"while",
"iter",
".",
"hasNext",
"(",
")",
":",
"jdb",
"=",
"iter",
".",
"next",
"(",
")",
"databases",
".",
"append",
"(",
"Database",
"(",
"name",
"=",
"jdb",
".",
"name",
"(",
")",
",",
"description",
"=",
"jdb",
".",
"description",
"(",
")",
",",
"locationUri",
"=",
"jdb",
".",
"locationUri",
"(",
")",
")",
")",
"return",
"databases"
] |
https://github.com/runawayhorse001/LearningApacheSpark/blob/67f3879dce17553195f094f5728b94a01badcf24/pyspark/sql/catalog.py#L61-L71
|
|
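A hedged usage sketch with the standard PySpark entry point: Catalog.listDatabases() is reached through SparkSession.catalog and returns Database tuples carrying name, description and locationUri fields.

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").appName("catalog-demo").getOrCreate()
for db in spark.catalog.listDatabases():
    print(db.name, db.locationUri)   # e.g. "default file:/.../spark-warehouse"
spark.stop()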
faucetsdn/ryu
|
537f35f4b2bc634ef05e3f28373eb5e24609f989
|
ryu/services/protocols/bgp/operator/views/base.py
|
python
|
OperatorAbstractView.__init__
|
(self, obj, filter_func=None)
|
Init
:param obj: data model for view. In other words object we
are creating view for. In case of ListView it should be
a list and in case of DictView it should be a dict.
:param filter_func: function to filter models
|
Init
|
[
"Init"
] |
def __init__(self, obj, filter_func=None):
"""Init
:param obj: data model for view. In other words object we
are creating view for. In case of ListView it should be
a list and in case of DictView it should be a dict.
:param filter_func: function to filter models
"""
self._filter_func = filter_func
self._fields = self._collect_fields()
self._obj = obj
|
[
"def",
"__init__",
"(",
"self",
",",
"obj",
",",
"filter_func",
"=",
"None",
")",
":",
"self",
".",
"_filter_func",
"=",
"filter_func",
"self",
".",
"_fields",
"=",
"self",
".",
"_collect_fields",
"(",
")",
"self",
".",
"_obj",
"=",
"obj"
] |
https://github.com/faucetsdn/ryu/blob/537f35f4b2bc634ef05e3f28373eb5e24609f989/ryu/services/protocols/bgp/operator/views/base.py#L35-L45
|
||
nsacyber/WALKOFF
|
52d3311abe99d64cd2a902eb998c5e398efe0e07
|
common/walkoff_client/walkoff_client/models/copy_workflow.py
|
python
|
CopyWorkflow.to_dict
|
(self)
|
return result
|
Returns the model properties as a dict
|
Returns the model properties as a dict
|
[
"Returns",
"the",
"model",
"properties",
"as",
"a",
"dict"
] |
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
|
[
"def",
"to_dict",
"(",
"self",
")",
":",
"result",
"=",
"{",
"}",
"for",
"attr",
",",
"_",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"openapi_types",
")",
":",
"value",
"=",
"getattr",
"(",
"self",
",",
"attr",
")",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"result",
"[",
"attr",
"]",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"to_dict",
"(",
")",
"if",
"hasattr",
"(",
"x",
",",
"\"to_dict\"",
")",
"else",
"x",
",",
"value",
")",
")",
"elif",
"hasattr",
"(",
"value",
",",
"\"to_dict\"",
")",
":",
"result",
"[",
"attr",
"]",
"=",
"value",
".",
"to_dict",
"(",
")",
"elif",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"result",
"[",
"attr",
"]",
"=",
"dict",
"(",
"map",
"(",
"lambda",
"item",
":",
"(",
"item",
"[",
"0",
"]",
",",
"item",
"[",
"1",
"]",
".",
"to_dict",
"(",
")",
")",
"if",
"hasattr",
"(",
"item",
"[",
"1",
"]",
",",
"\"to_dict\"",
")",
"else",
"item",
",",
"value",
".",
"items",
"(",
")",
")",
")",
"else",
":",
"result",
"[",
"attr",
"]",
"=",
"value",
"return",
"result"
] |
https://github.com/nsacyber/WALKOFF/blob/52d3311abe99d64cd2a902eb998c5e398efe0e07/common/walkoff_client/walkoff_client/models/copy_workflow.py#L100-L122
|
|
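The method above is the stock pattern that OpenAPI-generated models use to serialise themselves: scalars are copied, and anything exposing to_dict (single objects, list elements, dict values) is recursed into. A tiny hedged stand-in illustrating that recursion; Inner and Outer are hypothetical classes, not WALKOFF models.

import six

class Inner(object):
    openapi_types = {"value": "int"}
    def __init__(self, value):
        self.value = value
    def to_dict(self):
        return {"value": self.value}

class Outer(object):
    openapi_types = {"name": "str", "items": "list[Inner]"}
    def __init__(self, name, items):
        self.name = name
        self.items = items
    def to_dict(self):
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result

print(Outer("copy", [Inner(1), Inner(2)]).to_dict())
# {'name': 'copy', 'items': [{'value': 1}, {'value': 2}]}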
emesene/emesene
|
4548a4098310e21b16437bb36223a7f632a4f7bc
|
emesene/e3/papylib/papyon/papyon/event/media.py
|
python
|
MediaStreamEventInterface.on_remote_candidates_received
|
(self, candidates)
|
Called when the remote candidates for this stream are received
@param candidates: the remote candidates
@type candidates: L{ICECandidate<papyon.sip.ice.ICECandidate>}
|
Called when the remote candidates for this stream are received
|
[
"Called",
"when",
"the",
"remote",
"candidates",
"for",
"this",
"stream",
"are",
"received"
] |
def on_remote_candidates_received(self, candidates):
"""Called when the remote candidates for this stream are received
@param candidates: the remote candidates
@type candidates: L{ICECandidate<papyon.sip.ice.ICECandidate>}"""
pass
|
[
"def",
"on_remote_candidates_received",
"(",
"self",
",",
"candidates",
")",
":",
"pass"
] |
https://github.com/emesene/emesene/blob/4548a4098310e21b16437bb36223a7f632a4f7bc/emesene/e3/papylib/papyon/papyon/event/media.py#L85-L89
|
||
hzy46/fast-neural-style-tensorflow
|
eeaa47d359e5c589a4cc6ccbf8c0450ccc657d2d
|
preprocessing/lenet_preprocessing.py
|
python
|
preprocess_image
|
(image, output_height, output_width, is_training)
|
return image
|
Preprocesses the given image.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
Returns:
A preprocessed image.
|
Preprocesses the given image.
|
[
"Preprocesses",
"the",
"given",
"image",
"."
] |
def preprocess_image(image, output_height, output_width, is_training):
"""Preprocesses the given image.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
Returns:
A preprocessed image.
"""
image = tf.to_float(image)
image = tf.image.resize_image_with_crop_or_pad(
image, output_width, output_height)
image = tf.sub(image, 128.0)
image = tf.div(image, 128.0)
return image
|
[
"def",
"preprocess_image",
"(",
"image",
",",
"output_height",
",",
"output_width",
",",
"is_training",
")",
":",
"image",
"=",
"tf",
".",
"to_float",
"(",
"image",
")",
"image",
"=",
"tf",
".",
"image",
".",
"resize_image_with_crop_or_pad",
"(",
"image",
",",
"output_width",
",",
"output_height",
")",
"image",
"=",
"tf",
".",
"sub",
"(",
"image",
",",
"128.0",
")",
"image",
"=",
"tf",
".",
"div",
"(",
"image",
",",
"128.0",
")",
"return",
"image"
] |
https://github.com/hzy46/fast-neural-style-tensorflow/blob/eeaa47d359e5c589a4cc6ccbf8c0450ccc657d2d/preprocessing/lenet_preprocessing.py#L26-L44
|
|
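The preprocessing above relies on TF1-era ops (tf.to_float, tf.sub, tf.div) that were removed in later TensorFlow releases. Below is a hedged NumPy equivalent of the same transform, a center crop or zero-pad to the target size followed by shifting and scaling by 128 into roughly [-1, 1]; it is a sketch, not the slim library's code.

import numpy as np

def preprocess_image_np(image, output_height, output_width):
    h, w = image.shape[:2]
    out = np.zeros((output_height, output_width) + image.shape[2:], dtype=np.float32)
    # center crop (if larger) or zero-pad (if smaller) each spatial axis
    ch, cw = min(h, output_height), min(w, output_width)
    oy, ox = (output_height - ch) // 2, (output_width - cw) // 2
    iy, ix = (h - ch) // 2, (w - cw) // 2
    out[oy:oy + ch, ox:ox + cw] = image[iy:iy + ch, ix:ix + cw]
    return (out - 128.0) / 128.0

img = np.random.randint(0, 256, size=(40, 52, 3)).astype(np.float32)
print(preprocess_image_np(img, 28, 28).shape)  # (28, 28, 3)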
snwh/suru-icon-theme
|
2d8102084eaf194f04076ec6949feacb0eb4a1ba
|
src/cursors/render-cursors.py
|
python
|
SVGHandler.startElement_svg
|
(self, name, attrs)
|
Callback hook which handles the start of an svg image
|
Callback hook which handles the start of an svg image
|
[
"Callback",
"hook",
"which",
"handles",
"the",
"start",
"of",
"an",
"svg",
"image"
] |
def startElement_svg(self, name, attrs):
"""Callback hook which handles the start of an svg image"""
dbg('startElement_svg called')
width = attrs.get('width', None)
height = attrs.get('height', None)
self.pageBounds.x2 = self.parseCoordinates(width)
self.pageBounds.y2 = self.parseCoordinates(height)
|
[
"def",
"startElement_svg",
"(",
"self",
",",
"name",
",",
"attrs",
")",
":",
"dbg",
"(",
"'startElement_svg called'",
")",
"width",
"=",
"attrs",
".",
"get",
"(",
"'width'",
",",
"None",
")",
"height",
"=",
"attrs",
".",
"get",
"(",
"'height'",
",",
"None",
")",
"self",
".",
"pageBounds",
".",
"x2",
"=",
"self",
".",
"parseCoordinates",
"(",
"width",
")",
"self",
".",
"pageBounds",
".",
"y2",
"=",
"self",
".",
"parseCoordinates",
"(",
"height",
")"
] |
https://github.com/snwh/suru-icon-theme/blob/2d8102084eaf194f04076ec6949feacb0eb4a1ba/src/cursors/render-cursors.py#L384-L390
|
||
Erotemic/ubelt
|
221d5f6262d5c8e78638e1a38e3adcc9cc9a15e9
|
ubelt/util_hash.py
|
python
|
_rectify_hashlen
|
(hashlen)
|
Example:
>>> assert _rectify_hashlen(NoParam) is None
>>> assert _rectify_hashlen(8) == 8
|
Example:
>>> assert _rectify_hashlen(NoParam) is None
>>> assert _rectify_hashlen(8) == 8
|
[
"Example",
":",
">>>",
"assert",
"_rectify_hashlen",
"(",
"NoParam",
")",
"is",
"None",
">>>",
"assert",
"_rectify_hashlen",
"(",
"8",
")",
"==",
"8"
] |
def _rectify_hashlen(hashlen): # nocover
"""
Example:
>>> assert _rectify_hashlen(NoParam) is None
>>> assert _rectify_hashlen(8) == 8
"""
if hashlen is NoParam:
return None
else: # nocover
# import warnings
from ubelt._util_deprecated import schedule_deprecation2
schedule_deprecation2(
migration='Use slice syntax instead', name='hashlen', type='kwarg',
deprecate='0.9.6', remove='1.0.0',
)
# warnings.warn('Specifying hashlen is deprecated and will be removed. '
# 'Use slice syntax instead', DeprecationWarning)
if hashlen == 'default': # nocover
return None
else:
return hashlen
|
[
"def",
"_rectify_hashlen",
"(",
"hashlen",
")",
":",
"# nocover",
"if",
"hashlen",
"is",
"NoParam",
":",
"return",
"None",
"else",
":",
"# nocover",
"# import warnings",
"from",
"ubelt",
".",
"_util_deprecated",
"import",
"schedule_deprecation2",
"schedule_deprecation2",
"(",
"migration",
"=",
"'Use slice syntax instead'",
",",
"name",
"=",
"'hashlen'",
",",
"type",
"=",
"'kwarg'",
",",
"deprecate",
"=",
"'0.9.6'",
",",
"remove",
"=",
"'1.0.0'",
",",
")",
"# warnings.warn('Specifying hashlen is deprecated and will be removed. '",
"# 'Use slice syntax instead', DeprecationWarning)",
"if",
"hashlen",
"==",
"'default'",
":",
"# nocover",
"return",
"None",
"else",
":",
"return",
"hashlen"
] |
https://github.com/Erotemic/ubelt/blob/221d5f6262d5c8e78638e1a38e3adcc9cc9a15e9/ubelt/util_hash.py#L371-L391
|
||
oracle/oci-python-sdk
|
3c1604e4e212008fb6718e2f68cdb5ef71fd5793
|
src/oci/jms/java_management_service_client.py
|
python
|
JavaManagementServiceClient.summarize_installation_usage
|
(self, fleet_id, **kwargs)
|
List Java installation usage in a Fleet filtered by query parameters.
:param str fleet_id: (required)
The `OCID`__ of the Fleet.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param str jre_vendor: (optional)
The vendor of the related Java Runtime.
:param str jre_distribution: (optional)
The distribution of the related Java Runtime.
:param str jre_version: (optional)
The version of the related Java Runtime.
:param str installation_path: (optional)
The file system path of the installation.
:param str application_id: (optional)
The Fleet-unique identifier of the related application.
:param str managed_instance_id: (optional)
The Fleet-unique identifier of the related managed instance.
:param list[str] fields: (optional)
Additional fields to include into the returned model on top of the required ones.
This parameter can also include 'approximateApplicationCount' and 'approximateManagedInstanceCount'.
For example 'approximateApplicationCount,approximateManagedInstanceCount'.
Allowed values are: "approximateApplicationCount", "approximateManagedInstanceCount"
:param datetime time_start: (optional)
The start of the time period during which resources are searched (formatted according to `RFC3339`__).
__ https://datatracker.ietf.org/doc/html/rfc3339
:param datetime time_end: (optional)
The end of the time period during which resources are searched (formatted according to `RFC3339`__).
__ https://datatracker.ietf.org/doc/html/rfc3339
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page at which to start retrieving results. The token is usually retrieved from a previous list call.
:param str sort_order: (optional)
The sort order, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
The field to sort installation views. Only one sort order may be provided.
Default order for _timeFirstSeen_, _timeLastSeen_, _jreVersion_, _approximateApplicationCount_
and _approximateManagedInstanceCount_ is **descending**.
Default order for _jreDistribution_ and _jreVendor_ is **ascending**. If no value is specified _timeLastSeen_ is default.
Allowed values are: "jreDistribution", "jreVendor", "jreVersion", "path", "timeFirstSeen", "timeLastSeen", "approximateApplicationCount", "approximateManagedInstanceCount", "osName"
:param str opc_request_id: (optional)
The client request ID for tracing.
:param list[str] os_family: (optional)
The operating system type.
Allowed values are: "LINUX", "WINDOWS", "MACOS", "UNKNOWN"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.jms.models.InstallationUsageCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/jms/summarize_installation_usage.py.html>`__ to see an example of how to use summarize_installation_usage API.
|
List Java installation usage in a Fleet filtered by query parameters.
|
[
"List",
"Java",
"installation",
"usage",
"in",
"a",
"Fleet",
"filtered",
"by",
"query",
"parameters",
"."
] |
def summarize_installation_usage(self, fleet_id, **kwargs):
"""
List Java installation usage in a Fleet filtered by query parameters.
:param str fleet_id: (required)
The `OCID`__ of the Fleet.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param str jre_vendor: (optional)
The vendor of the related Java Runtime.
:param str jre_distribution: (optional)
The distribution of the related Java Runtime.
:param str jre_version: (optional)
The version of the related Java Runtime.
:param str installation_path: (optional)
The file system path of the installation.
:param str application_id: (optional)
The Fleet-unique identifier of the related application.
:param str managed_instance_id: (optional)
The Fleet-unique identifier of the related managed instance.
:param list[str] fields: (optional)
Additional fields to include into the returned model on top of the required ones.
This parameter can also include 'approximateApplicationCount' and 'approximateManagedInstanceCount'.
For example 'approximateApplicationCount,approximateManagedInstanceCount'.
Allowed values are: "approximateApplicationCount", "approximateManagedInstanceCount"
:param datetime time_start: (optional)
The start of the time period during which resources are searched (formatted according to `RFC3339`__).
__ https://datatracker.ietf.org/doc/html/rfc3339
:param datetime time_end: (optional)
The end of the time period during which resources are searched (formatted according to `RFC3339`__).
__ https://datatracker.ietf.org/doc/html/rfc3339
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page at which to start retrieving results. The token is usually retrieved from a previous list call.
:param str sort_order: (optional)
The sort order, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
The field to sort installation views. Only one sort order may be provided.
Default order for _timeFirstSeen_, _timeLastSeen_, _jreVersion_, _approximateApplicationCount_
and _approximateManagedInstanceCount_ is **descending**.
Default order for _jreDistribution_ and _jreVendor_ is **ascending**. If no value is specified _timeLastSeen_ is default.
Allowed values are: "jreDistribution", "jreVendor", "jreVersion", "path", "timeFirstSeen", "timeLastSeen", "approximateApplicationCount", "approximateManagedInstanceCount", "osName"
:param str opc_request_id: (optional)
The client request ID for tracing.
:param list[str] os_family: (optional)
The operating system type.
Allowed values are: "LINUX", "WINDOWS", "MACOS", "UNKNOWN"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.jms.models.InstallationUsageCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/jms/summarize_installation_usage.py.html>`__ to see an example of how to use summarize_installation_usage API.
"""
resource_path = "/fleets/{fleetId}/actions/summarizeInstallationUsage"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"jre_vendor",
"jre_distribution",
"jre_version",
"installation_path",
"application_id",
"managed_instance_id",
"fields",
"time_start",
"time_end",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id",
"os_family"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"summarize_installation_usage got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"fleetId": fleet_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'fields' in kwargs:
fields_allowed_values = ["approximateApplicationCount", "approximateManagedInstanceCount"]
for fields_item in kwargs['fields']:
if fields_item not in fields_allowed_values:
raise ValueError(
"Invalid value for `fields`, must be one of {0}".format(fields_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["jreDistribution", "jreVendor", "jreVersion", "path", "timeFirstSeen", "timeLastSeen", "approximateApplicationCount", "approximateManagedInstanceCount", "osName"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'os_family' in kwargs:
os_family_allowed_values = ["LINUX", "WINDOWS", "MACOS", "UNKNOWN"]
for os_family_item in kwargs['os_family']:
if os_family_item not in os_family_allowed_values:
raise ValueError(
"Invalid value for `os_family`, must be one of {0}".format(os_family_allowed_values)
)
query_params = {
"jreVendor": kwargs.get("jre_vendor", missing),
"jreDistribution": kwargs.get("jre_distribution", missing),
"jreVersion": kwargs.get("jre_version", missing),
"installationPath": kwargs.get("installation_path", missing),
"applicationId": kwargs.get("application_id", missing),
"managedInstanceId": kwargs.get("managed_instance_id", missing),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"timeStart": kwargs.get("time_start", missing),
"timeEnd": kwargs.get("time_end", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing),
"osFamily": self.base_client.generate_collection_format_param(kwargs.get("os_family", missing), 'multi')
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="InstallationUsageCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="InstallationUsageCollection")
|
[
"def",
"summarize_installation_usage",
"(",
"self",
",",
"fleet_id",
",",
"*",
"*",
"kwargs",
")",
":",
"resource_path",
"=",
"\"/fleets/{fleetId}/actions/summarizeInstallationUsage\"",
"method",
"=",
"\"GET\"",
"# Don't accept unknown kwargs",
"expected_kwargs",
"=",
"[",
"\"retry_strategy\"",
",",
"\"jre_vendor\"",
",",
"\"jre_distribution\"",
",",
"\"jre_version\"",
",",
"\"installation_path\"",
",",
"\"application_id\"",
",",
"\"managed_instance_id\"",
",",
"\"fields\"",
",",
"\"time_start\"",
",",
"\"time_end\"",
",",
"\"limit\"",
",",
"\"page\"",
",",
"\"sort_order\"",
",",
"\"sort_by\"",
",",
"\"opc_request_id\"",
",",
"\"os_family\"",
"]",
"extra_kwargs",
"=",
"[",
"_key",
"for",
"_key",
"in",
"six",
".",
"iterkeys",
"(",
"kwargs",
")",
"if",
"_key",
"not",
"in",
"expected_kwargs",
"]",
"if",
"extra_kwargs",
":",
"raise",
"ValueError",
"(",
"\"summarize_installation_usage got unknown kwargs: {!r}\"",
".",
"format",
"(",
"extra_kwargs",
")",
")",
"path_params",
"=",
"{",
"\"fleetId\"",
":",
"fleet_id",
"}",
"path_params",
"=",
"{",
"k",
":",
"v",
"for",
"(",
"k",
",",
"v",
")",
"in",
"six",
".",
"iteritems",
"(",
"path_params",
")",
"if",
"v",
"is",
"not",
"missing",
"}",
"for",
"(",
"k",
",",
"v",
")",
"in",
"six",
".",
"iteritems",
"(",
"path_params",
")",
":",
"if",
"v",
"is",
"None",
"or",
"(",
"isinstance",
"(",
"v",
",",
"six",
".",
"string_types",
")",
"and",
"len",
"(",
"v",
".",
"strip",
"(",
")",
")",
"==",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'Parameter {} cannot be None, whitespace or empty string'",
".",
"format",
"(",
"k",
")",
")",
"if",
"'fields'",
"in",
"kwargs",
":",
"fields_allowed_values",
"=",
"[",
"\"approximateApplicationCount\"",
",",
"\"approximateManagedInstanceCount\"",
"]",
"for",
"fields_item",
"in",
"kwargs",
"[",
"'fields'",
"]",
":",
"if",
"fields_item",
"not",
"in",
"fields_allowed_values",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `fields`, must be one of {0}\"",
".",
"format",
"(",
"fields_allowed_values",
")",
")",
"if",
"'sort_order'",
"in",
"kwargs",
":",
"sort_order_allowed_values",
"=",
"[",
"\"ASC\"",
",",
"\"DESC\"",
"]",
"if",
"kwargs",
"[",
"'sort_order'",
"]",
"not",
"in",
"sort_order_allowed_values",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `sort_order`, must be one of {0}\"",
".",
"format",
"(",
"sort_order_allowed_values",
")",
")",
"if",
"'sort_by'",
"in",
"kwargs",
":",
"sort_by_allowed_values",
"=",
"[",
"\"jreDistribution\"",
",",
"\"jreVendor\"",
",",
"\"jreVersion\"",
",",
"\"path\"",
",",
"\"timeFirstSeen\"",
",",
"\"timeLastSeen\"",
",",
"\"approximateApplicationCount\"",
",",
"\"approximateManagedInstanceCount\"",
",",
"\"osName\"",
"]",
"if",
"kwargs",
"[",
"'sort_by'",
"]",
"not",
"in",
"sort_by_allowed_values",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `sort_by`, must be one of {0}\"",
".",
"format",
"(",
"sort_by_allowed_values",
")",
")",
"if",
"'os_family'",
"in",
"kwargs",
":",
"os_family_allowed_values",
"=",
"[",
"\"LINUX\"",
",",
"\"WINDOWS\"",
",",
"\"MACOS\"",
",",
"\"UNKNOWN\"",
"]",
"for",
"os_family_item",
"in",
"kwargs",
"[",
"'os_family'",
"]",
":",
"if",
"os_family_item",
"not",
"in",
"os_family_allowed_values",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `os_family`, must be one of {0}\"",
".",
"format",
"(",
"os_family_allowed_values",
")",
")",
"query_params",
"=",
"{",
"\"jreVendor\"",
":",
"kwargs",
".",
"get",
"(",
"\"jre_vendor\"",
",",
"missing",
")",
",",
"\"jreDistribution\"",
":",
"kwargs",
".",
"get",
"(",
"\"jre_distribution\"",
",",
"missing",
")",
",",
"\"jreVersion\"",
":",
"kwargs",
".",
"get",
"(",
"\"jre_version\"",
",",
"missing",
")",
",",
"\"installationPath\"",
":",
"kwargs",
".",
"get",
"(",
"\"installation_path\"",
",",
"missing",
")",
",",
"\"applicationId\"",
":",
"kwargs",
".",
"get",
"(",
"\"application_id\"",
",",
"missing",
")",
",",
"\"managedInstanceId\"",
":",
"kwargs",
".",
"get",
"(",
"\"managed_instance_id\"",
",",
"missing",
")",
",",
"\"fields\"",
":",
"self",
".",
"base_client",
".",
"generate_collection_format_param",
"(",
"kwargs",
".",
"get",
"(",
"\"fields\"",
",",
"missing",
")",
",",
"'multi'",
")",
",",
"\"timeStart\"",
":",
"kwargs",
".",
"get",
"(",
"\"time_start\"",
",",
"missing",
")",
",",
"\"timeEnd\"",
":",
"kwargs",
".",
"get",
"(",
"\"time_end\"",
",",
"missing",
")",
",",
"\"limit\"",
":",
"kwargs",
".",
"get",
"(",
"\"limit\"",
",",
"missing",
")",
",",
"\"page\"",
":",
"kwargs",
".",
"get",
"(",
"\"page\"",
",",
"missing",
")",
",",
"\"sortOrder\"",
":",
"kwargs",
".",
"get",
"(",
"\"sort_order\"",
",",
"missing",
")",
",",
"\"sortBy\"",
":",
"kwargs",
".",
"get",
"(",
"\"sort_by\"",
",",
"missing",
")",
",",
"\"osFamily\"",
":",
"self",
".",
"base_client",
".",
"generate_collection_format_param",
"(",
"kwargs",
".",
"get",
"(",
"\"os_family\"",
",",
"missing",
")",
",",
"'multi'",
")",
"}",
"query_params",
"=",
"{",
"k",
":",
"v",
"for",
"(",
"k",
",",
"v",
")",
"in",
"six",
".",
"iteritems",
"(",
"query_params",
")",
"if",
"v",
"is",
"not",
"missing",
"and",
"v",
"is",
"not",
"None",
"}",
"header_params",
"=",
"{",
"\"accept\"",
":",
"\"application/json\"",
",",
"\"content-type\"",
":",
"\"application/json\"",
",",
"\"opc-request-id\"",
":",
"kwargs",
".",
"get",
"(",
"\"opc_request_id\"",
",",
"missing",
")",
"}",
"header_params",
"=",
"{",
"k",
":",
"v",
"for",
"(",
"k",
",",
"v",
")",
"in",
"six",
".",
"iteritems",
"(",
"header_params",
")",
"if",
"v",
"is",
"not",
"missing",
"and",
"v",
"is",
"not",
"None",
"}",
"retry_strategy",
"=",
"self",
".",
"base_client",
".",
"get_preferred_retry_strategy",
"(",
"operation_retry_strategy",
"=",
"kwargs",
".",
"get",
"(",
"'retry_strategy'",
")",
",",
"client_retry_strategy",
"=",
"self",
".",
"retry_strategy",
")",
"if",
"retry_strategy",
":",
"if",
"not",
"isinstance",
"(",
"retry_strategy",
",",
"retry",
".",
"NoneRetryStrategy",
")",
":",
"self",
".",
"base_client",
".",
"add_opc_client_retries_header",
"(",
"header_params",
")",
"retry_strategy",
".",
"add_circuit_breaker_callback",
"(",
"self",
".",
"circuit_breaker_callback",
")",
"return",
"retry_strategy",
".",
"make_retrying_call",
"(",
"self",
".",
"base_client",
".",
"call_api",
",",
"resource_path",
"=",
"resource_path",
",",
"method",
"=",
"method",
",",
"path_params",
"=",
"path_params",
",",
"query_params",
"=",
"query_params",
",",
"header_params",
"=",
"header_params",
",",
"response_type",
"=",
"\"InstallationUsageCollection\"",
")",
"else",
":",
"return",
"self",
".",
"base_client",
".",
"call_api",
"(",
"resource_path",
"=",
"resource_path",
",",
"method",
"=",
"method",
",",
"path_params",
"=",
"path_params",
",",
"query_params",
"=",
"query_params",
",",
"header_params",
"=",
"header_params",
",",
"response_type",
"=",
"\"InstallationUsageCollection\"",
")"
] |
https://github.com/oracle/oci-python-sdk/blob/3c1604e4e212008fb6718e2f68cdb5ef71fd5793/src/oci/jms/java_management_service_client.py#L1408-L1610
|
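A hedged usage sketch for the summarize_installation_usage record above. It assumes the usual OCI SDK bootstrap (oci.config.from_file plus oci.jms.JavaManagementServiceClient), a placeholder Fleet OCID, and that the returned InstallationUsageCollection exposes an items list; the keyword arguments mirror the optional parameters documented in the record.

import oci

# Load credentials from the default ~/.oci/config profile (assumes one exists).
config = oci.config.from_file()
client = oci.jms.JavaManagementServiceClient(config)

# Placeholder OCID -- substitute a real Fleet OCID.
fleet_id = "ocid1.jmsfleet.oc1..exampleuniqueID"

response = client.summarize_installation_usage(
    fleet_id,
    sort_by="timeLastSeen",
    sort_order="DESC",
    limit=10,
)
for item in response.data.items:
    print(item.path)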
||
linxid/Machine_Learning_Study_Path
|
558e82d13237114bbb8152483977806fc0c222af
|
Machine Learning In Action/Chapter8-Regression/venv/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/requests/packages/urllib3/_collections.py
|
python
|
HTTPHeaderDict.__init__
|
(self, headers=None, **kwargs)
|
[] |
def __init__(self, headers=None, **kwargs):
super(HTTPHeaderDict, self).__init__()
self._container = OrderedDict()
if headers is not None:
if isinstance(headers, HTTPHeaderDict):
self._copy_from(headers)
else:
self.extend(headers)
if kwargs:
self.extend(kwargs)
|
[
"def",
"__init__",
"(",
"self",
",",
"headers",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"HTTPHeaderDict",
",",
"self",
")",
".",
"__init__",
"(",
")",
"self",
".",
"_container",
"=",
"OrderedDict",
"(",
")",
"if",
"headers",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"headers",
",",
"HTTPHeaderDict",
")",
":",
"self",
".",
"_copy_from",
"(",
"headers",
")",
"else",
":",
"self",
".",
"extend",
"(",
"headers",
")",
"if",
"kwargs",
":",
"self",
".",
"extend",
"(",
"kwargs",
")"
] |
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter8-Regression/venv/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/requests/packages/urllib3/_collections.py#L135-L144
|
||||
geoopt/geoopt
|
c0163cde17aa215aa0f34e833364ac918ec5e974
|
geoopt/optim/rlinesearch.py
|
python
|
RiemannianLineSearch._derphi
|
(self, step_size)
|
return derphi
|
Compute derivative of phi.
The derivative of phi is given by computing the inner
product between all tensor gradients at the target point and those at the source point.
The source gradients are transported to the target point, and both gradients are
projected.
|
Compute derivative of phi.
|
[
"Compute",
"derivative",
"of",
"phi",
"."
] |
def _derphi(self, step_size):
"""Compute derivative of phi.
The derivative of phi is given by computing the inner
product between all tensor gradients at the target point and those at the source point.
The source gradients are transported to the target point, and both gradients are
projected.
"""
if not self.compute_derphi:
raise ValueError("Cannot call _derphi if self.compute_derphi=False!")
# Call _phi to compute gradients; Does nothing if _phi was
# already called with this stepsize during this step
self._phi(step_size)
derphi = 0
for point in self._params:
state = self.state[point]
if "der_phi" not in state:
continue
derphi += state["der_phi"]
return derphi
|
[
"def",
"_derphi",
"(",
"self",
",",
"step_size",
")",
":",
"if",
"not",
"self",
".",
"compute_derphi",
":",
"raise",
"ValueError",
"(",
"\"Cannot call _derphi if self.compute_derphi=False!\"",
")",
"# Call _phi to compute gradients; Does nothing if _phi was",
"# already called with this stepsize during this step",
"self",
".",
"_phi",
"(",
"step_size",
")",
"derphi",
"=",
"0",
"for",
"point",
"in",
"self",
".",
"_params",
":",
"state",
"=",
"self",
".",
"state",
"[",
"point",
"]",
"if",
"\"der_phi\"",
"not",
"in",
"state",
":",
"continue",
"derphi",
"+=",
"state",
"[",
"\"der_phi\"",
"]",
"return",
"derphi"
] |
https://github.com/geoopt/geoopt/blob/c0163cde17aa215aa0f34e833364ac918ec5e974/geoopt/optim/rlinesearch.py#L306-L330
|
|
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/lib/python2.7/site-packages/click/utils.py
|
python
|
LazyFile.__getattr__
|
(self, name)
|
return getattr(self.open(), name)
|
[] |
def __getattr__(self, name):
return getattr(self.open(), name)
|
[
"def",
"__getattr__",
"(",
"self",
",",
"name",
")",
":",
"return",
"getattr",
"(",
"self",
".",
"open",
"(",
")",
",",
"name",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/click/utils.py#L96-L97
|
|||
PowerScript/KatanaFramework
|
0f6ad90a88de865d58ec26941cb4460501e75496
|
lib/scapy/scapy/contrib/gsm_um.py
|
python
|
applicationInformation
|
()
|
return packet
|
APPLICATION INFORMATION Section 9.1.53
|
APPLICATION INFORMATION Section 9.1.53
|
[
"APPLICATION",
"INFORMATION",
"Section",
"9",
".",
"1",
".",
"53"
] |
def applicationInformation():
"""APPLICATION INFORMATION Section 9.1.53"""
a = TpPd(pd=0x6)
b = MessageType(mesType=0x38) # 00111000
c = ApduIDAndApduFlags()
e = ApduData()
packet = a / b / c / e
return packet
|
[
"def",
"applicationInformation",
"(",
")",
":",
"a",
"=",
"TpPd",
"(",
"pd",
"=",
"0x6",
")",
"b",
"=",
"MessageType",
"(",
"mesType",
"=",
"0x38",
")",
"# 00111000",
"c",
"=",
"ApduIDAndApduFlags",
"(",
")",
"e",
"=",
"ApduData",
"(",
")",
"packet",
"=",
"a",
"/",
"b",
"/",
"c",
"/",
"e",
"return",
"packet"
] |
https://github.com/PowerScript/KatanaFramework/blob/0f6ad90a88de865d58ec26941cb4460501e75496/lib/scapy/scapy/contrib/gsm_um.py#L1302-L1309
|
|
researchmm/tasn
|
5dba8ccc096cedc63913730eeea14a9647911129
|
tasn-mxnet/docs/mxdoc.py
|
python
|
generate_doxygen
|
(app)
|
Run the doxygen make commands
|
Run the doxygen make commands
|
[
"Run",
"the",
"doxygen",
"make",
"commands"
] |
def generate_doxygen(app):
"""Run the doxygen make commands"""
_run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir)
_run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir)
|
[
"def",
"generate_doxygen",
"(",
"app",
")",
":",
"_run_cmd",
"(",
"\"cd %s/.. && make doxygen\"",
"%",
"app",
".",
"builder",
".",
"srcdir",
")",
"_run_cmd",
"(",
"\"cp -rf doxygen/html %s/doxygen\"",
"%",
"app",
".",
"builder",
".",
"outdir",
")"
] |
https://github.com/researchmm/tasn/blob/5dba8ccc096cedc63913730eeea14a9647911129/tasn-mxnet/docs/mxdoc.py#L82-L85
|
||
chb/indivo_server
|
9826c67ab17d7fc0df935db327344fb0c7d237e5
|
indivo/serializers/python.py
|
python
|
Deserializer
|
(object_list, **options)
|
Deserialization is not currently supported
|
Deserialization is not currently supported
|
[
"Deserialization",
"is",
"not",
"currently",
"supported"
] |
def Deserializer(object_list, **options):
"""
Deserialization is not currently supported
"""
raise NotImplementedError
|
[
"def",
"Deserializer",
"(",
"object_list",
",",
"*",
"*",
"options",
")",
":",
"raise",
"NotImplementedError"
] |
https://github.com/chb/indivo_server/blob/9826c67ab17d7fc0df935db327344fb0c7d237e5/indivo/serializers/python.py#L74-L79
|
||
OWASP/ZSC
|
5bb9fed69efdc17996be4856b54af632aaed87b0
|
module/readline_windows/pyreadline/modes/vi.py
|
python
|
ViCommand.key_percent
|
(self, char)
|
find matching <([{}])>
|
find matching <([{}])>
|
[
"find",
"matching",
"<",
"(",
"[",
"{}",
"]",
")",
">"
] |
def key_percent(self, char):
'''find matching <([{}])>'''
self.motion = self.motion_matching
self.delete_right = 1
self.state = _VI_MOTION
self.apply()
|
[
"def",
"key_percent",
"(",
"self",
",",
"char",
")",
":",
"self",
".",
"motion",
"=",
"self",
".",
"motion_matching",
"self",
".",
"delete_right",
"=",
"1",
"self",
".",
"state",
"=",
"_VI_MOTION",
"self",
".",
"apply",
"(",
")"
] |
https://github.com/OWASP/ZSC/blob/5bb9fed69efdc17996be4856b54af632aaed87b0/module/readline_windows/pyreadline/modes/vi.py#L562-L567
|
||
biubug6/Face-Detector-1MB-with-landmark
|
2b075657aef954b9426f938ac7fce100b6910fe6
|
train.py
|
python
|
adjust_learning_rate
|
(optimizer, gamma, epoch, step_index, iteration, epoch_size)
|
return lr
|
Sets the learning rate
# Adapted from PyTorch Imagenet example:
# https://github.com/pytorch/examples/blob/master/imagenet/main.py
|
Sets the learning rate
# Adapted from PyTorch Imagenet example:
# https://github.com/pytorch/examples/blob/master/imagenet/main.py
|
[
"Sets",
"the",
"learning",
"rate",
"#",
"Adapted",
"from",
"PyTorch",
"Imagenet",
"example",
":",
"#",
"https",
":",
"//",
"github",
".",
"com",
"/",
"pytorch",
"/",
"examples",
"/",
"blob",
"/",
"master",
"/",
"imagenet",
"/",
"main",
".",
"py"
] |
def adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):
"""Sets the learning rate
# Adapted from PyTorch Imagenet example:
# https://github.com/pytorch/examples/blob/master/imagenet/main.py
"""
warmup_epoch = -1
if epoch <= warmup_epoch:
lr = 1e-6 + (initial_lr-1e-6) * iteration / (epoch_size * warmup_epoch)
else:
lr = initial_lr * (gamma ** (step_index))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
|
[
"def",
"adjust_learning_rate",
"(",
"optimizer",
",",
"gamma",
",",
"epoch",
",",
"step_index",
",",
"iteration",
",",
"epoch_size",
")",
":",
"warmup_epoch",
"=",
"-",
"1",
"if",
"epoch",
"<=",
"warmup_epoch",
":",
"lr",
"=",
"1e-6",
"+",
"(",
"initial_lr",
"-",
"1e-6",
")",
"*",
"iteration",
"/",
"(",
"epoch_size",
"*",
"warmup_epoch",
")",
"else",
":",
"lr",
"=",
"initial_lr",
"*",
"(",
"gamma",
"**",
"(",
"step_index",
")",
")",
"for",
"param_group",
"in",
"optimizer",
".",
"param_groups",
":",
"param_group",
"[",
"'lr'",
"]",
"=",
"lr",
"return",
"lr"
] |
https://github.com/biubug6/Face-Detector-1MB-with-landmark/blob/2b075657aef954b9426f938ac7fce100b6910fe6/train.py#L153-L165
|
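With warmup_epoch = -1 the warm-up branch in the record above never fires, so the schedule reduces to a plain step decay, lr = initial_lr * gamma ** step_index. A tiny sketch with assumed values (initial_lr = 1e-3, gamma = 0.1) makes the progression explicit:

initial_lr = 1e-3  # assumed base learning rate
gamma = 0.1        # assumed decay factor

for step_index in range(4):
    lr = initial_lr * (gamma ** step_index)
    print(f"step_index={step_index} lr={lr:.1e}")
# step_index=0 lr=1.0e-03
# step_index=1 lr=1.0e-04
# step_index=2 lr=1.0e-05
# step_index=3 lr=1.0e-06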
|
GoogleCloudPlatform/professional-services
|
0c707aa97437f3d154035ef8548109b7882f71da
|
examples/dataflow-data-generator/data-generator-pipeline/data_generator_pipeline.py
|
python
|
run
|
(argv=None)
|
This function parses the command line arguments and runs the Beam Pipeline.
Args:
argv: list containing the commandline arguments for this call of the
script.
|
This function parses the command line arguments and runs the Beam Pipeline.
|
[
"This",
"function",
"parses",
"the",
"command",
"line",
"arguments",
"and",
"runs",
"the",
"Beam",
"Pipeline",
"."
] |
def run(argv=None):
"""
This function parses the command line arguments and runs the Beam Pipeline.
Args:
argv: list containing the commandline arguments for this call of the
script.
"""
# Keeps track of whether the schema was inferred by the input or output table.
schema_inferred = False
data_args, pipeline_args = parse_data_generator_args(argv)
data_args, schema_inferred = fetch_schema(data_args, schema_inferred)
pipeline_options = PipelineOptions(pipeline_args)
temp_location = pipeline_options.display_data()['temp_location']
temp_blob = write_n_line_file_to_gcs(
pipeline_options.display_data()['project'], temp_location,
data_args.num_records)
data_gen = DataGenerator(bq_schema_filename=data_args.schema_file,
input_bq_table=data_args.input_bq_table,
p_null=data_args.p_null,
n_keys=data_args.n_keys,
min_date=data_args.min_date,
max_date=data_args.max_date,
only_pos=data_args.only_pos,
max_int=data_args.max_int,
max_float=data_args.max_float,
float_precision=data_args.float_precision,
write_disp=data_args.write_disp,
key_skew=data_args.key_skew,
primary_key_cols=data_args.primary_key_cols)
# Initiate the pipeline using the pipeline arguments passed in from the
# command line. This includes information including where Dataflow should
# store temp files, and what the project id is and what runner to use.
p = beam.Pipeline(options=pipeline_options)
rows = (
p
# Read the file we created with num_records newlines.
| 'Read file with num_records lines' >> beam.io.ReadFromText(
os.path.join('gs://', temp_blob.bucket.name, temp_blob.name))
# Use our instance of our custom DataGenerator Class to generate 1 fake
# datum with the appropriate schema for each element in the PCollection
# created above.
| 'Generate Data' >> beam.ParDo(FakeRowGen(data_gen))
| 'Parse Json Strings' >> beam.FlatMap(lambda row: [json.loads(row)]))
if data_args.primary_key_cols:
for key in data_args.primary_key_cols.split(','):
rows |= 'Enforcing primary key: {}'.format(
key) >> EnforcePrimaryKeys(key)
if data_args.csv_schema_order:
(rows
| 'Order fields for CSV writing.' >> beam.FlatMap(
lambda d: [dict_to_csv(d, data_args.csv_schema_order.split(','))])
| 'Write to GCS' >> beam.io.textio.WriteToText(
file_path_prefix=data_args.output_prefix, file_name_suffix='.csv')
)
if data_args.avro_schema_file:
fastavro_avsc = fastavro.schema.load_schema(data_args.avro_schema_file)
(rows
# Need to convert time stamps from strings to timestamp-micros
| 'Fix date and time Types for Avro.' >>
beam.FlatMap(lambda row: fix_record_for_avro(row, fastavro_avsc))
| 'Write to Avro.' >> beam.io.avroio.WriteToAvro(
file_path_prefix=data_args.output_prefix,
codec='null',
file_name_suffix='.avro',
use_fastavro=True,
schema=fastavro_avsc))
if data_args.write_to_parquet:
with open(data_args.schema_file, 'r') as infile:
str_schema = json.load(infile)
pa_schema = get_pyarrow_translated_schema(str_schema)
(rows
| 'Fix date and time Types for Parquet.' >>
beam.FlatMap(lambda row: fix_record_for_parquet(row, str_schema))
| 'Write to Parquet.' >> beam.io.WriteToParquet(
file_path_prefix=data_args.output_prefix,
codec='null',
file_name_suffix='.parquet',
schema=pa_schema))
if data_args.output_bq_table:
(rows
| 'Write to BigQuery.' >> beam.io.gcp.bigquery.WriteToBigQuery(
# The table name is a required argument for the BigQuery sink.
# In this case we use the value passed in from the command
# line.
data_args.output_bq_table,
schema=None if schema_inferred else data_gen.get_bq_schema(),
# Creates the table in BigQuery if it does not yet exist.
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=data_gen.write_disp,
# Use the max recommended batch size.
batch_size=500))
p.run().wait_until_finish()
# Manually clean up temp_num_records.txt because it will be outside this
# job's directory and Dataflow will not remove it for us.
temp_blob.delete()
|
[
"def",
"run",
"(",
"argv",
"=",
"None",
")",
":",
"# Keeps track if schema was inferred by input or ouput table.",
"schema_inferred",
"=",
"False",
"data_args",
",",
"pipeline_args",
"=",
"parse_data_generator_args",
"(",
"argv",
")",
"data_args",
",",
"schema_inferred",
"=",
"fetch_schema",
"(",
"data_args",
",",
"schema_inferred",
")",
"pipeline_options",
"=",
"PipelineOptions",
"(",
"pipeline_args",
")",
"temp_location",
"=",
"pipeline_options",
".",
"display_data",
"(",
")",
"[",
"'temp_location'",
"]",
"temp_blob",
"=",
"write_n_line_file_to_gcs",
"(",
"pipeline_options",
".",
"display_data",
"(",
")",
"[",
"'project'",
"]",
",",
"temp_location",
",",
"data_args",
".",
"num_records",
")",
"data_gen",
"=",
"DataGenerator",
"(",
"bq_schema_filename",
"=",
"data_args",
".",
"schema_file",
",",
"input_bq_table",
"=",
"data_args",
".",
"input_bq_table",
",",
"p_null",
"=",
"data_args",
".",
"p_null",
",",
"n_keys",
"=",
"data_args",
".",
"n_keys",
",",
"min_date",
"=",
"data_args",
".",
"min_date",
",",
"max_date",
"=",
"data_args",
".",
"max_date",
",",
"only_pos",
"=",
"data_args",
".",
"only_pos",
",",
"max_int",
"=",
"data_args",
".",
"max_int",
",",
"max_float",
"=",
"data_args",
".",
"max_float",
",",
"float_precision",
"=",
"data_args",
".",
"float_precision",
",",
"write_disp",
"=",
"data_args",
".",
"write_disp",
",",
"key_skew",
"=",
"data_args",
".",
"key_skew",
",",
"primary_key_cols",
"=",
"data_args",
".",
"primary_key_cols",
")",
"# Initiate the pipeline using the pipeline arguments passed in from the",
"# command line. This includes information including where Dataflow should",
"# store temp files, and what the project id is and what runner to use.",
"p",
"=",
"beam",
".",
"Pipeline",
"(",
"options",
"=",
"pipeline_options",
")",
"rows",
"=",
"(",
"p",
"# Read the file we created with num_records newlines.",
"|",
"'Read file with num_records lines'",
">>",
"beam",
".",
"io",
".",
"ReadFromText",
"(",
"os",
".",
"path",
".",
"join",
"(",
"'gs://'",
",",
"temp_blob",
".",
"bucket",
".",
"name",
",",
"temp_blob",
".",
"name",
")",
")",
"# Use our instance of our custom DataGenerator Class to generate 1 fake",
"# datum with the appropriate schema for each element in the PColleciton",
"# created above.",
"|",
"'Generate Data'",
">>",
"beam",
".",
"ParDo",
"(",
"FakeRowGen",
"(",
"data_gen",
")",
")",
"|",
"'Parse Json Strings'",
">>",
"beam",
".",
"FlatMap",
"(",
"lambda",
"row",
":",
"[",
"json",
".",
"loads",
"(",
"row",
")",
"]",
")",
")",
"if",
"data_args",
".",
"primary_key_cols",
":",
"for",
"key",
"in",
"data_args",
".",
"primary_key_cols",
".",
"split",
"(",
"','",
")",
":",
"rows",
"|=",
"'Enforcing primary key: {}'",
".",
"format",
"(",
"key",
")",
">>",
"EnforcePrimaryKeys",
"(",
"key",
")",
"if",
"data_args",
".",
"csv_schema_order",
":",
"(",
"rows",
"|",
"'Order fields for CSV writing.'",
">>",
"beam",
".",
"FlatMap",
"(",
"lambda",
"d",
":",
"[",
"dict_to_csv",
"(",
"d",
",",
"data_args",
".",
"csv_schema_order",
".",
"split",
"(",
"','",
")",
")",
"]",
")",
"|",
"'Write to GCS'",
">>",
"beam",
".",
"io",
".",
"textio",
".",
"WriteToText",
"(",
"file_path_prefix",
"=",
"data_args",
".",
"output_prefix",
",",
"file_name_suffix",
"=",
"'.csv'",
")",
")",
"if",
"data_args",
".",
"avro_schema_file",
":",
"fastavro_avsc",
"=",
"fastavro",
".",
"schema",
".",
"load_schema",
"(",
"data_args",
".",
"avro_schema_file",
")",
"(",
"rows",
"# Need to convert time stamps from strings to timestamp-micros",
"|",
"'Fix date and time Types for Avro.'",
">>",
"beam",
".",
"FlatMap",
"(",
"lambda",
"row",
":",
"fix_record_for_avro",
"(",
"row",
",",
"fastavro_avsc",
")",
")",
"|",
"'Write to Avro.'",
">>",
"beam",
".",
"io",
".",
"avroio",
".",
"WriteToAvro",
"(",
"file_path_prefix",
"=",
"data_args",
".",
"output_prefix",
",",
"codec",
"=",
"'null'",
",",
"file_name_suffix",
"=",
"'.avro'",
",",
"use_fastavro",
"=",
"True",
",",
"schema",
"=",
"fastavro_avsc",
")",
")",
"if",
"data_args",
".",
"write_to_parquet",
":",
"with",
"open",
"(",
"data_args",
".",
"schema_file",
",",
"'r'",
")",
"as",
"infile",
":",
"str_schema",
"=",
"json",
".",
"load",
"(",
"infile",
")",
"pa_schema",
"=",
"get_pyarrow_translated_schema",
"(",
"str_schema",
")",
"(",
"rows",
"|",
"'Fix data and time Types for Parquet.'",
">>",
"beam",
".",
"FlatMap",
"(",
"lambda",
"row",
":",
"fix_record_for_parquet",
"(",
"row",
",",
"str_schema",
")",
")",
"|",
"'Write to Parquet.'",
">>",
"beam",
".",
"io",
".",
"WriteToParquet",
"(",
"file_path_prefix",
"=",
"data_args",
".",
"output_prefix",
",",
"codec",
"=",
"'null'",
",",
"file_name_suffix",
"=",
"'.parquet'",
",",
"schema",
"=",
"pa_schema",
")",
")",
"if",
"data_args",
".",
"output_bq_table",
":",
"(",
"rows",
"|",
"'Write to BigQuery.'",
">>",
"beam",
".",
"io",
".",
"gcp",
".",
"bigquery",
".",
"WriteToBigQuery",
"(",
"# The table name is a required argument for the BigQuery sink.",
"# In this case we use the value passed in from the command",
"# line.",
"data_args",
".",
"output_bq_table",
",",
"schema",
"=",
"None",
"if",
"schema_inferred",
"else",
"data_gen",
".",
"get_bq_schema",
"(",
")",
",",
"# Creates the table in BigQuery if it does not yet exist.",
"create_disposition",
"=",
"beam",
".",
"io",
".",
"BigQueryDisposition",
".",
"CREATE_IF_NEEDED",
",",
"write_disposition",
"=",
"data_gen",
".",
"write_disp",
",",
"# Use the max recommended batch size.",
"batch_size",
"=",
"500",
")",
")",
"p",
".",
"run",
"(",
")",
".",
"wait_until_finish",
"(",
")",
"# Manually clean up of temp_num_records.txt because it will be outside this",
"# job's directory and Dataflow will not remove it for us.",
"temp_blob",
".",
"delete",
"(",
")"
] |
https://github.com/GoogleCloudPlatform/professional-services/blob/0c707aa97437f3d154035ef8548109b7882f71da/examples/dataflow-data-generator/data-generator-pipeline/data_generator_pipeline.py#L43-L152
|
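The run() function in the record above is driven entirely by command-line flags that parse_data_generator_args turns into data_args. A hedged invocation sketch follows; the flag spellings are assumptions inferred from the data_args attribute names (schema_file, num_records, output_prefix, csv_schema_order) and from standard Beam pipeline options, and have not been checked against the actual parser.

# Hypothetical flags -- names inferred from data_args attributes, not verified.
argv = [
    "--schema_file=gs://my-bucket/schemas/events.json",
    "--num_records=100000",
    "--output_prefix=gs://my-bucket/generated/events",
    "--csv_schema_order=id,name,created_at",
    # Standard Beam options consumed by PipelineOptions:
    "--project=my-gcp-project",
    "--temp_location=gs://my-bucket/tmp",
    "--runner=DirectRunner",
]
run(argv)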
||
toxygen-project/toxygen
|
0a54012cf5ee72434b923bcde7d8f1a4e575ce2f
|
toxygen/callbacks.py
|
python
|
file_recv_control
|
(tox, friend_number, file_number, file_control, user_data)
|
Friend cancelled, paused or resumed file transfer
|
Friend cancelled, paused or resumed file transfer
|
[
"Friend",
"cancelled",
"paused",
"or",
"resumed",
"file",
"transfer"
] |
def file_recv_control(tox, friend_number, file_number, file_control, user_data):
"""
Friend cancelled, paused or resumed file transfer
"""
if file_control == TOX_FILE_CONTROL['CANCEL']:
invoke_in_main_thread(Profile.get_instance().cancel_transfer, friend_number, file_number, True)
elif file_control == TOX_FILE_CONTROL['PAUSE']:
invoke_in_main_thread(Profile.get_instance().pause_transfer, friend_number, file_number, True)
elif file_control == TOX_FILE_CONTROL['RESUME']:
invoke_in_main_thread(Profile.get_instance().resume_transfer, friend_number, file_number, True)
|
[
"def",
"file_recv_control",
"(",
"tox",
",",
"friend_number",
",",
"file_number",
",",
"file_control",
",",
"user_data",
")",
":",
"if",
"file_control",
"==",
"TOX_FILE_CONTROL",
"[",
"'CANCEL'",
"]",
":",
"invoke_in_main_thread",
"(",
"Profile",
".",
"get_instance",
"(",
")",
".",
"cancel_transfer",
",",
"friend_number",
",",
"file_number",
",",
"True",
")",
"elif",
"file_control",
"==",
"TOX_FILE_CONTROL",
"[",
"'PAUSE'",
"]",
":",
"invoke_in_main_thread",
"(",
"Profile",
".",
"get_instance",
"(",
")",
".",
"pause_transfer",
",",
"friend_number",
",",
"file_number",
",",
"True",
")",
"elif",
"file_control",
"==",
"TOX_FILE_CONTROL",
"[",
"'RESUME'",
"]",
":",
"invoke_in_main_thread",
"(",
"Profile",
".",
"get_instance",
"(",
")",
".",
"resume_transfer",
",",
"friend_number",
",",
"file_number",
",",
"True",
")"
] |
https://github.com/toxygen-project/toxygen/blob/0a54012cf5ee72434b923bcde7d8f1a4e575ce2f/toxygen/callbacks.py#L257-L266
|
||
httpie/httpie
|
4c56d894ba9e2bb1c097a3a6067006843ac2944d
|
httpie/models.py
|
python
|
HTTPRequest.iter_lines
|
(self, chunk_size)
|
[] |
def iter_lines(self, chunk_size):
yield self.body, b''
|
[
"def",
"iter_lines",
"(",
"self",
",",
"chunk_size",
")",
":",
"yield",
"self",
".",
"body",
",",
"b''"
] |
https://github.com/httpie/httpie/blob/4c56d894ba9e2bb1c097a3a6067006843ac2944d/httpie/models.py#L112-L113
|
||||
python273/vk_api
|
1ef82594baabc80802ef4792aceee9180ae3e9c9
|
examples/captcha_handle.py
|
python
|
main
|
()
|
Example of captcha handling
|
Example of captcha handling
|
[
"Пример",
"обработки",
"капчи"
] |
def main():
""" Пример обработки капчи """
login, password = '[email protected]', 'mypassword'
vk_session = vk_api.VkApi(
login, password,
captcha_handler=captcha_handler # function for captcha handling
)
try:
vk_session.auth()
except vk_api.AuthError as error_msg:
print(error_msg)
return
|
[
"def",
"main",
"(",
")",
":",
"login",
",",
"password",
"=",
"'[email protected]'",
",",
"'mypassword'",
"vk_session",
"=",
"vk_api",
".",
"VkApi",
"(",
"login",
",",
"password",
",",
"captcha_handler",
"=",
"captcha_handler",
"# функция для обработки капчи",
")",
"try",
":",
"vk_session",
".",
"auth",
"(",
")",
"except",
"vk_api",
".",
"AuthError",
"as",
"error_msg",
":",
"print",
"(",
"error_msg",
")",
"return"
] |
https://github.com/python273/vk_api/blob/1ef82594baabc80802ef4792aceee9180ae3e9c9/examples/captcha_handle.py#L17-L30
|
||
pymedusa/Medusa
|
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
|
ext/trakt/sync.py
|
python
|
Scrobbler.pause
|
(self)
|
Pause the scrobbling of this :class:`Scrobbler`'s *media* object
|
Pause the scrobbling of this :class:`Scrobbler`'s *media* object
|
[
"Pause",
"the",
"scrobbling",
"of",
"this",
":",
"class",
":",
"Scrobbler",
"s",
"*",
"media",
"*",
"object"
] |
def pause(self):
"""Pause the scrobbling of this :class:`Scrobbler`'s *media* object"""
self._post('scrobble/pause')
|
[
"def",
"pause",
"(",
"self",
")",
":",
"self",
".",
"_post",
"(",
"'scrobble/pause'",
")"
] |
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/ext/trakt/sync.py#L461-L463
|
||
IJDykeman/wangTiles
|
7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
|
experimental_code/tiles_3d/venv_mac/lib/python2.7/site-packages/pkg_resources/__init__.py
|
python
|
yield_lines
|
(strs)
|
Yield non-empty/non-comment lines of a string or sequence
|
Yield non-empty/non-comment lines of a string or sequence
|
[
"Yield",
"non",
"-",
"empty",
"/",
"non",
"-",
"comment",
"lines",
"of",
"a",
"string",
"or",
"sequence"
] |
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, six.string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
|
[
"def",
"yield_lines",
"(",
"strs",
")",
":",
"if",
"isinstance",
"(",
"strs",
",",
"six",
".",
"string_types",
")",
":",
"for",
"s",
"in",
"strs",
".",
"splitlines",
"(",
")",
":",
"s",
"=",
"s",
".",
"strip",
"(",
")",
"# skip blank lines/comments",
"if",
"s",
"and",
"not",
"s",
".",
"startswith",
"(",
"'#'",
")",
":",
"yield",
"s",
"else",
":",
"for",
"ss",
"in",
"strs",
":",
"for",
"s",
"in",
"yield_lines",
"(",
"ss",
")",
":",
"yield",
"s"
] |
https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/tiles_3d/venv_mac/lib/python2.7/site-packages/pkg_resources/__init__.py#L2343-L2354
|
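The yield_lines helper above skips blank and comment lines and recurses when handed a sequence of strings. A self-contained sketch of the same behaviour, using plain str in place of six.string_types:

def yield_lines(strs):
    # Yield non-empty, non-comment lines of a string, or of a nested sequence of strings.
    if isinstance(strs, str):
        for s in strs.splitlines():
            s = s.strip()
            if s and not s.startswith('#'):
                yield s
    else:
        for ss in strs:
            for s in yield_lines(ss):
                yield s

print(list(yield_lines("requests>=2.0\n# a comment\n\nsix")))
# ['requests>=2.0', 'six']
print(list(yield_lines(["a\nb", "# skipped", "c"])))
# ['a', 'b', 'c']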
||
bjmayor/hacker
|
e3ce2ad74839c2733b27dac6c0f495e0743e1866
|
venv/lib/python3.5/site-packages/pip/_vendor/requests/api.py
|
python
|
get
|
(url, params=None, **kwargs)
|
return request('get', url, params=params, **kwargs)
|
Sends a GET request.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
|
Sends a GET request.
|
[
"Sends",
"a",
"GET",
"request",
"."
] |
def get(url, params=None, **kwargs):
"""Sends a GET request.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, params=params, **kwargs)
|
[
"def",
"get",
"(",
"url",
",",
"params",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'allow_redirects'",
",",
"True",
")",
"return",
"request",
"(",
"'get'",
",",
"url",
",",
"params",
"=",
"params",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/bjmayor/hacker/blob/e3ce2ad74839c2733b27dac6c0f495e0743e1866/venv/lib/python3.5/site-packages/pip/_vendor/requests/api.py#L59-L70
|
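The get() wrapper above only defaults allow_redirects=True before delegating to request('get', ...). Typical use through the public requests API (the endpoint below is just an example):

import requests

# params are encoded into the query string: .../get?q=pip&page=1
resp = requests.get("https://httpbin.org/get", params={"q": "pip", "page": 1}, timeout=10)
print(resp.status_code)       # e.g. 200
print(resp.json()["args"])    # e.g. {'page': '1', 'q': 'pip'}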
|
ethereum/web3.py
|
6a90a26ea12e5a789834c9cd6a7ae6d302648f88
|
ethpm/package.py
|
python
|
Package.from_uri
|
(cls, uri: URI, w3: "Web3")
|
return cls(manifest, w3, uri)
|
Returns a Package object instantiated by a manifest located at a content-addressed URI.
A valid ``Web3`` instance is also required.
URI schemes supported:
- IPFS: `ipfs://Qm...`
- HTTP: `https://api.github.com/repos/:owner/:repo/git/blobs/:file_sha`
- Registry: `erc1319://registry.eth:1/greeter?version=1.0.0`
.. code:: python
OwnedPackage = Package.from_uri('ipfs://QmbeVyFLSuEUxiXKwSsEjef7icpdTdA4kGG9BcrJXKNKUW', w3) # noqa: E501
|
Returns a Package object instantiated by a manifest located at a content-addressed URI.
A valid ``Web3`` instance is also required.
URI schemes supported:
|
[
"Returns",
"a",
"Package",
"object",
"instantiated",
"by",
"a",
"manifest",
"located",
"at",
"a",
"content",
"-",
"addressed",
"URI",
".",
"A",
"valid",
"Web3",
"instance",
"is",
"also",
"required",
".",
"URI",
"schemes",
"supported",
":"
] |
def from_uri(cls, uri: URI, w3: "Web3") -> "Package":
"""
Returns a Package object instantiated by a manifest located at a content-addressed URI.
A valid ``Web3`` instance is also required.
URI schemes supported:
- IPFS: `ipfs://Qm...`
- HTTP: `https://api.github.com/repos/:owner/:repo/git/blobs/:file_sha`
- Registry: `erc1319://registry.eth:1/greeter?version=1.0.0`
.. code:: python
OwnedPackage = Package.from_uri('ipfs://QmbeVyFLSuEUxiXKwSsEjef7icpdTdA4kGG9BcrJXKNKUW', w3) # noqa: E501
"""
contents = to_text(resolve_uri_contents(uri))
validate_raw_manifest_format(contents)
manifest = json.loads(contents)
return cls(manifest, w3, uri)
|
[
"def",
"from_uri",
"(",
"cls",
",",
"uri",
":",
"URI",
",",
"w3",
":",
"\"Web3\"",
")",
"->",
"\"Package\"",
":",
"contents",
"=",
"to_text",
"(",
"resolve_uri_contents",
"(",
"uri",
")",
")",
"validate_raw_manifest_format",
"(",
"contents",
")",
"manifest",
"=",
"json",
".",
"loads",
"(",
"contents",
")",
"return",
"cls",
"(",
"manifest",
",",
"w3",
",",
"uri",
")"
] |
https://github.com/ethereum/web3.py/blob/6a90a26ea12e5a789834c9cd6a7ae6d302648f88/ethpm/package.py#L222-L241
|
|
google/active-learning
|
efedd8f1c45421ee13af2b9ff593ad31f3835942
|
utils/create_data.py
|
python
|
get_csv_data
|
(filename)
|
return data
|
Parse csv and return Dataset object with data and targets.
Create pickle data from csv, assumes the first column contains the targets
Args:
filename: complete path of the csv file
Returns:
Dataset object
|
Parse csv and return Dataset object with data and targets.
|
[
"Parse",
"csv",
"and",
"return",
"Dataset",
"object",
"with",
"data",
"and",
"targets",
"."
] |
def get_csv_data(filename):
"""Parse csv and return Dataset object with data and targets.
Create pickle data from csv, assumes the first column contains the targets
Args:
filename: complete path of the csv file
Returns:
Dataset object
"""
f = gfile.GFile(filename, 'r')
mat = []
for l in f:
row = l.strip()
row = row.replace('"', '')
row = row.split(',')
row = [float(x) for x in row]
mat.append(row)
mat = np.array(mat)
y = mat[:, 0]
X = mat[:, 1:]
data = Dataset(X, y)
return data
|
[
"def",
"get_csv_data",
"(",
"filename",
")",
":",
"f",
"=",
"gfile",
".",
"GFile",
"(",
"filename",
",",
"'r'",
")",
"mat",
"=",
"[",
"]",
"for",
"l",
"in",
"f",
":",
"row",
"=",
"l",
".",
"strip",
"(",
")",
"row",
"=",
"row",
".",
"replace",
"(",
"'\"'",
",",
"''",
")",
"row",
"=",
"row",
".",
"split",
"(",
"','",
")",
"row",
"=",
"[",
"float",
"(",
"x",
")",
"for",
"x",
"in",
"row",
"]",
"mat",
".",
"append",
"(",
"row",
")",
"mat",
"=",
"np",
".",
"array",
"(",
"mat",
")",
"y",
"=",
"mat",
"[",
":",
",",
"0",
"]",
"X",
"=",
"mat",
"[",
":",
",",
"1",
":",
"]",
"data",
"=",
"Dataset",
"(",
"X",
",",
"y",
")",
"return",
"data"
] |
https://github.com/google/active-learning/blob/efedd8f1c45421ee13af2b9ff593ad31f3835942/utils/create_data.py#L65-L86
|
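The record above treats the first CSV column as the target and the remaining columns as features. A hedged sketch of the same parsing using the standard library and NumPy in place of gfile.GFile and the project's Dataset class:

import numpy as np

csv_text = '"1",0.5,2.0\n"0",1.5,3.0\n"1",2.5,4.0'

mat = []
for line in csv_text.splitlines():
    row = line.strip().replace('"', '').split(',')
    mat.append([float(x) for x in row])
mat = np.array(mat)

y = mat[:, 0]   # targets: first column
X = mat[:, 1:]  # features: remaining columns
print(y)        # [1. 0. 1.]
print(X.shape)  # (3, 2)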
|
sebastien/cuisine
|
f6f70268ef1361db66815383017f7c8969002154
|
src/cuisine.py
|
python
|
group_ensure_linux
|
(name, gid=None)
|
Ensures that the group with the given name (and optional gid)
exists.
|
Ensures that the group with the given name (and optional gid)
exists.
|
[
"Ensures",
"that",
"the",
"group",
"with",
"the",
"given",
"name",
"(",
"and",
"optional",
"gid",
")",
"exists",
"."
] |
def group_ensure_linux(name, gid=None):
"""Ensures that the group with the given name (and optional gid)
exists."""
d = group_check(name)
if not d:
group_create(name, gid)
else:
if gid != None and d.get("gid") != gid:
sudo("groupmod -g %s '%s'" % (gid, name))
|
[
"def",
"group_ensure_linux",
"(",
"name",
",",
"gid",
"=",
"None",
")",
":",
"d",
"=",
"group_check",
"(",
"name",
")",
"if",
"not",
"d",
":",
"group_create",
"(",
"name",
",",
"gid",
")",
"else",
":",
"if",
"gid",
"!=",
"None",
"and",
"d",
".",
"get",
"(",
"\"gid\"",
")",
"!=",
"gid",
":",
"sudo",
"(",
"\"groupmod -g %s '%s'\"",
"%",
"(",
"gid",
",",
"name",
")",
")"
] |
https://github.com/sebastien/cuisine/blob/f6f70268ef1361db66815383017f7c8969002154/src/cuisine.py#L1872-L1880
|
||
xdress/xdress
|
eb7f0a02b3edf617d401939ede7f0d713a88917f
|
xdress/_enum/__init__.py
|
python
|
_make_class_unpicklable
|
(cls)
|
Make the given class un-picklable.
|
Make the given class un-picklable.
|
[
"Make",
"the",
"given",
"class",
"un",
"-",
"picklable",
"."
] |
def _make_class_unpicklable(cls):
"""Make the given class un-picklable."""
def _break_on_call_reduce(self):
raise TypeError('%r cannot be pickled' % self)
cls.__reduce__ = _break_on_call_reduce
cls.__module__ = '<unknown>'
|
[
"def",
"_make_class_unpicklable",
"(",
"cls",
")",
":",
"def",
"_break_on_call_reduce",
"(",
"self",
")",
":",
"raise",
"TypeError",
"(",
"'%r cannot be pickled'",
"%",
"self",
")",
"cls",
".",
"__reduce__",
"=",
"_break_on_call_reduce",
"cls",
".",
"__module__",
"=",
"'<unknown>'"
] |
https://github.com/xdress/xdress/blob/eb7f0a02b3edf617d401939ede7f0d713a88917f/xdress/_enum/__init__.py#L67-L72
|
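The helper above blocks pickling by swapping in a __reduce__ that raises. A small standalone demonstration (any class works; the enum machinery around it is omitted):

import pickle

def _make_class_unpicklable(cls):
    # Replace __reduce__ so that pickling any instance raises TypeError.
    def _break_on_call_reduce(self):
        raise TypeError('%r cannot be pickled' % self)
    cls.__reduce__ = _break_on_call_reduce
    cls.__module__ = '<unknown>'

class Color:
    pass

_make_class_unpicklable(Color)

try:
    pickle.dumps(Color())
except TypeError as exc:
    print(exc)  # e.g. "<...Color object at 0x...> cannot be pickled"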
||
andresriancho/w3af
|
cd22e5252243a87aaa6d0ddea47cf58dacfe00a9
|
w3af/core/controllers/threads/pool276.py
|
python
|
ApplyResult.get
|
(self, timeout=None)
|
[] |
def get(self, timeout=None):
self.wait(timeout)
if not self._ready:
raise TimeoutError
if self._success:
return self._value
else:
raise self._value
|
[
"def",
"get",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"self",
".",
"wait",
"(",
"timeout",
")",
"if",
"not",
"self",
".",
"_ready",
":",
"raise",
"TimeoutError",
"if",
"self",
".",
"_success",
":",
"return",
"self",
".",
"_value",
"else",
":",
"raise",
"self",
".",
"_value"
] |
https://github.com/andresriancho/w3af/blob/cd22e5252243a87aaa6d0ddea47cf58dacfe00a9/w3af/core/controllers/threads/pool276.py#L671-L678
|
||||
pycontribs/pyrax
|
a0c022981f76a4cba96a22ecc19bb52843ac4fbe
|
pyrax/__init__.py
|
python
|
connect_to_autoscale
|
(region=None)
|
return _create_client(ep_name="autoscale", region=region)
|
Creates a client for working with AutoScale.
|
Creates a client for working with AutoScale.
|
[
"Creates",
"a",
"client",
"for",
"working",
"with",
"AutoScale",
"."
] |
def connect_to_autoscale(region=None):
"""Creates a client for working with AutoScale."""
return _create_client(ep_name="autoscale", region=region)
|
[
"def",
"connect_to_autoscale",
"(",
"region",
"=",
"None",
")",
":",
"return",
"_create_client",
"(",
"ep_name",
"=",
"\"autoscale\"",
",",
"region",
"=",
"region",
")"
] |
https://github.com/pycontribs/pyrax/blob/a0c022981f76a4cba96a22ecc19bb52843ac4fbe/pyrax/__init__.py#L813-L815
|
|
tensorly/tensorly
|
87b435b3f3343447b49d47ebb5461118f6c8a9ab
|
tensorly/random/base.py
|
python
|
random_tt
|
(shape, rank, full=False, random_state=None, **context)
|
Generates a random TT/MPS tensor
Parameters
----------
shape : tuple
shape of the tensor to generate
rank : int
rank of the TT decomposition
must verify rank[0] == rank[-1] == 1 (boundary conditions)
and len(rank) == len(shape)+1
full : bool, optional, default is False
if True, a full tensor is returned
otherwise, the decomposed tensor is returned
random_state : `np.random.RandomState`
context : dict
context in which to create the tensor
Returns
-------
TT_tensor : ND-array or 3D-array list
* ND-array : full tensor if `full` is True
* 3D-array list : list of factors otherwise
|
Generates a random TT/MPS tensor
|
[
"Generates",
"a",
"random",
"TT",
"/",
"MPS",
"tensor"
] |
def random_tt(shape, rank, full=False, random_state=None, **context):
"""Generates a random TT/MPS tensor
Parameters
----------
shape : tuple
shape of the tensor to generate
rank : int
rank of the TT decomposition
must verify rank[0] == rank[-1] == 1 (boundary conditions)
and len(rank) == len(shape)+1
full : bool, optional, default is False
if True, a full tensor is returned
otherwise, the decomposed tensor is returned
random_state : `np.random.RandomState`
context : dict
context in which to create the tensor
Returns
-------
TT_tensor : ND-array or 3D-array list
* ND-array : full tensor if `full` is True
* 3D-array list : list of factors otherwise
"""
n_dim = len(shape)
rank = validate_tt_rank(shape, rank)
# Make sure it's not a tuple but a list
rank = list(rank)
# Initialization
if rank[0] != 1:
message = 'Provided rank[0] == {} but boundary conditions dictate rank[0] == rank[-1] == 1: setting rank[0] to 1.'.format(rank[0])
raise ValueError(message)
if rank[-1] != 1:
message = 'Provided rank[-1] == {} but boundary conditions dictate rank[0] == rank[-1] == 1: setting rank[-1] to 1.'.format(rank[-1])
raise ValueError(message)
rns = T.check_random_state(random_state)
factors = [T.tensor(rns.random_sample((rank[i], s, rank[i+1])), **context)\
for i, s in enumerate(shape)]
if full:
return tt_to_tensor(factors)
else:
return TTTensor(factors)
|
[
"def",
"random_tt",
"(",
"shape",
",",
"rank",
",",
"full",
"=",
"False",
",",
"random_state",
"=",
"None",
",",
"*",
"*",
"context",
")",
":",
"n_dim",
"=",
"len",
"(",
"shape",
")",
"rank",
"=",
"validate_tt_rank",
"(",
"shape",
",",
"rank",
")",
"# Make sure it's not a tuple but a list",
"rank",
"=",
"list",
"(",
"rank",
")",
"# Initialization",
"if",
"rank",
"[",
"0",
"]",
"!=",
"1",
":",
"message",
"=",
"'Provided rank[0] == {} but boundaring conditions dictatate rank[0] == rank[-1] == 1: setting rank[0] to 1.'",
".",
"format",
"(",
"rank",
"[",
"0",
"]",
")",
"raise",
"ValueError",
"(",
"message",
")",
"if",
"rank",
"[",
"-",
"1",
"]",
"!=",
"1",
":",
"message",
"=",
"'Provided rank[-1] == {} but boundaring conditions dictatate rank[0] == rank[-1] == 1: setting rank[-1] to 1.'",
".",
"format",
"(",
"rank",
"[",
"0",
"]",
")",
"raise",
"ValueError",
"(",
"message",
")",
"rns",
"=",
"T",
".",
"check_random_state",
"(",
"random_state",
")",
"factors",
"=",
"[",
"T",
".",
"tensor",
"(",
"rns",
".",
"random_sample",
"(",
"(",
"rank",
"[",
"i",
"]",
",",
"s",
",",
"rank",
"[",
"i",
"+",
"1",
"]",
")",
")",
",",
"*",
"*",
"context",
")",
"for",
"i",
",",
"s",
"in",
"enumerate",
"(",
"shape",
")",
"]",
"if",
"full",
":",
"return",
"tt_to_tensor",
"(",
"factors",
")",
"else",
":",
"return",
"TTTensor",
"(",
"factors",
")"
] |
https://github.com/tensorly/tensorly/blob/87b435b3f3343447b49d47ebb5461118f6c8a9ab/tensorly/random/base.py#L153-L199
|
||
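A minimal usage sketch for the random_tt entry above, assuming TensorLy is installed; whether the decomposed result exposes a `factors` attribute or behaves like a plain list of cores is an assumption, so both cases are handled.

from tensorly.random import random_tt

# Boundary ranks must be 1 and len(rank) == len(shape) + 1, as the docstring above states.
shape = (4, 5, 6)
rank = (1, 2, 3, 1)

tt = random_tt(shape, rank)                # decomposed form: a sequence of 3D cores
full = random_tt(shape, rank, full=True)   # dense tensor of the requested shape

print(full.shape)  # expected: (4, 5, 6)
cores = tt.factors if hasattr(tt, "factors") else list(tt)
print([c.shape for c in cores])  # expected: [(1, 4, 2), (2, 5, 3), (3, 6, 1)]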
wxWidgets/Phoenix
|
b2199e299a6ca6d866aa6f3d0888499136ead9d6
|
wx/lib/ogl/basic.py
|
python
|
ShapeRegion.SetProportions
|
(self, xp, yp)
|
Set the proportions.
:param `xp`: the x region proportion
:param `yp`: the y region proportion
|
Set the proportions.
|
[
"Set",
"the",
"proportions",
"."
] |
def SetProportions(self, xp, yp):
"""
Set the proportions.
:param `xp`: the x region proportion
:param `yp`: the y region proportion
"""
self._regionProportionX = xp
self._regionProportionY = yp
|
[
"def",
"SetProportions",
"(",
"self",
",",
"xp",
",",
"yp",
")",
":",
"self",
".",
"_regionProportionX",
"=",
"xp",
"self",
".",
"_regionProportionY",
"=",
"yp"
] |
https://github.com/wxWidgets/Phoenix/blob/b2199e299a6ca6d866aa6f3d0888499136ead9d6/wx/lib/ogl/basic.py#L3679-L3688
|
||
flairNLP/flair
|
b774774752c8338aab3d620f7e5062f66ec7a69d
|
flair/datasets/biomedical.py
|
python
|
Entity.overlaps
|
(self, other_entity)
|
return (self.char_span.start <= other_entity.char_span.start < self.char_span.stop) or (
self.char_span.start < other_entity.char_span.stop <= self.char_span.stop
)
|
Checks whether this and the given entity overlap
:param other_entity: Entity to check
|
Checks whether this and the given entity overlap
|
[
"Checks",
"whether",
"this",
"and",
"the",
"given",
"entity",
"overlap"
] |
def overlaps(self, other_entity) -> bool:
"""
Checks whether this and the given entity overlap
:param other_entity: Entity to check
"""
return (self.char_span.start <= other_entity.char_span.start < self.char_span.stop) or (
self.char_span.start < other_entity.char_span.stop <= self.char_span.stop
)
|
[
"def",
"overlaps",
"(",
"self",
",",
"other_entity",
")",
"->",
"bool",
":",
"return",
"(",
"self",
".",
"char_span",
".",
"start",
"<=",
"other_entity",
".",
"char_span",
".",
"start",
"<",
"self",
".",
"char_span",
".",
"stop",
")",
"or",
"(",
"self",
".",
"char_span",
".",
"start",
"<",
"other_entity",
".",
"char_span",
".",
"stop",
"<=",
"self",
".",
"char_span",
".",
"stop",
")"
] |
https://github.com/flairNLP/flair/blob/b774774752c8338aab3d620f7e5062f66ec7a69d/flair/datasets/biomedical.py#L81-L89
|
|
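A self-contained sketch of the half-open-interval overlap test used in the overlaps entry above; the Span class here is illustrative and not part of flair.

from dataclasses import dataclass

@dataclass
class Span:
    start: int
    stop: int  # half-open interval [start, stop)

def overlaps(a: Span, b: Span) -> bool:
    # b starts inside a, or b ends inside a -- the same expression as above
    return (a.start <= b.start < a.stop) or (a.start < b.stop <= a.stop)

assert overlaps(Span(0, 5), Span(3, 8))       # partial overlap
assert overlaps(Span(0, 5), Span(1, 3))       # containment
assert not overlaps(Span(0, 5), Span(5, 9))   # adjacent spans do not overlap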
jgagneastro/coffeegrindsize
|
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
|
App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/numpy/polynomial/legendre.py
|
python
|
legsub
|
(c1, c2)
|
return pu.trimseq(ret)
|
Subtract one Legendre series from another.
Returns the difference of two Legendre series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their difference.
See Also
--------
legadd, legmulx, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Legendre
series is a Legendre series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legsub(c1,c2)
array([-2., 0., 2.])
>>> L.legsub(c2,c1) # -C.legsub(c1,c2)
array([ 2., 0., -2.])
|
Subtract one Legendre series from another.
|
[
"Subtract",
"one",
"Legendre",
"series",
"from",
"another",
"."
] |
def legsub(c1, c2):
"""
Subtract one Legendre series from another.
Returns the difference of two Legendre series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their difference.
See Also
--------
legadd, legmulx, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Legendre
series is a Legendre series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legsub(c1,c2)
array([-2., 0., 2.])
>>> L.legsub(c2,c1) # -C.legsub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
|
[
"def",
"legsub",
"(",
"c1",
",",
"c2",
")",
":",
"# c1, c2 are trimmed copies",
"[",
"c1",
",",
"c2",
"]",
"=",
"pu",
".",
"as_series",
"(",
"[",
"c1",
",",
"c2",
"]",
")",
"if",
"len",
"(",
"c1",
")",
">",
"len",
"(",
"c2",
")",
":",
"c1",
"[",
":",
"c2",
".",
"size",
"]",
"-=",
"c2",
"ret",
"=",
"c1",
"else",
":",
"c2",
"=",
"-",
"c2",
"c2",
"[",
":",
"c1",
".",
"size",
"]",
"+=",
"c1",
"ret",
"=",
"c2",
"return",
"pu",
".",
"trimseq",
"(",
"ret",
")"
] |
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/numpy/polynomial/legendre.py#L383-L433
|
|
saltstack/salt
|
fae5bc757ad0f1716483ce7ae180b451545c2058
|
salt/modules/mac_portspkg.py
|
python
|
install
|
(name=None, refresh=False, pkgs=None, **kwargs)
|
return ret
|
Install the passed package(s) with ``port install``
name
The name of the formula to be installed. Note that this parameter is
ignored if "pkgs" is passed.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
version
Specify a version to pkg to install. Ignored if pkgs is specified.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
salt '*' pkg.install git-core version='1.8.5.5'
variant
Specify a variant to pkg to install. Ignored if pkgs is specified.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
salt '*' pkg.install git-core version='1.8.5.5' variant='+credential_osxkeychain+doc+pcre'
Multiple Package Installation Options:
pkgs
A list of formulas to install. Must be passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.install pkgs='["foo","bar"]'
salt '*' pkg.install pkgs='["[email protected]","bar"]'
salt '*' pkg.install pkgs='["[email protected]+ssl","[email protected]"]'
Returns a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.install 'package package package'
|
Install the passed package(s) with ``port install``
|
[
"Install",
"the",
"passed",
"package",
"(",
"s",
")",
"with",
"port",
"install"
] |
def install(name=None, refresh=False, pkgs=None, **kwargs):
"""
Install the passed package(s) with ``port install``
name
The name of the formula to be installed. Note that this parameter is
ignored if "pkgs" is passed.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
version
Specify a version to pkg to install. Ignored if pkgs is specified.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
salt '*' pkg.install git-core version='1.8.5.5'
variant
Specify a variant to pkg to install. Ignored if pkgs is specified.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
salt '*' pkg.install git-core version='1.8.5.5' variant='+credential_osxkeychain+doc+pcre'
Multiple Package Installation Options:
pkgs
A list of formulas to install. Must be passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.install pkgs='["foo","bar"]'
salt '*' pkg.install pkgs='["[email protected]","bar"]'
salt '*' pkg.install pkgs='["[email protected]+ssl","[email protected]"]'
Returns a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.install 'package package package'
"""
pkg_params, pkg_type = __salt__["pkg_resource.parse_targets"](name, pkgs, {})
if salt.utils.data.is_true(refresh):
refresh_db()
# Handle version kwarg for a single package target
if pkgs is None:
version_num = kwargs.get("version")
variant_spec = kwargs.get("variant")
spec = {}
if version_num:
spec["version"] = version_num
if variant_spec:
spec["variant"] = variant_spec
pkg_params = {name: spec}
if not pkg_params:
return {}
formulas_array = []
for pname, pparams in pkg_params.items():
formulas_array.append(pname)
if pparams:
if "version" in pparams:
formulas_array.append("@" + pparams["version"])
if "variant" in pparams:
formulas_array.append(pparams["variant"])
old = list_pkgs()
cmd = ["port", "install"]
cmd.extend(formulas_array)
err_message = ""
try:
salt.utils.mac_utils.execute_return_success(cmd)
except CommandExecutionError as exc:
err_message = exc.strerror
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if err_message:
raise CommandExecutionError(
"Problem encountered installing package(s)",
info={"errors": err_message, "changes": ret},
)
return ret
|
[
"def",
"install",
"(",
"name",
"=",
"None",
",",
"refresh",
"=",
"False",
",",
"pkgs",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"pkg_params",
",",
"pkg_type",
"=",
"__salt__",
"[",
"\"pkg_resource.parse_targets\"",
"]",
"(",
"name",
",",
"pkgs",
",",
"{",
"}",
")",
"if",
"salt",
".",
"utils",
".",
"data",
".",
"is_true",
"(",
"refresh",
")",
":",
"refresh_db",
"(",
")",
"# Handle version kwarg for a single package target",
"if",
"pkgs",
"is",
"None",
":",
"version_num",
"=",
"kwargs",
".",
"get",
"(",
"\"version\"",
")",
"variant_spec",
"=",
"kwargs",
".",
"get",
"(",
"\"variant\"",
")",
"spec",
"=",
"{",
"}",
"if",
"version_num",
":",
"spec",
"[",
"\"version\"",
"]",
"=",
"version_num",
"if",
"variant_spec",
":",
"spec",
"[",
"\"variant\"",
"]",
"=",
"variant_spec",
"pkg_params",
"=",
"{",
"name",
":",
"spec",
"}",
"if",
"not",
"pkg_params",
":",
"return",
"{",
"}",
"formulas_array",
"=",
"[",
"]",
"for",
"pname",
",",
"pparams",
"in",
"pkg_params",
".",
"items",
"(",
")",
":",
"formulas_array",
".",
"append",
"(",
"pname",
")",
"if",
"pparams",
":",
"if",
"\"version\"",
"in",
"pparams",
":",
"formulas_array",
".",
"append",
"(",
"\"@\"",
"+",
"pparams",
"[",
"\"version\"",
"]",
")",
"if",
"\"variant\"",
"in",
"pparams",
":",
"formulas_array",
".",
"append",
"(",
"pparams",
"[",
"\"variant\"",
"]",
")",
"old",
"=",
"list_pkgs",
"(",
")",
"cmd",
"=",
"[",
"\"port\"",
",",
"\"install\"",
"]",
"cmd",
".",
"extend",
"(",
"formulas_array",
")",
"err_message",
"=",
"\"\"",
"try",
":",
"salt",
".",
"utils",
".",
"mac_utils",
".",
"execute_return_success",
"(",
"cmd",
")",
"except",
"CommandExecutionError",
"as",
"exc",
":",
"err_message",
"=",
"exc",
".",
"strerror",
"__context__",
".",
"pop",
"(",
"\"pkg.list_pkgs\"",
",",
"None",
")",
"new",
"=",
"list_pkgs",
"(",
")",
"ret",
"=",
"salt",
".",
"utils",
".",
"data",
".",
"compare_dicts",
"(",
"old",
",",
"new",
")",
"if",
"err_message",
":",
"raise",
"CommandExecutionError",
"(",
"\"Problem encountered installing package(s)\"",
",",
"info",
"=",
"{",
"\"errors\"",
":",
"err_message",
",",
"\"changes\"",
":",
"ret",
"}",
",",
")",
"return",
"ret"
] |
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/mac_portspkg.py#L247-L359
|
|
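A small sketch of how the install entry above assembles the MacPorts command line from pkg_params; the package name, version, and variant below are made up for illustration.

# Mirrors the loop in install(): each target becomes name [@version] [variant].
pkg_params = {"git-core": {"version": "1.8.5.5", "variant": "+doc+pcre"}}

formulas_array = []
for pname, pparams in pkg_params.items():
    formulas_array.append(pname)
    if "version" in pparams:
        formulas_array.append("@" + pparams["version"])
    if "variant" in pparams:
        formulas_array.append(pparams["variant"])

cmd = ["port", "install"] + formulas_array
print(cmd)  # ['port', 'install', 'git-core', '@1.8.5.5', '+doc+pcre']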
dmnfarrell/pandastable
|
9c268b3e2bfe2e718eaee4a30bd02832a0ad1614
|
pandastable/plugins/rename.py
|
python
|
BatchRenamePlugin.refresh
|
(self)
|
return
|
Load files list
|
Load files list
|
[
"Load",
"files",
"list"
] |
def refresh(self):
"""Load files list"""
self.fileslist.delete('1.0',END)
fp = self.patternvar.get()
flist = glob.glob(os.path.join(self.path,fp))
filestr = '\n'.join(flist)
self.fileslist.insert(END, filestr)
return
|
[
"def",
"refresh",
"(",
"self",
")",
":",
"self",
".",
"fileslist",
".",
"delete",
"(",
"'1.0'",
",",
"END",
")",
"fp",
"=",
"self",
".",
"patternvar",
".",
"get",
"(",
")",
"flist",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"fp",
")",
")",
"filestr",
"=",
"'\\n'",
".",
"join",
"(",
"flist",
")",
"self",
".",
"fileslist",
".",
"insert",
"(",
"END",
",",
"filestr",
")",
"return"
] |
https://github.com/dmnfarrell/pandastable/blob/9c268b3e2bfe2e718eaee4a30bd02832a0ad1614/pandastable/plugins/rename.py#L126-L134
|
|
tensorflow/transform
|
bc5c3da6aebe9c8780da806e7e8103959c242863
|
tensorflow_transform/impl_helper.py
|
python
|
make_tensor_to_arrow_converter
|
(
schema: schema_pb2.Schema)
|
return tensor_to_arrow.TensorsToRecordBatchConverter(type_specs)
|
Constructs a `tf.Tensor` to `pa.RecordBatch` converter.
|
Constructs a `tf.Tensor` to `pa.RecordBatch` converter.
|
[
"Constructs",
"a",
"tf",
".",
"Tensor",
"to",
"pa",
".",
"RecordBatch",
"converter",
"."
] |
def make_tensor_to_arrow_converter(
schema: schema_pb2.Schema) -> tensor_to_arrow.TensorsToRecordBatchConverter:
"""Constructs a `tf.Tensor` to `pa.RecordBatch` converter."""
feature_specs = schema_utils.schema_as_feature_spec(schema).feature_spec
type_specs = get_type_specs_from_feature_specs(feature_specs)
return tensor_to_arrow.TensorsToRecordBatchConverter(type_specs)
|
[
"def",
"make_tensor_to_arrow_converter",
"(",
"schema",
":",
"schema_pb2",
".",
"Schema",
")",
"->",
"tensor_to_arrow",
".",
"TensorsToRecordBatchConverter",
":",
"feature_specs",
"=",
"schema_utils",
".",
"schema_as_feature_spec",
"(",
"schema",
")",
".",
"feature_spec",
"type_specs",
"=",
"get_type_specs_from_feature_specs",
"(",
"feature_specs",
")",
"return",
"tensor_to_arrow",
".",
"TensorsToRecordBatchConverter",
"(",
"type_specs",
")"
] |
https://github.com/tensorflow/transform/blob/bc5c3da6aebe9c8780da806e7e8103959c242863/tensorflow_transform/impl_helper.py#L550-L555
|
|
pyparallel/pyparallel
|
11e8c6072d48c8f13641925d17b147bf36ee0ba3
|
Lib/distutils/cygwinccompiler.py
|
python
|
get_versions
|
()
|
return tuple([_find_exe_version(cmd) for cmd in commands])
|
Try to find out the versions of gcc, ld and dllwrap.
If not possible it returns None for it.
|
Try to find out the versions of gcc, ld and dllwrap.
|
[
"Try",
"to",
"find",
"out",
"the",
"versions",
"of",
"gcc",
"ld",
"and",
"dllwrap",
"."
] |
def get_versions():
""" Try to find out the versions of gcc, ld and dllwrap.
If not possible it returns None for it.
"""
commands = ['gcc -dumpversion', 'ld -v', 'dllwrap --version']
return tuple([_find_exe_version(cmd) for cmd in commands])
|
[
"def",
"get_versions",
"(",
")",
":",
"commands",
"=",
"[",
"'gcc -dumpversion'",
",",
"'ld -v'",
",",
"'dllwrap --version'",
"]",
"return",
"tuple",
"(",
"[",
"_find_exe_version",
"(",
"cmd",
")",
"for",
"cmd",
"in",
"commands",
"]",
")"
] |
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/distutils/cygwinccompiler.py#L394-L400
|
|
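A hedged sketch of the same idea as the get_versions entry above, using subprocess and a regular expression rather than the distutils helper _find_exe_version; the output-parsing details are assumptions, not the stdlib implementation.

import re
import subprocess

def probe_version(cmd: str):
    """Run a version command and return the first dotted version found, else None."""
    try:
        proc = subprocess.run(cmd.split(), capture_output=True, text=True)
    except OSError:
        return None  # executable not found
    match = re.search(r"(\d+\.\d+(\.\d+)*)", proc.stdout + proc.stderr)
    return match.group(1) if match else None

versions = tuple(probe_version(c) for c in ("gcc -dumpversion", "ld -v", "dllwrap --version"))
print(versions)  # e.g. ('12.2.0', '2.40', None), depending on what is installed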
PaddlePaddle/Research
|
2da0bd6c72d60e9df403aff23a7802779561c4a1
|
NLP/ACL2020-GraphSum/src/networks/graphsum/run_graphsum.py
|
python
|
evaluate
|
(args, exe, program, pyreader, graph_vars, eval_phase, vocab_size,
do_dec=False, vocab_path=None, features=None, decode_path="")
|
Obtain model loss or decoding output
|
Obtain model loss or decoding output
|
[
"Obtain",
"model",
"loss",
"or",
"decoding",
"output"
] |
def evaluate(args, exe, program, pyreader, graph_vars, eval_phase, vocab_size,
do_dec=False, vocab_path=None, features=None, decode_path=""):
"""Obtain model loss or decoding output"""
if args.label_smooth_eps:
# the best cross-entropy value with label smoothing
loss_normalizer = -(
(1. - args.label_smooth_eps) * np.log((1. - args.label_smooth_eps)) +
args.label_smooth_eps * np.log(args.label_smooth_eps / (vocab_size - 1) + 1e-20))
else:
loss_normalizer = 0.0
if do_dec and not hasattr(evaluate, 'spm_vocab'):
"""load vocabulary"""
spm = sentencepiece.SentencePieceProcessor()
spm.Load(vocab_path)
symbols = {'BOS': spm.PieceToId('<S>'), 'EOS': spm.PieceToId('</S>'), 'PAD': spm.PieceToId('<PAD>'),
'EOT': spm.PieceToId('<T>'), 'EOP': spm.PieceToId('<P>'), 'EOQ': spm.PieceToId('<Q>'),
'UNK': spm.PieceToId('<UNK>')}
logger.info(symbols)
evaluate.spm_vocab = spm
evaluate.symbols = symbols
if eval_phase == "train":
fetch_list = [
graph_vars["loss"].name,
graph_vars["sum_correct"].name,
graph_vars["token_num"].name
]
if "learning_rate" in graph_vars:
fetch_list.append(graph_vars["learning_rate"].name)
outputs = exe.run(fetch_list=fetch_list)
sum_cost_val = outputs[0]
sum_correct_val = outputs[1]
token_num_val = outputs[2]
# sum the cost from multi-devices
total_avg_cost = np.mean(sum_cost_val)
total_token_num = token_num_val.sum()
total_correct = sum_correct_val.sum()
total_acc = (total_correct / total_token_num) * 100
ret = {
"loss": total_avg_cost - loss_normalizer,
"ppl": np.exp(total_avg_cost - loss_normalizer),
"acc": total_acc
}
if "learning_rate" in graph_vars:
ret["learning_rate"] = float(outputs[3][0])
return ret
if not do_dec:
fetch_list = [
graph_vars["loss"].name,
graph_vars["sum_correct"].name,
graph_vars["token_num"].name
]
else:
fetch_list = [
graph_vars["finished_ids"].name,
graph_vars["finished_scores"].name,
graph_vars["data_ids"].name,
]
if do_dec:
return_numpy = False
dec_out = {}
else:
steps = 0
cost = 0.0
acc = 0.0
return_numpy = True
time_begin = time.time()
pyreader.start()
while True:
try:
if args.use_multi_gpu_test:
outputs = exe.run(fetch_list=fetch_list,
return_numpy=return_numpy)
else:
outputs = exe.run(program=program, fetch_list=fetch_list,
return_numpy=return_numpy)
if not do_dec:
sum_cost_val = outputs[0]
sum_correct_val = outputs[1]
token_num_val = outputs[2]
# sum the cost from multi-devices
total_avg_cost = np.mean(sum_cost_val)
total_token_num = token_num_val.sum()
total_correct = sum_correct_val.sum()
total_acc = (total_correct / total_token_num) * 100
cost += total_avg_cost - loss_normalizer
acc += total_acc
steps += 1
else:
seq_ids, seq_scores, data_ids = outputs
seq_ids_list, seq_scores_list = [seq_ids], [
seq_scores] if isinstance(
seq_ids, paddle.fluid.core.LoDTensor) else (seq_ids, seq_scores)
data_ids = np.array(data_ids).reshape(-1).tolist()
data_idx = 0
for seq_ids, seq_scores in zip(seq_ids_list, seq_scores_list):
# How to parse the results:
# Suppose the lod of seq_ids is:
# [[0, 3, 6], [0, 12, 24, 40, 54, 67, 82]]
# then from lod[0]:
# there are 2 source sentences, beam width is 3.
# from lod[1]:
# the first source sentence has 3 hyps; the lengths are 12, 12, 16
# the second source sentence has 3 hyps; the lengths are 14, 13, 15
# hyps = [[] for i in range(len(seq_ids.lod()[0]) - 1)]
# scores = [[] for i in range(len(seq_scores.lod()[0]) - 1)]
for i in range(len(seq_ids.lod()[0]) - 1): # for each source sentence
start = seq_ids.lod()[0][i]
end = seq_ids.lod()[0][i + 1]
for j in range(end - start): # for each candidate
sub_start = seq_ids.lod()[1][start + j]
sub_end = seq_ids.lod()[1][start + j + 1]
token_ids = [int(idx) for idx in post_process_seq(
np.array(seq_ids)[sub_start:sub_end],
evaluate.symbols['BOS'], evaluate.symbols['EOS'])]
print(len(token_ids))
hyp_str = evaluate.spm_vocab.DecodeIds(token_ids).replace(' ##', '').replace('<S>', ''). \
replace('</S>', '').replace('<Q>', '<q>').replace('<P>', ' '). \
replace('<T>', '').replace('<PAD>', '').replace('⁇', '"')
hyp_str = re.sub('\\s+', ' ', hyp_str)
print(hyp_str)
score = np.array(seq_scores)[sub_end - 1]
print(score)
data_id = data_ids[data_idx]
data_idx += 1
dec_out[data_id] = (hyp_str, score)
break
except fluid.core.EOFException:
pyreader.reset()
break
time_end = time.time()
if not do_dec:
logger.info(
"[%s evaluation] loss: %f, ppl: %f, acc: %f, elapsed time: %f s"
% (eval_phase, cost / steps, np.exp(cost / steps), acc / steps, time_end - time_begin))
else:
# start predicting
gold_path = decode_path + '.gold'
can_path = decode_path + '.candidate'
gold_out_file = codecs.open(gold_path, 'w', 'utf-8')
can_out_file = codecs.open(can_path, 'w', 'utf-8')
preds = []
refs = []
keys = features.keys()
for i in keys:
ref_str = evaluate.spm_vocab.DecodeIds(
post_process_seq(features[i].tgt, evaluate.symbols['BOS'], evaluate.symbols['EOS'])). \
replace(' ##', '').replace('<S>', '').replace('</S>', '').replace('<Q>', '<q>').replace('<P>', ' '). \
replace('<T>', '').replace('<PAD>', '').replace('⁇', '"')
ref_str = re.sub('\\s+', ' ', ref_str)
refs.append(ref_str)
preds.append(dec_out[i][0])
# logger.info("scores[i] = %.4f" % dec_out[i][1])
gold_out_file.write(refs[i] + '\n')
can_out_file.write(preds[i] + '\n')
gold_out_file.close()
can_out_file.close()
if args.evaluate_blue:
bleu = evaluate_bleu(refs, preds)
logger.info(
"[%s evaluation] bleu-4: %f, elapsed time: %f s"
% (eval_phase, bleu, time_end - time_begin))
if args.report_rouge:
rouges = report_rouge(gold_path, can_path)
logger.info('Rouges \n%s' % rouge_results_to_str(rouges))
logger.info('elapsed time: %f s' % (time_end - time_begin))
|
[
"def",
"evaluate",
"(",
"args",
",",
"exe",
",",
"program",
",",
"pyreader",
",",
"graph_vars",
",",
"eval_phase",
",",
"vocab_size",
",",
"do_dec",
"=",
"False",
",",
"vocab_path",
"=",
"None",
",",
"features",
"=",
"None",
",",
"decode_path",
"=",
"\"\"",
")",
":",
"if",
"args",
".",
"label_smooth_eps",
":",
"# the best cross-entropy value with label smoothing",
"loss_normalizer",
"=",
"-",
"(",
"(",
"1.",
"-",
"args",
".",
"label_smooth_eps",
")",
"*",
"np",
".",
"log",
"(",
"(",
"1.",
"-",
"args",
".",
"label_smooth_eps",
")",
")",
"+",
"args",
".",
"label_smooth_eps",
"*",
"np",
".",
"log",
"(",
"args",
".",
"label_smooth_eps",
"/",
"(",
"vocab_size",
"-",
"1",
")",
"+",
"1e-20",
")",
")",
"else",
":",
"loss_normalizer",
"=",
"0.0",
"if",
"do_dec",
"and",
"not",
"hasattr",
"(",
"evaluate",
",",
"'spm_vocab'",
")",
":",
"\"\"\"load vocabulary\"\"\"",
"spm",
"=",
"sentencepiece",
".",
"SentencePieceProcessor",
"(",
")",
"spm",
".",
"Load",
"(",
"vocab_path",
")",
"symbols",
"=",
"{",
"'BOS'",
":",
"spm",
".",
"PieceToId",
"(",
"'<S>'",
")",
",",
"'EOS'",
":",
"spm",
".",
"PieceToId",
"(",
"'</S>'",
")",
",",
"'PAD'",
":",
"spm",
".",
"PieceToId",
"(",
"'<PAD>'",
")",
",",
"'EOT'",
":",
"spm",
".",
"PieceToId",
"(",
"'<T>'",
")",
",",
"'EOP'",
":",
"spm",
".",
"PieceToId",
"(",
"'<P>'",
")",
",",
"'EOQ'",
":",
"spm",
".",
"PieceToId",
"(",
"'<Q>'",
")",
",",
"'UNK'",
":",
"spm",
".",
"PieceToId",
"(",
"'<UNK>'",
")",
"}",
"logger",
".",
"info",
"(",
"symbols",
")",
"evaluate",
".",
"spm_vocab",
"=",
"spm",
"evaluate",
".",
"symbols",
"=",
"symbols",
"if",
"eval_phase",
"==",
"\"train\"",
":",
"fetch_list",
"=",
"[",
"graph_vars",
"[",
"\"loss\"",
"]",
".",
"name",
",",
"graph_vars",
"[",
"\"sum_correct\"",
"]",
".",
"name",
",",
"graph_vars",
"[",
"\"token_num\"",
"]",
".",
"name",
"]",
"if",
"\"learning_rate\"",
"in",
"graph_vars",
":",
"fetch_list",
".",
"append",
"(",
"graph_vars",
"[",
"\"learning_rate\"",
"]",
".",
"name",
")",
"outputs",
"=",
"exe",
".",
"run",
"(",
"fetch_list",
"=",
"fetch_list",
")",
"sum_cost_val",
"=",
"outputs",
"[",
"0",
"]",
"sum_correct_val",
"=",
"outputs",
"[",
"1",
"]",
"token_num_val",
"=",
"outputs",
"[",
"2",
"]",
"# sum the cost from multi-devices",
"total_avg_cost",
"=",
"np",
".",
"mean",
"(",
"sum_cost_val",
")",
"total_token_num",
"=",
"token_num_val",
".",
"sum",
"(",
")",
"total_correct",
"=",
"sum_correct_val",
".",
"sum",
"(",
")",
"total_acc",
"=",
"(",
"total_correct",
"/",
"total_token_num",
")",
"*",
"100",
"ret",
"=",
"{",
"\"loss\"",
":",
"total_avg_cost",
"-",
"loss_normalizer",
",",
"\"ppl\"",
":",
"np",
".",
"exp",
"(",
"total_avg_cost",
"-",
"loss_normalizer",
")",
",",
"\"acc\"",
":",
"total_acc",
"}",
"if",
"\"learning_rate\"",
"in",
"graph_vars",
":",
"ret",
"[",
"\"learning_rate\"",
"]",
"=",
"float",
"(",
"outputs",
"[",
"3",
"]",
"[",
"0",
"]",
")",
"return",
"ret",
"if",
"not",
"do_dec",
":",
"fetch_list",
"=",
"[",
"graph_vars",
"[",
"\"loss\"",
"]",
".",
"name",
",",
"graph_vars",
"[",
"\"sum_correct\"",
"]",
".",
"name",
",",
"graph_vars",
"[",
"\"token_num\"",
"]",
".",
"name",
"]",
"else",
":",
"fetch_list",
"=",
"[",
"graph_vars",
"[",
"\"finished_ids\"",
"]",
".",
"name",
",",
"graph_vars",
"[",
"\"finished_scores\"",
"]",
".",
"name",
",",
"graph_vars",
"[",
"\"data_ids\"",
"]",
".",
"name",
",",
"]",
"if",
"do_dec",
":",
"return_numpy",
"=",
"False",
"dec_out",
"=",
"{",
"}",
"else",
":",
"steps",
"=",
"0",
"cost",
"=",
"0.0",
"acc",
"=",
"0.0",
"return_numpy",
"=",
"True",
"time_begin",
"=",
"time",
".",
"time",
"(",
")",
"pyreader",
".",
"start",
"(",
")",
"while",
"True",
":",
"try",
":",
"if",
"args",
".",
"use_multi_gpu_test",
":",
"outputs",
"=",
"exe",
".",
"run",
"(",
"fetch_list",
"=",
"fetch_list",
",",
"return_numpy",
"=",
"return_numpy",
")",
"else",
":",
"outputs",
"=",
"exe",
".",
"run",
"(",
"program",
"=",
"program",
",",
"fetch_list",
"=",
"fetch_list",
",",
"return_numpy",
"=",
"return_numpy",
")",
"if",
"not",
"do_dec",
":",
"sum_cost_val",
"=",
"outputs",
"[",
"0",
"]",
"sum_correct_val",
"=",
"outputs",
"[",
"1",
"]",
"token_num_val",
"=",
"outputs",
"[",
"2",
"]",
"# sum the cost from multi-devices",
"total_avg_cost",
"=",
"np",
".",
"mean",
"(",
"sum_cost_val",
")",
"total_token_num",
"=",
"token_num_val",
".",
"sum",
"(",
")",
"total_correct",
"=",
"sum_correct_val",
".",
"sum",
"(",
")",
"total_acc",
"=",
"(",
"total_correct",
"/",
"total_token_num",
")",
"*",
"100",
"cost",
"+=",
"total_avg_cost",
"-",
"loss_normalizer",
"acc",
"+=",
"total_acc",
"steps",
"+=",
"1",
"else",
":",
"seq_ids",
",",
"seq_scores",
",",
"data_ids",
"=",
"outputs",
"seq_ids_list",
",",
"seq_scores_list",
"=",
"[",
"seq_ids",
"]",
",",
"[",
"seq_scores",
"]",
"if",
"isinstance",
"(",
"seq_ids",
",",
"paddle",
".",
"fluid",
".",
"core",
".",
"LoDTensor",
")",
"else",
"(",
"seq_ids",
",",
"seq_scores",
")",
"data_ids",
"=",
"np",
".",
"array",
"(",
"data_ids",
")",
".",
"reshape",
"(",
"-",
"1",
")",
".",
"tolist",
"(",
")",
"data_idx",
"=",
"0",
"for",
"seq_ids",
",",
"seq_scores",
"in",
"zip",
"(",
"seq_ids_list",
",",
"seq_scores_list",
")",
":",
"# How to parse the results:",
"# Suppose the lod of seq_ids is:",
"# [[0, 3, 6], [0, 12, 24, 40, 54, 67, 82]]",
"# then from lod[0]:",
"# there are 2 source sentences, beam width is 3.",
"# from lod[1]:",
"# the first source sentence has 3 hyps; the lengths are 12, 12, 16",
"# the second source sentence has 3 hyps; the lengths are 14, 13, 15",
"# hyps = [[] for i in range(len(seq_ids.lod()[0]) - 1)]",
"# scores = [[] for i in range(len(seq_scores.lod()[0]) - 1)]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"seq_ids",
".",
"lod",
"(",
")",
"[",
"0",
"]",
")",
"-",
"1",
")",
":",
"# for each source sentence",
"start",
"=",
"seq_ids",
".",
"lod",
"(",
")",
"[",
"0",
"]",
"[",
"i",
"]",
"end",
"=",
"seq_ids",
".",
"lod",
"(",
")",
"[",
"0",
"]",
"[",
"i",
"+",
"1",
"]",
"for",
"j",
"in",
"range",
"(",
"end",
"-",
"start",
")",
":",
"# for each candidate",
"sub_start",
"=",
"seq_ids",
".",
"lod",
"(",
")",
"[",
"1",
"]",
"[",
"start",
"+",
"j",
"]",
"sub_end",
"=",
"seq_ids",
".",
"lod",
"(",
")",
"[",
"1",
"]",
"[",
"start",
"+",
"j",
"+",
"1",
"]",
"token_ids",
"=",
"[",
"int",
"(",
"idx",
")",
"for",
"idx",
"in",
"post_process_seq",
"(",
"np",
".",
"array",
"(",
"seq_ids",
")",
"[",
"sub_start",
":",
"sub_end",
"]",
",",
"evaluate",
".",
"symbols",
"[",
"'BOS'",
"]",
",",
"evaluate",
".",
"symbols",
"[",
"'EOS'",
"]",
")",
"]",
"print",
"(",
"len",
"(",
"token_ids",
")",
")",
"hyp_str",
"=",
"evaluate",
".",
"spm_vocab",
".",
"DecodeIds",
"(",
"token_ids",
")",
".",
"replace",
"(",
"' ##'",
",",
"''",
")",
".",
"replace",
"(",
"'<S>'",
",",
"''",
")",
".",
"replace",
"(",
"'</S>'",
",",
"''",
")",
".",
"replace",
"(",
"'<Q>'",
",",
"'<q>'",
")",
".",
"replace",
"(",
"'<P>'",
",",
"' '",
")",
".",
"replace",
"(",
"'<T>'",
",",
"''",
")",
".",
"replace",
"(",
"'<PAD>'",
",",
"''",
")",
".",
"replace",
"(",
"'⁇', ",
"'",
"')",
"",
"hyp_str",
"=",
"re",
".",
"sub",
"(",
"'\\\\s+'",
",",
"' '",
",",
"hyp_str",
")",
"print",
"(",
"hyp_str",
")",
"score",
"=",
"np",
".",
"array",
"(",
"seq_scores",
")",
"[",
"sub_end",
"-",
"1",
"]",
"print",
"(",
"score",
")",
"data_id",
"=",
"data_ids",
"[",
"data_idx",
"]",
"data_idx",
"+=",
"1",
"dec_out",
"[",
"data_id",
"]",
"=",
"(",
"hyp_str",
",",
"score",
")",
"break",
"except",
"fluid",
".",
"core",
".",
"EOFException",
":",
"pyreader",
".",
"reset",
"(",
")",
"break",
"time_end",
"=",
"time",
".",
"time",
"(",
")",
"if",
"not",
"do_dec",
":",
"logger",
".",
"info",
"(",
"\"[%s evaluation] loss: %f, ppl: %f, acc: %f, elapsed time: %f s\"",
"%",
"(",
"eval_phase",
",",
"cost",
"/",
"steps",
",",
"np",
".",
"exp",
"(",
"cost",
"/",
"steps",
")",
",",
"acc",
"/",
"steps",
",",
"time_end",
"-",
"time_begin",
")",
")",
"else",
":",
"# start predicting",
"gold_path",
"=",
"decode_path",
"+",
"'.gold'",
"can_path",
"=",
"decode_path",
"+",
"'.candidate'",
"gold_out_file",
"=",
"codecs",
".",
"open",
"(",
"gold_path",
",",
"'w'",
",",
"'utf-8'",
")",
"can_out_file",
"=",
"codecs",
".",
"open",
"(",
"can_path",
",",
"'w'",
",",
"'utf-8'",
")",
"preds",
"=",
"[",
"]",
"refs",
"=",
"[",
"]",
"keys",
"=",
"features",
".",
"keys",
"(",
")",
"for",
"i",
"in",
"keys",
":",
"ref_str",
"=",
"evaluate",
".",
"spm_vocab",
".",
"DecodeIds",
"(",
"post_process_seq",
"(",
"features",
"[",
"i",
"]",
".",
"tgt",
",",
"evaluate",
".",
"symbols",
"[",
"'BOS'",
"]",
",",
"evaluate",
".",
"symbols",
"[",
"'EOS'",
"]",
")",
")",
".",
"replace",
"(",
"' ##'",
",",
"''",
")",
".",
"replace",
"(",
"'<S>'",
",",
"''",
")",
".",
"replace",
"(",
"'</S>'",
",",
"''",
")",
".",
"replace",
"(",
"'<Q>'",
",",
"'<q>'",
")",
".",
"replace",
"(",
"'<P>'",
",",
"' '",
")",
".",
"replace",
"(",
"'<T>'",
",",
"''",
")",
".",
"replace",
"(",
"'<PAD>'",
",",
"''",
")",
".",
"replace",
"(",
"'⁇', ",
"'",
"')",
"",
"ref_str",
"=",
"re",
".",
"sub",
"(",
"'\\\\s+'",
",",
"' '",
",",
"ref_str",
")",
"refs",
".",
"append",
"(",
"ref_str",
")",
"preds",
".",
"append",
"(",
"dec_out",
"[",
"i",
"]",
"[",
"0",
"]",
")",
"# logger.info(\"scores[i] = %.4f\" % dec_out[i][1])",
"gold_out_file",
".",
"write",
"(",
"refs",
"[",
"i",
"]",
"+",
"'\\n'",
")",
"can_out_file",
".",
"write",
"(",
"preds",
"[",
"i",
"]",
"+",
"'\\n'",
")",
"gold_out_file",
".",
"close",
"(",
")",
"can_out_file",
".",
"close",
"(",
")",
"if",
"args",
".",
"evaluate_blue",
":",
"bleu",
"=",
"evaluate_bleu",
"(",
"refs",
",",
"preds",
")",
"logger",
".",
"info",
"(",
"\"[%s evaluation] bleu-4: %f, elapsed time: %f s\"",
"%",
"(",
"eval_phase",
",",
"bleu",
",",
"time_end",
"-",
"time_begin",
")",
")",
"if",
"args",
".",
"report_rouge",
":",
"rouges",
"=",
"report_rouge",
"(",
"gold_path",
",",
"can_path",
")",
"logger",
".",
"info",
"(",
"'Rouges \\n%s'",
"%",
"rouge_results_to_str",
"(",
"rouges",
")",
")",
"logger",
".",
"info",
"(",
"'elapsed time: %f s'",
"%",
"(",
"time_end",
"-",
"time_begin",
")",
")"
] |
https://github.com/PaddlePaddle/Research/blob/2da0bd6c72d60e9df403aff23a7802779561c4a1/NLP/ACL2020-GraphSum/src/networks/graphsum/run_graphsum.py#L453-L641
|
||
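A short numeric sketch of the loss_normalizer computed near the top of the evaluate entry above: the best achievable cross-entropy under label smoothing. The eps and vocab_size values are illustrative.

import numpy as np

def label_smoothing_normalizer(eps: float, vocab_size: int) -> float:
    # Entropy of the smoothed target distribution: (1 - eps) on the gold token,
    # eps spread uniformly over the remaining vocab_size - 1 tokens.
    return -((1.0 - eps) * np.log(1.0 - eps)
             + eps * np.log(eps / (vocab_size - 1) + 1e-20))

print(label_smoothing_normalizer(0.1, 30000))  # reported loss and ppl are shifted by this amount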
magicalraccoon/tootstream
|
6dd84fc3767ef25df645a599cb632ad3745744df
|
src/tootstream/toot.py
|
python
|
unmute
|
(mastodon, rest)
|
Unmutes a user by username or id.
ex: unmute 23
unmute @user
unmute @[email protected]
|
Unmutes a user by username or id.
|
[
"Unmutes",
"a",
"user",
"by",
"username",
"or",
"id",
"."
] |
def unmute(mastodon, rest):
"""Unmutes a user by username or id.
ex: unmute 23
unmute @user
unmute @[email protected]"""
userid = get_userid(mastodon, rest)
if isinstance(userid, list):
cprint(" multiple matches found:", fg('red'))
printUsersShort(userid)
elif userid == -1:
cprint(" username not found", fg('red'))
else:
try:
relations = mastodon.account_unmute(userid)
if not relations['muting']:
cprint(" user " + str(userid) + " is now unmuted", fg('blue'))
except:
cprint(" Error, unable to unmute.", fg('red'))
|
[
"def",
"unmute",
"(",
"mastodon",
",",
"rest",
")",
":",
"userid",
"=",
"get_userid",
"(",
"mastodon",
",",
"rest",
")",
"if",
"isinstance",
"(",
"userid",
",",
"list",
")",
":",
"cprint",
"(",
"\" multiple matches found:\"",
",",
"fg",
"(",
"'red'",
")",
")",
"printUsersShort",
"(",
"userid",
")",
"elif",
"userid",
"==",
"-",
"1",
":",
"cprint",
"(",
"\" username not found\"",
",",
"fg",
"(",
"'red'",
")",
")",
"else",
":",
"try",
":",
"relations",
"=",
"mastodon",
".",
"account_unmute",
"(",
"userid",
")",
"if",
"not",
"relations",
"[",
"'muting'",
"]",
":",
"cprint",
"(",
"\" user \"",
"+",
"str",
"(",
"userid",
")",
"+",
"\" is now unmuted\"",
",",
"fg",
"(",
"'blue'",
")",
")",
"except",
":",
"cprint",
"(",
"\" Error, unable to unmute.\"",
",",
"fg",
"(",
"'red'",
")",
")"
] |
https://github.com/magicalraccoon/tootstream/blob/6dd84fc3767ef25df645a599cb632ad3745744df/src/tootstream/toot.py#L1737-L1755
|
||
JacquesLucke/animation_nodes
|
b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1
|
animation_nodes/id_keys/data_types/transforms_type.py
|
python
|
TransformDataType.iterSubpropertyPaths
|
(cls, name)
|
[] |
def iterSubpropertyPaths(cls, name):
yield '["AN*Transforms*Location*%s"]' % name
yield '["AN*Transforms*Rotation*%s"]' % name
yield '["AN*Transforms*Scale*%s"]' % name
|
[
"def",
"iterSubpropertyPaths",
"(",
"cls",
",",
"name",
")",
":",
"yield",
"'[\"AN*Transforms*Location*%s\"]'",
"%",
"name",
"yield",
"'[\"AN*Transforms*Rotation*%s\"]'",
"%",
"name",
"yield",
"'[\"AN*Transforms*Scale*%s\"]'",
"%",
"name"
] |
https://github.com/JacquesLucke/animation_nodes/blob/b1e3ace8dcb0a771fd882fc3ac4e490b009fa0d1/animation_nodes/id_keys/data_types/transforms_type.py#L109-L112
|
||||
missionpinball/mpf
|
8e6b74cff4ba06d2fec9445742559c1068b88582
|
mpf/platforms/visual_pinball_engine/visual_pinball_engine.py
|
python
|
VisualPinballEnginePlatform.send_command
|
(self, command)
|
Send command to VPE.
|
Send command to VPE.
|
[
"Send",
"command",
"to",
"VPE",
"."
] |
def send_command(self, command):
"""Send command to VPE."""
self.platform_rpc.send_command(command)
|
[
"def",
"send_command",
"(",
"self",
",",
"command",
")",
":",
"self",
".",
"platform_rpc",
".",
"send_command",
"(",
"command",
")"
] |
https://github.com/missionpinball/mpf/blob/8e6b74cff4ba06d2fec9445742559c1068b88582/mpf/platforms/visual_pinball_engine/visual_pinball_engine.py#L303-L305
|
||
PaddlePaddle/Research
|
2da0bd6c72d60e9df403aff23a7802779561c4a1
|
ST_DM/KDD2021-MSTPAC/code/MST-PAC/frame/core/gpu_trainer.py
|
python
|
GPUTrainer.set_optimizer
|
(self, FLAGS, net_output)
|
return optimizer.minimize(net_output['loss'])
|
set optimizer
|
set optimizer
|
[
"set",
"optimizer"
] |
def set_optimizer(self, FLAGS, net_output):
"""
set optimizer
"""
optimizer = net_output['optimizer']
if self.is_multi_gpu(FLAGS):
trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
num_trainers = int(os.getenv("PADDLE_TRAINERS_NUM"))
trainer_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS")
logging.info("train_id:%s, num_trainers:%s, trainer_endpoints:%s" % (trainer_id,
num_trainers, trainer_endpoints))
trainer_endpoints = trainer_endpoints.split(',')
role = role_maker.UserDefinedCollectiveRoleMaker(current_id=trainer_id,
worker_endpoints=trainer_endpoints)
fleet.init(role)
dist_strategy = DistributedStrategy()
#num_nodes = len(set([x.split(':')[0] for x in trainer_endpoints]))
#if num_nodes == 1:
# dist_strategy.use_local_sgd = True
#dist_strategy.mode = "collective" #multi node is nccl2
#dist_strategy.collective_mode = "local_sgd" # local_sgd or grad_allreduce
# logging.info("use local sgd, not nccl2 for single node.")
"""
#TODO:
dist_strategy.enable_inplace = FLAGS.with_inplace
if FLAGS.fuse_ops:
dist_strategy.fuse_all_reduce_ops = 1
dist_strategy.nccl_comm_num = FLAGS.nccl_comm_num
"""
optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)
return optimizer.minimize(net_output['loss'])
|
[
"def",
"set_optimizer",
"(",
"self",
",",
"FLAGS",
",",
"net_output",
")",
":",
"optimizer",
"=",
"net_output",
"[",
"'optimizer'",
"]",
"if",
"self",
".",
"is_multi_gpu",
"(",
"FLAGS",
")",
":",
"trainer_id",
"=",
"int",
"(",
"os",
".",
"getenv",
"(",
"\"PADDLE_TRAINER_ID\"",
")",
")",
"num_trainers",
"=",
"int",
"(",
"os",
".",
"getenv",
"(",
"\"PADDLE_TRAINERS_NUM\"",
")",
")",
"trainer_endpoints",
"=",
"os",
".",
"getenv",
"(",
"\"PADDLE_TRAINER_ENDPOINTS\"",
")",
"logging",
".",
"info",
"(",
"\"train_id:%s, num_trainers:%s, trainer_endpoints:%s\"",
"%",
"(",
"trainer_id",
",",
"num_trainers",
",",
"trainer_endpoints",
")",
")",
"trainer_endpoints",
"=",
"trainer_endpoints",
".",
"split",
"(",
"','",
")",
"role",
"=",
"role_maker",
".",
"UserDefinedCollectiveRoleMaker",
"(",
"current_id",
"=",
"trainer_id",
",",
"worker_endpoints",
"=",
"trainer_endpoints",
")",
"fleet",
".",
"init",
"(",
"role",
")",
"dist_strategy",
"=",
"DistributedStrategy",
"(",
")",
"#num_nodes = len(set([x.split(':')[0] for x in trainer_endpoints]))",
"#if num_nodes == 1:",
"# dist_strategy.use_local_sgd = True",
"#dist_strategy.mode = \"collective\" #multi node is nccl2",
"#dist_strategy.collective_mode = \"local_sgd\" # local_sgd or grad_allreduce",
"# logging.info(\"use local sgd, not nccl2 for single node.\")",
"\"\"\"\n #TODO:\n dist_strategy.enable_inplace = FLAGS.with_inplace\n if FLAGS.fuse_ops:\n dist_strategy.fuse_all_reduce_ops = 1\n dist_strategy.nccl_comm_num = FLAGS.nccl_comm_num\n \"\"\"",
"optimizer",
"=",
"fleet",
".",
"distributed_optimizer",
"(",
"optimizer",
",",
"strategy",
"=",
"dist_strategy",
")",
"return",
"optimizer",
".",
"minimize",
"(",
"net_output",
"[",
"'loss'",
"]",
")"
] |
https://github.com/PaddlePaddle/Research/blob/2da0bd6c72d60e9df403aff23a7802779561c4a1/ST_DM/KDD2021-MSTPAC/code/MST-PAC/frame/core/gpu_trainer.py#L43-L78
|
|
SteveDoyle2/pyNastran
|
eda651ac2d4883d95a34951f8a002ff94f642a1a
|
pyNastran/dev/bdf_vectorized/cards/aero/aero_cards.py
|
python
|
FLFACT.add_card
|
(cls, card, comment='')
|
return FLFACT(sid, factors, comment=comment)
|
Adds an FLFACT card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
|
Adds an FLFACT card from ``BDF.add_card(...)``
|
[
"Adds",
"an",
"FLFACT",
"card",
"from",
"BDF",
".",
"add_card",
"(",
"...",
")"
] |
def add_card(cls, card, comment=''):
"""
Adds an FLFACT card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
assert len(card) > 2, 'len(FLFACT card)=%s; card=%s' % (len(card), card)
field3 = double_string_or_blank(card, 3, 'THRU')
if field3 is None:
f1 = double(card, 2, 'f1')
factors = [f1]
assert len(card) == 3, 'len(FLFACT card)=%s; card=%s' % (len(card), card)
elif isinstance(field3, float):
factors = fields(double, card, 'factors', i=2, j=len(card))
elif isinstance(field3, str) and field3 == 'THRU':
f1 = double(card, 2, 'f1')
fnf = double(card, 4, 'fnf')
nf = integer(card, 5, 'nf')
fmid_default = (f1 + fnf) / 2.
fmid = double_or_blank(card, 6, 'fmid', fmid_default)
assert len(card) <= 7, 'len(FLFACT card)=%s; card=%s' % (len(card), card)
factors = [f1, 'THRU', fnf, nf, fmid]
else:
raise SyntaxError('expected a float or string for FLFACT field 3; value=%r' % field3)
return FLFACT(sid, factors, comment=comment)
|
[
"def",
"add_card",
"(",
"cls",
",",
"card",
",",
"comment",
"=",
"''",
")",
":",
"sid",
"=",
"integer",
"(",
"card",
",",
"1",
",",
"'sid'",
")",
"assert",
"len",
"(",
"card",
")",
">",
"2",
",",
"'len(FLFACT card)=%s; card=%s'",
"%",
"(",
"len",
"(",
"card",
")",
",",
"card",
")",
"field3",
"=",
"double_string_or_blank",
"(",
"card",
",",
"3",
",",
"'THRU'",
")",
"if",
"field3",
"is",
"None",
":",
"f1",
"=",
"double",
"(",
"card",
",",
"2",
",",
"'f1'",
")",
"factors",
"=",
"[",
"f1",
"]",
"assert",
"len",
"(",
"card",
")",
"==",
"3",
",",
"'len(FLFACT card)=%s; card=%s'",
"%",
"(",
"len",
"(",
"card",
")",
",",
"card",
")",
"elif",
"isinstance",
"(",
"field3",
",",
"float",
")",
":",
"factors",
"=",
"fields",
"(",
"double",
",",
"card",
",",
"'factors'",
",",
"i",
"=",
"2",
",",
"j",
"=",
"len",
"(",
"card",
")",
")",
"elif",
"isinstance",
"(",
"field3",
",",
"str",
")",
"and",
"field3",
"==",
"'THRU'",
":",
"f1",
"=",
"double",
"(",
"card",
",",
"2",
",",
"'f1'",
")",
"fnf",
"=",
"double",
"(",
"card",
",",
"4",
",",
"'fnf'",
")",
"nf",
"=",
"integer",
"(",
"card",
",",
"5",
",",
"'nf'",
")",
"fmid_default",
"=",
"(",
"f1",
"+",
"fnf",
")",
"/",
"2.",
"fmid",
"=",
"double_or_blank",
"(",
"card",
",",
"6",
",",
"'fmid'",
",",
"fmid_default",
")",
"assert",
"len",
"(",
"card",
")",
"<=",
"7",
",",
"'len(FLFACT card)=%s; card=%s'",
"%",
"(",
"len",
"(",
"card",
")",
",",
"card",
")",
"factors",
"=",
"[",
"f1",
",",
"'THRU'",
",",
"fnf",
",",
"nf",
",",
"fmid",
"]",
"else",
":",
"raise",
"SyntaxError",
"(",
"'expected a float or string for FLFACT field 3; value=%r'",
"%",
"field3",
")",
"return",
"FLFACT",
"(",
"sid",
",",
"factors",
",",
"comment",
"=",
"comment",
")"
] |
https://github.com/SteveDoyle2/pyNastran/blob/eda651ac2d4883d95a34951f8a002ff94f642a1a/pyNastran/dev/bdf_vectorized/cards/aero/aero_cards.py#L3308-L3338
|