nwo (string, 5–106 chars) | sha (string, 40 chars) | path (string, 4–174 chars) | language (1 class) | identifier (string, 1–140 chars) | parameters (string, 0–87.7k chars) | argument_list (1 class) | return_statement (string, 0–426k chars) | docstring (string, 0–64.3k chars) | docstring_summary (string, 0–26.3k chars) | docstring_tokens (list) | function (string, 18–4.83M chars) | function_tokens (list) | url (string, 83–304 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
plaid/plaid-python
|
8c60fca608e426f3ff30da8857775946d29e122c
|
plaid/model/wallet_balance.py
|
python
|
WalletBalance.__init__
|
(self, iso_currency_code, current, *args, **kwargs)
|
WalletBalance - a model defined in OpenAPI
Args:
iso_currency_code (str): The ISO-4217 currency code of the balance
current (float): The total amount of funds in the account
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
|
WalletBalance - a model defined in OpenAPI
|
[
"WalletBalance",
"-",
"a",
"model",
"defined",
"in",
"OpenAPI"
] |
def __init__(self, iso_currency_code, current, *args, **kwargs): # noqa: E501
"""WalletBalance - a model defined in OpenAPI
Args:
iso_currency_code (str): The ISO-4217 currency code of the balance
current (float): The total amount of funds in the account
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.iso_currency_code = iso_currency_code
self.current = current
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
[
"def",
"__init__",
"(",
"self",
",",
"iso_currency_code",
",",
"current",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"_check_type",
"=",
"kwargs",
".",
"pop",
"(",
"'_check_type'",
",",
"True",
")",
"_spec_property_naming",
"=",
"kwargs",
".",
"pop",
"(",
"'_spec_property_naming'",
",",
"False",
")",
"_path_to_item",
"=",
"kwargs",
".",
"pop",
"(",
"'_path_to_item'",
",",
"(",
")",
")",
"_configuration",
"=",
"kwargs",
".",
"pop",
"(",
"'_configuration'",
",",
"None",
")",
"_visited_composed_classes",
"=",
"kwargs",
".",
"pop",
"(",
"'_visited_composed_classes'",
",",
"(",
")",
")",
"if",
"args",
":",
"raise",
"ApiTypeError",
"(",
"\"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.\"",
"%",
"(",
"args",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
")",
",",
"path_to_item",
"=",
"_path_to_item",
",",
"valid_classes",
"=",
"(",
"self",
".",
"__class__",
",",
")",
",",
")",
"self",
".",
"_data_store",
"=",
"{",
"}",
"self",
".",
"_check_type",
"=",
"_check_type",
"self",
".",
"_spec_property_naming",
"=",
"_spec_property_naming",
"self",
".",
"_path_to_item",
"=",
"_path_to_item",
"self",
".",
"_configuration",
"=",
"_configuration",
"self",
".",
"_visited_composed_classes",
"=",
"_visited_composed_classes",
"+",
"(",
"self",
".",
"__class__",
",",
")",
"self",
".",
"iso_currency_code",
"=",
"iso_currency_code",
"self",
".",
"current",
"=",
"current",
"for",
"var_name",
",",
"var_value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"var_name",
"not",
"in",
"self",
".",
"attribute_map",
"and",
"self",
".",
"_configuration",
"is",
"not",
"None",
"and",
"self",
".",
"_configuration",
".",
"discard_unknown_keys",
"and",
"self",
".",
"additional_properties_type",
"is",
"None",
":",
"# discard variable.",
"continue",
"setattr",
"(",
"self",
",",
"var_name",
",",
"var_value",
")"
] |
https://github.com/plaid/plaid-python/blob/8c60fca608e426f3ff30da8857775946d29e122c/plaid/model/wallet_balance.py#L105-L177
|
||
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/site-packages/asn1crypto/x509.py
|
python
|
Name.sha256
|
(self)
|
return self._sha256
|
:return:
The SHA-256 hash of the DER-encoded bytes of this name
|
:return:
The SHA-256 hash of the DER-encoded bytes of this name
|
[
":",
"return",
":",
"The",
"SHA",
"-",
"256",
"hash",
"of",
"the",
"DER",
"-",
"encoded",
"bytes",
"of",
"this",
"name"
] |
def sha256(self):
"""
:return:
The SHA-256 hash of the DER-encoded bytes of this name
"""
if self._sha256 is None:
self._sha256 = hashlib.sha256(self.dump()).digest()
return self._sha256
|
[
"def",
"sha256",
"(",
"self",
")",
":",
"if",
"self",
".",
"_sha256",
"is",
"None",
":",
"self",
".",
"_sha256",
"=",
"hashlib",
".",
"sha256",
"(",
"self",
".",
"dump",
"(",
")",
")",
".",
"digest",
"(",
")",
"return",
"self",
".",
"_sha256"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/asn1crypto/x509.py#L1145-L1153
|
|
appu1232/Discord-Selfbot
|
2305be70cdd8499c4ddb8b79101c70ac2f3fbb0d
|
cogs/utility.py
|
python
|
Utility.hb
|
(self, ctx, *, msg)
|
Posts to Hastebin
|
Posts to Hastebin
|
[
"Posts",
"to",
"Hastebin"
] |
async def hb(self, ctx, *, msg):
"""Posts to Hastebin"""
if ctx.invoked_subcommand is None:
pre = cmd_prefix_len()
url = await hastebin(msg, self.bot.session)
await ctx.send(self.bot.bot_prefix + 'Hastebin output: ' + url)
await ctx.message.delete()
|
[
"async",
"def",
"hb",
"(",
"self",
",",
"ctx",
",",
"*",
",",
"msg",
")",
":",
"if",
"ctx",
".",
"invoked_subcommand",
"is",
"None",
":",
"pre",
"=",
"cmd_prefix_len",
"(",
")",
"url",
"=",
"await",
"hastebin",
"(",
"msg",
",",
"self",
".",
"bot",
".",
"session",
")",
"await",
"ctx",
".",
"send",
"(",
"self",
".",
"bot",
".",
"bot_prefix",
"+",
"'Hastebin output: '",
"+",
"url",
")",
"await",
"ctx",
".",
"message",
".",
"delete",
"(",
")"
] |
https://github.com/appu1232/Discord-Selfbot/blob/2305be70cdd8499c4ddb8b79101c70ac2f3fbb0d/cogs/utility.py#L269-L275
|
||
ayoolaolafenwa/PixelLib
|
ae56003c416a98780141a1170c9d888fe9a31317
|
pixellib/instance/mask_rcnn.py
|
python
|
mrcnn_mask_loss_graph
|
(target_masks, target_class_ids, pred_masks)
|
return loss
|
Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
|
Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
|
[
"Mask",
"binary",
"cross",
"-",
"entropy",
"loss",
"for",
"the",
"masks",
"head",
".",
"target_masks",
":",
"[",
"batch",
"num_rois",
"height",
"width",
"]",
".",
"A",
"float32",
"tensor",
"of",
"values",
"0",
"or",
"1",
".",
"Uses",
"zero",
"padding",
"to",
"fill",
"array",
".",
"target_class_ids",
":",
"[",
"batch",
"num_rois",
"]",
".",
"Integer",
"class",
"IDs",
".",
"Zero",
"padded",
".",
"pred_masks",
":",
"[",
"batch",
"proposals",
"height",
"width",
"num_classes",
"]",
"float32",
"tensor",
"with",
"values",
"from",
"0",
"to",
"1",
"."
] |
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
"""Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
"""
# Reshape for simplicity. Merge first two dimensions into one.
target_class_ids = K.reshape(target_class_ids, (-1,))
mask_shape = tf.shape(input=target_masks)
target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
pred_shape = tf.shape(input=pred_masks)
pred_masks = K.reshape(pred_masks,
(-1, pred_shape[2], pred_shape[3], pred_shape[4]))
# Permute predicted masks to [N, num_classes, height, width]
pred_masks = tf.transpose(a=pred_masks, perm=[0, 3, 1, 2])
# Only positive ROIs contribute to the loss. And only
# the class specific mask of each ROI.
positive_ix = tf.compat.v1.where(target_class_ids > 0)[:, 0]
positive_class_ids = tf.cast(
tf.gather(target_class_ids, positive_ix), tf.int64)
indices = tf.stack([positive_ix, positive_class_ids], axis=1)
# Gather the masks (predicted and true) that contribute to loss
y_true = tf.gather(target_masks, positive_ix)
y_pred = tf.gather_nd(pred_masks, indices)
# Compute binary cross entropy. If no positive ROIs, then return 0.
# shape: [batch, roi, num_classes]
loss = K.switch(tf.size(input=y_true) > 0,
K.binary_crossentropy(target=y_true, output=y_pred),
tf.constant(0.0))
loss = K.mean(loss)
return loss
|
[
"def",
"mrcnn_mask_loss_graph",
"(",
"target_masks",
",",
"target_class_ids",
",",
"pred_masks",
")",
":",
"# Reshape for simplicity. Merge first two dimensions into one.",
"target_class_ids",
"=",
"K",
".",
"reshape",
"(",
"target_class_ids",
",",
"(",
"-",
"1",
",",
")",
")",
"mask_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"target_masks",
")",
"target_masks",
"=",
"K",
".",
"reshape",
"(",
"target_masks",
",",
"(",
"-",
"1",
",",
"mask_shape",
"[",
"2",
"]",
",",
"mask_shape",
"[",
"3",
"]",
")",
")",
"pred_shape",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"pred_masks",
")",
"pred_masks",
"=",
"K",
".",
"reshape",
"(",
"pred_masks",
",",
"(",
"-",
"1",
",",
"pred_shape",
"[",
"2",
"]",
",",
"pred_shape",
"[",
"3",
"]",
",",
"pred_shape",
"[",
"4",
"]",
")",
")",
"# Permute predicted masks to [N, num_classes, height, width]",
"pred_masks",
"=",
"tf",
".",
"transpose",
"(",
"a",
"=",
"pred_masks",
",",
"perm",
"=",
"[",
"0",
",",
"3",
",",
"1",
",",
"2",
"]",
")",
"# Only positive ROIs contribute to the loss. And only",
"# the class specific mask of each ROI.",
"positive_ix",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"where",
"(",
"target_class_ids",
">",
"0",
")",
"[",
":",
",",
"0",
"]",
"positive_class_ids",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"gather",
"(",
"target_class_ids",
",",
"positive_ix",
")",
",",
"tf",
".",
"int64",
")",
"indices",
"=",
"tf",
".",
"stack",
"(",
"[",
"positive_ix",
",",
"positive_class_ids",
"]",
",",
"axis",
"=",
"1",
")",
"# Gather the masks (predicted and true) that contribute to loss",
"y_true",
"=",
"tf",
".",
"gather",
"(",
"target_masks",
",",
"positive_ix",
")",
"y_pred",
"=",
"tf",
".",
"gather_nd",
"(",
"pred_masks",
",",
"indices",
")",
"# Compute binary cross entropy. If no positive ROIs, then return 0.",
"# shape: [batch, roi, num_classes]",
"loss",
"=",
"K",
".",
"switch",
"(",
"tf",
".",
"size",
"(",
"input",
"=",
"y_true",
")",
">",
"0",
",",
"K",
".",
"binary_crossentropy",
"(",
"target",
"=",
"y_true",
",",
"output",
"=",
"y_pred",
")",
",",
"tf",
".",
"constant",
"(",
"0.0",
")",
")",
"loss",
"=",
"K",
".",
"mean",
"(",
"loss",
")",
"return",
"loss"
] |
https://github.com/ayoolaolafenwa/PixelLib/blob/ae56003c416a98780141a1170c9d888fe9a31317/pixellib/instance/mask_rcnn.py#L1145-L1180
|
|
Qiskit/qiskit-terra
|
b66030e3b9192efdd3eb95cf25c6545fe0a13da4
|
qiskit/circuit/library/n_local/qaoa_ansatz.py
|
python
|
QAOAAnsatz.parameter_bounds
|
(
self, bounds: Optional[List[Tuple[Optional[float], Optional[float]]]]
)
|
Set the parameter bounds.
Args:
bounds: The new parameter bounds.
|
Set the parameter bounds.
|
[
"Set",
"the",
"parameter",
"bounds",
"."
] |
def parameter_bounds(
self, bounds: Optional[List[Tuple[Optional[float], Optional[float]]]]
) -> None:
"""Set the parameter bounds.
Args:
bounds: The new parameter bounds.
"""
self._bounds = bounds
|
[
"def",
"parameter_bounds",
"(",
"self",
",",
"bounds",
":",
"Optional",
"[",
"List",
"[",
"Tuple",
"[",
"Optional",
"[",
"float",
"]",
",",
"Optional",
"[",
"float",
"]",
"]",
"]",
"]",
")",
"->",
"None",
":",
"self",
".",
"_bounds",
"=",
"bounds"
] |
https://github.com/Qiskit/qiskit-terra/blob/b66030e3b9192efdd3eb95cf25c6545fe0a13da4/qiskit/circuit/library/n_local/qaoa_ansatz.py#L137-L145
|
||
hatRiot/zarp
|
2e772350a01c2aeed3f4da9685cd0cc5d6b3ecad
|
src/lib/libmproxy/contrib/jsbeautifier/unpackers/__init__.py
|
python
|
filtercomments
|
(source)
|
return '\n'.join(trailing_comments) + source
|
NOT USED: strips trailing comments and put them at the top.
|
NOT USED: strips trailing comments and put them at the top.
|
[
"NOT",
"USED",
":",
"strips",
"trailing",
"comments",
"and",
"put",
"them",
"at",
"the",
"top",
"."
] |
def filtercomments(source):
"""NOT USED: strips trailing comments and put them at the top."""
trailing_comments = []
comment = True
while comment:
if re.search(r'^\s*\/\*', source):
comment = source[0, source.index('*/') + 2]
elif re.search(r'^\s*\/\/', source):
comment = re.search(r'^\s*\/\/', source).group(0)
else:
comment = None
if comment:
source = re.sub(r'^\s+', '', source[len(comment):])
trailing_comments.append(comment)
return '\n'.join(trailing_comments) + source
|
[
"def",
"filtercomments",
"(",
"source",
")",
":",
"trailing_comments",
"=",
"[",
"]",
"comment",
"=",
"True",
"while",
"comment",
":",
"if",
"re",
".",
"search",
"(",
"r'^\\s*\\/\\*'",
",",
"source",
")",
":",
"comment",
"=",
"source",
"[",
"0",
",",
"source",
".",
"index",
"(",
"'*/'",
")",
"+",
"2",
"]",
"elif",
"re",
".",
"search",
"(",
"r'^\\s*\\/\\/'",
",",
"source",
")",
":",
"comment",
"=",
"re",
".",
"search",
"(",
"r'^\\s*\\/\\/'",
",",
"source",
")",
".",
"group",
"(",
"0",
")",
"else",
":",
"comment",
"=",
"None",
"if",
"comment",
":",
"source",
"=",
"re",
".",
"sub",
"(",
"r'^\\s+'",
",",
"''",
",",
"source",
"[",
"len",
"(",
"comment",
")",
":",
"]",
")",
"trailing_comments",
".",
"append",
"(",
"comment",
")",
"return",
"'\\n'",
".",
"join",
"(",
"trailing_comments",
")",
"+",
"source"
] |
https://github.com/hatRiot/zarp/blob/2e772350a01c2aeed3f4da9685cd0cc5d6b3ecad/src/lib/libmproxy/contrib/jsbeautifier/unpackers/__init__.py#L50-L67
|
|
facebookresearch/open_lth
|
2ce732fe48abd5a80c10a153c45d397b048e980c
|
training/train.py
|
python
|
train
|
(
training_hparams: hparams.TrainingHparams,
model: Model,
train_loader: DataLoader,
output_location: str,
callbacks: typing.List[typing.Callable] = [],
start_step: Step = None,
end_step: Step = None
)
|
The main training loop for this framework.
Args:
* training_hparams: The training hyperparameters whose schema is specified in hparams.py.
* model: The model to train. Must be a models.base.Model
* train_loader: The training data. Must be a datasets.base.DataLoader
* output_location: The string path where all outputs should be stored.
* callbacks: A list of functions that are called before each training step and once more
after the last training step. Each function takes five arguments: the current step,
the output location, the model, the optimizer, and the logger.
Callbacks are used for running the test set, saving the logger, saving the state of the
model, etc. The provide hooks into the training loop for customization so that the
training loop itself can remain simple.
* start_step: The step at which the training data and learning rate schedule should begin.
Defaults to step 0.
* end_step: The step at which training should cease. Otherwise, training will go for the
full `training_hparams.training_steps` steps.
|
The main training loop for this framework.
|
[
"The",
"main",
"training",
"loop",
"for",
"this",
"framework",
"."
] |
def train(
training_hparams: hparams.TrainingHparams,
model: Model,
train_loader: DataLoader,
output_location: str,
callbacks: typing.List[typing.Callable] = [],
start_step: Step = None,
end_step: Step = None
):
"""The main training loop for this framework.
Args:
* training_hparams: The training hyperparameters whose schema is specified in hparams.py.
* model: The model to train. Must be a models.base.Model
* train_loader: The training data. Must be a datasets.base.DataLoader
* output_location: The string path where all outputs should be stored.
* callbacks: A list of functions that are called before each training step and once more
after the last training step. Each function takes five arguments: the current step,
the output location, the model, the optimizer, and the logger.
Callbacks are used for running the test set, saving the logger, saving the state of the
model, etc. The provide hooks into the training loop for customization so that the
training loop itself can remain simple.
* start_step: The step at which the training data and learning rate schedule should begin.
Defaults to step 0.
* end_step: The step at which training should cease. Otherwise, training will go for the
full `training_hparams.training_steps` steps.
"""
# Create the output location if it doesn't already exist.
if not get_platform().exists(output_location) and get_platform().is_primary_process:
get_platform().makedirs(output_location)
# Get the optimizer and learning rate schedule.
model.to(get_platform().torch_device)
optimizer = optimizers.get_optimizer(training_hparams, model)
step_optimizer = optimizer
lr_schedule = optimizers.get_lr_schedule(training_hparams, optimizer, train_loader.iterations_per_epoch)
# Adapt for FP16.
if training_hparams.apex_fp16:
if NO_APEX: raise ImportError('Must install nvidia apex to use this model.')
model, step_optimizer = apex.amp.initialize(model, optimizer, loss_scale='dynamic', verbosity=0)
# Handle parallelism if applicable.
if get_platform().is_distributed:
model = DistributedDataParallel(model, device_ids=[get_platform().rank])
elif get_platform().is_parallel:
model = DataParallel(model)
# Get the random seed for the data order.
data_order_seed = training_hparams.data_order_seed
# Restore the model from a saved checkpoint if the checkpoint exists.
cp_step, cp_logger = restore_checkpoint(output_location, model, optimizer, train_loader.iterations_per_epoch)
start_step = cp_step or start_step or Step.zero(train_loader.iterations_per_epoch)
logger = cp_logger or MetricLogger()
with warnings.catch_warnings(): # Filter unnecessary warning.
warnings.filterwarnings("ignore", category=UserWarning)
for _ in range(start_step.iteration): lr_schedule.step()
# Determine when to end training.
end_step = end_step or Step.from_str(training_hparams.training_steps, train_loader.iterations_per_epoch)
if end_step <= start_step: return
# The training loop.
for ep in range(start_step.ep, end_step.ep + 1):
# Ensure the data order is different for each epoch.
train_loader.shuffle(None if data_order_seed is None else (data_order_seed + ep))
for it, (examples, labels) in enumerate(train_loader):
# Advance the data loader until the start epoch and iteration.
if ep == start_step.ep and it < start_step.it: continue
# Run the callbacks.
step = Step.from_epoch(ep, it, train_loader.iterations_per_epoch)
for callback in callbacks: callback(output_location, step, model, optimizer, logger)
# Exit at the end step.
if ep == end_step.ep and it == end_step.it: return
# Otherwise, train.
examples = examples.to(device=get_platform().torch_device)
labels = labels.to(device=get_platform().torch_device)
step_optimizer.zero_grad()
model.train()
loss = model.loss_criterion(model(examples), labels)
if training_hparams.apex_fp16:
with apex.amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# Step forward. Ignore extraneous warnings that the lr_schedule generates.
step_optimizer.step()
with warnings.catch_warnings(): # Filter unnecessary warning.
warnings.filterwarnings("ignore", category=UserWarning)
lr_schedule.step()
get_platform().barrier()
|
[
"def",
"train",
"(",
"training_hparams",
":",
"hparams",
".",
"TrainingHparams",
",",
"model",
":",
"Model",
",",
"train_loader",
":",
"DataLoader",
",",
"output_location",
":",
"str",
",",
"callbacks",
":",
"typing",
".",
"List",
"[",
"typing",
".",
"Callable",
"]",
"=",
"[",
"]",
",",
"start_step",
":",
"Step",
"=",
"None",
",",
"end_step",
":",
"Step",
"=",
"None",
")",
":",
"# Create the output location if it doesn't already exist.",
"if",
"not",
"get_platform",
"(",
")",
".",
"exists",
"(",
"output_location",
")",
"and",
"get_platform",
"(",
")",
".",
"is_primary_process",
":",
"get_platform",
"(",
")",
".",
"makedirs",
"(",
"output_location",
")",
"# Get the optimizer and learning rate schedule.",
"model",
".",
"to",
"(",
"get_platform",
"(",
")",
".",
"torch_device",
")",
"optimizer",
"=",
"optimizers",
".",
"get_optimizer",
"(",
"training_hparams",
",",
"model",
")",
"step_optimizer",
"=",
"optimizer",
"lr_schedule",
"=",
"optimizers",
".",
"get_lr_schedule",
"(",
"training_hparams",
",",
"optimizer",
",",
"train_loader",
".",
"iterations_per_epoch",
")",
"# Adapt for FP16.",
"if",
"training_hparams",
".",
"apex_fp16",
":",
"if",
"NO_APEX",
":",
"raise",
"ImportError",
"(",
"'Must install nvidia apex to use this model.'",
")",
"model",
",",
"step_optimizer",
"=",
"apex",
".",
"amp",
".",
"initialize",
"(",
"model",
",",
"optimizer",
",",
"loss_scale",
"=",
"'dynamic'",
",",
"verbosity",
"=",
"0",
")",
"# Handle parallelism if applicable.",
"if",
"get_platform",
"(",
")",
".",
"is_distributed",
":",
"model",
"=",
"DistributedDataParallel",
"(",
"model",
",",
"device_ids",
"=",
"[",
"get_platform",
"(",
")",
".",
"rank",
"]",
")",
"elif",
"get_platform",
"(",
")",
".",
"is_parallel",
":",
"model",
"=",
"DataParallel",
"(",
"model",
")",
"# Get the random seed for the data order.",
"data_order_seed",
"=",
"training_hparams",
".",
"data_order_seed",
"# Restore the model from a saved checkpoint if the checkpoint exists.",
"cp_step",
",",
"cp_logger",
"=",
"restore_checkpoint",
"(",
"output_location",
",",
"model",
",",
"optimizer",
",",
"train_loader",
".",
"iterations_per_epoch",
")",
"start_step",
"=",
"cp_step",
"or",
"start_step",
"or",
"Step",
".",
"zero",
"(",
"train_loader",
".",
"iterations_per_epoch",
")",
"logger",
"=",
"cp_logger",
"or",
"MetricLogger",
"(",
")",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"# Filter unnecessary warning.",
"warnings",
".",
"filterwarnings",
"(",
"\"ignore\"",
",",
"category",
"=",
"UserWarning",
")",
"for",
"_",
"in",
"range",
"(",
"start_step",
".",
"iteration",
")",
":",
"lr_schedule",
".",
"step",
"(",
")",
"# Determine when to end training.",
"end_step",
"=",
"end_step",
"or",
"Step",
".",
"from_str",
"(",
"training_hparams",
".",
"training_steps",
",",
"train_loader",
".",
"iterations_per_epoch",
")",
"if",
"end_step",
"<=",
"start_step",
":",
"return",
"# The training loop.",
"for",
"ep",
"in",
"range",
"(",
"start_step",
".",
"ep",
",",
"end_step",
".",
"ep",
"+",
"1",
")",
":",
"# Ensure the data order is different for each epoch.",
"train_loader",
".",
"shuffle",
"(",
"None",
"if",
"data_order_seed",
"is",
"None",
"else",
"(",
"data_order_seed",
"+",
"ep",
")",
")",
"for",
"it",
",",
"(",
"examples",
",",
"labels",
")",
"in",
"enumerate",
"(",
"train_loader",
")",
":",
"# Advance the data loader until the start epoch and iteration.",
"if",
"ep",
"==",
"start_step",
".",
"ep",
"and",
"it",
"<",
"start_step",
".",
"it",
":",
"continue",
"# Run the callbacks.",
"step",
"=",
"Step",
".",
"from_epoch",
"(",
"ep",
",",
"it",
",",
"train_loader",
".",
"iterations_per_epoch",
")",
"for",
"callback",
"in",
"callbacks",
":",
"callback",
"(",
"output_location",
",",
"step",
",",
"model",
",",
"optimizer",
",",
"logger",
")",
"# Exit at the end step.",
"if",
"ep",
"==",
"end_step",
".",
"ep",
"and",
"it",
"==",
"end_step",
".",
"it",
":",
"return",
"# Otherwise, train.",
"examples",
"=",
"examples",
".",
"to",
"(",
"device",
"=",
"get_platform",
"(",
")",
".",
"torch_device",
")",
"labels",
"=",
"labels",
".",
"to",
"(",
"device",
"=",
"get_platform",
"(",
")",
".",
"torch_device",
")",
"step_optimizer",
".",
"zero_grad",
"(",
")",
"model",
".",
"train",
"(",
")",
"loss",
"=",
"model",
".",
"loss_criterion",
"(",
"model",
"(",
"examples",
")",
",",
"labels",
")",
"if",
"training_hparams",
".",
"apex_fp16",
":",
"with",
"apex",
".",
"amp",
".",
"scale_loss",
"(",
"loss",
",",
"optimizer",
")",
"as",
"scaled_loss",
":",
"scaled_loss",
".",
"backward",
"(",
")",
"else",
":",
"loss",
".",
"backward",
"(",
")",
"# Step forward. Ignore extraneous warnings that the lr_schedule generates.",
"step_optimizer",
".",
"step",
"(",
")",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"# Filter unnecessary warning.",
"warnings",
".",
"filterwarnings",
"(",
"\"ignore\"",
",",
"category",
"=",
"UserWarning",
")",
"lr_schedule",
".",
"step",
"(",
")",
"get_platform",
"(",
")",
".",
"barrier",
"(",
")"
] |
https://github.com/facebookresearch/open_lth/blob/2ce732fe48abd5a80c10a153c45d397b048e980c/training/train.py#L29-L131
|
||
DataBiosphere/toil
|
2e148eee2114ece8dcc3ec8a83f36333266ece0d
|
src/toil/fileStores/cachingFileStore.py
|
python
|
CachingFileStore._tryToFreeUpSpace
|
(self)
|
If disk space is overcommitted, try one round of collecting files to upload/download/delete/evict.
Return whether we manage to get any space freed or not.
|
If disk space is overcommitted, try one round of collecting files to upload/download/delete/evict.
Return whether we manage to get any space freed or not.
|
[
"If",
"disk",
"space",
"is",
"overcommitted",
"try",
"one",
"round",
"of",
"collecting",
"files",
"to",
"upload",
"/",
"download",
"/",
"delete",
"/",
"evict",
".",
"Return",
"whether",
"we",
"manage",
"to",
"get",
"any",
"space",
"freed",
"or",
"not",
"."
] |
def _tryToFreeUpSpace(self):
"""
If disk space is overcommitted, try one round of collecting files to upload/download/delete/evict.
Return whether we manage to get any space freed or not.
"""
# First we want to make sure that dead jobs aren't holding
# references to files and keeping them from looking unused.
self._removeDeadJobs(self.workDir, self.con)
# Adopt work from any dead workers
self._stealWorkFromTheDead()
if self._executePendingDeletions(self.workDir, self.con, self.cur) > 0:
# We actually had something to delete, which we deleted.
# Maybe there is space now
logger.debug('Successfully executed pending deletions to free space')
return True
if self._executePendingUploads(self.con, self.cur) > 0:
# We had something to upload. Maybe it can be evicted now.
logger.debug('Successfully executed pending uploads to free space')
return True
# Otherwise, not enough files could be found in deleting state to solve our problem.
# We need to put something into the deleting state.
# TODO: give other people time to finish their in-progress
# evictions before starting more, or we might evict everything as
# soon as we hit the cache limit.
# Find something that has no non-mutable references and is not already being deleted.
self.cur.execute("""
SELECT files.id FROM files WHERE files.state = 'cached' AND NOT EXISTS (
SELECT NULL FROM refs WHERE refs.file_id = files.id AND refs.state != 'mutable'
) LIMIT 1
""")
row = self.cur.fetchone()
if row is None:
# Nothing can be evicted by us.
# Someone else might be in the process of evicting something that will free up space for us too.
# Or someone mught be uploading something and we have to wait for them to finish before it can be deleted.
logger.debug('Could not find anything to evict! Cannot free up space!')
return False
# Otherwise we found an eviction candidate.
fileID = row[0]
# Work out who we are
me = get_process_name(self.workDir)
# Try and grab it for deletion, subject to the condition that nothing has started reading it
self._write([("""
UPDATE files SET owner = ?, state = ? WHERE id = ? AND state = ?
AND owner IS NULL AND NOT EXISTS (
SELECT NULL FROM refs WHERE refs.file_id = files.id AND refs.state != 'mutable'
)
""",
(me, 'deleting', fileID, 'cached'))])
logger.debug('Evicting file %s', fileID)
# Whether we actually got it or not, try deleting everything we have to delete
if self._executePendingDeletions(self.workDir, self.con, self.cur) > 0:
# We deleted something
logger.debug('Successfully executed pending deletions to free space')
return True
|
[
"def",
"_tryToFreeUpSpace",
"(",
"self",
")",
":",
"# First we want to make sure that dead jobs aren't holding",
"# references to files and keeping them from looking unused.",
"self",
".",
"_removeDeadJobs",
"(",
"self",
".",
"workDir",
",",
"self",
".",
"con",
")",
"# Adopt work from any dead workers",
"self",
".",
"_stealWorkFromTheDead",
"(",
")",
"if",
"self",
".",
"_executePendingDeletions",
"(",
"self",
".",
"workDir",
",",
"self",
".",
"con",
",",
"self",
".",
"cur",
")",
">",
"0",
":",
"# We actually had something to delete, which we deleted.",
"# Maybe there is space now",
"logger",
".",
"debug",
"(",
"'Successfully executed pending deletions to free space'",
")",
"return",
"True",
"if",
"self",
".",
"_executePendingUploads",
"(",
"self",
".",
"con",
",",
"self",
".",
"cur",
")",
">",
"0",
":",
"# We had something to upload. Maybe it can be evicted now.",
"logger",
".",
"debug",
"(",
"'Successfully executed pending uploads to free space'",
")",
"return",
"True",
"# Otherwise, not enough files could be found in deleting state to solve our problem.",
"# We need to put something into the deleting state.",
"# TODO: give other people time to finish their in-progress",
"# evictions before starting more, or we might evict everything as",
"# soon as we hit the cache limit.",
"# Find something that has no non-mutable references and is not already being deleted.",
"self",
".",
"cur",
".",
"execute",
"(",
"\"\"\"\n SELECT files.id FROM files WHERE files.state = 'cached' AND NOT EXISTS (\n SELECT NULL FROM refs WHERE refs.file_id = files.id AND refs.state != 'mutable'\n ) LIMIT 1\n \"\"\"",
")",
"row",
"=",
"self",
".",
"cur",
".",
"fetchone",
"(",
")",
"if",
"row",
"is",
"None",
":",
"# Nothing can be evicted by us.",
"# Someone else might be in the process of evicting something that will free up space for us too.",
"# Or someone mught be uploading something and we have to wait for them to finish before it can be deleted.",
"logger",
".",
"debug",
"(",
"'Could not find anything to evict! Cannot free up space!'",
")",
"return",
"False",
"# Otherwise we found an eviction candidate.",
"fileID",
"=",
"row",
"[",
"0",
"]",
"# Work out who we are",
"me",
"=",
"get_process_name",
"(",
"self",
".",
"workDir",
")",
"# Try and grab it for deletion, subject to the condition that nothing has started reading it",
"self",
".",
"_write",
"(",
"[",
"(",
"\"\"\"\n UPDATE files SET owner = ?, state = ? WHERE id = ? AND state = ?\n AND owner IS NULL AND NOT EXISTS (\n SELECT NULL FROM refs WHERE refs.file_id = files.id AND refs.state != 'mutable'\n )\n \"\"\"",
",",
"(",
"me",
",",
"'deleting'",
",",
"fileID",
",",
"'cached'",
")",
")",
"]",
")",
"logger",
".",
"debug",
"(",
"'Evicting file %s'",
",",
"fileID",
")",
"# Whether we actually got it or not, try deleting everything we have to delete",
"if",
"self",
".",
"_executePendingDeletions",
"(",
"self",
".",
"workDir",
",",
"self",
".",
"con",
",",
"self",
".",
"cur",
")",
">",
"0",
":",
"# We deleted something",
"logger",
".",
"debug",
"(",
"'Successfully executed pending deletions to free space'",
")",
"return",
"True"
] |
https://github.com/DataBiosphere/toil/blob/2e148eee2114ece8dcc3ec8a83f36333266ece0d/src/toil/fileStores/cachingFileStore.py#L862-L927
|
||
quantumlib/OpenFermion
|
6187085f2a7707012b68370b625acaeed547e62b
|
src/openfermion/transforms/repconversions/conversions.py
|
python
|
get_interaction_operator
|
(fermion_operator, n_qubits=None)
|
return interaction_operator
|
r"""Convert a 2-body fermionic operator to InteractionOperator.
This function should only be called on fermionic operators which
consist of only a_p^\dagger a_q and a_p^\dagger a_q^\dagger a_r a_s
terms. The one-body terms are stored in a matrix, one_body[p, q], and
the two-body terms are stored in a tensor, two_body[p, q, r, s].
Returns:
interaction_operator: An instance of the InteractionOperator class.
Raises:
TypeError: Input must be a FermionOperator.
TypeError: FermionOperator does not map to InteractionOperator.
Warning:
Even assuming that each creation or annihilation operator appears
at most a constant number of times in the original operator, the
runtime of this method is exponential in the number of qubits.
|
r"""Convert a 2-body fermionic operator to InteractionOperator.
|
[
"r",
"Convert",
"a",
"2",
"-",
"body",
"fermionic",
"operator",
"to",
"InteractionOperator",
"."
] |
def get_interaction_operator(fermion_operator, n_qubits=None):
r"""Convert a 2-body fermionic operator to InteractionOperator.
This function should only be called on fermionic operators which
consist of only a_p^\dagger a_q and a_p^\dagger a_q^\dagger a_r a_s
terms. The one-body terms are stored in a matrix, one_body[p, q], and
the two-body terms are stored in a tensor, two_body[p, q, r, s].
Returns:
interaction_operator: An instance of the InteractionOperator class.
Raises:
TypeError: Input must be a FermionOperator.
TypeError: FermionOperator does not map to InteractionOperator.
Warning:
Even assuming that each creation or annihilation operator appears
at most a constant number of times in the original operator, the
runtime of this method is exponential in the number of qubits.
"""
if not isinstance(fermion_operator, FermionOperator):
raise TypeError('Input must be a FermionOperator.')
check_no_sympy(fermion_operator)
if n_qubits is None:
n_qubits = op_utils.count_qubits(fermion_operator)
if n_qubits < op_utils.count_qubits(fermion_operator):
raise ValueError('Invalid number of qubits specified.')
# Normal order the terms and initialize.
fermion_operator = normal_ordered(fermion_operator)
constant = 0.
one_body = numpy.zeros((n_qubits, n_qubits), complex)
two_body = numpy.zeros((n_qubits, n_qubits, n_qubits, n_qubits), complex)
# Loop through terms and assign to matrix.
for term in fermion_operator.terms:
coefficient = fermion_operator.terms[term]
# Ignore this term if the coefficient is zero
if abs(coefficient) < EQ_TOLERANCE:
# not testable because normal_ordered kills
# fermion terms lower than EQ_TOLERANCE
continue # pragma: no cover
# Handle constant shift.
if len(term) == 0:
constant = coefficient
elif len(term) == 2:
# Handle one-body terms.
if [operator[1] for operator in term] == [1, 0]:
p, q = [operator[0] for operator in term]
one_body[p, q] = coefficient
else:
raise InteractionOperatorError('FermionOperator does not map '
'to InteractionOperator.')
elif len(term) == 4:
# Handle two-body terms.
if [operator[1] for operator in term] == [1, 1, 0, 0]:
p, q, r, s = [operator[0] for operator in term]
two_body[p, q, r, s] = coefficient
else:
raise InteractionOperatorError('FermionOperator does not map '
'to InteractionOperator.')
else:
# Handle non-molecular Hamiltonian.
raise InteractionOperatorError('FermionOperator does not map '
'to InteractionOperator.')
# Form InteractionOperator and return.
interaction_operator = InteractionOperator(constant, one_body, two_body)
return interaction_operator
|
[
"def",
"get_interaction_operator",
"(",
"fermion_operator",
",",
"n_qubits",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"fermion_operator",
",",
"FermionOperator",
")",
":",
"raise",
"TypeError",
"(",
"'Input must be a FermionOperator.'",
")",
"check_no_sympy",
"(",
"fermion_operator",
")",
"if",
"n_qubits",
"is",
"None",
":",
"n_qubits",
"=",
"op_utils",
".",
"count_qubits",
"(",
"fermion_operator",
")",
"if",
"n_qubits",
"<",
"op_utils",
".",
"count_qubits",
"(",
"fermion_operator",
")",
":",
"raise",
"ValueError",
"(",
"'Invalid number of qubits specified.'",
")",
"# Normal order the terms and initialize.",
"fermion_operator",
"=",
"normal_ordered",
"(",
"fermion_operator",
")",
"constant",
"=",
"0.",
"one_body",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"n_qubits",
",",
"n_qubits",
")",
",",
"complex",
")",
"two_body",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"n_qubits",
",",
"n_qubits",
",",
"n_qubits",
",",
"n_qubits",
")",
",",
"complex",
")",
"# Loop through terms and assign to matrix.",
"for",
"term",
"in",
"fermion_operator",
".",
"terms",
":",
"coefficient",
"=",
"fermion_operator",
".",
"terms",
"[",
"term",
"]",
"# Ignore this term if the coefficient is zero",
"if",
"abs",
"(",
"coefficient",
")",
"<",
"EQ_TOLERANCE",
":",
"# not testable because normal_ordered kills",
"# fermion terms lower than EQ_TOLERANCE",
"continue",
"# pragma: no cover",
"# Handle constant shift.",
"if",
"len",
"(",
"term",
")",
"==",
"0",
":",
"constant",
"=",
"coefficient",
"elif",
"len",
"(",
"term",
")",
"==",
"2",
":",
"# Handle one-body terms.",
"if",
"[",
"operator",
"[",
"1",
"]",
"for",
"operator",
"in",
"term",
"]",
"==",
"[",
"1",
",",
"0",
"]",
":",
"p",
",",
"q",
"=",
"[",
"operator",
"[",
"0",
"]",
"for",
"operator",
"in",
"term",
"]",
"one_body",
"[",
"p",
",",
"q",
"]",
"=",
"coefficient",
"else",
":",
"raise",
"InteractionOperatorError",
"(",
"'FermionOperator does not map '",
"'to InteractionOperator.'",
")",
"elif",
"len",
"(",
"term",
")",
"==",
"4",
":",
"# Handle two-body terms.",
"if",
"[",
"operator",
"[",
"1",
"]",
"for",
"operator",
"in",
"term",
"]",
"==",
"[",
"1",
",",
"1",
",",
"0",
",",
"0",
"]",
":",
"p",
",",
"q",
",",
"r",
",",
"s",
"=",
"[",
"operator",
"[",
"0",
"]",
"for",
"operator",
"in",
"term",
"]",
"two_body",
"[",
"p",
",",
"q",
",",
"r",
",",
"s",
"]",
"=",
"coefficient",
"else",
":",
"raise",
"InteractionOperatorError",
"(",
"'FermionOperator does not map '",
"'to InteractionOperator.'",
")",
"else",
":",
"# Handle non-molecular Hamiltonian.",
"raise",
"InteractionOperatorError",
"(",
"'FermionOperator does not map '",
"'to InteractionOperator.'",
")",
"# Form InteractionOperator and return.",
"interaction_operator",
"=",
"InteractionOperator",
"(",
"constant",
",",
"one_body",
",",
"two_body",
")",
"return",
"interaction_operator"
] |
https://github.com/quantumlib/OpenFermion/blob/6187085f2a7707012b68370b625acaeed547e62b/src/openfermion/transforms/repconversions/conversions.py#L240-L314
|
|
django-nonrel/django-nonrel
|
4fbfe7344481a5eab8698f79207f09124310131b
|
django/contrib/gis/geos/geometry.py
|
python
|
GEOSGeometry.intersection
|
(self, other)
|
return self._topology(capi.geos_intersection(self.ptr, other.ptr))
|
Returns a Geometry representing the points shared by this Geometry and other.
|
Returns a Geometry representing the points shared by this Geometry and other.
|
[
"Returns",
"a",
"Geometry",
"representing",
"the",
"points",
"shared",
"by",
"this",
"Geometry",
"and",
"other",
"."
] |
def intersection(self, other):
"Returns a Geometry representing the points shared by this Geometry and other."
return self._topology(capi.geos_intersection(self.ptr, other.ptr))
|
[
"def",
"intersection",
"(",
"self",
",",
"other",
")",
":",
"return",
"self",
".",
"_topology",
"(",
"capi",
".",
"geos_intersection",
"(",
"self",
".",
"ptr",
",",
"other",
".",
"ptr",
")",
")"
] |
https://github.com/django-nonrel/django-nonrel/blob/4fbfe7344481a5eab8698f79207f09124310131b/django/contrib/gis/geos/geometry.py#L586-L588
|
|
plasticityai/supersqlite
|
d74da749c6fa5df021df3968b854b9a59f829e17
|
supersqlite/third_party/_apsw/tools/shell.py
|
python
|
Shell.command_schema
|
(self, cmd)
|
schema ?TABLE? [TABLE...]: Shows SQL for table
If you give one or more tables then their schema is listed
(including indices). If you don't specify any then all
schemas are listed. TABLE is a like pattern so you can % for
wildcards.
|
schema ?TABLE? [TABLE...]: Shows SQL for table
|
[
"schema",
"?TABLE?",
"[",
"TABLE",
"...",
"]",
":",
"Shows",
"SQL",
"for",
"table"
] |
def command_schema(self, cmd):
"""schema ?TABLE? [TABLE...]: Shows SQL for table
If you give one or more tables then their schema is listed
(including indices). If you don't specify any then all
schemas are listed. TABLE is a like pattern so you can % for
wildcards.
"""
self.push_output()
self.output=self.output_list
self.header=False
try:
if len(cmd)==0:
cmd=['%']
for n in cmd:
self.process_sql("SELECT sql||';' FROM "
"(SELECT sql sql, type type, tbl_name tbl_name, name name "
"FROM sqlite_master UNION ALL "
"SELECT sql, type, tbl_name, name FROM sqlite_temp_master) "
"WHERE tbl_name LIKE ?1 AND type!='meta' AND sql NOTNULL AND name NOT LIKE 'sqlite_%' "
"ORDER BY substr(type,2,1), name", (n,), internal=True)
finally:
self.pop_output()
|
[
"def",
"command_schema",
"(",
"self",
",",
"cmd",
")",
":",
"self",
".",
"push_output",
"(",
")",
"self",
".",
"output",
"=",
"self",
".",
"output_list",
"self",
".",
"header",
"=",
"False",
"try",
":",
"if",
"len",
"(",
"cmd",
")",
"==",
"0",
":",
"cmd",
"=",
"[",
"'%'",
"]",
"for",
"n",
"in",
"cmd",
":",
"self",
".",
"process_sql",
"(",
"\"SELECT sql||';' FROM \"",
"\"(SELECT sql sql, type type, tbl_name tbl_name, name name \"",
"\"FROM sqlite_master UNION ALL \"",
"\"SELECT sql, type, tbl_name, name FROM sqlite_temp_master) \"",
"\"WHERE tbl_name LIKE ?1 AND type!='meta' AND sql NOTNULL AND name NOT LIKE 'sqlite_%' \"",
"\"ORDER BY substr(type,2,1), name\"",
",",
"(",
"n",
",",
")",
",",
"internal",
"=",
"True",
")",
"finally",
":",
"self",
".",
"pop_output",
"(",
")"
] |
https://github.com/plasticityai/supersqlite/blob/d74da749c6fa5df021df3968b854b9a59f829e17/supersqlite/third_party/_apsw/tools/shell.py#L2185-L2207
|
||
microsoft/NimbusML
|
f6be39ce9359786976429bab0ccd837e849b4ba5
|
src/python/nimbusml/datasets/image.py
|
python
|
get_RevolutionAnalyticslogo
|
()
|
return os.path.join(this, "images", "RevolutionAnalyticslogo.png")
|
Return a path to *RevolutionAnalyticslogo.png*.
.. image:: images/RevolutionAnalyticslogo.png
|
Return a path to *RevolutionAnalyticslogo.png*.
|
[
"Return",
"a",
"path",
"to",
"*",
"RevolutionAnalyticslogo",
".",
"png",
"*",
"."
] |
def get_RevolutionAnalyticslogo():
"""
Return a path to *RevolutionAnalyticslogo.png*.
.. image:: images/RevolutionAnalyticslogo.png
"""
this = os.path.abspath(os.path.dirname(__file__))
return os.path.join(this, "images", "RevolutionAnalyticslogo.png")
|
[
"def",
"get_RevolutionAnalyticslogo",
"(",
")",
":",
"this",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"this",
",",
"\"images\"",
",",
"\"RevolutionAnalyticslogo.png\"",
")"
] |
https://github.com/microsoft/NimbusML/blob/f6be39ce9359786976429bab0ccd837e849b4ba5/src/python/nimbusml/datasets/image.py#L11-L18
|
|
hyperspy/hyperspy
|
1ffb3fab33e607045a37f30c1463350b72617e10
|
hyperspy/roi.py
|
python
|
BaseROI.is_valid
|
(self)
|
return t.Undefined not in tuple(self)
|
Determine if the ROI is in a valid state.
This is typically determined by all the coordinates being defined,
and that the values makes sense relative to each other.
|
Determine if the ROI is in a valid state.
|
[
"Determine",
"if",
"the",
"ROI",
"is",
"in",
"a",
"valid",
"state",
"."
] |
def is_valid(self):
"""
Determine if the ROI is in a valid state.
This is typically determined by all the coordinates being defined,
and that the values makes sense relative to each other.
"""
return t.Undefined not in tuple(self)
|
[
"def",
"is_valid",
"(",
"self",
")",
":",
"return",
"t",
".",
"Undefined",
"not",
"in",
"tuple",
"(",
"self",
")"
] |
https://github.com/hyperspy/hyperspy/blob/1ffb3fab33e607045a37f30c1463350b72617e10/hyperspy/roi.py#L129-L136
|
|
BIGBALLON/CIFAR-ZOO
|
94b4c75e02d0c62ec1c7ce862863b0a810d9d0a5
|
models/lenet.py
|
python
|
lenet
|
(num_classes)
|
return LeNet(num_classes=num_classes)
|
[] |
def lenet(num_classes):
return LeNet(num_classes=num_classes)
|
[
"def",
"lenet",
"(",
"num_classes",
")",
":",
"return",
"LeNet",
"(",
"num_classes",
"=",
"num_classes",
")"
] |
https://github.com/BIGBALLON/CIFAR-ZOO/blob/94b4c75e02d0c62ec1c7ce862863b0a810d9d0a5/models/lenet.py#L29-L30
|
|||
yzhao062/combo
|
229d578de498b47ae03cf2580472aceebf8c2766
|
combo/models/classifier_dcs.py
|
python
|
DCS_LA.predict
|
(self, X)
|
return self._predict_internal(X, predict_proba=False)
|
Predict the class labels for the provided data.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples.
Returns
-------
labels : numpy array of shape (n_samples,)
Class labels for each data sample.
|
Predict the class labels for the provided data.
|
[
"Predict",
"the",
"class",
"labels",
"for",
"the",
"provided",
"data",
"."
] |
def predict(self, X):
"""Predict the class labels for the provided data.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples.
Returns
-------
labels : numpy array of shape (n_samples,)
Class labels for each data sample.
"""
return self._predict_internal(X, predict_proba=False)
|
[
"def",
"predict",
"(",
"self",
",",
"X",
")",
":",
"return",
"self",
".",
"_predict_internal",
"(",
"X",
",",
"predict_proba",
"=",
"False",
")"
] |
https://github.com/yzhao062/combo/blob/229d578de498b47ae03cf2580472aceebf8c2766/combo/models/classifier_dcs.py#L115-L128
|
|
meduza-corp/interstellar
|
40a801ccd7856491726f5a126621d9318cabe2e1
|
gsutil/third_party/boto/boto/opsworks/layer1.py
|
python
|
OpsWorksConnection.create_stack
|
(self, name, region, service_role_arn,
default_instance_profile_arn, vpc_id=None,
attributes=None, default_os=None, hostname_theme=None,
default_availability_zone=None, default_subnet_id=None,
custom_json=None, configuration_manager=None,
use_custom_cookbooks=None, custom_cookbooks_source=None,
default_ssh_key_name=None,
default_root_device_type=None)
|
return self.make_request(action='CreateStack',
body=json.dumps(params))
|
Creates a new stack. For more information, see `Create a New
Stack`_.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type name: string
:param name: The stack name.
:type region: string
:param region: The stack AWS region, such as "us-east-1". For more
information about Amazon regions, see `Regions and Endpoints`_.
:type vpc_id: string
:param vpc_id: The ID of the VPC that the stack is to be launched into.
It must be in the specified region. All instances will be launched
into this VPC, and you cannot change the ID later.
+ If your account supports EC2 Classic, the default value is no VPC.
+ If your account does not support EC2 Classic, the default value is
the default VPC for the specified region.
If the VPC ID corresponds to a default VPC and you have specified
either the `DefaultAvailabilityZone` or the `DefaultSubnetId`
parameter only, AWS OpsWorks infers the value of the other
parameter. If you specify neither parameter, AWS OpsWorks sets
these parameters to the first valid Availability Zone for the
specified region and the corresponding default VPC subnet ID,
respectively.
If you specify a nondefault VPC ID, note the following:
+ It must belong to a VPC in your account that is in the specified
region.
+ You must specify a value for `DefaultSubnetId`.
For more information on how to use AWS OpsWorks with a VPC, see
`Running a Stack in a VPC`_. For more information on default VPC
and EC2 Classic, see `Supported Platforms`_.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes bag.
:type service_role_arn: string
:param service_role_arn: The stack AWS Identity and Access Management
(IAM) role, which allows AWS OpsWorks to work with AWS resources on
your behalf. You must set this parameter to the Amazon Resource
Name (ARN) for an existing IAM role. For more information about IAM
ARNs, see `Using Identifiers`_.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
the default profile for all of the stack's EC2 instances. For more
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
:param default_os: The stack's default operating system, which must be
set to `Amazon Linux` or `Ubuntu 12.04 LTS`. The default option is
`Amazon Linux`.
:type hostname_theme: string
:param hostname_theme: The stack's host name theme, with spaces are
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
returns a host name based on the current theme.
:type default_availability_zone: string
:param default_availability_zone: The stack's default Availability
Zone, which must be in the specified region. For more information,
see `Regions and Endpoints`_. If you also specify a value for
`DefaultSubnetId`, the subnet must be in the same zone. For more
information, see the `VpcId` parameter description.
:type default_subnet_id: string
:param default_subnet_id: The stack's default subnet ID. All instances
will be launched into this subnet unless you specify otherwise when
you create the instance. If you also specify a value for
`DefaultAvailabilityZone`, the subnet must be in that zone. For
information on default values and when this parameter is required,
see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
format and must escape characters such as '"'.: `"{\"key1\":
\"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_.
:type configuration_manager: dict
:param configuration_manager: The configuration manager. When you
create a stack we recommend that you use the configuration manager
to specify the Chef version, 0.9 or 11.4. The default value is
currently 0.9. However, we expect to change the default value to
11.4 in September 2013.
:type use_custom_cookbooks: boolean
:param use_custom_cookbooks: Whether the stack uses custom cookbooks.
:type custom_cookbooks_source: dict
:param custom_cookbooks_source: Contains the information required to
retrieve an app or cookbook from a repository. For more
information, see `Creating Apps`_ or `Custom Recipes and
Cookbooks`_.
:type default_ssh_key_name: string
:param default_ssh_key_name: A default SSH key for the stack instances.
You can override this value when you create or update an instance.
:type default_root_device_type: string
:param default_root_device_type: The default root device type. This
value is used by default for all instances in the cloned stack, but
you can override it when you create an instance. For more
information, see `Storage for the Root Device`_.
|
Creates a new stack. For more information, see `Create a New
Stack`_.
|
[
"Creates",
"a",
"new",
"stack",
".",
"For",
"more",
"information",
"see",
"Create",
"a",
"New",
"Stack",
"_",
"."
] |
def create_stack(self, name, region, service_role_arn,
default_instance_profile_arn, vpc_id=None,
attributes=None, default_os=None, hostname_theme=None,
default_availability_zone=None, default_subnet_id=None,
custom_json=None, configuration_manager=None,
use_custom_cookbooks=None, custom_cookbooks_source=None,
default_ssh_key_name=None,
default_root_device_type=None):
"""
Creates a new stack. For more information, see `Create a New
Stack`_.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type name: string
:param name: The stack name.
:type region: string
:param region: The stack AWS region, such as "us-east-1". For more
information about Amazon regions, see `Regions and Endpoints`_.
:type vpc_id: string
:param vpc_id: The ID of the VPC that the stack is to be launched into.
It must be in the specified region. All instances will be launched
into this VPC, and you cannot change the ID later.
+ If your account supports EC2 Classic, the default value is no VPC.
+ If your account does not support EC2 Classic, the default value is
the default VPC for the specified region.
If the VPC ID corresponds to a default VPC and you have specified
either the `DefaultAvailabilityZone` or the `DefaultSubnetId`
parameter only, AWS OpsWorks infers the value of the other
parameter. If you specify neither parameter, AWS OpsWorks sets
these parameters to the first valid Availability Zone for the
specified region and the corresponding default VPC subnet ID,
respectively.
If you specify a nondefault VPC ID, note the following:
+ It must belong to a VPC in your account that is in the specified
region.
+ You must specify a value for `DefaultSubnetId`.
For more information on how to use AWS OpsWorks with a VPC, see
`Running a Stack in a VPC`_. For more information on default VPC
and EC2 Classic, see `Supported Platforms`_.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes bag.
:type service_role_arn: string
:param service_role_arn: The stack AWS Identity and Access Management
(IAM) role, which allows AWS OpsWorks to work with AWS resources on
your behalf. You must set this parameter to the Amazon Resource
Name (ARN) for an existing IAM role. For more information about IAM
ARNs, see `Using Identifiers`_.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
the default profile for all of the stack's EC2 instances. For more
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
:param default_os: The stack's default operating system, which must be
set to `Amazon Linux` or `Ubuntu 12.04 LTS`. The default option is
`Amazon Linux`.
:type hostname_theme: string
:param hostname_theme: The stack's host name theme, with spaces
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
returns a host name based on the current theme.
:type default_availability_zone: string
:param default_availability_zone: The stack's default Availability
Zone, which must be in the specified region. For more information,
see `Regions and Endpoints`_. If you also specify a value for
`DefaultSubnetId`, the subnet must be in the same zone. For more
information, see the `VpcId` parameter description.
:type default_subnet_id: string
:param default_subnet_id: The stack's default subnet ID. All instances
will be launched into this subnet unless you specify otherwise when
you create the instance. If you also specify a value for
`DefaultAvailabilityZone`, the subnet must be in that zone. For
information on default values and when this parameter is required,
see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
format and must escape characters such as '"': `"{\"key1\":
\"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_.
:type configuration_manager: dict
:param configuration_manager: The configuration manager. When you
create a stack we recommend that you use the configuration manager
to specify the Chef version, 0.9 or 11.4. The default value is
currently 0.9. However, we expect to change the default value to
11.4 in September 2013.
:type use_custom_cookbooks: boolean
:param use_custom_cookbooks: Whether the stack uses custom cookbooks.
:type custom_cookbooks_source: dict
:param custom_cookbooks_source: Contains the information required to
retrieve an app or cookbook from a repository. For more
information, see `Creating Apps`_ or `Custom Recipes and
Cookbooks`_.
:type default_ssh_key_name: string
:param default_ssh_key_name: A default SSH key for the stack instances.
You can override this value when you create or update an instance.
:type default_root_device_type: string
:param default_root_device_type: The default root device type. This
value is used by default for all instances in the cloned stack, but
you can override it when you create an instance. For more
information, see `Storage for the Root Device`_.
"""
params = {
'Name': name,
'Region': region,
'ServiceRoleArn': service_role_arn,
'DefaultInstanceProfileArn': default_instance_profile_arn,
}
if vpc_id is not None:
params['VpcId'] = vpc_id
if attributes is not None:
params['Attributes'] = attributes
if default_os is not None:
params['DefaultOs'] = default_os
if hostname_theme is not None:
params['HostnameTheme'] = hostname_theme
if default_availability_zone is not None:
params['DefaultAvailabilityZone'] = default_availability_zone
if default_subnet_id is not None:
params['DefaultSubnetId'] = default_subnet_id
if custom_json is not None:
params['CustomJson'] = custom_json
if configuration_manager is not None:
params['ConfigurationManager'] = configuration_manager
if use_custom_cookbooks is not None:
params['UseCustomCookbooks'] = use_custom_cookbooks
if custom_cookbooks_source is not None:
params['CustomCookbooksSource'] = custom_cookbooks_source
if default_ssh_key_name is not None:
params['DefaultSshKeyName'] = default_ssh_key_name
if default_root_device_type is not None:
params['DefaultRootDeviceType'] = default_root_device_type
return self.make_request(action='CreateStack',
body=json.dumps(params))
|
[
"def",
"create_stack",
"(",
"self",
",",
"name",
",",
"region",
",",
"service_role_arn",
",",
"default_instance_profile_arn",
",",
"vpc_id",
"=",
"None",
",",
"attributes",
"=",
"None",
",",
"default_os",
"=",
"None",
",",
"hostname_theme",
"=",
"None",
",",
"default_availability_zone",
"=",
"None",
",",
"default_subnet_id",
"=",
"None",
",",
"custom_json",
"=",
"None",
",",
"configuration_manager",
"=",
"None",
",",
"use_custom_cookbooks",
"=",
"None",
",",
"custom_cookbooks_source",
"=",
"None",
",",
"default_ssh_key_name",
"=",
"None",
",",
"default_root_device_type",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'Name'",
":",
"name",
",",
"'Region'",
":",
"region",
",",
"'ServiceRoleArn'",
":",
"service_role_arn",
",",
"'DefaultInstanceProfileArn'",
":",
"default_instance_profile_arn",
",",
"}",
"if",
"vpc_id",
"is",
"not",
"None",
":",
"params",
"[",
"'VpcId'",
"]",
"=",
"vpc_id",
"if",
"attributes",
"is",
"not",
"None",
":",
"params",
"[",
"'Attributes'",
"]",
"=",
"attributes",
"if",
"default_os",
"is",
"not",
"None",
":",
"params",
"[",
"'DefaultOs'",
"]",
"=",
"default_os",
"if",
"hostname_theme",
"is",
"not",
"None",
":",
"params",
"[",
"'HostnameTheme'",
"]",
"=",
"hostname_theme",
"if",
"default_availability_zone",
"is",
"not",
"None",
":",
"params",
"[",
"'DefaultAvailabilityZone'",
"]",
"=",
"default_availability_zone",
"if",
"default_subnet_id",
"is",
"not",
"None",
":",
"params",
"[",
"'DefaultSubnetId'",
"]",
"=",
"default_subnet_id",
"if",
"custom_json",
"is",
"not",
"None",
":",
"params",
"[",
"'CustomJson'",
"]",
"=",
"custom_json",
"if",
"configuration_manager",
"is",
"not",
"None",
":",
"params",
"[",
"'ConfigurationManager'",
"]",
"=",
"configuration_manager",
"if",
"use_custom_cookbooks",
"is",
"not",
"None",
":",
"params",
"[",
"'UseCustomCookbooks'",
"]",
"=",
"use_custom_cookbooks",
"if",
"custom_cookbooks_source",
"is",
"not",
"None",
":",
"params",
"[",
"'CustomCookbooksSource'",
"]",
"=",
"custom_cookbooks_source",
"if",
"default_ssh_key_name",
"is",
"not",
"None",
":",
"params",
"[",
"'DefaultSshKeyName'",
"]",
"=",
"default_ssh_key_name",
"if",
"default_root_device_type",
"is",
"not",
"None",
":",
"params",
"[",
"'DefaultRootDeviceType'",
"]",
"=",
"default_root_device_type",
"return",
"self",
".",
"make_request",
"(",
"action",
"=",
"'CreateStack'",
",",
"body",
"=",
"json",
".",
"dumps",
"(",
"params",
")",
")"
] |
https://github.com/meduza-corp/interstellar/blob/40a801ccd7856491726f5a126621d9318cabe2e1/gsutil/third_party/boto/boto/opsworks/layer1.py#L796-L976
|
|
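Editorial note on the create_stack entry above: only keyword arguments that were actually supplied end up in the JSON body sent to the CreateStack action; required fields are always present, and each optional field is added only when it is not None. A minimal standalone sketch of that assembly logic (no AWS connection, credentials, or make_request plumbing; the ARNs below are placeholders):

import json

def build_create_stack_params(name, region, service_role_arn,
                              default_instance_profile_arn, **optional):
    # Required fields always go in the request body.
    params = {
        'Name': name,
        'Region': region,
        'ServiceRoleArn': service_role_arn,
        'DefaultInstanceProfileArn': default_instance_profile_arn,
    }
    # Optional fields are included only when not None, mirroring the
    # chain of "if x is not None" checks in the method above.
    for key, value in optional.items():
        if value is not None:
            params[key] = value
    return json.dumps(params)

if __name__ == '__main__':
    body = build_create_stack_params(
        'demo-stack', 'us-east-1',
        'arn:aws:iam::123456789012:role/service-role',        # placeholder ARN
        'arn:aws:iam::123456789012:instance-profile/demo',    # placeholder ARN
        DefaultOs='Amazon Linux',
        DefaultRootDeviceType=None,  # None, so it is omitted from the body
    )
    print(body)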
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/combinat/permutation.py
|
python
|
StandardPermutations_avoiding_312.cardinality
|
(self)
|
return catalan_number(self.n)
|
EXAMPLES::
sage: Permutations(5, avoiding=[3, 1, 2]).cardinality()
42
sage: len( Permutations(5, avoiding=[3, 1, 2]).list() )
42
|
EXAMPLES::
|
[
"EXAMPLES",
"::"
] |
def cardinality(self):
"""
EXAMPLES::
sage: Permutations(5, avoiding=[3, 1, 2]).cardinality()
42
sage: len( Permutations(5, avoiding=[3, 1, 2]).list() )
42
"""
return catalan_number(self.n)
|
[
"def",
"cardinality",
"(",
"self",
")",
":",
"return",
"catalan_number",
"(",
"self",
".",
"n",
")"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/combinat/permutation.py#L9127-L9136
|
|
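The cardinality entry above simply returns catalan_number(self.n): permutations avoiding the pattern 312 are counted by the Catalan numbers. The same count can be checked outside Sage with a few lines of plain Python (a sketch, not Sage's implementation):

from math import comb

def catalan(n):
    # nth Catalan number: C(n) = binom(2n, n) / (n + 1)
    return comb(2 * n, n) // (n + 1)

# Length-5 permutations avoiding 312 are counted by C(5) = 42,
# matching the doctest in the entry above.
assert catalan(5) == 42
print([catalan(n) for n in range(8)])  # [1, 1, 2, 5, 14, 42, 132, 429]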
mirumee/ariadne
|
1b8b7ef0ed65cde95a6bd9e25500584a38393b71
|
ariadne/graphql.py
|
python
|
handle_graphql_errors
|
(
errors: Sequence[GraphQLError],
*,
logger,
error_formatter,
debug,
extension_manager=None,
)
|
return False, response
|
[] |
def handle_graphql_errors(
errors: Sequence[GraphQLError],
*,
logger,
error_formatter,
debug,
extension_manager=None,
) -> GraphQLResult:
for error in errors:
log_error(error, logger)
response = {"errors": [error_formatter(error, debug) for error in errors]}
if extension_manager:
extension_manager.has_errors(errors)
add_extensions_to_response(extension_manager, response)
return False, response
|
[
"def",
"handle_graphql_errors",
"(",
"errors",
":",
"Sequence",
"[",
"GraphQLError",
"]",
",",
"*",
",",
"logger",
",",
"error_formatter",
",",
"debug",
",",
"extension_manager",
"=",
"None",
",",
")",
"->",
"GraphQLResult",
":",
"for",
"error",
"in",
"errors",
":",
"log_error",
"(",
"error",
",",
"logger",
")",
"response",
"=",
"{",
"\"errors\"",
":",
"[",
"error_formatter",
"(",
"error",
",",
"debug",
")",
"for",
"error",
"in",
"errors",
"]",
"}",
"if",
"extension_manager",
":",
"extension_manager",
".",
"has_errors",
"(",
"errors",
")",
"add_extensions_to_response",
"(",
"extension_manager",
",",
"response",
")",
"return",
"False",
",",
"response"
] |
https://github.com/mirumee/ariadne/blob/1b8b7ef0ed65cde95a6bd9e25500584a38393b71/ariadne/graphql.py#L291-L305
|
|||
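For the handle_graphql_errors entry above, the return value is a (success, response) pair in which success is always False and the response carries one formatted entry per error. A standalone sketch with stand-in logger and formatter (the real function takes graphql-core GraphQLError objects and an optional extension manager, both omitted here):

import logging

logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger("graphql")

def format_error(error, debug=False):
    # Stand-in for ariadne's error formatter: expose the message only.
    payload = {"message": str(error)}
    if debug:
        payload["debug"] = repr(error)
    return payload

def handle_errors(errors, *, logger, error_formatter, debug=False):
    # Mirrors the control flow of the entry above, minus extensions.
    for error in errors:
        logger.error(error)
    response = {"errors": [error_formatter(e, debug) for e in errors]}
    return False, response

success, response = handle_errors(
    [ValueError("Cannot query field 'foo'")],
    logger=logger, error_formatter=format_error, debug=True)
print(success, response)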
SpockBotMC/SpockBot
|
f89911551f18357720034fbaa52837a0d09f66ea
|
spockbot/mcp/nbt.py
|
python
|
TagList.__repr__
|
(self)
|
return "%i entries of type %s" % (
len(self.tags), TAGLIST[self.tagID].__name__)
|
[] |
def __repr__(self):
return "%i entries of type %s" % (
len(self.tags), TAGLIST[self.tagID].__name__)
|
[
"def",
"__repr__",
"(",
"self",
")",
":",
"return",
"\"%i entries of type %s\"",
"%",
"(",
"len",
"(",
"self",
".",
"tags",
")",
",",
"TAGLIST",
"[",
"self",
".",
"tagID",
"]",
".",
"__name__",
")"
] |
https://github.com/SpockBotMC/SpockBot/blob/f89911551f18357720034fbaa52837a0d09f66ea/spockbot/mcp/nbt.py#L382-L384
|
|||
markj3d/Red9_StudioPack
|
1d40a8bf84c45ce7eaefdd9ccfa3cdbeb1471919
|
core/Red9_AnimationUtils.py
|
python
|
AnimationUI.getPosePath
|
(self)
|
return os.path.join(self.getPoseDir(), '%s.pose' % self.getPoseSelected())
|
Return the full posePath for loading
|
Return the full posePath for loading
|
[
"Return",
"the",
"full",
"posePath",
"for",
"loading"
] |
def getPosePath(self):
'''
Return the full posePath for loading
'''
return os.path.join(self.getPoseDir(), '%s.pose' % self.getPoseSelected())
|
[
"def",
"getPosePath",
"(",
"self",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"getPoseDir",
"(",
")",
",",
"'%s.pose'",
"%",
"self",
".",
"getPoseSelected",
"(",
")",
")"
] |
https://github.com/markj3d/Red9_StudioPack/blob/1d40a8bf84c45ce7eaefdd9ccfa3cdbeb1471919/core/Red9_AnimationUtils.py#L2176-L2180
|
|
cbaziotis/ekphrasis
|
e70c29f3c0c27e274a40a5ad45a5f2a0e24d432d
|
ekphrasis/classes/segmenter.py
|
python
|
Segmenter.splits
|
(self, text)
|
return [(text[:i + 1], text[i + 1:])
for i in range(min(len(text), self.L))]
|
Return a list of all possible (first, rem) pairs with max length of first <=L
:param text:
:return:
|
Return a list of all possible (first, rem) pairs with max length of first <=L
:param text:
:return:
|
[
"Return",
"a",
"list",
"of",
"all",
"possible",
"(",
"first",
"rem",
")",
"pairs",
"with",
"max",
"length",
"of",
"first",
"<",
"=",
"L",
":",
"param",
"text",
":",
":",
"return",
":"
] |
def splits(self, text):
"""
Return a list of all possible (first, rem) pairs with max length of first <=L
:param text:
:return:
"""
return [(text[:i + 1], text[i + 1:])
for i in range(min(len(text), self.L))]
|
[
"def",
"splits",
"(",
"self",
",",
"text",
")",
":",
"return",
"[",
"(",
"text",
"[",
":",
"i",
"+",
"1",
"]",
",",
"text",
"[",
"i",
"+",
"1",
":",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"min",
"(",
"len",
"(",
"text",
")",
",",
"self",
".",
"L",
")",
")",
"]"
] |
https://github.com/cbaziotis/ekphrasis/blob/e70c29f3c0c27e274a40a5ad45a5f2a0e24d432d/ekphrasis/classes/segmenter.py#L107-L114
|
|
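The splits entry above enumerates every (first, rem) cut of a string with the first part capped at L characters. The same comprehension works outside the class; a sketch with L passed explicitly instead of read from self:

def splits(text, L=4):
    # All (first, rem) pairs where len(first) <= L.
    return [(text[:i + 1], text[i + 1:]) for i in range(min(len(text), L))]

print(splits("word"))
# [('w', 'ord'), ('wo', 'rd'), ('wor', 'd'), ('word', '')]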
MozillaSecurity/grizzly
|
1c41478e32f323189a2c322ec041c3e0902a158a
|
grizzly/common/status.py
|
python
|
Status.rate
|
(self)
|
return self.iteration / float(runtime) if runtime else 0
|
Calculate the number of iterations performed per second since start()
was called.
Args:
None
Returns:
float: Number of iterations performed per second.
|
Calculate the number of iterations performed per second since start()
was called.
|
[
"Calculate",
"the",
"number",
"of",
"iterations",
"performed",
"per",
"second",
"since",
"start",
"()",
"was",
"called",
"."
] |
def rate(self):
"""Calculate the number of iterations performed per second since start()
was called.
Args:
None
Returns:
float: Number of iterations performed per second.
"""
runtime = self.runtime
return self.iteration / float(runtime) if runtime else 0
|
[
"def",
"rate",
"(",
"self",
")",
":",
"runtime",
"=",
"self",
".",
"runtime",
"return",
"self",
".",
"iteration",
"/",
"float",
"(",
"runtime",
")",
"if",
"runtime",
"else",
"0"
] |
https://github.com/MozillaSecurity/grizzly/blob/1c41478e32f323189a2c322ec041c3e0902a158a/grizzly/common/status.py#L279-L290
|
|
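The rate entry above is a plain iterations-per-second ratio guarded against a zero runtime, which matters because the status object can be sampled before any time has elapsed. A trivial standalone check (property and class boilerplate omitted):

def rate(iteration, runtime):
    # Iterations per second; 0 when no time has elapsed yet.
    return iteration / float(runtime) if runtime else 0

print(rate(120, 60.0))  # 2.0
print(rate(120, 0))     # 0  (avoids ZeroDivisionError)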
thautwarm/restrain-jit
|
f76b3e9ae8a34d2eef87a42cc87197153f14634c
|
restrain_jit/becython/cy_loader.py
|
python
|
compile_module
|
(under_dir: Path, mod_name: str, source_code: str, libs=())
|
return mod
|
[] |
def compile_module(under_dir: Path, mod_name: str, source_code: str, libs=()):
# TODO:
# tempfile.TemporaryDirectory will close unexpectedly before removing the generated module.
# Since that we don't delete the temporary dir as a workaround.
mod_name = mod_name
dirname = tempfile.mkdtemp(dir=str(under_dir))
mod_path = mod_name + '.pyx'
with open(os.path.join(dirname, mod_path), 'w') as pyx_file, open(
os.path.join(dirname, 'setup.py'), 'w') as setup_file:
pyx_file.write(source_code)
setup_file.write(
template.substitute(
module=repr(mod_name),
module_path=repr(mod_path),
**get_includes_and_libs()))
cwd = os.getcwd()
try:
os.chdir(dirname)
args = ['setup.py', 'build_ext', '--inplace']
c = exec_cc(sys.executable, args)
hd = next(c)
if hd is not 0:
sys.stderr.buffer.write(b''.join(c))
raise RuntimeError("Cython compiler failed.")
# find the python extension module.
# pyd_name = next(each for each in os.listdir(dirname) if each.endswith(suffix))
finally:
os.chdir(cwd)
mod = import_module("{}.{}.{}".format(under_dir.name,
os.path.split(dirname)[1], mod_name))
return mod
|
[
"def",
"compile_module",
"(",
"under_dir",
":",
"Path",
",",
"mod_name",
":",
"str",
",",
"source_code",
":",
"str",
",",
"libs",
"=",
"(",
")",
")",
":",
"# TODO:",
"# tempfile.TemporaryDirectory will close unexpectedly before removing the generated module.",
"# Since that we don't delete the temporary dir as a workaround.",
"mod_name",
"=",
"mod_name",
"dirname",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"dir",
"=",
"str",
"(",
"under_dir",
")",
")",
"mod_path",
"=",
"mod_name",
"+",
"'.pyx'",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"mod_path",
")",
",",
"'w'",
")",
"as",
"pyx_file",
",",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"'setup.py'",
")",
",",
"'w'",
")",
"as",
"setup_file",
":",
"pyx_file",
".",
"write",
"(",
"source_code",
")",
"setup_file",
".",
"write",
"(",
"template",
".",
"substitute",
"(",
"module",
"=",
"repr",
"(",
"mod_name",
")",
",",
"module_path",
"=",
"repr",
"(",
"mod_path",
")",
",",
"*",
"*",
"get_includes_and_libs",
"(",
")",
")",
")",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"try",
":",
"os",
".",
"chdir",
"(",
"dirname",
")",
"args",
"=",
"[",
"'setup.py'",
",",
"'build_ext'",
",",
"'--inplace'",
"]",
"c",
"=",
"exec_cc",
"(",
"sys",
".",
"executable",
",",
"args",
")",
"hd",
"=",
"next",
"(",
"c",
")",
"if",
"hd",
"is",
"not",
"0",
":",
"sys",
".",
"stderr",
".",
"buffer",
".",
"write",
"(",
"b''",
".",
"join",
"(",
"c",
")",
")",
"raise",
"RuntimeError",
"(",
"\"Cython compiler failed.\"",
")",
"# find the python extension module.",
"# pyd_name = next(each for each in os.listdir(dirname) if each.endswith(suffix))",
"finally",
":",
"os",
".",
"chdir",
"(",
"cwd",
")",
"mod",
"=",
"import_module",
"(",
"\"{}.{}.{}\"",
".",
"format",
"(",
"under_dir",
".",
"name",
",",
"os",
".",
"path",
".",
"split",
"(",
"dirname",
")",
"[",
"1",
"]",
",",
"mod_name",
")",
")",
"return",
"mod"
] |
https://github.com/thautwarm/restrain-jit/blob/f76b3e9ae8a34d2eef87a42cc87197153f14634c/restrain_jit/becython/cy_loader.py#L71-L108
|
|||
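In the compile_module entry above, the call to template.substitute(...) suggests a module-level string.Template holding a setup.py skeleton. The actual template and get_includes_and_libs() live elsewhere in that module, so the one below is a hypothetical stand-in that only illustrates the substitution step:

from string import Template

# Hypothetical stand-in for the module-level `template` used above.
setup_template = Template("""\
from setuptools import setup
from Cython.Build import cythonize

setup(name=$module,
      ext_modules=cythonize($module_path, include_path=$include_dirs))
""")

print(setup_template.substitute(
    module=repr("demo_mod"),
    module_path=repr("demo_mod.pyx"),
    include_dirs=repr([])))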
spyder-ide/spyder
|
55da47c032dfcf519600f67f8b30eab467f965e7
|
spyder/plugins/ipythonconsole/widgets/shell.py
|
python
|
ShellWidget.interrupt_kernel
|
(self)
|
Attempts to interrupt the running kernel.
|
Attempts to interrupt the running kernel.
|
[
"Attempts",
"to",
"interrupt",
"the",
"running",
"kernel",
"."
] |
def interrupt_kernel(self):
"""Attempts to interrupt the running kernel."""
# Empty queue when interrupting
# Fixes spyder-ide/spyder#7293.
self._execute_queue = []
super(ShellWidget, self).interrupt_kernel()
|
[
"def",
"interrupt_kernel",
"(",
"self",
")",
":",
"# Empty queue when interrupting",
"# Fixes spyder-ide/spyder#7293.",
"self",
".",
"_execute_queue",
"=",
"[",
"]",
"super",
"(",
"ShellWidget",
",",
"self",
")",
".",
"interrupt_kernel",
"(",
")"
] |
https://github.com/spyder-ide/spyder/blob/55da47c032dfcf519600f67f8b30eab467f965e7/spyder/plugins/ipythonconsole/widgets/shell.py#L242-L247
|
||
Droidtown/ArticutAPI
|
ee415bb30c9722a85334d54d7015d5ad3870205f
|
ArticutAPI/Toolkit/NER.py
|
python
|
GenericNER.getDecimal
|
(self, ArticutResultDICT, indexWithPOS=True)
|
return resultLIST
|
Extract the strings describing decimal numbers from the text according to the MSRA (微軟亞洲研究院, Microsoft Research Lab Asia) NER standard
|
Extract the strings describing decimal numbers from the text according to the MSRA (微軟亞洲研究院, Microsoft Research Lab Asia) NER standard
|
[
"依",
"MSRA",
"(",
"微軟亞洲研究院",
"Microsoft",
"Research",
"Lab",
"Asia",
")",
"NER",
"標準取出文本中的描述「小數」的字串"
] |
def getDecimal(self, ArticutResultDICT, indexWithPOS=True):
'''
Extract the strings describing decimal numbers from the text according to the MSRA (微軟亞洲研究院, Microsoft Research Lab Asia) NER standard
'''
if self.decimalPat !=None:
pass
else:
self.decimalPat = re.compile("<ENTITY_num>[^<..點]*?[..點][^<..點]+?</ENTITY_num>")
resultLIST = self._getMSRA(ArticutResultDICT, self.decimalPat, indexWithPOS)
return resultLIST
|
[
"def",
"getDecimal",
"(",
"self",
",",
"ArticutResultDICT",
",",
"indexWithPOS",
"=",
"True",
")",
":",
"if",
"self",
".",
"decimalPat",
"!=",
"None",
":",
"pass",
"else",
":",
"self",
".",
"decimalPat",
"=",
"re",
".",
"compile",
"(",
"\"<ENTITY_num>[^<..點]*?[..點][^<..點]+?</ENTITY_num>\")",
"",
"resultLIST",
"=",
"self",
".",
"_getMSRA",
"(",
"ArticutResultDICT",
",",
"self",
".",
"decimalPat",
",",
"indexWithPOS",
")",
"return",
"resultLIST"
] |
https://github.com/Droidtown/ArticutAPI/blob/ee415bb30c9722a85334d54d7015d5ad3870205f/ArticutAPI/Toolkit/NER.py#L340-L351
|
|
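For the getDecimal entry above, the compiled pattern matches <ENTITY_num> spans that contain a decimal separator (one of ., ., or 點). A quick standalone check against a hand-made tagged string; the real input is the markup produced by the Articut service, so the sample below is only illustrative:

import re

decimal_pat = re.compile("<ENTITY_num>[^<..點]*?[..點][^<..點]+?</ENTITY_num>")

sample = "<ENTITY_num>三點五</ENTITY_num>公斤和<ENTITY_num>42</ENTITY_num>顆蘋果"
print(decimal_pat.findall(sample))
# Only the first span matches, because it contains the decimal marker 點.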
GoogleCloudPlatform/appengine-mapreduce
|
2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6
|
python/src/mapreduce/shuffler.py
|
python
|
_MergingReader.split_input
|
(cls, mapper_spec)
|
return [cls([0] * len(files), max_values_count, max_values_size)
for files in filelists]
|
Split input into multiple shards.
|
Split input into multiple shards.
|
[
"Split",
"input",
"into",
"multiple",
"shards",
"."
] |
def split_input(cls, mapper_spec):
"""Split input into multiple shards."""
filelists = mapper_spec.params[cls.FILES_PARAM]
max_values_count = mapper_spec.params.get(cls.MAX_VALUES_COUNT_PARAM, -1)
max_values_size = mapper_spec.params.get(cls.MAX_VALUES_SIZE_PARAM, -1)
return [cls([0] * len(files), max_values_count, max_values_size)
for files in filelists]
|
[
"def",
"split_input",
"(",
"cls",
",",
"mapper_spec",
")",
":",
"filelists",
"=",
"mapper_spec",
".",
"params",
"[",
"cls",
".",
"FILES_PARAM",
"]",
"max_values_count",
"=",
"mapper_spec",
".",
"params",
".",
"get",
"(",
"cls",
".",
"MAX_VALUES_COUNT_PARAM",
",",
"-",
"1",
")",
"max_values_size",
"=",
"mapper_spec",
".",
"params",
".",
"get",
"(",
"cls",
".",
"MAX_VALUES_SIZE_PARAM",
",",
"-",
"1",
")",
"return",
"[",
"cls",
"(",
"[",
"0",
"]",
"*",
"len",
"(",
"files",
")",
",",
"max_values_count",
",",
"max_values_size",
")",
"for",
"files",
"in",
"filelists",
"]"
] |
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/shuffler.py#L386-L392
|
|
sahana/eden
|
1696fa50e90ce967df69f66b571af45356cc18da
|
modules/s3/s3gis.py
|
python
|
S3GIS.get_locations
|
(table,
query,
join = True,
geojson = True,
)
|
return output
|
Returns the locations for an XML export
- used by S3GIS.get_location_data() and S3PivotTable.geojson()
TODO:
Support multiple locations for a single resource
(e.g. a Project working in multiple Communities)
|
Returns the locations for an XML export
- used by S3GIS.get_location_data() and S3PivotTable.geojson()
|
[
"Returns",
"the",
"locations",
"for",
"an",
"XML",
"export",
"-",
"used",
"by",
"S3GIS",
".",
"get_location_data",
"()",
"and",
"S3PivotTable",
".",
"geojson",
"()"
] |
def get_locations(table,
query,
join = True,
geojson = True,
):
"""
Returns the locations for an XML export
- used by S3GIS.get_location_data() and S3PivotTable.geojson()
TODO:
Support multiple locations for a single resource
(e.g. a Project working in multiple Communities)
"""
db = current.db
tablename = table._tablename
gtable = current.s3db.gis_location
settings = current.deployment_settings
tolerance = settings.get_gis_simplify_tolerance()
output = {}
if settings.get_gis_spatialdb():
if geojson:
precision = settings.get_gis_precision()
if tolerance:
# Do the Simplify & GeoJSON direct from the DB
web2py_installed_version = parse_version(global_settings.web2py_version)
web2py_installed_datetime = web2py_installed_version[4] # datetime_index = 4
if web2py_installed_datetime >= datetime.datetime(2015, 1, 17, 0, 7, 4):
# Use http://www.postgis.org/docs/ST_SimplifyPreserveTopology.html
rows = db(query).select(table.id,
gtable.the_geom.st_simplifypreservetopology(tolerance).st_asgeojson(precision=precision).with_alias("geojson"))
else:
# Use http://www.postgis.org/docs/ST_Simplify.html
rows = db(query).select(table.id,
gtable.the_geom.st_simplify(tolerance).st_asgeojson(precision=precision).with_alias("geojson"))
else:
# Do the GeoJSON direct from the DB
rows = db(query).select(table.id,
gtable.the_geom.st_asgeojson(precision=precision).with_alias("geojson"))
for row in rows:
key = row[tablename].id
if key in output:
output[key].append(row.geojson)
else:
output[key] = [row.geojson]
else:
if tolerance:
# Do the Simplify direct from the DB
rows = db(query).select(table.id,
gtable.the_geom.st_simplify(tolerance).st_astext().with_alias("wkt"))
else:
rows = db(query).select(table.id,
gtable.the_geom.st_astext().with_alias("wkt"))
for row in rows:
key = row[tablename].id
if key in output:
output[key].append(row.wkt)
else:
output[key] = [row.wkt]
else:
rows = db(query).select(table.id,
gtable.wkt,
)
simplify = S3GIS.simplify
if geojson:
# Simplify the polygon to reduce download size
if join:
for row in rows:
g = simplify(row["gis_location"].wkt,
tolerance = tolerance,
output = "geojson")
if g:
key = row[tablename].id
if key in output:
output[key].append(g)
else:
output[key] = [g]
else:
# gis_location: always single
for row in rows:
g = simplify(row.wkt,
tolerance = tolerance,
output = "geojson")
if g:
output[row.id] = g
else:
if join:
if tolerance:
# Simplify the polygon to reduce download size
# & also to work around the recursion limit in libxslt
# http://blog.gmane.org/gmane.comp.python.lxml.devel/day=20120309
for row in rows:
wkt = simplify(row["gis_location"].wkt)
if wkt:
key = row[tablename].id
if key in output:
output[key].append(wkt)
else:
output[key] = [wkt]
else:
for row in rows:
wkt = row["gis_location"].wkt
if wkt:
key = row[tablename].id
if key in output:
output[key].append(wkt)
else:
output[key] = [wkt]
else:
# gis_location: always single
if tolerance:
for row in rows:
wkt = simplify(row.wkt)
if wkt:
output[row.id] = wkt
else:
for row in rows:
wkt = row.wkt
if wkt:
output[row.id] = wkt
return output
|
[
"def",
"get_locations",
"(",
"table",
",",
"query",
",",
"join",
"=",
"True",
",",
"geojson",
"=",
"True",
",",
")",
":",
"db",
"=",
"current",
".",
"db",
"tablename",
"=",
"table",
".",
"_tablename",
"gtable",
"=",
"current",
".",
"s3db",
".",
"gis_location",
"settings",
"=",
"current",
".",
"deployment_settings",
"tolerance",
"=",
"settings",
".",
"get_gis_simplify_tolerance",
"(",
")",
"output",
"=",
"{",
"}",
"if",
"settings",
".",
"get_gis_spatialdb",
"(",
")",
":",
"if",
"geojson",
":",
"precision",
"=",
"settings",
".",
"get_gis_precision",
"(",
")",
"if",
"tolerance",
":",
"# Do the Simplify & GeoJSON direct from the DB",
"web2py_installed_version",
"=",
"parse_version",
"(",
"global_settings",
".",
"web2py_version",
")",
"web2py_installed_datetime",
"=",
"web2py_installed_version",
"[",
"4",
"]",
"# datetime_index = 4",
"if",
"web2py_installed_datetime",
">=",
"datetime",
".",
"datetime",
"(",
"2015",
",",
"1",
",",
"17",
",",
"0",
",",
"7",
",",
"4",
")",
":",
"# Use http://www.postgis.org/docs/ST_SimplifyPreserveTopology.html",
"rows",
"=",
"db",
"(",
"query",
")",
".",
"select",
"(",
"table",
".",
"id",
",",
"gtable",
".",
"the_geom",
".",
"st_simplifypreservetopology",
"(",
"tolerance",
")",
".",
"st_asgeojson",
"(",
"precision",
"=",
"precision",
")",
".",
"with_alias",
"(",
"\"geojson\"",
")",
")",
"else",
":",
"# Use http://www.postgis.org/docs/ST_Simplify.html",
"rows",
"=",
"db",
"(",
"query",
")",
".",
"select",
"(",
"table",
".",
"id",
",",
"gtable",
".",
"the_geom",
".",
"st_simplify",
"(",
"tolerance",
")",
".",
"st_asgeojson",
"(",
"precision",
"=",
"precision",
")",
".",
"with_alias",
"(",
"\"geojson\"",
")",
")",
"else",
":",
"# Do the GeoJSON direct from the DB",
"rows",
"=",
"db",
"(",
"query",
")",
".",
"select",
"(",
"table",
".",
"id",
",",
"gtable",
".",
"the_geom",
".",
"st_asgeojson",
"(",
"precision",
"=",
"precision",
")",
".",
"with_alias",
"(",
"\"geojson\"",
")",
")",
"for",
"row",
"in",
"rows",
":",
"key",
"=",
"row",
"[",
"tablename",
"]",
".",
"id",
"if",
"key",
"in",
"output",
":",
"output",
"[",
"key",
"]",
".",
"append",
"(",
"row",
".",
"geojson",
")",
"else",
":",
"output",
"[",
"key",
"]",
"=",
"[",
"row",
".",
"geojson",
"]",
"else",
":",
"if",
"tolerance",
":",
"# Do the Simplify direct from the DB",
"rows",
"=",
"db",
"(",
"query",
")",
".",
"select",
"(",
"table",
".",
"id",
",",
"gtable",
".",
"the_geom",
".",
"st_simplify",
"(",
"tolerance",
")",
".",
"st_astext",
"(",
")",
".",
"with_alias",
"(",
"\"wkt\"",
")",
")",
"else",
":",
"rows",
"=",
"db",
"(",
"query",
")",
".",
"select",
"(",
"table",
".",
"id",
",",
"gtable",
".",
"the_geom",
".",
"st_astext",
"(",
")",
".",
"with_alias",
"(",
"\"wkt\"",
")",
")",
"for",
"row",
"in",
"rows",
":",
"key",
"=",
"row",
"[",
"tablename",
"]",
".",
"id",
"if",
"key",
"in",
"output",
":",
"output",
"[",
"key",
"]",
".",
"append",
"(",
"row",
".",
"wkt",
")",
"else",
":",
"output",
"[",
"key",
"]",
"=",
"[",
"row",
".",
"wkt",
"]",
"else",
":",
"rows",
"=",
"db",
"(",
"query",
")",
".",
"select",
"(",
"table",
".",
"id",
",",
"gtable",
".",
"wkt",
",",
")",
"simplify",
"=",
"S3GIS",
".",
"simplify",
"if",
"geojson",
":",
"# Simplify the polygon to reduce download size",
"if",
"join",
":",
"for",
"row",
"in",
"rows",
":",
"g",
"=",
"simplify",
"(",
"row",
"[",
"\"gis_location\"",
"]",
".",
"wkt",
",",
"tolerance",
"=",
"tolerance",
",",
"output",
"=",
"\"geojson\"",
")",
"if",
"g",
":",
"key",
"=",
"row",
"[",
"tablename",
"]",
".",
"id",
"if",
"key",
"in",
"output",
":",
"output",
"[",
"key",
"]",
".",
"append",
"(",
"g",
")",
"else",
":",
"output",
"[",
"key",
"]",
"=",
"[",
"g",
"]",
"else",
":",
"# gis_location: always single",
"for",
"row",
"in",
"rows",
":",
"g",
"=",
"simplify",
"(",
"row",
".",
"wkt",
",",
"tolerance",
"=",
"tolerance",
",",
"output",
"=",
"\"geojson\"",
")",
"if",
"g",
":",
"output",
"[",
"row",
".",
"id",
"]",
"=",
"g",
"else",
":",
"if",
"join",
":",
"if",
"tolerance",
":",
"# Simplify the polygon to reduce download size",
"# & also to work around the recursion limit in libxslt",
"# http://blog.gmane.org/gmane.comp.python.lxml.devel/day=20120309",
"for",
"row",
"in",
"rows",
":",
"wkt",
"=",
"simplify",
"(",
"row",
"[",
"\"gis_location\"",
"]",
".",
"wkt",
")",
"if",
"wkt",
":",
"key",
"=",
"row",
"[",
"tablename",
"]",
".",
"id",
"if",
"key",
"in",
"output",
":",
"output",
"[",
"key",
"]",
".",
"append",
"(",
"wkt",
")",
"else",
":",
"output",
"[",
"key",
"]",
"=",
"[",
"wkt",
"]",
"else",
":",
"for",
"row",
"in",
"rows",
":",
"wkt",
"=",
"row",
"[",
"\"gis_location\"",
"]",
".",
"wkt",
"if",
"wkt",
":",
"key",
"=",
"row",
"[",
"tablename",
"]",
".",
"id",
"if",
"key",
"in",
"output",
":",
"output",
"[",
"key",
"]",
".",
"append",
"(",
"wkt",
")",
"else",
":",
"output",
"[",
"key",
"]",
"=",
"[",
"wkt",
"]",
"else",
":",
"# gis_location: always single",
"if",
"tolerance",
":",
"for",
"row",
"in",
"rows",
":",
"wkt",
"=",
"simplify",
"(",
"row",
".",
"wkt",
")",
"if",
"wkt",
":",
"output",
"[",
"row",
".",
"id",
"]",
"=",
"wkt",
"else",
":",
"for",
"row",
"in",
"rows",
":",
"wkt",
"=",
"row",
".",
"wkt",
"if",
"wkt",
":",
"output",
"[",
"row",
".",
"id",
"]",
"=",
"wkt",
"return",
"output"
] |
https://github.com/sahana/eden/blob/1696fa50e90ce967df69f66b571af45356cc18da/modules/s3/s3gis.py#L2426-L2550
|
|
PyCQA/pylint
|
3fc855f9d0fa8e6410be5a23cf954ffd5471b4eb
|
pylint/reporters/ureports/base_writer.py
|
python
|
BaseWriter.write
|
(self, string: str)
|
write a string in the output buffer
|
write a string in the output buffer
|
[
"write",
"a",
"string",
"in",
"the",
"output",
"buffer"
] |
def write(self, string: str) -> None:
"""write a string in the output buffer"""
self.out.write(string)
|
[
"def",
"write",
"(",
"self",
",",
"string",
":",
"str",
")",
"->",
"None",
":",
"self",
".",
"out",
".",
"write",
"(",
"string",
")"
] |
https://github.com/PyCQA/pylint/blob/3fc855f9d0fa8e6410be5a23cf954ffd5471b4eb/pylint/reporters/ureports/base_writer.py#L63-L65
|
||
linxid/Machine_Learning_Study_Path
|
558e82d13237114bbb8152483977806fc0c222af
|
Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/pip/_vendor/distlib/database.py
|
python
|
EggInfoDistribution.check_installed_files
|
(self)
|
return mismatches
|
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
|
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
|
[
"Checks",
"that",
"the",
"hashes",
"and",
"sizes",
"of",
"the",
"files",
"in",
"RECORD",
"are",
"matched",
"by",
"the",
"files",
"themselves",
".",
"Returns",
"a",
"(",
"possibly",
"empty",
")",
"list",
"of",
"mismatches",
".",
"Each",
"entry",
"in",
"the",
"mismatch",
"list",
"will",
"be",
"a",
"tuple",
"consisting",
"of",
"the",
"path",
"exists",
"size",
"or",
"hash",
"according",
"to",
"what",
"didn",
"t",
"match",
"(",
"existence",
"is",
"checked",
"first",
"then",
"size",
"then",
"hash",
")",
"the",
"expected",
"value",
"and",
"the",
"actual",
"value",
"."
] |
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
for path, _, _ in self.list_installed_files():
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
return mismatches
|
[
"def",
"check_installed_files",
"(",
"self",
")",
":",
"mismatches",
"=",
"[",
"]",
"record_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"'installed-files.txt'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"record_path",
")",
":",
"for",
"path",
",",
"_",
",",
"_",
"in",
"self",
".",
"list_installed_files",
"(",
")",
":",
"if",
"path",
"==",
"record_path",
":",
"continue",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"mismatches",
".",
"append",
"(",
"(",
"path",
",",
"'exists'",
",",
"True",
",",
"False",
")",
")",
"return",
"mismatches"
] |
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/pip/_vendor/distlib/database.py#L958-L975
|
|
ucsb-seclab/karonte
|
427ac313e596f723e40768b95d13bd7a9fc92fd8
|
eval/multi_bin/all_bins/binary_dependency_graph/utils.py
|
python
|
run_command
|
(cmd)
|
return o, e
|
Run shell commands
:param cmd: command
:return: stdout and stderr
|
Run shell commands
:param cmd: command
:return: stdout and stderr
|
[
"Run",
"shell",
"commands",
":",
"param",
"cmd",
":",
"command",
":",
"return",
":",
"stdout",
"and",
"stderr"
] |
def run_command(cmd):
"""
Run shell commands
:param cmd: command
:return: stdout and stderr
"""
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
o, e = p.communicate()
return o, e
|
[
"def",
"run_command",
"(",
"cmd",
")",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"shell",
"=",
"True",
")",
"o",
",",
"e",
"=",
"p",
".",
"communicate",
"(",
")",
"return",
"o",
",",
"e"
] |
https://github.com/ucsb-seclab/karonte/blob/427ac313e596f723e40768b95d13bd7a9fc92fd8/eval/multi_bin/all_bins/binary_dependency_graph/utils.py#L60-L69
|
|
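The run_command entry above is a thin wrapper over subprocess with shell=True, returning stdout and stderr as bytes. A self-contained equivalent plus a quick call (shell=True is kept to match the entry, though it is only safe for trusted command strings):

import subprocess

def run_command(cmd):
    # Run a shell command and return (stdout, stderr) as bytes.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, shell=True)
    o, e = p.communicate()
    return o, e

out, err = run_command("echo hello")
print(out.strip(), err)  # b'hello' b''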
alan-turing-institute/sktime
|
79cc513346b1257a6f3fa8e4ed855b5a2a7de716
|
sktime/benchmarking/orchestration.py
|
python
|
Orchestrator.fit_predict
|
(
self,
overwrite_predictions=False,
predict_on_train=False,
save_fitted_strategies=True,
overwrite_fitted_strategies=False,
verbose=False,
)
|
Fit and predict.
|
Fit and predict.
|
[
"Fit",
"and",
"predict",
"."
] |
def fit_predict(
self,
overwrite_predictions=False,
predict_on_train=False,
save_fitted_strategies=True,
overwrite_fitted_strategies=False,
verbose=False,
):
"""Fit and predict."""
# check that for fitted strategies overwrite option is only set when
# save option is set
if overwrite_fitted_strategies and not save_fitted_strategies:
raise ValueError(
f"Can only overwrite fitted strategies "
f"if fitted strategies are saved, but found: "
f"overwrite_fitted_strategies="
f"{overwrite_fitted_strategies} and "
f"save_fitted_strategies="
f"{save_fitted_strategies}"
)
# fitting and prediction
for task, dataset, data, strategy, cv_fold, train_idx, test_idx in self._iter():
# check which results already exist
train_pred_exist = self.results.check_predictions_exist(
strategy.name, dataset.name, cv_fold, train_or_test="train"
)
test_pred_exist = self.results.check_predictions_exist(
strategy.name, dataset.name, cv_fold, train_or_test="test"
)
fitted_stategy_exists = self.results.check_fitted_strategy_exists(
strategy.name, dataset.name, cv_fold
)
# skip if overwrite is set to False for both predictions and
# strategies and all results exist
if (
not overwrite_predictions
and test_pred_exist
and (train_pred_exist or not predict_on_train)
and not overwrite_fitted_strategies
and (fitted_stategy_exists or not save_fitted_strategies)
):
log.warn(
f"Skipping strategy: {strategy.name} on CV-fold: "
f"{cv_fold} of dataset: {dataset.name}"
)
continue
# split data into training and test sets
train = data.iloc[train_idx]
test = data.iloc[test_idx]
# fit strategy
self._print_progress(
dataset.name, strategy.name, cv_fold, "train", "fit", verbose
)
fit_estimator_start_time = pd.Timestamp.now()
strategy.fit(task, train)
fit_estimator_end_time = pd.Timestamp.now()
# save fitted strategy if save fitted strategies is set to True
# and overwrite is set to True or the
# fitted strategy does not already exist
if save_fitted_strategies and (
overwrite_fitted_strategies or not fitted_stategy_exists
):
self.results.save_fitted_strategy(
strategy, dataset_name=dataset.name, cv_fold=cv_fold
)
# optionally, predict on training set if predict on train is set
# to True and overwrite is set to True
# or the predicted values do not already exist
if predict_on_train and (overwrite_predictions or not train_pred_exist):
y_true = train.loc[:, task.target]
predict_estimator_start_time = pd.Timestamp.now()
y_pred = strategy.predict(train)
predict_estimator_end_time = pd.Timestamp.now()
y_proba = self._predict_proba_one(strategy, task, train, y_true, y_pred)
self.results.save_predictions(
strategy_name=strategy.name,
dataset_name=dataset.name,
index=train_idx,
y_true=y_true,
y_pred=y_pred,
y_proba=y_proba,
cv_fold=cv_fold,
fit_estimator_start_time=fit_estimator_start_time,
fit_estimator_end_time=fit_estimator_end_time,
predict_estimator_start_time=predict_estimator_start_time,
predict_estimator_end_time=predict_estimator_end_time,
train_or_test="train",
)
# predict on test set if overwrite predictions is set to True or
# predictions do not already exist
if overwrite_predictions or not test_pred_exist:
y_true = test.loc[:, task.target]
predict_estimator_start_time = pd.Timestamp.now()
y_pred = strategy.predict(test)
predict_estimator_end_time = pd.Timestamp.now()
y_proba = self._predict_proba_one(strategy, task, test, y_true, y_pred)
self.results.save_predictions(
dataset_name=dataset.name,
strategy_name=strategy.name,
index=test_idx,
y_true=y_true,
y_pred=y_pred,
y_proba=y_proba,
cv_fold=cv_fold,
fit_estimator_start_time=fit_estimator_start_time,
fit_estimator_end_time=fit_estimator_end_time,
predict_estimator_start_time=predict_estimator_start_time,
predict_estimator_end_time=predict_estimator_end_time,
train_or_test="test",
)
# save results as master file
self.results.save()
|
[
"def",
"fit_predict",
"(",
"self",
",",
"overwrite_predictions",
"=",
"False",
",",
"predict_on_train",
"=",
"False",
",",
"save_fitted_strategies",
"=",
"True",
",",
"overwrite_fitted_strategies",
"=",
"False",
",",
"verbose",
"=",
"False",
",",
")",
":",
"# check that for fitted strategies overwrite option is only set when",
"# save option is set",
"if",
"overwrite_fitted_strategies",
"and",
"not",
"save_fitted_strategies",
":",
"raise",
"ValueError",
"(",
"f\"Can only overwrite fitted strategies \"",
"f\"if fitted strategies are saved, but found: \"",
"f\"overwrite_fitted_strategies=\"",
"f\"{overwrite_fitted_strategies} and \"",
"f\"save_fitted_strategies=\"",
"f\"{save_fitted_strategies}\"",
")",
"# fitting and prediction",
"for",
"task",
",",
"dataset",
",",
"data",
",",
"strategy",
",",
"cv_fold",
",",
"train_idx",
",",
"test_idx",
"in",
"self",
".",
"_iter",
"(",
")",
":",
"# check which results already exist",
"train_pred_exist",
"=",
"self",
".",
"results",
".",
"check_predictions_exist",
"(",
"strategy",
".",
"name",
",",
"dataset",
".",
"name",
",",
"cv_fold",
",",
"train_or_test",
"=",
"\"train\"",
")",
"test_pred_exist",
"=",
"self",
".",
"results",
".",
"check_predictions_exist",
"(",
"strategy",
".",
"name",
",",
"dataset",
".",
"name",
",",
"cv_fold",
",",
"train_or_test",
"=",
"\"test\"",
")",
"fitted_stategy_exists",
"=",
"self",
".",
"results",
".",
"check_fitted_strategy_exists",
"(",
"strategy",
".",
"name",
",",
"dataset",
".",
"name",
",",
"cv_fold",
")",
"# skip if overwrite is set to False for both predictions and",
"# strategies and all results exist",
"if",
"(",
"not",
"overwrite_predictions",
"and",
"test_pred_exist",
"and",
"(",
"train_pred_exist",
"or",
"not",
"predict_on_train",
")",
"and",
"not",
"overwrite_fitted_strategies",
"and",
"(",
"fitted_stategy_exists",
"or",
"not",
"save_fitted_strategies",
")",
")",
":",
"log",
".",
"warn",
"(",
"f\"Skipping strategy: {strategy.name} on CV-fold: \"",
"f\"{cv_fold} of dataset: {dataset.name}\"",
")",
"continue",
"# split data into training and test sets",
"train",
"=",
"data",
".",
"iloc",
"[",
"train_idx",
"]",
"test",
"=",
"data",
".",
"iloc",
"[",
"test_idx",
"]",
"# fit strategy",
"self",
".",
"_print_progress",
"(",
"dataset",
".",
"name",
",",
"strategy",
".",
"name",
",",
"cv_fold",
",",
"\"train\"",
",",
"\"fit\"",
",",
"verbose",
")",
"fit_estimator_start_time",
"=",
"pd",
".",
"Timestamp",
".",
"now",
"(",
")",
"strategy",
".",
"fit",
"(",
"task",
",",
"train",
")",
"fit_estimator_end_time",
"=",
"pd",
".",
"Timestamp",
".",
"now",
"(",
")",
"# save fitted strategy if save fitted strategies is set to True",
"# and overwrite is set to True or the",
"# fitted strategy does not already exist",
"if",
"save_fitted_strategies",
"and",
"(",
"overwrite_fitted_strategies",
"or",
"not",
"fitted_stategy_exists",
")",
":",
"self",
".",
"results",
".",
"save_fitted_strategy",
"(",
"strategy",
",",
"dataset_name",
"=",
"dataset",
".",
"name",
",",
"cv_fold",
"=",
"cv_fold",
")",
"# optionally, predict on training set if predict on train is set",
"# to True and and overwrite is set to True",
"# or the predicted values do not already exist",
"if",
"predict_on_train",
"and",
"(",
"overwrite_predictions",
"or",
"not",
"train_pred_exist",
")",
":",
"y_true",
"=",
"train",
".",
"loc",
"[",
":",
",",
"task",
".",
"target",
"]",
"predict_estimator_start_time",
"=",
"pd",
".",
"Timestamp",
".",
"now",
"(",
")",
"y_pred",
"=",
"strategy",
".",
"predict",
"(",
"train",
")",
"predict_estimator_end_time",
"=",
"pd",
".",
"Timestamp",
".",
"now",
"(",
")",
"y_proba",
"=",
"self",
".",
"_predict_proba_one",
"(",
"strategy",
",",
"task",
",",
"train",
",",
"y_true",
",",
"y_pred",
")",
"self",
".",
"results",
".",
"save_predictions",
"(",
"strategy_name",
"=",
"strategy",
".",
"name",
",",
"dataset_name",
"=",
"dataset",
".",
"name",
",",
"index",
"=",
"train_idx",
",",
"y_true",
"=",
"y_true",
",",
"y_pred",
"=",
"y_pred",
",",
"y_proba",
"=",
"y_proba",
",",
"cv_fold",
"=",
"cv_fold",
",",
"fit_estimator_start_time",
"=",
"fit_estimator_start_time",
",",
"fit_estimator_end_time",
"=",
"fit_estimator_end_time",
",",
"predict_estimator_start_time",
"=",
"predict_estimator_start_time",
",",
"predict_estimator_end_time",
"=",
"predict_estimator_end_time",
",",
"train_or_test",
"=",
"\"train\"",
",",
")",
"# predict on test set if overwrite predictions is set to True or",
"# predictions do not already exist",
"if",
"overwrite_predictions",
"or",
"not",
"test_pred_exist",
":",
"y_true",
"=",
"test",
".",
"loc",
"[",
":",
",",
"task",
".",
"target",
"]",
"predict_estimator_start_time",
"=",
"pd",
".",
"Timestamp",
".",
"now",
"(",
")",
"y_pred",
"=",
"strategy",
".",
"predict",
"(",
"test",
")",
"predict_estimator_end_time",
"=",
"pd",
".",
"Timestamp",
".",
"now",
"(",
")",
"y_proba",
"=",
"self",
".",
"_predict_proba_one",
"(",
"strategy",
",",
"task",
",",
"test",
",",
"y_true",
",",
"y_pred",
")",
"self",
".",
"results",
".",
"save_predictions",
"(",
"dataset_name",
"=",
"dataset",
".",
"name",
",",
"strategy_name",
"=",
"strategy",
".",
"name",
",",
"index",
"=",
"test_idx",
",",
"y_true",
"=",
"y_true",
",",
"y_pred",
"=",
"y_pred",
",",
"y_proba",
"=",
"y_proba",
",",
"cv_fold",
"=",
"cv_fold",
",",
"fit_estimator_start_time",
"=",
"fit_estimator_start_time",
",",
"fit_estimator_end_time",
"=",
"fit_estimator_end_time",
",",
"predict_estimator_start_time",
"=",
"predict_estimator_start_time",
",",
"predict_estimator_end_time",
"=",
"predict_estimator_end_time",
",",
"train_or_test",
"=",
"\"test\"",
",",
")",
"# save results as master file",
"self",
".",
"results",
".",
"save",
"(",
")"
] |
https://github.com/alan-turing-institute/sktime/blob/79cc513346b1257a6f3fa8e4ed855b5a2a7de716/sktime/benchmarking/orchestration.py#L114-L236
|
||
datamllab/rlcard
|
c21ea82519c453a42e3bdc6848bd3356e9b6ac43
|
rlcard/games/uno/round.py
|
python
|
UnoRound.flip_top_card
|
(self)
|
return top
|
Flip the top card of the card pile
Returns:
(object of UnoCard): the top card in game
|
Flip the top card of the card pile
|
[
"Flip",
"the",
"top",
"card",
"of",
"the",
"card",
"pile"
] |
def flip_top_card(self):
''' Flip the top card of the card pile
Returns:
(object of UnoCard): the top card in game
'''
top = self.dealer.flip_top_card()
if top.trait == 'wild':
top.color = self.np_random.choice(UnoCard.info['color'])
self.target = top
self.played_cards.append(top)
return top
|
[
"def",
"flip_top_card",
"(",
"self",
")",
":",
"top",
"=",
"self",
".",
"dealer",
".",
"flip_top_card",
"(",
")",
"if",
"top",
".",
"trait",
"==",
"'wild'",
":",
"top",
".",
"color",
"=",
"self",
".",
"np_random",
".",
"choice",
"(",
"UnoCard",
".",
"info",
"[",
"'color'",
"]",
")",
"self",
".",
"target",
"=",
"top",
"self",
".",
"played_cards",
".",
"append",
"(",
"top",
")",
"return",
"top"
] |
https://github.com/datamllab/rlcard/blob/c21ea82519c453a42e3bdc6848bd3356e9b6ac43/rlcard/games/uno/round.py#L24-L36
|
|
pwnieexpress/pwn_plug_sources
|
1a23324f5dc2c3de20f9c810269b6a29b2758cad
|
src/set/src/core/scapy.py
|
python
|
merge
|
(x,y)
|
return m
|
[] |
def merge(x,y):
if len(x) > len(y):
y += "\x00"*(len(x)-len(y))
elif len(x) < len(y):
x += "\x00"*(len(y)-len(x))
m = ""
for i in range(len(x)/ss):
m += x[ss*i:ss*(i+1)]+y[ss*i:ss*(i+1)]
return m
|
[
"def",
"merge",
"(",
"x",
",",
"y",
")",
":",
"if",
"len",
"(",
"x",
")",
">",
"len",
"(",
"y",
")",
":",
"y",
"+=",
"\"\\x00\"",
"*",
"(",
"len",
"(",
"x",
")",
"-",
"len",
"(",
"y",
")",
")",
"elif",
"len",
"(",
"x",
")",
"<",
"len",
"(",
"y",
")",
":",
"x",
"+=",
"\"\\x00\"",
"*",
"(",
"len",
"(",
"y",
")",
"-",
"len",
"(",
"x",
")",
")",
"m",
"=",
"\"\"",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"x",
")",
"/",
"ss",
")",
":",
"m",
"+=",
"x",
"[",
"ss",
"*",
"i",
":",
"ss",
"*",
"(",
"i",
"+",
"1",
")",
"]",
"+",
"y",
"[",
"ss",
"*",
"i",
":",
"ss",
"*",
"(",
"i",
"+",
"1",
")",
"]",
"return",
"m"
] |
https://github.com/pwnieexpress/pwn_plug_sources/blob/1a23324f5dc2c3de20f9c810269b6a29b2758cad/src/set/src/core/scapy.py#L12484-L12492
|
|||
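The merge entry above is Python 2 code: it pads the shorter string with NUL bytes and interleaves the two in chunks of the module-level sample size ss, relying on len(x)/ss being integer division. A Python 3 port with ss passed explicitly and bytes instead of str (a sketch, not the scapy original):

def merge(x: bytes, y: bytes, ss: int = 2) -> bytes:
    # Pad the shorter buffer with NUL bytes so both have equal length.
    if len(x) > len(y):
        y += b"\x00" * (len(x) - len(y))
    elif len(x) < len(y):
        x += b"\x00" * (len(y) - len(x))
    m = b""
    # Interleave ss-byte chunks of x and y (note // for Python 3).
    for i in range(len(x) // ss):
        m += x[ss * i:ss * (i + 1)] + y[ss * i:ss * (i + 1)]
    return m

print(merge(b"AABB", b"ccdd"))  # b'AAccBBdd'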
KovenYu/MAR
|
860031695e4e1b623b48a7a1228a3cf8051d8dcf
|
src/utils.py
|
python
|
DiscriminativeLoss._partition_sets
|
(self, features, multilabels, labels)
|
return P, N
|
partition the batch into confident positive, hard negative and others
:param features: shape=(BS, dim)
:param multilabels: shape=(BS, n_class)
:param labels: shape=(BS,)
:return:
P: positive pair set. tuple of 2 np.array i and j.
i contains smaller indices and j larger indices in the batch.
if P is None, no positive pair found in this batch.
N: negative pair set. similar to P, but will never be None.
|
partition the batch into confident positive, hard negative and others
:param features: shape=(BS, dim)
:param multilabels: shape=(BS, n_class)
:param labels: shape=(BS,)
:return:
P: positive pair set. tuple of 2 np.array i and j.
i contains smaller indices and j larger indices in the batch.
if P is None, no positive pair found in this batch.
N: negative pair set. similar to P, but will never be None.
|
[
"partition",
"the",
"batch",
"into",
"confident",
"positive",
"hard",
"negative",
"and",
"others",
":",
"param",
"features",
":",
"shape",
"=",
"(",
"BS",
"dim",
")",
":",
"param",
"multilabels",
":",
"shape",
"=",
"(",
"BS",
"n_class",
")",
":",
"param",
"labels",
":",
"shape",
"=",
"(",
"BS",
")",
":",
"return",
":",
"P",
":",
"positive",
"pair",
"set",
".",
"tuple",
"of",
"2",
"np",
".",
"array",
"i",
"and",
"j",
".",
"i",
"contains",
"smaller",
"indices",
"and",
"j",
"larger",
"indices",
"in",
"the",
"batch",
".",
"if",
"P",
"is",
"None",
"no",
"positive",
"pair",
"found",
"in",
"this",
"batch",
".",
"N",
":",
"negative",
"pair",
"set",
".",
"similar",
"to",
"P",
"but",
"will",
"never",
"be",
"None",
"."
] |
def _partition_sets(self, features, multilabels, labels):
"""
partition the batch into confident positive, hard negative and others
:param features: shape=(BS, dim)
:param multilabels: shape=(BS, n_class)
:param labels: shape=(BS,)
:return:
P: positive pair set. tuple of 2 np.array i and j.
i contains smaller indices and j larger indices in the batch.
if P is None, no positive pair found in this batch.
N: negative pair set. similar to P, but will never be None.
"""
f_np = features.cpu().numpy()
ml_np = multilabels.cpu().numpy()
p_dist = pdist(f_np)
p_agree = 1 - pdist(ml_np, 'minkowski', p=1) / 2
sorting_idx = np.argsort(p_dist)
n_similar = int(len(p_dist) * self.mining_ratio)
similar_idx = sorting_idx[:n_similar]
is_positive = p_agree[similar_idx] > self.threshold.item()
pos_idx = similar_idx[is_positive]
neg_idx = similar_idx[~is_positive]
P = dist_idx_to_pair_idx(len(f_np), pos_idx)
N = dist_idx_to_pair_idx(len(f_np), neg_idx)
self._update_threshold(p_agree)
self._update_buffers(P, labels)
return P, N
|
[
"def",
"_partition_sets",
"(",
"self",
",",
"features",
",",
"multilabels",
",",
"labels",
")",
":",
"f_np",
"=",
"features",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
"ml_np",
"=",
"multilabels",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
"p_dist",
"=",
"pdist",
"(",
"f_np",
")",
"p_agree",
"=",
"1",
"-",
"pdist",
"(",
"ml_np",
",",
"'minkowski'",
",",
"p",
"=",
"1",
")",
"/",
"2",
"sorting_idx",
"=",
"np",
".",
"argsort",
"(",
"p_dist",
")",
"n_similar",
"=",
"int",
"(",
"len",
"(",
"p_dist",
")",
"*",
"self",
".",
"mining_ratio",
")",
"similar_idx",
"=",
"sorting_idx",
"[",
":",
"n_similar",
"]",
"is_positive",
"=",
"p_agree",
"[",
"similar_idx",
"]",
">",
"self",
".",
"threshold",
".",
"item",
"(",
")",
"pos_idx",
"=",
"similar_idx",
"[",
"is_positive",
"]",
"neg_idx",
"=",
"similar_idx",
"[",
"~",
"is_positive",
"]",
"P",
"=",
"dist_idx_to_pair_idx",
"(",
"len",
"(",
"f_np",
")",
",",
"pos_idx",
")",
"N",
"=",
"dist_idx_to_pair_idx",
"(",
"len",
"(",
"f_np",
")",
",",
"neg_idx",
")",
"self",
".",
"_update_threshold",
"(",
"p_agree",
")",
"self",
".",
"_update_buffers",
"(",
"P",
",",
"labels",
")",
"return",
"P",
",",
"N"
] |
https://github.com/KovenYu/MAR/blob/860031695e4e1b623b48a7a1228a3cf8051d8dcf/src/utils.py#L143-L169
|
|
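The _partition_sets entry above mines the closest fraction of feature pairs (mining_ratio) from the condensed distance vector and then splits them into positives and negatives by multilabel agreement. The condensed-distance bookkeeping can be sanity-checked with scipy alone; dist_idx_to_pair_idx is project code, so the sketch below recovers (i, j) pairs with itertools instead:

import numpy as np
from itertools import combinations
from scipy.spatial.distance import pdist

rng = np.random.default_rng(0)
features = rng.normal(size=(6, 4))           # stand-in for the batch features
pairs = list(combinations(range(6), 2))      # same order pdist uses

p_dist = pdist(features)                     # condensed pairwise distances
mining_ratio = 0.3
n_similar = int(len(p_dist) * mining_ratio)  # keep the closest 30% of pairs
similar_idx = np.argsort(p_dist)[:n_similar]

print([pairs[i] for i in similar_idx])       # the mined (i, j) index pairs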
MozillaSecurity/funfuzz
|
fe5cc4710f82e68a171bf1255f43519f9c1bf784
|
src/funfuzz/js/compile_shell.py
|
python
|
CompiledShell.get_s3_tar_with_ext_full_path
|
(self)
|
return sm_compile_helpers.ensure_cache_dir(Path.home()) / self.get_s3_tar_name_with_ext()
|
Retrieve the path to the tarball downloaded from S3.
Returns:
Path: Full path to the tarball in the local shell cache directory
|
Retrieve the path to the tarball downloaded from S3.
|
[
"Retrieve",
"the",
"path",
"to",
"the",
"tarball",
"downloaded",
"from",
"S3",
"."
] |
def get_s3_tar_with_ext_full_path(self):
"""Retrieve the path to the tarball downloaded from S3.
Returns:
Path: Full path to the tarball in the local shell cache directory
"""
return sm_compile_helpers.ensure_cache_dir(Path.home()) / self.get_s3_tar_name_with_ext()
|
[
"def",
"get_s3_tar_with_ext_full_path",
"(",
"self",
")",
":",
"return",
"sm_compile_helpers",
".",
"ensure_cache_dir",
"(",
"Path",
".",
"home",
"(",
")",
")",
"/",
"self",
".",
"get_s3_tar_name_with_ext",
"(",
")"
] |
https://github.com/MozillaSecurity/funfuzz/blob/fe5cc4710f82e68a171bf1255f43519f9c1bf784/src/funfuzz/js/compile_shell.py#L243-L249
|
|
mozilla/mozillians
|
bd5da47fef01e4e09d3bb8cb0799735bdfbeb3f9
|
mozillians/phonebook/validators.py
|
python
|
validate_twitter
|
(username)
|
return username
|
Return a twitter username given '@' or http(s) strings.
|
Return a twitter username given '
|
[
"Return",
"a",
"twitter",
"username",
"given"
] |
def validate_twitter(username):
"""Return a twitter username given '@' or http(s) strings."""
if username:
username = re.sub(r'https?://(www\.)?twitter\.com/|@', '', username)
# Twitter accounts must be alphanumeric ASCII including underscore, and <= 15 chars.
# https://support.twitter.com/articles/101299-why-can-t-i-register-certain-usernames
if len(username) > 15:
raise ValidationError(_('Twitter usernames cannot be longer than 15 characters.'))
if not re.match(r'^\w+$', username):
raise ValidationError(_('Twitter usernames must contain only alphanumeric'
' characters and the underscore.'))
return username
|
[
"def",
"validate_twitter",
"(",
"username",
")",
":",
"if",
"username",
":",
"username",
"=",
"re",
".",
"sub",
"(",
"r'https?://(www\\.)?twitter\\.com/|@'",
",",
"''",
",",
"username",
")",
"# Twitter accounts must be alphanumeric ASCII including underscore, and <= 15 chars.",
"# https://support.twitter.com/articles/101299-why-can-t-i-register-certain-usernames",
"if",
"len",
"(",
"username",
")",
">",
"15",
":",
"raise",
"ValidationError",
"(",
"_",
"(",
"'Twitter usernames cannot be longer than 15 characters.'",
")",
")",
"if",
"not",
"re",
".",
"match",
"(",
"r'^\\w+$'",
",",
"username",
")",
":",
"raise",
"ValidationError",
"(",
"_",
"(",
"'Twitter usernames must contain only alphanumeric'",
"' characters and the underscore.'",
")",
")",
"return",
"username"
] |
https://github.com/mozilla/mozillians/blob/bd5da47fef01e4e09d3bb8cb0799735bdfbeb3f9/mozillians/phonebook/validators.py#L10-L24
|
|
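The validate_twitter entry above strips an '@' prefix or a twitter.com URL and then enforces the <=15-character, \w-only rule. Outside Django the same checks can be exercised with a plain ValueError standing in for ValidationError and the gettext call dropped (a sketch of the behaviour, not the mozillians code path):

import re

def validate_twitter(username):
    # Strip an @ prefix or a (www.)twitter.com/ URL prefix.
    if username:
        username = re.sub(r'https?://(www\.)?twitter\.com/|@', '', username)
        if len(username) > 15:
            raise ValueError('Twitter usernames cannot be longer than 15 characters.')
        if not re.match(r'^\w+$', username):
            raise ValueError('Twitter usernames must contain only alphanumeric'
                             ' characters and the underscore.')
    return username

print(validate_twitter('@mozillians'))                     # 'mozillians'
print(validate_twitter('https://twitter.com/mozillians'))  # 'mozillians'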
NVlabs/Deep_Object_Pose
|
c50e5fcd3741802484bf59c0f7bcf507b918e417
|
src/dope/inference/detector.py
|
python
|
ObjectDetector.find_objects
|
(vertex2, aff, config, numvertex=8)
|
return objects, all_peaks
|
Detects objects given network belief maps and affinities, using heuristic method
|
Detects objects given network belief maps and affinities, using heuristic method
|
[
"Detects",
"objects",
"given",
"network",
"belief",
"maps",
"and",
"affinities",
"using",
"heuristic",
"method"
] |
def find_objects(vertex2, aff, config, numvertex=8):
'''Detects objects given network belief maps and affinities, using heuristic method'''
all_peaks = []
peak_counter = 0
for j in range(vertex2.size()[0]):
belief = vertex2[j].clone()
map_ori = belief.cpu().data.numpy()
map = gaussian_filter(belief.cpu().data.numpy(), sigma=config.sigma)
p = 1
map_left = np.zeros(map.shape)
map_left[p:,:] = map[:-p,:]
map_right = np.zeros(map.shape)
map_right[:-p,:] = map[p:,:]
map_up = np.zeros(map.shape)
map_up[:,p:] = map[:,:-p]
map_down = np.zeros(map.shape)
map_down[:,:-p] = map[:,p:]
peaks_binary = np.logical_and.reduce(
(
map >= map_left,
map >= map_right,
map >= map_up,
map >= map_down,
map > config.thresh_map)
)
peaks = zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])
# Computing the weigthed average for localizing the peaks
peaks = list(peaks)
win = 5
ran = win // 2
peaks_avg = []
for p_value in range(len(peaks)):
p = peaks[p_value]
weights = np.zeros((win,win))
i_values = np.zeros((win,win))
j_values = np.zeros((win,win))
for i in range(-ran,ran+1):
for j in range(-ran,ran+1):
if p[1]+i < 0 \
or p[1]+i >= map_ori.shape[0] \
or p[0]+j < 0 \
or p[0]+j >= map_ori.shape[1]:
continue
i_values[j+ran, i+ran] = p[1] + i
j_values[j+ran, i+ran] = p[0] + j
weights[j+ran, i+ran] = (map_ori[p[1]+i, p[0]+j])
# if the weights are all zeros
# then add the none continuous points
OFFSET_DUE_TO_UPSAMPLING = 0.4395
try:
peaks_avg.append(
(np.average(j_values, weights=weights) + OFFSET_DUE_TO_UPSAMPLING, \
np.average(i_values, weights=weights) + OFFSET_DUE_TO_UPSAMPLING))
except:
peaks_avg.append((p[0] + OFFSET_DUE_TO_UPSAMPLING, p[1] + OFFSET_DUE_TO_UPSAMPLING))
# Note: Python3 doesn't support len for zip object
peaks_len = min(len(np.nonzero(peaks_binary)[1]), len(np.nonzero(peaks_binary)[0]))
peaks_with_score = [peaks_avg[x_] + (map_ori[peaks[x_][1],peaks[x_][0]],) for x_ in range(len(peaks))]
id = range(peak_counter, peak_counter + peaks_len)
peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]
all_peaks.append(peaks_with_score_and_id)
peak_counter += peaks_len
objects = []
# Check object centroid and build the objects if the centroid is found
for nb_object in range(len(all_peaks[-1])):
if all_peaks[-1][nb_object][2] > config.thresh_points:
objects.append([
[all_peaks[-1][nb_object][:2][0],all_peaks[-1][nb_object][:2][1]],
[None for i in range(numvertex)],
[None for i in range(numvertex)],
all_peaks[-1][nb_object][2]
])
# Working with an output that only has belief maps
if aff is None:
if len (objects) > 0 and len(all_peaks)>0 and len(all_peaks[0])>0:
for i_points in range(8):
if len(all_peaks[i_points])>0 and all_peaks[i_points][0][2] > config.threshold:
objects[0][1][i_points] = (all_peaks[i_points][0][0], all_peaks[i_points][0][1])
else:
# For all points found
for i_lists in range(len(all_peaks[:-1])):
lists = all_peaks[i_lists]
for candidate in lists:
if candidate[2] < config.thresh_points:
continue
i_best = -1
best_dist = 10000
best_angle = 100
for i_obj in range(len(objects)):
center = [objects[i_obj][0][0], objects[i_obj][0][1]]
# integer is used to look into the affinity map,
# but the float version is used to run
point_int = [int(candidate[0]), int(candidate[1])]
point = [candidate[0], candidate[1]]
# look at the distance to the vector field.
v_aff = np.array([
aff[i_lists*2,
point_int[1],
point_int[0]].data.item(),
aff[i_lists*2+1,
point_int[1],
point_int[0]].data.item()]) * 10
# normalize the vector
xvec = v_aff[0]
yvec = v_aff[1]
norms = np.sqrt(xvec * xvec + yvec * yvec)
xvec/=norms
yvec/=norms
v_aff = np.concatenate([[xvec],[yvec]])
v_center = np.array(center) - np.array(point)
xvec = v_center[0]
yvec = v_center[1]
norms = np.sqrt(xvec * xvec + yvec * yvec)
xvec /= norms
yvec /= norms
v_center = np.concatenate([[xvec],[yvec]])
# vector affinity
dist_angle = np.linalg.norm(v_center - v_aff)
# distance between vertexes
dist_point = np.linalg.norm(np.array(point) - np.array(center))
if dist_angle < config.thresh_angle and (best_dist > 1000 or best_dist > dist_point):
i_best = i_obj
best_angle = dist_angle
best_dist = dist_point
if i_best == -1:
continue
if objects[i_best][1][i_lists] is None \
or best_angle < config.thresh_angle \
and best_dist < objects[i_best][2][i_lists][1]:
objects[i_best][1][i_lists] = ((candidate[0])*8, (candidate[1])*8)
objects[i_best][2][i_lists] = (best_angle, best_dist)
return objects, all_peaks
|
[
"def",
"find_objects",
"(",
"vertex2",
",",
"aff",
",",
"config",
",",
"numvertex",
"=",
"8",
")",
":",
"all_peaks",
"=",
"[",
"]",
"peak_counter",
"=",
"0",
"for",
"j",
"in",
"range",
"(",
"vertex2",
".",
"size",
"(",
")",
"[",
"0",
"]",
")",
":",
"belief",
"=",
"vertex2",
"[",
"j",
"]",
".",
"clone",
"(",
")",
"map_ori",
"=",
"belief",
".",
"cpu",
"(",
")",
".",
"data",
".",
"numpy",
"(",
")",
"map",
"=",
"gaussian_filter",
"(",
"belief",
".",
"cpu",
"(",
")",
".",
"data",
".",
"numpy",
"(",
")",
",",
"sigma",
"=",
"config",
".",
"sigma",
")",
"p",
"=",
"1",
"map_left",
"=",
"np",
".",
"zeros",
"(",
"map",
".",
"shape",
")",
"map_left",
"[",
"p",
":",
",",
":",
"]",
"=",
"map",
"[",
":",
"-",
"p",
",",
":",
"]",
"map_right",
"=",
"np",
".",
"zeros",
"(",
"map",
".",
"shape",
")",
"map_right",
"[",
":",
"-",
"p",
",",
":",
"]",
"=",
"map",
"[",
"p",
":",
",",
":",
"]",
"map_up",
"=",
"np",
".",
"zeros",
"(",
"map",
".",
"shape",
")",
"map_up",
"[",
":",
",",
"p",
":",
"]",
"=",
"map",
"[",
":",
",",
":",
"-",
"p",
"]",
"map_down",
"=",
"np",
".",
"zeros",
"(",
"map",
".",
"shape",
")",
"map_down",
"[",
":",
",",
":",
"-",
"p",
"]",
"=",
"map",
"[",
":",
",",
"p",
":",
"]",
"peaks_binary",
"=",
"np",
".",
"logical_and",
".",
"reduce",
"(",
"(",
"map",
">=",
"map_left",
",",
"map",
">=",
"map_right",
",",
"map",
">=",
"map_up",
",",
"map",
">=",
"map_down",
",",
"map",
">",
"config",
".",
"thresh_map",
")",
")",
"peaks",
"=",
"zip",
"(",
"np",
".",
"nonzero",
"(",
"peaks_binary",
")",
"[",
"1",
"]",
",",
"np",
".",
"nonzero",
"(",
"peaks_binary",
")",
"[",
"0",
"]",
")",
"# Computing the weigthed average for localizing the peaks",
"peaks",
"=",
"list",
"(",
"peaks",
")",
"win",
"=",
"5",
"ran",
"=",
"win",
"//",
"2",
"peaks_avg",
"=",
"[",
"]",
"for",
"p_value",
"in",
"range",
"(",
"len",
"(",
"peaks",
")",
")",
":",
"p",
"=",
"peaks",
"[",
"p_value",
"]",
"weights",
"=",
"np",
".",
"zeros",
"(",
"(",
"win",
",",
"win",
")",
")",
"i_values",
"=",
"np",
".",
"zeros",
"(",
"(",
"win",
",",
"win",
")",
")",
"j_values",
"=",
"np",
".",
"zeros",
"(",
"(",
"win",
",",
"win",
")",
")",
"for",
"i",
"in",
"range",
"(",
"-",
"ran",
",",
"ran",
"+",
"1",
")",
":",
"for",
"j",
"in",
"range",
"(",
"-",
"ran",
",",
"ran",
"+",
"1",
")",
":",
"if",
"p",
"[",
"1",
"]",
"+",
"i",
"<",
"0",
"or",
"p",
"[",
"1",
"]",
"+",
"i",
">=",
"map_ori",
".",
"shape",
"[",
"0",
"]",
"or",
"p",
"[",
"0",
"]",
"+",
"j",
"<",
"0",
"or",
"p",
"[",
"0",
"]",
"+",
"j",
">=",
"map_ori",
".",
"shape",
"[",
"1",
"]",
":",
"continue",
"i_values",
"[",
"j",
"+",
"ran",
",",
"i",
"+",
"ran",
"]",
"=",
"p",
"[",
"1",
"]",
"+",
"i",
"j_values",
"[",
"j",
"+",
"ran",
",",
"i",
"+",
"ran",
"]",
"=",
"p",
"[",
"0",
"]",
"+",
"j",
"weights",
"[",
"j",
"+",
"ran",
",",
"i",
"+",
"ran",
"]",
"=",
"(",
"map_ori",
"[",
"p",
"[",
"1",
"]",
"+",
"i",
",",
"p",
"[",
"0",
"]",
"+",
"j",
"]",
")",
"# if the weights are all zeros",
"# then add the none continuous points",
"OFFSET_DUE_TO_UPSAMPLING",
"=",
"0.4395",
"try",
":",
"peaks_avg",
".",
"append",
"(",
"(",
"np",
".",
"average",
"(",
"j_values",
",",
"weights",
"=",
"weights",
")",
"+",
"OFFSET_DUE_TO_UPSAMPLING",
",",
"np",
".",
"average",
"(",
"i_values",
",",
"weights",
"=",
"weights",
")",
"+",
"OFFSET_DUE_TO_UPSAMPLING",
")",
")",
"except",
":",
"peaks_avg",
".",
"append",
"(",
"(",
"p",
"[",
"0",
"]",
"+",
"OFFSET_DUE_TO_UPSAMPLING",
",",
"p",
"[",
"1",
"]",
"+",
"OFFSET_DUE_TO_UPSAMPLING",
")",
")",
"# Note: Python3 doesn't support len for zip object",
"peaks_len",
"=",
"min",
"(",
"len",
"(",
"np",
".",
"nonzero",
"(",
"peaks_binary",
")",
"[",
"1",
"]",
")",
",",
"len",
"(",
"np",
".",
"nonzero",
"(",
"peaks_binary",
")",
"[",
"0",
"]",
")",
")",
"peaks_with_score",
"=",
"[",
"peaks_avg",
"[",
"x_",
"]",
"+",
"(",
"map_ori",
"[",
"peaks",
"[",
"x_",
"]",
"[",
"1",
"]",
",",
"peaks",
"[",
"x_",
"]",
"[",
"0",
"]",
"]",
",",
")",
"for",
"x_",
"in",
"range",
"(",
"len",
"(",
"peaks",
")",
")",
"]",
"id",
"=",
"range",
"(",
"peak_counter",
",",
"peak_counter",
"+",
"peaks_len",
")",
"peaks_with_score_and_id",
"=",
"[",
"peaks_with_score",
"[",
"i",
"]",
"+",
"(",
"id",
"[",
"i",
"]",
",",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"id",
")",
")",
"]",
"all_peaks",
".",
"append",
"(",
"peaks_with_score_and_id",
")",
"peak_counter",
"+=",
"peaks_len",
"objects",
"=",
"[",
"]",
"# Check object centroid and build the objects if the centroid is found",
"for",
"nb_object",
"in",
"range",
"(",
"len",
"(",
"all_peaks",
"[",
"-",
"1",
"]",
")",
")",
":",
"if",
"all_peaks",
"[",
"-",
"1",
"]",
"[",
"nb_object",
"]",
"[",
"2",
"]",
">",
"config",
".",
"thresh_points",
":",
"objects",
".",
"append",
"(",
"[",
"[",
"all_peaks",
"[",
"-",
"1",
"]",
"[",
"nb_object",
"]",
"[",
":",
"2",
"]",
"[",
"0",
"]",
",",
"all_peaks",
"[",
"-",
"1",
"]",
"[",
"nb_object",
"]",
"[",
":",
"2",
"]",
"[",
"1",
"]",
"]",
",",
"[",
"None",
"for",
"i",
"in",
"range",
"(",
"numvertex",
")",
"]",
",",
"[",
"None",
"for",
"i",
"in",
"range",
"(",
"numvertex",
")",
"]",
",",
"all_peaks",
"[",
"-",
"1",
"]",
"[",
"nb_object",
"]",
"[",
"2",
"]",
"]",
")",
"# Working with an output that only has belief maps",
"if",
"aff",
"is",
"None",
":",
"if",
"len",
"(",
"objects",
")",
">",
"0",
"and",
"len",
"(",
"all_peaks",
")",
">",
"0",
"and",
"len",
"(",
"all_peaks",
"[",
"0",
"]",
")",
">",
"0",
":",
"for",
"i_points",
"in",
"range",
"(",
"8",
")",
":",
"if",
"len",
"(",
"all_peaks",
"[",
"i_points",
"]",
")",
">",
"0",
"and",
"all_peaks",
"[",
"i_points",
"]",
"[",
"0",
"]",
"[",
"2",
"]",
">",
"config",
".",
"threshold",
":",
"objects",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"i_points",
"]",
"=",
"(",
"all_peaks",
"[",
"i_points",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"all_peaks",
"[",
"i_points",
"]",
"[",
"0",
"]",
"[",
"1",
"]",
")",
"else",
":",
"# For all points found",
"for",
"i_lists",
"in",
"range",
"(",
"len",
"(",
"all_peaks",
"[",
":",
"-",
"1",
"]",
")",
")",
":",
"lists",
"=",
"all_peaks",
"[",
"i_lists",
"]",
"for",
"candidate",
"in",
"lists",
":",
"if",
"candidate",
"[",
"2",
"]",
"<",
"config",
".",
"thresh_points",
":",
"continue",
"i_best",
"=",
"-",
"1",
"best_dist",
"=",
"10000",
"best_angle",
"=",
"100",
"for",
"i_obj",
"in",
"range",
"(",
"len",
"(",
"objects",
")",
")",
":",
"center",
"=",
"[",
"objects",
"[",
"i_obj",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"objects",
"[",
"i_obj",
"]",
"[",
"0",
"]",
"[",
"1",
"]",
"]",
"# integer is used to look into the affinity map,",
"# but the float version is used to run",
"point_int",
"=",
"[",
"int",
"(",
"candidate",
"[",
"0",
"]",
")",
",",
"int",
"(",
"candidate",
"[",
"1",
"]",
")",
"]",
"point",
"=",
"[",
"candidate",
"[",
"0",
"]",
",",
"candidate",
"[",
"1",
"]",
"]",
"# look at the distance to the vector field.",
"v_aff",
"=",
"np",
".",
"array",
"(",
"[",
"aff",
"[",
"i_lists",
"*",
"2",
",",
"point_int",
"[",
"1",
"]",
",",
"point_int",
"[",
"0",
"]",
"]",
".",
"data",
".",
"item",
"(",
")",
",",
"aff",
"[",
"i_lists",
"*",
"2",
"+",
"1",
",",
"point_int",
"[",
"1",
"]",
",",
"point_int",
"[",
"0",
"]",
"]",
".",
"data",
".",
"item",
"(",
")",
"]",
")",
"*",
"10",
"# normalize the vector",
"xvec",
"=",
"v_aff",
"[",
"0",
"]",
"yvec",
"=",
"v_aff",
"[",
"1",
"]",
"norms",
"=",
"np",
".",
"sqrt",
"(",
"xvec",
"*",
"xvec",
"+",
"yvec",
"*",
"yvec",
")",
"xvec",
"/=",
"norms",
"yvec",
"/=",
"norms",
"v_aff",
"=",
"np",
".",
"concatenate",
"(",
"[",
"[",
"xvec",
"]",
",",
"[",
"yvec",
"]",
"]",
")",
"v_center",
"=",
"np",
".",
"array",
"(",
"center",
")",
"-",
"np",
".",
"array",
"(",
"point",
")",
"xvec",
"=",
"v_center",
"[",
"0",
"]",
"yvec",
"=",
"v_center",
"[",
"1",
"]",
"norms",
"=",
"np",
".",
"sqrt",
"(",
"xvec",
"*",
"xvec",
"+",
"yvec",
"*",
"yvec",
")",
"xvec",
"/=",
"norms",
"yvec",
"/=",
"norms",
"v_center",
"=",
"np",
".",
"concatenate",
"(",
"[",
"[",
"xvec",
"]",
",",
"[",
"yvec",
"]",
"]",
")",
"# vector affinity",
"dist_angle",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"v_center",
"-",
"v_aff",
")",
"# distance between vertexes",
"dist_point",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"np",
".",
"array",
"(",
"point",
")",
"-",
"np",
".",
"array",
"(",
"center",
")",
")",
"if",
"dist_angle",
"<",
"config",
".",
"thresh_angle",
"and",
"(",
"best_dist",
">",
"1000",
"or",
"best_dist",
">",
"dist_point",
")",
":",
"i_best",
"=",
"i_obj",
"best_angle",
"=",
"dist_angle",
"best_dist",
"=",
"dist_point",
"if",
"i_best",
"==",
"-",
"1",
":",
"continue",
"if",
"objects",
"[",
"i_best",
"]",
"[",
"1",
"]",
"[",
"i_lists",
"]",
"is",
"None",
"or",
"best_angle",
"<",
"config",
".",
"thresh_angle",
"and",
"best_dist",
"<",
"objects",
"[",
"i_best",
"]",
"[",
"2",
"]",
"[",
"i_lists",
"]",
"[",
"1",
"]",
":",
"objects",
"[",
"i_best",
"]",
"[",
"1",
"]",
"[",
"i_lists",
"]",
"=",
"(",
"(",
"candidate",
"[",
"0",
"]",
")",
"*",
"8",
",",
"(",
"candidate",
"[",
"1",
"]",
")",
"*",
"8",
")",
"objects",
"[",
"i_best",
"]",
"[",
"2",
"]",
"[",
"i_lists",
"]",
"=",
"(",
"best_angle",
",",
"best_dist",
")",
"return",
"objects",
",",
"all_peaks"
] |
https://github.com/NVlabs/Deep_Object_Pose/blob/c50e5fcd3741802484bf59c0f7bcf507b918e417/src/dope/inference/detector.py#L331-L494
|
|
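A minimal sketch of the peak-detection step used in find_objects above: smooth one belief map, then keep pixels that are greater than or equal to their four shifted neighbours and above a threshold. The belief map and the 0.01 threshold are invented, and np.roll wraps at the border instead of zero-padding, so this is an illustration rather than the repository's exact logic.

# Synthetic single-channel belief map with one bright spot.
import numpy as np
from scipy.ndimage import gaussian_filter

belief = np.zeros((50, 50), dtype=np.float32)
belief[20, 30] = 1.0
smoothed = gaussian_filter(belief, sigma=3)

# Compare each pixel against its four shifted neighbours, as in find_objects.
shifted = [np.roll(smoothed, s, axis=a) for a, s in ((0, 1), (0, -1), (1, 1), (1, -1))]
peaks_binary = np.logical_and.reduce([smoothed >= s for s in shifted] + [smoothed > 0.01])
print([(int(r), int(c)) for r, c in zip(*np.nonzero(peaks_binary))])  # [(20, 30)]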
scottlawsonbc/audio-reactive-led-strip
|
a278cf8a4455b56b1f5d5238929bf8817f9a4e8e
|
python/led.py
|
python
|
_update_blinkstick
|
()
|
Writes new LED values to the Blinkstick.
This function updates the LED strip with new values.
|
Writes new LED values to the Blinkstick.
This function updates the LED strip with new values.
|
[
"Writes",
"new",
"LED",
"values",
"to",
"the",
"Blinkstick",
".",
"This",
"function",
"updates",
"the",
"LED",
"strip",
"with",
"new",
"values",
"."
] |
def _update_blinkstick():
"""Writes new LED values to the Blinkstick.
This function updates the LED strip with new values.
"""
global pixels
# Truncate values and cast to integer
pixels = np.clip(pixels, 0, 255).astype(int)
# Optional gamma correction
p = _gamma[pixels] if config.SOFTWARE_GAMMA_CORRECTION else np.copy(pixels)
# Read the rgb values
r = p[0][:].astype(int)
g = p[1][:].astype(int)
b = p[2][:].astype(int)
#create array in which we will store the led states
newstrip = [None]*(config.N_PIXELS*3)
for i in range(config.N_PIXELS):
# blinkstick uses GRB format
newstrip[i*3] = g[i]
newstrip[i*3+1] = r[i]
newstrip[i*3+2] = b[i]
#send the data to the blinkstick
stick.set_led_data(0, newstrip)
|
[
"def",
"_update_blinkstick",
"(",
")",
":",
"global",
"pixels",
"# Truncate values and cast to integer",
"pixels",
"=",
"np",
".",
"clip",
"(",
"pixels",
",",
"0",
",",
"255",
")",
".",
"astype",
"(",
"int",
")",
"# Optional gamma correction",
"p",
"=",
"_gamma",
"[",
"pixels",
"]",
"if",
"config",
".",
"SOFTWARE_GAMMA_CORRECTION",
"else",
"np",
".",
"copy",
"(",
"pixels",
")",
"# Read the rgb values",
"r",
"=",
"p",
"[",
"0",
"]",
"[",
":",
"]",
".",
"astype",
"(",
"int",
")",
"g",
"=",
"p",
"[",
"1",
"]",
"[",
":",
"]",
".",
"astype",
"(",
"int",
")",
"b",
"=",
"p",
"[",
"2",
"]",
"[",
":",
"]",
".",
"astype",
"(",
"int",
")",
"#create array in which we will store the led states",
"newstrip",
"=",
"[",
"None",
"]",
"*",
"(",
"config",
".",
"N_PIXELS",
"*",
"3",
")",
"for",
"i",
"in",
"range",
"(",
"config",
".",
"N_PIXELS",
")",
":",
"# blinkstick uses GRB format",
"newstrip",
"[",
"i",
"*",
"3",
"]",
"=",
"g",
"[",
"i",
"]",
"newstrip",
"[",
"i",
"*",
"3",
"+",
"1",
"]",
"=",
"r",
"[",
"i",
"]",
"newstrip",
"[",
"i",
"*",
"3",
"+",
"2",
"]",
"=",
"b",
"[",
"i",
"]",
"#send the data to the blinkstick",
"stick",
".",
"set_led_data",
"(",
"0",
",",
"newstrip",
")"
] |
https://github.com/scottlawsonbc/audio-reactive-led-strip/blob/a278cf8a4455b56b1f5d5238929bf8817f9a4e8e/python/led.py#L112-L136
|
||
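The GRB interleaving done in _update_blinkstick above can be checked without a BlinkStick attached; the 3xN pixel array below is invented, and the loop mirrors the reordering in the entry (the quoted code notes the device expects GRB order).

import numpy as np

pixels = np.array([[255, 0],    # R channel for two LEDs
                   [0, 255],    # G channel
                   [0, 0]])     # B channel
p = np.clip(pixels, 0, 255).astype(int)
r, g, b = p[0], p[1], p[2]

# Flatten LED by LED, green first, then red, then blue.
newstrip = [None] * (p.shape[1] * 3)
for i in range(p.shape[1]):
    newstrip[i * 3] = int(g[i])
    newstrip[i * 3 + 1] = int(r[i])
    newstrip[i * 3 + 2] = int(b[i])
print(newstrip)  # [0, 255, 0, 255, 0, 0] -> LED 0 is red, LED 1 is green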
smart-mobile-software/gitstack
|
d9fee8f414f202143eb6e620529e8e5539a2af56
|
python/Lib/logging/__init__.py
|
python
|
Manager.setLoggerClass
|
(self, klass)
|
Set the class to be used when instantiating a logger with this Manager.
|
Set the class to be used when instantiating a logger with this Manager.
|
[
"Set",
"the",
"class",
"to",
"be",
"used",
"when",
"instantiating",
"a",
"logger",
"with",
"this",
"Manager",
"."
] |
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
|
[
"def",
"setLoggerClass",
"(",
"self",
",",
"klass",
")",
":",
"if",
"klass",
"!=",
"Logger",
":",
"if",
"not",
"issubclass",
"(",
"klass",
",",
"Logger",
")",
":",
"raise",
"TypeError",
"(",
"\"logger not derived from logging.Logger: \"",
"+",
"klass",
".",
"__name__",
")",
"self",
".",
"loggerClass",
"=",
"klass"
] |
https://github.com/smart-mobile-software/gitstack/blob/d9fee8f414f202143eb6e620529e8e5539a2af56/python/Lib/logging/__init__.py#L1026-L1034
|
||
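The type check above is what rejects classes that are not Logger subclasses. A short usage sketch against the standard library's own manager; the AuditLogger class and logger name are invented here.

import logging

class AuditLogger(logging.Logger):
    """Example subclass; adds nothing, just shows the hook taking effect."""

logging.Logger.manager.setLoggerClass(AuditLogger)   # accepted: Logger subclass
log = logging.getLogger("example.audit")
print(isinstance(log, AuditLogger))                   # True

try:
    logging.Logger.manager.setLoggerClass(dict)       # rejected by the check above
except TypeError as exc:
    print(exc)   # logger not derived from logging.Logger: dict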
andresriancho/w3af
|
cd22e5252243a87aaa6d0ddea47cf58dacfe00a9
|
w3af/plugins/audit/dav.py
|
python
|
dav.__init__
|
(self)
|
[] |
def __init__(self):
AuditPlugin.__init__(self)
# Internal variables
self._already_tested_dirs = ScalableBloomFilter()
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"AuditPlugin",
".",
"__init__",
"(",
"self",
")",
"# Internal variables",
"self",
".",
"_already_tested_dirs",
"=",
"ScalableBloomFilter",
"(",
")"
] |
https://github.com/andresriancho/w3af/blob/cd22e5252243a87aaa6d0ddea47cf58dacfe00a9/w3af/plugins/audit/dav.py#L44-L48
|
||||
mozillazg/pypy
|
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
|
pypy/module/math/interp_math.py
|
python
|
erf
|
(space, w_x)
|
return math1(space, rfloat.erf, w_x)
|
The error function
|
The error function
|
[
"The",
"error",
"function"
] |
def erf(space, w_x):
"""The error function"""
return math1(space, rfloat.erf, w_x)
|
[
"def",
"erf",
"(",
"space",
",",
"w_x",
")",
":",
"return",
"math1",
"(",
"space",
",",
"rfloat",
".",
"erf",
",",
"w_x",
")"
] |
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/pypy/module/math/interp_math.py#L383-L385
|
|
khanhnamle1994/natural-language-processing
|
01d450d5ac002b0156ef4cf93a07cb508c1bcdc5
|
assignment1/.env/lib/python2.7/site-packages/numpy/polynomial/polynomial.py
|
python
|
polyroots
|
(c)
|
return r
|
Compute the roots of a polynomial.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * x^i.
Parameters
----------
c : 1-D array_like
1-D array of polynomial coefficients.
Returns
-------
out : ndarray
Array of the roots of the polynomial. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
chebroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the power series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
Examples
--------
>>> import numpy.polynomial.polynomial as poly
>>> poly.polyroots(poly.polyfromroots((-1,0,1)))
array([-1., 0., 1.])
>>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype
dtype('float64')
>>> j = complex(0,1)
>>> poly.polyroots(poly.polyfromroots((-j,0,j)))
array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j])
|
Compute the roots of a polynomial.
|
[
"Compute",
"the",
"roots",
"of",
"a",
"polynomial",
"."
] |
def polyroots(c):
"""
Compute the roots of a polynomial.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * x^i.
Parameters
----------
c : 1-D array_like
1-D array of polynomial coefficients.
Returns
-------
out : ndarray
Array of the roots of the polynomial. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
chebroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the power series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
Examples
--------
>>> import numpy.polynomial.polynomial as poly
>>> poly.polyroots(poly.polyfromroots((-1,0,1)))
array([-1., 0., 1.])
>>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype
dtype('float64')
>>> j = complex(0,1)
>>> poly.polyroots(poly.polyfromroots((-j,0,j)))
array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = polycompanion(c)
r = la.eigvals(m)
r.sort()
return r
|
[
"def",
"polyroots",
"(",
"c",
")",
":",
"# c is a trimmed copy",
"[",
"c",
"]",
"=",
"pu",
".",
"as_series",
"(",
"[",
"c",
"]",
")",
"if",
"len",
"(",
"c",
")",
"<",
"2",
":",
"return",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"c",
".",
"dtype",
")",
"if",
"len",
"(",
"c",
")",
"==",
"2",
":",
"return",
"np",
".",
"array",
"(",
"[",
"-",
"c",
"[",
"0",
"]",
"/",
"c",
"[",
"1",
"]",
"]",
")",
"m",
"=",
"polycompanion",
"(",
"c",
")",
"r",
"=",
"la",
".",
"eigvals",
"(",
"m",
")",
"r",
".",
"sort",
"(",
")",
"return",
"r"
] |
https://github.com/khanhnamle1994/natural-language-processing/blob/01d450d5ac002b0156ef4cf93a07cb508c1bcdc5/assignment1/.env/lib/python2.7/site-packages/numpy/polynomial/polynomial.py#L1431-L1486
|
|
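The routine above reduces root finding to an eigenvalue problem on the companion matrix. Below is a hand-rolled equivalent for the quadratic p(x) = -1 + 0*x + 1*x**2, i.e. coefficients c = [-1, 0, 1] ordered from low to high degree, whose roots are -1 and +1; the companion construction is meant to follow the same convention as numpy's polycompanion, as a sketch rather than a drop-in replacement.

import numpy as np

c = np.array([-1.0, 0.0, 1.0])          # p(x) = -1 + 0*x + 1*x**2
n = len(c) - 1                           # degree of the polynomial
companion = np.zeros((n, n))
companion[1:, :-1] = np.eye(n - 1)       # ones on the sub-diagonal
companion[:, -1] = -c[:-1] / c[-1]       # last column from the coefficients
roots = np.sort(np.linalg.eigvals(companion))
print(roots)                             # [-1.  1.]

polyroots itself additionally trims the coefficient series and short-circuits the constant and linear cases before falling back to this eigenvalue route.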
saltstack/salt
|
fae5bc757ad0f1716483ce7ae180b451545c2058
|
salt/modules/zypperpkg.py
|
python
|
_find_types
|
(pkgs)
|
return sorted({pkg.split(":", 1)[0] for pkg in pkgs if len(pkg.split(":", 1)) == 2})
|
From a package names list, find prefixes of package types.
|
From a package names list, find prefixes of package types.
|
[
"Form",
"a",
"package",
"names",
"list",
"find",
"prefixes",
"of",
"packages",
"types",
"."
] |
def _find_types(pkgs):
    """From a package names list, find prefixes of package types."""
return sorted({pkg.split(":", 1)[0] for pkg in pkgs if len(pkg.split(":", 1)) == 2})
|
[
"def",
"_find_types",
"(",
"pkgs",
")",
":",
"return",
"sorted",
"(",
"{",
"pkg",
".",
"split",
"(",
"\":\"",
",",
"1",
")",
"[",
"0",
"]",
"for",
"pkg",
"in",
"pkgs",
"if",
"len",
"(",
"pkg",
".",
"split",
"(",
"\":\"",
",",
"1",
")",
")",
"==",
"2",
"}",
")"
] |
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/zypperpkg.py#L1415-L1417
|
|
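On invented zypper-style package names, the one-liner above keeps only the prefixes of entries that actually carry a "type:" prefix:

pkgs = ["patch:openSUSE-2024-1", "pattern:lamp_server", "vim"]   # invented names
print(sorted({pkg.split(":", 1)[0] for pkg in pkgs if len(pkg.split(":", 1)) == 2}))
# ['patch', 'pattern']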
zyfra/ebonite
|
b01b662c43709d152940f488574d78ff25f89ecf
|
src/ebonite/ext/s3/artifact.py
|
python
|
S3Blob.materialize
|
(self, path)
|
[] |
def materialize(self, path):
logger.debug('Downloading file from %s to %s', self.s3path, path)
os.makedirs(os.path.dirname(path), exist_ok=True)
self._s3.download_file(self.bucket_name, self.s3path, path)
|
[
"def",
"materialize",
"(",
"self",
",",
"path",
")",
":",
"logger",
".",
"debug",
"(",
"'Downloading file from %s to %s'",
",",
"self",
".",
"s3path",
",",
"path",
")",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
",",
"exist_ok",
"=",
"True",
")",
"self",
".",
"_s3",
".",
"download_file",
"(",
"self",
".",
"bucket_name",
",",
"self",
".",
"s3path",
",",
"path",
")"
] |
https://github.com/zyfra/ebonite/blob/b01b662c43709d152940f488574d78ff25f89ecf/src/ebonite/ext/s3/artifact.py#L68-L71
|
||||
hzlzh/AlfredWorkflow.com
|
7055f14f6922c80ea5943839eb0caff11ae57255
|
Sources/Workflows/KindleBookstore/PyAl/Request/requests/packages/oauthlib/oauth2/draft25/parameters.py
|
python
|
parse_authorization_code_response
|
(uri, state=None)
|
return params
|
Parse authorization grant response URI into a dict.
If the resource owner grants the access request, the authorization
server issues an authorization code and delivers it to the client by
adding the following parameters to the query component of the
redirection URI using the "application/x-www-form-urlencoded" format:
code
REQUIRED. The authorization code generated by the
authorization server. The authorization code MUST expire
shortly after it is issued to mitigate the risk of leaks. A
maximum authorization code lifetime of 10 minutes is
RECOMMENDED. The client MUST NOT use the authorization code
more than once. If an authorization code is used more than
once, the authorization server MUST deny the request and SHOULD
revoke (when possible) all tokens previously issued based on
that authorization code. The authorization code is bound to
the client identifier and redirection URI.
state
REQUIRED if the "state" parameter was present in the client
authorization request. The exact value received from the
client.
For example, the authorization server redirects the user-agent by
sending the following HTTP response:
HTTP/1.1 302 Found
Location: https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA
&state=xyz
|
Parse authorization grant response URI into a dict.
|
[
"Parse",
"authorization",
"grant",
"response",
"URI",
"into",
"a",
"dict",
"."
] |
def parse_authorization_code_response(uri, state=None):
"""Parse authorization grant response URI into a dict.
If the resource owner grants the access request, the authorization
server issues an authorization code and delivers it to the client by
adding the following parameters to the query component of the
redirection URI using the "application/x-www-form-urlencoded" format:
code
REQUIRED. The authorization code generated by the
authorization server. The authorization code MUST expire
shortly after it is issued to mitigate the risk of leaks. A
maximum authorization code lifetime of 10 minutes is
RECOMMENDED. The client MUST NOT use the authorization code
more than once. If an authorization code is used more than
once, the authorization server MUST deny the request and SHOULD
revoke (when possible) all tokens previously issued based on
that authorization code. The authorization code is bound to
the client identifier and redirection URI.
state
REQUIRED if the "state" parameter was present in the client
authorization request. The exact value received from the
client.
For example, the authorization server redirects the user-agent by
sending the following HTTP response:
HTTP/1.1 302 Found
Location: https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA
&state=xyz
"""
query = urlparse.urlparse(uri).query
params = dict(urlparse.parse_qsl(query))
if not u'code' in params:
raise KeyError("Missing code parameter in response.")
if state and params.get(u'state', None) != state:
raise ValueError("Mismatching or missing state in response.")
return params
|
[
"def",
"parse_authorization_code_response",
"(",
"uri",
",",
"state",
"=",
"None",
")",
":",
"query",
"=",
"urlparse",
".",
"urlparse",
"(",
"uri",
")",
".",
"query",
"params",
"=",
"dict",
"(",
"urlparse",
".",
"parse_qsl",
"(",
"query",
")",
")",
"if",
"not",
"u'code'",
"in",
"params",
":",
"raise",
"KeyError",
"(",
"\"Missing code parameter in response.\"",
")",
"if",
"state",
"and",
"params",
".",
"get",
"(",
"u'state'",
",",
"None",
")",
"!=",
"state",
":",
"raise",
"ValueError",
"(",
"\"Mismatching or missing state in response.\"",
")",
"return",
"params"
] |
https://github.com/hzlzh/AlfredWorkflow.com/blob/7055f14f6922c80ea5943839eb0caff11ae57255/Sources/Workflows/KindleBookstore/PyAl/Request/requests/packages/oauthlib/oauth2/draft25/parameters.py#L95-L136
|
|
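The same parsing with the Python 3 standard library (the vendored code above targets the Python 2 urlparse module); the redirect URI is taken from the docstring example.

from urllib.parse import urlparse, parse_qsl

uri = "https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA&state=xyz"
params = dict(parse_qsl(urlparse(uri).query))

if "code" not in params:
    raise KeyError("Missing code parameter in response.")
if params.get("state") != "xyz":
    raise ValueError("Mismatching or missing state in response.")
print(params)  # {'code': 'SplxlOBeZQQYbYS6WxSbIA', 'state': 'xyz'}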
p2pool/p2pool
|
53c438bbada06b9d4a9a465bc13f7694a7a322b7
|
p2pool/util/forest.py
|
python
|
DistanceSkipList.apply_delta
|
(self, (dist1, to_hash1), (from_hash2, dist2, to_hash2), (n,))
|
return dist1 + dist2, to_hash2
|
[] |
def apply_delta(self, (dist1, to_hash1), (from_hash2, dist2, to_hash2), (n,)):
if to_hash1 != from_hash2:
raise AssertionError()
return dist1 + dist2, to_hash2
|
[
"def",
"apply_delta",
"(",
"self",
",",
"(",
"dist1",
",",
"to_hash1",
")",
",",
"(",
"from_hash2",
",",
"dist2",
",",
"to_hash2",
")",
",",
"(",
"n",
",",
")",
")",
":",
"if",
"to_hash1",
"!=",
"from_hash2",
":",
"raise",
"AssertionError",
"(",
")",
"return",
"dist1",
"+",
"dist2",
",",
"to_hash2"
] |
https://github.com/p2pool/p2pool/blob/53c438bbada06b9d4a9a465bc13f7694a7a322b7/p2pool/util/forest.py#L33-L36
|
|||
openhatch/oh-mainline
|
ce29352a034e1223141dcc2f317030bbc3359a51
|
vendor/packages/Django/django/contrib/formtools/wizard/views.py
|
python
|
WizardView.post
|
(self, *args, **kwargs)
|
return self.render(form)
|
This method handles POST requests.
The wizard will render either the current step (if form validation
wasn't successful), the next step (if the current step was stored
successfully) or the done view (if no more steps are available)
|
This method handles POST requests.
|
[
"This",
"method",
"handles",
"POST",
"requests",
"."
] |
def post(self, *args, **kwargs):
"""
This method handles POST requests.
The wizard will render either the current step (if form validation
wasn't successful), the next step (if the current step was stored
        successfully) or the done view (if no more steps are available)
"""
# Look for a wizard_goto_step element in the posted data which
# contains a valid step name. If one was found, render the requested
# form. (This makes stepping back a lot easier).
wizard_goto_step = self.request.POST.get('wizard_goto_step', None)
if wizard_goto_step and wizard_goto_step in self.get_form_list():
self.storage.current_step = wizard_goto_step
form = self.get_form(
data=self.storage.get_step_data(self.steps.current),
files=self.storage.get_step_files(self.steps.current))
return self.render(form)
# Check if form was refreshed
management_form = ManagementForm(self.request.POST, prefix=self.prefix)
if not management_form.is_valid():
raise ValidationError(
'ManagementForm data is missing or has been tampered.')
form_current_step = management_form.cleaned_data['current_step']
if (form_current_step != self.steps.current and
self.storage.current_step is not None):
# form refreshed, change current step
self.storage.current_step = form_current_step
# get the form for the current step
form = self.get_form(data=self.request.POST, files=self.request.FILES)
# and try to validate
if form.is_valid():
# if the form is valid, store the cleaned data and files.
self.storage.set_step_data(self.steps.current, self.process_step(form))
self.storage.set_step_files(self.steps.current, self.process_step_files(form))
# check if the current step is the last step
if self.steps.current == self.steps.last:
# no more steps, render done view
return self.render_done(form, **kwargs)
else:
# proceed to the next step
return self.render_next_step(form)
return self.render(form)
|
[
"def",
"post",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Look for a wizard_goto_step element in the posted data which",
"# contains a valid step name. If one was found, render the requested",
"# form. (This makes stepping back a lot easier).",
"wizard_goto_step",
"=",
"self",
".",
"request",
".",
"POST",
".",
"get",
"(",
"'wizard_goto_step'",
",",
"None",
")",
"if",
"wizard_goto_step",
"and",
"wizard_goto_step",
"in",
"self",
".",
"get_form_list",
"(",
")",
":",
"self",
".",
"storage",
".",
"current_step",
"=",
"wizard_goto_step",
"form",
"=",
"self",
".",
"get_form",
"(",
"data",
"=",
"self",
".",
"storage",
".",
"get_step_data",
"(",
"self",
".",
"steps",
".",
"current",
")",
",",
"files",
"=",
"self",
".",
"storage",
".",
"get_step_files",
"(",
"self",
".",
"steps",
".",
"current",
")",
")",
"return",
"self",
".",
"render",
"(",
"form",
")",
"# Check if form was refreshed",
"management_form",
"=",
"ManagementForm",
"(",
"self",
".",
"request",
".",
"POST",
",",
"prefix",
"=",
"self",
".",
"prefix",
")",
"if",
"not",
"management_form",
".",
"is_valid",
"(",
")",
":",
"raise",
"ValidationError",
"(",
"'ManagementForm data is missing or has been tampered.'",
")",
"form_current_step",
"=",
"management_form",
".",
"cleaned_data",
"[",
"'current_step'",
"]",
"if",
"(",
"form_current_step",
"!=",
"self",
".",
"steps",
".",
"current",
"and",
"self",
".",
"storage",
".",
"current_step",
"is",
"not",
"None",
")",
":",
"# form refreshed, change current step",
"self",
".",
"storage",
".",
"current_step",
"=",
"form_current_step",
"# get the form for the current step",
"form",
"=",
"self",
".",
"get_form",
"(",
"data",
"=",
"self",
".",
"request",
".",
"POST",
",",
"files",
"=",
"self",
".",
"request",
".",
"FILES",
")",
"# and try to validate",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"# if the form is valid, store the cleaned data and files.",
"self",
".",
"storage",
".",
"set_step_data",
"(",
"self",
".",
"steps",
".",
"current",
",",
"self",
".",
"process_step",
"(",
"form",
")",
")",
"self",
".",
"storage",
".",
"set_step_files",
"(",
"self",
".",
"steps",
".",
"current",
",",
"self",
".",
"process_step_files",
"(",
"form",
")",
")",
"# check if the current step is the last step",
"if",
"self",
".",
"steps",
".",
"current",
"==",
"self",
".",
"steps",
".",
"last",
":",
"# no more steps, render done view",
"return",
"self",
".",
"render_done",
"(",
"form",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"# proceed to the next step",
"return",
"self",
".",
"render_next_step",
"(",
"form",
")",
"return",
"self",
".",
"render",
"(",
"form",
")"
] |
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/Django/django/contrib/formtools/wizard/views.py#L247-L294
|
|
datacenter/acitoolkit
|
629b84887dd0f0183b81efc8adb16817f985541a
|
applications/search/aciSearchDb.py
|
python
|
SearchObjectStore.get_by_uids_short
|
(self, uids)
|
return result
|
Will return a dictionary indexed by uid, where each entry is a dictionary holding the class and name
of the object referenced by the uid.
:param uids: list of UIDs
|
Will return a dictionary indexed by uid, where each entry is a dictionary holding the class and name
of the object referenced by the uid.
:param uids: list of UIDs
|
[
"Will",
"return",
"a",
"dictionary",
"indexed",
"by",
"uid",
"where",
"each",
"entry",
"is",
"a",
"dictionary",
"holding",
"the",
"class",
"and",
"name",
"of",
"the",
"object",
"refereced",
"by",
"the",
"uid",
".",
":",
"param",
"uids",
":",
"list",
"of",
"UIDs"
] |
def get_by_uids_short(self, uids):
"""
Will return a dictionary indexed by uid, where each entry is a dictionary holding the class and name
        of the object referenced by the uid.
:param uids: list of UIDs
"""
result = {}
if not APIC:
for uid in uids:
atk_obj = self.object_directory[uid]
record = {'class': atk_obj.__class__.__name__,
'name': atk_obj.get_attributes()['name'],
'dn': atk_obj.get_attributes()['dn']}
result[uid] = record
else:
# need to read from dB
            # read one record where uid is uid and attribute is name, get name value and class
for uid in uids:
result[uid] = self._get_info_from_sql(uid)
return result
|
[
"def",
"get_by_uids_short",
"(",
"self",
",",
"uids",
")",
":",
"result",
"=",
"{",
"}",
"if",
"not",
"APIC",
":",
"for",
"uid",
"in",
"uids",
":",
"atk_obj",
"=",
"self",
".",
"object_directory",
"[",
"uid",
"]",
"record",
"=",
"{",
"'class'",
":",
"atk_obj",
".",
"__class__",
".",
"__name__",
",",
"'name'",
":",
"atk_obj",
".",
"get_attributes",
"(",
")",
"[",
"'name'",
"]",
",",
"'dn'",
":",
"atk_obj",
".",
"get_attributes",
"(",
")",
"[",
"'dn'",
"]",
"}",
"result",
"[",
"uid",
"]",
"=",
"record",
"else",
":",
"# need to read from dB",
"# read one record where uid is uid and attribue is name, get name value and class",
"for",
"uid",
"in",
"uids",
":",
"result",
"[",
"uid",
"]",
"=",
"self",
".",
"_get_info_from_sql",
"(",
"uid",
")",
"return",
"result"
] |
https://github.com/datacenter/acitoolkit/blob/629b84887dd0f0183b81efc8adb16817f985541a/applications/search/aciSearchDb.py#L910-L931
|
|
smart-mobile-software/gitstack
|
d9fee8f414f202143eb6e620529e8e5539a2af56
|
python/Lib/lib-tk/Tix.py
|
python
|
TixWidget.subwidgets_all
|
(self)
|
return retlist
|
Return all subwidgets.
|
Return all subwidgets.
|
[
"Return",
"all",
"subwidgets",
"."
] |
def subwidgets_all(self):
"""Return all subwidgets."""
names = self._subwidget_names()
if not names:
return []
retlist = []
for name in names:
name = name[len(self._w)+1:]
try:
retlist.append(self._nametowidget(name))
except:
# some of the widgets are unknown e.g. border in LabelFrame
pass
return retlist
|
[
"def",
"subwidgets_all",
"(",
"self",
")",
":",
"names",
"=",
"self",
".",
"_subwidget_names",
"(",
")",
"if",
"not",
"names",
":",
"return",
"[",
"]",
"retlist",
"=",
"[",
"]",
"for",
"name",
"in",
"names",
":",
"name",
"=",
"name",
"[",
"len",
"(",
"self",
".",
"_w",
")",
"+",
"1",
":",
"]",
"try",
":",
"retlist",
".",
"append",
"(",
"self",
".",
"_nametowidget",
"(",
"name",
")",
")",
"except",
":",
"# some of the widgets are unknown e.g. border in LabelFrame",
"pass",
"return",
"retlist"
] |
https://github.com/smart-mobile-software/gitstack/blob/d9fee8f414f202143eb6e620529e8e5539a2af56/python/Lib/lib-tk/Tix.py#L357-L370
|
|
clinton-hall/nzbToMedia
|
27669389216902d1085660167e7bda0bd8527ecf
|
libs/common/setuptools/_vendor/pyparsing.py
|
python
|
oneOf
|
( strs, caseless=False, useRegex=True )
|
return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
|
Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a C{L{MatchFirst}} for best performance.
Parameters:
- strs - a string of space-delimited literals, or a collection of string literals
- caseless - (default=C{False}) - treat all literals as caseless
- useRegex - (default=C{True}) - as an optimization, will generate a Regex
object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
if creating a C{Regex} raises an exception)
Example::
comp_oper = oneOf("< = > <= >= !=")
var = Word(alphas)
number = Word(nums)
term = var | number
comparison_expr = term + comp_oper + term
print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
prints::
[['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
|
Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a C{L{MatchFirst}} for best performance.
|
[
"Helper",
"to",
"quickly",
"define",
"a",
"set",
"of",
"alternative",
"Literals",
"and",
"makes",
"sure",
"to",
"do",
"longest",
"-",
"first",
"testing",
"when",
"there",
"is",
"a",
"conflict",
"regardless",
"of",
"the",
"input",
"order",
"but",
"returns",
"a",
"C",
"{",
"L",
"{",
"MatchFirst",
"}}",
"for",
"best",
"performance",
"."
] |
def oneOf( strs, caseless=False, useRegex=True ):
"""
Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a C{L{MatchFirst}} for best performance.
Parameters:
- strs - a string of space-delimited literals, or a collection of string literals
- caseless - (default=C{False}) - treat all literals as caseless
- useRegex - (default=C{True}) - as an optimization, will generate a Regex
object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
if creating a C{Regex} raises an exception)
Example::
comp_oper = oneOf("< = > <= >= !=")
var = Word(alphas)
number = Word(nums)
term = var | number
comparison_expr = term + comp_oper + term
print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
prints::
[['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
"""
if caseless:
isequal = ( lambda a,b: a.upper() == b.upper() )
masks = ( lambda a,b: b.upper().startswith(a.upper()) )
parseElementClass = CaselessLiteral
else:
isequal = ( lambda a,b: a == b )
masks = ( lambda a,b: b.startswith(a) )
parseElementClass = Literal
symbols = []
if isinstance(strs,basestring):
symbols = strs.split()
elif isinstance(strs, Iterable):
symbols = list(strs)
else:
warnings.warn("Invalid argument to oneOf, expected string or iterable",
SyntaxWarning, stacklevel=2)
if not symbols:
return NoMatch()
i = 0
while i < len(symbols)-1:
cur = symbols[i]
for j,other in enumerate(symbols[i+1:]):
if ( isequal(other, cur) ):
del symbols[i+j+1]
break
elif ( masks(cur, other) ):
del symbols[i+j+1]
symbols.insert(i,other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
#~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols)==len("".join(symbols)):
return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
else:
return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
except Exception:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
|
[
"def",
"oneOf",
"(",
"strs",
",",
"caseless",
"=",
"False",
",",
"useRegex",
"=",
"True",
")",
":",
"if",
"caseless",
":",
"isequal",
"=",
"(",
"lambda",
"a",
",",
"b",
":",
"a",
".",
"upper",
"(",
")",
"==",
"b",
".",
"upper",
"(",
")",
")",
"masks",
"=",
"(",
"lambda",
"a",
",",
"b",
":",
"b",
".",
"upper",
"(",
")",
".",
"startswith",
"(",
"a",
".",
"upper",
"(",
")",
")",
")",
"parseElementClass",
"=",
"CaselessLiteral",
"else",
":",
"isequal",
"=",
"(",
"lambda",
"a",
",",
"b",
":",
"a",
"==",
"b",
")",
"masks",
"=",
"(",
"lambda",
"a",
",",
"b",
":",
"b",
".",
"startswith",
"(",
"a",
")",
")",
"parseElementClass",
"=",
"Literal",
"symbols",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"strs",
",",
"basestring",
")",
":",
"symbols",
"=",
"strs",
".",
"split",
"(",
")",
"elif",
"isinstance",
"(",
"strs",
",",
"Iterable",
")",
":",
"symbols",
"=",
"list",
"(",
"strs",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"\"Invalid argument to oneOf, expected string or iterable\"",
",",
"SyntaxWarning",
",",
"stacklevel",
"=",
"2",
")",
"if",
"not",
"symbols",
":",
"return",
"NoMatch",
"(",
")",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"symbols",
")",
"-",
"1",
":",
"cur",
"=",
"symbols",
"[",
"i",
"]",
"for",
"j",
",",
"other",
"in",
"enumerate",
"(",
"symbols",
"[",
"i",
"+",
"1",
":",
"]",
")",
":",
"if",
"(",
"isequal",
"(",
"other",
",",
"cur",
")",
")",
":",
"del",
"symbols",
"[",
"i",
"+",
"j",
"+",
"1",
"]",
"break",
"elif",
"(",
"masks",
"(",
"cur",
",",
"other",
")",
")",
":",
"del",
"symbols",
"[",
"i",
"+",
"j",
"+",
"1",
"]",
"symbols",
".",
"insert",
"(",
"i",
",",
"other",
")",
"cur",
"=",
"other",
"break",
"else",
":",
"i",
"+=",
"1",
"if",
"not",
"caseless",
"and",
"useRegex",
":",
"#~ print (strs,\"->\", \"|\".join( [ _escapeRegexChars(sym) for sym in symbols] ))",
"try",
":",
"if",
"len",
"(",
"symbols",
")",
"==",
"len",
"(",
"\"\"",
".",
"join",
"(",
"symbols",
")",
")",
":",
"return",
"Regex",
"(",
"\"[%s]\"",
"%",
"\"\"",
".",
"join",
"(",
"_escapeRegexRangeChars",
"(",
"sym",
")",
"for",
"sym",
"in",
"symbols",
")",
")",
".",
"setName",
"(",
"' | '",
".",
"join",
"(",
"symbols",
")",
")",
"else",
":",
"return",
"Regex",
"(",
"\"|\"",
".",
"join",
"(",
"re",
".",
"escape",
"(",
"sym",
")",
"for",
"sym",
"in",
"symbols",
")",
")",
".",
"setName",
"(",
"' | '",
".",
"join",
"(",
"symbols",
")",
")",
"except",
"Exception",
":",
"warnings",
".",
"warn",
"(",
"\"Exception creating Regex for oneOf, building MatchFirst\"",
",",
"SyntaxWarning",
",",
"stacklevel",
"=",
"2",
")",
"# last resort, just use MatchFirst",
"return",
"MatchFirst",
"(",
"parseElementClass",
"(",
"sym",
")",
"for",
"sym",
"in",
"symbols",
")",
".",
"setName",
"(",
"' | '",
".",
"join",
"(",
"symbols",
")",
")"
] |
https://github.com/clinton-hall/nzbToMedia/blob/27669389216902d1085660167e7bda0bd8527ecf/libs/common/setuptools/_vendor/pyparsing.py#L4573-L4644
|
|
openshift/openshift-tools
|
1188778e728a6e4781acf728123e5b356380fe6f
|
openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_openshift/library/oc_env.py
|
python
|
Utils.openshift_installed
|
()
|
return rpmquery.count() > 0
|
check if openshift is installed
|
check if openshift is installed
|
[
"check",
"if",
"openshift",
"is",
"installed"
] |
def openshift_installed():
''' check if openshift is installed '''
import rpm
transaction_set = rpm.TransactionSet()
rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
return rpmquery.count() > 0
|
[
"def",
"openshift_installed",
"(",
")",
":",
"import",
"rpm",
"transaction_set",
"=",
"rpm",
".",
"TransactionSet",
"(",
")",
"rpmquery",
"=",
"transaction_set",
".",
"dbMatch",
"(",
"\"name\"",
",",
"\"atomic-openshift\"",
")",
"return",
"rpmquery",
".",
"count",
"(",
")",
">",
"0"
] |
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_openshift/library/oc_env.py#L1325-L1332
|
|
guillermooo/Vintageous
|
f958207009902052aed5fcac09745f1742648604
|
ex_main.py
|
python
|
ViColonInput.__init__
|
(self, window)
|
[] |
def __init__(self, window):
sublime_plugin.WindowCommand.__init__(self, window)
|
[
"def",
"__init__",
"(",
"self",
",",
"window",
")",
":",
"sublime_plugin",
".",
"WindowCommand",
".",
"__init__",
"(",
"self",
",",
"window",
")"
] |
https://github.com/guillermooo/Vintageous/blob/f958207009902052aed5fcac09745f1742648604/ex_main.py#L55-L56
|
||||
HaoZhang95/Python24
|
b897224b8a0e6a5734f408df8c24846a98c553bf
|
00Python/venv/Lib/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/html5lib/treewalkers/base.py
|
python
|
TreeWalker.comment
|
(self, data)
|
return {"type": "Comment", "data": data}
|
Generates a Comment token
:arg data: the comment
:returns: Comment token
|
Generates a Comment token
|
[
"Generates",
"a",
"Comment",
"token"
] |
def comment(self, data):
"""Generates a Comment token
:arg data: the comment
:returns: Comment token
"""
return {"type": "Comment", "data": data}
|
[
"def",
"comment",
"(",
"self",
",",
"data",
")",
":",
"return",
"{",
"\"type\"",
":",
"\"Comment\"",
",",
"\"data\"",
":",
"data",
"}"
] |
https://github.com/HaoZhang95/Python24/blob/b897224b8a0e6a5734f408df8c24846a98c553bf/00Python/venv/Lib/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/html5lib/treewalkers/base.py#L138-L146
|
|
wucng/TensorExpand
|
4ea58f64f5c5082b278229b799c9f679536510b7
|
TensorExpand/图片项目/5、迁移学习/VGG迁移学习/查看pb文件的op名.py
|
python
|
print_ops_shape
|
()
|
[] |
def print_ops_shape():
tensor_name_transfer_layer = "pool5:0"
graph = tf.Graph()
with graph.as_default():
create_graph()
with tf.Session() as sess:
# ops = sess.graph.get_operations()
# for op in ops:
# print(op.name)
transfer_layer = graph.get_tensor_by_name(tensor_name_transfer_layer)
print(transfer_layer.shape)
|
[
"def",
"print_ops_shape",
"(",
")",
":",
"tensor_name_transfer_layer",
"=",
"\"pool5:0\"",
"graph",
"=",
"tf",
".",
"Graph",
"(",
")",
"with",
"graph",
".",
"as_default",
"(",
")",
":",
"create_graph",
"(",
")",
"with",
"tf",
".",
"Session",
"(",
")",
"as",
"sess",
":",
"# ops = sess.graph.get_operations()",
"# for op in ops:",
"# print(op.name)",
"transfer_layer",
"=",
"graph",
".",
"get_tensor_by_name",
"(",
"tensor_name_transfer_layer",
")",
"print",
"(",
"transfer_layer",
".",
"shape",
")"
] |
https://github.com/wucng/TensorExpand/blob/4ea58f64f5c5082b278229b799c9f679536510b7/TensorExpand/图片项目/5、迁移学习/VGG迁移学习/查看pb文件的op名.py#L25-L35
|
||||
google/TensorNetwork
|
e12580f1749493dbe05f474d2fecdec4eaba73c5
|
tensornetwork/backends/pytorch/decompositions.py
|
python
|
qr
|
(
torch: Any,
tensor: Tensor,
pivot_axis: int,
non_negative_diagonal: bool = False
)
|
return q, r
|
Computes the QR decomposition of a tensor.
The QR decomposition is performed by treating the tensor as a matrix,
with an effective left (row) index resulting from combining the axes
`tensor.shape[:pivot_axis]` and an effective right (column) index
resulting from combining the axes `tensor.shape[pivot_axis:]`.
For example, if `tensor` had a shape (2, 3, 4, 5) and `pivot_axis` was 2,
then `q` would have shape (2, 3, 6), and `r` would have shape (6, 4, 5).
The output consists of two tensors `Q, R` such that:
```python
Q[i1,...,iN, j] * R[j, k1,...,kM] == tensor[i1,...,iN, k1,...,kM]
```
`R` is an upper triangular matrix, `Q` is an orthonormal matrix
Note that the output ordering matches numpy.linalg.svd rather than tf.svd.
Args:
    torch: The torch module.
tensor: A tensor to be decomposed.
pivot_axis: Where to split the tensor's axes before flattening into a
matrix.
Returns:
Q: Left tensor factor.
R: Right tensor factor.
|
Computes the QR decomposition of a tensor.
|
[
"Computes",
"the",
"QR",
"decomposition",
"of",
"a",
"tensor",
"."
] |
def qr(
torch: Any,
tensor: Tensor,
pivot_axis: int,
non_negative_diagonal: bool = False
) -> Tuple[Tensor, Tensor]:
"""Computes the QR decomposition of a tensor.
The QR decomposition is performed by treating the tensor as a matrix,
with an effective left (row) index resulting from combining the axes
`tensor.shape[:pivot_axis]` and an effective right (column) index
resulting from combining the axes `tensor.shape[pivot_axis:]`.
For example, if `tensor` had a shape (2, 3, 4, 5) and `pivot_axis` was 2,
then `q` would have shape (2, 3, 6), and `r` would have shape (6, 4, 5).
The output consists of two tensors `Q, R` such that:
```python
Q[i1,...,iN, j] * R[j, k1,...,kM] == tensor[i1,...,iN, k1,...,kM]
```
`R` is an upper triangular matrix, `Q` is an orthonormal matrix
Note that the output ordering matches numpy.linalg.svd rather than tf.svd.
Args:
    torch: The torch module.
tensor: A tensor to be decomposed.
pivot_axis: Where to split the tensor's axes before flattening into a
matrix.
Returns:
Q: Left tensor factor.
R: Right tensor factor.
"""
left_dims = list(tensor.shape)[:pivot_axis]
right_dims = list(tensor.shape)[pivot_axis:]
tensor = torch.reshape(tensor, (np.prod(left_dims), np.prod(right_dims)))
q, r = torch.qr(tensor)
if non_negative_diagonal:
phases = torch.sign(torch.diagonal(r))
q = q * phases
r = phases[:, None] * r
center_dim = q.shape[1]
q = torch.reshape(q, list(left_dims) + [center_dim])
r = torch.reshape(r, [center_dim] + list(right_dims))
return q, r
|
[
"def",
"qr",
"(",
"torch",
":",
"Any",
",",
"tensor",
":",
"Tensor",
",",
"pivot_axis",
":",
"int",
",",
"non_negative_diagonal",
":",
"bool",
"=",
"False",
")",
"->",
"Tuple",
"[",
"Tensor",
",",
"Tensor",
"]",
":",
"left_dims",
"=",
"list",
"(",
"tensor",
".",
"shape",
")",
"[",
":",
"pivot_axis",
"]",
"right_dims",
"=",
"list",
"(",
"tensor",
".",
"shape",
")",
"[",
"pivot_axis",
":",
"]",
"tensor",
"=",
"torch",
".",
"reshape",
"(",
"tensor",
",",
"(",
"np",
".",
"prod",
"(",
"left_dims",
")",
",",
"np",
".",
"prod",
"(",
"right_dims",
")",
")",
")",
"q",
",",
"r",
"=",
"torch",
".",
"qr",
"(",
"tensor",
")",
"if",
"non_negative_diagonal",
":",
"phases",
"=",
"torch",
".",
"sign",
"(",
"torch",
".",
"diagonal",
"(",
"r",
")",
")",
"q",
"=",
"q",
"*",
"phases",
"r",
"=",
"phases",
"[",
":",
",",
"None",
"]",
"*",
"r",
"center_dim",
"=",
"q",
".",
"shape",
"[",
"1",
"]",
"q",
"=",
"torch",
".",
"reshape",
"(",
"q",
",",
"list",
"(",
"left_dims",
")",
"+",
"[",
"center_dim",
"]",
")",
"r",
"=",
"torch",
".",
"reshape",
"(",
"r",
",",
"[",
"center_dim",
"]",
"+",
"list",
"(",
"right_dims",
")",
")",
"return",
"q",
",",
"r"
] |
https://github.com/google/TensorNetwork/blob/e12580f1749493dbe05f474d2fecdec4eaba73c5/tensornetwork/backends/pytorch/decompositions.py#L124-L170
|
|
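A usage sketch checking the shapes promised in the docstring above: a (2, 3, 4, 5) tensor split at pivot_axis=2 gives Q of shape (2, 3, 6) and R of shape (6, 4, 5), and contracting them reproduces the tensor. The snippet above calls the older torch.qr; the sketch below uses torch.linalg.qr, its current equivalent for the reduced factorization.

import torch

t = torch.randn(2, 3, 4, 5)
mat = t.reshape(2 * 3, 4 * 5)              # flatten around pivot_axis=2
q, r = torch.linalg.qr(mat)                # reduced QR: (6, 6) and (6, 20)
q = q.reshape(2, 3, 6)
r = r.reshape(6, 4, 5)
recon = torch.einsum("abj,jcd->abcd", q, r)
print(q.shape, r.shape, torch.allclose(recon, t, atol=1e-5))
# torch.Size([2, 3, 6]) torch.Size([6, 4, 5]) True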
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/Python-2.7.9/Lib/idlelib/OutputWindow.py
|
python
|
OutputWindow.writelines
|
(self, lines)
|
[] |
def writelines(self, lines):
for line in lines:
self.write(line)
|
[
"def",
"writelines",
"(",
"self",
",",
"lines",
")",
":",
"for",
"line",
"in",
"lines",
":",
"self",
".",
"write",
"(",
"line",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/idlelib/OutputWindow.py#L50-L52
|
||||
osmr/imgclsmob
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
tensorflow2/tf2cv/models/seresnet_cifar.py
|
python
|
seresnet56_svhn
|
(classes=10, **kwargs)
|
return get_seresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="seresnet56_svhn", **kwargs)
|
SE-ResNet-56 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
|
SE-ResNet-56 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
|
[
"SE",
"-",
"ResNet",
"-",
"56",
"model",
"for",
"SVHN",
"from",
"Squeeze",
"-",
"and",
"-",
"Excitation",
"Networks",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1709",
".",
"01507",
"."
] |
def seresnet56_svhn(classes=10, **kwargs):
"""
SE-ResNet-56 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="seresnet56_svhn", **kwargs)
|
[
"def",
"seresnet56_svhn",
"(",
"classes",
"=",
"10",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"get_seresnet_cifar",
"(",
"classes",
"=",
"classes",
",",
"blocks",
"=",
"56",
",",
"bottleneck",
"=",
"False",
",",
"model_name",
"=",
"\"seresnet56_svhn\"",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/osmr/imgclsmob/blob/f2993d3ce73a2f7ddba05da3891defb08547d504/tensorflow2/tf2cv/models/seresnet_cifar.py#L241-L254
|
|
bendmorris/static-python
|
2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473
|
Lib/imghdr.py
|
python
|
test_pbm
|
(h, f)
|
PBM (portable bitmap)
|
PBM (portable bitmap)
|
[
"PBM",
"(",
"portable",
"bitmap",
")"
] |
def test_pbm(h, f):
"""PBM (portable bitmap)"""
if len(h) >= 3 and \
h[0] == ord(b'P') and h[1] in b'14' and h[2] in b' \t\n\r':
return 'pbm'
|
[
"def",
"test_pbm",
"(",
"h",
",",
"f",
")",
":",
"if",
"len",
"(",
"h",
")",
">=",
"3",
"and",
"h",
"[",
"0",
"]",
"==",
"ord",
"(",
"b'P'",
")",
"and",
"h",
"[",
"1",
"]",
"in",
"b'14'",
"and",
"h",
"[",
"2",
"]",
"in",
"b' \\t\\n\\r'",
":",
"return",
"'pbm'"
] |
https://github.com/bendmorris/static-python/blob/2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473/Lib/imghdr.py#L71-L75
|
||
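What the header check above accepts, on hand-written header bytes ('P1' is the plain and 'P4' the raw PBM signature; all three headers below are invented):

def test_pbm(h, f):
    """PBM (portable bitmap)"""
    if len(h) >= 3 and \
       h[0] == ord(b'P') and h[1] in b'14' and h[2] in b' \t\n\r':
        return 'pbm'

print(test_pbm(b"P1\n2 2\n", None))   # pbm   (plain/ASCII bitmap)
print(test_pbm(b"P4\n2 2\n", None))   # pbm   (raw/binary bitmap)
print(test_pbm(b"P6\n2 2\n", None))   # None  (that is a PPM, not a PBM)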
rsalmei/alive-progress
|
767445917e7cb384981c0dc29b3b3204384353b1
|
alive_progress/core/progress.py
|
python
|
_create_bars
|
(local_config)
|
return bar(local_config.length, local_config.unknown)
|
[] |
def _create_bars(local_config):
bar = local_config.bar
if bar is None:
obj = __noop
obj.unknown, obj.end = obj, obj
return obj
return bar(local_config.length, local_config.unknown)
|
[
"def",
"_create_bars",
"(",
"local_config",
")",
":",
"bar",
"=",
"local_config",
".",
"bar",
"if",
"bar",
"is",
"None",
":",
"obj",
"=",
"__noop",
"obj",
".",
"unknown",
",",
"obj",
".",
"end",
"=",
"obj",
",",
"obj",
"return",
"obj",
"return",
"bar",
"(",
"local_config",
".",
"length",
",",
"local_config",
".",
"unknown",
")"
] |
https://github.com/rsalmei/alive-progress/blob/767445917e7cb384981c0dc29b3b3204384353b1/alive_progress/core/progress.py#L277-L283
|
|||
pysmt/pysmt
|
ade4dc2a825727615033a96d31c71e9f53ce4764
|
pysmt/parsing.py
|
python
|
InfixOrUnaryOpAdapter.__init__
|
(self, b_operator, u_operator, b_lbp, u_lbp)
|
[] |
def __init__(self, b_operator, u_operator, b_lbp, u_lbp):
GrammarSymbol.__init__(self)
self.b_operator = b_operator
self.u_operator = u_operator
self.lbp = b_lbp
self.u_lbp = u_lbp
|
[
"def",
"__init__",
"(",
"self",
",",
"b_operator",
",",
"u_operator",
",",
"b_lbp",
",",
"u_lbp",
")",
":",
"GrammarSymbol",
".",
"__init__",
"(",
"self",
")",
"self",
".",
"b_operator",
"=",
"b_operator",
"self",
".",
"u_operator",
"=",
"u_operator",
"self",
".",
"lbp",
"=",
"b_lbp",
"self",
".",
"u_lbp",
"=",
"u_lbp"
] |
https://github.com/pysmt/pysmt/blob/ade4dc2a825727615033a96d31c71e9f53ce4764/pysmt/parsing.py#L558-L563
|
||||
thu-coai/CrossWOZ
|
265e97379b34221f5949beb46f3eec0e2dc943c4
|
convlab2/util/allennlp_file_utils.py
|
python
|
read_set_from_file
|
(filename: str)
|
return collection
|
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
|
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
|
[
"Extract",
"a",
"de",
"-",
"duped",
"collection",
"(",
"set",
")",
"of",
"text",
"from",
"a",
"file",
".",
"Expected",
"file",
"format",
"is",
"one",
"item",
"per",
"line",
"."
] |
def read_set_from_file(filename: str) -> Set[str]:
"""
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
"""
collection = set()
with open(filename, 'r') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
|
[
"def",
"read_set_from_file",
"(",
"filename",
":",
"str",
")",
"->",
"Set",
"[",
"str",
"]",
":",
"collection",
"=",
"set",
"(",
")",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"file_",
":",
"for",
"line",
"in",
"file_",
":",
"collection",
".",
"add",
"(",
"line",
".",
"rstrip",
"(",
")",
")",
"return",
"collection"
] |
https://github.com/thu-coai/CrossWOZ/blob/265e97379b34221f5949beb46f3eec0e2dc943c4/convlab2/util/allennlp_file_utils.py#L312-L321
|
|
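A round-trip sketch for the helper above; duplicate lines collapse because the collection is a set. The file name and contents are invented.

import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), "vocab.txt")
with open(path, "w") as fh:
    fh.write("alpha\nbeta\nalpha\n")

collection = set()
with open(path, "r") as file_:
    for line in file_:
        collection.add(line.rstrip())
print(sorted(collection))  # ['alpha', 'beta']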
pyglet/pyglet
|
2833c1df902ca81aeeffa786c12e7e87d402434b
|
pyglet/com.py
|
python
|
COMObject.pointers
|
(self)
|
return self._pointers
|
Returns pointers to the implemented interfaces in this COMObject. Read-only.
:type: dict
|
Returns pointers to the implemented interfaces in this COMObject. Read-only.
|
[
"Returns",
"pointers",
"to",
"the",
"implemented",
"interfaces",
"in",
"this",
"COMObject",
".",
"Read",
"-",
"only",
"."
] |
def pointers(self):
"""Returns pointers to the implemented interfaces in this COMObject. Read-only.
:type: dict
"""
return self._pointers
|
[
"def",
"pointers",
"(",
"self",
")",
":",
"return",
"self",
".",
"_pointers"
] |
https://github.com/pyglet/pyglet/blob/2833c1df902ca81aeeffa786c12e7e87d402434b/pyglet/com.py#L350-L355
|
|
guoqiangqi/PFLD
|
d2696963bb023c11cd5e8a8ee38a66085e26537f
|
mtcnn/detect_face.py
|
python
|
Network.prelu
|
(self, inp, name)
|
return output
|
[] |
def prelu(self, inp, name):
with tf.variable_scope(name):
i = int(inp.get_shape()[-1])
alpha = self.make_var('alpha', shape=(i,))
output = tf.nn.relu(inp) + tf.multiply(alpha, -tf.nn.relu(-inp))
return output
|
[
"def",
"prelu",
"(",
"self",
",",
"inp",
",",
"name",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
")",
":",
"i",
"=",
"int",
"(",
"inp",
".",
"get_shape",
"(",
")",
"[",
"-",
"1",
"]",
")",
"alpha",
"=",
"self",
".",
"make_var",
"(",
"'alpha'",
",",
"shape",
"=",
"(",
"i",
",",
")",
")",
"output",
"=",
"tf",
".",
"nn",
".",
"relu",
"(",
"inp",
")",
"+",
"tf",
".",
"multiply",
"(",
"alpha",
",",
"-",
"tf",
".",
"nn",
".",
"relu",
"(",
"-",
"inp",
")",
")",
"return",
"output"
] |
https://github.com/guoqiangqi/PFLD/blob/d2696963bb023c11cd5e8a8ee38a66085e26537f/mtcnn/detect_face.py#L141-L146
|
|||
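Numerically, the PReLU above computes relu(x) + alpha * (-relu(-x)), which is x for positive inputs and alpha * x for negative ones. A NumPy check with an invented scalar alpha (the quoted code learns a per-channel alpha variable instead):

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5])
alpha = 0.25

def relu(v):
    return np.maximum(v, 0.0)

out = relu(x) + alpha * (-relu(-x))          # same formula as in prelu above
print(out)                                   # [-0.5   -0.125  0.     1.5  ]
print(np.where(x > 0, x, alpha * x))         # identical values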
wummel/linkchecker
|
c2ce810c3fb00b895a841a7be6b2e78c64e7b042
|
third_party/dnspython/dns/rdtypes/nsbase.py
|
python
|
NSBase.to_text
|
(self, origin=None, relativize=True, **kw)
|
return str(target)
|
[] |
def to_text(self, origin=None, relativize=True, **kw):
target = self.target.choose_relativity(origin, relativize)
return str(target)
|
[
"def",
"to_text",
"(",
"self",
",",
"origin",
"=",
"None",
",",
"relativize",
"=",
"True",
",",
"*",
"*",
"kw",
")",
":",
"target",
"=",
"self",
".",
"target",
".",
"choose_relativity",
"(",
"origin",
",",
"relativize",
")",
"return",
"str",
"(",
"target",
")"
] |
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/rdtypes/nsbase.py#L36-L38
|
|||
openstack/heat
|
ea6633c35b04bb49c4a2858edc9df0a82d039478
|
heat/engine/timestamp.py
|
python
|
Timestamp.__init__
|
(self, db_fetch, attribute)
|
Initialise the timestamp descriptor.
Initialise with a function to fetch the database representation of an
object (given a context and ID) and the name of the attribute to
retrieve.
|
Initialise the timestamp descriptor.
|
[
"Initialise",
"the",
"timestamp",
"descriptor",
"."
] |
def __init__(self, db_fetch, attribute):
"""Initialise the timestamp descriptor.
Initialise with a function to fetch the database representation of an
object (given a context and ID) and the name of the attribute to
retrieve.
"""
self.db_fetch = db_fetch
self.attribute = attribute
|
[
"def",
"__init__",
"(",
"self",
",",
"db_fetch",
",",
"attribute",
")",
":",
"self",
".",
"db_fetch",
"=",
"db_fetch",
"self",
".",
"attribute",
"=",
"attribute"
] |
https://github.com/openstack/heat/blob/ea6633c35b04bb49c4a2858edc9df0a82d039478/heat/engine/timestamp.py#L20-L28
|
||
flyyufelix/DenseNet-Keras
|
8c42d8092b2616a9fbf025c756b14c67be708685
|
densenet121.py
|
python
|
transition_block
|
(x, stage, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1E-4)
|
return x
|
Apply BatchNorm, 1x1 Convolution, averagePooling, optional compression, dropout
# Arguments
x: input tensor
stage: index for dense block
nb_filter: number of filters
compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
dropout_rate: dropout rate
weight_decay: weight decay factor
|
Apply BatchNorm, 1x1 Convolution, averagePooling, optional compression, dropout
# Arguments
x: input tensor
stage: index for dense block
nb_filter: number of filters
compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
dropout_rate: dropout rate
weight_decay: weight decay factor
|
[
"Apply",
"BatchNorm",
"1x1",
"Convolution",
"averagePooling",
"optional",
"compression",
"dropout",
"#",
"Arguments",
"x",
":",
"input",
"tensor",
"stage",
":",
"index",
"for",
"dense",
"block",
"nb_filter",
":",
"number",
"of",
"filters",
"compression",
":",
"calculated",
"as",
"1",
"-",
"reduction",
".",
"Reduces",
"the",
"number",
"of",
"feature",
"maps",
"in",
"the",
"transition",
"block",
".",
"dropout_rate",
":",
"dropout",
"rate",
"weight_decay",
":",
"weight",
"decay",
"factor"
] |
def transition_block(x, stage, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1E-4):
''' Apply BatchNorm, 1x1 Convolution, averagePooling, optional compression, dropout
# Arguments
x: input tensor
stage: index for dense block
nb_filter: number of filters
compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
dropout_rate: dropout rate
weight_decay: weight decay factor
'''
eps = 1.1e-5
conv_name_base = 'conv' + str(stage) + '_blk'
relu_name_base = 'relu' + str(stage) + '_blk'
pool_name_base = 'pool' + str(stage)
x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_bn')(x)
x = Scale(axis=concat_axis, name=conv_name_base+'_scale')(x)
x = Activation('relu', name=relu_name_base)(x)
x = Convolution2D(int(nb_filter * compression), 1, 1, name=conv_name_base, bias=False)(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
x = AveragePooling2D((2, 2), strides=(2, 2), name=pool_name_base)(x)
return x
|
[
"def",
"transition_block",
"(",
"x",
",",
"stage",
",",
"nb_filter",
",",
"compression",
"=",
"1.0",
",",
"dropout_rate",
"=",
"None",
",",
"weight_decay",
"=",
"1E-4",
")",
":",
"eps",
"=",
"1.1e-5",
"conv_name_base",
"=",
"'conv'",
"+",
"str",
"(",
"stage",
")",
"+",
"'_blk'",
"relu_name_base",
"=",
"'relu'",
"+",
"str",
"(",
"stage",
")",
"+",
"'_blk'",
"pool_name_base",
"=",
"'pool'",
"+",
"str",
"(",
"stage",
")",
"x",
"=",
"BatchNormalization",
"(",
"epsilon",
"=",
"eps",
",",
"axis",
"=",
"concat_axis",
",",
"name",
"=",
"conv_name_base",
"+",
"'_bn'",
")",
"(",
"x",
")",
"x",
"=",
"Scale",
"(",
"axis",
"=",
"concat_axis",
",",
"name",
"=",
"conv_name_base",
"+",
"'_scale'",
")",
"(",
"x",
")",
"x",
"=",
"Activation",
"(",
"'relu'",
",",
"name",
"=",
"relu_name_base",
")",
"(",
"x",
")",
"x",
"=",
"Convolution2D",
"(",
"int",
"(",
"nb_filter",
"*",
"compression",
")",
",",
"1",
",",
"1",
",",
"name",
"=",
"conv_name_base",
",",
"bias",
"=",
"False",
")",
"(",
"x",
")",
"if",
"dropout_rate",
":",
"x",
"=",
"Dropout",
"(",
"dropout_rate",
")",
"(",
"x",
")",
"x",
"=",
"AveragePooling2D",
"(",
"(",
"2",
",",
"2",
")",
",",
"strides",
"=",
"(",
"2",
",",
"2",
")",
",",
"name",
"=",
"pool_name_base",
")",
"(",
"x",
")",
"return",
"x"
] |
https://github.com/flyyufelix/DenseNet-Keras/blob/8c42d8092b2616a9fbf025c756b14c67be708685/densenet121.py#L117-L143
|
|
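A small worked example of the compression parameter in the transition_block record above (the numbers are illustrative only): with compression = 1 - reduction, the 1x1 convolution shrinks the channel count to int(nb_filter * compression).

# Illustrative arithmetic only; nb_filter and reduction are made-up values.
nb_filter = 256
reduction = 0.5
compression = 1.0 - reduction
print(int(nb_filter * compression))   # 128 feature maps leave the transition block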
intrig-unicamp/mininet-wifi
|
3c8a8f63bd4aa043aa9c1ad16f304dec2916f5ba
|
mn_wifi/sixLoWPAN/node.py
|
python
|
OVSSensor.setup
|
(cls)
|
Make sure Open vSwitch is installed and working
|
Make sure Open vSwitch is installed and working
|
[
"Make",
"sure",
"Open",
"vSwitch",
"is",
"installed",
"and",
"working"
] |
def setup(cls):
"Make sure Open vSwitch is installed and working"
pathCheck('ovs-vsctl',
moduleName='Open vSwitch (openvswitch.org)')
# This should no longer be needed, and it breaks
# with OVS 1.7 which has renamed the kernel module:
# moduleDeps( subtract=OF_KMOD, add=OVS_KMOD )
out, err, exitcode = errRun('ovs-vsctl -t 1 show')
if exitcode:
error(out + err +
'ovs-vsctl exited with code %d\n' % exitcode +
'*** Error connecting to ovs-db with ovs-vsctl\n'
'Make sure that Open vSwitch is installed, '
'that ovsdb-server is running, and that\n'
'"ovs-vsctl show" works correctly.\n'
'You may wish to try '
'"service openvswitch-switch start".\n')
exit(1)
version = quietRun('ovs-vsctl --version')
cls.OVSVersion = findall(r'\d+\.\d+', version)[0]
|
[
"def",
"setup",
"(",
"cls",
")",
":",
"pathCheck",
"(",
"'ovs-vsctl'",
",",
"moduleName",
"=",
"'Open vSwitch (openvswitch.org)'",
")",
"# This should no longer be needed, and it breaks",
"# with OVS 1.7 which has renamed the kernel module:",
"# moduleDeps( subtract=OF_KMOD, add=OVS_KMOD )",
"out",
",",
"err",
",",
"exitcode",
"=",
"errRun",
"(",
"'ovs-vsctl -t 1 show'",
")",
"if",
"exitcode",
":",
"error",
"(",
"out",
"+",
"err",
"+",
"'ovs-vsctl exited with code %d\\n'",
"%",
"exitcode",
"+",
"'*** Error connecting to ovs-db with ovs-vsctl\\n'",
"'Make sure that Open vSwitch is installed, '",
"'that ovsdb-server is running, and that\\n'",
"'\"ovs-vsctl show\" works correctly.\\n'",
"'You may wish to try '",
"'\"service openvswitch-switch start\".\\n'",
")",
"exit",
"(",
"1",
")",
"version",
"=",
"quietRun",
"(",
"'ovs-vsctl --version'",
")",
"cls",
".",
"OVSVersion",
"=",
"findall",
"(",
"r'\\d+\\.\\d+'",
",",
"version",
")",
"[",
"0",
"]"
] |
https://github.com/intrig-unicamp/mininet-wifi/blob/3c8a8f63bd4aa043aa9c1ad16f304dec2916f5ba/mn_wifi/sixLoWPAN/node.py#L278-L297
|
||
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_hxb2/lib/python3.5/site-packages/pip/_vendor/colorama/ansi.py
|
python
|
code_to_chars
|
(code)
|
return CSI + str(code) + 'm'
|
[] |
def code_to_chars(code):
return CSI + str(code) + 'm'
|
[
"def",
"code_to_chars",
"(",
"code",
")",
":",
"return",
"CSI",
"+",
"str",
"(",
"code",
")",
"+",
"'m'"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/pip/_vendor/colorama/ansi.py#L12-L13
|
|||
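For context on the code_to_chars record above: CSI in the surrounding colorama module is the ANSI Control Sequence Introducer. A minimal sketch of what the helper builds; the CSI value here is assumed from the ANSI standard rather than copied from the file.

# Sketch assuming CSI = '\033[' (the ANSI Control Sequence Introducer).
CSI = '\033['

def code_to_chars(code):
    return CSI + str(code) + 'm'

assert code_to_chars(31) == '\033[31m'                  # SGR code 31 = red foreground
print(code_to_chars(31) + 'error' + code_to_chars(0))   # red 'error', then reset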
numenta/nupic
|
b9ebedaf54f49a33de22d8d44dff7c765cdb5548
|
src/nupic/frameworks/opf/htm_prediction_model_classifier_helper.py
|
python
|
HTMPredictionModelClassifierHelper._recomputeRecordFromKNN
|
(self, record)
|
return None
|
return the classified labeling of record
|
return the classified labeling of record
|
[
"return",
"the",
"classified",
"labeling",
"of",
"record"
] |
def _recomputeRecordFromKNN(self, record):
"""
return the classified labeling of record
"""
inputs = {
"categoryIn": [None],
"bottomUpIn": self._getStateAnomalyVector(record),
}
outputs = {"categoriesOut": numpy.zeros((1,)),
"bestPrototypeIndices":numpy.zeros((1,)),
"categoryProbabilitiesOut":numpy.zeros((1,))}
# Run inference only to capture state before learning
classifier = self.htm_prediction_model._getAnomalyClassifier()
knn = classifier.getSelf()._knn
# Only use points before record to classify and after the wait period.
classifier_indexes = \
numpy.array(classifier.getSelf().getParameter('categoryRecencyList'))
valid_idx = numpy.where(
(classifier_indexes >= self._autoDetectWaitRecords) &
(classifier_indexes < record.ROWID)
)[0].tolist()
if len(valid_idx) == 0:
return None
classifier.setParameter('inferenceMode', True)
classifier.setParameter('learningMode', False)
classifier.getSelf().compute(inputs, outputs)
classifier.setParameter('learningMode', True)
classifier_distances = classifier.getSelf().getLatestDistances()
valid_distances = classifier_distances[valid_idx]
if valid_distances.min() <= self._classificationMaxDist:
classifier_indexes_prev = classifier_indexes[valid_idx]
rowID = classifier_indexes_prev[valid_distances.argmin()]
indexID = numpy.where(classifier_indexes == rowID)[0][0]
category = classifier.getSelf().getCategoryList()[indexID]
return category
return None
|
[
"def",
"_recomputeRecordFromKNN",
"(",
"self",
",",
"record",
")",
":",
"inputs",
"=",
"{",
"\"categoryIn\"",
":",
"[",
"None",
"]",
",",
"\"bottomUpIn\"",
":",
"self",
".",
"_getStateAnomalyVector",
"(",
"record",
")",
",",
"}",
"outputs",
"=",
"{",
"\"categoriesOut\"",
":",
"numpy",
".",
"zeros",
"(",
"(",
"1",
",",
")",
")",
",",
"\"bestPrototypeIndices\"",
":",
"numpy",
".",
"zeros",
"(",
"(",
"1",
",",
")",
")",
",",
"\"categoryProbabilitiesOut\"",
":",
"numpy",
".",
"zeros",
"(",
"(",
"1",
",",
")",
")",
"}",
"# Run inference only to capture state before learning",
"classifier",
"=",
"self",
".",
"htm_prediction_model",
".",
"_getAnomalyClassifier",
"(",
")",
"knn",
"=",
"classifier",
".",
"getSelf",
"(",
")",
".",
"_knn",
"# Only use points before record to classify and after the wait period.",
"classifier_indexes",
"=",
"numpy",
".",
"array",
"(",
"classifier",
".",
"getSelf",
"(",
")",
".",
"getParameter",
"(",
"'categoryRecencyList'",
")",
")",
"valid_idx",
"=",
"numpy",
".",
"where",
"(",
"(",
"classifier_indexes",
">=",
"self",
".",
"_autoDetectWaitRecords",
")",
"&",
"(",
"classifier_indexes",
"<",
"record",
".",
"ROWID",
")",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"if",
"len",
"(",
"valid_idx",
")",
"==",
"0",
":",
"return",
"None",
"classifier",
".",
"setParameter",
"(",
"'inferenceMode'",
",",
"True",
")",
"classifier",
".",
"setParameter",
"(",
"'learningMode'",
",",
"False",
")",
"classifier",
".",
"getSelf",
"(",
")",
".",
"compute",
"(",
"inputs",
",",
"outputs",
")",
"classifier",
".",
"setParameter",
"(",
"'learningMode'",
",",
"True",
")",
"classifier_distances",
"=",
"classifier",
".",
"getSelf",
"(",
")",
".",
"getLatestDistances",
"(",
")",
"valid_distances",
"=",
"classifier_distances",
"[",
"valid_idx",
"]",
"if",
"valid_distances",
".",
"min",
"(",
")",
"<=",
"self",
".",
"_classificationMaxDist",
":",
"classifier_indexes_prev",
"=",
"classifier_indexes",
"[",
"valid_idx",
"]",
"rowID",
"=",
"classifier_indexes_prev",
"[",
"valid_distances",
".",
"argmin",
"(",
")",
"]",
"indexID",
"=",
"numpy",
".",
"where",
"(",
"classifier_indexes",
"==",
"rowID",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"category",
"=",
"classifier",
".",
"getSelf",
"(",
")",
".",
"getCategoryList",
"(",
")",
"[",
"indexID",
"]",
"return",
"category",
"return",
"None"
] |
https://github.com/numenta/nupic/blob/b9ebedaf54f49a33de22d8d44dff7c765cdb5548/src/nupic/frameworks/opf/htm_prediction_model_classifier_helper.py#L394-L435
|
|
SickChill/SickChill
|
01020f3636d01535f60b83464d8127ea0efabfc7
|
sickchill/adba/__init__.py
|
python
|
Connection.producer
|
(self, pid=None, pname=None, callback=None)
|
return self.handle(ProducerCommand(pid, pname), callback)
|
Get information about a producer
parameters:
pid - producer id
pname - name of the producer
structure of parameters:
(pid|pname)
|
Get information about a producer
|
[
"Get",
"information",
"about",
"a",
"producer"
] |
def producer(self, pid=None, pname=None, callback=None):
"""
Get information about a producer
parameters:
pid - producer id
pname - name of the producer
structure of parameters:
(pid|pname)
"""
return self.handle(ProducerCommand(pid, pname), callback)
|
[
"def",
"producer",
"(",
"self",
",",
"pid",
"=",
"None",
",",
"pname",
"=",
"None",
",",
"callback",
"=",
"None",
")",
":",
"return",
"self",
".",
"handle",
"(",
"ProducerCommand",
"(",
"pid",
",",
"pname",
")",
",",
"callback",
")"
] |
https://github.com/SickChill/SickChill/blob/01020f3636d01535f60b83464d8127ea0efabfc7/sickchill/adba/__init__.py#L612-L625
|
|
fonttools/fonttools
|
892322aaff6a89bea5927379ec06bc0da3dfb7df
|
Lib/fontTools/feaLib/ast.py
|
python
|
LanguageSystemStatement.build
|
(self, builder)
|
Calls the builder object's ``add_language_system`` callback.
|
Calls the builder object's ``add_language_system`` callback.
|
[
"Calls",
"the",
"builder",
"object",
"s",
"add_language_system",
"callback",
"."
] |
def build(self, builder):
"""Calls the builder object's ``add_language_system`` callback."""
builder.add_language_system(self.location, self.script, self.language)
|
[
"def",
"build",
"(",
"self",
",",
"builder",
")",
":",
"builder",
".",
"add_language_system",
"(",
"self",
".",
"location",
",",
"self",
".",
"script",
",",
"self",
".",
"language",
")"
] |
https://github.com/fonttools/fonttools/blob/892322aaff6a89bea5927379ec06bc0da3dfb7df/Lib/fontTools/feaLib/ast.py#L982-L984
|
||
libtcod/python-tcod
|
e12c4172baa9efdfd74aff6ee9bab8454a835248
|
tcod/libtcodpy.py
|
python
|
heightmap_add_voronoi
|
(
hm: NDArray[np.float32],
nbPoints: Any,
nbCoef: int,
coef: Sequence[float],
rnd: Optional[tcod.random.Random] = None,
)
|
Add values from a Voronoi diagram to the heightmap.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
nbPoints (Any): Number of Voronoi sites.
nbCoef (int): The diagram value is calculated from the nbCoef
closest sites.
coef (Sequence[float]): The distance to each site is scaled by the
corresponding coef.
Closest site : coef[0],
second closest site : coef[1], ...
rnd (Optional[Random]): A Random instance, or None.
|
Add values from a Voronoi diagram to the heightmap.
|
[
"Add",
"values",
"from",
"a",
"Voronoi",
"diagram",
"to",
"the",
"heightmap",
"."
] |
def heightmap_add_voronoi(
hm: NDArray[np.float32],
nbPoints: Any,
nbCoef: int,
coef: Sequence[float],
rnd: Optional[tcod.random.Random] = None,
) -> None:
"""Add values from a Voronoi diagram to the heightmap.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
nbPoints (Any): Number of Voronoi sites.
nbCoef (int): The diagram value is calculated from the nbCoef
closest sites.
coef (Sequence[float]): The distance to each site is scaled by the
corresponding coef.
Closest site : coef[0],
second closest site : coef[1], ...
rnd (Optional[Random]): A Random instance, or None.
"""
nbPoints = len(coef)
ccoef = ffi.new("float[]", coef)
lib.TCOD_heightmap_add_voronoi(
_heightmap_cdata(hm),
nbPoints,
nbCoef,
ccoef,
rnd.random_c if rnd else ffi.NULL,
)
|
[
"def",
"heightmap_add_voronoi",
"(",
"hm",
":",
"NDArray",
"[",
"np",
".",
"float32",
"]",
",",
"nbPoints",
":",
"Any",
",",
"nbCoef",
":",
"int",
",",
"coef",
":",
"Sequence",
"[",
"float",
"]",
",",
"rnd",
":",
"Optional",
"[",
"tcod",
".",
"random",
".",
"Random",
"]",
"=",
"None",
",",
")",
"->",
"None",
":",
"nbPoints",
"=",
"len",
"(",
"coef",
")",
"ccoef",
"=",
"ffi",
".",
"new",
"(",
"\"float[]\"",
",",
"coef",
")",
"lib",
".",
"TCOD_heightmap_add_voronoi",
"(",
"_heightmap_cdata",
"(",
"hm",
")",
",",
"nbPoints",
",",
"nbCoef",
",",
"ccoef",
",",
"rnd",
".",
"random_c",
"if",
"rnd",
"else",
"ffi",
".",
"NULL",
",",
")"
] |
https://github.com/libtcod/python-tcod/blob/e12c4172baa9efdfd74aff6ee9bab8454a835248/tcod/libtcodpy.py#L2636-L2664
|
||
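A hypothetical call sketch for heightmap_add_voronoi (assumes python-tcod and NumPy are installed; the array size and coefficients are invented). Note that, as the body above shows, nbPoints is immediately overwritten by len(coef).

# Hypothetical usage; requires the tcod package from the record above.
import numpy as np
import tcod.libtcodpy as libtcodpy

hm = np.zeros((64, 64), dtype=np.float32)   # heightmap formatted for the heightmap functions
libtcodpy.heightmap_add_voronoi(hm, nbPoints=0, nbCoef=2, coef=[1.0, -0.5], rnd=None)
print(hm.max())                             # the heightmap now carries the Voronoi contribution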
openhatch/oh-mainline
|
ce29352a034e1223141dcc2f317030bbc3359a51
|
vendor/packages/mechanize/mechanize/_mechanize.py
|
python
|
Browser.set_handle_referer
|
(self, handle)
|
Set whether to add Referer header to each request.
|
Set whether to add Referer header to each request.
|
[
"Set",
"whether",
"to",
"add",
"Referer",
"header",
"to",
"each",
"request",
"."
] |
def set_handle_referer(self, handle):
"""Set whether to add Referer header to each request."""
self._set_handler("_referer", handle)
self._handle_referer = bool(handle)
|
[
"def",
"set_handle_referer",
"(",
"self",
",",
"handle",
")",
":",
"self",
".",
"_set_handler",
"(",
"\"_referer\"",
",",
"handle",
")",
"self",
".",
"_handle_referer",
"=",
"bool",
"(",
"handle",
")"
] |
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/mechanize/mechanize/_mechanize.py#L160-L163
|
||
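A short hypothetical usage sketch for the set_handle_referer record above (assumes the mechanize package is installed).

# Hypothetical usage; mechanize.Browser is the class the record above belongs to.
import mechanize

br = mechanize.Browser()
br.set_handle_referer(True)    # send a Referer header with each request
br.set_handle_referer(False)   # ...or disable it again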
abisee/pointer-generator
|
b29e986f24fdd01a6b6d6008187c5c887f0be282
|
model.py
|
python
|
SummarizationModel._reduce_states
|
(self, fw_st, bw_st)
|
Add to the graph a linear layer to reduce the encoder's final FW and BW state into a single initial state for the decoder. This is needed because the encoder is bidirectional but the decoder is not.
Args:
fw_st: LSTMStateTuple with hidden_dim units.
bw_st: LSTMStateTuple with hidden_dim units.
Returns:
state: LSTMStateTuple with hidden_dim units.
|
Add to the graph a linear layer to reduce the encoder's final FW and BW state into a single initial state for the decoder. This is needed because the encoder is bidirectional but the decoder is not.
|
[
"Add",
"to",
"the",
"graph",
"a",
"linear",
"layer",
"to",
"reduce",
"the",
"encoder",
"s",
"final",
"FW",
"and",
"BW",
"state",
"into",
"a",
"single",
"initial",
"state",
"for",
"the",
"decoder",
".",
"This",
"is",
"needed",
"because",
"the",
"encoder",
"is",
"bidirectional",
"but",
"the",
"decoder",
"is",
"not",
"."
] |
def _reduce_states(self, fw_st, bw_st):
"""Add to the graph a linear layer to reduce the encoder's final FW and BW state into a single initial state for the decoder. This is needed because the encoder is bidirectional but the decoder is not.
Args:
fw_st: LSTMStateTuple with hidden_dim units.
bw_st: LSTMStateTuple with hidden_dim units.
Returns:
state: LSTMStateTuple with hidden_dim units.
"""
hidden_dim = self._hps.hidden_dim
with tf.variable_scope('reduce_final_st'):
# Define weights and biases to reduce the cell and reduce the state
w_reduce_c = tf.get_variable('w_reduce_c', [hidden_dim * 2, hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)
w_reduce_h = tf.get_variable('w_reduce_h', [hidden_dim * 2, hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)
bias_reduce_c = tf.get_variable('bias_reduce_c', [hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)
bias_reduce_h = tf.get_variable('bias_reduce_h', [hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)
# Apply linear layer
old_c = tf.concat(axis=1, values=[fw_st.c, bw_st.c]) # Concatenation of fw and bw cell
old_h = tf.concat(axis=1, values=[fw_st.h, bw_st.h]) # Concatenation of fw and bw state
new_c = tf.nn.relu(tf.matmul(old_c, w_reduce_c) + bias_reduce_c) # Get new cell from old cell
new_h = tf.nn.relu(tf.matmul(old_h, w_reduce_h) + bias_reduce_h) # Get new state from old state
return tf.contrib.rnn.LSTMStateTuple(new_c, new_h)
|
[
"def",
"_reduce_states",
"(",
"self",
",",
"fw_st",
",",
"bw_st",
")",
":",
"hidden_dim",
"=",
"self",
".",
"_hps",
".",
"hidden_dim",
"with",
"tf",
".",
"variable_scope",
"(",
"'reduce_final_st'",
")",
":",
"# Define weights and biases to reduce the cell and reduce the state",
"w_reduce_c",
"=",
"tf",
".",
"get_variable",
"(",
"'w_reduce_c'",
",",
"[",
"hidden_dim",
"*",
"2",
",",
"hidden_dim",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
",",
"initializer",
"=",
"self",
".",
"trunc_norm_init",
")",
"w_reduce_h",
"=",
"tf",
".",
"get_variable",
"(",
"'w_reduce_h'",
",",
"[",
"hidden_dim",
"*",
"2",
",",
"hidden_dim",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
",",
"initializer",
"=",
"self",
".",
"trunc_norm_init",
")",
"bias_reduce_c",
"=",
"tf",
".",
"get_variable",
"(",
"'bias_reduce_c'",
",",
"[",
"hidden_dim",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
",",
"initializer",
"=",
"self",
".",
"trunc_norm_init",
")",
"bias_reduce_h",
"=",
"tf",
".",
"get_variable",
"(",
"'bias_reduce_h'",
",",
"[",
"hidden_dim",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
",",
"initializer",
"=",
"self",
".",
"trunc_norm_init",
")",
"# Apply linear layer",
"old_c",
"=",
"tf",
".",
"concat",
"(",
"axis",
"=",
"1",
",",
"values",
"=",
"[",
"fw_st",
".",
"c",
",",
"bw_st",
".",
"c",
"]",
")",
"# Concatenation of fw and bw cell",
"old_h",
"=",
"tf",
".",
"concat",
"(",
"axis",
"=",
"1",
",",
"values",
"=",
"[",
"fw_st",
".",
"h",
",",
"bw_st",
".",
"h",
"]",
")",
"# Concatenation of fw and bw state",
"new_c",
"=",
"tf",
".",
"nn",
".",
"relu",
"(",
"tf",
".",
"matmul",
"(",
"old_c",
",",
"w_reduce_c",
")",
"+",
"bias_reduce_c",
")",
"# Get new cell from old cell",
"new_h",
"=",
"tf",
".",
"nn",
".",
"relu",
"(",
"tf",
".",
"matmul",
"(",
"old_h",
",",
"w_reduce_h",
")",
"+",
"bias_reduce_h",
")",
"# Get new state from old state",
"return",
"tf",
".",
"contrib",
".",
"rnn",
".",
"LSTMStateTuple",
"(",
"new_c",
",",
"new_h",
")"
] |
https://github.com/abisee/pointer-generator/blob/b29e986f24fdd01a6b6d6008187c5c887f0be282/model.py#L97-L121
|
||
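A hedged NumPy illustration of the shape bookkeeping in the _reduce_states record above (not the repo's TensorFlow graph code): the concatenated [batch, 2*hidden_dim] bidirectional state is projected back down to [batch, hidden_dim] through a ReLU linear layer.

# Shape sketch only; the weights are random stand-ins for the learned variables.
import numpy as np

batch, hidden_dim = 4, 8
fw_c = np.random.rand(batch, hidden_dim)
bw_c = np.random.rand(batch, hidden_dim)
w_reduce_c = np.random.rand(2 * hidden_dim, hidden_dim)
bias_reduce_c = np.zeros(hidden_dim)

old_c = np.concatenate([fw_c, bw_c], axis=1)                  # (4, 16): fw and bw cells side by side
new_c = np.maximum(old_c @ w_reduce_c + bias_reduce_c, 0.0)   # (4, 8): single initial decoder state
print(new_c.shape)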
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_hxb2/lib/python3.5/site-packages/django/contrib/gis/gdal/geometries.py
|
python
|
OGRGeometry.union
|
(self, other)
|
return self._geomgen(capi.geom_union, other)
|
Returns a new geometry consisting of the region which is the union of
this geometry and the other.
|
Returns a new geometry consisting of the region which is the union of
this geometry and the other.
|
[
"Returns",
"a",
"new",
"geometry",
"consisting",
"of",
"the",
"region",
"which",
"is",
"the",
"union",
"of",
"this",
"geometry",
"and",
"the",
"other",
"."
] |
def union(self, other):
"""
Returns a new geometry consisting of the region which is the union of
this geometry and the other.
"""
return self._geomgen(capi.geom_union, other)
|
[
"def",
"union",
"(",
"self",
",",
"other",
")",
":",
"return",
"self",
".",
"_geomgen",
"(",
"capi",
".",
"geom_union",
",",
"other",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/django/contrib/gis/gdal/geometries.py#L487-L492
|
|
abe-winter/automigrate
|
ebad8c503028bc36e6f0b07c4c9cfa350d278685
|
automig/lib/githelp.py
|
python
|
get_paths
|
(tree, pattern, root=())
|
return found
|
given tree (a git tree at a desired ref) return list of paths matching pattern
note: this matches paths that existed at the time of ref, including files that don't exist now and excluding ones that exist on disk but were missing at ref
|
given tree (a git tree at a desired ref) return list of paths matching pattern
note: this matches paths that existed at the time of ref, including files that don't exist now and excluding ones that exist on disk but were missing at ref
|
[
"given",
"tree",
"(",
"a",
"git",
"tree",
"at",
"a",
"desired",
"ref",
")",
"return",
"list",
"of",
"paths",
"matching",
"pattern",
"note",
":",
"this",
"matches",
"paths",
"that",
"existed",
"at",
"the",
"time",
"of",
"ref",
"including",
"files",
"that",
"don",
"t",
"exist",
"now",
"and",
"excluding",
"ones",
"that",
"exist",
"on",
"disk",
"but",
"were",
"missing",
"at",
"ref"
] |
def get_paths(tree, pattern, root=()):
"""given tree (a git tree at a desired ref) return list of paths matching pattern
note: this matches paths that existed at the time of ref, including files that don't exist now and excluding ones that exist on disk but were missing at ref
"""
found = []
for item in tree:
if isinstance(item, git.Blob):
if fnmatch.fnmatch(item.abspath, pattern):
found.append(item.path)
elif isinstance(item, git.Tree):
found.extend(get_paths(item, pattern, root + (item.name,)))
else:
raise NotImplementedError("unexpected type in tree", item)
return found
|
[
"def",
"get_paths",
"(",
"tree",
",",
"pattern",
",",
"root",
"=",
"(",
")",
")",
":",
"found",
"=",
"[",
"]",
"for",
"item",
"in",
"tree",
":",
"if",
"isinstance",
"(",
"item",
",",
"git",
".",
"Blob",
")",
":",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"item",
".",
"abspath",
",",
"pattern",
")",
":",
"found",
".",
"append",
"(",
"item",
".",
"path",
")",
"elif",
"isinstance",
"(",
"item",
",",
"git",
".",
"Tree",
")",
":",
"found",
".",
"extend",
"(",
"get_paths",
"(",
"item",
",",
"pattern",
",",
"root",
"+",
"(",
"item",
".",
"name",
",",
")",
")",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"unexpected type in tree\"",
",",
"item",
")",
"return",
"found"
] |
https://github.com/abe-winter/automigrate/blob/ebad8c503028bc36e6f0b07c4c9cfa350d278685/automig/lib/githelp.py#L15-L28
|
|
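A hypothetical call sketch for the get_paths record above (assumes GitPython and a local checkout are available; the repository path and glob pattern are placeholders).

# Hypothetical usage; the import path follows the record above, the repo path is a placeholder.
import git
from automig.lib.githelp import get_paths

repo = git.Repo('.')                          # any working checkout
tree = repo.commit('HEAD').tree
print(get_paths(tree, '*/migrations/*.sql'))  # paths matching the pattern as they existed at HEAD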
aws/aws-cli
|
d697e0ed79fca0f853ce53efe1f83ee41a478134
|
awscli/customizations/s3uploader.py
|
python
|
S3Uploader.upload
|
(self, file_name, remote_path)
|
Uploads given file to S3
:param file_name: Path to the file that will be uploaded
:param remote_path: be uploaded
:return: VersionId of the latest upload
|
Uploads given file to S3
:param file_name: Path to the file that will be uploaded
:param remote_path: be uploaded
:return: VersionId of the latest upload
|
[
"Uploads",
"given",
"file",
"to",
"S3",
":",
"param",
"file_name",
":",
"Path",
"to",
"the",
"file",
"that",
"will",
"be",
"uploaded",
":",
"param",
"remote_path",
":",
"be",
"uploaded",
":",
"return",
":",
"VersionId",
"of",
"the",
"latest",
"upload"
] |
def upload(self, file_name, remote_path):
"""
Uploads given file to S3
:param file_name: Path to the file that will be uploaded
:param remote_path: be uploaded
:return: VersionId of the latest upload
"""
if self.prefix and len(self.prefix) > 0:
remote_path = "{0}/{1}".format(self.prefix, remote_path)
# Check if a file with same data exists
if not self.force_upload and self.file_exists(remote_path):
LOG.debug("File with same data already exists at {0}. "
"Skipping upload".format(remote_path))
return self.make_url(remote_path)
try:
# Default to regular server-side encryption unless customer has
# specified their own KMS keys
additional_args = {
"ServerSideEncryption": "AES256"
}
if self.kms_key_id:
additional_args["ServerSideEncryption"] = "aws:kms"
additional_args["SSEKMSKeyId"] = self.kms_key_id
if self.artifact_metadata:
additional_args["Metadata"] = self.artifact_metadata
print_progress_callback = \
ProgressPercentage(file_name, remote_path)
future = self.transfer_manager.upload(file_name,
self.bucket_name,
remote_path,
additional_args,
[print_progress_callback])
future.result()
return self.make_url(remote_path)
except botocore.exceptions.ClientError as ex:
error_code = ex.response["Error"]["Code"]
if error_code == "NoSuchBucket":
raise NoSuchBucketError(bucket_name=self.bucket_name)
raise ex
|
[
"def",
"upload",
"(",
"self",
",",
"file_name",
",",
"remote_path",
")",
":",
"if",
"self",
".",
"prefix",
"and",
"len",
"(",
"self",
".",
"prefix",
")",
">",
"0",
":",
"remote_path",
"=",
"\"{0}/{1}\"",
".",
"format",
"(",
"self",
".",
"prefix",
",",
"remote_path",
")",
"# Check if a file with same data exists",
"if",
"not",
"self",
".",
"force_upload",
"and",
"self",
".",
"file_exists",
"(",
"remote_path",
")",
":",
"LOG",
".",
"debug",
"(",
"\"File with same data already exists at {0}. \"",
"\"Skipping upload\"",
".",
"format",
"(",
"remote_path",
")",
")",
"return",
"self",
".",
"make_url",
"(",
"remote_path",
")",
"try",
":",
"# Default to regular server-side encryption unless customer has",
"# specified their own KMS keys",
"additional_args",
"=",
"{",
"\"ServerSideEncryption\"",
":",
"\"AES256\"",
"}",
"if",
"self",
".",
"kms_key_id",
":",
"additional_args",
"[",
"\"ServerSideEncryption\"",
"]",
"=",
"\"aws:kms\"",
"additional_args",
"[",
"\"SSEKMSKeyId\"",
"]",
"=",
"self",
".",
"kms_key_id",
"if",
"self",
".",
"artifact_metadata",
":",
"additional_args",
"[",
"\"Metadata\"",
"]",
"=",
"self",
".",
"artifact_metadata",
"print_progress_callback",
"=",
"ProgressPercentage",
"(",
"file_name",
",",
"remote_path",
")",
"future",
"=",
"self",
".",
"transfer_manager",
".",
"upload",
"(",
"file_name",
",",
"self",
".",
"bucket_name",
",",
"remote_path",
",",
"additional_args",
",",
"[",
"print_progress_callback",
"]",
")",
"future",
".",
"result",
"(",
")",
"return",
"self",
".",
"make_url",
"(",
"remote_path",
")",
"except",
"botocore",
".",
"exceptions",
".",
"ClientError",
"as",
"ex",
":",
"error_code",
"=",
"ex",
".",
"response",
"[",
"\"Error\"",
"]",
"[",
"\"Code\"",
"]",
"if",
"error_code",
"==",
"\"NoSuchBucket\"",
":",
"raise",
"NoSuchBucketError",
"(",
"bucket_name",
"=",
"self",
".",
"bucket_name",
")",
"raise",
"ex"
] |
https://github.com/aws/aws-cli/blob/d697e0ed79fca0f853ce53efe1f83ee41a478134/awscli/customizations/s3uploader.py#L80-L127
|
||
open-mmlab/mmediting
|
6a08a728c63e76f0427eeebcd2db236839bbd11d
|
tools/data/matting/comp1k/preprocess_comp1k_dataset.py
|
python
|
fix_png_file
|
(filename, folder)
|
Fix png files in the target filename using pngfix.
pngfix is a tool to fix PNG files. It's installed on Linux or MacOS by
default.
Args:
filename (str): png file to run pngfix.
|
Fix png files in the target filename using pngfix.
|
[
"Fix",
"png",
"files",
"in",
"the",
"target",
"filename",
"using",
"pngfix",
"."
] |
def fix_png_file(filename, folder):
"""Fix png files in the target filename using pngfix.
pngfix is a tool to fix PNG files. It's installed on Linux or MacOS by
default.
Args:
filename (str): png file to run pngfix.
"""
subprocess.call(
f'pngfix --quiet --strip=color --prefix=fixed_ "{filename}"',
cwd=f'{folder}',
shell=True)
subprocess.call(
f'mv "fixed_{filename}" "{filename}"', cwd=f'{folder}', shell=True)
|
[
"def",
"fix_png_file",
"(",
"filename",
",",
"folder",
")",
":",
"subprocess",
".",
"call",
"(",
"f'pngfix --quiet --strip=color --prefix=fixed_ \"{filename}\"'",
",",
"cwd",
"=",
"f'{folder}'",
",",
"shell",
"=",
"True",
")",
"subprocess",
".",
"call",
"(",
"f'mv \"fixed_{filename}\" \"{filename}\"'",
",",
"cwd",
"=",
"f'{folder}'",
",",
"shell",
"=",
"True",
")"
] |
https://github.com/open-mmlab/mmediting/blob/6a08a728c63e76f0427eeebcd2db236839bbd11d/tools/data/matting/comp1k/preprocess_comp1k_dataset.py#L32-L46
|
||
oracle/graalpython
|
577e02da9755d916056184ec441c26e00b70145c
|
graalpython/lib-python/3/sndhdr.py
|
python
|
test_aifc
|
(h, f)
|
return (fmt, a.getframerate(), a.getnchannels(),
a.getnframes(), 8 * a.getsampwidth())
|
[] |
def test_aifc(h, f):
import aifc
if not h.startswith(b'FORM'):
return None
if h[8:12] == b'AIFC':
fmt = 'aifc'
elif h[8:12] == b'AIFF':
fmt = 'aiff'
else:
return None
f.seek(0)
try:
a = aifc.open(f, 'r')
except (EOFError, aifc.Error):
return None
return (fmt, a.getframerate(), a.getnchannels(),
a.getnframes(), 8 * a.getsampwidth())
|
[
"def",
"test_aifc",
"(",
"h",
",",
"f",
")",
":",
"import",
"aifc",
"if",
"not",
"h",
".",
"startswith",
"(",
"b'FORM'",
")",
":",
"return",
"None",
"if",
"h",
"[",
"8",
":",
"12",
"]",
"==",
"b'AIFC'",
":",
"fmt",
"=",
"'aifc'",
"elif",
"h",
"[",
"8",
":",
"12",
"]",
"==",
"b'AIFF'",
":",
"fmt",
"=",
"'aiff'",
"else",
":",
"return",
"None",
"f",
".",
"seek",
"(",
"0",
")",
"try",
":",
"a",
"=",
"aifc",
".",
"open",
"(",
"f",
",",
"'r'",
")",
"except",
"(",
"EOFError",
",",
"aifc",
".",
"Error",
")",
":",
"return",
"None",
"return",
"(",
"fmt",
",",
"a",
".",
"getframerate",
"(",
")",
",",
"a",
".",
"getnchannels",
"(",
")",
",",
"a",
".",
"getnframes",
"(",
")",
",",
"8",
"*",
"a",
".",
"getsampwidth",
"(",
")",
")"
] |
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/sndhdr.py#L75-L91
|
|||
ThibaultGROUEIX/AtlasNet
|
22a0504b00e7a36d6bb00328f7df4952d36ccc80
|
auxiliary/meter.py
|
python
|
Logs.reset
|
(self)
|
Reset all meters
:return:
|
Reset all meters
:return:
|
[
"Reset",
"all",
"meters",
":",
"return",
":"
] |
def reset(self):
"""
Reset all meters
:return:
"""
for name in self.curves_names:
self.meters[name].reset()
|
[
"def",
"reset",
"(",
"self",
")",
":",
"for",
"name",
"in",
"self",
".",
"curves_names",
":",
"self",
".",
"meters",
"[",
"name",
"]",
".",
"reset",
"(",
")"
] |
https://github.com/ThibaultGROUEIX/AtlasNet/blob/22a0504b00e7a36d6bb00328f7df4952d36ccc80/auxiliary/meter.py#L52-L58
|
||
openedx/edx-platform
|
68dd185a0ab45862a2a61e0f803d7e03d2be71b5
|
openedx/features/enterprise_support/api.py
|
python
|
get_enterprise_consent_url
|
(request, course_id, user=None, return_to=None, enrollment_exists=False)
|
return full_url
|
Build a URL to redirect the user to the Enterprise app to provide data sharing
consent for a specific course ID.
Arguments:
* request: Request object
* course_id: Course key/identifier string.
* user: user to check for consent. If None, uses request.user
* return_to: url name label for the page to return to after consent is granted.
If None, return to request.path instead.
|
Build a URL to redirect the user to the Enterprise app to provide data sharing
consent for a specific course ID.
|
[
"Build",
"a",
"URL",
"to",
"redirect",
"the",
"user",
"to",
"the",
"Enterprise",
"app",
"to",
"provide",
"data",
"sharing",
"consent",
"for",
"a",
"specific",
"course",
"ID",
"."
] |
def get_enterprise_consent_url(request, course_id, user=None, return_to=None, enrollment_exists=False):
"""
Build a URL to redirect the user to the Enterprise app to provide data sharing
consent for a specific course ID.
Arguments:
* request: Request object
* course_id: Course key/identifier string.
* user: user to check for consent. If None, uses request.user
* return_to: url name label for the page to return to after consent is granted.
If None, return to request.path instead.
"""
user = user or request.user
LOGGER.info(
'Getting enterprise consent url for user [{username}] and course [{course_id}].'.format(
username=user.username,
course_id=course_id
)
)
if not consent_needed_for_course(request, user, course_id, enrollment_exists=enrollment_exists):
return None
if return_to is None:
return_path = request.path
else:
return_path = reverse(return_to, args=(course_id,))
url_params = {
'enterprise_customer_uuid': enterprise_customer_uuid_for_request(request),
'course_id': course_id,
'next': request.build_absolute_uri(return_path),
'failure_url': request.build_absolute_uri(
reverse('dashboard') + '?' + urlencode(
{
CONSENT_FAILED_PARAMETER: course_id
}
)
),
}
querystring = urlencode(url_params)
full_url = reverse('grant_data_sharing_permissions') + '?' + querystring
LOGGER.info('Redirecting to %s to complete data sharing consent', full_url)
return full_url
|
[
"def",
"get_enterprise_consent_url",
"(",
"request",
",",
"course_id",
",",
"user",
"=",
"None",
",",
"return_to",
"=",
"None",
",",
"enrollment_exists",
"=",
"False",
")",
":",
"user",
"=",
"user",
"or",
"request",
".",
"user",
"LOGGER",
".",
"info",
"(",
"'Getting enterprise consent url for user [{username}] and course [{course_id}].'",
".",
"format",
"(",
"username",
"=",
"user",
".",
"username",
",",
"course_id",
"=",
"course_id",
")",
")",
"if",
"not",
"consent_needed_for_course",
"(",
"request",
",",
"user",
",",
"course_id",
",",
"enrollment_exists",
"=",
"enrollment_exists",
")",
":",
"return",
"None",
"if",
"return_to",
"is",
"None",
":",
"return_path",
"=",
"request",
".",
"path",
"else",
":",
"return_path",
"=",
"reverse",
"(",
"return_to",
",",
"args",
"=",
"(",
"course_id",
",",
")",
")",
"url_params",
"=",
"{",
"'enterprise_customer_uuid'",
":",
"enterprise_customer_uuid_for_request",
"(",
"request",
")",
",",
"'course_id'",
":",
"course_id",
",",
"'next'",
":",
"request",
".",
"build_absolute_uri",
"(",
"return_path",
")",
",",
"'failure_url'",
":",
"request",
".",
"build_absolute_uri",
"(",
"reverse",
"(",
"'dashboard'",
")",
"+",
"'?'",
"+",
"urlencode",
"(",
"{",
"CONSENT_FAILED_PARAMETER",
":",
"course_id",
"}",
")",
")",
",",
"}",
"querystring",
"=",
"urlencode",
"(",
"url_params",
")",
"full_url",
"=",
"reverse",
"(",
"'grant_data_sharing_permissions'",
")",
"+",
"'?'",
"+",
"querystring",
"LOGGER",
".",
"info",
"(",
"'Redirecting to %s to complete data sharing consent'",
",",
"full_url",
")",
"return",
"full_url"
] |
https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/openedx/features/enterprise_support/api.py#L720-L764
|
|
OpenCobolIDE/OpenCobolIDE
|
c78d0d335378e5fe0a5e74f53c19b68b55e85388
|
open_cobol_ide/extlibs/future/backports/datetime.py
|
python
|
tzinfo.dst
|
(self, dt)
|
datetime -> DST offset in minutes east of UTC.
Return 0 if DST not in effect. utcoffset() must include the DST
offset.
|
datetime -> DST offset in minutes east of UTC.
|
[
"datetime",
"-",
">",
"DST",
"offset",
"in",
"minutes",
"east",
"of",
"UTC",
"."
] |
def dst(self, dt):
"""datetime -> DST offset in minutes east of UTC.
Return 0 if DST not in effect. utcoffset() must include the DST
offset.
"""
raise NotImplementedError("tzinfo subclass must override dst()")
|
[
"def",
"dst",
"(",
"self",
",",
"dt",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"tzinfo subclass must override dst()\"",
")"
] |
https://github.com/OpenCobolIDE/OpenCobolIDE/blob/c78d0d335378e5fe0a5e74f53c19b68b55e85388/open_cobol_ide/extlibs/future/backports/datetime.py#L933-L939
|
||
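Since dst() in the record above only raises NotImplementedError, a concrete subclass has to override it; a minimal standard-library sketch with illustrative offsets follows.

# Minimal tzinfo subclass sketch; the offsets are illustrative.
from datetime import datetime, timedelta, tzinfo

class FixedOffset(tzinfo):
    def utcoffset(self, dt):
        return timedelta(hours=1)
    def dst(self, dt):
        return timedelta(0)        # DST not in effect, so utcoffset() carries no DST part
    def tzname(self, dt):
        return "UTC+01:00"

print(datetime(2024, 1, 1, tzinfo=FixedOffset()).utcoffset())   # 1:00:00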
adamcharnock/lightbus
|
5e7069da06cd37a8131e8c592ee957ccb73603d5
|
lightbus_experiments/structured_logging.py
|
python
|
event_dict_ordering
|
(logger, method_name, event_dict)
|
return ordered
|
[] |
def event_dict_ordering(logger, method_name, event_dict):
ordered = {"event": event_dict.pop("event")}
ordered.update(**event_dict)
return ordered
|
[
"def",
"event_dict_ordering",
"(",
"logger",
",",
"method_name",
",",
"event_dict",
")",
":",
"ordered",
"=",
"{",
"\"event\"",
":",
"event_dict",
".",
"pop",
"(",
"\"event\"",
")",
"}",
"ordered",
".",
"update",
"(",
"*",
"*",
"event_dict",
")",
"return",
"ordered"
] |
https://github.com/adamcharnock/lightbus/blob/5e7069da06cd37a8131e8c592ee957ccb73603d5/lightbus_experiments/structured_logging.py#L9-L12
|
|||
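An illustrative call for the event_dict_ordering record above; the processor follows the usual structlog (logger, method_name, event_dict) convention, and the event dict contents are made up.

# Self-contained sketch: the tiny processor is repeated here so the call runs on its own.
def event_dict_ordering(logger, method_name, event_dict):
    ordered = {"event": event_dict.pop("event")}
    ordered.update(**event_dict)
    return ordered

print(event_dict_ordering(None, "info", {"level": "info", "event": "bus_started", "pid": 123}))
# {'event': 'bus_started', 'level': 'info', 'pid': 123}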
demisto/content
|
5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07
|
Packs/PhishTank/Integrations/PhishTankV2/PhishTankV2.py
|
python
|
reload
|
(client: Client)
|
return parsed_response
|
This function is responsible for:
1. request a csv file from PhishTank API (calling to client.get_http_request)
2. parsing an API response and saving all relevant information into a dictionary
Args:
client:
(Client) : client to use in the PhisTankV2 integration.
Returns:
dictionary of parsed http response. Each url is a key and his values are:
"id,submission_time,verified,verification_time,online,target"
|
This function is responsible for:
1. request a csv file from PhishTank API (calling to client.get_http_request)
2. parsing an API response and saving all relevant information into a dictionary
|
[
"This",
"function",
"is",
"responsible",
"for",
":",
"1",
".",
"request",
"a",
"csv",
"file",
"from",
"PhishTank",
"API",
"(",
"calling",
"to",
"client",
".",
"get_http_request",
")",
"2",
".",
"parsing",
"an",
"API",
"response",
"and",
"saving",
"all",
"relevant",
"information",
"into",
"a",
"dictionary"
] |
def reload(client: Client) -> dict:
"""
This function is responsible for:
1. request a csv file from PhishTank API (calling to client.get_http_request)
2. parsing an API response and saving all relevant information into a dictionary
Args:
client:
(Client) : client to use in the PhisTankV2 integration.
Returns:
dictionary of parsed http response. Each url is a key and his values are:
"id,submission_time,verified,verification_time,online,target"
"""
response = client.get_http_request(RELOAD_DATA_URL_SUFFIX)
response_is_empty = not response
if response_is_empty:
return dict()
response = response.splitlines()
parsed_response = {}
columns = response[0].strip().split(",") # get csv headers
for index, line in list(enumerate(response))[1:]:
line = line.split(",")
line = parse_response_line(line, index, response)
invalid_parsed_line = line is None
if invalid_parsed_line:
continue
url = remove_last_slash(line[columns.index("url")])
if url:
parsed_response[url] = {
"phish_id": line[columns.index("phish_id")].strip(),
"submission_time": line[columns.index("submission_time")].strip(),
"verified": line[columns.index("verified")].strip(),
"verification_time": line[columns.index("verification_time")].strip(),
"online": line[columns.index("online")].strip(),
"target": line[columns.index("target")].strip(),
}
return parsed_response
|
[
"def",
"reload",
"(",
"client",
":",
"Client",
")",
"->",
"dict",
":",
"response",
"=",
"client",
".",
"get_http_request",
"(",
"RELOAD_DATA_URL_SUFFIX",
")",
"response_is_empty",
"=",
"not",
"response",
"if",
"response_is_empty",
":",
"return",
"dict",
"(",
")",
"response",
"=",
"response",
".",
"splitlines",
"(",
")",
"parsed_response",
"=",
"{",
"}",
"columns",
"=",
"response",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\",\"",
")",
"# get csv headers",
"for",
"index",
",",
"line",
"in",
"list",
"(",
"enumerate",
"(",
"response",
")",
")",
"[",
"1",
":",
"]",
":",
"line",
"=",
"line",
".",
"split",
"(",
"\",\"",
")",
"line",
"=",
"parse_response_line",
"(",
"line",
",",
"index",
",",
"response",
")",
"invalid_parsed_line",
"=",
"line",
"is",
"None",
"if",
"invalid_parsed_line",
":",
"continue",
"url",
"=",
"remove_last_slash",
"(",
"line",
"[",
"columns",
".",
"index",
"(",
"\"url\"",
")",
"]",
")",
"if",
"url",
":",
"parsed_response",
"[",
"url",
"]",
"=",
"{",
"\"phish_id\"",
":",
"line",
"[",
"columns",
".",
"index",
"(",
"\"phish_id\"",
")",
"]",
".",
"strip",
"(",
")",
",",
"\"submission_time\"",
":",
"line",
"[",
"columns",
".",
"index",
"(",
"\"submission_time\"",
")",
"]",
".",
"strip",
"(",
")",
",",
"\"verified\"",
":",
"line",
"[",
"columns",
".",
"index",
"(",
"\"verified\"",
")",
"]",
".",
"strip",
"(",
")",
",",
"\"verification_time\"",
":",
"line",
"[",
"columns",
".",
"index",
"(",
"\"verification_time\"",
")",
"]",
".",
"strip",
"(",
")",
",",
"\"online\"",
":",
"line",
"[",
"columns",
".",
"index",
"(",
"\"online\"",
")",
"]",
".",
"strip",
"(",
")",
",",
"\"target\"",
":",
"line",
"[",
"columns",
".",
"index",
"(",
"\"target\"",
")",
"]",
".",
"strip",
"(",
")",
",",
"}",
"return",
"parsed_response"
] |
https://github.com/demisto/content/blob/5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07/Packs/PhishTank/Integrations/PhishTankV2/PhishTankV2.py#L224-L261
|
|
Cadene/bootstrap.pytorch
|
e7d55b52fe8d819de7ea3da8b1027d4a3dcc9e0c
|
bootstrap/lib/options.py
|
python
|
Options.__new__
|
(cls, source=None, arguments_callback=None, lock=False, run_parser=True)
|
return Options.__instance
|
[] |
def __new__(cls, source=None, arguments_callback=None, lock=False, run_parser=True):
# Options is a singleton, we will only build if it has not been built before
if not Options.__instance:
Options.__instance = object.__new__(Options)
if source:
cls.source = source
else:
# Parsing only the path_opts argument to find yaml file
optfile_parser = Options.HelpParser(add_help=True)
optfile_parser.add_argument('-o', '--path_opts', type=str, required=True)
cls.source = optfile_parser.parse_known_args()[0].path_opts
options_dict = Options.load_yaml_opts(cls.source)
if run_parser:
fullopt_parser = Options.HelpParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
fullopt_parser.add_argument('-o', '--path_opts', type=str, required=True)
Options.__instance.add_options(fullopt_parser, options_dict)
arguments = fullopt_parser.parse_args()
if arguments_callback:
arguments = arguments_callback(Options.__instance, arguments, options_dict)
Options.__instance.options = OptionsDict()
for argname in vars(arguments):
nametree = argname.split('.')
value = getattr(arguments, argname)
position = Options.__instance.options
for piece in nametree[:-1]:
if piece in position and isinstance(position[piece], collections.abc.Mapping):
position = position[piece]
else:
position[piece] = {}
position = position[piece]
position[nametree[-1]] = value
else:
Options.__instance.options = options_dict
if lock:
Options.__instance.lock()
return Options.__instance
|
[
"def",
"__new__",
"(",
"cls",
",",
"source",
"=",
"None",
",",
"arguments_callback",
"=",
"None",
",",
"lock",
"=",
"False",
",",
"run_parser",
"=",
"True",
")",
":",
"# Options is a singleton, we will only build if it has not been built before",
"if",
"not",
"Options",
".",
"__instance",
":",
"Options",
".",
"__instance",
"=",
"object",
".",
"__new__",
"(",
"Options",
")",
"if",
"source",
":",
"cls",
".",
"source",
"=",
"source",
"else",
":",
"# Parsing only the path_opts argument to find yaml file",
"optfile_parser",
"=",
"Options",
".",
"HelpParser",
"(",
"add_help",
"=",
"True",
")",
"optfile_parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--path_opts'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
")",
"cls",
".",
"source",
"=",
"optfile_parser",
".",
"parse_known_args",
"(",
")",
"[",
"0",
"]",
".",
"path_opts",
"options_dict",
"=",
"Options",
".",
"load_yaml_opts",
"(",
"cls",
".",
"source",
")",
"if",
"run_parser",
":",
"fullopt_parser",
"=",
"Options",
".",
"HelpParser",
"(",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
")",
"fullopt_parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--path_opts'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
")",
"Options",
".",
"__instance",
".",
"add_options",
"(",
"fullopt_parser",
",",
"options_dict",
")",
"arguments",
"=",
"fullopt_parser",
".",
"parse_args",
"(",
")",
"if",
"arguments_callback",
":",
"arguments",
"=",
"arguments_callback",
"(",
"Options",
".",
"__instance",
",",
"arguments",
",",
"options_dict",
")",
"Options",
".",
"__instance",
".",
"options",
"=",
"OptionsDict",
"(",
")",
"for",
"argname",
"in",
"vars",
"(",
"arguments",
")",
":",
"nametree",
"=",
"argname",
".",
"split",
"(",
"'.'",
")",
"value",
"=",
"getattr",
"(",
"arguments",
",",
"argname",
")",
"position",
"=",
"Options",
".",
"__instance",
".",
"options",
"for",
"piece",
"in",
"nametree",
"[",
":",
"-",
"1",
"]",
":",
"if",
"piece",
"in",
"position",
"and",
"isinstance",
"(",
"position",
"[",
"piece",
"]",
",",
"collections",
".",
"abc",
".",
"Mapping",
")",
":",
"position",
"=",
"position",
"[",
"piece",
"]",
"else",
":",
"position",
"[",
"piece",
"]",
"=",
"{",
"}",
"position",
"=",
"position",
"[",
"piece",
"]",
"position",
"[",
"nametree",
"[",
"-",
"1",
"]",
"]",
"=",
"value",
"else",
":",
"Options",
".",
"__instance",
".",
"options",
"=",
"options_dict",
"if",
"lock",
":",
"Options",
".",
"__instance",
".",
"lock",
"(",
")",
"return",
"Options",
".",
"__instance"
] |
https://github.com/Cadene/bootstrap.pytorch/blob/e7d55b52fe8d819de7ea3da8b1027d4a3dcc9e0c/bootstrap/lib/options.py#L164-L206
|
|||
CvvT/dumpDex
|
92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1
|
python/idc.py
|
python
|
MakeFrame
|
(ea, lvsize, frregs, argsize)
|
return func.frame
|
Make function frame
@param ea: any address belonging to the function
@param lvsize: size of function local variables
@param frregs: size of saved registers
@param argsize: size of function arguments
@return: ID of function frame or -1
If the function did not have a frame, the frame
will be created. Otherwise the frame will be modified
|
Make function frame
|
[
"Make",
"function",
"frame"
] |
def MakeFrame(ea, lvsize, frregs, argsize):
"""
Make function frame
@param ea: any address belonging to the function
@param lvsize: size of function local variables
@param frregs: size of saved registers
@param argsize: size of function arguments
@return: ID of function frame or -1
If the function did not have a frame, the frame
will be created. Otherwise the frame will be modified
"""
func = idaapi.get_func(ea)
if func is None:
return -1
frameid = idaapi.add_frame(func, lvsize, frregs, argsize)
if not frameid:
if not idaapi.set_frame_size(func, lvsize, frregs, argsize):
return -1
return func.frame
|
[
"def",
"MakeFrame",
"(",
"ea",
",",
"lvsize",
",",
"frregs",
",",
"argsize",
")",
":",
"func",
"=",
"idaapi",
".",
"get_func",
"(",
"ea",
")",
"if",
"func",
"is",
"None",
":",
"return",
"-",
"1",
"frameid",
"=",
"idaapi",
".",
"add_frame",
"(",
"func",
",",
"lvsize",
",",
"frregs",
",",
"argsize",
")",
"if",
"not",
"frameid",
":",
"if",
"not",
"idaapi",
".",
"set_frame_size",
"(",
"func",
",",
"lvsize",
",",
"frregs",
",",
"argsize",
")",
":",
"return",
"-",
"1",
"return",
"func",
".",
"frame"
] |
https://github.com/CvvT/dumpDex/blob/92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1/python/idc.py#L4397-L4420
|
|
mlcommons/training
|
4a4d5a0b7efe99c680306b1940749211d4238a84
|
translation/tensorflow/bert/run_classifier.py
|
python
|
input_fn_builder
|
(features, seq_length, is_training, drop_remainder)
|
return input_fn
|
Creates an `input_fn` closure to be passed to TPUEstimator.
|
Creates an `input_fn` closure to be passed to TPUEstimator.
|
[
"Creates",
"an",
"input_fn",
"closure",
"to",
"be",
"passed",
"to",
"TPUEstimator",
"."
] |
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
|
[
"def",
"input_fn_builder",
"(",
"features",
",",
"seq_length",
",",
"is_training",
",",
"drop_remainder",
")",
":",
"all_input_ids",
"=",
"[",
"]",
"all_input_mask",
"=",
"[",
"]",
"all_segment_ids",
"=",
"[",
"]",
"all_label_ids",
"=",
"[",
"]",
"for",
"feature",
"in",
"features",
":",
"all_input_ids",
".",
"append",
"(",
"feature",
".",
"input_ids",
")",
"all_input_mask",
".",
"append",
"(",
"feature",
".",
"input_mask",
")",
"all_segment_ids",
".",
"append",
"(",
"feature",
".",
"segment_ids",
")",
"all_label_ids",
".",
"append",
"(",
"feature",
".",
"label_id",
")",
"def",
"input_fn",
"(",
"params",
")",
":",
"\"\"\"The actual input function.\"\"\"",
"batch_size",
"=",
"params",
"[",
"\"batch_size\"",
"]",
"num_examples",
"=",
"len",
"(",
"features",
")",
"# This is for demo purposes and does NOT scale to large data sets. We do",
"# not use Dataset.from_generator() because that uses tf.py_func which is",
"# not TPU compatible. The right way to load data is with TFRecordReader.",
"d",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"{",
"\"input_ids\"",
":",
"tf",
".",
"constant",
"(",
"all_input_ids",
",",
"shape",
"=",
"[",
"num_examples",
",",
"seq_length",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"\"input_mask\"",
":",
"tf",
".",
"constant",
"(",
"all_input_mask",
",",
"shape",
"=",
"[",
"num_examples",
",",
"seq_length",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"\"segment_ids\"",
":",
"tf",
".",
"constant",
"(",
"all_segment_ids",
",",
"shape",
"=",
"[",
"num_examples",
",",
"seq_length",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"\"label_ids\"",
":",
"tf",
".",
"constant",
"(",
"all_label_ids",
",",
"shape",
"=",
"[",
"num_examples",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"}",
")",
"if",
"is_training",
":",
"d",
"=",
"d",
".",
"repeat",
"(",
")",
"d",
"=",
"d",
".",
"shuffle",
"(",
"buffer_size",
"=",
"100",
")",
"d",
"=",
"d",
".",
"batch",
"(",
"batch_size",
"=",
"batch_size",
",",
"drop_remainder",
"=",
"drop_remainder",
")",
"return",
"d",
"return",
"input_fn"
] |
https://github.com/mlcommons/training/blob/4a4d5a0b7efe99c680306b1940749211d4238a84/translation/tensorflow/bert/run_classifier.py#L713-L762
|
|
reviewboard/reviewboard
|
7395902e4c181bcd1d633f61105012ffb1d18e1b
|
reviewboard/scmtools/forms.py
|
python
|
RepositoryForm._populate_hosting_service_fields
|
(self)
|
Populates all the main hosting service fields in the form.
This populates the hosting service type and the repository plan
on the form. These are only set if operating on an existing
repository.
|
Populates all the main hosting service fields in the form.
|
[
"Populates",
"all",
"the",
"main",
"hosting",
"service",
"fields",
"in",
"the",
"form",
"."
] |
def _populate_hosting_service_fields(self):
"""Populates all the main hosting service fields in the form.
This populates the hosting service type and the repository plan
on the form. These are only set if operating on an existing
repository.
"""
# NOTE: This method *cannot* access anything in the loaded forms or
# hosting_service_info attributes.
hosting_account = self.instance.hosting_account
if hosting_account:
service = hosting_account.service
self.fields['hosting_type'].initial = \
hosting_account.service_name
if service.plans:
self.fields['repository_plan'].choices = [
(plan_id, info['name'])
for plan_id, info in service.plans
]
repository_plan = \
self.instance.extra_data.get('repository_plan', None)
if repository_plan:
self.fields['repository_plan'].initial = repository_plan
|
[
"def",
"_populate_hosting_service_fields",
"(",
"self",
")",
":",
"# NOTE: This method *cannot* access anything in the loaded forms or",
"# hosting_service_info attributes.",
"hosting_account",
"=",
"self",
".",
"instance",
".",
"hosting_account",
"if",
"hosting_account",
":",
"service",
"=",
"hosting_account",
".",
"service",
"self",
".",
"fields",
"[",
"'hosting_type'",
"]",
".",
"initial",
"=",
"hosting_account",
".",
"service_name",
"if",
"service",
".",
"plans",
":",
"self",
".",
"fields",
"[",
"'repository_plan'",
"]",
".",
"choices",
"=",
"[",
"(",
"plan_id",
",",
"info",
"[",
"'name'",
"]",
")",
"for",
"plan_id",
",",
"info",
"in",
"service",
".",
"plans",
"]",
"repository_plan",
"=",
"self",
".",
"instance",
".",
"extra_data",
".",
"get",
"(",
"'repository_plan'",
",",
"None",
")",
"if",
"repository_plan",
":",
"self",
".",
"fields",
"[",
"'repository_plan'",
"]",
".",
"initial",
"=",
"repository_plan"
] |
https://github.com/reviewboard/reviewboard/blob/7395902e4c181bcd1d633f61105012ffb1d18e1b/reviewboard/scmtools/forms.py#L1395-L1421
|
||
livid/v2ex-gae
|
32be3a77d535e7c9df85a333e01ab8834d0e8581
|
twitter/twitter.py
|
python
|
User.AsDict
|
(self)
|
return data
|
A dict representation of this twitter.User instance.
The return value uses the same key names as the JSON representation.
Return:
A dict representing this twitter.User instance
|
A dict representation of this twitter.User instance.
|
[
"A",
"dict",
"representation",
"of",
"this",
"twitter",
".",
"User",
"instance",
"."
] |
def AsDict(self):
'''A dict representation of this twitter.User instance.
The return value uses the same key names as the JSON representation.
Return:
A dict representing this twitter.User instance
'''
data = {}
if self.id:
data['id'] = self.id
if self.name:
data['name'] = self.name
if self.screen_name:
data['screen_name'] = self.screen_name
if self.location:
data['location'] = self.location
if self.description:
data['description'] = self.description
if self.profile_image_url:
data['profile_image_url'] = self.profile_image_url
if self.profile_background_tile is not None:
data['profile_background_tile'] = self.profile_background_tile
if self.profile_background_image_url:
data['profile_sidebar_fill_color'] = self.profile_background_image_url
if self.profile_background_color:
data['profile_background_color'] = self.profile_background_color
if self.profile_link_color:
data['profile_link_color'] = self.profile_link_color
if self.profile_text_color:
data['profile_text_color'] = self.profile_text_color
if self.protected is not None:
data['protected'] = self.protected
if self.utc_offset:
data['utc_offset'] = self.utc_offset
if self.time_zone:
data['time_zone'] = self.time_zone
if self.url:
data['url'] = self.url
if self.status:
data['status'] = self.status.AsDict()
if self.friends_count:
data['friends_count'] = self.friends_count
if self.followers_count:
data['followers_count'] = self.followers_count
if self.statuses_count:
data['statuses_count'] = self.statuses_count
if self.favourites_count:
data['favourites_count'] = self.favourites_count
return data
|
[
"def",
"AsDict",
"(",
"self",
")",
":",
"data",
"=",
"{",
"}",
"if",
"self",
".",
"id",
":",
"data",
"[",
"'id'",
"]",
"=",
"self",
".",
"id",
"if",
"self",
".",
"name",
":",
"data",
"[",
"'name'",
"]",
"=",
"self",
".",
"name",
"if",
"self",
".",
"screen_name",
":",
"data",
"[",
"'screen_name'",
"]",
"=",
"self",
".",
"screen_name",
"if",
"self",
".",
"location",
":",
"data",
"[",
"'location'",
"]",
"=",
"self",
".",
"location",
"if",
"self",
".",
"description",
":",
"data",
"[",
"'description'",
"]",
"=",
"self",
".",
"description",
"if",
"self",
".",
"profile_image_url",
":",
"data",
"[",
"'profile_image_url'",
"]",
"=",
"self",
".",
"profile_image_url",
"if",
"self",
".",
"profile_background_tile",
"is",
"not",
"None",
":",
"data",
"[",
"'profile_background_tile'",
"]",
"=",
"self",
".",
"profile_background_tile",
"if",
"self",
".",
"profile_background_image_url",
":",
"data",
"[",
"'profile_sidebar_fill_color'",
"]",
"=",
"self",
".",
"profile_background_image_url",
"if",
"self",
".",
"profile_background_color",
":",
"data",
"[",
"'profile_background_color'",
"]",
"=",
"self",
".",
"profile_background_color",
"if",
"self",
".",
"profile_link_color",
":",
"data",
"[",
"'profile_link_color'",
"]",
"=",
"self",
".",
"profile_link_color",
"if",
"self",
".",
"profile_text_color",
":",
"data",
"[",
"'profile_text_color'",
"]",
"=",
"self",
".",
"profile_text_color",
"if",
"self",
".",
"protected",
"is",
"not",
"None",
":",
"data",
"[",
"'protected'",
"]",
"=",
"self",
".",
"protected",
"if",
"self",
".",
"utc_offset",
":",
"data",
"[",
"'utc_offset'",
"]",
"=",
"self",
".",
"utc_offset",
"if",
"self",
".",
"time_zone",
":",
"data",
"[",
"'time_zone'",
"]",
"=",
"self",
".",
"time_zone",
"if",
"self",
".",
"url",
":",
"data",
"[",
"'url'",
"]",
"=",
"self",
".",
"url",
"if",
"self",
".",
"status",
":",
"data",
"[",
"'status'",
"]",
"=",
"self",
".",
"status",
".",
"AsDict",
"(",
")",
"if",
"self",
".",
"friends_count",
":",
"data",
"[",
"'friends_count'",
"]",
"=",
"self",
".",
"friends_count",
"if",
"self",
".",
"followers_count",
":",
"data",
"[",
"'followers_count'",
"]",
"=",
"self",
".",
"followers_count",
"if",
"self",
".",
"statuses_count",
":",
"data",
"[",
"'statuses_count'",
"]",
"=",
"self",
".",
"statuses_count",
"if",
"self",
".",
"favourites_count",
":",
"data",
"[",
"'favourites_count'",
"]",
"=",
"self",
".",
"favourites_count",
"return",
"data"
] |
https://github.com/livid/v2ex-gae/blob/32be3a77d535e7c9df85a333e01ab8834d0e8581/twitter/twitter.py#L877-L926
|
|
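A hedged aside on the User.AsDict record above: the long chain of `if self.x: data['x'] = self.x` checks can be expressed as a loop over field names. The sketch below is illustrative only (it is not part of the quoted python-twitter code) and omits the background-image field, whose target key in the original ('profile_sidebar_fill_color') looks like a copy-paste slip in the quoted source.

# Illustrative sketch only -- not part of the quoted library.
_SIMPLE_FIELDS = (
    'id', 'name', 'screen_name', 'location', 'description',
    'profile_image_url', 'profile_background_color', 'profile_link_color',
    'profile_text_color', 'utc_offset', 'time_zone', 'url',
    'friends_count', 'followers_count', 'statuses_count', 'favourites_count',
)

def user_as_dict(user):
    """Build a dict from a twitter.User-like object, skipping unset fields."""
    data = {name: getattr(user, name)
            for name in _SIMPLE_FIELDS if getattr(user, name, None)}
    # Fields that need explicit handling, mirroring the original method.
    if getattr(user, 'profile_background_tile', None) is not None:
        data['profile_background_tile'] = user.profile_background_tile
    if getattr(user, 'protected', None) is not None:
        data['protected'] = user.protected
    if getattr(user, 'status', None):
        data['status'] = user.status.AsDict()
    return data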
isce-framework/isce2
|
0e5114a8bede3caf1d533d98e44dfe4b983e3f48
|
contrib/geo_autoRIFT/autoRIFT/autoRIFT.py
|
python
|
autoRIFT.preprocess_filt_wal
|
(self)
|
Do the pre processing using wallis filter (10 min vs 15 min in Matlab).
|
Do the pre processing using wallis filter (10 min vs 15 min in Matlab).
|
[
"Do",
"the",
"pre",
"processing",
"using",
"wallis",
"filter",
"(",
"10",
"min",
"vs",
"15",
"min",
"in",
"Matlab",
")",
"."
] |
def preprocess_filt_wal(self):
'''
Do the pre processing using wallis filter (10 min vs 15 min in Matlab).
'''
import cv2
import numpy as np
# import scipy.io as sio
self.zeroMask = (self.I1 == 0)
kernel = np.ones((self.WallisFilterWidth,self.WallisFilterWidth), dtype=np.float32)
m = cv2.filter2D(self.I1,-1,kernel,borderType=cv2.BORDER_CONSTANT)/np.sum(kernel)
m2 = (self.I1)**2
m2 = cv2.filter2D(m2,-1,kernel,borderType=cv2.BORDER_CONSTANT)/np.sum(kernel)
s = np.sqrt(m2 - m**2) * np.sqrt(np.sum(kernel)/(np.sum(kernel)-1.0))
self.I1 = (self.I1 - m) / s
# pdb.set_trace()
m = cv2.filter2D(self.I2,-1,kernel,borderType=cv2.BORDER_CONSTANT)/np.sum(kernel)
m2 = (self.I2)**2
m2 = cv2.filter2D(m2,-1,kernel,borderType=cv2.BORDER_CONSTANT)/np.sum(kernel)
s = np.sqrt(m2 - m**2) * np.sqrt(np.sum(kernel)/(np.sum(kernel)-1.0))
self.I2 = (self.I2 - m) / s
|
[
"def",
"preprocess_filt_wal",
"(",
"self",
")",
":",
"import",
"cv2",
"import",
"numpy",
"as",
"np",
"# import scipy.io as sio",
"self",
".",
"zeroMask",
"=",
"(",
"self",
".",
"I1",
"==",
"0",
")",
"kernel",
"=",
"np",
".",
"ones",
"(",
"(",
"self",
".",
"WallisFilterWidth",
",",
"self",
".",
"WallisFilterWidth",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"m",
"=",
"cv2",
".",
"filter2D",
"(",
"self",
".",
"I1",
",",
"-",
"1",
",",
"kernel",
",",
"borderType",
"=",
"cv2",
".",
"BORDER_CONSTANT",
")",
"/",
"np",
".",
"sum",
"(",
"kernel",
")",
"m2",
"=",
"(",
"self",
".",
"I1",
")",
"**",
"2",
"m2",
"=",
"cv2",
".",
"filter2D",
"(",
"m2",
",",
"-",
"1",
",",
"kernel",
",",
"borderType",
"=",
"cv2",
".",
"BORDER_CONSTANT",
")",
"/",
"np",
".",
"sum",
"(",
"kernel",
")",
"s",
"=",
"np",
".",
"sqrt",
"(",
"m2",
"-",
"m",
"**",
"2",
")",
"*",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"kernel",
")",
"/",
"(",
"np",
".",
"sum",
"(",
"kernel",
")",
"-",
"1.0",
")",
")",
"self",
".",
"I1",
"=",
"(",
"self",
".",
"I1",
"-",
"m",
")",
"/",
"s",
"# pdb.set_trace()",
"m",
"=",
"cv2",
".",
"filter2D",
"(",
"self",
".",
"I2",
",",
"-",
"1",
",",
"kernel",
",",
"borderType",
"=",
"cv2",
".",
"BORDER_CONSTANT",
")",
"/",
"np",
".",
"sum",
"(",
"kernel",
")",
"m2",
"=",
"(",
"self",
".",
"I2",
")",
"**",
"2",
"m2",
"=",
"cv2",
".",
"filter2D",
"(",
"m2",
",",
"-",
"1",
",",
"kernel",
",",
"borderType",
"=",
"cv2",
".",
"BORDER_CONSTANT",
")",
"/",
"np",
".",
"sum",
"(",
"kernel",
")",
"s",
"=",
"np",
".",
"sqrt",
"(",
"m2",
"-",
"m",
"**",
"2",
")",
"*",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"kernel",
")",
"/",
"(",
"np",
".",
"sum",
"(",
"kernel",
")",
"-",
"1.0",
")",
")",
"self",
".",
"I2",
"=",
"(",
"self",
".",
"I2",
"-",
"m",
")",
"/",
"s"
] |
https://github.com/isce-framework/isce2/blob/0e5114a8bede3caf1d533d98e44dfe4b983e3f48/contrib/geo_autoRIFT/autoRIFT/autoRIFT.py#L50-L83
|
||
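The preprocessing record above is a Wallis filter: for each pixel it computes the local mean m and the Bessel-corrected local standard deviation s over a WallisFilterWidth-square window, then replaces the image with (I - m) / s. Below is a minimal, self-contained sketch of the same computation, with an arbitrary 5x5 window and a random test image standing in for self.I1.

import cv2
import numpy as np

img = np.random.rand(64, 64).astype(np.float32)   # stand-in for self.I1
w = 5                                             # illustrative window size
kernel = np.ones((w, w), dtype=np.float32)
n = np.sum(kernel)                                # pixels per window

m = cv2.filter2D(img, -1, kernel, borderType=cv2.BORDER_CONSTANT) / n        # local mean
m2 = cv2.filter2D(img ** 2, -1, kernel, borderType=cv2.BORDER_CONSTANT) / n  # local mean of squares
s = np.sqrt(m2 - m ** 2) * np.sqrt(n / (n - 1.0))                            # local sample std
normalized = (img - m) / s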
theotherp/nzbhydra
|
4b03d7f769384b97dfc60dade4806c0fc987514e
|
libs/imaplib.py
|
python
|
IMAP4.recent
|
(self)
|
return self._untagged_response(typ, dat, name)
|
Return most recent 'RECENT' responses if any exist,
else prompt server for an update using the 'NOOP' command.
(typ, [data]) = <instance>.recent()
'data' is None if no new messages,
else list of RECENT responses, most recent last.
|
Return most recent 'RECENT' responses if any exist,
else prompt server for an update using the 'NOOP' command.
|
[
"Return",
"most",
"recent",
"RECENT",
"responses",
"if",
"any",
"exist",
"else",
"prompt",
"server",
"for",
"an",
"update",
"using",
"the",
"NOOP",
"command",
"."
] |
def recent(self):
"""Return most recent 'RECENT' responses if any exist,
else prompt server for an update using the 'NOOP' command.
(typ, [data]) = <instance>.recent()
'data' is None if no new messages,
else list of RECENT responses, most recent last.
"""
name = 'RECENT'
typ, dat = self._untagged_response('OK', [None], name)
if dat[-1]:
return typ, dat
typ, dat = self.noop() # Prod server for response
return self._untagged_response(typ, dat, name)
|
[
"def",
"recent",
"(",
"self",
")",
":",
"name",
"=",
"'RECENT'",
"typ",
",",
"dat",
"=",
"self",
".",
"_untagged_response",
"(",
"'OK'",
",",
"[",
"None",
"]",
",",
"name",
")",
"if",
"dat",
"[",
"-",
"1",
"]",
":",
"return",
"typ",
",",
"dat",
"typ",
",",
"dat",
"=",
"self",
".",
"noop",
"(",
")",
"# Prod server for response",
"return",
"self",
".",
"_untagged_response",
"(",
"typ",
",",
"dat",
",",
"name",
")"
] |
https://github.com/theotherp/nzbhydra/blob/4b03d7f769384b97dfc60dade4806c0fc987514e/libs/imaplib.py#L286-L300
|
|
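A hedged usage sketch for the recent() record above; the host and credentials are placeholders, not values taken from the record.

import imaplib

conn = imaplib.IMAP4_SSL('imap.example.com')
conn.login('user@example.com', 'app-password')
conn.select('INBOX')
typ, data = conn.recent()     # data is [None] if no new messages,
if data[-1]:                  # otherwise a list of RECENT responses
    print('new messages:', data)
conn.logout()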
freelawproject/courtlistener
|
ab3ae7bb6e5e836b286749113e7dbb403d470912
|
cl/search/models.py
|
python
|
sort_cites
|
(c)
|
Sort a list or QuerySet of citations according to BlueBook ordering.
This is intended as a parameter to the 'key' argument of a sorting method
like `sort` or `sorted`. It intends to take a single citation and give it a
numeric score as to where it should occur in a list of other citations.
For example:
cs = Citation.objects.filter(cluser_id=222)
cs = sorted(cs, key=sort_cites)
That'd give you the list of the Citation items sorted by their priority.
:param c: A Citation object to score.
:return: A score for the Citation passed in.
|
Sort a list or QuerySet of citations according to BlueBook ordering.
|
[
"Sort",
"a",
"list",
"or",
"QuerySet",
"of",
"citations",
"according",
"to",
"BlueBook",
"ordering",
"."
] |
def sort_cites(c):
"""Sort a list or QuerySet of citations according to BlueBook ordering.
This is intended as a parameter to the 'key' argument of a sorting method
like `sort` or `sorted`. It intends to take a single citation and give it a
numeric score as to where it should occur in a list of other citations.
For example:
cs = Citation.objects.filter(cluser_id=222)
cs = sorted(cs, key=sort_cites)
That'd give you the list of the Citation items sorted by their priority.
:param c: A Citation object to score.
:return: A score for the Citation passed in.
"""
if c.type == Citation.NEUTRAL:
return 0
if c.type == Citation.FEDERAL:
if c.reporter == "U.S.":
return 1.1
elif c.reporter == "S. Ct.":
return 1.2
elif "L. Ed." in c.reporter:
return 1.3
else:
return 1.4
elif c.type == Citation.SCOTUS_EARLY:
return 2
elif c.type == Citation.SPECIALTY:
return 3
elif c.type == Citation.STATE_REGIONAL:
return 4
elif c.type == Citation.STATE:
return 5
elif c.type == Citation.WEST:
return 6
elif c.type == Citation.LEXIS:
return 7
else:
return 8
|
[
"def",
"sort_cites",
"(",
"c",
")",
":",
"if",
"c",
".",
"type",
"==",
"Citation",
".",
"NEUTRAL",
":",
"return",
"0",
"if",
"c",
".",
"type",
"==",
"Citation",
".",
"FEDERAL",
":",
"if",
"c",
".",
"reporter",
"==",
"\"U.S.\"",
":",
"return",
"1.1",
"elif",
"c",
".",
"reporter",
"==",
"\"S. Ct.\"",
":",
"return",
"1.2",
"elif",
"\"L. Ed.\"",
"in",
"c",
".",
"reporter",
":",
"return",
"1.3",
"else",
":",
"return",
"1.4",
"elif",
"c",
".",
"type",
"==",
"Citation",
".",
"SCOTUS_EARLY",
":",
"return",
"2",
"elif",
"c",
".",
"type",
"==",
"Citation",
".",
"SPECIALTY",
":",
"return",
"3",
"elif",
"c",
".",
"type",
"==",
"Citation",
".",
"STATE_REGIONAL",
":",
"return",
"4",
"elif",
"c",
".",
"type",
"==",
"Citation",
".",
"STATE",
":",
"return",
"5",
"elif",
"c",
".",
"type",
"==",
"Citation",
".",
"WEST",
":",
"return",
"6",
"elif",
"c",
".",
"type",
"==",
"Citation",
".",
"LEXIS",
":",
"return",
"7",
"else",
":",
"return",
"8"
] |
https://github.com/freelawproject/courtlistener/blob/ab3ae7bb6e5e836b286749113e7dbb403d470912/cl/search/models.py#L2492-L2533
|
||
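To see the ordering the sort_cites record above produces, here is a hedged sketch: it assumes the function is pasted into the same module as the stand-in Citation class below (the real CourtListener model constants are not reproduced here) and uses lightweight namedtuples in place of Citation model instances.

from collections import namedtuple

class Citation:                      # stand-in constants, not the real model
    NEUTRAL, FEDERAL, SCOTUS_EARLY, SPECIALTY = 0, 1, 2, 3
    STATE_REGIONAL, STATE, WEST, LEXIS = 4, 5, 6, 7

FakeCite = namedtuple('FakeCite', 'type reporter')

cites = [
    FakeCite(Citation.FEDERAL, 'S. Ct.'),
    FakeCite(Citation.STATE, 'Cal.'),
    FakeCite(Citation.FEDERAL, 'U.S.'),
    FakeCite(Citation.NEUTRAL, '2020 WI 1'),
]
print([c.reporter for c in sorted(cites, key=sort_cites)])
# ['2020 WI 1', 'U.S.', 'S. Ct.', 'Cal.']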
gramps-project/gramps
|
04d4651a43eb210192f40a9f8c2bad8ee8fa3753
|
gramps/plugins/graph/gvrelgraph.py
|
python
|
RelGraphReport.add_child_links_to_families
|
(self, person_handles)
|
returns string of Graphviz edges linking parents to families or
children
|
returns string of Graphviz edges linking parents to families or
children
|
[
"returns",
"string",
"of",
"Graphviz",
"edges",
"linking",
"parents",
"to",
"families",
"or",
"children"
] |
def add_child_links_to_families(self, person_handles):
"""
returns string of Graphviz edges linking parents to families or
children
"""
for person_handle in person_handles:
if self._user:
self._user.step_progress()
person = self._db.get_person_from_handle(person_handle)
p_id = person.get_gramps_id()
for fam_handle in person.get_parent_family_handle_list():
family = self._db.get_family_from_handle(fam_handle)
father_handle = family.get_father_handle()
mother_handle = family.get_mother_handle()
sibling = False
for child_ref in family.get_child_ref_list():
if child_ref.ref == person_handle:
frel = child_ref.frel
mrel = child_ref.mrel
elif child_ref.ref in self.persons:
sibling = True
if (self.show_families and
((father_handle and father_handle in self.persons) or
(mother_handle and mother_handle in self.persons) or
sibling)):
# Link to the family node if either parent is in graph
self.add_family_link(p_id, family, frel, mrel)
else:
# Link to the parents' nodes directly, if they are in graph
if father_handle and father_handle in self.persons:
self.add_parent_link(p_id, father_handle, frel)
if mother_handle and mother_handle in self.persons:
self.add_parent_link(p_id, mother_handle, mrel)
|
[
"def",
"add_child_links_to_families",
"(",
"self",
",",
"person_handles",
")",
":",
"for",
"person_handle",
"in",
"person_handles",
":",
"if",
"self",
".",
"_user",
":",
"self",
".",
"_user",
".",
"step_progress",
"(",
")",
"person",
"=",
"self",
".",
"_db",
".",
"get_person_from_handle",
"(",
"person_handle",
")",
"p_id",
"=",
"person",
".",
"get_gramps_id",
"(",
")",
"for",
"fam_handle",
"in",
"person",
".",
"get_parent_family_handle_list",
"(",
")",
":",
"family",
"=",
"self",
".",
"_db",
".",
"get_family_from_handle",
"(",
"fam_handle",
")",
"father_handle",
"=",
"family",
".",
"get_father_handle",
"(",
")",
"mother_handle",
"=",
"family",
".",
"get_mother_handle",
"(",
")",
"sibling",
"=",
"False",
"for",
"child_ref",
"in",
"family",
".",
"get_child_ref_list",
"(",
")",
":",
"if",
"child_ref",
".",
"ref",
"==",
"person_handle",
":",
"frel",
"=",
"child_ref",
".",
"frel",
"mrel",
"=",
"child_ref",
".",
"mrel",
"elif",
"child_ref",
".",
"ref",
"in",
"self",
".",
"persons",
":",
"sibling",
"=",
"True",
"if",
"(",
"self",
".",
"show_families",
"and",
"(",
"(",
"father_handle",
"and",
"father_handle",
"in",
"self",
".",
"persons",
")",
"or",
"(",
"mother_handle",
"and",
"mother_handle",
"in",
"self",
".",
"persons",
")",
"or",
"sibling",
")",
")",
":",
"# Link to the family node if either parent is in graph",
"self",
".",
"add_family_link",
"(",
"p_id",
",",
"family",
",",
"frel",
",",
"mrel",
")",
"else",
":",
"# Link to the parents' nodes directly, if they are in graph",
"if",
"father_handle",
"and",
"father_handle",
"in",
"self",
".",
"persons",
":",
"self",
".",
"add_parent_link",
"(",
"p_id",
",",
"father_handle",
",",
"frel",
")",
"if",
"mother_handle",
"and",
"mother_handle",
"in",
"self",
".",
"persons",
":",
"self",
".",
"add_parent_link",
"(",
"p_id",
",",
"mother_handle",
",",
"mrel",
")"
] |
https://github.com/gramps-project/gramps/blob/04d4651a43eb210192f40a9f8c2bad8ee8fa3753/gramps/plugins/graph/gvrelgraph.py#L284-L316
|
||
NVIDIA/DeepLearningExamples
|
589604d49e016cd9ef4525f7abcc9c7b826cfc5e
|
Tools/PyTorch/TimeSeriesPredictionPlatform/data/script_download_data.py
|
python
|
download_from_url
|
(url, output_path)
|
Downloads a file froma url.
|
Downloads a file froma url.
|
[
"Downloads",
"a",
"file",
"froma",
"url",
"."
] |
def download_from_url(url, output_path):
"""Downloads a file froma url."""
print("Pulling data from {} to {}".format(url, output_path))
wget.download(url, output_path)
print("done")
|
[
"def",
"download_from_url",
"(",
"url",
",",
"output_path",
")",
":",
"print",
"(",
"\"Pulling data from {} to {}\"",
".",
"format",
"(",
"url",
",",
"output_path",
")",
")",
"wget",
".",
"download",
"(",
"url",
",",
"output_path",
")",
"print",
"(",
"\"done\"",
")"
] |
https://github.com/NVIDIA/DeepLearningExamples/blob/589604d49e016cd9ef4525f7abcc9c7b826cfc5e/Tools/PyTorch/TimeSeriesPredictionPlatform/data/script_download_data.py#L65-L70
|
||
dagster-io/dagster
|
b27d569d5fcf1072543533a0c763815d96f90b8f
|
python_modules/libraries/dagster-gcp/dagster_gcp/bigquery/ops.py
|
python
|
bq_solid_for_queries
|
(sql_queries)
|
return _bq_core_command(solid, "solid", sql_queries)
|
Executes BigQuery SQL queries.
Expects a BQ client to be provisioned in resources as context.resources.bigquery.
|
Executes BigQuery SQL queries.
|
[
"Executes",
"BigQuery",
"SQL",
"queries",
"."
] |
def bq_solid_for_queries(sql_queries):
"""
Executes BigQuery SQL queries.
Expects a BQ client to be provisioned in resources as context.resources.bigquery.
"""
return _bq_core_command(solid, "solid", sql_queries)
|
[
"def",
"bq_solid_for_queries",
"(",
"sql_queries",
")",
":",
"return",
"_bq_core_command",
"(",
"solid",
",",
"\"solid\"",
",",
"sql_queries",
")"
] |
https://github.com/dagster-io/dagster/blob/b27d569d5fcf1072543533a0c763815d96f90b8f/python_modules/libraries/dagster-gcp/dagster_gcp/bigquery/ops.py#L72-L79
|
|
Grokzen/redis-py-cluster
|
f0627c91ce23e8784dbc996078428c9bdbacb20b
|
rediscluster/client.py
|
python
|
RedisCluster.cluster_addslots
|
(self, node_id, *slots)
|
return self.execute_command('CLUSTER ADDSLOTS', *slots, node_id=node_id)
|
Assign new hash slots to receiving node
Sends to specified node
|
Assign new hash slots to receiving node
|
[
"Assign",
"new",
"hash",
"slots",
"to",
"receiving",
"node"
] |
def cluster_addslots(self, node_id, *slots):
"""
Assign new hash slots to receiving node
Sends to specified node
"""
return self.execute_command('CLUSTER ADDSLOTS', *slots, node_id=node_id)
|
[
"def",
"cluster_addslots",
"(",
"self",
",",
"node_id",
",",
"*",
"slots",
")",
":",
"return",
"self",
".",
"execute_command",
"(",
"'CLUSTER ADDSLOTS'",
",",
"*",
"slots",
",",
"node_id",
"=",
"node_id",
")"
] |
https://github.com/Grokzen/redis-py-cluster/blob/f0627c91ce23e8784dbc996078428c9bdbacb20b/rediscluster/client.py#L773-L779
|
|
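A hedged usage sketch for the cluster_addslots record above; the startup node address and the node id are placeholders.

from rediscluster import RedisCluster

rc = RedisCluster(startup_nodes=[{'host': '127.0.0.1', 'port': '7000'}],
                  decode_responses=True)
# Assign hash slots 0-9 to the node identified by this (placeholder) id.
rc.cluster_addslots('c0ffee0123456789...', *range(10))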
modelop/hadrian
|
7c63e539d79e6e3cad959792d313dfc8b0c523ea
|
titus/titus/inspector/defs.py
|
python
|
CommandGroup.__init__
|
(self, name, commands)
|
:type name: string
:param name: name of the group
:type commands: list of titus.inspector.defs.Command
:param commands: commands in this group
|
:type name: string
:param name: name of the group
:type commands: list of titus.inspector.defs.Command
:param commands: commands in this group
|
[
":",
"type",
"name",
":",
"string",
":",
"param",
"name",
":",
"name",
"of",
"the",
"group",
":",
"type",
"commands",
":",
"list",
"of",
"titus",
".",
"inspector",
".",
"defs",
".",
"Command",
":",
"param",
"commands",
":",
"commands",
"in",
"this",
"group"
] |
def __init__(self, name, commands):
""":type name: string
:param name: name of the group
:type commands: list of titus.inspector.defs.Command
:param commands: commands in this group
"""
self.name = name
self.commands = dict((x.name, x) for x in commands)
if name is None:
self.help = "Commands:\n"
else:
self.help = "{0} gadget (type '{1} help' for details)\nSubcommands under {2}:\n".format(name, name, name)
for x in commands:
self.help += " {0:<20s} {1}\n".format(x.name, x.help.split("\n")[0] if x.help is not None else "")
self.help = self.help.strip()
|
[
"def",
"__init__",
"(",
"self",
",",
"name",
",",
"commands",
")",
":",
"self",
".",
"name",
"=",
"name",
"self",
".",
"commands",
"=",
"dict",
"(",
"(",
"x",
".",
"name",
",",
"x",
")",
"for",
"x",
"in",
"commands",
")",
"if",
"name",
"is",
"None",
":",
"self",
".",
"help",
"=",
"\"Commands:\\n\"",
"else",
":",
"self",
".",
"help",
"=",
"\"{0} gadget (type '{1} help' for details)\\nSubcommands under {2}:\\n\"",
".",
"format",
"(",
"name",
",",
"name",
",",
"name",
")",
"for",
"x",
"in",
"commands",
":",
"self",
".",
"help",
"+=",
"\" {0:<20s} {1}\\n\"",
".",
"format",
"(",
"x",
".",
"name",
",",
"x",
".",
"help",
".",
"split",
"(",
"\"\\n\"",
")",
"[",
"0",
"]",
"if",
"x",
".",
"help",
"is",
"not",
"None",
"else",
"\"\"",
")",
"self",
".",
"help",
"=",
"self",
".",
"help",
".",
"strip",
"(",
")"
] |
https://github.com/modelop/hadrian/blob/7c63e539d79e6e3cad959792d313dfc8b0c523ea/titus/titus/inspector/defs.py#L538-L553
|
||
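A hedged usage sketch for the CommandGroup record above, assuming the class is available in scope; Cmd is a minimal stand-in for titus.inspector.defs.Command carrying only the name and help attributes the constructor reads.

class Cmd:
    def __init__(self, name, help):
        self.name = name
        self.help = help

group = CommandGroup('pfa', [Cmd('load', 'load a PFA document\nusage: load <file>'),
                             Cmd('validate', 'validate the loaded document')])
print(group.help)   # prints the group banner plus one indented line per subcommand,
                    # using only the first line of each command's help text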
saltstack/raet
|
54858029568115550c7cb7d93e999d9c52b1494a
|
raet/road/transacting.py
|
python
|
Joiner.pend
|
(self)
|
Process ack pend to join packet
|
Process ack pend to join packet
|
[
"Process",
"ack",
"pend",
"to",
"join",
"packet"
] |
def pend(self):
'''
Process ack pend to join packet
'''
if not self.stack.parseInner(self.rxPacket):
return
self.pended = True
|
[
"def",
"pend",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"stack",
".",
"parseInner",
"(",
"self",
".",
"rxPacket",
")",
":",
"return",
"self",
".",
"pended",
"=",
"True"
] |
https://github.com/saltstack/raet/blob/54858029568115550c7cb7d93e999d9c52b1494a/raet/road/transacting.py#L590-L596
|
||
mlcommons/training
|
4a4d5a0b7efe99c680306b1940749211d4238a84
|
image_classification/tensorflow2/common.py
|
python
|
PiecewiseConstantDecayWithWarmup._get_learning_rate
|
(self, step)
|
Compute learning rate at given step.
|
Compute learning rate at given step.
|
[
"Compute",
"learning",
"rate",
"at",
"given",
"step",
"."
] |
def _get_learning_rate(self, step):
"""Compute learning rate at given step."""
with tf.compat.v1.name_scope(self.name, 'PiecewiseConstantDecayWithWarmup',
[self.rescaled_lr, self.step_boundaries,
self.lr_values, self.warmup_steps,
self.compute_lr_on_cpu]):
def warmup_lr(step):
return self.rescaled_lr * (
tf.cast(step, tf.float32) / tf.cast(self.warmup_steps, tf.float32))
def piecewise_lr(step):
return tf.compat.v1.train.piecewise_constant(step, self.step_boundaries,
self.lr_values)
lr = tf.cond(step < self.warmup_steps, lambda: warmup_lr(step),
lambda: piecewise_lr(step))
return lr
|
[
"def",
"_get_learning_rate",
"(",
"self",
",",
"step",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"self",
".",
"name",
",",
"'PiecewiseConstantDecayWithWarmup'",
",",
"[",
"self",
".",
"rescaled_lr",
",",
"self",
".",
"step_boundaries",
",",
"self",
".",
"lr_values",
",",
"self",
".",
"warmup_steps",
",",
"self",
".",
"compute_lr_on_cpu",
"]",
")",
":",
"def",
"warmup_lr",
"(",
"step",
")",
":",
"return",
"self",
".",
"rescaled_lr",
"*",
"(",
"tf",
".",
"cast",
"(",
"step",
",",
"tf",
".",
"float32",
")",
"/",
"tf",
".",
"cast",
"(",
"self",
".",
"warmup_steps",
",",
"tf",
".",
"float32",
")",
")",
"def",
"piecewise_lr",
"(",
"step",
")",
":",
"return",
"tf",
".",
"compat",
".",
"v1",
".",
"train",
".",
"piecewise_constant",
"(",
"step",
",",
"self",
".",
"step_boundaries",
",",
"self",
".",
"lr_values",
")",
"lr",
"=",
"tf",
".",
"cond",
"(",
"step",
"<",
"self",
".",
"warmup_steps",
",",
"lambda",
":",
"warmup_lr",
"(",
"step",
")",
",",
"lambda",
":",
"piecewise_lr",
"(",
"step",
")",
")",
"return",
"lr"
] |
https://github.com/mlcommons/training/blob/4a4d5a0b7efe99c680306b1940749211d4238a84/image_classification/tensorflow2/common.py#L156-L171
|
||
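The record above combines a linear warmup with a piecewise-constant decay. Below is a plain-Python sketch of the same schedule, with made-up illustrative numbers rather than the benchmark's real boundaries and values.

def learning_rate(step, rescaled_lr=0.1, warmup_steps=500,
                  boundaries=(3000, 6000, 8000),
                  values=(0.1, 0.01, 0.001, 0.0001)):
    if step < warmup_steps:                      # linear warmup to rescaled_lr
        return rescaled_lr * step / warmup_steps
    for boundary, value in zip(boundaries, values):
        if step <= boundary:                     # piecewise-constant regions
            return value
    return values[-1]

print([learning_rate(s) for s in (0, 250, 500, 4000, 9000)])
# [0.0, 0.05, 0.1, 0.01, 0.0001]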
TesterlifeRaymond/doraemon
|
d5cb6e34bd5f2aa97273ce0c0c9303e32beaa333
|
venv/lib/python3.6/site-packages/pip/_vendor/cachecontrol/controller.py
|
python
|
CacheController.cached_request
|
(self, request)
|
return False
|
Return a cached response if it exists in the cache, otherwise
return False.
|
Return a cached response if it exists in the cache, otherwise
return False.
|
[
"Return",
"a",
"cached",
"response",
"if",
"it",
"exists",
"in",
"the",
"cache",
"otherwise",
"return",
"False",
"."
] |
def cached_request(self, request):
"""
Return a cached response if it exists in the cache, otherwise
return False.
"""
cache_url = self.cache_url(request.url)
logger.debug('Looking up "%s" in the cache', cache_url)
cc = self.parse_cache_control(request.headers)
# Bail out if the request insists on fresh data
if 'no-cache' in cc:
logger.debug('Request header has "no-cache", cache bypassed')
return False
if 'max-age' in cc and cc['max-age'] == 0:
logger.debug('Request header has "max_age" as 0, cache bypassed')
return False
# Request allows serving from the cache, let's see if we find something
cache_data = self.cache.get(cache_url)
if cache_data is None:
logger.debug('No cache entry available')
return False
# Check whether it can be deserialized
resp = self.serializer.loads(request, cache_data)
if not resp:
logger.warning('Cache entry deserialization failed, entry ignored')
return False
# If we have a cached 301, return it immediately. We don't
# need to test our response for other headers b/c it is
# intrinsically "cacheable" as it is Permanent.
# See:
# https://tools.ietf.org/html/rfc7231#section-6.4.2
#
# Client can try to refresh the value by repeating the request
# with cache busting headers as usual (ie no-cache).
if resp.status == 301:
msg = ('Returning cached "301 Moved Permanently" response '
'(ignoring date and etag information)')
logger.debug(msg)
return resp
headers = CaseInsensitiveDict(resp.headers)
if not headers or 'date' not in headers:
if 'etag' not in headers:
# Without date or etag, the cached response can never be used
# and should be deleted.
logger.debug('Purging cached response: no date or etag')
self.cache.delete(cache_url)
logger.debug('Ignoring cached response: no date')
return False
now = time.time()
date = calendar.timegm(
parsedate_tz(headers['date'])
)
current_age = max(0, now - date)
logger.debug('Current age based on date: %i', current_age)
# TODO: There is an assumption that the result will be a
# urllib3 response object. This may not be best since we
# could probably avoid instantiating or constructing the
# response until we know we need it.
resp_cc = self.parse_cache_control(headers)
# determine freshness
freshness_lifetime = 0
# Check the max-age pragma in the cache control header
if 'max-age' in resp_cc and resp_cc['max-age'].isdigit():
freshness_lifetime = int(resp_cc['max-age'])
logger.debug('Freshness lifetime from max-age: %i',
freshness_lifetime)
# If there isn't a max-age, check for an expires header
elif 'expires' in headers:
expires = parsedate_tz(headers['expires'])
if expires is not None:
expire_time = calendar.timegm(expires) - date
freshness_lifetime = max(0, expire_time)
logger.debug("Freshness lifetime from expires: %i",
freshness_lifetime)
# Determine if we are setting freshness limit in the
# request. Note, this overrides what was in the response.
if 'max-age' in cc:
try:
freshness_lifetime = int(cc['max-age'])
logger.debug('Freshness lifetime from request max-age: %i',
freshness_lifetime)
except ValueError:
freshness_lifetime = 0
if 'min-fresh' in cc:
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
# adjust our current age by our min fresh
current_age += min_fresh
logger.debug('Adjusted current age from min-fresh: %i',
current_age)
# Return entry if it is fresh enough
if freshness_lifetime > current_age:
logger.debug('The response is "fresh", returning cached response')
logger.debug('%i > %i', freshness_lifetime, current_age)
return resp
# we're not fresh. If we don't have an Etag, clear it out
if 'etag' not in headers:
logger.debug(
'The cached response is "stale" with no etag, purging'
)
self.cache.delete(cache_url)
# return the original handler
return False
|
[
"def",
"cached_request",
"(",
"self",
",",
"request",
")",
":",
"cache_url",
"=",
"self",
".",
"cache_url",
"(",
"request",
".",
"url",
")",
"logger",
".",
"debug",
"(",
"'Looking up \"%s\" in the cache'",
",",
"cache_url",
")",
"cc",
"=",
"self",
".",
"parse_cache_control",
"(",
"request",
".",
"headers",
")",
"# Bail out if the request insists on fresh data",
"if",
"'no-cache'",
"in",
"cc",
":",
"logger",
".",
"debug",
"(",
"'Request header has \"no-cache\", cache bypassed'",
")",
"return",
"False",
"if",
"'max-age'",
"in",
"cc",
"and",
"cc",
"[",
"'max-age'",
"]",
"==",
"0",
":",
"logger",
".",
"debug",
"(",
"'Request header has \"max_age\" as 0, cache bypassed'",
")",
"return",
"False",
"# Request allows serving from the cache, let's see if we find something",
"cache_data",
"=",
"self",
".",
"cache",
".",
"get",
"(",
"cache_url",
")",
"if",
"cache_data",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"'No cache entry available'",
")",
"return",
"False",
"# Check whether it can be deserialized",
"resp",
"=",
"self",
".",
"serializer",
".",
"loads",
"(",
"request",
",",
"cache_data",
")",
"if",
"not",
"resp",
":",
"logger",
".",
"warning",
"(",
"'Cache entry deserialization failed, entry ignored'",
")",
"return",
"False",
"# If we have a cached 301, return it immediately. We don't",
"# need to test our response for other headers b/c it is",
"# intrinsically \"cacheable\" as it is Permanent.",
"# See:",
"# https://tools.ietf.org/html/rfc7231#section-6.4.2",
"#",
"# Client can try to refresh the value by repeating the request",
"# with cache busting headers as usual (ie no-cache).",
"if",
"resp",
".",
"status",
"==",
"301",
":",
"msg",
"=",
"(",
"'Returning cached \"301 Moved Permanently\" response '",
"'(ignoring date and etag information)'",
")",
"logger",
".",
"debug",
"(",
"msg",
")",
"return",
"resp",
"headers",
"=",
"CaseInsensitiveDict",
"(",
"resp",
".",
"headers",
")",
"if",
"not",
"headers",
"or",
"'date'",
"not",
"in",
"headers",
":",
"if",
"'etag'",
"not",
"in",
"headers",
":",
"# Without date or etag, the cached response can never be used",
"# and should be deleted.",
"logger",
".",
"debug",
"(",
"'Purging cached response: no date or etag'",
")",
"self",
".",
"cache",
".",
"delete",
"(",
"cache_url",
")",
"logger",
".",
"debug",
"(",
"'Ignoring cached response: no date'",
")",
"return",
"False",
"now",
"=",
"time",
".",
"time",
"(",
")",
"date",
"=",
"calendar",
".",
"timegm",
"(",
"parsedate_tz",
"(",
"headers",
"[",
"'date'",
"]",
")",
")",
"current_age",
"=",
"max",
"(",
"0",
",",
"now",
"-",
"date",
")",
"logger",
".",
"debug",
"(",
"'Current age based on date: %i'",
",",
"current_age",
")",
"# TODO: There is an assumption that the result will be a",
"# urllib3 response object. This may not be best since we",
"# could probably avoid instantiating or constructing the",
"# response until we know we need it.",
"resp_cc",
"=",
"self",
".",
"parse_cache_control",
"(",
"headers",
")",
"# determine freshness",
"freshness_lifetime",
"=",
"0",
"# Check the max-age pragma in the cache control header",
"if",
"'max-age'",
"in",
"resp_cc",
"and",
"resp_cc",
"[",
"'max-age'",
"]",
".",
"isdigit",
"(",
")",
":",
"freshness_lifetime",
"=",
"int",
"(",
"resp_cc",
"[",
"'max-age'",
"]",
")",
"logger",
".",
"debug",
"(",
"'Freshness lifetime from max-age: %i'",
",",
"freshness_lifetime",
")",
"# If there isn't a max-age, check for an expires header",
"elif",
"'expires'",
"in",
"headers",
":",
"expires",
"=",
"parsedate_tz",
"(",
"headers",
"[",
"'expires'",
"]",
")",
"if",
"expires",
"is",
"not",
"None",
":",
"expire_time",
"=",
"calendar",
".",
"timegm",
"(",
"expires",
")",
"-",
"date",
"freshness_lifetime",
"=",
"max",
"(",
"0",
",",
"expire_time",
")",
"logger",
".",
"debug",
"(",
"\"Freshness lifetime from expires: %i\"",
",",
"freshness_lifetime",
")",
"# Determine if we are setting freshness limit in the",
"# request. Note, this overrides what was in the response.",
"if",
"'max-age'",
"in",
"cc",
":",
"try",
":",
"freshness_lifetime",
"=",
"int",
"(",
"cc",
"[",
"'max-age'",
"]",
")",
"logger",
".",
"debug",
"(",
"'Freshness lifetime from request max-age: %i'",
",",
"freshness_lifetime",
")",
"except",
"ValueError",
":",
"freshness_lifetime",
"=",
"0",
"if",
"'min-fresh'",
"in",
"cc",
":",
"try",
":",
"min_fresh",
"=",
"int",
"(",
"cc",
"[",
"'min-fresh'",
"]",
")",
"except",
"ValueError",
":",
"min_fresh",
"=",
"0",
"# adjust our current age by our min fresh",
"current_age",
"+=",
"min_fresh",
"logger",
".",
"debug",
"(",
"'Adjusted current age from min-fresh: %i'",
",",
"current_age",
")",
"# Return entry if it is fresh enough",
"if",
"freshness_lifetime",
">",
"current_age",
":",
"logger",
".",
"debug",
"(",
"'The response is \"fresh\", returning cached response'",
")",
"logger",
".",
"debug",
"(",
"'%i > %i'",
",",
"freshness_lifetime",
",",
"current_age",
")",
"return",
"resp",
"# we're not fresh. If we don't have an Etag, clear it out",
"if",
"'etag'",
"not",
"in",
"headers",
":",
"logger",
".",
"debug",
"(",
"'The cached response is \"stale\" with no etag, purging'",
")",
"self",
".",
"cache",
".",
"delete",
"(",
"cache_url",
")",
"# return the original handler",
"return",
"False"
] |
https://github.com/TesterlifeRaymond/doraemon/blob/d5cb6e34bd5f2aa97273ce0c0c9303e32beaa333/venv/lib/python3.6/site-packages/pip/_vendor/cachecontrol/controller.py#L86-L205
|
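The heart of the cached_request record above is freshness arithmetic: the entry's current age is now minus its Date header, its freshness lifetime comes from the response's max-age (or Expires), the request's own max-age can override that, and min-fresh pads the age; the entry is served only while lifetime exceeds age. Below is a simplified, self-contained sketch of just that check, assuming the headers and Cache-Control directives are already parsed into plain dicts of strings; it omits the no-cache, 301 and etag handling and the ValueError guards of the original.

import calendar
import time
from email.utils import parsedate_tz

def is_fresh(response_headers, request_cc, response_cc, now=None):
    """Return True if a cached entry may still be served without revalidation."""
    now = time.time() if now is None else now
    date = calendar.timegm(parsedate_tz(response_headers['date']))
    current_age = max(0, now - date)

    freshness_lifetime = 0
    if response_cc.get('max-age', '').isdigit():
        freshness_lifetime = int(response_cc['max-age'])
    elif 'expires' in response_headers:
        expires = parsedate_tz(response_headers['expires'])
        if expires is not None:
            freshness_lifetime = max(0, calendar.timegm(expires) - date)
    if 'max-age' in request_cc:                      # request overrides response
        freshness_lifetime = int(request_cc['max-age'])
    current_age += int(request_cc.get('min-fresh', 0))

    return freshness_lifetime > current_age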