nwo stringlengths 5-106 | sha stringlengths 40 | path stringlengths 4-174 | language stringclasses 1 value | identifier stringlengths 1-140 | parameters stringlengths 0-87.7k | argument_list stringclasses 1 value | return_statement stringlengths 0-426k | docstring stringlengths 0-64.3k | docstring_summary stringlengths 0-26.3k | docstring_tokens list | function stringlengths 18-4.83M | function_tokens list | url stringlengths 83-304 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
osmr/imgclsmob
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
tensorflow2/tf2cv/models/deeplabv3.py
|
python
|
deeplabv3_resnetd101b_ade20k
|
(pretrained_backbone=False, classes=150, aux=True, data_format="channels_last",
**kwargs)
|
return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_ade20k",
data_format=data_format, **kwargs)
|
DeepLabv3 model on the base of ResNet(D)-101b for ADE20K from 'Rethinking Atrous Convolution for Semantic Image
Segmentation,' https://arxiv.org/abs/1706.05587.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 150
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
|
DeepLabv3 model on the base of ResNet(D)-101b for ADE20K from 'Rethinking Atrous Convolution for Semantic Image
Segmentation,' https://arxiv.org/abs/1706.05587.
|
[
"DeepLabv3",
"model",
"on",
"the",
"base",
"of",
"ResNet",
"(",
"D",
")",
"-",
"101b",
"for",
"ADE20K",
"from",
"Rethinking",
"Atrous",
"Convolution",
"for",
"Semantic",
"Image",
"Segmentation",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1706",
".",
"05587",
"."
] |
def deeplabv3_resnetd101b_ade20k(pretrained_backbone=False, classes=150, aux=True, data_format="channels_last",
**kwargs):
"""
DeepLabv3 model on the base of ResNet(D)-101b for ADE20K from 'Rethinking Atrous Convolution for Semantic Image
Segmentation,' https://arxiv.org/abs/1706.05587.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 150
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,),
data_format=data_format).features
del backbone.children[-1]
return get_deeplabv3(backbone=backbone, classes=classes, aux=aux, model_name="deeplabv3_resnetd101b_ade20k",
data_format=data_format, **kwargs)
|
[
"def",
"deeplabv3_resnetd101b_ade20k",
"(",
"pretrained_backbone",
"=",
"False",
",",
"classes",
"=",
"150",
",",
"aux",
"=",
"True",
",",
"data_format",
"=",
"\"channels_last\"",
",",
"*",
"*",
"kwargs",
")",
":",
"backbone",
"=",
"resnetd101b",
"(",
"pretrained",
"=",
"pretrained_backbone",
",",
"ordinary_init",
"=",
"False",
",",
"bends",
"=",
"(",
"3",
",",
")",
",",
"data_format",
"=",
"data_format",
")",
".",
"features",
"del",
"backbone",
".",
"children",
"[",
"-",
"1",
"]",
"return",
"get_deeplabv3",
"(",
"backbone",
"=",
"backbone",
",",
"classes",
"=",
"classes",
",",
"aux",
"=",
"aux",
",",
"model_name",
"=",
"\"deeplabv3_resnetd101b_ade20k\"",
",",
"data_format",
"=",
"data_format",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/osmr/imgclsmob/blob/f2993d3ce73a2f7ddba05da3891defb08547d504/tensorflow2/tf2cv/models/deeplabv3.py#L495-L520
|
|
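A hedged usage sketch for this row: imgclsmob's TensorFlow 2 package resolves registered model names through `tf2cv.model_provider.get_model`; the input size below is an assumption for illustration.

```python
# Minimal sketch, assuming the tf2cv package from this repo is installed.
import numpy as np
from tf2cv.model_provider import get_model

net = get_model("deeplabv3_resnetd101b_ade20k", pretrained=False,
                data_format="channels_last")
x = np.zeros((1, 480, 480, 3), dtype=np.float32)  # NHWC batch of one image
y = net(x)  # with aux=True the model returns main and auxiliary outputs
```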
edisonlz/fastor
|
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
|
base/site-packages/androguard/core/analysis/auto.py
|
python
|
DefaultAndroAnalysis.create_axml
|
(self, log, fileraw)
|
return apk.AXMLPrinter(fileraw)
|
This method is called in order to create a new AXML object
:param log: an object which corresponds to a unique app
:param fileraw: the raw axml (a string)
:rtype: an :class:`APK` object
|
This method is called in order to create a new AXML object
|
[
"This",
"method",
"is",
"called",
"in",
"order",
"to",
"create",
"a",
"new",
"AXML",
"object"
] |
def create_axml(self, log, fileraw):
"""
This method is called in order to create a new AXML object
:param log: an object which corresponds to a unique app
:param fileraw: the raw axml (a string)
:rtype: an :class:`APK` object
"""
return apk.AXMLPrinter(fileraw)
|
[
"def",
"create_axml",
"(",
"self",
",",
"log",
",",
"fileraw",
")",
":",
"return",
"apk",
".",
"AXMLPrinter",
"(",
"fileraw",
")"
] |
https://github.com/edisonlz/fastor/blob/342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3/base/site-packages/androguard/core/analysis/auto.py#L165-L174
|
|
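For context, a hedged sketch of how this hook's return value is typically produced by hand with androguard; the manifest path is illustrative.

```python
# Hedged sketch, assuming androguard is installed. AXMLPrinter decodes
# Android's binary XML (AXML) format into readable XML.
from androguard.core.bytecodes import apk

with open("AndroidManifest.xml", "rb") as f:  # raw binary AXML bytes
    printer = apk.AXMLPrinter(f.read())
print(printer.get_xml())  # decoded XML document
```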
edouardoyallon/pyscatwave
|
b2bd090569f142c5728f6b07980d7aa95da37efa
|
examples/mnist.py
|
python
|
main
|
()
|
Train a simple Hybrid Scattering + CNN model on MNIST.
Scattering features are normalized by batch normalization.
The model achieves 99.6% testing accuracy after 10 epochs.
|
Train a simple Hybrid Scattering + CNN model on MNIST.
|
[
"Train",
"a",
"simple",
"Hybrid",
"Scattering",
"+",
"CNN",
"model",
"on",
"MNIST",
"."
] |
def main():
"""Train a simple Hybrid Scattering + CNN model on MNIST.
Scattering features are normalized by batch normalization.
The model achieves 99.6% testing accuracy after 10 epochs.
"""
meter_loss = tnt.meter.AverageValueMeter()
classerr = tnt.meter.ClassErrorMeter(accuracy=True)
scat = Scattering(M=28, N=28, J=2).cuda()
K = 81
params = {
'conv1.weight': conv_init(K, 64, 1),
'conv1.bias': torch.zeros(64),
'bn.weight': torch.Tensor(K).uniform_(),
'bn.bias': torch.zeros(K),
'linear2.weight': linear_init(64*7*7, 512),
'linear2.bias': torch.zeros(512),
'linear3.weight': linear_init(512, 10),
'linear3.bias': torch.zeros(10),
}
stats = {'bn.running_mean': torch.zeros(K).cuda(),
'bn.running_var': torch.ones(K).cuda()}
for k, v in list(params.items()):
params[k] = Variable(v.cuda(), requires_grad=True)
def h(sample):
x = scat(sample[0].float().cuda().unsqueeze(1) / 255.0).squeeze(1)
inputs = Variable(x)
targets = Variable(torch.LongTensor(sample[1]).cuda())
o = f(inputs, params, stats, sample[2])
return F.cross_entropy(o, targets), o
def on_sample(state):
state['sample'].append(state['train'])
def on_forward(state):
classerr.add(state['output'].data,
torch.LongTensor(state['sample'][1]))
meter_loss.add(state['loss'].item())
def on_start_epoch(state):
classerr.reset()
state['iterator'] = tqdm(state['iterator'])
def on_end_epoch(state):
print('Training accuracy:', classerr.value())
def on_end(state):
print('Training' if state['train'] else 'Testing', 'accuracy')
print(classerr.value())
classerr.reset()
optimizer = torch.optim.SGD(list(params.values()), lr=0.01, momentum=0.9,
weight_decay=0.0005)
engine = Engine()
engine.hooks['on_sample'] = on_sample
engine.hooks['on_forward'] = on_forward
engine.hooks['on_start_epoch'] = on_start_epoch
engine.hooks['on_end_epoch'] = on_end_epoch
engine.hooks['on_end'] = on_end
print('Training:')
engine.train(h, get_iterator(True), 10, optimizer)
print('Testing:')
engine.test(h, get_iterator(False))
|
[
"def",
"main",
"(",
")",
":",
"meter_loss",
"=",
"tnt",
".",
"meter",
".",
"AverageValueMeter",
"(",
")",
"classerr",
"=",
"tnt",
".",
"meter",
".",
"ClassErrorMeter",
"(",
"accuracy",
"=",
"True",
")",
"scat",
"=",
"Scattering",
"(",
"M",
"=",
"28",
",",
"N",
"=",
"28",
",",
"J",
"=",
"2",
")",
".",
"cuda",
"(",
")",
"K",
"=",
"81",
"params",
"=",
"{",
"'conv1.weight'",
":",
"conv_init",
"(",
"K",
",",
"64",
",",
"1",
")",
",",
"'conv1.bias'",
":",
"torch",
".",
"zeros",
"(",
"64",
")",
",",
"'bn.weight'",
":",
"torch",
".",
"Tensor",
"(",
"K",
")",
".",
"uniform_",
"(",
")",
",",
"'bn.bias'",
":",
"torch",
".",
"zeros",
"(",
"K",
")",
",",
"'linear2.weight'",
":",
"linear_init",
"(",
"64",
"*",
"7",
"*",
"7",
",",
"512",
")",
",",
"'linear2.bias'",
":",
"torch",
".",
"zeros",
"(",
"512",
")",
",",
"'linear3.weight'",
":",
"linear_init",
"(",
"512",
",",
"10",
")",
",",
"'linear3.bias'",
":",
"torch",
".",
"zeros",
"(",
"10",
")",
",",
"}",
"stats",
"=",
"{",
"'bn.running_mean'",
":",
"torch",
".",
"zeros",
"(",
"K",
")",
".",
"cuda",
"(",
")",
",",
"'bn.running_var'",
":",
"torch",
".",
"ones",
"(",
"K",
")",
".",
"cuda",
"(",
")",
"}",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"params",
".",
"items",
"(",
")",
")",
":",
"params",
"[",
"k",
"]",
"=",
"Variable",
"(",
"v",
".",
"cuda",
"(",
")",
",",
"requires_grad",
"=",
"True",
")",
"def",
"h",
"(",
"sample",
")",
":",
"x",
"=",
"scat",
"(",
"sample",
"[",
"0",
"]",
".",
"float",
"(",
")",
".",
"cuda",
"(",
")",
".",
"unsqueeze",
"(",
"1",
")",
"/",
"255.0",
")",
".",
"squeeze",
"(",
"1",
")",
"inputs",
"=",
"Variable",
"(",
"x",
")",
"targets",
"=",
"Variable",
"(",
"torch",
".",
"LongTensor",
"(",
"sample",
"[",
"1",
"]",
")",
".",
"cuda",
"(",
")",
")",
"o",
"=",
"f",
"(",
"inputs",
",",
"params",
",",
"stats",
",",
"sample",
"[",
"2",
"]",
")",
"return",
"F",
".",
"cross_entropy",
"(",
"o",
",",
"targets",
")",
",",
"o",
"def",
"on_sample",
"(",
"state",
")",
":",
"state",
"[",
"'sample'",
"]",
".",
"append",
"(",
"state",
"[",
"'train'",
"]",
")",
"def",
"on_forward",
"(",
"state",
")",
":",
"classerr",
".",
"add",
"(",
"state",
"[",
"'output'",
"]",
".",
"data",
",",
"torch",
".",
"LongTensor",
"(",
"state",
"[",
"'sample'",
"]",
"[",
"1",
"]",
")",
")",
"meter_loss",
".",
"add",
"(",
"state",
"[",
"'loss'",
"]",
".",
"item",
"(",
")",
")",
"def",
"on_start_epoch",
"(",
"state",
")",
":",
"classerr",
".",
"reset",
"(",
")",
"state",
"[",
"'iterator'",
"]",
"=",
"tqdm",
"(",
"state",
"[",
"'iterator'",
"]",
")",
"def",
"on_end_epoch",
"(",
"state",
")",
":",
"print",
"(",
"'Training accuracy:'",
",",
"classerr",
".",
"value",
"(",
")",
")",
"def",
"on_end",
"(",
"state",
")",
":",
"print",
"(",
"'Training'",
"if",
"state",
"[",
"'train'",
"]",
"else",
"'Testing'",
",",
"'accuracy'",
")",
"print",
"(",
"classerr",
".",
"value",
"(",
")",
")",
"classerr",
".",
"reset",
"(",
")",
"optimizer",
"=",
"torch",
".",
"optim",
".",
"SGD",
"(",
"list",
"(",
"params",
".",
"values",
"(",
")",
")",
",",
"lr",
"=",
"0.01",
",",
"momentum",
"=",
"0.9",
",",
"weight_decay",
"=",
"0.0005",
")",
"engine",
"=",
"Engine",
"(",
")",
"engine",
".",
"hooks",
"[",
"'on_sample'",
"]",
"=",
"on_sample",
"engine",
".",
"hooks",
"[",
"'on_forward'",
"]",
"=",
"on_forward",
"engine",
".",
"hooks",
"[",
"'on_start_epoch'",
"]",
"=",
"on_start_epoch",
"engine",
".",
"hooks",
"[",
"'on_end_epoch'",
"]",
"=",
"on_end_epoch",
"engine",
".",
"hooks",
"[",
"'on_end'",
"]",
"=",
"on_end",
"print",
"(",
"'Training:'",
")",
"engine",
".",
"train",
"(",
"h",
",",
"get_iterator",
"(",
"True",
")",
",",
"10",
",",
"optimizer",
")",
"print",
"(",
"'Testing:'",
")",
"engine",
".",
"test",
"(",
"h",
",",
"get_iterator",
"(",
"False",
")",
")"
] |
https://github.com/edouardoyallon/pyscatwave/blob/b2bd090569f142c5728f6b07980d7aa95da37efa/examples/mnist.py#L43-L111
|
||
Coalfire-Research/Slackor
|
aa32a7f9250bd8b107d48fd573f26176b527b2a5
|
impacket/impacket/dot11.py
|
python
|
Dot11ControlFrameRTS.set_ta
|
(self, value)
|
Set 802.11 RTS control frame 48-bit 'Transmitter Address' field as a 6-byte array
|
Set 802.11 RTS control frame 48-bit 'Transmitter Address' field as a 6-byte array
|
[
"Set",
"802",
".",
"11",
"RTS",
"control",
"frame",
"48",
"bit",
"Transmitter",
"Address",
"field",
"as",
"a",
"6",
"bytes",
"array"
] |
def set_ta(self, value):
"Set 802.11 RTS control frame 48 bit 'Transmitter Address' field as a 6 bytes array"
for i in range(0, 6):
self.header.set_byte(8+i, value[i])
|
[
"def",
"set_ta",
"(",
"self",
",",
"value",
")",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"6",
")",
":",
"self",
".",
"header",
".",
"set_byte",
"(",
"8",
"+",
"i",
",",
"value",
"[",
"i",
"]",
")"
] |
https://github.com/Coalfire-Research/Slackor/blob/aa32a7f9250bd8b107d48fd573f26176b527b2a5/impacket/impacket/dot11.py#L621-L624
|
||
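A hedged usage sketch of the frame class this setter belongs to; `get_ta` is assumed to be the paired accessor, and the address bytes are invented.

```python
# Hedged sketch, assuming impacket is installed and that the empty
# constructor builds a zeroed RTS header, as impacket's dot11 classes do.
from impacket.dot11 import Dot11ControlFrameRTS

rts = Dot11ControlFrameRTS()
rts.set_ta([0x00, 0x11, 0x22, 0x33, 0x44, 0x55])  # 6-byte transmitter address
print(rts.get_ta())  # assumed paired accessor returning those 6 bytes
```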
hak5/nano-tetra-modules
|
aa43cb5e2338b8dbd12a75314104a34ba608263b
|
PortalAuth/includes/scripts/libs/requests/cookies.py
|
python
|
merge_cookies
|
(cookiejar, cookies)
|
return cookiejar
|
Add cookies to cookiejar and return a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
|
Add cookies to cookiejar and return a merged CookieJar.
|
[
"Add",
"cookies",
"to",
"cookiejar",
"and",
"returns",
"a",
"merged",
"CookieJar",
"."
] |
def merge_cookies(cookiejar, cookies):
"""Add cookies to cookiejar and returns a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
"""
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
if isinstance(cookies, dict):
cookiejar = cookiejar_from_dict(
cookies, cookiejar=cookiejar, overwrite=False)
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar
|
[
"def",
"merge_cookies",
"(",
"cookiejar",
",",
"cookies",
")",
":",
"if",
"not",
"isinstance",
"(",
"cookiejar",
",",
"cookielib",
".",
"CookieJar",
")",
":",
"raise",
"ValueError",
"(",
"'You can only merge into CookieJar'",
")",
"if",
"isinstance",
"(",
"cookies",
",",
"dict",
")",
":",
"cookiejar",
"=",
"cookiejar_from_dict",
"(",
"cookies",
",",
"cookiejar",
"=",
"cookiejar",
",",
"overwrite",
"=",
"False",
")",
"elif",
"isinstance",
"(",
"cookies",
",",
"cookielib",
".",
"CookieJar",
")",
":",
"try",
":",
"cookiejar",
".",
"update",
"(",
"cookies",
")",
"except",
"AttributeError",
":",
"for",
"cookie_in_jar",
"in",
"cookies",
":",
"cookiejar",
".",
"set_cookie",
"(",
"cookie_in_jar",
")",
"return",
"cookiejar"
] |
https://github.com/hak5/nano-tetra-modules/blob/aa43cb5e2338b8dbd12a75314104a34ba608263b/PortalAuth/includes/scripts/libs/requests/cookies.py#L444-L463
|
|
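Since this helper is part of requests' public `requests.cookies` module, a short usage sketch (cookie names and values invented):

```python
# Usage sketch for requests' merge_cookies; both helpers live in
# requests.cookies.
from requests.cookies import cookiejar_from_dict, merge_cookies

jar = cookiejar_from_dict({"session": "abc123"})  # CookieJar with one cookie
merged = merge_cookies(jar, {"theme": "dark"})    # merge a plain dict in
print(sorted(c.name for c in merged))             # ['session', 'theme']
```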
Qirky/FoxDot
|
76318f9630bede48ff3994146ed644affa27bfa4
|
FoxDot/lib/OSC.py
|
python
|
OSCMultiClient._prefixAddress
|
(self, prefix, msg)
|
return out
|
Makes a copy of the given OSCMessage, then prepends the given prefix to
the message's OSC-address.
If 'msg' is an OSCBundle, recursively prepends the prefix to its constituents.
|
Makes a copy of the given OSCMessage, then prepends the given prefix to
the message's OSC-address.
If 'msg' is an OSCBundle, recursively prepends the prefix to its constituents.
|
[
"Makes",
"a",
"copy",
"of",
"the",
"given",
"OSCMessage",
"then",
"prepends",
"the",
"given",
"prefix",
"to",
"The",
"message",
"s",
"OSC",
"-",
"address",
".",
"If",
"msg",
"is",
"an",
"OSCBundle",
"recursively",
"prepends",
"the",
"prefix",
"to",
"its",
"constituents",
"."
] |
def _prefixAddress(self, prefix, msg):
"""Makes a copy of the given OSCMessage, then prepends the given prefix to
the message's OSC-address.
If 'msg' is an OSCBundle, recursively prepends the prefix to its constituents.
"""
out = msg.copy()
if isinstance(msg, OSCBundle):
msgs = out.values()
out.clearData()
for m in msgs:
out.append(self._prefixAddress(prefix, m))
elif isinstance(msg, OSCMessage):
out.setAddress(prefix + out.address)
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
return out
|
[
"def",
"_prefixAddress",
"(",
"self",
",",
"prefix",
",",
"msg",
")",
":",
"out",
"=",
"msg",
".",
"copy",
"(",
")",
"if",
"isinstance",
"(",
"msg",
",",
"OSCBundle",
")",
":",
"msgs",
"=",
"out",
".",
"values",
"(",
")",
"out",
".",
"clearData",
"(",
")",
"for",
"m",
"in",
"msgs",
":",
"out",
".",
"append",
"(",
"self",
".",
"_prefixAddress",
"(",
"prefix",
",",
"m",
")",
")",
"elif",
"isinstance",
"(",
"msg",
",",
"OSCMessage",
")",
":",
"out",
".",
"setAddress",
"(",
"prefix",
"+",
"out",
".",
"address",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"'msg' argument is not an OSCMessage or OSCBundle object\"",
")",
"return",
"out"
] |
https://github.com/Qirky/FoxDot/blob/76318f9630bede48ff3994146ed644affa27bfa4/FoxDot/lib/OSC.py#L1626-L1645
|
|
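The recursion over bundles is the interesting part; here is a standalone re-implementation sketch that keeps the same shape without FoxDot's OSCMessage/OSCBundle classes (a "message" is an `(address, args)` tuple, a "bundle" a list of messages):

```python
# Standalone sketch of the prefixing recursion; not FoxDot's classes.
def prefix_address(prefix, msg):
    if isinstance(msg, list):                           # bundle: recurse
        return [prefix_address(prefix, m) for m in msg]
    address, args = msg                                 # message: rewrite address
    return (prefix + address, args)

print(prefix_address("/synth", ("/play", [60])))
# ('/synth/play', [60])
print(prefix_address("/synth", [("/play", [60]), ("/stop", [])]))
# [('/synth/play', [60]), ('/synth/stop', [])]
```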
sametmax/Django--an-app-at-a-time
|
99eddf12ead76e6dfbeb09ce0bae61e282e22f8a
|
ignore_this_directory/django/db/models/fields/reverse_related.py
|
python
|
ManyToManyRel.get_related_field
|
(self)
|
return field.foreign_related_fields[0]
|
Return the field in the 'to' object to which this relationship is tied.
Provided for symmetry with ManyToOneRel.
|
Return the field in the 'to' object to which this relationship is tied.
Provided for symmetry with ManyToOneRel.
|
[
"Return",
"the",
"field",
"in",
"the",
"to",
"object",
"to",
"which",
"this",
"relationship",
"is",
"tied",
".",
"Provided",
"for",
"symmetry",
"with",
"ManyToOneRel",
"."
] |
def get_related_field(self):
"""
Return the field in the 'to' object to which this relationship is tied.
Provided for symmetry with ManyToOneRel.
"""
opts = self.through._meta
if self.through_fields:
field = opts.get_field(self.through_fields[0])
else:
for field in opts.fields:
rel = getattr(field, 'remote_field', None)
if rel and rel.model == self.model:
break
return field.foreign_related_fields[0]
|
[
"def",
"get_related_field",
"(",
"self",
")",
":",
"opts",
"=",
"self",
".",
"through",
".",
"_meta",
"if",
"self",
".",
"through_fields",
":",
"field",
"=",
"opts",
".",
"get_field",
"(",
"self",
".",
"through_fields",
"[",
"0",
"]",
")",
"else",
":",
"for",
"field",
"in",
"opts",
".",
"fields",
":",
"rel",
"=",
"getattr",
"(",
"field",
",",
"'remote_field'",
",",
"None",
")",
"if",
"rel",
"and",
"rel",
".",
"model",
"==",
"self",
".",
"model",
":",
"break",
"return",
"field",
".",
"foreign_related_fields",
"[",
"0",
"]"
] |
https://github.com/sametmax/Django--an-app-at-a-time/blob/99eddf12ead76e6dfbeb09ce0bae61e282e22f8a/ignore_this_directory/django/db/models/fields/reverse_related.py#L277-L290
|
|
PaddlePaddle/Research
|
2da0bd6c72d60e9df403aff23a7802779561c4a1
|
KG/CoKE/bin/model/transformer_encoder.py
|
python
|
encoder_layer
|
(enc_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd="n",
postprocess_cmd="da",
param_initializer=None,
name='')
|
return post_process_layer(
attn_output,
ffd_output,
postprocess_cmd,
prepostprocess_dropout,
name=name + '_post_ffn')
|
The encoder layers that can be stacked to form a deep encoder.
This module consists of a multi-head (self) attention followed by
position-wise feed-forward networks, and both components are accompanied
by the post_process_layer to add residual connection, layer normalization
and dropout.
|
The encoder layers that can be stacked to form a deep encoder.
This module consists of a multi-head (self) attention followed by
position-wise feed-forward networks, and both components are accompanied
by the post_process_layer to add residual connection, layer normalization
and dropout.
|
[
"The",
"encoder",
"layers",
"that",
"can",
"be",
"stacked",
"to",
"form",
"a",
"deep",
"encoder",
".",
"This",
"module",
"consits",
"of",
"a",
"multi",
"-",
"head",
"(",
"self",
")",
"attention",
"followed",
"by",
"position",
"-",
"wise",
"feed",
"-",
"forward",
"networks",
"and",
"both",
"the",
"two",
"components",
"companied",
"with",
"the",
"post_process_layer",
"to",
"add",
"residual",
"connection",
"layer",
"normalization",
"and",
"droput",
"."
] |
def encoder_layer(enc_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd="n",
postprocess_cmd="da",
param_initializer=None,
name=''):
"""The encoder layers that can be stacked to form a deep encoder.
This module consists of a multi-head (self) attention followed by
position-wise feed-forward networks, and both components are accompanied
by the post_process_layer to add residual connection, layer normalization
and dropout.
"""
attn_output = multi_head_attention(
pre_process_layer(
enc_input,
preprocess_cmd,
prepostprocess_dropout,
name=name + '_pre_att'),
None,
None,
attn_bias,
d_key,
d_value,
d_model,
n_head,
attention_dropout,
param_initializer=param_initializer,
name=name + '_multi_head_att')
attn_output = post_process_layer(
enc_input,
attn_output,
postprocess_cmd,
prepostprocess_dropout,
name=name + '_post_att')
ffd_output = positionwise_feed_forward(
pre_process_layer(
attn_output,
preprocess_cmd,
prepostprocess_dropout,
name=name + '_pre_ffn'),
d_inner_hid,
d_model,
relu_dropout,
hidden_act,
param_initializer=param_initializer,
name=name + '_ffn')
return post_process_layer(
attn_output,
ffd_output,
postprocess_cmd,
prepostprocess_dropout,
name=name + '_post_ffn')
|
[
"def",
"encoder_layer",
"(",
"enc_input",
",",
"attn_bias",
",",
"n_head",
",",
"d_key",
",",
"d_value",
",",
"d_model",
",",
"d_inner_hid",
",",
"prepostprocess_dropout",
",",
"attention_dropout",
",",
"relu_dropout",
",",
"hidden_act",
",",
"preprocess_cmd",
"=",
"\"n\"",
",",
"postprocess_cmd",
"=",
"\"da\"",
",",
"param_initializer",
"=",
"None",
",",
"name",
"=",
"''",
")",
":",
"attn_output",
"=",
"multi_head_attention",
"(",
"pre_process_layer",
"(",
"enc_input",
",",
"preprocess_cmd",
",",
"prepostprocess_dropout",
",",
"name",
"=",
"name",
"+",
"'_pre_att'",
")",
",",
"None",
",",
"None",
",",
"attn_bias",
",",
"d_key",
",",
"d_value",
",",
"d_model",
",",
"n_head",
",",
"attention_dropout",
",",
"param_initializer",
"=",
"param_initializer",
",",
"name",
"=",
"name",
"+",
"'_multi_head_att'",
")",
"attn_output",
"=",
"post_process_layer",
"(",
"enc_input",
",",
"attn_output",
",",
"postprocess_cmd",
",",
"prepostprocess_dropout",
",",
"name",
"=",
"name",
"+",
"'_post_att'",
")",
"ffd_output",
"=",
"positionwise_feed_forward",
"(",
"pre_process_layer",
"(",
"attn_output",
",",
"preprocess_cmd",
",",
"prepostprocess_dropout",
",",
"name",
"=",
"name",
"+",
"'_pre_ffn'",
")",
",",
"d_inner_hid",
",",
"d_model",
",",
"relu_dropout",
",",
"hidden_act",
",",
"param_initializer",
"=",
"param_initializer",
",",
"name",
"=",
"name",
"+",
"'_ffn'",
")",
"return",
"post_process_layer",
"(",
"attn_output",
",",
"ffd_output",
",",
"postprocess_cmd",
",",
"prepostprocess_dropout",
",",
"name",
"=",
"name",
"+",
"'_post_ffn'",
")"
] |
https://github.com/PaddlePaddle/Research/blob/2da0bd6c72d60e9df403aff23a7802779561c4a1/KG/CoKE/bin/model/transformer_encoder.py#L279-L339
|
|
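The `preprocess_cmd`/`postprocess_cmd` strings ("n", "da") select one processing step per character. A standalone NumPy sketch of that dispatch, not Paddle's implementation:

```python
# Standalone sketch: d = dropout, a = residual add, n = layer norm.
import numpy as np

def process_layer(prev_out, out, cmd, dropout_rate=0.0):
    for c in cmd:
        if c == "d" and dropout_rate > 0.0:
            keep = np.random.rand(*out.shape) >= dropout_rate
            out = out * keep / (1.0 - dropout_rate)     # inverted dropout
        elif c == "a":
            out = out + prev_out                        # residual connection
        elif c == "n":
            mu = out.mean(axis=-1, keepdims=True)
            sd = out.std(axis=-1, keepdims=True)
            out = (out - mu) / (sd + 1e-6)              # layer normalization
    return out

x = np.random.randn(2, 4, 8)
print(process_layer(x, np.random.randn(2, 4, 8), "da", 0.1).shape)  # (2, 4, 8)
```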
obspy/obspy
|
0ee5a0d2db293c8d5d4c3b1f148a6c5a85fea55f
|
obspy/imaging/scripts/mopad.py
|
python
|
MomentTensor.get_t_axis
|
(self, system='NED', style='n')
|
return self._vector_w_style_and_system(self._t_axis, system, style)
|
Returns the tension axis of the moment tensor.
Call with arguments to set output in another basis system or in fancy
style (to be viewed with 'print')
|
Returns the tension axis of the moment tensor.
|
[
"Returns",
"the",
"tension",
"axis",
"of",
"the",
"moment",
"tensor",
"."
] |
def get_t_axis(self, system='NED', style='n'):
"""
Returns the tension axis of the moment tensor.
Call with arguments to set output in another basis system or in fancy
style (to be viewed with 'print')
"""
if style == 'f':
print('\n Tension-axis in %s -coordinates: ' % (system))
return self._vector_w_style_and_system(self._t_axis, system, style)
|
[
"def",
"get_t_axis",
"(",
"self",
",",
"system",
"=",
"'NED'",
",",
"style",
"=",
"'n'",
")",
":",
"if",
"style",
"==",
"'f'",
":",
"print",
"(",
"'\\n Tension-axis in %s -coordinates: '",
"%",
"(",
"system",
")",
")",
"return",
"self",
".",
"_vector_w_style_and_system",
"(",
"self",
".",
"_t_axis",
",",
"system",
",",
"style",
")"
] |
https://github.com/obspy/obspy/blob/0ee5a0d2db293c8d5d4c3b1f148a6c5a85fea55f/obspy/imaging/scripts/mopad.py#L1370-L1379
|
|
deanishe/alfred-convert
|
97407f4ec8dbca5abbc6952b2b56cf3918624177
|
src/pkg_resources/__init__.py
|
python
|
ResourceManager.set_extraction_path
|
(self, path)
|
Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
|
Set the base path where resources will be extracted to, if needed.
|
[
"Set",
"the",
"base",
"path",
"where",
"resources",
"will",
"be",
"extracted",
"to",
"if",
"needed",
"."
] |
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
|
[
"def",
"set_extraction_path",
"(",
"self",
",",
"path",
")",
":",
"if",
"self",
".",
"cached_files",
":",
"raise",
"ValueError",
"(",
"\"Can't change extraction path, files already extracted\"",
")",
"self",
".",
"extraction_path",
"=",
"path"
] |
https://github.com/deanishe/alfred-convert/blob/97407f4ec8dbca5abbc6952b2b56cf3918624177/src/pkg_resources/__init__.py#L1265-L1289
|
||
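A short usage sketch; `set_extraction_path` and `cleanup_resources` are also exposed as module-level functions bound to the shared ResourceManager.

```python
# Usage sketch: redirect pkg_resources' extraction cache to a temp dir
# before anything is extracted, then clean up afterwards.
import tempfile
import pkg_resources

cache_dir = tempfile.mkdtemp()
pkg_resources.set_extraction_path(cache_dir)   # must precede any extraction
# ... call APIs that may extract resources, e.g. resource_filename() ...
leftovers = pkg_resources.cleanup_resources()  # files it could not remove
print(leftovers)
```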
leo-editor/leo-editor
|
383d6776d135ef17d73d935a2f0ecb3ac0e99494
|
leo/plugins/bigdash.py
|
python
|
GlobalSearch.__init__
|
(self)
|
Ctor for GlobalSearch class.
|
Ctor for GlobalSearch class.
|
[
"Ctor",
"for",
"GlobalSearch",
"class",
"."
] |
def __init__(self):
"""Ctor for GlobalSearch class."""
self.fts_max_hits = g.app.config.getInt('fts-max-hits') or 30
# A default: will be overridden by the global-search command.
self.bd = BigDash()
self.gnxcache = GnxCache()
#self.bd.show()
self.bd.add_cmd_handler(self.do_search)
if whoosh:
self.fts = LeoFts(self.gnxcache, g.app.homeLeoDir + "/fts_index")
self.bd.add_cmd_handler(self.do_fts)
self.bd.add_cmd_handler(self.do_stats)
else:
self.fts = None
self.anchors = {}
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"fts_max_hits",
"=",
"g",
".",
"app",
".",
"config",
".",
"getInt",
"(",
"'fts-max-hits'",
")",
"or",
"30",
"# A default: will be overridden by the global-search command.",
"self",
".",
"bd",
"=",
"BigDash",
"(",
")",
"self",
".",
"gnxcache",
"=",
"GnxCache",
"(",
")",
"#self.bd.show()",
"self",
".",
"bd",
".",
"add_cmd_handler",
"(",
"self",
".",
"do_search",
")",
"if",
"whoosh",
":",
"self",
".",
"fts",
"=",
"LeoFts",
"(",
"self",
".",
"gnxcache",
",",
"g",
".",
"app",
".",
"homeLeoDir",
"+",
"\"/fts_index\"",
")",
"self",
".",
"bd",
".",
"add_cmd_handler",
"(",
"self",
".",
"do_fts",
")",
"self",
".",
"bd",
".",
"add_cmd_handler",
"(",
"self",
".",
"do_stats",
")",
"else",
":",
"self",
".",
"fts",
"=",
"None",
"self",
".",
"anchors",
"=",
"{",
"}"
] |
https://github.com/leo-editor/leo-editor/blob/383d6776d135ef17d73d935a2f0ecb3ac0e99494/leo/plugins/bigdash.py#L194-L208
|
||
ella/ella
|
4a1414991f649dc21c4b777dc6b41a922a13faa7
|
ella/core/box.py
|
python
|
Box._get_template_list
|
(self)
|
return t_list
|
Get the hierarchy of templates belonging to the object/box_type given.
|
Get the hierarchy of templates belonging to the object/box_type given.
|
[
"Get",
"the",
"hierarchy",
"of",
"templates",
"belonging",
"to",
"the",
"object",
"/",
"box_type",
"given",
"."
] |
def _get_template_list(self):
" Get the hierarchy of templates belonging to the object/box_type given. "
t_list = []
if hasattr(self.obj, 'category_id') and self.obj.category_id:
cat = self.obj.category
base_path = 'box/category/%s/content_type/%s/' % (cat.path, self.name)
if hasattr(self.obj, 'slug'):
t_list.append(base_path + '%s/%s.html' % (self.obj.slug, self.box_type,))
t_list.append(base_path + '%s.html' % (self.box_type,))
t_list.append(base_path + 'box.html')
base_path = 'box/content_type/%s/' % self.name
if hasattr(self.obj, 'slug'):
t_list.append(base_path + '%s/%s.html' % (self.obj.slug, self.box_type,))
t_list.append(base_path + '%s.html' % (self.box_type,))
t_list.append(base_path + 'box.html')
t_list.append('box/%s.html' % self.box_type)
t_list.append('box/box.html')
return t_list
|
[
"def",
"_get_template_list",
"(",
"self",
")",
":",
"t_list",
"=",
"[",
"]",
"if",
"hasattr",
"(",
"self",
".",
"obj",
",",
"'category_id'",
")",
"and",
"self",
".",
"obj",
".",
"category_id",
":",
"cat",
"=",
"self",
".",
"obj",
".",
"category",
"base_path",
"=",
"'box/category/%s/content_type/%s/'",
"%",
"(",
"cat",
".",
"path",
",",
"self",
".",
"name",
")",
"if",
"hasattr",
"(",
"self",
".",
"obj",
",",
"'slug'",
")",
":",
"t_list",
".",
"append",
"(",
"base_path",
"+",
"'%s/%s.html'",
"%",
"(",
"self",
".",
"obj",
".",
"slug",
",",
"self",
".",
"box_type",
",",
")",
")",
"t_list",
".",
"append",
"(",
"base_path",
"+",
"'%s.html'",
"%",
"(",
"self",
".",
"box_type",
",",
")",
")",
"t_list",
".",
"append",
"(",
"base_path",
"+",
"'box.html'",
")",
"base_path",
"=",
"'box/content_type/%s/'",
"%",
"self",
".",
"name",
"if",
"hasattr",
"(",
"self",
".",
"obj",
",",
"'slug'",
")",
":",
"t_list",
".",
"append",
"(",
"base_path",
"+",
"'%s/%s.html'",
"%",
"(",
"self",
".",
"obj",
".",
"slug",
",",
"self",
".",
"box_type",
",",
")",
")",
"t_list",
".",
"append",
"(",
"base_path",
"+",
"'%s.html'",
"%",
"(",
"self",
".",
"box_type",
",",
")",
")",
"t_list",
".",
"append",
"(",
"base_path",
"+",
"'box.html'",
")",
"t_list",
".",
"append",
"(",
"'box/%s.html'",
"%",
"self",
".",
"box_type",
")",
"t_list",
".",
"append",
"(",
"'box/box.html'",
")",
"return",
"t_list"
] |
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/core/box.py#L121-L141
|
|
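To make the search order concrete, this is the exact list the method builds for one illustrative object (category path `news/tech`, content-type name `article`, slug `hello`, box_type `inline`; all values invented):

```python
# Traced by hand from the method above; most specific template first.
expected = [
    'box/category/news/tech/content_type/article/hello/inline.html',
    'box/category/news/tech/content_type/article/inline.html',
    'box/category/news/tech/content_type/article/box.html',
    'box/content_type/article/hello/inline.html',
    'box/content_type/article/inline.html',
    'box/content_type/article/box.html',
    'box/inline.html',
    'box/box.html',
]
```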
DamnWidget/anaconda
|
a9998fb362320f907d5ccbc6fcf5b62baca677c0
|
anaconda_lib/autopep/autopep8_lib/autopep8.py
|
python
|
readlines_from_file
|
(filename)
|
Return contents of file.
|
Return contents of file.
|
[
"Return",
"contents",
"of",
"file",
"."
] |
def readlines_from_file(filename):
"""Return contents of file."""
with open_with_encoding(filename) as input_file:
return input_file.readlines()
|
[
"def",
"readlines_from_file",
"(",
"filename",
")",
":",
"with",
"open_with_encoding",
"(",
"filename",
")",
"as",
"input_file",
":",
"return",
"input_file",
".",
"readlines",
"(",
")"
] |
https://github.com/DamnWidget/anaconda/blob/a9998fb362320f907d5ccbc6fcf5b62baca677c0/anaconda_lib/autopep/autopep8_lib/autopep8.py#L158-L161
|
||
Tautulli/Tautulli
|
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
|
lib/html5lib/treeadapters/genshi.py
|
python
|
to_genshi
|
(walker)
|
Convert a tree to a genshi tree
:arg walker: the treewalker to use to walk the tree to convert it
:returns: generator of genshi nodes
|
Convert a tree to a genshi tree
|
[
"Convert",
"a",
"tree",
"to",
"a",
"genshi",
"tree"
] |
def to_genshi(walker):
"""Convert a tree to a genshi tree
:arg walker: the treewalker to use to walk the tree to convert it
:returns: generator of genshi nodes
"""
text = []
for token in walker:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
text.append(token["data"])
elif text:
yield TEXT, "".join(text), (None, -1, -1)
text = []
if type in ("StartTag", "EmptyTag"):
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
for attr, value in token["data"].items()])
yield (START, (QName(name), attrs), (None, -1, -1))
if type == "EmptyTag":
type = "EndTag"
if type == "EndTag":
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
yield END, QName(name), (None, -1, -1)
elif type == "Comment":
yield COMMENT, token["data"], (None, -1, -1)
elif type == "Doctype":
yield DOCTYPE, (token["name"], token["publicId"],
token["systemId"]), (None, -1, -1)
else:
pass # FIXME: What to do?
if text:
yield TEXT, "".join(text), (None, -1, -1)
|
[
"def",
"to_genshi",
"(",
"walker",
")",
":",
"text",
"=",
"[",
"]",
"for",
"token",
"in",
"walker",
":",
"type",
"=",
"token",
"[",
"\"type\"",
"]",
"if",
"type",
"in",
"(",
"\"Characters\"",
",",
"\"SpaceCharacters\"",
")",
":",
"text",
".",
"append",
"(",
"token",
"[",
"\"data\"",
"]",
")",
"elif",
"text",
":",
"yield",
"TEXT",
",",
"\"\"",
".",
"join",
"(",
"text",
")",
",",
"(",
"None",
",",
"-",
"1",
",",
"-",
"1",
")",
"text",
"=",
"[",
"]",
"if",
"type",
"in",
"(",
"\"StartTag\"",
",",
"\"EmptyTag\"",
")",
":",
"if",
"token",
"[",
"\"namespace\"",
"]",
":",
"name",
"=",
"\"{%s}%s\"",
"%",
"(",
"token",
"[",
"\"namespace\"",
"]",
",",
"token",
"[",
"\"name\"",
"]",
")",
"else",
":",
"name",
"=",
"token",
"[",
"\"name\"",
"]",
"attrs",
"=",
"Attrs",
"(",
"[",
"(",
"QName",
"(",
"\"{%s}%s\"",
"%",
"attr",
"if",
"attr",
"[",
"0",
"]",
"is",
"not",
"None",
"else",
"attr",
"[",
"1",
"]",
")",
",",
"value",
")",
"for",
"attr",
",",
"value",
"in",
"token",
"[",
"\"data\"",
"]",
".",
"items",
"(",
")",
"]",
")",
"yield",
"(",
"START",
",",
"(",
"QName",
"(",
"name",
")",
",",
"attrs",
")",
",",
"(",
"None",
",",
"-",
"1",
",",
"-",
"1",
")",
")",
"if",
"type",
"==",
"\"EmptyTag\"",
":",
"type",
"=",
"\"EndTag\"",
"if",
"type",
"==",
"\"EndTag\"",
":",
"if",
"token",
"[",
"\"namespace\"",
"]",
":",
"name",
"=",
"\"{%s}%s\"",
"%",
"(",
"token",
"[",
"\"namespace\"",
"]",
",",
"token",
"[",
"\"name\"",
"]",
")",
"else",
":",
"name",
"=",
"token",
"[",
"\"name\"",
"]",
"yield",
"END",
",",
"QName",
"(",
"name",
")",
",",
"(",
"None",
",",
"-",
"1",
",",
"-",
"1",
")",
"elif",
"type",
"==",
"\"Comment\"",
":",
"yield",
"COMMENT",
",",
"token",
"[",
"\"data\"",
"]",
",",
"(",
"None",
",",
"-",
"1",
",",
"-",
"1",
")",
"elif",
"type",
"==",
"\"Doctype\"",
":",
"yield",
"DOCTYPE",
",",
"(",
"token",
"[",
"\"name\"",
"]",
",",
"token",
"[",
"\"publicId\"",
"]",
",",
"token",
"[",
"\"systemId\"",
"]",
")",
",",
"(",
"None",
",",
"-",
"1",
",",
"-",
"1",
")",
"else",
":",
"pass",
"# FIXME: What to do?",
"if",
"text",
":",
"yield",
"TEXT",
",",
"\"\"",
".",
"join",
"(",
"text",
")",
",",
"(",
"None",
",",
"-",
"1",
",",
"-",
"1",
")"
] |
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/html5lib/treeadapters/genshi.py#L7-L54
|
||
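A usage sketch, assuming html5lib and its optional genshi dependency are installed; it walks a parsed tree and prints the genshi-style `(kind, data, pos)` event stream:

```python
import html5lib
from html5lib.treeadapters.genshi import to_genshi  # needs genshi installed

tree = html5lib.parse("<p>hello</p>")
walker = html5lib.getTreeWalker("etree")
for kind, data, pos in to_genshi(walker(tree)):
    print(kind, data)
```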
openedx/edx-platform
|
68dd185a0ab45862a2a61e0f803d7e03d2be71b5
|
openedx/core/djangoapps/credit/signature.py
|
python
|
_encode_secret
|
(secret, provider_id)
|
return secret
|
Helper function for encoding text_type secrets into ascii.
|
Helper function for encoding text_type secrets into ascii.
|
[
"Helper",
"function",
"for",
"encoding",
"text_type",
"secrets",
"into",
"ascii",
"."
] |
def _encode_secret(secret, provider_id):
"""
Helper function for encoding text_type secrets into ascii.
"""
try:
secret.encode('ascii')
except UnicodeEncodeError:
secret = None
log.error('Shared secret key for credit provider "%s" contains non-ASCII unicode.', provider_id)
return secret
|
[
"def",
"_encode_secret",
"(",
"secret",
",",
"provider_id",
")",
":",
"try",
":",
"secret",
".",
"encode",
"(",
"'ascii'",
")",
"except",
"UnicodeEncodeError",
":",
"secret",
"=",
"None",
"log",
".",
"error",
"(",
"'Shared secret key for credit provider \"%s\" contains non-ASCII unicode.'",
",",
"provider_id",
")",
"return",
"secret"
] |
https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/openedx/core/djangoapps/credit/signature.py#L29-L39
|
|
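The same ASCII guard as a standalone sketch, with the module's logger swapped for `print` so it runs on its own:

```python
def encode_secret(secret, provider_id):
    try:
        secret.encode("ascii")  # only checks encodability; value unchanged
    except UnicodeEncodeError:
        print('Shared secret key for credit provider "%s" contains '
              'non-ASCII unicode.' % provider_id)
        return None
    return secret

print(encode_secret("s3cret", "acme"))       # 's3cret'
print(encode_secret("s\u00e9cret", "acme"))  # None: 'é' is not ASCII
```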
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_hxb2/lib/python3.5/site-packages/willow/registry.py
|
python
|
WillowRegistry.get_image_classes
|
(self, with_operation=None, available=None)
|
[] |
def get_image_classes(self, with_operation=None, available=None):
image_classes = self._registered_image_classes.copy()
if with_operation:
image_classes = set(filter(lambda image_class: image_class in self._registered_operations and with_operation in self._registered_operations[image_class], image_classes))
if not image_classes:
raise UnrecognisedOperationError("Could not find image class with the '{0}' operation".format(with_operation))
if available:
# Remove unavailable image classes
available_image_classes = image_classes - set(self._unavailable_image_classes.keys())
# Raise error if all image classes failed the check
if not available_image_classes:
raise UnavailableOperationError('\n'.join([
"The operation '{0}' is available in the following image classes but they all raised errors:".format(with_operation)
] + [
"{image_class_name}: {error_message}".format(
image_class_name=image_class.__name__,
error_message=str(self._unavailable_image_classes.get(image_class, "Unknown error"))
)
for image_class in image_classes
]))
return available_image_classes
else:
return image_classes
|
[
"def",
"get_image_classes",
"(",
"self",
",",
"with_operation",
"=",
"None",
",",
"available",
"=",
"None",
")",
":",
"image_classes",
"=",
"self",
".",
"_registered_image_classes",
".",
"copy",
"(",
")",
"if",
"with_operation",
":",
"image_classes",
"=",
"set",
"(",
"filter",
"(",
"lambda",
"image_class",
":",
"image_class",
"in",
"self",
".",
"_registered_operations",
"and",
"with_operation",
"in",
"self",
".",
"_registered_operations",
"[",
"image_class",
"]",
",",
"image_classes",
")",
")",
"if",
"not",
"image_classes",
":",
"raise",
"UnrecognisedOperationError",
"(",
"\"Could not find image class with the '{0}' operation\"",
".",
"format",
"(",
"with_operation",
")",
")",
"if",
"available",
":",
"# Remove unavailable image classes",
"available_image_classes",
"=",
"image_classes",
"-",
"set",
"(",
"self",
".",
"_unavailable_image_classes",
".",
"keys",
"(",
")",
")",
"# Raise error if all image classes failed the check",
"if",
"not",
"available_image_classes",
":",
"raise",
"UnavailableOperationError",
"(",
"'\\n'",
".",
"join",
"(",
"[",
"\"The operation '{0}' is available in the following image classes but they all raised errors:\"",
".",
"format",
"(",
"with_operation",
")",
"]",
"+",
"[",
"\"{image_class_name}: {error_message}\"",
".",
"format",
"(",
"image_class_name",
"=",
"image_class",
".",
"__name__",
",",
"error_message",
"=",
"str",
"(",
"self",
".",
"_unavailable_image_classes",
".",
"get",
"(",
"image_class",
",",
"\"Unknown error\"",
")",
")",
")",
"for",
"image_class",
"in",
"image_classes",
"]",
")",
")",
"return",
"available_image_classes",
"else",
":",
"return",
"image_classes"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/willow/registry.py#L94-L121
|
||||
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/lib2to3/pygram.py
|
python
|
Symbols.__init__
|
(self, grammar)
|
Initializer.
Creates an attribute for each grammar symbol (nonterminal),
whose value is the symbol's type (an int >= 256).
|
Initializer.
|
[
"Initializer",
"."
] |
def __init__(self, grammar):
"""Initializer.
Creates an attribute for each grammar symbol (nonterminal),
whose value is the symbol's type (an int >= 256).
"""
for name, symbol in grammar.symbol2number.items():
setattr(self, name, symbol)
|
[
"def",
"__init__",
"(",
"self",
",",
"grammar",
")",
":",
"for",
"name",
",",
"symbol",
"in",
"grammar",
".",
"symbol2number",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"self",
",",
"name",
",",
"symbol",
")"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/lib2to3/pygram.py#L22-L29
|
||
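A usage sketch against the stdlib copy of this module (lib2to3 is deprecated but still shipped with CPython 3.7, the version this row comes from):

```python
from lib2to3 import pygram

# pygram.python_symbols is a Symbols instance built exactly as above.
print(pygram.python_grammar.symbol2number["funcdef"])  # an int >= 256
print(pygram.python_symbols.funcdef)                   # same number, as attr
```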
openwpm/OpenWPM
|
771b6db4169374a7f7b6eb5ce6e59ea763f26df4
|
openwpm/command_sequence.py
|
python
|
CommandSequence.recursive_dump_page_source
|
(self, suffix="", timeout=30)
|
Dumps rendered source of current page visit to 'sources' dir.
Unlike `dump_page_source`, this includes iframe sources. Archive is
stored in `manager_params.source_dump_path` and is keyed by the
current `visit_id` and top-level url. The source dump is a gzipped json
file with the following structure:
{
'document_url': "http://example.com",
'source': "<html> ... </html>",
'iframes': {
'frame_1': {'document_url': ...,
'source': ...,
'iframes': { ... }},
'frame_2': {'document_url': ...,
'source': ...,
'iframes': { ... }},
'frame_3': { ... }
}
}
|
Dumps rendered source of current page visit to 'sources' dir.
Unlike `dump_page_source`, this includes iframe sources. Archive is
stored in `manager_params.source_dump_path` and is keyed by the
current `visit_id` and top-level url. The source dump is a gzipped json
file with the following structure:
{
'document_url': "http://example.com",
'source': "<html> ... </html>",
'iframes': {
'frame_1': {'document_url': ...,
'source': ...,
'iframes': { ... }},
'frame_2': {'document_url': ...,
'source': ...,
'iframes': { ... }},
'frame_3': { ... }
}
}
|
[
"Dumps",
"rendered",
"source",
"of",
"current",
"page",
"visit",
"to",
"sources",
"dir",
".",
"Unlike",
"dump_page_source",
"this",
"includes",
"iframe",
"sources",
".",
"Archive",
"is",
"stored",
"in",
"manager_params",
".",
"source_dump_path",
"and",
"is",
"keyed",
"by",
"the",
"current",
"visit_id",
"and",
"top",
"-",
"level",
"url",
".",
"The",
"source",
"dump",
"is",
"a",
"gzipped",
"json",
"file",
"with",
"the",
"following",
"structure",
":",
"{",
"document_url",
":",
"http",
":",
"//",
"example",
".",
"com",
"source",
":",
"<html",
">",
"...",
"<",
"/",
"html",
">",
"iframes",
":",
"{",
"frame_1",
":",
"{",
"document_url",
":",
"...",
"source",
":",
"...",
"iframes",
":",
"{",
"...",
"}}",
"frame_2",
":",
"{",
"document_url",
":",
"...",
"source",
":",
"...",
"iframes",
":",
"{",
"...",
"}}",
"frame_3",
":",
"{",
"...",
"}",
"}",
"}"
] |
def recursive_dump_page_source(self, suffix="", timeout=30):
"""Dumps rendered source of current page visit to 'sources' dir.
Unlike `dump_page_source`, this includes iframe sources. Archive is
stored in `manager_params.source_dump_path` and is keyed by the
current `visit_id` and top-level url. The source dump is a gzipped json
file with the following structure:
{
'document_url': "http://example.com",
'source': "<html> ... </html>",
'iframes': {
'frame_1': {'document_url': ...,
'source': ...,
'iframes': { ... }},
'frame_2': {'document_url': ...,
'source': ...,
'iframes': { ... }},
'frame_3': { ... }
}
}
"""
self.total_timeout += timeout
if not self.contains_get_or_browse:
raise CommandExecutionError(
"No get or browse request preceding the recursive dump"
" page source command",
self,
)
command = RecursiveDumpPageSourceCommand(suffix)
self._commands_with_timeout.append((command, timeout))
|
[
"def",
"recursive_dump_page_source",
"(",
"self",
",",
"suffix",
"=",
"\"\"",
",",
"timeout",
"=",
"30",
")",
":",
"self",
".",
"total_timeout",
"+=",
"timeout",
"if",
"not",
"self",
".",
"contains_get_or_browse",
":",
"raise",
"CommandExecutionError",
"(",
"\"No get or browse request preceding the recursive dump\"",
"\" page source command\"",
",",
"self",
",",
")",
"command",
"=",
"RecursiveDumpPageSourceCommand",
"(",
"suffix",
")",
"self",
".",
"_commands_with_timeout",
".",
"append",
"(",
"(",
"command",
",",
"timeout",
")",
")"
] |
https://github.com/openwpm/OpenWPM/blob/771b6db4169374a7f7b6eb5ce6e59ea763f26df4/openwpm/command_sequence.py#L153-L181
|
||
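A hedged usage sketch of the surrounding CommandSequence API; the URL is illustrative and the TaskManager dispatch is left as a comment.

```python
# Method names come from this repo; constructor defaults are assumed.
from openwpm.command_sequence import CommandSequence

seq = CommandSequence("http://example.com")
seq.get(sleep=3)  # a get/browse must precede the dump, or it raises
seq.recursive_dump_page_source(suffix="landing", timeout=60)
# manager.execute_command_sequence(seq)  # dispatched via an OpenWPM TaskManager
```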
IronLanguages/main
|
a949455434b1fda8c783289e897e78a9a0caabb5
|
External.LCA_RESTRICTED/Languages/CPython/27/Lib/xml/sax/handler.py
|
python
|
ContentHandler.endElement
|
(self, name)
|
Signals the end of an element in non-namespace mode.
The name parameter contains the name of the element type, just
as with the startElement event.
|
Signals the end of an element in non-namespace mode.
|
[
"Signals",
"the",
"end",
"of",
"an",
"element",
"in",
"non",
"-",
"namespace",
"mode",
"."
] |
def endElement(self, name):
"""Signals the end of an element in non-namespace mode.
The name parameter contains the name of the element type, just
as with the startElement event."""
|
[
"def",
"endElement",
"(",
"self",
",",
"name",
")",
":"
] |
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/CPython/27/Lib/xml/sax/handler.py#L134-L138
|
||
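A runnable sketch of overriding this callback: a minimal ContentHandler that counts closing tags.

```python
import xml.sax
from xml.sax.handler import ContentHandler

class EndCounter(ContentHandler):
    def __init__(self):
        super().__init__()
        self.ends = 0

    def endElement(self, name):  # fired once per closing tag
        self.ends += 1

handler = EndCounter()
xml.sax.parseString(b"<a><b/><b/></a>", handler)
print(handler.ends)  # 3
```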
shubhtuls/factored3d
|
1bb77c7ae7dbaba7056e94cb99fdd6c9cc73c7cd
|
nnutils/voxel_net.py
|
python
|
ResNetConv.__init__
|
(self, n_blocks=4)
|
[] |
def __init__(self, n_blocks=4):
super(ResNetConv, self).__init__()
self.resnet = torchvision.models.resnet18(pretrained=True)
self.n_blocks=n_blocks
|
[
"def",
"__init__",
"(",
"self",
",",
"n_blocks",
"=",
"4",
")",
":",
"super",
"(",
"ResNetConv",
",",
"self",
")",
".",
"__init__",
"(",
")",
"self",
".",
"resnet",
"=",
"torchvision",
".",
"models",
".",
"resnet18",
"(",
"pretrained",
"=",
"True",
")",
"self",
".",
"n_blocks",
"=",
"n_blocks"
] |
https://github.com/shubhtuls/factored3d/blob/1bb77c7ae7dbaba7056e94cb99fdd6c9cc73c7cd/nnutils/voxel_net.py#L23-L26
|
||||
makehumancommunity/makehuman
|
8006cf2cc851624619485658bb933a4244bbfd7c
|
makehuman/lib/image.py
|
python
|
Image.data
|
(self)
|
return self._data
|
Return the numpy ndarray that contains the Image data.
|
Return the numpy ndarray that contains the Image data.
|
[
"Return",
"the",
"numpy",
"ndarray",
"that",
"contains",
"the",
"Image",
"data",
"."
] |
def data(self):
"""Return the numpy ndarray that contains the Image data."""
return self._data
|
[
"def",
"data",
"(",
"self",
")",
":",
"return",
"self",
".",
"_data"
] |
https://github.com/makehumancommunity/makehuman/blob/8006cf2cc851624619485658bb933a4244bbfd7c/makehuman/lib/image.py#L156-L158
|
|
Pymol-Scripts/Pymol-script-repo
|
bcd7bb7812dc6db1595953dfa4471fa15fb68c77
|
modules/pdb2pqr/contrib/numpy-1.1.0/numpy/lib/scimath.py
|
python
|
log2
|
(x)
|
return nx.log(x)/_ln2
|
Take log base 2 of x.
If x contains negative inputs, the answer is computed and returned in the
complex domain.
Parameters
----------
x : array_like
Returns
-------
array_like
Examples
--------
(We set the printing precision so the example can be auto-tested)
>>> import numpy as np; np.set_printoptions(precision=4)
>>> log2([4,8])
array([ 2., 3.])
>>> log2([-4,-8,8])
array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ])
|
Take log base 2 of x.
|
[
"Take",
"log",
"base",
"2",
"of",
"x",
"."
] |
def log2(x):
""" Take log base 2 of x.
If x contains negative inputs, the answer is computed and returned in the
complex domain.
Parameters
----------
x : array_like
Returns
-------
array_like
Examples
--------
(We set the printing precision so the example can be auto-tested)
>>> import numpy as np; np.set_printoptions(precision=4)
>>> log2([4,8])
array([ 2., 3.])
>>> log2([-4,-8,8])
array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ])
"""
x = _fix_real_lt_zero(x)
return nx.log(x)/_ln2
|
[
"def",
"log2",
"(",
"x",
")",
":",
"x",
"=",
"_fix_real_lt_zero",
"(",
"x",
")",
"return",
"nx",
".",
"log",
"(",
"x",
")",
"/",
"_ln2"
] |
https://github.com/Pymol-Scripts/Pymol-script-repo/blob/bcd7bb7812dc6db1595953dfa4471fa15fb68c77/modules/pdb2pqr/contrib/numpy-1.1.0/numpy/lib/scimath.py#L291-L318
|
|
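The same behaviour survives in modern NumPy, where this module lives on as `np.emath`:

```python
import numpy as np

print(np.emath.log2([4, 8]))       # [2. 3.]
print(np.emath.log2([-4, -8, 8]))  # complex results for the negative inputs
```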
apple/ccs-calendarserver
|
13c706b985fb728b9aab42dc0fef85aae21921c3
|
contrib/performance/benchmark.py
|
python
|
IOMeasureConsumer.__init__
|
(self, started, done, parser)
|
[] |
def __init__(self, started, done, parser):
self.started = started
self.done = done
self.parser = parser
|
[
"def",
"__init__",
"(",
"self",
",",
"started",
",",
"done",
",",
"parser",
")",
":",
"self",
".",
"started",
"=",
"started",
"self",
".",
"done",
"=",
"done",
"self",
".",
"parser",
"=",
"parser"
] |
https://github.com/apple/ccs-calendarserver/blob/13c706b985fb728b9aab42dc0fef85aae21921c3/contrib/performance/benchmark.py#L44-L47
|
||||
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/site-packages/bokeh-1.4.0-py3.7.egg/bokeh/document/document.py
|
python
|
Document._destructively_move
|
(self, dest_doc)
|
Move all data in this doc to the dest_doc, leaving this doc empty.
Args:
dest_doc (Document) :
The Bokeh document to populate with data from this one
Returns:
None
|
Move all data in this doc to the dest_doc, leaving this doc empty.
|
[
"Move",
"all",
"data",
"in",
"this",
"doc",
"to",
"the",
"dest_doc",
"leaving",
"this",
"doc",
"empty",
"."
] |
def _destructively_move(self, dest_doc):
''' Move all data in this doc to the dest_doc, leaving this doc empty.
Args:
dest_doc (Document) :
The Bokeh document to populate with data from this one
Returns:
None
'''
if dest_doc is self:
raise RuntimeError("Attempted to overwrite a document with itself")
dest_doc.clear()
# we have to remove ALL roots before adding any
# to the new doc or else models referenced from multiple
# roots could be in both docs at once, which isn't allowed.
roots = []
self._push_all_models_freeze()
try:
while self.roots:
r = next(iter(self.roots))
self.remove_root(r)
roots.append(r)
finally:
self._pop_all_models_freeze()
for r in roots:
if r.document is not None:
raise RuntimeError("Somehow we didn't detach %r" % (r))
if len(self._all_models) != 0:
raise RuntimeError("_all_models still had stuff in it: %r" % (self._all_models))
for r in roots:
dest_doc.add_root(r)
dest_doc.title = self.title
|
[
"def",
"_destructively_move",
"(",
"self",
",",
"dest_doc",
")",
":",
"if",
"dest_doc",
"is",
"self",
":",
"raise",
"RuntimeError",
"(",
"\"Attempted to overwrite a document with itself\"",
")",
"dest_doc",
".",
"clear",
"(",
")",
"# we have to remove ALL roots before adding any",
"# to the new doc or else models referenced from multiple",
"# roots could be in both docs at once, which isn't allowed.",
"roots",
"=",
"[",
"]",
"self",
".",
"_push_all_models_freeze",
"(",
")",
"try",
":",
"while",
"self",
".",
"roots",
":",
"r",
"=",
"next",
"(",
"iter",
"(",
"self",
".",
"roots",
")",
")",
"self",
".",
"remove_root",
"(",
"r",
")",
"roots",
".",
"append",
"(",
"r",
")",
"finally",
":",
"self",
".",
"_pop_all_models_freeze",
"(",
")",
"for",
"r",
"in",
"roots",
":",
"if",
"r",
".",
"document",
"is",
"not",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"Somehow we didn't detach %r\"",
"%",
"(",
"r",
")",
")",
"if",
"len",
"(",
"self",
".",
"_all_models",
")",
"!=",
"0",
":",
"raise",
"RuntimeError",
"(",
"\"_all_models still had stuff in it: %r\"",
"%",
"(",
"self",
".",
"_all_models",
")",
")",
"for",
"r",
"in",
"roots",
":",
"dest_doc",
".",
"add_root",
"(",
"r",
")",
"dest_doc",
".",
"title",
"=",
"self",
".",
"title"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/bokeh-1.4.0-py3.7.egg/bokeh/document/document.py#L930-L966
|
||
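Since `_destructively_move` is private Bokeh API, here is a hedged sketch of the same move using only the public pieces it builds on (`clear`, `remove_root`, `add_root`):

```python
# Assumes bokeh is installed; the Div root is an arbitrary example model.
from bokeh.document import Document
from bokeh.models import Div

src, dst = Document(), Document()
src.add_root(Div(text="hello"))
dst.clear()
for root in list(src.roots):
    src.remove_root(root)   # a model may belong to only one document
    dst.add_root(root)
print(len(src.roots), len(dst.roots))  # 0 1
```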
cloudant/bigcouch
|
8e9c1ec0ed1676ff152f10658f5c83a1a91fa8fe
|
couchjs/scons/scons-local-2.0.1/SCons/Environment.py
|
python
|
Base.Depends
|
(self, target, dependency)
|
return tlist
|
Explicitly specify that 'target's depend on 'dependency'.
|
Explicitly specify that 'target's depend on 'dependency'.
|
[
"Explicity",
"specify",
"that",
"target",
"s",
"depend",
"on",
"dependency",
"."
] |
def Depends(self, target, dependency):
"""Explicity specify that 'target's depend on 'dependency'."""
tlist = self.arg2nodes(target, self.fs.Entry)
dlist = self.arg2nodes(dependency, self.fs.Entry)
for t in tlist:
t.add_dependency(dlist)
return tlist
|
[
"def",
"Depends",
"(",
"self",
",",
"target",
",",
"dependency",
")",
":",
"tlist",
"=",
"self",
".",
"arg2nodes",
"(",
"target",
",",
"self",
".",
"fs",
".",
"Entry",
")",
"dlist",
"=",
"self",
".",
"arg2nodes",
"(",
"dependency",
",",
"self",
".",
"fs",
".",
"Entry",
")",
"for",
"t",
"in",
"tlist",
":",
"t",
".",
"add_dependency",
"(",
"dlist",
")",
"return",
"tlist"
] |
https://github.com/cloudant/bigcouch/blob/8e9c1ec0ed1676ff152f10658f5c83a1a91fa8fe/couchjs/scons/scons-local-2.0.1/SCons/Environment.py#L1870-L1876
|
|
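A typical SConstruct fragment using this method; SCons executes the file itself and provides `Environment` in the global namespace, and the file names are illustrative.

```python
# SConstruct (run with `scons`, not `python`)
env = Environment()
prog = env.Program("app", ["main.c"])
env.Depends(prog, "version.txt")  # rebuild app whenever version.txt changes
```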
lyft/cartography
|
921a790d686c679ab5d8936b07e167fd424ee8d6
|
cartography/intel/azure/sql.py
|
python
|
get_server_list
|
(credentials: Credentials, subscription_id: str)
|
return server_list
|
Returning the list of Azure SQL servers.
|
Returning the list of Azure SQL servers.
|
[
"Returning",
"the",
"list",
"of",
"Azure",
"SQL",
"servers",
"."
] |
def get_server_list(credentials: Credentials, subscription_id: str) -> List[Dict]:
"""
Returning the list of Azure SQL servers.
"""
try:
client = get_client(credentials, subscription_id)
server_list = list(map(lambda x: x.as_dict(), client.servers.list()))
# ClientAuthenticationError and ResourceNotFoundError are subclasses under HttpResponseError
except ClientAuthenticationError as e:
logger.warning(f"Client Authentication Error while retrieving servers - {e}")
return []
except ResourceNotFoundError as e:
logger.warning(f"Server resource not found error - {e}")
return []
except HttpResponseError as e:
logger.warning(f"Error while retrieving servers - {e}")
return []
for server in server_list:
x = server['id'].split('/')
server['resourceGroup'] = x[x.index('resourceGroups') + 1]
return server_list
|
[
"def",
"get_server_list",
"(",
"credentials",
":",
"Credentials",
",",
"subscription_id",
":",
"str",
")",
"->",
"List",
"[",
"Dict",
"]",
":",
"try",
":",
"client",
"=",
"get_client",
"(",
"credentials",
",",
"subscription_id",
")",
"server_list",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"as_dict",
"(",
")",
",",
"client",
".",
"servers",
".",
"list",
"(",
")",
")",
")",
"# ClientAuthenticationError and ResourceNotFoundError are subclasses under HttpResponseError",
"except",
"ClientAuthenticationError",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"f\"Client Authentication Error while retrieving servers - {e}\"",
")",
"return",
"[",
"]",
"except",
"ResourceNotFoundError",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"f\"Server resource not found error - {e}\"",
")",
"return",
"[",
"]",
"except",
"HttpResponseError",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"f\"Error while retrieving servers - {e}\"",
")",
"return",
"[",
"]",
"for",
"server",
"in",
"server_list",
":",
"x",
"=",
"server",
"[",
"'id'",
"]",
".",
"split",
"(",
"'/'",
")",
"server",
"[",
"'resourceGroup'",
"]",
"=",
"x",
"[",
"x",
".",
"index",
"(",
"'resourceGroups'",
")",
"+",
"1",
"]",
"return",
"server_list"
] |
https://github.com/lyft/cartography/blob/921a790d686c679ab5d8936b07e167fd424ee8d6/cartography/intel/azure/sql.py#L34-L57
|
|
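The resource-group parsing at the end of the function is easy to check standalone; the server id below is an illustrative Azure resource id.

```python
server_id = ("/subscriptions/00000000-0000-0000-0000-000000000000"
             "/resourceGroups/my-rg/providers/Microsoft.Sql/servers/my-server")
parts = server_id.split("/")
print(parts[parts.index("resourceGroups") + 1])  # my-rg
```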
SheffieldML/GPy
|
bb1bc5088671f9316bc92a46d356734e34c2d5c0
|
GPy/util/normalizer.py
|
python
|
Standardize.inverse_covariance
|
(self, covariance)
|
return (covariance[..., np.newaxis]*(self.std**2))
|
[] |
def inverse_covariance(self, covariance):
return (covariance[..., np.newaxis]*(self.std**2))
|
[
"def",
"inverse_covariance",
"(",
"self",
",",
"covariance",
")",
":",
"return",
"(",
"covariance",
"[",
"...",
",",
"np",
".",
"newaxis",
"]",
"*",
"(",
"self",
".",
"std",
"**",
"2",
")",
")"
] |
https://github.com/SheffieldML/GPy/blob/bb1bc5088671f9316bc92a46d356734e34c2d5c0/GPy/util/normalizer.py#L108-L109
|
|||
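A standalone sketch of the broadcast this one-liner performs: scaling a covariance computed on standardized targets back to original units, one slab per output dimension.

```python
import numpy as np

std = np.array([2.0, 3.0])            # per-output std kept by the normalizer
cov = np.eye(4)                       # covariance in normalized space
out = cov[..., np.newaxis] * std**2   # broadcast to shape (4, 4, 2)
print(out.shape, out[0, 0])           # (4, 4, 2) [4. 9.]
```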
tp4a/teleport
|
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
|
server/www/packages/packages-windows/x86/pyasn1/type/base.py
|
python
|
Asn1Type.isSuperTypeOf
|
(self, other, matchTags=True, matchConstraints=True)
|
return (not matchTags or
(self.tagSet.isSuperTagSetOf(other.tagSet)) and
(not matchConstraints or self.subtypeSpec.isSuperTypeOf(other.subtypeSpec)))
|
Examine |ASN.1| type for subtype relationship with other ASN.1 type.
ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
(:py:mod:`~pyasn1.type.constraint`) are examined when carrying
out ASN.1 types comparison.
Python class inheritance relationship is NOT considered.
Parameters
----------
other: a pyasn1 type object
Class instance representing ASN.1 type.
Returns
-------
: :class:`bool`
:obj:`True` if *other* is a subtype of |ASN.1| type,
:obj:`False` otherwise.
|
Examine |ASN.1| type for subtype relationship with other ASN.1 type.
|
[
"Examine",
"|ASN",
".",
"1|",
"type",
"for",
"subtype",
"relationship",
"with",
"other",
"ASN",
".",
"1",
"type",
"."
] |
def isSuperTypeOf(self, other, matchTags=True, matchConstraints=True):
"""Examine |ASN.1| type for subtype relationship with other ASN.1 type.
ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
(:py:mod:`~pyasn1.type.constraint`) are examined when carrying
out ASN.1 types comparison.
Python class inheritance relationship is NOT considered.
Parameters
----------
other: a pyasn1 type object
Class instance representing ASN.1 type.
Returns
-------
: :class:`bool`
:obj:`True` if *other* is a subtype of |ASN.1| type,
:obj:`False` otherwise.
"""
return (not matchTags or
(self.tagSet.isSuperTagSetOf(other.tagSet)) and
(not matchConstraints or self.subtypeSpec.isSuperTypeOf(other.subtypeSpec)))
|
[
"def",
"isSuperTypeOf",
"(",
"self",
",",
"other",
",",
"matchTags",
"=",
"True",
",",
"matchConstraints",
"=",
"True",
")",
":",
"return",
"(",
"not",
"matchTags",
"or",
"(",
"self",
".",
"tagSet",
".",
"isSuperTagSetOf",
"(",
"other",
".",
"tagSet",
")",
")",
"and",
"(",
"not",
"matchConstraints",
"or",
"self",
".",
"subtypeSpec",
".",
"isSuperTypeOf",
"(",
"other",
".",
"subtypeSpec",
")",
")",
")"
] |
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-windows/x86/pyasn1/type/base.py#L112-L134
|
|
ShadowXZT/pytorch_RFCN
|
0e532444263938aa4d000113dc6aac2e72b4b925
|
faster_rcnn/datasets/coco.py
|
python
|
coco._roidb_from_proposals
|
(self, method)
|
return roidb
|
Creates a roidb from pre-computed proposals of a particular methods.
|
Creates a roidb from pre-computed proposals of a particular methods.
|
[
"Creates",
"a",
"roidb",
"from",
"pre",
"-",
"computed",
"proposals",
"of",
"a",
"particular",
"methods",
"."
] |
def _roidb_from_proposals(self, method):
"""
Creates a roidb from pre-computed proposals of a particular methods.
"""
top_k = self.config['top_k']
cache_file = osp.join(self.cache_path, self.name +
'_{:s}_top{:d}'.format(method, top_k) +
'_roidb.pkl')
if osp.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{:s} {:s} roidb loaded from {:s}'.format(self.name, method,
cache_file)
return roidb
if self._image_set in self._gt_splits:
gt_roidb = self.gt_roidb()
method_roidb = self._load_proposals(method, gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, method_roidb)
# Make sure we don't use proposals that are contained in crowds
roidb = _filter_crowd_proposals(roidb, self.config['crowd_thresh'])
else:
roidb = self._load_proposals(method, None)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote {:s} roidb to {:s}'.format(method, cache_file)
return roidb
|
[
"def",
"_roidb_from_proposals",
"(",
"self",
",",
"method",
")",
":",
"top_k",
"=",
"self",
".",
"config",
"[",
"'top_k'",
"]",
"cache_file",
"=",
"osp",
".",
"join",
"(",
"self",
".",
"cache_path",
",",
"self",
".",
"name",
"+",
"'_{:s}_top{:d}'",
".",
"format",
"(",
"method",
",",
"top_k",
")",
"+",
"'_roidb.pkl'",
")",
"if",
"osp",
".",
"exists",
"(",
"cache_file",
")",
":",
"with",
"open",
"(",
"cache_file",
",",
"'rb'",
")",
"as",
"fid",
":",
"roidb",
"=",
"cPickle",
".",
"load",
"(",
"fid",
")",
"print",
"'{:s} {:s} roidb loaded from {:s}'",
".",
"format",
"(",
"self",
".",
"name",
",",
"method",
",",
"cache_file",
")",
"return",
"roidb",
"if",
"self",
".",
"_image_set",
"in",
"self",
".",
"_gt_splits",
":",
"gt_roidb",
"=",
"self",
".",
"gt_roidb",
"(",
")",
"method_roidb",
"=",
"self",
".",
"_load_proposals",
"(",
"method",
",",
"gt_roidb",
")",
"roidb",
"=",
"imdb",
".",
"merge_roidbs",
"(",
"gt_roidb",
",",
"method_roidb",
")",
"# Make sure we don't use proposals that are contained in crowds",
"roidb",
"=",
"_filter_crowd_proposals",
"(",
"roidb",
",",
"self",
".",
"config",
"[",
"'crowd_thresh'",
"]",
")",
"else",
":",
"roidb",
"=",
"self",
".",
"_load_proposals",
"(",
"method",
",",
"None",
")",
"with",
"open",
"(",
"cache_file",
",",
"'wb'",
")",
"as",
"fid",
":",
"cPickle",
".",
"dump",
"(",
"roidb",
",",
"fid",
",",
"cPickle",
".",
"HIGHEST_PROTOCOL",
")",
"print",
"'wrote {:s} roidb to {:s}'",
".",
"format",
"(",
"method",
",",
"cache_file",
")",
"return",
"roidb"
] |
https://github.com/ShadowXZT/pytorch_RFCN/blob/0e532444263938aa4d000113dc6aac2e72b4b925/faster_rcnn/datasets/coco.py#L141-L168
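The function above is Python 2 (`print` statements, `cPickle`); the pickle-cache pattern it uses translates to Python 3 as the generic sketch below (the names are mine, not the project's).

import os.path as osp
import pickle

# Generic Python 3 sketch of the cache-or-compute pattern used above.
def load_or_build(cache_file, build):
    if osp.exists(cache_file):
        with open(cache_file, 'rb') as fid:
            return pickle.load(fid)
    result = build()
    with open(cache_file, 'wb') as fid:
        pickle.dump(result, fid, pickle.HIGHEST_PROTOCOL)
    return result

roidb = load_or_build('/tmp/demo_roidb.pkl', lambda: [{'boxes': []}])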
|
|
theotherp/nzbhydra
|
4b03d7f769384b97dfc60dade4806c0fc987514e
|
libs/cryptography/hazmat/backends/openssl/dsa.py
|
python
|
_truncate_digest_for_dsa
|
(dsa_cdata, digest, backend)
|
return _truncate_digest(digest, order_bits)
|
This function truncates digests that are longer than a given DSA
key's length so they can be signed. OpenSSL does this for us in
1.0.0c+ and it isn't needed in 0.9.8, but that leaves us with three
releases (1.0.0, 1.0.0a, and 1.0.0b) where this is a problem. This
truncation is not required in 0.9.8 because DSA is limited to SHA-1.
|
This function truncates digests that are longer than a given DSA
key's length so they can be signed. OpenSSL does this for us in
1.0.0c+ and it isn't needed in 0.9.8, but that leaves us with three
releases (1.0.0, 1.0.0a, and 1.0.0b) where this is a problem. This
truncation is not required in 0.9.8 because DSA is limited to SHA-1.
|
[
"This",
"function",
"truncates",
"digests",
"that",
"are",
"longer",
"than",
"a",
"given",
"DS",
"key",
"s",
"length",
"so",
"they",
"can",
"be",
"signed",
".",
"OpenSSL",
"does",
"this",
"for",
"us",
"in",
"1",
".",
"0",
".",
"0c",
"+",
"and",
"it",
"isn",
"t",
"needed",
"in",
"0",
".",
"9",
".",
"8",
"but",
"that",
"leaves",
"us",
"with",
"three",
"releases",
"(",
"1",
".",
"0",
".",
"0",
"1",
".",
"0",
".",
"0a",
"and",
"1",
".",
"0",
".",
"0b",
")",
"where",
"this",
"is",
"a",
"problem",
".",
"This",
"truncation",
"is",
"not",
"required",
"in",
"0",
".",
"9",
".",
"8",
"because",
"DSA",
"is",
"limited",
"to",
"SHA",
"-",
"1",
"."
] |
def _truncate_digest_for_dsa(dsa_cdata, digest, backend):
"""
This function truncates digests that are longer than a given DSA
key's length so they can be signed. OpenSSL does this for us in
1.0.0c+ and it isn't needed in 0.9.8, but that leaves us with three
releases (1.0.0, 1.0.0a, and 1.0.0b) where this is a problem. This
truncation is not required in 0.9.8 because DSA is limited to SHA-1.
"""
order_bits = backend._lib.BN_num_bits(dsa_cdata.q)
return _truncate_digest(digest, order_bits)
|
[
"def",
"_truncate_digest_for_dsa",
"(",
"dsa_cdata",
",",
"digest",
",",
"backend",
")",
":",
"order_bits",
"=",
"backend",
".",
"_lib",
".",
"BN_num_bits",
"(",
"dsa_cdata",
".",
"q",
")",
"return",
"_truncate_digest",
"(",
"digest",
",",
"order_bits",
")"
] |
https://github.com/theotherp/nzbhydra/blob/4b03d7f769384b97dfc60dade4806c0fc987514e/libs/cryptography/hazmat/backends/openssl/dsa.py#L16-L26
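The `_truncate_digest` helper is not shown in this record, so the sketch below is an assumption: it implements the standard FIPS 186-3 rule of keeping the leftmost `order_bits` bits of an over-long digest.

# Assumed behavior of the unshown _truncate_digest helper: keep the
# leftmost order_bits bits of the digest (FIPS 186-3 style truncation).
def truncate_digest(digest: bytes, order_bits: int) -> bytes:
    if len(digest) * 8 <= order_bits:
        return digest
    digest = digest[:(order_bits + 7) // 8]   # keep leading whole bytes
    extra_bits = len(digest) * 8 - order_bits
    if extra_bits:                            # drop surplus low-order bits
        value = int.from_bytes(digest, 'big') >> extra_bits
        digest = value.to_bytes((order_bits + 7) // 8, 'big')
    return digest

print(truncate_digest(b'\xff' * 32, 160).hex())  # 20 bytes remain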
|
|
JDASoftwareGroup/kartothek
|
1821ea5df60d4079d3911b3c2f17be11d8780e22
|
kartothek/utils/migration_helpers.py
|
python
|
deprecate_parameters
|
(warning: str, *parameters: str)
|
return wrapper
|
Decorator, raising warnings that specified parameters of the decorated function are deprecated and will be removed
or changed in the future.
.. note:: Please only use this decorator before other decorators that preserve the function __name__ and __signature__.
Also note that the correct call origin cannot be returned if this decorator is nested inside others. If you
absolutely have to use it with other decorators, best add it last.
.. note:: You may stack `deprecate_parameters` and `deprecate_parameters_if_set` decorators interchangeably.
Examples
--------
>>> from kartothek.utils.migration_helpers import deprecate_parameters
>>> message = 'Parameter {parameter} is deprecated due to reason X!'
>>> message2 = 'Parameter {parameter} is deprecated due to reason Y!'
>>> @deprecate_parameters(message, 'param1', 'param2')
... @deprecate_parameters(message2, 'param4')
... def func(param1: str, param2: int, param3: float, param4: float):
... return param1, param2, param3, param4
...
>>> # Warnings will be generated for `param1`, `param2` and `param4` with a different message
>>> func('example', 0, 5.0, 10.0)
('example', 0, 5.0, 10.0)
Parameters
----------
warning: str
Warning, the DeprecationWarnings will be raised with. Please make sure to include the substring '{parameter}'
that will be replaced by the parameter name in the warning.
*parameters: Tuple [str]
Tuple of strings denoting the parameters to be marked deprecated.
Raises
------
DeprecationWarning
One deprecation warning per parameter containing the formatted passed `warning` string.
ValueError
If the validation routines in place for the decorator are not passed.
Possible issues:
No param specified;
Duplicate param definition;
Declared param does not match underlying function signature.
|
Decorator, raising warnings that specified parameters of the decorated function are deprecated and will be removed
or changed in the future.
|
[
"Decorator",
"raising",
"warnings",
"that",
"specified",
"parameters",
"of",
"the",
"decorated",
"function",
"are",
"deprecated",
"and",
"will",
"be",
"removed",
"or",
"changed",
"in",
"the",
"future",
"."
] |
def deprecate_parameters(warning: str, *parameters: str) -> Callable:
"""
Decorator, raising warnings that specified parameters of the decorated function are deprecated and will be removed
or changed in the future.
.. note:: Please only use this decorator before other decorators that preserve the function __name__ and __signature__.
Also note that the correct call origin cannot be returned if this decorator is nested inside others. If you
absolutely have to use it with other decorators, best add it last.
.. note:: You may stack `deprecate_parameters` and `deprecate_parameters_if_set` decorators interchangeably.
Examples
--------
>>> from kartothek.utils.migration_helpers import deprecate_parameters
>>> message = 'Parameter {parameter} is deprecated due to reason X!'
>>> message2 = 'Parameter {parameter} is deprecated due to reason Y!'
>>> @deprecate_parameters(message, 'param1', 'param2')
... @deprecate_parameters(message2, 'param4')
... def func(param1: str, param2: int, param3: float, param4: float):
... return param1, param2, param3, param4
...
>>> # Warnings will be generated for `param1`, `param2` and `param4` with a different message
>>> func('example', 0, 5.0, 10.0)
('example', 0, 5.0, 10.0)
Parameters
----------
warning: str
Warning, the DeprecationWarnings will be raised with. Please make sure to include the substring '{parameter}'
that will be replaced by the parameter name in the warning.
*parameters: Tuple [str]
Tuple of strings denoting the parameters to be marked deprecated.
Raises
------
DeprecationWarning
One deprecation warning per parameter containing the formatted passed `warning` string.
ValueError
If the validation routines in place for the decorator are not passed.
Possible issues:
No param specified;
Duplicate param definition;
Declared param does not match underlying function signature.
"""
def wrapper(func):
@wraps(func)
def wraps_func(*args, **kwargs):
def warn_logic() -> None:
for parameter in parameters:
raise_warning(
parameter=parameter,
warning=warning,
func_name=func.__name__,
stacklevel=5,
)
return _handle_suppress_warnings_in_subsequent_deprecators(
wraps_func=wraps_func,
warning_func=warn_logic,
func=func,
args=args,
kwargs=kwargs,
)
_check_params(func=func, params=parameters)
return _make_decorator_stackable(
wrapper_func=wraps_func, base_func=func, exclude_parameters=parameters
)
return wrapper
|
[
"def",
"deprecate_parameters",
"(",
"warning",
":",
"str",
",",
"*",
"parameters",
":",
"str",
")",
"->",
"Callable",
":",
"def",
"wrapper",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wraps_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"warn_logic",
"(",
")",
"->",
"None",
":",
"for",
"parameter",
"in",
"parameters",
":",
"raise_warning",
"(",
"parameter",
"=",
"parameter",
",",
"warning",
"=",
"warning",
",",
"func_name",
"=",
"func",
".",
"__name__",
",",
"stacklevel",
"=",
"5",
",",
")",
"return",
"_handle_suppress_warnings_in_subsequent_deprecators",
"(",
"wraps_func",
"=",
"wraps_func",
",",
"warning_func",
"=",
"warn_logic",
",",
"func",
"=",
"func",
",",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
",",
")",
"_check_params",
"(",
"func",
"=",
"func",
",",
"params",
"=",
"parameters",
")",
"return",
"_make_decorator_stackable",
"(",
"wrapper_func",
"=",
"wraps_func",
",",
"base_func",
"=",
"func",
",",
"exclude_parameters",
"=",
"parameters",
")",
"return",
"wrapper"
] |
https://github.com/JDASoftwareGroup/kartothek/blob/1821ea5df60d4079d3911b3c2f17be11d8780e22/kartothek/utils/migration_helpers.py#L374-L444
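A stripped-down sketch of the same decorator pattern, dropping kartothek's validation and stacking helpers (`raise_warning`, `_check_params`, `_make_decorator_stackable`); it is illustrative, not the project's implementation.

import warnings
from functools import wraps

# Minimal deprecation decorator in the spirit of the one above.
def deprecate_params(warning: str, *parameters: str):
    def wrapper(func):
        @wraps(func)
        def wraps_func(*args, **kwargs):
            for parameter in parameters:
                warnings.warn(warning.format(parameter=parameter),
                              DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)
        return wraps_func
    return wrapper

@deprecate_params('Parameter {parameter} is deprecated!', 'param1')
def func(param1, param2):
    return param1, param2

func('example', 0)  # emits one DeprecationWarning for param1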
|
|
facelessuser/BracketHighlighter
|
223ffd4ceafd58686503e3328934c039e959a88c
|
bh_core.py
|
python
|
BhThread.reset
|
(self)
|
Reset the thread variables.
|
Reset the thread variables.
|
[
"Reset",
"the",
"thread",
"variables",
"."
] |
def reset(self):
"""Reset the thread variables."""
self.wait_time = 0.12
self.time = time()
self.queue = Queue()
self.modified = False
self.type = BH_MATCH_TYPE_SELECTION
self.ignore_all = False
self.abort = False
|
[
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"wait_time",
"=",
"0.12",
"self",
".",
"time",
"=",
"time",
"(",
")",
"self",
".",
"queue",
"=",
"Queue",
"(",
")",
"self",
".",
"modified",
"=",
"False",
"self",
".",
"type",
"=",
"BH_MATCH_TYPE_SELECTION",
"self",
".",
"ignore_all",
"=",
"False",
"self",
".",
"abort",
"=",
"False"
] |
https://github.com/facelessuser/BracketHighlighter/blob/223ffd4ceafd58686503e3328934c039e959a88c/bh_core.py#L1111-L1120
|
||
keiffster/program-y
|
8c99b56f8c32f01a7b9887b5daae9465619d0385
|
src/programy/parser/pattern/graph.py
|
python
|
PatternGraph.pattern_factory
|
(self)
|
return self._pattern_factory
|
[] |
def pattern_factory(self):
return self._pattern_factory
|
[
"def",
"pattern_factory",
"(",
"self",
")",
":",
"return",
"self",
".",
"_pattern_factory"
] |
https://github.com/keiffster/program-y/blob/8c99b56f8c32f01a7b9887b5daae9465619d0385/src/programy/parser/pattern/graph.py#L42-L43
|
|||
buke/GreenOdoo
|
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
|
runtime/python/lib/python2.7/site-packages/Jinja2-2.6-py2.7.egg/jinja2/environment.py
|
python
|
Environment.compile
|
(self, source, name=None, filename=None, raw=False,
defer_init=False)
|
Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
the `filename` parameter is the estimated filename of the template on
the file system. If the template came from a database or memory this
can be omitted.
The return value of this method is a python code object. If the `raw`
parameter is `True` the return value will be a string with python
code equivalent to the bytecode returned otherwise. This method is
mainly used internally.
`defer_init` is used internally to aid the module code generator. This
causes the generated code to be able to import without the global
environment variable to be set.
.. versionadded:: 2.4
`defer_init` parameter added.
|
Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
the `filename` parameter is the estimated filename of the template on
the file system. If the template came from a database or memory this
can be omitted.
|
[
"Compile",
"a",
"node",
"or",
"template",
"source",
"code",
".",
"The",
"name",
"parameter",
"is",
"the",
"load",
"name",
"of",
"the",
"template",
"after",
"it",
"was",
"joined",
"using",
":",
"meth",
":",
"join_path",
"if",
"necessary",
"not",
"the",
"filename",
"on",
"the",
"file",
"system",
".",
"the",
"filename",
"parameter",
"is",
"the",
"estimated",
"filename",
"of",
"the",
"template",
"on",
"the",
"file",
"system",
".",
"If",
"the",
"template",
"came",
"from",
"a",
"database",
"or",
"memory",
"this",
"can",
"be",
"omitted",
"."
] |
def compile(self, source, name=None, filename=None, raw=False,
defer_init=False):
"""Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
the `filename` parameter is the estimated filename of the template on
the file system. If the template came from a database or memory this
can be omitted.
The return value of this method is a python code object. If the `raw`
parameter is `True` the return value will be a string with python
code equivalent to the bytecode returned otherwise. This method is
mainly used internally.
`defer_init` is used internally to aid the module code generator. This
causes the generated code to be able to import without the global
environment variable to be set.
.. versionadded:: 2.4
`defer_init` parameter added.
"""
source_hint = None
try:
if isinstance(source, basestring):
source_hint = source
source = self._parse(source, name, filename)
if self.optimized:
source = optimize(source, self)
source = self._generate(source, name, filename,
defer_init=defer_init)
if raw:
return source
if filename is None:
filename = '<template>'
else:
filename = _encode_filename(filename)
return self._compile(source, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
|
[
"def",
"compile",
"(",
"self",
",",
"source",
",",
"name",
"=",
"None",
",",
"filename",
"=",
"None",
",",
"raw",
"=",
"False",
",",
"defer_init",
"=",
"False",
")",
":",
"source_hint",
"=",
"None",
"try",
":",
"if",
"isinstance",
"(",
"source",
",",
"basestring",
")",
":",
"source_hint",
"=",
"source",
"source",
"=",
"self",
".",
"_parse",
"(",
"source",
",",
"name",
",",
"filename",
")",
"if",
"self",
".",
"optimized",
":",
"source",
"=",
"optimize",
"(",
"source",
",",
"self",
")",
"source",
"=",
"self",
".",
"_generate",
"(",
"source",
",",
"name",
",",
"filename",
",",
"defer_init",
"=",
"defer_init",
")",
"if",
"raw",
":",
"return",
"source",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"'<template>'",
"else",
":",
"filename",
"=",
"_encode_filename",
"(",
"filename",
")",
"return",
"self",
".",
"_compile",
"(",
"source",
",",
"filename",
")",
"except",
"TemplateSyntaxError",
":",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
"self",
".",
"handle_exception",
"(",
"exc_info",
",",
"source_hint",
"=",
"source",
")"
] |
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/Jinja2-2.6-py2.7.egg/jinja2/environment.py#L454-L493
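A usage sketch for the method above (assuming a Jinja2 installation; this signature has stayed stable across releases): `raw=True` returns the generated Python source rather than a code object.

from jinja2 import Environment

env = Environment()
# raw=True: get the generated Python module source as a string.
src = env.compile('Hello {{ name }}!', name='greeting', raw=True)
print(type(src))                      # <class 'str'>
# raw=False (default): get a compiled code object ready for exec.
code = env.compile('Hello {{ name }}!', name='greeting')
print(type(code))                     # <class 'code'>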
|
||
microsoft/mssql-scripter
|
b9b38beb7bb14bdd96ff4583efae9f1715509d5c
|
mssqlscripter/argparser.py
|
python
|
get_connection_string_from_environment
|
(parameters)
|
return False
|
Get connection string from environment variable.
|
Get connection string from environment variable.
|
[
"Get",
"connection",
"string",
"from",
"environment",
"variable",
"."
] |
def get_connection_string_from_environment(parameters):
"""
Get connection string from environment variable.
"""
if MSSQL_SCRIPTER_CONNECTION_STRING in os.environ:
parameters.ConnectionString = os.environ[MSSQL_SCRIPTER_CONNECTION_STRING]
return True
return False
|
[
"def",
"get_connection_string_from_environment",
"(",
"parameters",
")",
":",
"if",
"MSSQL_SCRIPTER_CONNECTION_STRING",
"in",
"os",
".",
"environ",
":",
"parameters",
".",
"ConnectionString",
"=",
"os",
".",
"environ",
"[",
"MSSQL_SCRIPTER_CONNECTION_STRING",
"]",
"return",
"True",
"return",
"False"
] |
https://github.com/microsoft/mssql-scripter/blob/b9b38beb7bb14bdd96ff4583efae9f1715509d5c/mssqlscripter/argparser.py#L446-L454
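A self-contained usage sketch; the environment-variable name and the stand-in parameters object are assumptions for illustration.

import os
from types import SimpleNamespace

# Hypothetical constant name mirroring the one referenced above.
MSSQL_SCRIPTER_CONNECTION_STRING = 'MSSQL_SCRIPTER_CONNECTION_STRING'
os.environ[MSSQL_SCRIPTER_CONNECTION_STRING] = 'Server=.;Database=master;'

parameters = SimpleNamespace(ConnectionString=None)   # stand-in object
if MSSQL_SCRIPTER_CONNECTION_STRING in os.environ:
    parameters.ConnectionString = os.environ[MSSQL_SCRIPTER_CONNECTION_STRING]
print(parameters.ConnectionString)    # Server=.;Database=master;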
|
|
seleniumbase/SeleniumBase
|
0d1de7238bfafe4b7309fec6f735dcd0dc4538a8
|
seleniumbase/fixtures/base_case.py
|
python
|
BaseCase.save_cookies
|
(self, name="cookies.txt")
|
Saves the page cookies to the "saved_cookies" folder.
|
Saves the page cookies to the "saved_cookies" folder.
|
[
"Saves",
"the",
"page",
"cookies",
"to",
"the",
"saved_cookies",
"folder",
"."
] |
def save_cookies(self, name="cookies.txt"):
""" Saves the page cookies to the "saved_cookies" folder. """
self.wait_for_ready_state_complete()
cookies = self.driver.get_cookies()
json_cookies = json.dumps(cookies)
if name.endswith("/"):
raise Exception("Invalid filename for Cookies!")
if "/" in name:
name = name.split("/")[-1]
if len(name) < 1:
raise Exception("Filename for Cookies is too short!")
if not name.endswith(".txt"):
name = name + ".txt"
folder = constants.SavedCookies.STORAGE_FOLDER
abs_path = os.path.abspath(".")
file_path = abs_path + "/%s" % folder
if not os.path.exists(file_path):
os.makedirs(file_path)
cookies_file_path = "%s/%s" % (file_path, name)
cookies_file = codecs.open(cookies_file_path, "w+", encoding="utf-8")
cookies_file.writelines(json_cookies)
cookies_file.close()
|
[
"def",
"save_cookies",
"(",
"self",
",",
"name",
"=",
"\"cookies.txt\"",
")",
":",
"self",
".",
"wait_for_ready_state_complete",
"(",
")",
"cookies",
"=",
"self",
".",
"driver",
".",
"get_cookies",
"(",
")",
"json_cookies",
"=",
"json",
".",
"dumps",
"(",
"cookies",
")",
"if",
"name",
".",
"endswith",
"(",
"\"/\"",
")",
":",
"raise",
"Exception",
"(",
"\"Invalid filename for Cookies!\"",
")",
"if",
"\"/\"",
"in",
"name",
":",
"name",
"=",
"name",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
"if",
"len",
"(",
"name",
")",
"<",
"1",
":",
"raise",
"Exception",
"(",
"\"Filename for Cookies is too short!\"",
")",
"if",
"not",
"name",
".",
"endswith",
"(",
"\".txt\"",
")",
":",
"name",
"=",
"name",
"+",
"\".txt\"",
"folder",
"=",
"constants",
".",
"SavedCookies",
".",
"STORAGE_FOLDER",
"abs_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"\".\"",
")",
"file_path",
"=",
"abs_path",
"+",
"\"/%s\"",
"%",
"folder",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"os",
".",
"makedirs",
"(",
"file_path",
")",
"cookies_file_path",
"=",
"\"%s/%s\"",
"%",
"(",
"file_path",
",",
"name",
")",
"cookies_file",
"=",
"codecs",
".",
"open",
"(",
"cookies_file_path",
",",
"\"w+\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"cookies_file",
".",
"writelines",
"(",
"json_cookies",
")",
"cookies_file",
".",
"close",
"(",
")"
] |
https://github.com/seleniumbase/SeleniumBase/blob/0d1de7238bfafe4b7309fec6f735dcd0dc4538a8/seleniumbase/fixtures/base_case.py#L3072-L3093
|
||
plivo/plivoframework
|
29fc41fb3c887d5d9022a941e87bbeb2269112ff
|
src/plivo/core/freeswitch/commands.py
|
python
|
Commands.speak
|
(self, text, uuid="", lock=True, loops=1)
|
return self._protocol_sendmsg("speak", text, uuid, lock, loops)
|
Please refer to http://wiki.freeswitch.org/wiki/TTS
>>> "set" data="tts_engine=flite"
>>> "set" data="tts_voice=kal"
>>> speak(text)
For Inbound connection, uuid argument is mandatory.
|
Please refer to http://wiki.freeswitch.org/wiki/TTS
|
[
"Please",
"refer",
"to",
"http",
":",
"//",
"wiki",
".",
"freeswitch",
".",
"org",
"/",
"wiki",
"/",
"TTS"
] |
def speak(self, text, uuid="", lock=True, loops=1):
"""Please refer to http://wiki.freeswitch.org/wiki/TTS
>>> "set" data="tts_engine=flite"
>>> "set" data="tts_voice=kal"
>>> speak(text)
For Inbound connection, uuid argument is mandatory.
"""
return self._protocol_sendmsg("speak", text, uuid, lock, loops)
|
[
"def",
"speak",
"(",
"self",
",",
"text",
",",
"uuid",
"=",
"\"\"",
",",
"lock",
"=",
"True",
",",
"loops",
"=",
"1",
")",
":",
"return",
"self",
".",
"_protocol_sendmsg",
"(",
"\"speak\"",
",",
"text",
",",
"uuid",
",",
"lock",
",",
"loops",
")"
] |
https://github.com/plivo/plivoframework/blob/29fc41fb3c887d5d9022a941e87bbeb2269112ff/src/plivo/core/freeswitch/commands.py#L497-L506
|
|
exaile/exaile
|
a7b58996c5c15b3aa7b9975ac13ee8f784ef4689
|
xl/formatter.py
|
python
|
TimeTagFormatter.format_value
|
(value, format='short')
|
return text
|
Formats a length value
:param value: the length in seconds
:type value: float
:param format: verbosity of the output,
possible values are:
* short: "1:02:42"
* long: "1h, 2m, 42s"
* verbose: "1 hour, 2 minutes, 42 seconds"
:type format: string
:returns: the formatted value
:rtype: string
|
Formats a length value
|
[
"Formats",
"a",
"length",
"value"
] |
def format_value(value, format='short'):
"""
Formats a length value
:param value: the length in seconds
:type value: float
:param format: verbosity of the output,
possible values are:
* short: "1:02:42"
* long: "1h, 2m, 42s"
* verbose: "1 hour, 2 minutes, 42 seconds"
:type format: string
:returns: the formatted value
:rtype: string
"""
span = TimeSpan(value)
text = ''
if format == 'verbose':
if span.days > 0:
text += ngettext('%d day, ', '%d days, ', span.days) % span.days
if span.hours > 0:
text += ngettext('%d hour, ', '%d hours, ', span.hours) % span.hours
text += ngettext('%d minute, ', '%d minutes, ', span.minutes) % span.minutes
text += ngettext('%d second', '%d seconds', span.seconds) % span.seconds
elif format == 'long':
if span.days > 0:
# TRANSLATORS: Short form of an amount of days
text += _('%dd, ') % span.days
if span.hours > 0:
# TRANSLATORS: Short form of an amount of hours
text += _('%dh, ') % span.hours
# TRANSLATORS: Short form of an amount of minutes
text += _('%dm, ') % span.minutes
# TRANSLATORS: Short form of an amount of seconds
text += _('%ds') % span.seconds
elif format == 'short':
if span.days > 0:
# TRANSLATORS: Short form of an amount of days
text += _('%dd ') % span.days
if span.hours > 0 or text: # always show hours when > 1 day
# TRANSLATORS: Time duration (hours:minutes:seconds)
text += _('%d:%02d:%02d') % (span.hours, span.minutes, span.seconds)
else:
# TRANSLATORS: Time duration (minutes:seconds)
text += _('%d:%02d') % (span.minutes, span.seconds)
else:
raise ValueError(
'Invalid argument "%s" passed to parameter '
'"format" for tag "__length", possible arguments are '
'"short", "long" and "verbose"' % format
)
return text
|
[
"def",
"format_value",
"(",
"value",
",",
"format",
"=",
"'short'",
")",
":",
"span",
"=",
"TimeSpan",
"(",
"value",
")",
"text",
"=",
"''",
"if",
"format",
"==",
"'verbose'",
":",
"if",
"span",
".",
"days",
">",
"0",
":",
"text",
"+=",
"ngettext",
"(",
"'%d day, '",
",",
"'%d days, '",
",",
"span",
".",
"days",
")",
"%",
"span",
".",
"days",
"if",
"span",
".",
"hours",
">",
"0",
":",
"text",
"+=",
"ngettext",
"(",
"'%d hour, '",
",",
"'%d hours, '",
",",
"span",
".",
"hours",
")",
"%",
"span",
".",
"hours",
"text",
"+=",
"ngettext",
"(",
"'%d minute, '",
",",
"'%d minutes, '",
",",
"span",
".",
"minutes",
")",
"%",
"span",
".",
"minutes",
"text",
"+=",
"ngettext",
"(",
"'%d second'",
",",
"'%d seconds'",
",",
"span",
".",
"seconds",
")",
"%",
"span",
".",
"seconds",
"elif",
"format",
"==",
"'long'",
":",
"if",
"span",
".",
"days",
">",
"0",
":",
"# TRANSLATORS: Short form of an amount of days",
"text",
"+=",
"_",
"(",
"'%dd, '",
")",
"%",
"span",
".",
"days",
"if",
"span",
".",
"hours",
">",
"0",
":",
"# TRANSLATORS: Short form of an amount of hours",
"text",
"+=",
"_",
"(",
"'%dh, '",
")",
"%",
"span",
".",
"hours",
"# TRANSLATORS: Short form of an amount of minutes",
"text",
"+=",
"_",
"(",
"'%dm, '",
")",
"%",
"span",
".",
"minutes",
"# TRANSLATORS: Short form of an amount of seconds",
"text",
"+=",
"_",
"(",
"'%ds'",
")",
"%",
"span",
".",
"seconds",
"elif",
"format",
"==",
"'short'",
":",
"if",
"span",
".",
"days",
">",
"0",
":",
"# TRANSLATORS: Short form of an amount of days",
"text",
"+=",
"_",
"(",
"'%dd '",
")",
"%",
"span",
".",
"days",
"if",
"span",
".",
"hours",
">",
"0",
"or",
"text",
":",
"# always show hours when > 1 day",
"# TRANSLATORS: Time duration (hours:minutes:seconds)",
"text",
"+=",
"_",
"(",
"'%d:%02d:%02d'",
")",
"%",
"(",
"span",
".",
"hours",
",",
"span",
".",
"minutes",
",",
"span",
".",
"seconds",
")",
"else",
":",
"# TRANSLATORS: Time duration (minutes:seconds)",
"text",
"+=",
"_",
"(",
"'%d:%02d'",
")",
"%",
"(",
"span",
".",
"minutes",
",",
"span",
".",
"seconds",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid argument \"%s\" passed to parameter '",
"'\"format\" for tag \"__length\", possible arguments are '",
"'\"short\", \"long\" and \"verbose\"'",
"%",
"format",
")",
"return",
"text"
] |
https://github.com/exaile/exaile/blob/a7b58996c5c15b3aa7b9975ac13ee8f784ef4689/xl/formatter.py#L582-L639
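A self-contained sketch of just the 'short' branch above, with `divmod` standing in for the `TimeSpan` helper and the gettext wrappers dropped.

# 'short' branch only; divmod replaces the TimeSpan helper used above.
def format_short(value):
    minutes, seconds = divmod(int(value), 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    text = '%dd ' % days if days else ''
    if hours or text:  # always show hours when > 1 day
        return text + '%d:%02d:%02d' % (hours, minutes, seconds)
    return '%d:%02d' % (minutes, seconds)

print(format_short(3762))  # 1:02:42
print(format_short(162))   # 2:42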
|
|
d6t/d6tflow
|
ccd161057793e04ac0d090a4968f1ac9abb43e5b
|
d6tflow/functional.py
|
python
|
Workflow.outputLoad
|
(self, func_to_run, *args, **kwargs)
|
Loads all or several outputs from flow step.
Args:
func_to_run: flow step function
keys (list): list of data to load
as_dict (bool): return the output as a dict
cached (bool): cache data in memory
Returns: list or dict of all task output
|
Loads all or several outputs from flow step.
|
[
"Loads",
"all",
"or",
"several",
"outputs",
"from",
"flow",
"step",
"."
] |
def outputLoad(self, func_to_run, *args, **kwargs):
"""
Loads all or several outputs from flow step.
Args:
func_to_run: flow step function
keys (list): list of data to load
as_dict (bool): return the output as a dict
cached (bool): cache data in memory
Returns: list or dict of all task output
"""
if self.multi_params:
output = {}
for params in self.multi_params:
print(self.multi_params_tasks[params])
output[params] = self.multi_params_tasks[params].outputLoad(
*args, **kwargs)
return output
else:
name = func_to_run.__name__
if name in self.instantiated_tasks:
return self.instantiated_tasks[name].outputLoad(*args, **kwargs)
raise RuntimeError(
f"The function {name} has not been run yet! Please run the function using WorkflowObject.run()")
|
[
"def",
"outputLoad",
"(",
"self",
",",
"func_to_run",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"multi_params",
":",
"output",
"=",
"{",
"}",
"for",
"params",
"in",
"self",
".",
"multi_params",
":",
"print",
"(",
"self",
".",
"multi_params_tasks",
"[",
"params",
"]",
")",
"output",
"[",
"params",
"]",
"=",
"self",
".",
"multi_params_tasks",
"[",
"params",
"]",
".",
"outputLoad",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"output",
"else",
":",
"name",
"=",
"func_to_run",
".",
"__name__",
"if",
"name",
"in",
"self",
".",
"instantiated_tasks",
":",
"return",
"self",
".",
"instantiated_tasks",
"[",
"name",
"]",
".",
"outputLoad",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"raise",
"RuntimeError",
"(",
"f\"The function {name} has not been run yet! Please run the function using WorkflowObject.run()\"",
")"
] |
https://github.com/d6t/d6tflow/blob/ccd161057793e04ac0d090a4968f1ac9abb43e5b/d6tflow/functional.py#L225-L249
|
||
rhinstaller/anaconda
|
63edc8680f1b05cbfe11bef28703acba808c5174
|
pyanaconda/modules/network/device_configuration.py
|
python
|
DeviceConfigurations._has_read_only_active_connection
|
(self, device)
|
return False
|
Does the device have a read-only active connection?
:param device: NetworkManager device object
:type device: NMDevice
|
Does the device have a read-only active connection?
|
[
"Does",
"the",
"device",
"have",
"read",
"-",
"only",
"active",
"connection",
"?"
] |
def _has_read_only_active_connection(self, device):
"""Does the device have read-only active connection ?
:param device: NetworkManager device object
:type device: NMDevice
"""
ac = device.get_active_connection()
if ac:
rc = ac.get_connection()
# Getting of NMRemoteConnection can fail (None), isn't it a bug in NM?
if rc:
con_setting = rc.get_setting_connection()
if con_setting and con_setting.get_read_only():
return True
else:
log.debug("can't get remote connection of active connection "
"of device %s", device.get_iface())
return False
|
[
"def",
"_has_read_only_active_connection",
"(",
"self",
",",
"device",
")",
":",
"ac",
"=",
"device",
".",
"get_active_connection",
"(",
")",
"if",
"ac",
":",
"rc",
"=",
"ac",
".",
"get_connection",
"(",
")",
"# Getting of NMRemoteConnection can fail (None), isn't it a bug in NM?",
"if",
"rc",
":",
"con_setting",
"=",
"rc",
".",
"get_setting_connection",
"(",
")",
"if",
"con_setting",
"and",
"con_setting",
".",
"get_read_only",
"(",
")",
":",
"return",
"True",
"else",
":",
"log",
".",
"debug",
"(",
"\"can't get remote connection of active connection \"",
"\"of device %s\"",
",",
"device",
".",
"get_iface",
"(",
")",
")",
"return",
"False"
] |
https://github.com/rhinstaller/anaconda/blob/63edc8680f1b05cbfe11bef28703acba808c5174/pyanaconda/modules/network/device_configuration.py#L191-L208
|
|
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/logic/boolformula.py
|
python
|
BooleanFormula.__pow__
|
(self, other)
|
return self.add_statement(other, '^')
|
r"""
Overload the ``^`` operator to 'xor' two statements together.
INPUT:
- ``other`` -- a boolean formula; this is the formula on
the right side of the operator
OUTPUT:
A boolean formula of the form ``self ^ other``.
EXAMPLES:
This example shows how to combine two formulas with ``^``::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("a&b")
sage: f = propcalc.formula("c^d")
sage: s ^ f
(a&b)^(c^d)
.. TODO::
This function seems to be identical to ``__xor__``.
Thus, this function should be replaced with ``__xor__`` everywhere
that it appears in the logic module. Then it can be deleted
altogether.
|
r"""
Overload the ``^`` operator to 'xor' two statements together.
|
[
"r",
"Overload",
"the",
"^",
"operator",
"to",
"xor",
"two",
"statements",
"together",
"."
] |
def __pow__(self, other):
r"""
Overload the ``^`` operator to 'xor' two statements together.
INPUT:
- ``other`` -- a boolean formula; this is the formula on
the right side of the operator
OUTPUT:
A boolean formula of the form ``self ^ other``.
EXAMPLES:
This example shows how to combine two formulas with ``^``::
sage: import sage.logic.propcalc as propcalc
sage: s = propcalc.formula("a&b")
sage: f = propcalc.formula("c^d")
sage: s ^ f
(a&b)^(c^d)
.. TODO::
This function seems to be identical to ``__xor__``.
Thus, this function should be replaced with ``__xor__`` everywhere
that it appears in the logic module. Then it can be deleted
altogether.
"""
return self.add_statement(other, '^')
|
[
"def",
"__pow__",
"(",
"self",
",",
"other",
")",
":",
"return",
"self",
".",
"add_statement",
"(",
"other",
",",
"'^'",
")"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/logic/boolformula.py#L400-L430
|
|
Jack-Cherish/Deep-Learning
|
5fd254b61ad45367fbae28c49976e82b14ff7110
|
Tutorial/lesson-5/cnn.py
|
python
|
Filter.update
|
(self, learning_rate)
|
[] |
def update(self, learning_rate):
self.weights -= learning_rate * self.weights_grad
self.bias -= learning_rate * self.bias_grad
|
[
"def",
"update",
"(",
"self",
",",
"learning_rate",
")",
":",
"self",
".",
"weights",
"-=",
"learning_rate",
"*",
"self",
".",
"weights_grad",
"self",
".",
"bias",
"-=",
"learning_rate",
"*",
"self",
".",
"bias_grad"
] |
https://github.com/Jack-Cherish/Deep-Learning/blob/5fd254b61ad45367fbae28c49976e82b14ff7110/Tutorial/lesson-5/cnn.py#L116-L118
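The same vanilla gradient-descent step on plain NumPy arrays; shapes and values are made up for illustration.

import numpy as np

# One SGD step: parameter -= learning_rate * gradient.
weights = np.ones((3, 3))
bias = 0.0
weights_grad = np.full((3, 3), 0.5)   # pretend gradients from backprop
bias_grad = 0.1
learning_rate = 0.01

weights -= learning_rate * weights_grad
bias -= learning_rate * bias_grad
print(weights[0, 0], bias)            # 0.995 -0.001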
|
||||
wnhsu/FactorizedHierarchicalVAE
|
7e3e23aebff70df2bd038f059d38292b864e44c1
|
src/parsers/dataset_parsers.py
|
python
|
parse_label_paths
|
(raw_str)
|
return raw_toks
|
raw str in the format of:
[name_1:]n_class_1:path_1,[name_2:]n_class_2:path_2,...
|
raw str in the format of:
[name_1:]n_class_1:path_1,[name_2:]n_class_2:path_2,...
|
[
"raw",
"str",
"in",
"the",
"format",
"of",
":",
"[",
"name_1",
":",
"]",
"n_class_1",
":",
"path_1",
"[",
"name_2",
":",
"]",
"n_class_2",
":",
"path_2",
"..."
] |
def parse_label_paths(raw_str):
"""
raw str in the format of:
[name_1:]n_class_1:path_1,[name_2:]n_class_2:path_2,...
"""
raw_toks = [tok.split(":") for tok in raw_str.split(",") if tok]
if raw_toks and len(raw_toks[0]) == 3:
raw_toks = OrderedDict([(tok[0], (int(tok[1]), tok[2])) for tok in raw_toks])
return raw_toks
|
[
"def",
"parse_label_paths",
"(",
"raw_str",
")",
":",
"raw_toks",
"=",
"[",
"tok",
".",
"split",
"(",
"\":\"",
")",
"for",
"tok",
"in",
"raw_str",
".",
"split",
"(",
"\",\"",
")",
"if",
"tok",
"]",
"if",
"raw_toks",
"and",
"len",
"(",
"raw_toks",
"[",
"0",
"]",
")",
"==",
"3",
":",
"raw_toks",
"=",
"OrderedDict",
"(",
"[",
"(",
"tok",
"[",
"0",
"]",
",",
"(",
"int",
"(",
"tok",
"[",
"1",
"]",
")",
",",
"tok",
"[",
"2",
"]",
")",
")",
"for",
"tok",
"in",
"raw_toks",
"]",
")",
"return",
"raw_toks"
] |
https://github.com/wnhsu/FactorizedHierarchicalVAE/blob/7e3e23aebff70df2bd038f059d38292b864e44c1/src/parsers/dataset_parsers.py#L6-L14
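A usage sketch on a hypothetical three-field input (`[name:]n_class:path`); with names present the parser returns an OrderedDict.

from collections import OrderedDict

raw_str = 'phone:42:/data/phone.scp,spk:10:/data/spk.scp'  # made-up paths
raw_toks = [tok.split(':') for tok in raw_str.split(',') if tok]
if raw_toks and len(raw_toks[0]) == 3:
    raw_toks = OrderedDict([(tok[0], (int(tok[1]), tok[2])) for tok in raw_toks])
print(raw_toks['phone'])  # (42, '/data/phone.scp')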
|
|
sametmax/Django--an-app-at-a-time
|
99eddf12ead76e6dfbeb09ce0bae61e282e22f8a
|
ignore_this_directory/django/db/models/query_utils.py
|
python
|
RegisterLookupMixin._unregister_lookup
|
(cls, lookup, lookup_name=None)
|
Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe.
|
Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe.
|
[
"Remove",
"given",
"lookup",
"from",
"cls",
"lookups",
".",
"For",
"use",
"in",
"tests",
"only",
"as",
"it",
"s",
"not",
"thread",
"-",
"safe",
"."
] |
def _unregister_lookup(cls, lookup, lookup_name=None):
"""
Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe.
"""
if lookup_name is None:
lookup_name = lookup.lookup_name
del cls.class_lookups[lookup_name]
|
[
"def",
"_unregister_lookup",
"(",
"cls",
",",
"lookup",
",",
"lookup_name",
"=",
"None",
")",
":",
"if",
"lookup_name",
"is",
"None",
":",
"lookup_name",
"=",
"lookup",
".",
"lookup_name",
"del",
"cls",
".",
"class_lookups",
"[",
"lookup_name",
"]"
] |
https://github.com/sametmax/Django--an-app-at-a-time/blob/99eddf12ead76e6dfbeb09ce0bae61e282e22f8a/ignore_this_directory/django/db/models/query_utils.py#L211-L218
|
||
jkszw2014/bert-kbqa-NLPCC2017
|
c09511829377b959a8ad5c81f5581e742ba13dc9
|
AttributeMap-BERT-Classification/run_classifier.py
|
python
|
MnliProcessor.get_labels
|
(self)
|
return ["contradiction", "entailment", "neutral"]
|
See base class.
|
See base class.
|
[
"See",
"base",
"class",
"."
] |
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
|
[
"def",
"get_labels",
"(",
"self",
")",
":",
"return",
"[",
"\"contradiction\"",
",",
"\"entailment\"",
",",
"\"neutral\"",
"]"
] |
https://github.com/jkszw2014/bert-kbqa-NLPCC2017/blob/c09511829377b959a8ad5c81f5581e742ba13dc9/AttributeMap-BERT-Classification/run_classifier.py#L274-L276
|
|
AstroPrint/AstroBox
|
e7e3b8a7d33ea85fcb6b2696869c0d719ceb8b75
|
src/octoprint/server/util.py
|
python
|
LargeResponseHandler.initialize
|
(self, path, default_filename=None, as_attachment=False, access_validation=None)
|
[] |
def initialize(self, path, default_filename=None, as_attachment=False, access_validation=None):
StaticFileHandler.initialize(self, path, default_filename)
self._as_attachment = as_attachment
self._access_validation = access_validation
|
[
"def",
"initialize",
"(",
"self",
",",
"path",
",",
"default_filename",
"=",
"None",
",",
"as_attachment",
"=",
"False",
",",
"access_validation",
"=",
"None",
")",
":",
"StaticFileHandler",
".",
"initialize",
"(",
"self",
",",
"path",
",",
"default_filename",
")",
"self",
".",
"_as_attachment",
"=",
"as_attachment",
"self",
".",
"_access_validation",
"=",
"access_validation"
] |
https://github.com/AstroPrint/AstroBox/blob/e7e3b8a7d33ea85fcb6b2696869c0d719ceb8b75/src/octoprint/server/util.py#L259-L262
|
||||
refraction-ray/xalpha
|
943d7746e22c764d46cb8b55f53f4591d7912b54
|
xalpha/evaluate.py
|
python
|
evaluate.v_netvalue
|
(self, end=yesterdayobj(), vopts=None, rendered=True)
|
Net value comparison visualization for the reference funds or indices, normalized to a common starting point
:param end: string or object of date, the end date of the line
:param rendered: bool, whether to render the chart for notebook display
:param vopts: dict, options for pyecharts instead of builtin settings
:returns: pyecharts.charts.Line.render_notebook()
|
Net value comparison visualization for the reference funds or indices, normalized to a common starting point
|
[
"起点对齐归一的,各参考基金或指数的净值比较可视化"
] |
def v_netvalue(self, end=yesterdayobj(), vopts=None, rendered=True):
"""
Net value comparison visualization for the reference funds or indices, normalized to a common starting point
:param end: string or object of date, the end date of the line
:param rendered: bool, whether to render the chart for notebook display
:param vopts: dict, options for pyecharts instead of builtin settings
:returns: pyecharts.charts.Line.render_notebook()
"""
partprice = self.totprice[self.totprice["date"] <= end]
line = Line()
if vopts is None:
vopts = line_opts
line.set_global_opts(**vopts)
line.add_xaxis([d.date() for d in list(partprice.date)])
for fund in self.fundobjs:
line.add_yaxis(
series_name=fund.name,
y_axis=list(partprice[fund.code]),
is_symbol_show=False,
)
if rendered:
return line.render_notebook()
else:
return line
|
[
"def",
"v_netvalue",
"(",
"self",
",",
"end",
"=",
"yesterdayobj",
"(",
")",
",",
"vopts",
"=",
"None",
",",
"rendered",
"=",
"True",
")",
":",
"partprice",
"=",
"self",
".",
"totprice",
"[",
"self",
".",
"totprice",
"[",
"\"date\"",
"]",
"<=",
"end",
"]",
"line",
"=",
"Line",
"(",
")",
"if",
"vopts",
"is",
"None",
":",
"vopts",
"=",
"line_opts",
"line",
".",
"set_global_opts",
"(",
"*",
"*",
"vopts",
")",
"line",
".",
"add_xaxis",
"(",
"[",
"d",
".",
"date",
"(",
")",
"for",
"d",
"in",
"list",
"(",
"partprice",
".",
"date",
")",
"]",
")",
"for",
"fund",
"in",
"self",
".",
"fundobjs",
":",
"line",
".",
"add_yaxis",
"(",
"series_name",
"=",
"fund",
".",
"name",
",",
"y_axis",
"=",
"list",
"(",
"partprice",
"[",
"fund",
".",
"code",
"]",
")",
",",
"is_symbol_show",
"=",
"False",
",",
")",
"if",
"rendered",
":",
"return",
"line",
".",
"render_notebook",
"(",
")",
"else",
":",
"return",
"line"
] |
https://github.com/refraction-ray/xalpha/blob/943d7746e22c764d46cb8b55f53f4591d7912b54/xalpha/evaluate.py#L53-L78
|
||
DamnWidget/anaconda
|
a9998fb362320f907d5ccbc6fcf5b62baca677c0
|
anaconda_lib/linting/pycodestyle.py
|
python
|
extraneous_whitespace
|
(logical_line)
|
r"""Avoid extraneous whitespace.
Avoid extraneous whitespace in these situations:
- Immediately inside parentheses, brackets or braces.
- Immediately before a comma, semicolon, or colon.
Okay: spam(ham[1], {eggs: 2})
E201: spam( ham[1], {eggs: 2})
E201: spam(ham[ 1], {eggs: 2})
E201: spam(ham[1], { eggs: 2})
E202: spam(ham[1], {eggs: 2} )
E202: spam(ham[1 ], {eggs: 2})
E202: spam(ham[1], {eggs: 2 })
E203: if x == 4: print x, y; x, y = y , x
E203: if x == 4: print x, y ; x, y = y, x
E203: if x == 4 : print x, y; x, y = y, x
|
r"""Avoid extraneous whitespace.
|
[
"r",
"Avoid",
"extraneous",
"whitespace",
"."
] |
def extraneous_whitespace(logical_line):
r"""Avoid extraneous whitespace.
Avoid extraneous whitespace in these situations:
- Immediately inside parentheses, brackets or braces.
- Immediately before a comma, semicolon, or colon.
Okay: spam(ham[1], {eggs: 2})
E201: spam( ham[1], {eggs: 2})
E201: spam(ham[ 1], {eggs: 2})
E201: spam(ham[1], { eggs: 2})
E202: spam(ham[1], {eggs: 2} )
E202: spam(ham[1 ], {eggs: 2})
E202: spam(ham[1], {eggs: 2 })
E203: if x == 4: print x, y; x, y = y , x
E203: if x == 4: print x, y ; x, y = y, x
E203: if x == 4 : print x, y; x, y = y, x
"""
line = logical_line
for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line):
text = match.group()
char = text.strip()
found = match.start()
if text == char + ' ':
# assert char in '([{'
yield found + 1, "E201 whitespace after '%s'" % char
elif line[found - 1] != ',':
code = ('E202' if char in '}])' else 'E203') # if char in ',;:'
yield found, "%s whitespace before '%s'" % (code, char)
|
[
"def",
"extraneous_whitespace",
"(",
"logical_line",
")",
":",
"line",
"=",
"logical_line",
"for",
"match",
"in",
"EXTRANEOUS_WHITESPACE_REGEX",
".",
"finditer",
"(",
"line",
")",
":",
"text",
"=",
"match",
".",
"group",
"(",
")",
"char",
"=",
"text",
".",
"strip",
"(",
")",
"found",
"=",
"match",
".",
"start",
"(",
")",
"if",
"text",
"==",
"char",
"+",
"' '",
":",
"# assert char in '([{'",
"yield",
"found",
"+",
"1",
",",
"\"E201 whitespace after '%s'\"",
"%",
"char",
"elif",
"line",
"[",
"found",
"-",
"1",
"]",
"!=",
"','",
":",
"code",
"=",
"(",
"'E202'",
"if",
"char",
"in",
"'}])'",
"else",
"'E203'",
")",
"# if char in ',;:'",
"yield",
"found",
",",
"\"%s whitespace before '%s'\"",
"%",
"(",
"code",
",",
"char",
")"
] |
https://github.com/DamnWidget/anaconda/blob/a9998fb362320f907d5ccbc6fcf5b62baca677c0/anaconda_lib/linting/pycodestyle.py#L409-L438
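The module-level `EXTRANEOUS_WHITESPACE_REGEX` is defined outside this record; the sketch below assumes the two-character pattern the check's logic implies and runs it on one sample line.

import re

# Assumed pattern: an opening bracket followed by a space, or a space
# followed by a closing bracket / comma / semicolon / colon.
EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[\[({] | [\]}),;:]')

line = 'spam( ham[1], {eggs: 2} )'
for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line):
    text = match.group()
    char = text.strip()
    found = match.start()
    if text == char + ' ':
        print(found + 1, "E201 whitespace after '%s'" % char)
    elif line[found - 1] != ',':
        code = 'E202' if char in '}])' else 'E203'
        print(found, "%s whitespace before '%s'" % (code, char))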
|
||
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/idlelib/replace.py
|
python
|
ReplaceDialog.show_hit
|
(self, first, last)
|
Highlight text from 'first' to 'last'.
'first', 'last' - Text indices
|
Highlight text from 'first' to 'last'.
'first', 'last' - Text indices
|
[
"Highlight",
"text",
"from",
"first",
"to",
"last",
".",
"first",
"last",
"-",
"Text",
"indices"
] |
def show_hit(self, first, last):
"""Highlight text from 'first' to 'last'.
'first', 'last' - Text indices"""
text = self.text
text.mark_set("insert", first)
text.tag_remove("sel", "1.0", "end")
text.tag_add("sel", first, last)
text.tag_remove("hit", "1.0", "end")
if first == last:
text.tag_add("hit", first)
else:
text.tag_add("hit", first, last)
text.see("insert")
text.update_idletasks()
|
[
"def",
"show_hit",
"(",
"self",
",",
"first",
",",
"last",
")",
":",
"text",
"=",
"self",
".",
"text",
"text",
".",
"mark_set",
"(",
"\"insert\"",
",",
"first",
")",
"text",
".",
"tag_remove",
"(",
"\"sel\"",
",",
"\"1.0\"",
",",
"\"end\"",
")",
"text",
".",
"tag_add",
"(",
"\"sel\"",
",",
"first",
",",
"last",
")",
"text",
".",
"tag_remove",
"(",
"\"hit\"",
",",
"\"1.0\"",
",",
"\"end\"",
")",
"if",
"first",
"==",
"last",
":",
"text",
".",
"tag_add",
"(",
"\"hit\"",
",",
"first",
")",
"else",
":",
"text",
".",
"tag_add",
"(",
"\"hit\"",
",",
"first",
",",
"last",
")",
"text",
".",
"see",
"(",
"\"insert\"",
")",
"text",
".",
"update_idletasks",
"(",
")"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/idlelib/replace.py#L186-L199
|
||
pulp/pulp
|
a0a28d804f997b6f81c391378aff2e4c90183df9
|
server/pulp/server/managers/consumer/group/cud.py
|
python
|
ConsumerGroupManager.add_notes
|
(self, group_id, notes)
|
Add a set of notes to a consumer group.
@param group_id: unique id of the group to add notes to
@type group_id: str
@param notes: notes to add to the consumer group
@type notes: dict
|
Add a set of notes to a consumer group.
|
[
"Add",
"a",
"set",
"of",
"notes",
"to",
"a",
"consumer",
"group",
"."
] |
def add_notes(self, group_id, notes):
"""
Add a set of notes to a consumer group.
@param group_id: unique id of the group to add notes to
@type group_id: str
@param notes: notes to add to the consumer group
@type notes: dict
"""
group_collection = validate_existing_consumer_group(group_id)
set_doc = dict(('notes.' + k, v) for k, v in notes.items())
if set_doc:
group_collection.update({'id': group_id}, {'$set': set_doc})
|
[
"def",
"add_notes",
"(",
"self",
",",
"group_id",
",",
"notes",
")",
":",
"group_collection",
"=",
"validate_existing_consumer_group",
"(",
"group_id",
")",
"set_doc",
"=",
"dict",
"(",
"(",
"'notes.'",
"+",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"notes",
".",
"items",
"(",
")",
")",
"if",
"set_doc",
":",
"group_collection",
".",
"update",
"(",
"{",
"'id'",
":",
"group_id",
"}",
",",
"{",
"'$set'",
":",
"set_doc",
"}",
")"
] |
https://github.com/pulp/pulp/blob/a0a28d804f997b6f81c391378aff2e4c90183df9/server/pulp/server/managers/consumer/group/cud.py#L193-L204
|
||
kennethreitz-archive/requests3
|
69eb662703b40db58fdc6c095d0fe130c56649bb
|
requests3/_structures.py
|
python
|
CaseInsensitiveDict.__delitem__
|
(self, key)
|
[] |
def __delitem__(self, key):
del self._store[key.lower()]
|
[
"def",
"__delitem__",
"(",
"self",
",",
"key",
")",
":",
"del",
"self",
".",
"_store",
"[",
"key",
".",
"lower",
"(",
")",
"]"
] |
https://github.com/kennethreitz-archive/requests3/blob/69eb662703b40db58fdc6c095d0fe130c56649bb/requests3/_structures.py#L57-L58
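A minimal sketch of the case-insensitive store this `__delitem__` relies on: keys are lowercased on the way in, so deletion lowercases them too. (The real class also keeps the originally-cased key; this sketch keeps only the value.)

# Minimal case-insensitive dict sketch; real implementations also keep
# the originally-cased key, which is omitted here for brevity.
class CaseInsensitiveDict:
    def __init__(self):
        self._store = {}

    def __setitem__(self, key, value):
        self._store[key.lower()] = value

    def __delitem__(self, key):
        del self._store[key.lower()]

d = CaseInsensitiveDict()
d['Content-Type'] = 'text/html'
del d['content-type']   # resolves to the same lowercased key
print(d._store)         # {}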
|
||||
i-pan/kaggle-rsna18
|
2db498fe99615d935aa676f04847d0c562fd8e46
|
models/DeformableConvNets/deeplab/core/callback.py
|
python
|
Speedometer.__call__
|
(self, param)
|
Callback to Show speed.
|
Callback to Show speed.
|
[
"Callback",
"to",
"Show",
"speed",
"."
] |
def __call__(self, param):
"""Callback to Show speed."""
count = param.nbatch
if self.last_count > count:
self.init = False
self.last_count = count
if self.init:
if count % self.frequent == 0:
speed = self.frequent * self.batch_size / (time.time() - self.tic)
s = ''
if param.eval_metric is not None:
name, value = param.eval_metric.get()
s = "Epoch[%d] Batch [%d]\tSpeed: %.2f samples/sec\tTrain-" % (param.epoch, count, speed)
for n, v in zip(name, value):
s += "%s=%f,\t" % (n, v)
else:
s = "Iter[%d] Batch [%d]\tSpeed: %.2f samples/sec" % (param.epoch, count, speed)
logging.info(s)
print(s)
self.tic = time.time()
else:
self.init = True
self.tic = time.time()
|
[
"def",
"__call__",
"(",
"self",
",",
"param",
")",
":",
"count",
"=",
"param",
".",
"nbatch",
"if",
"self",
".",
"last_count",
">",
"count",
":",
"self",
".",
"init",
"=",
"False",
"self",
".",
"last_count",
"=",
"count",
"if",
"self",
".",
"init",
":",
"if",
"count",
"%",
"self",
".",
"frequent",
"==",
"0",
":",
"speed",
"=",
"self",
".",
"frequent",
"*",
"self",
".",
"batch_size",
"/",
"(",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"tic",
")",
"s",
"=",
"''",
"if",
"param",
".",
"eval_metric",
"is",
"not",
"None",
":",
"name",
",",
"value",
"=",
"param",
".",
"eval_metric",
".",
"get",
"(",
")",
"s",
"=",
"\"Epoch[%d] Batch [%d]\\tSpeed: %.2f samples/sec\\tTrain-\"",
"%",
"(",
"param",
".",
"epoch",
",",
"count",
",",
"speed",
")",
"for",
"n",
",",
"v",
"in",
"zip",
"(",
"name",
",",
"value",
")",
":",
"s",
"+=",
"\"%s=%f,\\t\"",
"%",
"(",
"n",
",",
"v",
")",
"else",
":",
"s",
"=",
"\"Iter[%d] Batch [%d]\\tSpeed: %.2f samples/sec\"",
"%",
"(",
"param",
".",
"epoch",
",",
"count",
",",
"speed",
")",
"logging",
".",
"info",
"(",
"s",
")",
"print",
"(",
"s",
")",
"self",
".",
"tic",
"=",
"time",
".",
"time",
"(",
")",
"else",
":",
"self",
".",
"init",
"=",
"True",
"self",
".",
"tic",
"=",
"time",
".",
"time",
"(",
")"
] |
https://github.com/i-pan/kaggle-rsna18/blob/2db498fe99615d935aa676f04847d0c562fd8e46/models/DeformableConvNets/deeplab/core/callback.py#L26-L50
|
||
JaniceWuo/MovieRecommend
|
4c86db64ca45598917d304f535413df3bc9fea65
|
movierecommend/venv1/Lib/site-packages/django/template/defaulttags.py
|
python
|
LoremNode.render
|
(self, context)
|
return '\n\n'.join(paras)
|
[] |
def render(self, context):
try:
count = int(self.count.resolve(context))
except (ValueError, TypeError):
count = 1
if self.method == 'w':
return words(count, common=self.common)
else:
paras = paragraphs(count, common=self.common)
if self.method == 'p':
paras = ['<p>%s</p>' % p for p in paras]
return '\n\n'.join(paras)
|
[
"def",
"render",
"(",
"self",
",",
"context",
")",
":",
"try",
":",
"count",
"=",
"int",
"(",
"self",
".",
"count",
".",
"resolve",
"(",
"context",
")",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"count",
"=",
"1",
"if",
"self",
".",
"method",
"==",
"'w'",
":",
"return",
"words",
"(",
"count",
",",
"common",
"=",
"self",
".",
"common",
")",
"else",
":",
"paras",
"=",
"paragraphs",
"(",
"count",
",",
"common",
"=",
"self",
".",
"common",
")",
"if",
"self",
".",
"method",
"==",
"'p'",
":",
"paras",
"=",
"[",
"'<p>%s</p>'",
"%",
"p",
"for",
"p",
"in",
"paras",
"]",
"return",
"'\\n\\n'",
".",
"join",
"(",
"paras",
")"
] |
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/django/template/defaulttags.py#L331-L342
|
|||
IronLanguages/ironpython3
|
7a7bb2a872eeab0d1009fc8a6e24dca43f65b693
|
Src/StdLib/Lib/asyncio/selector_events.py
|
python
|
BaseSelectorEventLoop._sock_accept
|
(self, fut, registered, sock)
|
[] |
def _sock_accept(self, fut, registered, sock):
fd = sock.fileno()
if registered:
self.remove_reader(fd)
if fut.cancelled():
return
try:
conn, address = sock.accept()
conn.setblocking(False)
except (BlockingIOError, InterruptedError):
self.add_reader(fd, self._sock_accept, fut, True, sock)
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result((conn, address))
|
[
"def",
"_sock_accept",
"(",
"self",
",",
"fut",
",",
"registered",
",",
"sock",
")",
":",
"fd",
"=",
"sock",
".",
"fileno",
"(",
")",
"if",
"registered",
":",
"self",
".",
"remove_reader",
"(",
"fd",
")",
"if",
"fut",
".",
"cancelled",
"(",
")",
":",
"return",
"try",
":",
"conn",
",",
"address",
"=",
"sock",
".",
"accept",
"(",
")",
"conn",
".",
"setblocking",
"(",
"False",
")",
"except",
"(",
"BlockingIOError",
",",
"InterruptedError",
")",
":",
"self",
".",
"add_reader",
"(",
"fd",
",",
"self",
".",
"_sock_accept",
",",
"fut",
",",
"True",
",",
"sock",
")",
"except",
"Exception",
"as",
"exc",
":",
"fut",
".",
"set_exception",
"(",
"exc",
")",
"else",
":",
"fut",
".",
"set_result",
"(",
"(",
"conn",
",",
"address",
")",
")"
] |
https://github.com/IronLanguages/ironpython3/blob/7a7bb2a872eeab0d1009fc8a6e24dca43f65b693/Src/StdLib/Lib/asyncio/selector_events.py#L460-L474
|
||||
JaniceWuo/MovieRecommend
|
4c86db64ca45598917d304f535413df3bc9fea65
|
movierecommend/venv1/Lib/site-packages/django/http/request.py
|
python
|
HttpRequest.readline
|
(self, *args, **kwargs)
|
[] |
def readline(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.readline(*args, **kwargs)
except IOError as e:
six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
|
[
"def",
"readline",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_read_started",
"=",
"True",
"try",
":",
"return",
"self",
".",
"_stream",
".",
"readline",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"IOError",
"as",
"e",
":",
"six",
".",
"reraise",
"(",
"UnreadablePostError",
",",
"UnreadablePostError",
"(",
"*",
"e",
".",
"args",
")",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
")"
] |
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/django/http/request.py#L335-L340
|
||||
appian42/kaggle-rsna-intracranial-hemorrhage
|
54ea4cc228a03d8a2d7e3e10aa71dde6673c9a3d
|
src/cnn/main.py
|
python
|
calc_auc
|
(targets, outputs)
|
return {
'auc': (macro + micro) / 2,
'auc_macro': macro,
'auc_micro': micro,
}
|
[] |
def calc_auc(targets, outputs):
macro = roc_auc_score(np.round(targets), outputs, average='macro')
micro = roc_auc_score(np.round(targets), outputs, average='micro')
return {
'auc': (macro + micro) / 2,
'auc_macro': macro,
'auc_micro': micro,
}
|
[
"def",
"calc_auc",
"(",
"targets",
",",
"outputs",
")",
":",
"macro",
"=",
"roc_auc_score",
"(",
"np",
".",
"round",
"(",
"targets",
")",
",",
"outputs",
",",
"average",
"=",
"'macro'",
")",
"micro",
"=",
"roc_auc_score",
"(",
"np",
".",
"round",
"(",
"targets",
")",
",",
"outputs",
",",
"average",
"=",
"'micro'",
")",
"return",
"{",
"'auc'",
":",
"(",
"macro",
"+",
"micro",
")",
"/",
"2",
",",
"'auc_macro'",
":",
"macro",
",",
"'auc_micro'",
":",
"micro",
",",
"}"
] |
https://github.com/appian42/kaggle-rsna-intracranial-hemorrhage/blob/54ea4cc228a03d8a2d7e3e10aa71dde6673c9a3d/src/cnn/main.py#L236-L243
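A usage sketch with made-up multilabel targets and scores (requires scikit-learn and NumPy).

import numpy as np
from sklearn.metrics import roc_auc_score

targets = np.array([[1, 0], [0, 1], [1, 1], [0, 0]], dtype=float)  # made up
outputs = np.array([[0.9, 0.2], [0.1, 0.8], [0.7, 0.6], [0.3, 0.4]])
macro = roc_auc_score(np.round(targets), outputs, average='macro')
micro = roc_auc_score(np.round(targets), outputs, average='micro')
print({'auc': (macro + micro) / 2, 'auc_macro': macro, 'auc_micro': micro})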
|
|||
triaquae/triaquae
|
bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9
|
TriAquae/models/Ubuntu_13/pyasn1/type/namedtype.py
|
python
|
NamedType.getType
|
(self)
|
return self.__type
|
[] |
def getType(self): return self.__type
|
[
"def",
"getType",
"(",
"self",
")",
":",
"return",
"self",
".",
"__type"
] |
https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/Ubuntu_13/pyasn1/type/namedtype.py#L14-L14
|
|||
davidoren/CuckooSploit
|
3fce8183bee8f7917e08f765ce2a01c921f86354
|
lib/cuckoo/common/abstracts.py
|
python
|
Signature.get_argument
|
(self, call, name)
|
return None
|
Retrieves the value of a specific argument from an API call.
@param call: API call object.
@param name: name of the argument to retrieve.
@return: value of the required argument.
|
Retrieves the value of a specific argument from an API call.
|
[
"Retrieves",
"the",
"value",
"of",
"a",
"specific",
"argument",
"from",
"an",
"API",
"call",
"."
] |
def get_argument(self, call, name):
"""Retrieves the value of a specific argument from an API call.
@param call: API call object.
@param name: name of the argument to retrieve.
@return: value of the required argument.
"""
# Check if the call passed to it was cached already.
# If not, we can start caching it and store a copy converted to a dict.
if call is not self._current_call_cache:
self._current_call_cache = call
self._current_call_dict = dict()
for argument in call["arguments"]:
self._current_call_dict[argument["name"]] = argument["value"]
# Return the required argument.
if name in self._current_call_dict:
return self._current_call_dict[name]
return None
|
[
"def",
"get_argument",
"(",
"self",
",",
"call",
",",
"name",
")",
":",
"# Check if the call passed to it was cached already.",
"# If not, we can start caching it and store a copy converted to a dict.",
"if",
"call",
"is",
"not",
"self",
".",
"_current_call_cache",
":",
"self",
".",
"_current_call_cache",
"=",
"call",
"self",
".",
"_current_call_dict",
"=",
"dict",
"(",
")",
"for",
"argument",
"in",
"call",
"[",
"\"arguments\"",
"]",
":",
"self",
".",
"_current_call_dict",
"[",
"argument",
"[",
"\"name\"",
"]",
"]",
"=",
"argument",
"[",
"\"value\"",
"]",
"# Return the required argument.",
"if",
"name",
"in",
"self",
".",
"_current_call_dict",
":",
"return",
"self",
".",
"_current_call_dict",
"[",
"name",
"]",
"return",
"None"
] |
https://github.com/davidoren/CuckooSploit/blob/3fce8183bee8f7917e08f765ce2a01c921f86354/lib/cuckoo/common/abstracts.py#L876-L895
|
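A hedged sketch of the call-object shape get_argument expects; the dict below is invented to match the loop over call["arguments"] in the method body.

call = {"arguments": [
    {"name": "FileName", "value": "C:\\sample.exe"},
    {"name": "Access", "value": "0x80000000"},
]}
# Inside a Signature subclass:
#   self.get_argument(call, "FileName")  -> "C:\\sample.exe"
#   self.get_argument(call, "Missing")   -> None (falls through to return None)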
|
google-research/motion_imitation
|
d0e7b963c5a301984352d25a3ee0820266fa4218
|
motion_imitation/envs/env_wrappers/imitation_task.py
|
python
|
ImitationTask._load_ref_motions
|
(self, filenames)
|
return motions
|
Load reference motions.
Args:
dir: Directory containing the reference motion files.
filenames: Names of files in dir to be loaded.
Returns: List of reference motions loaded from the files.
|
Load reference motions.
|
[
"Load",
"reference",
"motions",
"."
] |
def _load_ref_motions(self, filenames):
"""Load reference motions.
Args:
dir: Directory containing the reference motion files.
filenames: Names of files in dir to be loaded.
Returns: List of reference motions loaded from the files.
"""
num_files = len(filenames)
if num_files == 0:
raise ValueError("No reference motions specified.")
total_time = 0.0
motions = []
for filename in filenames:
curr_motion = motion_data.MotionData(filename)
curr_duration = curr_motion.get_duration()
total_time += curr_duration
motions.append(curr_motion)
logging.info("Loaded {:d} motion clips with {:.3f}s of motion data.".format(
num_files, total_time))
return motions
|
[
"def",
"_load_ref_motions",
"(",
"self",
",",
"filenames",
")",
":",
"num_files",
"=",
"len",
"(",
"filenames",
")",
"if",
"num_files",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"No reference motions specified.\"",
")",
"total_time",
"=",
"0.0",
"motions",
"=",
"[",
"]",
"for",
"filename",
"in",
"filenames",
":",
"curr_motion",
"=",
"motion_data",
".",
"MotionData",
"(",
"filename",
")",
"curr_duration",
"=",
"curr_motion",
".",
"get_duration",
"(",
")",
"total_time",
"+=",
"curr_duration",
"motions",
".",
"append",
"(",
"curr_motion",
")",
"logging",
".",
"info",
"(",
"\"Loaded {:d} motion clips with {:.3f}s of motion data.\"",
".",
"format",
"(",
"num_files",
",",
"total_time",
")",
")",
"return",
"motions"
] |
https://github.com/google-research/motion_imitation/blob/d0e7b963c5a301984352d25a3ee0820266fa4218/motion_imitation/envs/env_wrappers/imitation_task.py#L527-L551
|
|
mpastell/Pweave
|
45b56ec60c5badb4e40796178397a4eae5bdddcb
|
pweave/processors/base.py
|
python
|
PwebProcessorBase._hideinline
|
(self, chunk)
|
return chunk
|
Hide inline code in doc mode
|
Hide inline code in doc mode
|
[
"Hide",
"inline",
"code",
"in",
"doc",
"mode"
] |
def _hideinline(self, chunk):
"""Hide inline code in doc mode"""
splitted = re.split('<%[\w\s\W]*?%>', chunk['content'])
chunk['content'] = ''.join(splitted)
return chunk
|
[
"def",
"_hideinline",
"(",
"self",
",",
"chunk",
")",
":",
"splitted",
"=",
"re",
".",
"split",
"(",
"'<%[\\w\\s\\W]*?%>'",
",",
"chunk",
"[",
"'content'",
"]",
")",
"chunk",
"[",
"'content'",
"]",
"=",
"''",
".",
"join",
"(",
"splitted",
")",
"return",
"chunk"
] |
https://github.com/mpastell/Pweave/blob/45b56ec60c5badb4e40796178397a4eae5bdddcb/pweave/processors/base.py#L263-L267
|
|
4shadoww/hakkuframework
|
409a11fc3819d251f86faa3473439f8c19066a21
|
lib/future/backports/misc.py
|
python
|
Counter.__neg__
|
(self)
|
return Counter() - self
|
Subtracts from an empty counter. Strips positive and zero counts,
and flips the sign on negative counts.
|
Subtracts from an empty counter. Strips positive and zero counts,
and flips the sign on negative counts.
|
[
"Subtracts",
"from",
"an",
"empty",
"counter",
".",
"Strips",
"positive",
"and",
"zero",
"counts",
"and",
"flips",
"the",
"sign",
"on",
"negative",
"counts",
"."
] |
def __neg__(self):
'''Subtracts from an empty counter. Strips positive and zero counts,
and flips the sign on negative counts.
'''
return Counter() - self
|
[
"def",
"__neg__",
"(",
"self",
")",
":",
"return",
"Counter",
"(",
")",
"-",
"self"
] |
https://github.com/4shadoww/hakkuframework/blob/409a11fc3819d251f86faa3473439f8c19066a21/lib/future/backports/misc.py#L631-L636
|
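A quick demonstration with the standard-library collections.Counter, which implements the same __neg__ semantics as the backport above:

from collections import Counter

c = Counter(a=2, b=-3, c=0)
print(-c)  # Counter({'b': 3}): positive and zero counts stripped, negatives flipped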
|
nosmokingbandit/Watcher3
|
0217e75158b563bdefc8e01c3be7620008cf3977
|
lib/cherrypy/_helper.py
|
python
|
url
|
(path='', qs='', script_name=None, base=None, relative=None)
|
return newurl
|
Create an absolute URL for the given path.
If 'path' starts with a slash ('/'), this will return
(base + script_name + path + qs).
If it does not start with a slash, this returns
(base + script_name [+ request.path_info] + path + qs).
If script_name is None, cherrypy.request will be used
to find a script_name, if available.
If base is None, cherrypy.request.base will be used (if available).
Note that you can use cherrypy.tools.proxy to change this.
Finally, note that this function can be used to obtain an absolute URL
for the current request path (minus the querystring) by passing no args.
If you call url(qs=cherrypy.request.query_string), you should get the
original browser URL (assuming no internal redirections).
If relative is None or not provided, request.app.relative_urls will
be used (if available, else False). If False, the output will be an
absolute URL (including the scheme, host, vhost, and script_name).
If True, the output will instead be a URL that is relative to the
current request path, perhaps including '..' atoms. If relative is
the string 'server', the output will instead be a URL that is
relative to the server root; i.e., it will start with a slash.
|
Create an absolute URL for the given path.
|
[
"Create",
"an",
"absolute",
"URL",
"for",
"the",
"given",
"path",
"."
] |
def url(path='', qs='', script_name=None, base=None, relative=None):
"""Create an absolute URL for the given path.
If 'path' starts with a slash ('/'), this will return
(base + script_name + path + qs).
If it does not start with a slash, this returns
(base + script_name [+ request.path_info] + path + qs).
If script_name is None, cherrypy.request will be used
to find a script_name, if available.
If base is None, cherrypy.request.base will be used (if available).
Note that you can use cherrypy.tools.proxy to change this.
Finally, note that this function can be used to obtain an absolute URL
for the current request path (minus the querystring) by passing no args.
If you call url(qs=cherrypy.request.query_string), you should get the
original browser URL (assuming no internal redirections).
If relative is None or not provided, request.app.relative_urls will
be used (if available, else False). If False, the output will be an
absolute URL (including the scheme, host, vhost, and script_name).
If True, the output will instead be a URL that is relative to the
current request path, perhaps including '..' atoms. If relative is
the string 'server', the output will instead be a URL that is
relative to the server root; i.e., it will start with a slash.
"""
if isinstance(qs, (tuple, list, dict)):
qs = _urlencode(qs)
if qs:
qs = '?' + qs
if cherrypy.request.app:
if not path.startswith('/'):
# Append/remove trailing slash from path_info as needed
# (this is to support mistyped URL's without redirecting;
# if you want to redirect, use tools.trailing_slash).
pi = cherrypy.request.path_info
if cherrypy.request.is_index is True:
if not pi.endswith('/'):
pi = pi + '/'
elif cherrypy.request.is_index is False:
if pi.endswith('/') and pi != '/':
pi = pi[:-1]
if path == '':
path = pi
else:
path = _urljoin(pi, path)
if script_name is None:
script_name = cherrypy.request.script_name
if base is None:
base = cherrypy.request.base
newurl = base + script_name + path + qs
else:
# No request.app (we're being called outside a request).
# We'll have to guess the base from server.* attributes.
# This will produce very different results from the above
# if you're using vhosts or tools.proxy.
if base is None:
base = cherrypy.server.base()
path = (script_name or '') + path
newurl = base + path + qs
if './' in newurl:
# Normalize the URL by removing ./ and ../
atoms = []
for atom in newurl.split('/'):
if atom == '.':
pass
elif atom == '..':
atoms.pop()
else:
atoms.append(atom)
newurl = '/'.join(atoms)
# At this point, we should have a fully-qualified absolute URL.
if relative is None:
relative = getattr(cherrypy.request.app, 'relative_urls', False)
# See http://www.ietf.org/rfc/rfc2396.txt
if relative == 'server':
# "A relative reference beginning with a single slash character is
# termed an absolute-path reference, as defined by <abs_path>..."
# This is also sometimes called "server-relative".
newurl = '/' + '/'.join(newurl.split('/', 3)[3:])
elif relative:
# "A relative reference that does not begin with a scheme name
# or a slash character is termed a relative-path reference."
old = url(relative=False).split('/')[:-1]
new = newurl.split('/')
while old and new:
a, b = old[0], new[0]
if a != b:
break
old.pop(0)
new.pop(0)
new = (['..'] * len(old)) + new
newurl = '/'.join(new)
return newurl
|
[
"def",
"url",
"(",
"path",
"=",
"''",
",",
"qs",
"=",
"''",
",",
"script_name",
"=",
"None",
",",
"base",
"=",
"None",
",",
"relative",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"qs",
",",
"(",
"tuple",
",",
"list",
",",
"dict",
")",
")",
":",
"qs",
"=",
"_urlencode",
"(",
"qs",
")",
"if",
"qs",
":",
"qs",
"=",
"'?'",
"+",
"qs",
"if",
"cherrypy",
".",
"request",
".",
"app",
":",
"if",
"not",
"path",
".",
"startswith",
"(",
"'/'",
")",
":",
"# Append/remove trailing slash from path_info as needed",
"# (this is to support mistyped URL's without redirecting;",
"# if you want to redirect, use tools.trailing_slash).",
"pi",
"=",
"cherrypy",
".",
"request",
".",
"path_info",
"if",
"cherrypy",
".",
"request",
".",
"is_index",
"is",
"True",
":",
"if",
"not",
"pi",
".",
"endswith",
"(",
"'/'",
")",
":",
"pi",
"=",
"pi",
"+",
"'/'",
"elif",
"cherrypy",
".",
"request",
".",
"is_index",
"is",
"False",
":",
"if",
"pi",
".",
"endswith",
"(",
"'/'",
")",
"and",
"pi",
"!=",
"'/'",
":",
"pi",
"=",
"pi",
"[",
":",
"-",
"1",
"]",
"if",
"path",
"==",
"''",
":",
"path",
"=",
"pi",
"else",
":",
"path",
"=",
"_urljoin",
"(",
"pi",
",",
"path",
")",
"if",
"script_name",
"is",
"None",
":",
"script_name",
"=",
"cherrypy",
".",
"request",
".",
"script_name",
"if",
"base",
"is",
"None",
":",
"base",
"=",
"cherrypy",
".",
"request",
".",
"base",
"newurl",
"=",
"base",
"+",
"script_name",
"+",
"path",
"+",
"qs",
"else",
":",
"# No request.app (we're being called outside a request).",
"# We'll have to guess the base from server.* attributes.",
"# This will produce very different results from the above",
"# if you're using vhosts or tools.proxy.",
"if",
"base",
"is",
"None",
":",
"base",
"=",
"cherrypy",
".",
"server",
".",
"base",
"(",
")",
"path",
"=",
"(",
"script_name",
"or",
"''",
")",
"+",
"path",
"newurl",
"=",
"base",
"+",
"path",
"+",
"qs",
"if",
"'./'",
"in",
"newurl",
":",
"# Normalize the URL by removing ./ and ../",
"atoms",
"=",
"[",
"]",
"for",
"atom",
"in",
"newurl",
".",
"split",
"(",
"'/'",
")",
":",
"if",
"atom",
"==",
"'.'",
":",
"pass",
"elif",
"atom",
"==",
"'..'",
":",
"atoms",
".",
"pop",
"(",
")",
"else",
":",
"atoms",
".",
"append",
"(",
"atom",
")",
"newurl",
"=",
"'/'",
".",
"join",
"(",
"atoms",
")",
"# At this point, we should have a fully-qualified absolute URL.",
"if",
"relative",
"is",
"None",
":",
"relative",
"=",
"getattr",
"(",
"cherrypy",
".",
"request",
".",
"app",
",",
"'relative_urls'",
",",
"False",
")",
"# See http://www.ietf.org/rfc/rfc2396.txt",
"if",
"relative",
"==",
"'server'",
":",
"# \"A relative reference beginning with a single slash character is",
"# termed an absolute-path reference, as defined by <abs_path>...\"",
"# This is also sometimes called \"server-relative\".",
"newurl",
"=",
"'/'",
"+",
"'/'",
".",
"join",
"(",
"newurl",
".",
"split",
"(",
"'/'",
",",
"3",
")",
"[",
"3",
":",
"]",
")",
"elif",
"relative",
":",
"# \"A relative reference that does not begin with a scheme name",
"# or a slash character is termed a relative-path reference.\"",
"old",
"=",
"url",
"(",
"relative",
"=",
"False",
")",
".",
"split",
"(",
"'/'",
")",
"[",
":",
"-",
"1",
"]",
"new",
"=",
"newurl",
".",
"split",
"(",
"'/'",
")",
"while",
"old",
"and",
"new",
":",
"a",
",",
"b",
"=",
"old",
"[",
"0",
"]",
",",
"new",
"[",
"0",
"]",
"if",
"a",
"!=",
"b",
":",
"break",
"old",
".",
"pop",
"(",
"0",
")",
"new",
".",
"pop",
"(",
"0",
")",
"new",
"=",
"(",
"[",
"'..'",
"]",
"*",
"len",
"(",
"old",
")",
")",
"+",
"new",
"newurl",
"=",
"'/'",
".",
"join",
"(",
"new",
")",
"return",
"newurl"
] |
https://github.com/nosmokingbandit/Watcher3/blob/0217e75158b563bdefc8e01c3be7620008cf3977/lib/cherrypy/_helper.py#L194-L298
|
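The './' and '..' normalization step of url() extracted as a standalone sketch (the helper name is invented for illustration, not CherryPy API):

def _normalize(u):
    atoms = []
    for atom in u.split('/'):
        if atom == '.':
            pass
        elif atom == '..':
            atoms.pop()
        else:
            atoms.append(atom)
    return '/'.join(atoms)

print(_normalize('http://host/app/./x/../y'))  # http://host/app/y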
|
KoreLogicSecurity/mastiff
|
04d569e4fa59513572e77c74b049cad82f9b0310
|
mastiff/plugins/category/exe.py
|
python
|
EXECat.__init__
|
(self, name=None)
|
Initialize the category.
|
Initialize the category.
|
[
"Initialize",
"the",
"category",
"."
] |
def __init__(self, name=None):
"""Initialize the category."""
categories.MastiffPlugin.__init__(self, name)
self.cat_name = 'EXE'
self.my_types = [ 'PE32 executable',
'MS-DOS executable',
'Win32 Executable',
'Win32 EXE'
]
self.yara_filetype = """rule isexe {
strings:
$MZ = "MZ"
condition:
$MZ at 0 and uint32(uint32(0x3C)) == 0x00004550
}"""
|
[
"def",
"__init__",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"categories",
".",
"MastiffPlugin",
".",
"__init__",
"(",
"self",
",",
"name",
")",
"self",
".",
"cat_name",
"=",
"'EXE'",
"self",
".",
"my_types",
"=",
"[",
"'PE32 executable'",
",",
"'MS-DOS executable'",
",",
"'Win32 Executable'",
",",
"'Win32 EXE'",
"]",
"self",
".",
"yara_filetype",
"=",
"\"\"\"rule isexe {\n\t strings:\n\t\t $MZ = \"MZ\" \n\t condition:\n\t\t $MZ at 0 and uint32(uint32(0x3C)) == 0x00004550\n }\"\"\""
] |
https://github.com/KoreLogicSecurity/mastiff/blob/04d569e4fa59513572e77c74b049cad82f9b0310/mastiff/plugins/category/exe.py#L43-L57
|
||
KalleHallden/AutoTimer
|
2d954216700c4930baa154e28dbddc34609af7ce
|
env/lib/python2.7/site-packages/pkg_resources/_vendor/pyparsing.py
|
python
|
ParseResults.asDict
|
( self )
|
return dict((k,toItem(v)) for k,v in item_fn())
|
Returns the named parse results as a nested dictionary.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
result_dict = result.asDict()
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
# even though a ParseResults supports dict-like access, sometimes you just need to have a dict
import json
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
|
Returns the named parse results as a nested dictionary.
|
[
"Returns",
"the",
"named",
"parse",
"results",
"as",
"a",
"nested",
"dictionary",
"."
] |
def asDict( self ):
"""
Returns the named parse results as a nested dictionary.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
result_dict = result.asDict()
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
# even though a ParseResults supports dict-like access, sometimes you just need to have a dict
import json
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
"""
if PY_3:
item_fn = self.items
else:
item_fn = self.iteritems
def toItem(obj):
if isinstance(obj, ParseResults):
if obj.haskeys():
return obj.asDict()
else:
return [toItem(v) for v in obj]
else:
return obj
return dict((k,toItem(v)) for k,v in item_fn())
|
[
"def",
"asDict",
"(",
"self",
")",
":",
"if",
"PY_3",
":",
"item_fn",
"=",
"self",
".",
"items",
"else",
":",
"item_fn",
"=",
"self",
".",
"iteritems",
"def",
"toItem",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"ParseResults",
")",
":",
"if",
"obj",
".",
"haskeys",
"(",
")",
":",
"return",
"obj",
".",
"asDict",
"(",
")",
"else",
":",
"return",
"[",
"toItem",
"(",
"v",
")",
"for",
"v",
"in",
"obj",
"]",
"else",
":",
"return",
"obj",
"return",
"dict",
"(",
"(",
"k",
",",
"toItem",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"item_fn",
"(",
")",
")"
] |
https://github.com/KalleHallden/AutoTimer/blob/2d954216700c4930baa154e28dbddc34609af7ce/env/lib/python2.7/site-packages/pkg_resources/_vendor/pyparsing.py#L720-L753
|
|
DataDog/integrations-core
|
934674b29d94b70ccc008f76ea172d0cdae05e1e
|
etcd/datadog_checks/etcd/config_models/defaults.py
|
python
|
instance_tags
|
(field, value)
|
return get_default_field_value(field, value)
|
[] |
def instance_tags(field, value):
return get_default_field_value(field, value)
|
[
"def",
"instance_tags",
"(",
"field",
",",
"value",
")",
":",
"return",
"get_default_field_value",
"(",
"field",
",",
"value",
")"
] |
https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/etcd/datadog_checks/etcd/config_models/defaults.py#L225-L226
|
|||
OpenRCE/sulley
|
bff0dd1864d055eb4bfa8aacbbb6ecf215e4db4b
|
sulley/primitives.py
|
python
|
bit_field.render
|
(self)
|
return self.rendered
|
Render the primitive.
|
Render the primitive.
|
[
"Render",
"the",
"primitive",
"."
] |
def render (self):
'''
Render the primitive.
'''
#
# binary formatting.
#
if self.format == "binary":
bit_stream = ""
rendered = ""
# pad the bit stream to the next byte boundary.
if self.width % 8 == 0:
bit_stream += self.to_binary()
else:
bit_stream = "0" * (8 - (self.width % 8))
bit_stream += self.to_binary()
# convert the bit stream from a string of bits into raw bytes.
for i in xrange(len(bit_stream) / 8):
chunk = bit_stream[8*i:8*i+8]
rendered += struct.pack("B", self.to_decimal(chunk))
# if necessary, convert the endianness of the raw bytes.
if self.endian == "<":
rendered = list(rendered)
rendered.reverse()
rendered = "".join(rendered)
self.rendered = rendered
#
# ascii formatting.
#
else:
# if the sign flag is raised and we are dealing with a signed integer (first bit is 1).
if self.signed and self.to_binary()[0] == "1":
max_num = self.to_decimal("1" + "0" * (self.width - 1))
# mask off the sign bit.
val = self.value & self.to_decimal("1" * (self.width - 1))
# account for the fact that the negative scale works backwards.
val = max_num - val - 1
# toss in the negative sign.
self.rendered = "%d" % ~val
# unsigned integer or positive signed integer.
else:
self.rendered = "%d" % self.value
return self.rendered
|
[
"def",
"render",
"(",
"self",
")",
":",
"#",
"# binary formatting.",
"#",
"if",
"self",
".",
"format",
"==",
"\"binary\"",
":",
"bit_stream",
"=",
"\"\"",
"rendered",
"=",
"\"\"",
"# pad the bit stream to the next byte boundary.",
"if",
"self",
".",
"width",
"%",
"8",
"==",
"0",
":",
"bit_stream",
"+=",
"self",
".",
"to_binary",
"(",
")",
"else",
":",
"bit_stream",
"=",
"\"0\"",
"*",
"(",
"8",
"-",
"(",
"self",
".",
"width",
"%",
"8",
")",
")",
"bit_stream",
"+=",
"self",
".",
"to_binary",
"(",
")",
"# convert the bit stream from a string of bits into raw bytes.",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"bit_stream",
")",
"/",
"8",
")",
":",
"chunk",
"=",
"bit_stream",
"[",
"8",
"*",
"i",
":",
"8",
"*",
"i",
"+",
"8",
"]",
"rendered",
"+=",
"struct",
".",
"pack",
"(",
"\"B\"",
",",
"self",
".",
"to_decimal",
"(",
"chunk",
")",
")",
"# if necessary, convert the endianess of the raw bytes.",
"if",
"self",
".",
"endian",
"==",
"\"<\"",
":",
"rendered",
"=",
"list",
"(",
"rendered",
")",
"rendered",
".",
"reverse",
"(",
")",
"rendered",
"=",
"\"\"",
".",
"join",
"(",
"rendered",
")",
"self",
".",
"rendered",
"=",
"rendered",
"#",
"# ascii formatting.",
"#",
"else",
":",
"# if the sign flag is raised and we are dealing with a signed integer (first bit is 1).",
"if",
"self",
".",
"signed",
"and",
"self",
".",
"to_binary",
"(",
")",
"[",
"0",
"]",
"==",
"\"1\"",
":",
"max_num",
"=",
"self",
".",
"to_decimal",
"(",
"\"1\"",
"+",
"\"0\"",
"*",
"(",
"self",
".",
"width",
"-",
"1",
")",
")",
"# mask off the sign bit.",
"val",
"=",
"self",
".",
"value",
"&",
"self",
".",
"to_decimal",
"(",
"\"1\"",
"*",
"(",
"self",
".",
"width",
"-",
"1",
")",
")",
"# account for the fact that the negative scale works backwards.",
"val",
"=",
"max_num",
"-",
"val",
"-",
"1",
"# toss in the negative sign.",
"self",
".",
"rendered",
"=",
"\"%d\"",
"%",
"~",
"val",
"# unsigned integer or positive signed integer.",
"else",
":",
"self",
".",
"rendered",
"=",
"\"%d\"",
"%",
"self",
".",
"value",
"return",
"self",
".",
"rendered"
] |
https://github.com/OpenRCE/sulley/blob/bff0dd1864d055eb4bfa8aacbbb6ecf215e4db4b/sulley/primitives.py#L744-L799
|
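A Python 3 sketch of the binary-formatting branch above (the sulley original is Python 2, hence xrange and integer '/'):

import struct

bits = '101'                                   # e.g. width == 3
if len(bits) % 8:
    bits = '0' * (8 - len(bits) % 8) + bits    # pad to byte boundary: '00000101'
raw = b''.join(struct.pack('B', int(bits[i * 8:i * 8 + 8], 2))
               for i in range(len(bits) // 8))
print(raw)  # b'\x05'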
|
naftaliharris/tauthon
|
5587ceec329b75f7caf6d65a036db61ac1bae214
|
Lib/pstats.py
|
python
|
Stats.dump_stats
|
(self, filename)
|
Write the profile data to a file we know how to load back.
|
Write the profile data to a file we know how to load back.
|
[
"Write",
"the",
"profile",
"data",
"to",
"a",
"file",
"we",
"know",
"how",
"to",
"load",
"back",
"."
] |
def dump_stats(self, filename):
"""Write the profile data to a file we know how to load back."""
f = file(filename, 'wb')
try:
marshal.dump(self.stats, f)
finally:
f.close()
|
[
"def",
"dump_stats",
"(",
"self",
",",
"filename",
")",
":",
"f",
"=",
"file",
"(",
"filename",
",",
"'wb'",
")",
"try",
":",
"marshal",
".",
"dump",
"(",
"self",
".",
"stats",
",",
"f",
")",
"finally",
":",
"f",
".",
"close",
"(",
")"
] |
https://github.com/naftaliharris/tauthon/blob/5587ceec329b75f7caf6d65a036db61ac1bae214/Lib/pstats.py#L163-L169
|
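Note that file() exists only in Python 2 (Tauthon is a 2.x fork); a Python 3 sketch of the same method body would use open() with a context manager:

import marshal

def dump_stats(self, filename):
    """Write the profile data to a file we know how to load back."""
    with open(filename, 'wb') as f:
        marshal.dump(self.stats, f)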
||
microsoft/unilm
|
65f15af2a307ebb64cfb25adf54375b002e6fe8d
|
infoxlm/fairseq/fairseq/tasks/fairseq_task.py
|
python
|
FairseqTask.source_dictionary
|
(self)
|
Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task).
|
Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task).
|
[
"Return",
"the",
"source",
":",
"class",
":",
"~fairseq",
".",
"data",
".",
"Dictionary",
"(",
"if",
"applicable",
"for",
"this",
"task",
")",
"."
] |
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
|
[
"def",
"source_dictionary",
"(",
"self",
")",
":",
"raise",
"NotImplementedError"
] |
https://github.com/microsoft/unilm/blob/65f15af2a307ebb64cfb25adf54375b002e6fe8d/infoxlm/fairseq/fairseq/tasks/fairseq_task.py#L291-L294
|
||
ioflo/ioflo
|
177ac656d7c4ff801aebb0d8b401db365a5248ce
|
ioflo/aio/proto/stacking.py
|
python
|
TcpServerStack.serviceReceivesOnce
|
(self)
|
Service receives once (one reception)
|
Service receives once (one reception)
|
[
"Service",
"receives",
"once",
"(",
"one",
"reception",
")"
] |
def serviceReceivesOnce(self):
"""
Service receives once (one reception)
"""
if self.handler.opened:
for ca, ix in self.handler.ixes.items():
self._serviceOneReceived(ix, ca)
|
[
"def",
"serviceReceivesOnce",
"(",
"self",
")",
":",
"if",
"self",
".",
"handler",
".",
"opened",
":",
"for",
"ca",
",",
"ix",
"in",
"self",
".",
"handler",
".",
"ixes",
".",
"items",
"(",
")",
":",
"self",
".",
"_serviceOneReceived",
"(",
"ix",
",",
"ca",
")"
] |
https://github.com/ioflo/ioflo/blob/177ac656d7c4ff801aebb0d8b401db365a5248ce/ioflo/aio/proto/stacking.py#L1324-L1330
|
||
ales-tsurko/cells
|
4cf7e395cd433762bea70cdc863a346f3a6fe1d0
|
packaging/macos/python/lib/python3.7/distutils/ccompiler.py
|
python
|
CCompiler.set_runtime_library_dirs
|
(self, dirs)
|
Set the list of directories to search for shared libraries at
runtime to 'dirs' (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default.
|
Set the list of directories to search for shared libraries at
runtime to 'dirs' (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default.
|
[
"Set",
"the",
"list",
"of",
"directories",
"to",
"search",
"for",
"shared",
"libraries",
"at",
"runtime",
"to",
"dirs",
"(",
"a",
"list",
"of",
"strings",
")",
".",
"This",
"does",
"not",
"affect",
"any",
"standard",
"search",
"path",
"that",
"the",
"runtime",
"linker",
"may",
"search",
"by",
"default",
"."
] |
def set_runtime_library_dirs(self, dirs):
"""Set the list of directories to search for shared libraries at
runtime to 'dirs' (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default.
"""
self.runtime_library_dirs = dirs[:]
|
[
"def",
"set_runtime_library_dirs",
"(",
"self",
",",
"dirs",
")",
":",
"self",
".",
"runtime_library_dirs",
"=",
"dirs",
"[",
":",
"]"
] |
https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/distutils/ccompiler.py#L280-L286
|
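Why dirs[:] rather than dirs: the slice takes a shallow copy, so later mutation of the caller's list cannot change the compiler's state. A small demonstration:

dirs = ['/opt/lib']
runtime_library_dirs = dirs[:]   # copy, as in set_runtime_library_dirs
dirs.append('/tmp/other')
print(runtime_library_dirs)      # ['/opt/lib'] -- unaffected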
||
SciTools/iris
|
a12d0b15bab3377b23a148e891270b13a0419c38
|
lib/iris/analysis/_interpolation.py
|
python
|
RectilinearInterpolator._resample_coord
|
(self, sample_points, coord, coord_dims)
|
return new_coord
|
Interpolate the given coordinate at the provided sample points.
|
Interpolate the given coordinate at the provided sample points.
|
[
"Interpolate",
"the",
"given",
"coordinate",
"at",
"the",
"provided",
"sample",
"points",
"."
] |
def _resample_coord(self, sample_points, coord, coord_dims):
"""
Interpolate the given coordinate at the provided sample points.
"""
# NB. This section is ripe for improvement:
# - Internally self._points() expands coord.points to the same
# N-dimensional shape as the cube's data, but it doesn't
# collapse it again before returning so we have to do that
# here.
# - By expanding to N dimensions self._points() is doing
# unnecessary work.
data = self._points(sample_points, coord.points, coord_dims)
index = tuple(
0 if dim not in coord_dims else slice(None)
for dim in range(self._src_cube.ndim)
)
new_points = data[index]
# Watch out for DimCoord instances that are no longer monotonic
# after the resampling.
try:
new_coord = coord.copy(new_points)
except (ValueError, TypeError):
aux_coord = AuxCoord.from_coord(coord)
new_coord = aux_coord.copy(new_points)
return new_coord
|
[
"def",
"_resample_coord",
"(",
"self",
",",
"sample_points",
",",
"coord",
",",
"coord_dims",
")",
":",
"# NB. This section is ripe for improvement:",
"# - Internally self._points() expands coord.points to the same",
"# N-dimensional shape as the cube's data, but it doesn't",
"# collapse it again before returning so we have to do that",
"# here.",
"# - By expanding to N dimensions self._points() is doing",
"# unnecessary work.",
"data",
"=",
"self",
".",
"_points",
"(",
"sample_points",
",",
"coord",
".",
"points",
",",
"coord_dims",
")",
"index",
"=",
"tuple",
"(",
"0",
"if",
"dim",
"not",
"in",
"coord_dims",
"else",
"slice",
"(",
"None",
")",
"for",
"dim",
"in",
"range",
"(",
"self",
".",
"_src_cube",
".",
"ndim",
")",
")",
"new_points",
"=",
"data",
"[",
"index",
"]",
"# Watch out for DimCoord instances that are no longer monotonic",
"# after the resampling.",
"try",
":",
"new_coord",
"=",
"coord",
".",
"copy",
"(",
"new_points",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"aux_coord",
"=",
"AuxCoord",
".",
"from_coord",
"(",
"coord",
")",
"new_coord",
"=",
"aux_coord",
".",
"copy",
"(",
"new_points",
")",
"return",
"new_coord"
] |
https://github.com/SciTools/iris/blob/a12d0b15bab3377b23a148e891270b13a0419c38/lib/iris/analysis/_interpolation.py#L373-L398
|
|
bungnoid/glTools
|
8ff0899de43784a18bd4543285655e68e28fb5e5
|
tools/match.py
|
python
|
Match.selectTwin
|
(self)
|
Select the twin of the currently selected object
|
Select the twin of the currently selected object
|
[
"Select",
"the",
"twin",
"of",
"the",
"currectly",
"selected",
"object"
] |
def selectTwin(self):
'''
Select the twin of the currently selected object
'''
# Get Current Selection
selection = mc.ls(sl=1,transforms=True)
# Find Twin Controls
twinList = []
for obj in selection:
# Check namespace
ns = glTools.utils.namespace.getNS(obj,topOnly=False)
obj = glTools.utils.namespace.stripNS(obj)
if ns: ns += ':'
# Get Twin
twin = self.getTwin(ns+obj)
# Check Twin
if not mc.objExists(twin):
print('Match Warning: Twin object "'+twin+'" does not exist! Skipping object...')
else:
# Append to Twin List
twinList.append(twin)
# Set Selection
if twinList: mc.select(twinList,r=True)
|
[
"def",
"selectTwin",
"(",
"self",
")",
":",
"# Get Current Selection",
"selection",
"=",
"mc",
".",
"ls",
"(",
"sl",
"=",
"1",
",",
"transforms",
"=",
"True",
")",
"# Find Twin Controls",
"twinList",
"=",
"[",
"]",
"for",
"obj",
"in",
"selection",
":",
"# Check namespace",
"ns",
"=",
"glTools",
".",
"utils",
".",
"namespace",
".",
"getNS",
"(",
"obj",
",",
"topOnly",
"=",
"False",
")",
"obj",
"=",
"glTools",
".",
"utils",
".",
"namespace",
".",
"stripNS",
"(",
"obj",
")",
"if",
"ns",
":",
"ns",
"+=",
"':'",
"# Get Twin",
"twin",
"=",
"self",
".",
"getTwin",
"(",
"ns",
"+",
"obj",
")",
"# Check Twin",
"if",
"not",
"mc",
".",
"objExists",
"(",
"twin",
")",
":",
"print",
"(",
"'Match Warning: Twin object \"'",
"+",
"twin",
"+",
"'\" does not exist! Skipping object...'",
")",
"else",
":",
"# Append to Twin List",
"twinList",
".",
"append",
"(",
"twin",
")",
"# Set Selection",
"if",
"twinList",
":",
"mc",
".",
"select",
"(",
"twinList",
",",
"r",
"=",
"True",
")"
] |
https://github.com/bungnoid/glTools/blob/8ff0899de43784a18bd4543285655e68e28fb5e5/tools/match.py#L740-L767
|
||
GoSecure/pyrdp
|
abd8b8762b6d7fd0e49d4a927b529f892b412743
|
pyrdp/mitm/AttackerMITM.py
|
python
|
AttackerMITM.sendDirectoryList
|
(self, requestID: int, deviceID: int)
|
[] |
def sendDirectoryList(self, requestID: int, deviceID: int):
directoryList = self.directoryListingLists[requestID]
pdu = PlayerDirectoryListingResponsePDU(self.attacker.getCurrentTimeStamp(), deviceID, directoryList)
self.attacker.sendPDU(pdu)
|
[
"def",
"sendDirectoryList",
"(",
"self",
",",
"requestID",
":",
"int",
",",
"deviceID",
":",
"int",
")",
":",
"directoryList",
"=",
"self",
".",
"directoryListingLists",
"[",
"requestID",
"]",
"pdu",
"=",
"PlayerDirectoryListingResponsePDU",
"(",
"self",
".",
"attacker",
".",
"getCurrentTimeStamp",
"(",
")",
",",
"deviceID",
",",
"directoryList",
")",
"self",
".",
"attacker",
".",
"sendPDU",
"(",
"pdu",
")"
] |
https://github.com/GoSecure/pyrdp/blob/abd8b8762b6d7fd0e49d4a927b529f892b412743/pyrdp/mitm/AttackerMITM.py#L265-L268
|
||||
SteveDoyle2/pyNastran
|
eda651ac2d4883d95a34951f8a002ff94f642a1a
|
pyNastran/op2/tables/geom/geom2.py
|
python
|
GEOM2._read_cquad8_72
|
(self, card_obj, data: bytes, n: int)
|
return n, elements
|
data = (
eid pid n1 n2 n3 n4 n5 n6 n7 n8 t1 t2 t3 t4 theta ? ?
301, 30, 40000, 40002, 40202, 40200, 40001, 40102, 40201, 40100, 0.2, 0.2, 0.2, 0.2, 19.2, 0, 0, -1,
302, 30, 40002, 40004, 40204, 40202, 40003, 40104, 40203, 40102, 0.2, 0.2, 0.2, 0.2, 19.2, 0, 0, -1,
303, 30, 40200, 40202, 40402, 40400, 40201, 40302, 40401, 40300, 0.2, 0.2, 0.2, 0.2, 19.2, 0, 0, -1,
304, 30, 40202, 40204, 40404, 40402, 40203, 40304, 40403, 40302, 0.2, 0.2, 0.2, 0.2, 19.2, 0, 0, -1,
1301, 20, 10000, 10002, 10202, 10200, 10001, 10102, 10201, 10100, -1.0, -1.0, -1.0, -1.0, 0, 0, 0, -1,
1302, 20, 10002, 10004, 10204, 10202, 10003, 10104, 10203, 10102, -1.0, -1.0, -1.0, -1.0, 0, 0, 0, -1,
1303, 20, 10200, 10202, 10402, 10400, 10201, 10302, 10401, 10300, -1.0, -1.0, -1.0, -1.0, 0, 0, 0, -1,
1304, 20, 10202, 10204, 10404, 10402, 10203, 10304, 10403, 10302, -1.0, -1.0, -1.0, -1.0, 0, 0, 0, -1)
|
data = (
eid pid n1 n2 n3 n4 n5 n6 n7 n8 t1 t2 t3 t4 theta ? ?
301, 30, 40000, 40002, 40202, 40200, 40001, 40102, 40201, 40100, 0.2, 0.2, 0.2, 0.2, 19.2, 0, 0, -1,
302, 30, 40002, 40004, 40204, 40202, 40003, 40104, 40203, 40102, 0.2, 0.2, 0.2, 0.2, 19.2, 0, 0, -1,
303, 30, 40200, 40202, 40402, 40400, 40201, 40302, 40401, 40300, 0.2, 0.2, 0.2, 0.2, 19.2, 0, 0, -1,
304, 30, 40202, 40204, 40404, 40402, 40203, 40304, 40403, 40302, 0.2, 0.2, 0.2, 0.2, 19.2, 0, 0, -1,
1301, 20, 10000, 10002, 10202, 10200, 10001, 10102, 10201, 10100, -1.0, -1.0, -1.0, -1.0, 0, 0, 0, -1,
1302, 20, 10002, 10004, 10204, 10202, 10003, 10104, 10203, 10102, -1.0, -1.0, -1.0, -1.0, 0, 0, 0, -1,
1303, 20, 10200, 10202, 10402, 10400, 10201, 10302, 10401, 10300, -1.0, -1.0, -1.0, -1.0, 0, 0, 0, -1,
1304, 20, 10202, 10204, 10404, 10402, 10203, 10304, 10403, 10302, -1.0, -1.0, -1.0, -1.0, 0, 0, 0, -1)
|
[
"data",
"=",
"(",
"eid",
"pid",
"n1",
"n2",
"n3",
"n4",
"n5",
"n6",
"n7",
"n8",
"t1",
"t2",
"t3",
"t4",
"theta",
"?",
"?",
"301",
"30",
"40000",
"40002",
"40202",
"40200",
"40001",
"40102",
"40201",
"40100",
"0",
".",
"2",
"0",
".",
"2",
"0",
".",
"2",
"0",
".",
"2",
"19",
".",
"2",
"0",
"0",
"-",
"1",
"302",
"30",
"40002",
"40004",
"40204",
"40202",
"40003",
"40104",
"40203",
"40102",
"0",
".",
"2",
"0",
".",
"2",
"0",
".",
"2",
"0",
".",
"2",
"19",
".",
"2",
"0",
"0",
"-",
"1",
"303",
"30",
"40200",
"40202",
"40402",
"40400",
"40201",
"40302",
"40401",
"40300",
"0",
".",
"2",
"0",
".",
"2",
"0",
".",
"2",
"0",
".",
"2",
"19",
".",
"2",
"0",
"0",
"-",
"1",
"304",
"30",
"40202",
"40204",
"40404",
"40402",
"40203",
"40304",
"40403",
"40302",
"0",
".",
"2",
"0",
".",
"2",
"0",
".",
"2",
"0",
".",
"2",
"19",
".",
"2",
"0",
"0",
"-",
"1",
"1301",
"20",
"10000",
"10002",
"10202",
"10200",
"10001",
"10102",
"10201",
"10100",
"-",
"1",
".",
"0",
"-",
"1",
".",
"0",
"-",
"1",
".",
"0",
"-",
"1",
".",
"0",
"0",
"0",
"0",
"-",
"1",
"1302",
"20",
"10002",
"10004",
"10204",
"10202",
"10003",
"10104",
"10203",
"10102",
"-",
"1",
".",
"0",
"-",
"1",
".",
"0",
"-",
"1",
".",
"0",
"-",
"1",
".",
"0",
"0",
"0",
"0",
"-",
"1",
"1303",
"20",
"10200",
"10202",
"10402",
"10400",
"10201",
"10302",
"10401",
"10300",
"-",
"1",
".",
"0",
"-",
"1",
".",
"0",
"-",
"1",
".",
"0",
"-",
"1",
".",
"0",
"0",
"0",
"0",
"-",
"1",
"1304",
"20",
"10202",
"10204",
"10404",
"10402",
"10203",
"10304",
"10403",
"10302",
"-",
"1",
".",
"0",
"-",
"1",
".",
"0",
"-",
"1",
".",
"0",
"-",
"1",
".",
"0",
"0",
"0",
"0",
"-",
"1",
")"
] |
def _read_cquad8_72(self, card_obj, data: bytes, n: int) -> tuple:
"""
data = (
eid pid n1 n2 n3 n4 n5 n6 n7 n8 t1 t2 t3 t4 theta ? ?
301, 30, 40000, 40002, 40202, 40200, 40001, 40102, 40201, 40100, 0.2, 0.2, 0.2, 0.2, 19.2, 0, 0, -1,
302, 30, 40002, 40004, 40204, 40202, 40003, 40104, 40203, 40102, 0.2, 0.2, 0.2, 0.2, 19.2, 0, 0, -1,
303, 30, 40200, 40202, 40402, 40400, 40201, 40302, 40401, 40300, 0.2, 0.2, 0.2, 0.2, 19.2, 0, 0, -1,
304, 30, 40202, 40204, 40404, 40402, 40203, 40304, 40403, 40302, 0.2, 0.2, 0.2, 0.2, 19.2, 0, 0, -1,
1301, 20, 10000, 10002, 10202, 10200, 10001, 10102, 10201, 10100, -1.0, -1.0, -1.0, -1.0, 0, 0, 0, -1,
1302, 20, 10002, 10004, 10204, 10202, 10003, 10104, 10203, 10102, -1.0, -1.0, -1.0, -1.0, 0, 0, 0, -1,
1303, 20, 10200, 10202, 10402, 10400, 10201, 10302, 10401, 10300, -1.0, -1.0, -1.0, -1.0, 0, 0, 0, -1,
1304, 20, 10202, 10204, 10404, 10402, 10203, 10304, 10403, 10302, -1.0, -1.0, -1.0, -1.0, 0, 0, 0, -1)
"""
op2 = self.op2
elements = []
#self.show_data(data, types='if')
#ss
ntotal = 72 * self.factor # 16*4
ndatai = (len(data) - n)
nelements = ndatai // ntotal
assert ndatai % ntotal == 0
s = Struct(mapfmt(op2._endian + b'10i 5f 3i', self.size))
#sf = Struct(mapfmt(op2._endian + b'10i 6f', self.size))
edata0 = data[n:n + ntotal]
flag = s.unpack(edata0)[-1]
if flag == -1:
for unused_i in range(nelements):
edata = data[n:n + ntotal]
out = s.unpack(edata)
if op2.is_debug_file:
op2.binary_debug.write(' CQUAD8=%s\n' % str(out))
(eid, pid, n1, n2, n3, n4, n5, n6, n7, n8, t1, t2,
t3, t4, theta, tflag, zoffs, flag) = out
assert eid > 0
assert pid > 0
assert tflag == 0
assert zoffs == 0
assert flag == -1, flag
tflag = None
out = (eid, pid, n1, n2, n3, n4, n5, n6, n7, n8, t1, t2,
t3, t4, theta, zoffs, tflag)
#print('eid=%s pid=%s n1=%s n2=%s n3=%s n4=%s theta=%g zoffs=%s '
#'tflag=%s t1=%g t2=%g t3=%g t4=%g' % (
#eid, pid, n1, n2, n3, n4, theta, zoffs, tflag, t1, t2, t3, t4))
#data_init = [eid,pid,n1,n2,n3,n4,theta,zoffs,tflag,t1,t2,t3,t4]
elem = CQUAD8.add_op2_data(out)
elements.append(elem)
self.add_op2_element(elem)
n += ntotal
else:
for unused_i in range(nelements):
edata = data[n:n + ntotal]
out = s.unpack(edata)
if op2.is_debug_file:
op2.binary_debug.write(' CQUAD8=%s\n' % str(out))
(eid, pid, n1, n2, n3, n4, n5, n6, n7, n8, t1, t2,
t3, t4, theta, zoffs) = out
assert eid > 0
assert pid > 0
tflag = None
out = (eid, pid, n1, n2, n3, n4, n5, n6, n7, n8, t1, t2,
t3, t4, theta, zoffs, tflag)
#print('eid=%s pid=%s n1=%s n2=%s n3=%s n4=%s theta=%g zoffs=%s '
#'tflag=%s t1=%g t2=%g t3=%g t4=%g' % (
#eid, pid, n1, n2, n3, n4, theta, zoffs, tflag, t1, t2, t3, t4))
#data_init = [eid,pid,n1,n2,n3,n4,theta,zoffs,tflag,t1,t2,t3,t4]
elem = CQUAD8.add_op2_data(out)
elements.append(elem)
self.add_op2_element(elem)
n += ntotal
return n, elements
|
[
"def",
"_read_cquad8_72",
"(",
"self",
",",
"card_obj",
",",
"data",
":",
"bytes",
",",
"n",
":",
"int",
")",
"->",
"int",
":",
"op2",
"=",
"self",
".",
"op2",
"elements",
"=",
"[",
"]",
"#self.show_data(data, types='if')",
"#ss",
"ntotal",
"=",
"72",
"*",
"self",
".",
"factor",
"# 16*4",
"ndatai",
"=",
"(",
"len",
"(",
"data",
")",
"-",
"n",
")",
"nelements",
"=",
"ndatai",
"//",
"ntotal",
"assert",
"ndatai",
"%",
"ntotal",
"==",
"0",
"s",
"=",
"Struct",
"(",
"mapfmt",
"(",
"op2",
".",
"_endian",
"+",
"b'10i 5f 3i'",
",",
"self",
".",
"size",
")",
")",
"#sf = Struct(mapfmt(op2._endian + b'10i 6f', self.size))",
"edata0",
"=",
"data",
"[",
"n",
":",
"n",
"+",
"ntotal",
"]",
"flag",
"=",
"s",
".",
"unpack",
"(",
"edata0",
")",
"[",
"-",
"1",
"]",
"if",
"flag",
"==",
"-",
"1",
":",
"for",
"unused_i",
"in",
"range",
"(",
"nelements",
")",
":",
"edata",
"=",
"data",
"[",
"n",
":",
"n",
"+",
"ntotal",
"]",
"out",
"=",
"s",
".",
"unpack",
"(",
"edata",
")",
"if",
"op2",
".",
"is_debug_file",
":",
"op2",
".",
"binary_debug",
".",
"write",
"(",
"' CQUAD8=%s\\n'",
"%",
"str",
"(",
"out",
")",
")",
"(",
"eid",
",",
"pid",
",",
"n1",
",",
"n2",
",",
"n3",
",",
"n4",
",",
"n5",
",",
"n6",
",",
"n7",
",",
"n8",
",",
"t1",
",",
"t2",
",",
"t3",
",",
"t4",
",",
"theta",
",",
"tflag",
",",
"zoffs",
",",
"flag",
")",
"=",
"out",
"assert",
"eid",
">",
"0",
"assert",
"pid",
">",
"0",
"assert",
"tflag",
"==",
"0",
"assert",
"zoffs",
"==",
"0",
"assert",
"flag",
"==",
"-",
"1",
",",
"flag",
"tflag",
"=",
"None",
"out",
"=",
"(",
"eid",
",",
"pid",
",",
"n1",
",",
"n2",
",",
"n3",
",",
"n4",
",",
"n5",
",",
"n6",
",",
"n7",
",",
"n8",
",",
"t1",
",",
"t2",
",",
"t3",
",",
"t4",
",",
"theta",
",",
"zoffs",
",",
"tflag",
")",
"#print('eid=%s pid=%s n1=%s n2=%s n3=%s n4=%s theta=%g zoffs=%s '",
"#'tflag=%s t1=%g t2=%g t3=%g t4=%g' % (",
"#eid, pid, n1, n2, n3, n4, theta, zoffs, tflag, t1, t2, t3, t4))",
"#data_init = [eid,pid,n1,n2,n3,n4,theta,zoffs,tflag,t1,t2,t3,t4]",
"elem",
"=",
"CQUAD8",
".",
"add_op2_data",
"(",
"out",
")",
"elements",
".",
"append",
"(",
"elem",
")",
"self",
".",
"add_op2_element",
"(",
"elem",
")",
"n",
"+=",
"ntotal",
"else",
":",
"for",
"unused_i",
"in",
"range",
"(",
"nelements",
")",
":",
"edata",
"=",
"data",
"[",
"n",
":",
"n",
"+",
"ntotal",
"]",
"out",
"=",
"s",
".",
"unpack",
"(",
"edata",
")",
"if",
"op2",
".",
"is_debug_file",
":",
"op2",
".",
"binary_debug",
".",
"write",
"(",
"' CQUAD8=%s\\n'",
"%",
"str",
"(",
"out",
")",
")",
"(",
"eid",
",",
"pid",
",",
"n1",
",",
"n2",
",",
"n3",
",",
"n4",
",",
"n5",
",",
"n6",
",",
"n7",
",",
"n8",
",",
"t1",
",",
"t2",
",",
"t3",
",",
"t4",
",",
"theta",
",",
"zoffs",
")",
"=",
"out",
"assert",
"eid",
">",
"0",
"assert",
"pid",
">",
"0",
"tflag",
"=",
"None",
"out",
"=",
"(",
"eid",
",",
"pid",
",",
"n1",
",",
"n2",
",",
"n3",
",",
"n4",
",",
"n5",
",",
"n6",
",",
"n7",
",",
"n8",
",",
"t1",
",",
"t2",
",",
"t3",
",",
"t4",
",",
"theta",
",",
"zoffs",
",",
"tflag",
")",
"#print('eid=%s pid=%s n1=%s n2=%s n3=%s n4=%s theta=%g zoffs=%s '",
"#'tflag=%s t1=%g t2=%g t3=%g t4=%g' % (",
"#eid, pid, n1, n2, n3, n4, theta, zoffs, tflag, t1, t2, t3, t4))",
"#data_init = [eid,pid,n1,n2,n3,n4,theta,zoffs,tflag,t1,t2,t3,t4]",
"elem",
"=",
"CQUAD8",
".",
"add_op2_data",
"(",
"out",
")",
"elements",
".",
"append",
"(",
"elem",
")",
"self",
".",
"add_op2_element",
"(",
"elem",
")",
"n",
"+=",
"ntotal",
"return",
"n",
",",
"elements"
] |
https://github.com/SteveDoyle2/pyNastran/blob/eda651ac2d4883d95a34951f8a002ff94f642a1a/pyNastran/op2/tables/geom/geom2.py#L3360-L3431
|
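A layout check for the 72-byte record format above (little-endian assumed for illustration):

import struct

# b'10i 5f 3i': 10 ints (eid, pid, n1..n8), 5 floats (t1..t4, theta),
# 3 ints (tflag, zoffs, flag) -- 18 fields x 4 bytes = 72 bytes.
print(struct.calcsize('<10i5f3i'))  # 72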
|
dswah/pyGAM
|
b57b4cf8783a90976031e1857e748ca3e6ec650b
|
pygam/terms.py
|
python
|
Term.hasconstraint
|
(self)
|
return np.not_equal(np.atleast_1d(self.constraints), None).any()
|
bool, whether the term has any constraints
|
bool, whether the term has any constraints
|
[
"bool",
"whether",
"the",
"term",
"has",
"any",
"constraints"
] |
def hasconstraint(self):
"""bool, whether the term has any constraints
"""
return np.not_equal(np.atleast_1d(self.constraints), None).any()
|
[
"def",
"hasconstraint",
"(",
"self",
")",
":",
"return",
"np",
".",
"not_equal",
"(",
"np",
".",
"atleast_1d",
"(",
"self",
".",
"constraints",
")",
",",
"None",
")",
".",
"any",
"(",
")"
] |
https://github.com/dswah/pyGAM/blob/b57b4cf8783a90976031e1857e748ca3e6ec650b/pygam/terms.py#L241-L244
|
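A minimal numpy sketch of the constraint check above (the constraint names are illustrative):

import numpy as np

print(np.not_equal(np.atleast_1d(None), None).any())                    # False
print(np.not_equal(np.atleast_1d(['monotonic_inc', None]), None).any()) # True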
|
qntm/greenery
|
da23f57b737e19338777f4d327e58f95bb59556a
|
greenery/fsm.py
|
python
|
fsm.__ne__
|
(self, other)
|
return self.different(other)
|
Use `fsm1 != fsm2` to determine whether two FSMs recognise different
strings.
|
Use `fsm1 != fsm2` to determine whether two FSMs recognise different
strings.
|
[
"Use",
"fsm1",
"!",
"=",
"fsm2",
"to",
"determine",
"whether",
"two",
"FSMs",
"recognise",
"different",
"strings",
"."
] |
def __ne__(self, other):
'''
Use `fsm1 != fsm2` to determine whether two FSMs recognise different
strings.
'''
return self.different(other)
|
[
"def",
"__ne__",
"(",
"self",
",",
"other",
")",
":",
"return",
"self",
".",
"different",
"(",
"other",
")"
] |
https://github.com/qntm/greenery/blob/da23f57b737e19338777f4d327e58f95bb59556a/greenery/fsm.py#L521-L526
|
|
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_hxb2/lib/python3.5/site-packages/setuptools/config.py
|
python
|
configuration_to_dict
|
(handlers)
|
return config_dict
|
Returns configuration data gathered by given handlers as a dict.
:param list[ConfigHandler] handlers: Handlers list,
usually from parse_configuration()
:rtype: dict
|
Returns configuration data gathered by given handlers as a dict.
|
[
"Returns",
"configuration",
"data",
"gathered",
"by",
"given",
"handlers",
"as",
"a",
"dict",
"."
] |
def configuration_to_dict(handlers):
"""Returns configuration data gathered by given handlers as a dict.
:param list[ConfigHandler] handlers: Handlers list,
usually from parse_configuration()
:rtype: dict
"""
config_dict = defaultdict(dict)
for handler in handlers:
obj_alias = handler.section_prefix
target_obj = handler.target_obj
for option in handler.set_options:
getter = getattr(target_obj, 'get_%s' % option, None)
if getter is None:
value = getattr(target_obj, option)
else:
value = getter()
config_dict[obj_alias][option] = value
return config_dict
|
[
"def",
"configuration_to_dict",
"(",
"handlers",
")",
":",
"config_dict",
"=",
"defaultdict",
"(",
"dict",
")",
"for",
"handler",
"in",
"handlers",
":",
"obj_alias",
"=",
"handler",
".",
"section_prefix",
"target_obj",
"=",
"handler",
".",
"target_obj",
"for",
"option",
"in",
"handler",
".",
"set_options",
":",
"getter",
"=",
"getattr",
"(",
"target_obj",
",",
"'get_%s'",
"%",
"option",
",",
"None",
")",
"if",
"getter",
"is",
"None",
":",
"value",
"=",
"getattr",
"(",
"target_obj",
",",
"option",
")",
"else",
":",
"value",
"=",
"getter",
"(",
")",
"config_dict",
"[",
"obj_alias",
"]",
"[",
"option",
"]",
"=",
"value",
"return",
"config_dict"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/setuptools/config.py#L60-L86
|
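The getter-dispatch pattern used above, in isolation (class and attribute names invented):

class Target:
    name = 'pkg'
    def get_version(self):
        return '1.0'

t = Target()
for option in ('name', 'version'):
    getter = getattr(t, 'get_%s' % option, None)
    print(option, getattr(t, option) if getter is None else getter())
# prints: name pkg / version 1.0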
|
kamalgill/flask-appengine-template
|
11760f83faccbb0d0afe416fc58e67ecfb4643c2
|
src/lib/wtforms/utils.py
|
python
|
WebobInputWrapper.__init__
|
(self, multidict)
|
[] |
def __init__(self, multidict):
self._wrapped = multidict
|
[
"def",
"__init__",
"(",
"self",
",",
"multidict",
")",
":",
"self",
".",
"_wrapped",
"=",
"multidict"
] |
https://github.com/kamalgill/flask-appengine-template/blob/11760f83faccbb0d0afe416fc58e67ecfb4643c2/src/lib/wtforms/utils.py#L41-L42
|
||||
jython/jython3
|
def4f8ec47cb7a9c799ea4c745f12badf92c5769
|
lib-python/3.5.1/fractions.py
|
python
|
gcd
|
(a, b)
|
return _gcd(a, b)
|
Calculate the Greatest Common Divisor of a and b.
Unless b==0, the result will have the same sign as b (so that when
b is divided by it, the result comes out positive).
|
Calculate the Greatest Common Divisor of a and b.
|
[
"Calculate",
"the",
"Greatest",
"Common",
"Divisor",
"of",
"a",
"and",
"b",
"."
] |
def gcd(a, b):
"""Calculate the Greatest Common Divisor of a and b.
Unless b==0, the result will have the same sign as b (so that when
b is divided by it, the result comes out positive).
"""
import warnings
warnings.warn('fractions.gcd() is deprecated. Use math.gcd() instead.',
DeprecationWarning, 2)
if type(a) is int is type(b):
if (b or a) < 0:
return -math.gcd(a, b)
return math.gcd(a, b)
return _gcd(a, b)
|
[
"def",
"gcd",
"(",
"a",
",",
"b",
")",
":",
"import",
"warnings",
"warnings",
".",
"warn",
"(",
"'fractions.gcd() is deprecated. Use math.gcd() instead.'",
",",
"DeprecationWarning",
",",
"2",
")",
"if",
"type",
"(",
"a",
")",
"is",
"int",
"is",
"type",
"(",
"b",
")",
":",
"if",
"(",
"b",
"or",
"a",
")",
"<",
"0",
":",
"return",
"-",
"math",
".",
"gcd",
"(",
"a",
",",
"b",
")",
"return",
"math",
".",
"gcd",
"(",
"a",
",",
"b",
")",
"return",
"_gcd",
"(",
"a",
",",
"b",
")"
] |
https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/lib-python/3.5.1/fractions.py#L17-L30
|
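A sign-behaviour check for the deprecated helper above (fractions.gcd warns on call and was removed entirely in Python 3.9):

from fractions import gcd

print(gcd(4, 6))    # 2
print(gcd(4, -6))   # -2  (result takes b's sign)
print(gcd(-4, 0))   # -4  (b == 0, so the sign follows a)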
|
ym2011/ScanBackdoor
|
3a10de49c3ebd90c2f0eb62304877e00d2a52396
|
util/operation.py
|
python
|
write
|
(filepath, text, type)
|
[] |
def write(filepath, text, type):
file_object = open(filepath, type)
try:
file_object.write(text)
finally:
file_object.close()
|
[
"def",
"write",
"(",
"filepath",
",",
"text",
",",
"type",
")",
":",
"file_object",
"=",
"open",
"(",
"filepath",
",",
"type",
")",
"try",
":",
"file_object",
".",
"write",
"(",
"text",
")",
"finally",
":",
"file_object",
".",
"close",
"(",
")"
] |
https://github.com/ym2011/ScanBackdoor/blob/3a10de49c3ebd90c2f0eb62304877e00d2a52396/util/operation.py#L42-L47
|
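An equivalent context-manager form of the write helper above (note the original's 'type' parameter shadows the builtin; 'mode' is the conventional name):

def write(filepath, text, mode):
    with open(filepath, mode) as f:
        f.write(text)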
||||
psychopy/psychopy
|
01b674094f38d0e0bd51c45a6f66f671d7041696
|
psychopy/visual/radial.py
|
python
|
RadialStim.setMask
|
(self, value, log=None)
|
Usually you can use 'stim.attribute = value' syntax instead,
but use this method if you need to suppress the log message
|
Usually you can use 'stim.attribute = value' syntax instead,
but use this method if you need to suppress the log message
|
[
"Usually",
"you",
"can",
"use",
"stim",
".",
"attribute",
"=",
"value",
"syntax",
"instead",
"but",
"use",
"this",
"method",
"if",
"you",
"need",
"to",
"suppress",
"the",
"log",
"message"
] |
def setMask(self, value, log=None):
"""Usually you can use 'stim.attribute = value' syntax instead,
but use this method if you need to suppress the log message
"""
setAttribute(self, 'mask', value, log)
|
[
"def",
"setMask",
"(",
"self",
",",
"value",
",",
"log",
"=",
"None",
")",
":",
"setAttribute",
"(",
"self",
",",
"'mask'",
",",
"value",
",",
"log",
")"
] |
https://github.com/psychopy/psychopy/blob/01b674094f38d0e0bd51c45a6f66f671d7041696/psychopy/visual/radial.py#L247-L251
|
||
microsoft/unilm
|
65f15af2a307ebb64cfb25adf54375b002e6fe8d
|
infoxlm/fairseq/fairseq/models/__init__.py
|
python
|
register_model
|
(name)
|
return register_model_cls
|
New model types can be added to fairseq with the :func:`register_model`
function decorator.
For example::
@register_model('lstm')
class LSTM(FairseqEncoderDecoderModel):
(...)
.. note:: All models must implement the :class:`BaseFairseqModel` interface.
Typically you will extend :class:`FairseqEncoderDecoderModel` for
sequence-to-sequence tasks or :class:`FairseqLanguageModel` for
language modeling tasks.
Args:
name (str): the name of the model
|
New model types can be added to fairseq with the :func:`register_model`
function decorator.
|
[
"New",
"model",
"types",
"can",
"be",
"added",
"to",
"fairseq",
"with",
"the",
":",
"func",
":",
"register_model",
"function",
"decorator",
"."
] |
def register_model(name):
"""
New model types can be added to fairseq with the :func:`register_model`
function decorator.
For example::
@register_model('lstm')
class LSTM(FairseqEncoderDecoderModel):
(...)
.. note:: All models must implement the :class:`BaseFairseqModel` interface.
Typically you will extend :class:`FairseqEncoderDecoderModel` for
sequence-to-sequence tasks or :class:`FairseqLanguageModel` for
language modeling tasks.
Args:
name (str): the name of the model
"""
def register_model_cls(cls):
if name in MODEL_REGISTRY:
raise ValueError('Cannot register duplicate model ({})'.format(name))
if not issubclass(cls, BaseFairseqModel):
raise ValueError('Model ({}: {}) must extend BaseFairseqModel'.format(name, cls.__name__))
MODEL_REGISTRY[name] = cls
return cls
return register_model_cls
|
[
"def",
"register_model",
"(",
"name",
")",
":",
"def",
"register_model_cls",
"(",
"cls",
")",
":",
"if",
"name",
"in",
"MODEL_REGISTRY",
":",
"raise",
"ValueError",
"(",
"'Cannot register duplicate model ({})'",
".",
"format",
"(",
"name",
")",
")",
"if",
"not",
"issubclass",
"(",
"cls",
",",
"BaseFairseqModel",
")",
":",
"raise",
"ValueError",
"(",
"'Model ({}: {}) must extend BaseFairseqModel'",
".",
"format",
"(",
"name",
",",
"cls",
".",
"__name__",
")",
")",
"MODEL_REGISTRY",
"[",
"name",
"]",
"=",
"cls",
"return",
"cls",
"return",
"register_model_cls"
] |
https://github.com/microsoft/unilm/blob/65f15af2a307ebb64cfb25adf54375b002e6fe8d/infoxlm/fairseq/fairseq/models/__init__.py#L51-L79
|
|
ehuss/Sublime-Wrap-Plus
|
da3b19930840e33598933654c10600309a910b38
|
wrap_plus.py
|
python
|
PrefixStrippingView.line
|
(self, where)
|
return line_r, line
|
Get a line for a point.
:returns: A (region, str) tuple. str has the comment prefix stripped.
Returns None, None if line out of range.
|
Get a line for a point.
|
[
"Get",
"a",
"line",
"for",
"a",
"point",
"."
] |
def line(self, where):
"""Get a line for a point.
:returns: A (region, str) tuple. str has the comment prefix stripped.
Returns None, None if line out of range.
"""
line_r = self.view.line(where)
if line_r.begin() < self.min:
debug('line min increased')
line_r = sublime.Region(self.min, line_r.end())
if line_r.end() > self.max:
debug('line max lowered')
line_r = sublime.Region(line_r.begin(), self.max)
line = self.view.substr(line_r)
debug('line=%r', line)
if self.required_comment_prefix:
debug('checking required comment prefix %r', self.required_comment_prefix)
if line.startswith(self.required_comment_prefix):
# Check for an insufficient prefix.
if self.required_comment_pattern:
m = self.required_comment_pattern.match(line)
if m:
if m.group() != self.required_comment_prefix:
# This might happen, if for example with an email
# comment, we go from one comment level to a
# deeper one (the regex matched more > characters
# than are in required_comment_pattern).
return None, None
else:
# This should never happen (matches the string but not
# the regex?).
return None, None
rcp_len = len(self.required_comment_prefix)
line = line[rcp_len:]
# XXX: Should this also update line_r?
else:
return None, None
return line_r, line
|
[
"def",
"line",
"(",
"self",
",",
"where",
")",
":",
"line_r",
"=",
"self",
".",
"view",
".",
"line",
"(",
"where",
")",
"if",
"line_r",
".",
"begin",
"(",
")",
"<",
"self",
".",
"min",
":",
"debug",
"(",
"'line min increased'",
")",
"line_r",
"=",
"sublime",
".",
"Region",
"(",
"self",
".",
"min",
",",
"line_r",
".",
"end",
"(",
")",
")",
"if",
"line_r",
".",
"end",
"(",
")",
">",
"self",
".",
"max",
":",
"debug",
"(",
"'line max lowered'",
")",
"line_r",
"=",
"sublime",
".",
"Region",
"(",
"line_r",
".",
"begin",
"(",
")",
",",
"self",
".",
"max",
")",
"line",
"=",
"self",
".",
"view",
".",
"substr",
"(",
"line_r",
")",
"debug",
"(",
"'line=%r'",
",",
"line",
")",
"if",
"self",
".",
"required_comment_prefix",
":",
"debug",
"(",
"'checking required comment prefix %r'",
",",
"self",
".",
"required_comment_prefix",
")",
"if",
"line",
".",
"startswith",
"(",
"self",
".",
"required_comment_prefix",
")",
":",
"# Check for an insufficient prefix.",
"if",
"self",
".",
"required_comment_pattern",
":",
"m",
"=",
"self",
".",
"required_comment_pattern",
".",
"match",
"(",
"line",
")",
"if",
"m",
":",
"if",
"m",
".",
"group",
"(",
")",
"!=",
"self",
".",
"required_comment_prefix",
":",
"# This might happen, if for example with an email",
"# comment, we go from one comment level to a",
"# deeper one (the regex matched more > characters",
"# than are in required_comment_pattern).",
"return",
"None",
",",
"None",
"else",
":",
"# This should never happen (matches the string but not",
"# the regex?).",
"return",
"None",
",",
"None",
"rcp_len",
"=",
"len",
"(",
"self",
".",
"required_comment_prefix",
")",
"line",
"=",
"line",
"[",
"rcp_len",
":",
"]",
"# XXX: Should this also update line_r?",
"else",
":",
"return",
"None",
",",
"None",
"return",
"line_r",
",",
"line"
] |
https://github.com/ehuss/Sublime-Wrap-Plus/blob/da3b19930840e33598933654c10600309a910b38/wrap_plus.py#L157-L195
|
|
pytroll/satpy
|
09e51f932048f98cce7919a4ff8bd2ec01e1ae98
|
satpy/readers/fci_l2_nc.py
|
python
|
FciL2NCFileHandler.__init__
|
(self, filename, filename_info, filetype_info)
|
Open the NetCDF file with xarray and prepare for dataset reading.
|
Open the NetCDF file with xarray and prepare for dataset reading.
|
[
"Open",
"the",
"NetCDF",
"file",
"with",
"xarray",
"and",
"prepare",
"for",
"dataset",
"reading",
"."
] |
def __init__(self, filename, filename_info, filetype_info):
"""Open the NetCDF file with xarray and prepare for dataset reading."""
super().__init__(filename, filename_info, filetype_info)
# Use xarray's default netcdf4 engine to open the file
self.nc = xr.open_dataset(
self.filename,
decode_cf=True,
mask_and_scale=True,
chunks={
'number_of_columns': CHUNK_SIZE,
'number_of_rows': CHUNK_SIZE
}
)
# Read metadata which are common to all datasets
self.nlines = self.nc['y'].size
self.ncols = self.nc['x'].size
self._projection = self.nc['mtg_geos_projection']
# Compute the area definition
self._area_def = self._compute_area_def()
|
[
"def",
"__init__",
"(",
"self",
",",
"filename",
",",
"filename_info",
",",
"filetype_info",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"filename",
",",
"filename_info",
",",
"filetype_info",
")",
"# Use xarray's default netcdf4 engine to open the file",
"self",
".",
"nc",
"=",
"xr",
".",
"open_dataset",
"(",
"self",
".",
"filename",
",",
"decode_cf",
"=",
"True",
",",
"mask_and_scale",
"=",
"True",
",",
"chunks",
"=",
"{",
"'number_of_columns'",
":",
"CHUNK_SIZE",
",",
"'number_of_rows'",
":",
"CHUNK_SIZE",
"}",
")",
"# Read metadata which are common to all datasets",
"self",
".",
"nlines",
"=",
"self",
".",
"nc",
"[",
"'y'",
"]",
".",
"size",
"self",
".",
"ncols",
"=",
"self",
".",
"nc",
"[",
"'x'",
"]",
".",
"size",
"self",
".",
"_projection",
"=",
"self",
".",
"nc",
"[",
"'mtg_geos_projection'",
"]",
"# Compute the area definition",
"self",
".",
"_area_def",
"=",
"self",
".",
"_compute_area_def",
"(",
")"
] |
https://github.com/pytroll/satpy/blob/09e51f932048f98cce7919a4ff8bd2ec01e1ae98/satpy/readers/fci_l2_nc.py#L118-L139
|
||
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/logging/config.py
|
python
|
BaseConfigurator.convert
|
(self, value)
|
return value
|
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
|
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
|
[
"Convert",
"values",
"to",
"an",
"appropriate",
"type",
".",
"dicts",
"lists",
"and",
"tuples",
"are",
"replaced",
"by",
"their",
"converting",
"alternatives",
".",
"Strings",
"are",
"checked",
"to",
"see",
"if",
"they",
"have",
"a",
"conversion",
"format",
"and",
"are",
"converted",
"if",
"they",
"do",
"."
] |
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, str): # str for py3k
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
|
[
"def",
"convert",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"ConvertingDict",
")",
"and",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"value",
"=",
"ConvertingDict",
"(",
"value",
")",
"value",
".",
"configurator",
"=",
"self",
"elif",
"not",
"isinstance",
"(",
"value",
",",
"ConvertingList",
")",
"and",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"value",
"=",
"ConvertingList",
"(",
"value",
")",
"value",
".",
"configurator",
"=",
"self",
"elif",
"not",
"isinstance",
"(",
"value",
",",
"ConvertingTuple",
")",
"and",
"isinstance",
"(",
"value",
",",
"tuple",
")",
":",
"value",
"=",
"ConvertingTuple",
"(",
"value",
")",
"value",
".",
"configurator",
"=",
"self",
"elif",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"# str for py3k",
"m",
"=",
"self",
".",
"CONVERT_PATTERN",
".",
"match",
"(",
"value",
")",
"if",
"m",
":",
"d",
"=",
"m",
".",
"groupdict",
"(",
")",
"prefix",
"=",
"d",
"[",
"'prefix'",
"]",
"converter",
"=",
"self",
".",
"value_converters",
".",
"get",
"(",
"prefix",
",",
"None",
")",
"if",
"converter",
":",
"suffix",
"=",
"d",
"[",
"'suffix'",
"]",
"converter",
"=",
"getattr",
"(",
"self",
",",
"converter",
")",
"value",
"=",
"converter",
"(",
"suffix",
")",
"return",
"value"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/logging/config.py#L436-L462
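The prefixes this converter dispatches on are the standard `ext://` (import an external object) and `cfg://` (reference another part of the config) forms. A minimal sketch of how they surface through `dictConfig`; the handler layout is illustrative:

import logging.config

config = {
    "version": 1,
    "handlers": {
        # convert() resolves "ext://sys.stdout" to the actual stream object.
        "console": {
            "class": "logging.StreamHandler",
            "stream": "ext://sys.stdout",
        },
    },
    "root": {"level": "INFO", "handlers": ["console"]},
}
logging.config.dictConfig(config)
logging.getLogger(__name__).info("stream resolved via ext://")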
|
|
CouchPotato/CouchPotatoServer
|
7260c12f72447ddb6f062367c6dfbda03ecd4e9c
|
libs/suds/mx/core.py
|
python
|
Core.setnil
|
(self, node, content)
|
Set the value of the I{node} to nill.
@param node: A I{nil} node.
@type node: L{Element}
@param content: The content to set nil.
@type content: L{Content}
|
Set the value of the I{node} to nill.
|
[
"Set",
"the",
"value",
"of",
"the",
"I",
"{",
"node",
"}",
"to",
"nill",
"."
] |
def setnil(self, node, content):
"""
Set the value of the I{node} to nill.
@param node: A I{nil} node.
@type node: L{Element}
@param content: The content to set nil.
@type content: L{Content}
"""
pass
|
[
"def",
"setnil",
"(",
"self",
",",
"node",
",",
"content",
")",
":",
"pass"
] |
https://github.com/CouchPotato/CouchPotatoServer/blob/7260c12f72447ddb6f062367c6dfbda03ecd4e9c/libs/suds/mx/core.py#L130-L138
|
||
BlackLight/platypush
|
a6b552504e2ac327c94f3a28b607061b6b60cf36
|
platypush/plugins/light/hue/__init__.py
|
python
|
LightHuePlugin.on
|
(self, lights=None, groups=None, **kwargs)
|
return self._exec('on', True, lights=lights, groups=groups, **kwargs)
|
Turn lights/groups on.
:param lights: Lights to turn on (names or light objects). Default: plugin default lights
:param groups: Groups to turn on (names or group objects). Default: plugin default groups
|
Turn lights/groups on.
|
[
"Turn",
"lights",
"/",
"groups",
"on",
"."
] |
def on(self, lights=None, groups=None, **kwargs):
"""
Turn lights/groups on.
:param lights: Lights to turn on (names or light objects). Default: plugin default lights
:param groups: Groups to turn on (names or group objects). Default: plugin default groups
"""
if groups is None:
groups = []
if lights is None:
lights = []
return self._exec('on', True, lights=lights, groups=groups, **kwargs)
|
[
"def",
"on",
"(",
"self",
",",
"lights",
"=",
"None",
",",
"groups",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"groups",
"is",
"None",
":",
"groups",
"=",
"[",
"]",
"if",
"lights",
"is",
"None",
":",
"lights",
"=",
"[",
"]",
"return",
"self",
".",
"_exec",
"(",
"'on'",
",",
"True",
",",
"lights",
"=",
"lights",
",",
"groups",
"=",
"groups",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/BlackLight/platypush/blob/a6b552504e2ac327c94f3a28b607061b6b60cf36/platypush/plugins/light/hue/__init__.py#L406-L418
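The `lights=None` / `groups=None` defaults replaced by fresh lists inside the body are the standard guard against Python's shared-mutable-default pitfall; a self-contained sketch of the idiom:

def on(lights=None, groups=None):
    # A `lights=[]` default would be created once at definition time and
    # shared across every call; a fresh list per call avoids that.
    lights = [] if lights is None else lights
    groups = [] if groups is None else groups
    return lights, groups

assert on() == ([], [])
assert on(lights=["kitchen"]) == (["kitchen"], [])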
|
|
mysql/mysql-connector-python
|
c5460bcbb0dff8e4e48bf4af7a971c89bf486d85
|
lib/mysqlx/statement.py
|
python
|
FindStatement.sort
|
(self, *clauses)
|
return self._sort(*clauses)
|
Sets the sorting criteria.
Args:
*clauses: The expression strings defining the sort criteria.
Returns:
mysqlx.FindStatement: FindStatement object.
|
Sets the sorting criteria.
|
[
"Sets",
"the",
"sorting",
"criteria",
"."
] |
def sort(self, *clauses):
"""Sets the sorting criteria.
Args:
*clauses: The expression strings defining the sort criteria.
Returns:
mysqlx.FindStatement: FindStatement object.
"""
return self._sort(*clauses)
|
[
"def",
"sort",
"(",
"self",
",",
"*",
"clauses",
")",
":",
"return",
"self",
".",
"_sort",
"(",
"*",
"clauses",
")"
] |
https://github.com/mysql/mysql-connector-python/blob/c5460bcbb0dff8e4e48bf4af7a971c89bf486d85/lib/mysqlx/statement.py#L1036-L1045
|
|
google/capirca
|
679e3885e3a5e5e129dc2dfab204ec44d63b26a4
|
capirca/lib/policy.py
|
python
|
Term.GetAddressOfVersion
|
(self, addr_type, af=None)
|
return [x for x in getattr(self, addr_type) if x.version == af]
|
Returns addresses of the appropriate Address Family.
Args:
addr_type: string, this will be either
'source_address', 'source_address_exclude',
'destination_address' or 'destination_address_exclude'
af: int or None, either 4 for IPv4 or 6 for IPv6
Returns:
list of addresses of the correct family.
|
Returns addresses of the appropriate Address Family.
|
[
"Returns",
"addresses",
"of",
"the",
"appropriate",
"Address",
"Family",
"."
] |
def GetAddressOfVersion(self, addr_type, af=None):
"""Returns addresses of the appropriate Address Family.
Args:
addr_type: string, this will be either
'source_address', 'source_address_exclude',
'destination_address' or 'destination_address_exclude'
af: int or None, either 4 for IPv4 or 6 for IPv6
Returns:
list of addresses of the correct family.
"""
if not af:
return getattr(self, addr_type)
return [x for x in getattr(self, addr_type) if x.version == af]
|
[
"def",
"GetAddressOfVersion",
"(",
"self",
",",
"addr_type",
",",
"af",
"=",
"None",
")",
":",
"if",
"not",
"af",
":",
"return",
"getattr",
"(",
"self",
",",
"addr_type",
")",
"return",
"[",
"x",
"for",
"x",
"in",
"getattr",
"(",
"self",
",",
"addr_type",
")",
"if",
"x",
".",
"version",
"==",
"af",
"]"
] |
https://github.com/google/capirca/blob/679e3885e3a5e5e129dc2dfab204ec44d63b26a4/capirca/lib/policy.py#L1010-L1025
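The same filter-by-address-family idiom, demonstrated with the standard-library ipaddress module (an assumption here: capirca's address objects expose a `.version` of 4 or 6 the way ipaddress networks do):

import ipaddress

addrs = [ipaddress.ip_network("10.0.0.0/8"),
         ipaddress.ip_network("2001:db8::/32")]

def of_version(addresses, af=None):
    if not af:                 # no family given: return everything, as above
        return addresses
    return [a for a in addresses if a.version == af]

assert [str(a) for a in of_version(addrs, 4)] == ["10.0.0.0/8"]
assert [str(a) for a in of_version(addrs, 6)] == ["2001:db8::/32"]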
|
|
taseikyo/PyQt5-Apps
|
8c715edd3710f413932d982f8e2e24ea9ec6e9bd
|
cat-calendar/src/main.py
|
python
|
MWin.error
|
(self, msg)
|
[] |
def error(self, msg):
QMessageBox.information(self, 'Cat Calendar', msg, QMessageBox.Ok)
|
[
"def",
"error",
"(",
"self",
",",
"msg",
")",
":",
"QMessageBox",
".",
"information",
"(",
"self",
",",
"'Cat Calendar'",
",",
"msg",
",",
"QMessageBox",
".",
"Ok",
")"
] |
https://github.com/taseikyo/PyQt5-Apps/blob/8c715edd3710f413932d982f8e2e24ea9ec6e9bd/cat-calendar/src/main.py#L69-L70
|
||||
zigpy/zigpy
|
db10b078874d93ad1c546ec810706c2e5dc33d7f
|
zigpy/ota/image.py
|
python
|
HeaderString.deserialize
|
(cls, data)
|
return cls(raw.decode("utf8", errors="replace")), data[cls._size :]
|
[] |
def deserialize(cls, data):
if len(data) < cls._size:
raise ValueError(f"Data is too short. Should be at least {cls._size}")
raw = data[: cls._size].split(b"\x00")[0]
return cls(raw.decode("utf8", errors="replace")), data[cls._size :]
|
[
"def",
"deserialize",
"(",
"cls",
",",
"data",
")",
":",
"if",
"len",
"(",
"data",
")",
"<",
"cls",
".",
"_size",
":",
"raise",
"ValueError",
"(",
"f\"Data is too short. Should be at least {cls._size}\"",
")",
"raw",
"=",
"data",
"[",
":",
"cls",
".",
"_size",
"]",
".",
"split",
"(",
"b\"\\x00\"",
")",
"[",
"0",
"]",
"return",
"cls",
"(",
"raw",
".",
"decode",
"(",
"\"utf8\"",
",",
"errors",
"=",
"\"replace\"",
")",
")",
",",
"data",
"[",
"cls",
".",
"_size",
":",
"]"
] |
https://github.com/zigpy/zigpy/blob/db10b078874d93ad1c546ec810706c2e5dc33d7f/zigpy/ota/image.py#L40-L44
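The method parses a fixed-width, NUL-padded string field and hands back the unconsumed remainder. A standalone sketch of the same idiom; the 32-byte width mirrors zigpy's OTA header string but should be treated as an assumption:

_SIZE = 32  # assumed fixed field width

def deserialize(data: bytes):
    if len(data) < _SIZE:
        raise ValueError(f"Data is too short. Should be at least {_SIZE}")
    raw = data[:_SIZE].split(b"\x00")[0]     # strip NUL padding
    return raw.decode("utf8", errors="replace"), data[_SIZE:]

text, rest = deserialize(b"EBL tradfri" + b"\x00" * 21 + b"\xaa")
assert text == "EBL tradfri" and rest == b"\xaa"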
|
|||
yandex/yandex-tank
|
b41bcc04396c4ed46fc8b28a261197320854fd33
|
yandextank/plugins/DataUploader/plugin.py
|
python
|
Plugin.__uploader
|
(self, queue, sender_method, name='Uploader')
|
[] |
def __uploader(self, queue, sender_method, name='Uploader'):
logger.info('{} thread started'.format(name))
while not self.lp_job.interrupted.is_set():
try:
entry = queue.get(timeout=1)
if entry is None:
logger.info("{} queue returned None".format(name))
break
sender_method(entry)
except Empty:
continue
except APIClient.StoppedFromOnline:
logger.warning("Lunapark is rejecting {} data".format(name))
break
except (APIClient.NetworkError, APIClient.NotAvailable, APIClient.UnderMaintenance) as e:
logger.warn('Failed to push {} data'.format(name))
logger.warn(e)
self.lp_job.interrupted.set()
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error("Mysterious exception:\n%s\n%s\n%s", (exc_type, exc_value, exc_traceback))
break
# purge queue
while not queue.empty():
if queue.get_nowait() is None:
break
logger.info("Closing {} thread".format(name))
|
[
"def",
"__uploader",
"(",
"self",
",",
"queue",
",",
"sender_method",
",",
"name",
"=",
"'Uploader'",
")",
":",
"logger",
".",
"info",
"(",
"'{} thread started'",
".",
"format",
"(",
"name",
")",
")",
"while",
"not",
"self",
".",
"lp_job",
".",
"interrupted",
".",
"is_set",
"(",
")",
":",
"try",
":",
"entry",
"=",
"queue",
".",
"get",
"(",
"timeout",
"=",
"1",
")",
"if",
"entry",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"\"{} queue returned None\"",
".",
"format",
"(",
"name",
")",
")",
"break",
"sender_method",
"(",
"entry",
")",
"except",
"Empty",
":",
"continue",
"except",
"APIClient",
".",
"StoppedFromOnline",
":",
"logger",
".",
"warning",
"(",
"\"Lunapark is rejecting {} data\"",
".",
"format",
"(",
"name",
")",
")",
"break",
"except",
"(",
"APIClient",
".",
"NetworkError",
",",
"APIClient",
".",
"NotAvailable",
",",
"APIClient",
".",
"UnderMaintenance",
")",
"as",
"e",
":",
"logger",
".",
"warn",
"(",
"'Failed to push {} data'",
".",
"format",
"(",
"name",
")",
")",
"logger",
".",
"warn",
"(",
"e",
")",
"self",
".",
"lp_job",
".",
"interrupted",
".",
"set",
"(",
")",
"except",
"Exception",
":",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
"=",
"sys",
".",
"exc_info",
"(",
")",
"logger",
".",
"error",
"(",
"\"Mysterious exception:\\n%s\\n%s\\n%s\"",
",",
"(",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
")",
")",
"break",
"# purge queue",
"while",
"not",
"queue",
".",
"empty",
"(",
")",
":",
"if",
"queue",
".",
"get_nowait",
"(",
")",
"is",
"None",
":",
"break",
"logger",
".",
"info",
"(",
"\"Closing {} thread\"",
".",
"format",
"(",
"name",
")",
")"
] |
https://github.com/yandex/yandex-tank/blob/b41bcc04396c4ed46fc8b28a261197320854fd33/yandextank/plugins/DataUploader/plugin.py#L405-L431
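The loop above is a sentinel-terminated queue consumer: block briefly on `get`, loop on `Empty`, stop on `None`. A minimal, runnable sketch of that skeleton (names are illustrative, not yandex-tank's API):

import logging
from queue import Empty, Queue

logger = logging.getLogger("uploader")

def uploader(queue, send, name="Uploader"):
    while True:
        try:
            entry = queue.get(timeout=1)
        except Empty:
            continue                    # nothing yet; poll again
        if entry is None:               # sentinel: producer is finished
            break
        try:
            send(entry)
        except Exception as exc:
            # Format args are passed separately, one per %s placeholder.
            logger.error("Failed to push %s data: %s", name, exc)
            break

q = Queue()
for item in ("a", "b", None):
    q.put(item)
uploader(q, send=print)                 # prints "a" then "b", then exits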
|
||||
wzpan/dingdang-robot
|
66d95402232a9102e223a2d8ccefcb83500d2c6a
|
client/diagnose.py
|
python
|
check_python_import
|
(package_or_module)
|
return found
|
Checks if a python package or module is importable.
Arguments:
package_or_module -- the package or module name to check
Returns:
True or False
|
Checks if a python package or module is importable.
|
[
"Checks",
"if",
"a",
"python",
"package",
"or",
"module",
"is",
"importable",
"."
] |
def check_python_import(package_or_module):
"""
Checks if a python package or module is importable.
Arguments:
package_or_module -- the package or module name to check
Returns:
True or False
"""
logger = logging.getLogger(__name__)
logger.debug("Checking python import '%s'...", package_or_module)
loader = pkgutil.get_loader(package_or_module)
found = loader is not None
if found:
logger.debug("Python %s '%s' found: %r",
"package" if loader.is_package(package_or_module)
else "module", package_or_module, loader.get_filename())
else:
logger.debug("Python import '%s' not found", package_or_module)
return found
|
[
"def",
"check_python_import",
"(",
"package_or_module",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"\"Checking python import '%s'...\"",
",",
"package_or_module",
")",
"loader",
"=",
"pkgutil",
".",
"get_loader",
"(",
"package_or_module",
")",
"found",
"=",
"loader",
"is",
"not",
"None",
"if",
"found",
":",
"logger",
".",
"debug",
"(",
"\"Python %s '%s' found: %r\"",
",",
"\"package\"",
"if",
"loader",
".",
"is_package",
"(",
"package_or_module",
")",
"else",
"\"module\"",
",",
"package_or_module",
",",
"loader",
".",
"get_filename",
"(",
")",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Python import '%s' not found\"",
",",
"package_or_module",
")",
"return",
"found"
] |
https://github.com/wzpan/dingdang-robot/blob/66d95402232a9102e223a2d8ccefcb83500d2c6a/client/diagnose.py#L70-L90
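`pkgutil.get_loader` works but is deprecated in recent Python (3.12+); the same importability check can be written with `importlib.util.find_spec`, which likewise does not execute the module:

import importlib.util

def can_import(name: str) -> bool:
    try:
        return importlib.util.find_spec(name) is not None
    except (ImportError, ValueError):
        return False                     # e.g. missing parent package

assert can_import("json")
assert not can_import("definitely_not_a_module")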
|
|
buke/GreenOdoo
|
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
|
runtime/python/lib/python2.7/site-packages/PyWebDAV-0.9.8-py2.7.egg/pywebdav/lib/WebDAVServer.py
|
python
|
DAVRequestHandler.do_COPY
|
(self)
|
copy one resource to another
|
copy one resource to another
|
[
"copy",
"one",
"resource",
"to",
"another"
] |
def do_COPY(self):
""" copy one resource to another """
try:
self.copymove(COPY)
except DAV_Error, (ec, dd):
return self.send_status(ec)
|
[
"def",
"do_COPY",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"copymove",
"(",
"COPY",
")",
"except",
"DAV_Error",
",",
"(",
"ec",
",",
"dd",
")",
":",
"return",
"self",
".",
"send_status",
"(",
"ec",
")"
] |
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/PyWebDAV-0.9.8-py2.7.egg/pywebdav/lib/WebDAVServer.py#L633-L638
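The `except DAV_Error, (ec, dd):` clause is Python 2 tuple-unpacking syntax (this file lives under a python2.7 tree). Under Python 3 the equivalent unpacks `args` explicitly; a self-contained sketch with a stand-in exception:

class DAV_Error(Exception):
    pass

def do_copy():
    try:
        raise DAV_Error(423, "Locked")   # simulate a failing copy
    except DAV_Error as err:             # py3 form of `except DAV_Error, (ec, dd)`
        ec, dd = err.args                # unpack status code and detail
        return ec

assert do_copy() == 423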
|
||
nosmokingbandit/Watcher3
|
0217e75158b563bdefc8e01c3be7620008cf3977
|
core/providers/torrent_modules/rarbg.py
|
python
|
_parse
|
(results, imdbid=None)
|
return parsed_results
|
Parse api response
results (list): dicts of releases
Returns list of dicts
|
Parse api response
results (list): dicts of releases
|
[
"Parse",
"api",
"response",
"results",
"(",
"list",
")",
":",
"dicts",
"of",
"releases"
] |
def _parse(results, imdbid=None):
''' Parse api response
results (list): dicts of releases
Returns list of dicts
'''
logging.info('Parsing {} Rarbg results.'.format(len(results)))
item_keep = ('size', 'pubdate', 'title', 'indexer', 'info_link', 'guid', 'torrentfile', 'resolution', 'type', 'seeders')
parsed_results = []
for result in results:
result['indexer'] = 'Rarbg'
result['info_link'] = result['info_page']
result['torrentfile'] = result['download']
result['guid'] = result['download'].split('&')[0].split(':')[-1]
result['type'] = 'magnet'
result['pubdate'] = None
result = {k: v for k, v in result.items() if k in item_keep}
result['imdbid'] = imdbid or result.get('episode_info', {}).get('imdb')
result['status'] = 'Available'
result['score'] = 0
result['downloadid'] = None
result['freeleech'] = 0
result['download_client'] = None
parsed_results.append(result)
logging.info('Found {} results from Rarbg'.format(len(parsed_results)))
return parsed_results
|
[
"def",
"_parse",
"(",
"results",
",",
"imdbid",
"=",
"None",
")",
":",
"logging",
".",
"info",
"(",
"'Parsing {} Rarbg results.'",
".",
"format",
"(",
"len",
"(",
"results",
")",
")",
")",
"item_keep",
"=",
"(",
"'size'",
",",
"'pubdate'",
",",
"'title'",
",",
"'indexer'",
",",
"'info_link'",
",",
"'guid'",
",",
"'torrentfile'",
",",
"'resolution'",
",",
"'type'",
",",
"'seeders'",
")",
"parsed_results",
"=",
"[",
"]",
"for",
"result",
"in",
"results",
":",
"result",
"[",
"'indexer'",
"]",
"=",
"'Rarbg'",
"result",
"[",
"'info_link'",
"]",
"=",
"result",
"[",
"'info_page'",
"]",
"result",
"[",
"'torrentfile'",
"]",
"=",
"result",
"[",
"'download'",
"]",
"result",
"[",
"'guid'",
"]",
"=",
"result",
"[",
"'download'",
"]",
".",
"split",
"(",
"'&'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"':'",
")",
"[",
"-",
"1",
"]",
"result",
"[",
"'type'",
"]",
"=",
"'magnet'",
"result",
"[",
"'pubdate'",
"]",
"=",
"None",
"result",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"result",
".",
"items",
"(",
")",
"if",
"k",
"in",
"item_keep",
"}",
"result",
"[",
"'imdbid'",
"]",
"=",
"imdbid",
"or",
"result",
".",
"get",
"(",
"'episode_info'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'imdb'",
")",
"result",
"[",
"'status'",
"]",
"=",
"'Available'",
"result",
"[",
"'score'",
"]",
"=",
"0",
"result",
"[",
"'downloadid'",
"]",
"=",
"None",
"result",
"[",
"'freeleech'",
"]",
"=",
"0",
"result",
"[",
"'download_client'",
"]",
"=",
"None",
"parsed_results",
".",
"append",
"(",
"result",
")",
"logging",
".",
"info",
"(",
"'Found {} results from '",
".",
"format",
"(",
"len",
"(",
"parsed_results",
")",
")",
")",
"return",
"parsed_results"
] |
https://github.com/nosmokingbandit/Watcher3/blob/0217e75158b563bdefc8e01c3be7620008cf3977/core/providers/torrent_modules/rarbg.py#L136-L167
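The normalization step whitelists keys with a dict comprehension and then layers computed fields back on; a minimal sketch of that shape with made-up data:

item_keep = ("title", "size", "seeders")

raw = {"title": "Example", "size": 1234, "seeders": 10, "download": "magnet:?xt=..."}
result = {k: v for k, v in raw.items() if k in item_keep}   # drop everything else
result["status"] = "Available"                              # re-add derived fields
assert set(result) == {"title", "size", "seeders", "status"}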
|
|
CvvT/dumpDex
|
92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1
|
python/idaapi.py
|
python
|
get_encoding_name
|
(*args)
|
return _idaapi.get_encoding_name(*args)
|
get_encoding_name(idx) -> char const *
|
get_encoding_name(idx) -> char const *
|
[
"get_encoding_name",
"(",
"idx",
")",
"-",
">",
"char",
"const",
"*"
] |
def get_encoding_name(*args):
"""
get_encoding_name(idx) -> char const *
"""
return _idaapi.get_encoding_name(*args)
|
[
"def",
"get_encoding_name",
"(",
"*",
"args",
")",
":",
"return",
"_idaapi",
".",
"get_encoding_name",
"(",
"*",
"args",
")"
] |
https://github.com/CvvT/dumpDex/blob/92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1/python/idaapi.py#L7357-L7361
|
|
ManyFace/ExtractDexFromOat
|
cec0a4230ecdab41188c1ea99fbda8bb6bbc84b2
|
util/util.py
|
python
|
md5sum
|
(data)
|
return md5.hexdigest().upper()
|
[] |
def md5sum(data):
md5 = hashlib.md5()
md5.update(data)
return md5.hexdigest().upper()
|
[
"def",
"md5sum",
"(",
"data",
")",
":",
"md5",
"=",
"hashlib",
".",
"md5",
"(",
")",
"md5",
".",
"update",
"(",
"data",
")",
"return",
"md5",
".",
"hexdigest",
"(",
")",
".",
"upper",
"(",
")"
] |
https://github.com/ManyFace/ExtractDexFromOat/blob/cec0a4230ecdab41188c1ea99fbda8bb6bbc84b2/util/util.py#L8-L11
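Usage note: `hashlib.md5` takes bytes, and the hash can be seeded at construction instead of via a separate `update` call; for example:

import hashlib

def md5sum(data: bytes) -> str:
    return hashlib.md5(data).hexdigest().upper()

# MD5 of the empty input is a well-known constant.
assert md5sum(b"") == "D41D8CD98F00B204E9800998ECF8427E"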
|
|||
Kozea/cairocffi
|
2473d1bb82a52ca781edec595a95951509db2969
|
cairocffi/patterns.py
|
python
|
Pattern.get_filter
|
(self)
|
return cairo.cairo_pattern_get_filter(self._pointer)
|
Return the current filter string for this pattern.
See :ref:`FILTER` for details on each filter.
|
Return the current filter string for this pattern.
See :ref:`FILTER` for details on each filter.
|
[
"Return",
"the",
"current",
"filter",
"string",
"for",
"this",
"pattern",
".",
"See",
":",
"ref",
":",
"FILTER",
"for",
"details",
"on",
"each",
"filter",
"."
] |
def get_filter(self):
"""Return the current filter string for this pattern.
See :ref:`FILTER` for details on each filter.
"""
return cairo.cairo_pattern_get_filter(self._pointer)
|
[
"def",
"get_filter",
"(",
"self",
")",
":",
"return",
"cairo",
".",
"cairo_pattern_get_filter",
"(",
"self",
".",
"_pointer",
")"
] |
https://github.com/Kozea/cairocffi/blob/2473d1bb82a52ca781edec595a95951509db2969/cairocffi/patterns.py#L104-L109
|
|
freyja-dev/unity-tweak-tool
|
ad254288d2e0fed94c59277de3f21ef399e3197f
|
UnityTweakTool/section/spaghetti/compiz.py
|
python
|
Compizsettings.on_sw_compiz_zoom_active_notify
|
(self, widget, udata = None)
|
[] |
def on_sw_compiz_zoom_active_notify(self, widget, udata = None):
dependants = ['scrolledwindow_compiz_general_zoom']
plugins = gsettings.core.get_strv('active-plugins')
if self.ui['sw_compiz_zoom'].get_active() == True:
self.ui.sensitize(dependants)
if 'ezoom' not in plugins:
plugins.append('ezoom')
gsettings.core.set_strv('active-plugins', plugins)
else:
self.ui.unsensitize(dependants)
if 'ezoom' in plugins:
plugins.remove('ezoom')
gsettings.core.set_strv('active-plugins', plugins)
|
[
"def",
"on_sw_compiz_zoom_active_notify",
"(",
"self",
",",
"widget",
",",
"udata",
"=",
"None",
")",
":",
"dependants",
"=",
"[",
"'scrolledwindow_compiz_general_zoom'",
"]",
"plugins",
"=",
"gsettings",
".",
"core",
".",
"get_strv",
"(",
"'active-plugins'",
")",
"if",
"self",
".",
"ui",
"[",
"'sw_compiz_zoom'",
"]",
".",
"get_active",
"(",
")",
"==",
"True",
":",
"self",
".",
"ui",
".",
"sensitize",
"(",
"dependants",
")",
"if",
"'ezoom'",
"not",
"in",
"plugins",
":",
"plugins",
".",
"append",
"(",
"'ezoom'",
")",
"gsettings",
".",
"core",
".",
"set_strv",
"(",
"'active-plugins'",
",",
"plugins",
")",
"else",
":",
"self",
".",
"ui",
".",
"unsensitize",
"(",
"dependants",
")",
"if",
"'ezoom'",
"in",
"plugins",
":",
"plugins",
".",
"remove",
"(",
"'ezoom'",
")",
"gsettings",
".",
"core",
".",
"set_strv",
"(",
"'active-plugins'",
",",
"plugins",
")"
] |
https://github.com/freyja-dev/unity-tweak-tool/blob/ad254288d2e0fed94c59277de3f21ef399e3197f/UnityTweakTool/section/spaghetti/compiz.py#L540-L552
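Both branches implement an idempotent membership toggle on the `active-plugins` list: append only when absent, remove only when present, so repeated notifications cannot duplicate or over-remove entries. The core of the idiom, minus the GSettings calls:

def set_plugin(plugins, name, enabled):
    if enabled and name not in plugins:
        plugins.append(name)             # add at most once
    elif not enabled and name in plugins:
        plugins.remove(name)             # remove only if present
    return plugins

assert set_plugin(["core"], "ezoom", True) == ["core", "ezoom"]
assert set_plugin(["core", "ezoom"], "ezoom", False) == ["core"]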
|
||||
DeepLabCut/DeepLabCut
|
1dd14c54729ae0d8e66ca495aa5baeb83502e1c7
|
deeplabcut/pose_estimation_tensorflow/export.py
|
python
|
load_model
|
(cfg, shuffle=1, trainingsetindex=0, TFGPUinference=True, modelprefix="")
|
return sess, input, output, dlc_cfg
|
Loads a tensorflow session with a DLC model from the associated configuration
Return a tensorflow session with DLC model given cfg and shuffle
Parameters:
-----------
cfg : dict
Configuration read from the project's main config.yaml file
shuffle : int, optional
which shuffle to use
trainingsetindex : int. optional
which training fraction to use, identified by its index
TFGPUinference : bool, optional
use tensorflow inference model? default = True
Returns:
--------
sess : tensorflow session
tensorflow session with DLC model from the provided configuration, shuffle, and trainingsetindex
checkpoint file path : string
the path to the checkpoint file associated with the loaded model
|
[] |
def load_model(cfg, shuffle=1, trainingsetindex=0, TFGPUinference=True, modelprefix=""):
"""
Loads a tensorflow session with a DLC model from the associated configuration
Return a tensorflow session with DLC model given cfg and shuffle
Parameters:
-----------
cfg : dict
Configuration read from the project's main config.yaml file
shuffle : int, optional
which shuffle to use
trainingsetindex : int. optional
which training fraction to use, identified by its index
TFGPUinference : bool, optional
use tensorflow inference model? default = True
Returns:
--------
sess : tensorflow session
tensorflow session with DLC model from the provided configuration, shuffle, and trainingsetindex
checkpoint file path : string
the path to the checkpoint file associated with the loaded model
"""
########################
### find snapshot to use
########################
train_fraction = cfg["TrainingFraction"][trainingsetindex]
model_folder = os.path.join(
cfg["project_path"],
str(
auxiliaryfunctions.GetModelFolder(
train_fraction, shuffle, cfg, modelprefix=modelprefix
)
),
)
path_test_config = os.path.normpath(model_folder + "/test/pose_cfg.yaml")
path_train_config = os.path.normpath(model_folder + "/train/pose_cfg.yaml")
try:
dlc_cfg = load_config(str(path_train_config))
# dlc_cfg_train = load_config(str(path_train_config))
except FileNotFoundError:
raise FileNotFoundError(
"It seems the model for shuffle %s and trainFraction %s does not exist."
% (shuffle, train_fraction)
)
# Check which snapshots are available and sort them by # iterations
try:
Snapshots = np.array(
[
fn.split(".")[0]
for fn in os.listdir(os.path.join(model_folder, "train"))
if "index" in fn
]
)
except FileNotFoundError:
raise FileNotFoundError(
"Snapshots not found! It seems the dataset for shuffle %s has not been trained/does not exist.\n Please train it before trying to export.\n Use the function 'train_network' to train the network for shuffle %s."
% (shuffle, shuffle)
)
if len(Snapshots) == 0:
raise FileNotFoundError(
"The train folder for iteration %s and shuffle %s exists, but no snapshots were found.\n Please train this model before trying to export.\n Use the function 'train_network' to train the network for iteration %s shuffle %s."
% (cfg["iteration"], shuffle, cfg["iteration"], shuffle)
)
if cfg["snapshotindex"] == "all":
print(
"Snapshotindex is set to 'all' in the config.yaml file. Changing snapshot index to -1!"
)
snapshotindex = -1
else:
snapshotindex = cfg["snapshotindex"]
increasing_indices = np.argsort([int(m.split("-")[1]) for m in Snapshots])
Snapshots = Snapshots[increasing_indices]
####################################
### Load and setup CNN part detector
####################################
# Check if data already was generated:
dlc_cfg["init_weights"] = os.path.join(
model_folder, "train", Snapshots[snapshotindex]
)
trainingsiterations = (dlc_cfg["init_weights"].split(os.sep)[-1]).split("-")[-1]
dlc_cfg["num_outputs"] = cfg.get("num_outputs", dlc_cfg.get("num_outputs", 1))
dlc_cfg["batch_size"] = None
# load network
if TFGPUinference:
sess, _, _ = predict.setup_GPUpose_prediction(dlc_cfg)
output = ["concat_1"]
else:
sess, _, _ = predict.setup_pose_prediction(dlc_cfg)
if dlc_cfg["location_refinement"]:
output = ["Sigmoid", "pose/locref_pred/block4/BiasAdd"]
else:
output = ["Sigmoid", "pose/part_pred/block4/BiasAdd"]
input = tf.compat.v1.get_default_graph().get_operations()[0].name
return sess, input, output, dlc_cfg
|
[
"def",
"load_model",
"(",
"cfg",
",",
"shuffle",
"=",
"1",
",",
"trainingsetindex",
"=",
"0",
",",
"TFGPUinference",
"=",
"True",
",",
"modelprefix",
"=",
"\"\"",
")",
":",
"########################",
"### find snapshot to use",
"########################",
"train_fraction",
"=",
"cfg",
"[",
"\"TrainingFraction\"",
"]",
"[",
"trainingsetindex",
"]",
"model_folder",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cfg",
"[",
"\"project_path\"",
"]",
",",
"str",
"(",
"auxiliaryfunctions",
".",
"GetModelFolder",
"(",
"train_fraction",
",",
"shuffle",
",",
"cfg",
",",
"modelprefix",
"=",
"modelprefix",
")",
")",
",",
")",
"path_test_config",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"model_folder",
"+",
"\"/test/pose_cfg.yaml\"",
")",
"path_train_config",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"model_folder",
"+",
"\"/train/pose_cfg.yaml\"",
")",
"try",
":",
"dlc_cfg",
"=",
"load_config",
"(",
"str",
"(",
"path_train_config",
")",
")",
"# dlc_cfg_train = load_config(str(path_train_config))",
"except",
"FileNotFoundError",
":",
"raise",
"FileNotFoundError",
"(",
"\"It seems the model for shuffle %s and trainFraction %s does not exist.\"",
"%",
"(",
"shuffle",
",",
"train_fraction",
")",
")",
"# Check which snapshots are available and sort them by # iterations",
"try",
":",
"Snapshots",
"=",
"np",
".",
"array",
"(",
"[",
"fn",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"for",
"fn",
"in",
"os",
".",
"listdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"model_folder",
",",
"\"train\"",
")",
")",
"if",
"\"index\"",
"in",
"fn",
"]",
")",
"except",
"FileNotFoundError",
":",
"raise",
"FileNotFoundError",
"(",
"\"Snapshots not found! It seems the dataset for shuffle %s has not been trained/does not exist.\\n Please train it before trying to export.\\n Use the function 'train_network' to train the network for shuffle %s.\"",
"%",
"(",
"shuffle",
",",
"shuffle",
")",
")",
"if",
"len",
"(",
"Snapshots",
")",
"==",
"0",
":",
"raise",
"FileNotFoundError",
"(",
"\"The train folder for iteration %s and shuffle %s exists, but no snapshots were found.\\n Please train this model before trying to export.\\n Use the function 'train_network' to train the network for iteration %s shuffle %s.\"",
"%",
"(",
"cfg",
"[",
"\"iteration\"",
"]",
",",
"shuffle",
",",
"cfg",
"[",
"\"iteration\"",
"]",
",",
"shuffle",
")",
")",
"if",
"cfg",
"[",
"\"snapshotindex\"",
"]",
"==",
"\"all\"",
":",
"print",
"(",
"\"Snapshotindex is set to 'all' in the config.yaml file. Changing snapshot index to -1!\"",
")",
"snapshotindex",
"=",
"-",
"1",
"else",
":",
"snapshotindex",
"=",
"cfg",
"[",
"\"snapshotindex\"",
"]",
"increasing_indices",
"=",
"np",
".",
"argsort",
"(",
"[",
"int",
"(",
"m",
".",
"split",
"(",
"\"-\"",
")",
"[",
"1",
"]",
")",
"for",
"m",
"in",
"Snapshots",
"]",
")",
"Snapshots",
"=",
"Snapshots",
"[",
"increasing_indices",
"]",
"####################################",
"### Load and setup CNN part detector",
"####################################",
"# Check if data already was generated:",
"dlc_cfg",
"[",
"\"init_weights\"",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"model_folder",
",",
"\"train\"",
",",
"Snapshots",
"[",
"snapshotindex",
"]",
")",
"trainingsiterations",
"=",
"(",
"dlc_cfg",
"[",
"\"init_weights\"",
"]",
".",
"split",
"(",
"os",
".",
"sep",
")",
"[",
"-",
"1",
"]",
")",
".",
"split",
"(",
"\"-\"",
")",
"[",
"-",
"1",
"]",
"dlc_cfg",
"[",
"\"num_outputs\"",
"]",
"=",
"cfg",
".",
"get",
"(",
"\"num_outputs\"",
",",
"dlc_cfg",
".",
"get",
"(",
"\"num_outputs\"",
",",
"1",
")",
")",
"dlc_cfg",
"[",
"\"batch_size\"",
"]",
"=",
"None",
"# load network",
"if",
"TFGPUinference",
":",
"sess",
",",
"_",
",",
"_",
"=",
"predict",
".",
"setup_GPUpose_prediction",
"(",
"dlc_cfg",
")",
"output",
"=",
"[",
"\"concat_1\"",
"]",
"else",
":",
"sess",
",",
"_",
",",
"_",
"=",
"predict",
".",
"setup_pose_prediction",
"(",
"dlc_cfg",
")",
"if",
"dlc_cfg",
"[",
"\"location_refinement\"",
"]",
":",
"output",
"=",
"[",
"\"Sigmoid\"",
",",
"\"pose/locref_pred/block4/BiasAdd\"",
"]",
"else",
":",
"output",
"=",
"[",
"\"Sigmoid\"",
",",
"\"pose/part_pred/block4/BiasAdd\"",
"]",
"input",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"get_default_graph",
"(",
")",
".",
"get_operations",
"(",
")",
"[",
"0",
"]",
".",
"name",
"return",
"sess",
",",
"input",
",",
"output",
",",
"dlc_cfg"
] |
https://github.com/DeepLabCut/DeepLabCut/blob/1dd14c54729ae0d8e66ca495aa5baeb83502e1c7/deeplabcut/pose_estimation_tensorflow/export.py#L80-L191
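The snapshot-selection step sorts checkpoint names numerically by the iteration count after the dash (a plain lexicographic sort would put "snapshot-1000" before "snapshot-50"). A standalone sketch with made-up names:

import numpy as np

snapshots = np.array(["snapshot-1000", "snapshot-50", "snapshot-200"])
order = np.argsort([int(name.split("-")[1]) for name in snapshots])
snapshots = snapshots[order]
assert list(snapshots) == ["snapshot-50", "snapshot-200", "snapshot-1000"]
# snapshots[-1] is then the newest checkpoint, matching snapshotindex = -1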
|