nwo (string, 5–106 chars) | sha (string, 40 chars) | path (string, 4–174 chars) | language (1 class) | identifier (string, 1–140 chars) | parameters (string, 0–87.7k chars) | argument_list (1 class) | return_statement (string, 0–426k chars) | docstring (string, 0–64.3k chars) | docstring_summary (string, 0–26.3k chars) | docstring_tokens (list) | function (string, 18–4.83M chars) | function_tokens (list) | url (string, 83–304 chars)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
x1ddos/simpleauth
|
d6a369e2783466f3b9bbdb54411e5698a5c043d1
|
example/lib/httplib2/socks.py
|
python
|
setdefaultproxy
|
(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None)
|
setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets a default proxy which all further socksocket objects will use,
unless explicitly changed.
|
setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets a default proxy which all further socksocket objects will use,
unless explicitly changed.
|
[
"setdefaultproxy",
"(",
"proxytype",
"addr",
"[",
"port",
"[",
"rdns",
"[",
"username",
"[",
"password",
"]]]]",
")",
"Sets",
"a",
"default",
"proxy",
"which",
"all",
"further",
"socksocket",
"objects",
"will",
"use",
"unless",
"explicitly",
"changed",
"."
] |
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
"""setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets a default proxy which all further socksocket objects will use,
unless explicitly changed.
"""
global _defaultproxy
_defaultproxy = (proxytype, addr, port, rdns, username, password)
|
[
"def",
"setdefaultproxy",
"(",
"proxytype",
"=",
"None",
",",
"addr",
"=",
"None",
",",
"port",
"=",
"None",
",",
"rdns",
"=",
"True",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
")",
":",
"global",
"_defaultproxy",
"_defaultproxy",
"=",
"(",
"proxytype",
",",
"addr",
",",
"port",
",",
"rdns",
",",
"username",
",",
"password",
")"
] |
https://github.com/x1ddos/simpleauth/blob/d6a369e2783466f3b9bbdb54411e5698a5c043d1/example/lib/httplib2/socks.py#L96-L102
|
||
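A minimal usage sketch for the `setdefaultproxy` row above, assuming the vendored SocksiPy `socks` module is importable; the proxy host and port are placeholders:

```python
import socket
import socks  # the vendored httplib2/socks.py module recorded above

# Route every socksocket created from here on through a SOCKS5 proxy.
# "proxy.example.com" and 1080 are placeholder values.
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "proxy.example.com", 1080)

# Optionally monkey-patch the stdlib so plain sockets are proxied too.
socket.socket = socks.socksocket
```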
lovedaybrooke/gender-decoder
|
c66fb57afed0865eb698bf2c14057a13131217d7
|
app/models.py
|
python
|
CodedWordCounter.__init__
|
(self, ad, word, coding)
|
[] |
def __init__(self, ad, word, coding):
self.ad_hash = ad.hash
self.word = word
self.coding = coding
self.count = 1
db.session.add(self)
db.session.commit()
|
[
"def",
"__init__",
"(",
"self",
",",
"ad",
",",
"word",
",",
"coding",
")",
":",
"self",
".",
"ad_hash",
"=",
"ad",
".",
"hash",
"self",
".",
"word",
"=",
"word",
"self",
".",
"coding",
"=",
"coding",
"self",
".",
"count",
"=",
"1",
"db",
".",
"session",
".",
"add",
"(",
"self",
")",
"db",
".",
"session",
".",
"commit",
"(",
")"
] |
https://github.com/lovedaybrooke/gender-decoder/blob/c66fb57afed0865eb698bf2c14057a13131217d7/app/models.py#L143-L149
|
||||
GPflow/GPflowOpt
|
3d86bcc000b0367f19e9f03f4458f5641e5dde60
|
gpflowopt/acquisition/acquisition.py
|
python
|
Acquisition.set_data
|
(self, X, Y)
|
return num_outputs_sum
|
Update the training data of the contained models
Sets the _needs_setup attribute to True so the contained models are optimized and :meth:`setup` is run again
right before evaluating the :class:`Acquisition` function.
Let Q be the sum of the output dimensions of all contained models, Y should have a minimum of
Q columns. Only the first Q columns of Y are used while returning the scalar Q
:param X: input data N x D
:param Y: output data N x R (R >= Q)
:return: Q (sum of output dimensions of contained models)
|
Update the training data of the contained models
|
[
"Update",
"the",
"training",
"data",
"of",
"the",
"contained",
"models"
] |
def set_data(self, X, Y):
"""
Update the training data of the contained models
Sets the _needs_setup attribute to True so the contained models are optimized and :meth:`setup` is run again
right before evaluating the :class:`Acquisition` function.
Let Q be the sum of the output dimensions of all contained models, Y should have a minimum of
Q columns. Only the first Q columns of Y are used while returning the scalar Q
:param X: input data N x D
:param Y: output data N x R (R >= Q)
:return: Q (sum of output dimensions of contained models)
"""
num_outputs_sum = 0
for model in self.models:
num_outputs = model.Y.shape[1]
Ypart = Y[:, num_outputs_sum:num_outputs_sum + num_outputs]
num_outputs_sum += num_outputs
model.X = X
model.Y = Ypart
self.highest_parent._needs_setup = True
return num_outputs_sum
|
[
"def",
"set_data",
"(",
"self",
",",
"X",
",",
"Y",
")",
":",
"num_outputs_sum",
"=",
"0",
"for",
"model",
"in",
"self",
".",
"models",
":",
"num_outputs",
"=",
"model",
".",
"Y",
".",
"shape",
"[",
"1",
"]",
"Ypart",
"=",
"Y",
"[",
":",
",",
"num_outputs_sum",
":",
"num_outputs_sum",
"+",
"num_outputs",
"]",
"num_outputs_sum",
"+=",
"num_outputs",
"model",
".",
"X",
"=",
"X",
"model",
".",
"Y",
"=",
"Ypart",
"self",
".",
"highest_parent",
".",
"_needs_setup",
"=",
"True",
"return",
"num_outputs_sum"
] |
https://github.com/GPflow/GPflowOpt/blob/3d86bcc000b0367f19e9f03f4458f5641e5dde60/gpflowopt/acquisition/acquisition.py#L145-L169
|
|
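The column-splitting contract described in the `Acquisition.set_data` row above can be re-illustrated without GPflowOpt; the model output dimensions and array shapes below are invented for the sketch:

```python
import numpy as np

# Two hypothetical models with 1 and 2 output columns, so Q = 3.
model_output_dims = [1, 2]
X = np.random.rand(5, 2)   # N x D inputs
Y = np.random.rand(5, 4)   # N x R outputs, R >= Q; the extra column is ignored

num_outputs_sum = 0
for num_outputs in model_output_dims:
    Ypart = Y[:, num_outputs_sum:num_outputs_sum + num_outputs]
    num_outputs_sum += num_outputs
    print(Ypart.shape)     # (5, 1), then (5, 2)

print(num_outputs_sum)     # 3: the scalar Q that set_data returns
```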
crs4/pydoop
|
438c92ed34e2d4f12db7cc1ea3a7ed094206c3a5
|
pydoop/app/submit.py
|
python
|
PydoopSubmitter.set_args
|
(self, args, unknown_args=None)
|
Configure job, based on the arguments provided.
|
Configure job, based on the arguments provided.
|
[
"Configure",
"job",
"based",
"on",
"the",
"arguments",
"provided",
"."
] |
def set_args(self, args, unknown_args=None):
"""
Configure job, based on the arguments provided.
"""
if unknown_args is None:
unknown_args = []
self.logger.setLevel(getattr(logging, args.log_level))
parent = hdfs.path.dirname(hdfs.path.abspath(args.output.rstrip("/")))
self.remote_wd = hdfs.path.join(
parent, utils.make_random_str(prefix="pydoop_submit_")
)
self.remote_exe = hdfs.path.join(self.remote_wd, str(uuid.uuid4()))
self.properties[JOB_NAME] = args.job_name or 'pydoop'
self.properties[IS_JAVA_RR] = (
'false' if args.do_not_use_java_record_reader else 'true'
)
self.properties[IS_JAVA_RW] = (
'false' if args.do_not_use_java_record_writer else 'true'
)
if args.num_reducers is not None:
self.properties[JOB_REDUCES] = args.num_reducers
if args.job_name:
self.properties[JOB_NAME] = args.job_name
self.properties.update(args.job_conf or {})
self.__set_files_to_cache(args)
self.__set_archives_to_cache(args)
self.requested_env = self._env_arg_to_dict(args.set_env or [])
self.args = args
self.unknown_args = unknown_args
|
[
"def",
"set_args",
"(",
"self",
",",
"args",
",",
"unknown_args",
"=",
"None",
")",
":",
"if",
"unknown_args",
"is",
"None",
":",
"unknown_args",
"=",
"[",
"]",
"self",
".",
"logger",
".",
"setLevel",
"(",
"getattr",
"(",
"logging",
",",
"args",
".",
"log_level",
")",
")",
"parent",
"=",
"hdfs",
".",
"path",
".",
"dirname",
"(",
"hdfs",
".",
"path",
".",
"abspath",
"(",
"args",
".",
"output",
".",
"rstrip",
"(",
"\"/\"",
")",
")",
")",
"self",
".",
"remote_wd",
"=",
"hdfs",
".",
"path",
".",
"join",
"(",
"parent",
",",
"utils",
".",
"make_random_str",
"(",
"prefix",
"=",
"\"pydoop_submit_\"",
")",
")",
"self",
".",
"remote_exe",
"=",
"hdfs",
".",
"path",
".",
"join",
"(",
"self",
".",
"remote_wd",
",",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
")",
"self",
".",
"properties",
"[",
"JOB_NAME",
"]",
"=",
"args",
".",
"job_name",
"or",
"'pydoop'",
"self",
".",
"properties",
"[",
"IS_JAVA_RR",
"]",
"=",
"(",
"'false'",
"if",
"args",
".",
"do_not_use_java_record_reader",
"else",
"'true'",
")",
"self",
".",
"properties",
"[",
"IS_JAVA_RW",
"]",
"=",
"(",
"'false'",
"if",
"args",
".",
"do_not_use_java_record_writer",
"else",
"'true'",
")",
"if",
"args",
".",
"num_reducers",
"is",
"not",
"None",
":",
"self",
".",
"properties",
"[",
"JOB_REDUCES",
"]",
"=",
"args",
".",
"num_reducers",
"if",
"args",
".",
"job_name",
":",
"self",
".",
"properties",
"[",
"JOB_NAME",
"]",
"=",
"args",
".",
"job_name",
"self",
".",
"properties",
".",
"update",
"(",
"args",
".",
"job_conf",
"or",
"{",
"}",
")",
"self",
".",
"__set_files_to_cache",
"(",
"args",
")",
"self",
".",
"__set_archives_to_cache",
"(",
"args",
")",
"self",
".",
"requested_env",
"=",
"self",
".",
"_env_arg_to_dict",
"(",
"args",
".",
"set_env",
"or",
"[",
"]",
")",
"self",
".",
"args",
"=",
"args",
"self",
".",
"unknown_args",
"=",
"unknown_args"
] |
https://github.com/crs4/pydoop/blob/438c92ed34e2d4f12db7cc1ea3a7ed094206c3a5/pydoop/app/submit.py#L132-L161
|
||
sthanhng/yoloface
|
2b954f318d9bd9136836bed0a71109ab56681790
|
yolo/yolo.py
|
python
|
letterbox_image
|
(image, size)
|
return new_image
|
Resize image with unchanged aspect ratio using padding
|
Resize image with unchanged aspect ratio using padding
|
[
"Resize",
"image",
"with",
"unchanged",
"aspect",
"ratio",
"using",
"padding"
] |
def letterbox_image(image, size):
'''Resize image with unchanged aspect ratio using padding'''
img_width, img_height = image.size
w, h = size
scale = min(w / img_width, h / img_height)
nw = int(img_width * scale)
nh = int(img_height * scale)
image = image.resize((nw, nh), Image.BICUBIC)
new_image = Image.new('RGB', size, (128, 128, 128))
new_image.paste(image, ((w - nw) // 2, (h - nh) // 2))
return new_image
|
[
"def",
"letterbox_image",
"(",
"image",
",",
"size",
")",
":",
"img_width",
",",
"img_height",
"=",
"image",
".",
"size",
"w",
",",
"h",
"=",
"size",
"scale",
"=",
"min",
"(",
"w",
"/",
"img_width",
",",
"h",
"/",
"img_height",
")",
"nw",
"=",
"int",
"(",
"img_width",
"*",
"scale",
")",
"nh",
"=",
"int",
"(",
"img_height",
"*",
"scale",
")",
"image",
"=",
"image",
".",
"resize",
"(",
"(",
"nw",
",",
"nh",
")",
",",
"Image",
".",
"BICUBIC",
")",
"new_image",
"=",
"Image",
".",
"new",
"(",
"'RGB'",
",",
"size",
",",
"(",
"128",
",",
"128",
",",
"128",
")",
")",
"new_image",
".",
"paste",
"(",
"image",
",",
"(",
"(",
"w",
"-",
"nw",
")",
"//",
"2",
",",
"(",
"h",
"-",
"nh",
")",
"//",
"2",
")",
")",
"return",
"new_image"
] |
https://github.com/sthanhng/yoloface/blob/2b954f318d9bd9136836bed0a71109ab56681790/yolo/yolo.py#L154-L166
|
|
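A short usage sketch for `letterbox_image`, assuming the function above is in scope and Pillow is installed; `photo.jpg` is a placeholder path:

```python
from PIL import Image

image = Image.open("photo.jpg")             # placeholder input file
boxed = letterbox_image(image, (416, 416))  # scale, then pad with gray
print(boxed.size)                           # (416, 416)
boxed.save("photo_letterboxed.jpg")
```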
openhatch/oh-mainline
|
ce29352a034e1223141dcc2f317030bbc3359a51
|
vendor/packages/twisted/twisted/web/vhost.py
|
python
|
NameVirtualHost.removeHost
|
(self, name)
|
Remove a host.
|
Remove a host.
|
[
"Remove",
"a",
"host",
"."
] |
def removeHost(self, name):
"""Remove a host."""
del self.hosts[name]
|
[
"def",
"removeHost",
"(",
"self",
",",
"name",
")",
":",
"del",
"self",
".",
"hosts",
"[",
"name",
"]"
] |
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/twisted/twisted/web/vhost.py#L71-L73
|
||
openshift/openshift-tools
|
1188778e728a6e4781acf728123e5b356380fe6f
|
openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_vendored_deps/library/oc_route.py
|
python
|
OCRoute.__init__
|
(self,
config,
verbose=False)
|
Constructor for OCVolume
|
Constructor for OCVolume
|
[
"Constructor",
"for",
"OCVolume"
] |
def __init__(self,
config,
verbose=False):
''' Constructor for OCVolume '''
super(OCRoute, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
self._route = None
|
[
"def",
"__init__",
"(",
"self",
",",
"config",
",",
"verbose",
"=",
"False",
")",
":",
"super",
"(",
"OCRoute",
",",
"self",
")",
".",
"__init__",
"(",
"config",
".",
"namespace",
",",
"kubeconfig",
"=",
"config",
".",
"kubeconfig",
",",
"verbose",
"=",
"verbose",
")",
"self",
".",
"config",
"=",
"config",
"self",
".",
"_route",
"=",
"None"
] |
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_vendored_deps/library/oc_route.py#L1674-L1680
|
||
urwid/urwid
|
e2423b5069f51d318ea1ac0f355a0efe5448f7eb
|
urwid/text_layout.py
|
python
|
line_width
|
( segs )
|
return sc
|
Return the screen column width of one line of a text layout structure.
This function ignores any existing shift applied to the line,
represented by an (amount, None) tuple at the start of the line.
|
Return the screen column width of one line of a text layout structure.
|
[
"Return",
"the",
"screen",
"column",
"width",
"of",
"one",
"line",
"of",
"a",
"text",
"layout",
"structure",
"."
] |
def line_width( segs ):
"""
Return the screen column width of one line of a text layout structure.
This function ignores any existing shift applied to the line,
represented by an (amount, None) tuple at the start of the line.
"""
sc = 0
seglist = segs
if segs and len(segs[0])==2 and segs[0][1]==None:
seglist = segs[1:]
for s in seglist:
sc += s[0]
return sc
|
[
"def",
"line_width",
"(",
"segs",
")",
":",
"sc",
"=",
"0",
"seglist",
"=",
"segs",
"if",
"segs",
"and",
"len",
"(",
"segs",
"[",
"0",
"]",
")",
"==",
"2",
"and",
"segs",
"[",
"0",
"]",
"[",
"1",
"]",
"==",
"None",
":",
"seglist",
"=",
"segs",
"[",
"1",
":",
"]",
"for",
"s",
"in",
"seglist",
":",
"sc",
"+=",
"s",
"[",
"0",
"]",
"return",
"sc"
] |
https://github.com/urwid/urwid/blob/e2423b5069f51d318ea1ac0f355a0efe5448f7eb/urwid/text_layout.py#L339-L352
|
|
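A sketch of `line_width` on schematic segment tuples following the convention in its docstring, where a leading `(amount, None)` pair encodes a shift and is ignored:

```python
# A shifted line: the (4, None) shift is skipped, so only 5 + 3 count.
segs = [(4, None), (5, 0, 5), (3, 5, 8)]
print(line_width(segs))                      # 8

# The same line without a shift gives the same width.
print(line_width([(5, 0, 5), (3, 5, 8)]))    # 8
```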
PaddlePaddle/PaddleX
|
2bab73f81ab54e328204e7871e6ae4a82e719f5d
|
static/paddlex/cv/models/base.py
|
python
|
BaseAPI.export_quant_model
|
(self,
dataset,
save_dir,
batch_size=1,
batch_num=10,
cache_dir="./temp")
|
[] |
def export_quant_model(self,
dataset,
save_dir,
batch_size=1,
batch_num=10,
cache_dir="./temp"):
input_channel = getattr(self, 'input_channel', 3)
arrange_transforms(
model_type=self.model_type,
class_name=self.__class__.__name__,
transforms=dataset.transforms,
mode='quant',
input_channel=input_channel)
dataset.num_samples = batch_size * batch_num
import paddle
version = paddle.__version__.strip().split('.')
if version[0] == '2' or (version[0] == '0' and
hasattr(paddle, 'enable_static')):
from .slim.post_quantization import PaddleXPostTrainingQuantizationV2 as PaddleXPostTrainingQuantization
else:
from .slim.post_quantization import PaddleXPostTrainingQuantization
PaddleXPostTrainingQuantization._collect_target_varnames
is_use_cache_file = True
if cache_dir is None:
is_use_cache_file = False
quant_prog = self.test_prog.clone(for_test=True)
post_training_quantization = PaddleXPostTrainingQuantization(
executor=self.exe,
dataset=dataset,
program=quant_prog,
inputs=self.test_inputs,
outputs=self.test_outputs,
batch_size=batch_size,
batch_nums=batch_num,
scope=self.scope,
algo='KL',
quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
is_full_quantize=False,
is_use_cache_file=is_use_cache_file,
cache_dir=cache_dir)
post_training_quantization.quantize()
post_training_quantization.save_quantized_model(save_dir)
model_info = self.get_model_info()
model_info['status'] = 'Quant'
        # Save the descriptions of the model's output variables
model_info['_ModelInputsOutputs'] = dict()
model_info['_ModelInputsOutputs']['test_inputs'] = [
[k, v.name] for k, v in self.test_inputs.items()
]
model_info['_ModelInputsOutputs']['test_outputs'] = [
[k, v.name] for k, v in self.test_outputs.items()
]
with open(
osp.join(save_dir, 'model.yml'), encoding='utf-8',
mode='w') as f:
yaml.dump(model_info, f)
|
[
"def",
"export_quant_model",
"(",
"self",
",",
"dataset",
",",
"save_dir",
",",
"batch_size",
"=",
"1",
",",
"batch_num",
"=",
"10",
",",
"cache_dir",
"=",
"\"./temp\"",
")",
":",
"input_channel",
"=",
"getattr",
"(",
"self",
",",
"'input_channel'",
",",
"3",
")",
"arrange_transforms",
"(",
"model_type",
"=",
"self",
".",
"model_type",
",",
"class_name",
"=",
"self",
".",
"__class__",
".",
"__name__",
",",
"transforms",
"=",
"dataset",
".",
"transforms",
",",
"mode",
"=",
"'quant'",
",",
"input_channel",
"=",
"input_channel",
")",
"dataset",
".",
"num_samples",
"=",
"batch_size",
"*",
"batch_num",
"import",
"paddle",
"version",
"=",
"paddle",
".",
"__version__",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'.'",
")",
"if",
"version",
"[",
"0",
"]",
"==",
"'2'",
"or",
"(",
"version",
"[",
"0",
"]",
"==",
"'0'",
"and",
"hasattr",
"(",
"paddle",
",",
"'enable_static'",
")",
")",
":",
"from",
".",
"slim",
".",
"post_quantization",
"import",
"PaddleXPostTrainingQuantizationV2",
"as",
"PaddleXPostTrainingQuantization",
"else",
":",
"from",
".",
"slim",
".",
"post_quantization",
"import",
"PaddleXPostTrainingQuantization",
"PaddleXPostTrainingQuantization",
".",
"_collect_target_varnames",
"is_use_cache_file",
"=",
"True",
"if",
"cache_dir",
"is",
"None",
":",
"is_use_cache_file",
"=",
"False",
"quant_prog",
"=",
"self",
".",
"test_prog",
".",
"clone",
"(",
"for_test",
"=",
"True",
")",
"post_training_quantization",
"=",
"PaddleXPostTrainingQuantization",
"(",
"executor",
"=",
"self",
".",
"exe",
",",
"dataset",
"=",
"dataset",
",",
"program",
"=",
"quant_prog",
",",
"inputs",
"=",
"self",
".",
"test_inputs",
",",
"outputs",
"=",
"self",
".",
"test_outputs",
",",
"batch_size",
"=",
"batch_size",
",",
"batch_nums",
"=",
"batch_num",
",",
"scope",
"=",
"self",
".",
"scope",
",",
"algo",
"=",
"'KL'",
",",
"quantizable_op_type",
"=",
"[",
"\"conv2d\"",
",",
"\"depthwise_conv2d\"",
",",
"\"mul\"",
"]",
",",
"is_full_quantize",
"=",
"False",
",",
"is_use_cache_file",
"=",
"is_use_cache_file",
",",
"cache_dir",
"=",
"cache_dir",
")",
"post_training_quantization",
".",
"quantize",
"(",
")",
"post_training_quantization",
".",
"save_quantized_model",
"(",
"save_dir",
")",
"model_info",
"=",
"self",
".",
"get_model_info",
"(",
")",
"model_info",
"[",
"'status'",
"]",
"=",
"'Quant'",
"# 保存模型输出的变量描述",
"model_info",
"[",
"'_ModelInputsOutputs'",
"]",
"=",
"dict",
"(",
")",
"model_info",
"[",
"'_ModelInputsOutputs'",
"]",
"[",
"'test_inputs'",
"]",
"=",
"[",
"[",
"k",
",",
"v",
".",
"name",
"]",
"for",
"k",
",",
"v",
"in",
"self",
".",
"test_inputs",
".",
"items",
"(",
")",
"]",
"model_info",
"[",
"'_ModelInputsOutputs'",
"]",
"[",
"'test_outputs'",
"]",
"=",
"[",
"[",
"k",
",",
"v",
".",
"name",
"]",
"for",
"k",
",",
"v",
"in",
"self",
".",
"test_outputs",
".",
"items",
"(",
")",
"]",
"with",
"open",
"(",
"osp",
".",
"join",
"(",
"save_dir",
",",
"'model.yml'",
")",
",",
"encoding",
"=",
"'utf-8'",
",",
"mode",
"=",
"'w'",
")",
"as",
"f",
":",
"yaml",
".",
"dump",
"(",
"model_info",
",",
"f",
")"
] |
https://github.com/PaddlePaddle/PaddleX/blob/2bab73f81ab54e328204e7871e6ae4a82e719f5d/static/paddlex/cv/models/base.py#L132-L189
|
||||
deepmind/learning-to-learn
|
f3c1a8d176b8ea7cc60478bfcfdd10a7a52fd296
|
networks.py
|
python
|
KernelDeepLSTM.__init__
|
(self, kernel_shape, name="kernel_deep_lstm", **kwargs)
|
Creates an instance of `KernelDeepLSTM`.
Args:
kernel_shape: Kernel shape (2D `tuple`).
name: Module name.
**kwargs: Additional `DeepLSTM` args.
|
Creates an instance of `KernelDeepLSTM`.
|
[
"Creates",
"an",
"instance",
"of",
"KernelDeepLSTM",
"."
] |
def __init__(self, kernel_shape, name="kernel_deep_lstm", **kwargs):
"""Creates an instance of `KernelDeepLSTM`.
Args:
kernel_shape: Kernel shape (2D `tuple`).
name: Module name.
**kwargs: Additional `DeepLSTM` args.
"""
self._kernel_shape = kernel_shape
output_size = np.prod(kernel_shape)
super(KernelDeepLSTM, self).__init__(output_size, name=name, **kwargs)
|
[
"def",
"__init__",
"(",
"self",
",",
"kernel_shape",
",",
"name",
"=",
"\"kernel_deep_lstm\"",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_kernel_shape",
"=",
"kernel_shape",
"output_size",
"=",
"np",
".",
"prod",
"(",
"kernel_shape",
")",
"super",
"(",
"KernelDeepLSTM",
",",
"self",
")",
".",
"__init__",
"(",
"output_size",
",",
"name",
"=",
"name",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/deepmind/learning-to-learn/blob/f3c1a8d176b8ea7cc60478bfcfdd10a7a52fd296/networks.py#L268-L278
|
||
linxid/Machine_Learning_Study_Path
|
558e82d13237114bbb8152483977806fc0c222af
|
Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pkg_resources/__init__.py
|
python
|
ZipProvider.__init__
|
(self, module)
|
[] |
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive + os.sep
|
[
"def",
"__init__",
"(",
"self",
",",
"module",
")",
":",
"EggProvider",
".",
"__init__",
"(",
"self",
",",
"module",
")",
"self",
".",
"zip_pre",
"=",
"self",
".",
"loader",
".",
"archive",
"+",
"os",
".",
"sep"
] |
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pkg_resources/__init__.py#L1689-L1691
|
||||
cronyo/cronyo
|
cd5abab0871b68bf31b18aac934303928130a441
|
cronyo/vendor/idna/core.py
|
python
|
check_bidi
|
(label, check_ltr=False)
|
return True
|
[] |
def check_bidi(label, check_ltr=False):
# Bidi rules should only be applied if string contains RTL characters
bidi_label = False
for (idx, cp) in enumerate(label, 1):
direction = unicodedata.bidirectional(cp)
if direction == '':
# String likely comes from a newer version of Unicode
raise IDNABidiError('Unknown directionality in label {0} at position {1}'.format(repr(label), idx))
if direction in ['R', 'AL', 'AN']:
bidi_label = True
if not bidi_label and not check_ltr:
return True
# Bidi rule 1
direction = unicodedata.bidirectional(label[0])
if direction in ['R', 'AL']:
rtl = True
elif direction == 'L':
rtl = False
else:
raise IDNABidiError('First codepoint in label {0} must be directionality L, R or AL'.format(repr(label)))
valid_ending = False
number_type = False
for (idx, cp) in enumerate(label, 1):
direction = unicodedata.bidirectional(cp)
if rtl:
# Bidi rule 2
if not direction in ['R', 'AL', 'AN', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
raise IDNABidiError('Invalid direction for codepoint at position {0} in a right-to-left label'.format(idx))
# Bidi rule 3
if direction in ['R', 'AL', 'EN', 'AN']:
valid_ending = True
elif direction != 'NSM':
valid_ending = False
# Bidi rule 4
if direction in ['AN', 'EN']:
if not number_type:
number_type = direction
else:
if number_type != direction:
raise IDNABidiError('Can not mix numeral types in a right-to-left label')
else:
# Bidi rule 5
if not direction in ['L', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
raise IDNABidiError('Invalid direction for codepoint at position {0} in a left-to-right label'.format(idx))
# Bidi rule 6
if direction in ['L', 'EN']:
valid_ending = True
elif direction != 'NSM':
valid_ending = False
if not valid_ending:
raise IDNABidiError('Label ends with illegal codepoint directionality')
return True
|
[
"def",
"check_bidi",
"(",
"label",
",",
"check_ltr",
"=",
"False",
")",
":",
"# Bidi rules should only be applied if string contains RTL characters",
"bidi_label",
"=",
"False",
"for",
"(",
"idx",
",",
"cp",
")",
"in",
"enumerate",
"(",
"label",
",",
"1",
")",
":",
"direction",
"=",
"unicodedata",
".",
"bidirectional",
"(",
"cp",
")",
"if",
"direction",
"==",
"''",
":",
"# String likely comes from a newer version of Unicode",
"raise",
"IDNABidiError",
"(",
"'Unknown directionality in label {0} at position {1}'",
".",
"format",
"(",
"repr",
"(",
"label",
")",
",",
"idx",
")",
")",
"if",
"direction",
"in",
"[",
"'R'",
",",
"'AL'",
",",
"'AN'",
"]",
":",
"bidi_label",
"=",
"True",
"if",
"not",
"bidi_label",
"and",
"not",
"check_ltr",
":",
"return",
"True",
"# Bidi rule 1",
"direction",
"=",
"unicodedata",
".",
"bidirectional",
"(",
"label",
"[",
"0",
"]",
")",
"if",
"direction",
"in",
"[",
"'R'",
",",
"'AL'",
"]",
":",
"rtl",
"=",
"True",
"elif",
"direction",
"==",
"'L'",
":",
"rtl",
"=",
"False",
"else",
":",
"raise",
"IDNABidiError",
"(",
"'First codepoint in label {0} must be directionality L, R or AL'",
".",
"format",
"(",
"repr",
"(",
"label",
")",
")",
")",
"valid_ending",
"=",
"False",
"number_type",
"=",
"False",
"for",
"(",
"idx",
",",
"cp",
")",
"in",
"enumerate",
"(",
"label",
",",
"1",
")",
":",
"direction",
"=",
"unicodedata",
".",
"bidirectional",
"(",
"cp",
")",
"if",
"rtl",
":",
"# Bidi rule 2",
"if",
"not",
"direction",
"in",
"[",
"'R'",
",",
"'AL'",
",",
"'AN'",
",",
"'EN'",
",",
"'ES'",
",",
"'CS'",
",",
"'ET'",
",",
"'ON'",
",",
"'BN'",
",",
"'NSM'",
"]",
":",
"raise",
"IDNABidiError",
"(",
"'Invalid direction for codepoint at position {0} in a right-to-left label'",
".",
"format",
"(",
"idx",
")",
")",
"# Bidi rule 3",
"if",
"direction",
"in",
"[",
"'R'",
",",
"'AL'",
",",
"'EN'",
",",
"'AN'",
"]",
":",
"valid_ending",
"=",
"True",
"elif",
"direction",
"!=",
"'NSM'",
":",
"valid_ending",
"=",
"False",
"# Bidi rule 4",
"if",
"direction",
"in",
"[",
"'AN'",
",",
"'EN'",
"]",
":",
"if",
"not",
"number_type",
":",
"number_type",
"=",
"direction",
"else",
":",
"if",
"number_type",
"!=",
"direction",
":",
"raise",
"IDNABidiError",
"(",
"'Can not mix numeral types in a right-to-left label'",
")",
"else",
":",
"# Bidi rule 5",
"if",
"not",
"direction",
"in",
"[",
"'L'",
",",
"'EN'",
",",
"'ES'",
",",
"'CS'",
",",
"'ET'",
",",
"'ON'",
",",
"'BN'",
",",
"'NSM'",
"]",
":",
"raise",
"IDNABidiError",
"(",
"'Invalid direction for codepoint at position {0} in a left-to-right label'",
".",
"format",
"(",
"idx",
")",
")",
"# Bidi rule 6",
"if",
"direction",
"in",
"[",
"'L'",
",",
"'EN'",
"]",
":",
"valid_ending",
"=",
"True",
"elif",
"direction",
"!=",
"'NSM'",
":",
"valid_ending",
"=",
"False",
"if",
"not",
"valid_ending",
":",
"raise",
"IDNABidiError",
"(",
"'Label ends with illegal codepoint directionality'",
")",
"return",
"True"
] |
https://github.com/cronyo/cronyo/blob/cd5abab0871b68bf31b18aac934303928130a441/cronyo/vendor/idna/core.py#L67-L124
|
|||
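A few calls sketching the behaviour of `check_bidi`, assuming the function and `IDNABidiError` from the vendored `idna` module are in scope:

```python
# A pure-LTR label passes trivially unless check_ltr=True forces the rules.
print(check_bidi(u"example"))                  # True: no RTL characters
print(check_bidi(u"example", check_ltr=True))  # True: valid LTR label

# A valid RTL label (Hebrew alef + bet) also returns True.
print(check_bidi(u"\u05d0\u05d1"))             # True
```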
replit-archive/empythoned
|
977ec10ced29a3541a4973dc2b59910805695752
|
dist/lib/python2.7/cookielib.py
|
python
|
http2time
|
(text)
|
return _str2time(day, mon, yr, hr, min, sec, tz)
|
Returns time in seconds since epoch of time represented by a string.
Return value is an integer.
None is returned if the format of str is unrecognized, the time is outside
the representable range, or the timezone string is not recognized. If the
string contains no timezone, UTC is assumed.
The timezone in the string may be numerical (like "-0800" or "+0100") or a
string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the
timezone strings equivalent to UTC (zero offset) are known to the function.
The function loosely parses the following formats:
Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format
Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format
Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format
09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday)
08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday)
08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday)
The parser ignores leading and trailing whitespace. The time may be
absent.
If the year is given with only 2 digits, the function will select the
century that makes the year closest to the current date.
|
Returns time in seconds since epoch of time represented by a string.
|
[
"Returns",
"time",
"in",
"seconds",
"since",
"epoch",
"of",
"time",
"represented",
"by",
"a",
"string",
"."
] |
def http2time(text):
"""Returns time in seconds since epoch of time represented by a string.
Return value is an integer.
None is returned if the format of str is unrecognized, the time is outside
the representable range, or the timezone string is not recognized. If the
string contains no timezone, UTC is assumed.
The timezone in the string may be numerical (like "-0800" or "+0100") or a
string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the
timezone strings equivalent to UTC (zero offset) are known to the function.
The function loosely parses the following formats:
Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format
Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format
Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format
09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday)
08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday)
08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday)
The parser ignores leading and trailing whitespace. The time may be
absent.
If the year is given with only 2 digits, the function will select the
century that makes the year closest to the current date.
"""
# fast exit for strictly conforming string
m = STRICT_DATE_RE.search(text)
if m:
g = m.groups()
mon = MONTHS_LOWER.index(g[1].lower()) + 1
tt = (int(g[2]), mon, int(g[0]),
int(g[3]), int(g[4]), float(g[5]))
return _timegm(tt)
# No, we need some messy parsing...
# clean up
text = text.lstrip()
text = WEEKDAY_RE.sub("", text, 1) # Useless weekday
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = LOOSE_HTTP_DATE_RE.search(text)
if m is not None:
day, mon, yr, hr, min, sec, tz = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
|
[
"def",
"http2time",
"(",
"text",
")",
":",
"# fast exit for strictly conforming string",
"m",
"=",
"STRICT_DATE_RE",
".",
"search",
"(",
"text",
")",
"if",
"m",
":",
"g",
"=",
"m",
".",
"groups",
"(",
")",
"mon",
"=",
"MONTHS_LOWER",
".",
"index",
"(",
"g",
"[",
"1",
"]",
".",
"lower",
"(",
")",
")",
"+",
"1",
"tt",
"=",
"(",
"int",
"(",
"g",
"[",
"2",
"]",
")",
",",
"mon",
",",
"int",
"(",
"g",
"[",
"0",
"]",
")",
",",
"int",
"(",
"g",
"[",
"3",
"]",
")",
",",
"int",
"(",
"g",
"[",
"4",
"]",
")",
",",
"float",
"(",
"g",
"[",
"5",
"]",
")",
")",
"return",
"_timegm",
"(",
"tt",
")",
"# No, we need some messy parsing...",
"# clean up",
"text",
"=",
"text",
".",
"lstrip",
"(",
")",
"text",
"=",
"WEEKDAY_RE",
".",
"sub",
"(",
"\"\"",
",",
"text",
",",
"1",
")",
"# Useless weekday",
"# tz is time zone specifier string",
"day",
",",
"mon",
",",
"yr",
",",
"hr",
",",
"min",
",",
"sec",
",",
"tz",
"=",
"[",
"None",
"]",
"*",
"7",
"# loose regexp parse",
"m",
"=",
"LOOSE_HTTP_DATE_RE",
".",
"search",
"(",
"text",
")",
"if",
"m",
"is",
"not",
"None",
":",
"day",
",",
"mon",
",",
"yr",
",",
"hr",
",",
"min",
",",
"sec",
",",
"tz",
"=",
"m",
".",
"groups",
"(",
")",
"else",
":",
"return",
"None",
"# bad format",
"return",
"_str2time",
"(",
"day",
",",
"mon",
",",
"yr",
",",
"hr",
",",
"min",
",",
"sec",
",",
"tz",
")"
] |
https://github.com/replit-archive/empythoned/blob/977ec10ced29a3541a4973dc2b59910805695752/dist/lib/python2.7/cookielib.py#L212-L266
|
|
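A usage sketch for `http2time`; this row records the Python 2 stdlib `cookielib`, so the sketch assumes a Python 2 interpreter with that module's regexes and helpers in scope:

```python
# A strictly conforming RFC 1123 date takes the fast path.
print(http2time("Wed, 09 Feb 1994 22:23:32 GMT"))  # int: seconds since epoch

# Loose formats, including two-digit years, are parsed too.
print(http2time("08-Feb-94 14:15:29 GMT"))         # int

# Unrecognized strings return None.
print(http2time("not a date"))                     # None
```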
scikit-learn/scikit-learn
|
1d1aadd0711b87d2a11c80aad15df6f8cf156712
|
sklearn/gaussian_process/kernels.py
|
python
|
Matern.__call__
|
(self, X, Y=None, eval_gradient=False)
|
Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
|
Return the kernel k(X, Y) and optionally its gradient.
|
[
"Return",
"the",
"kernel",
"k",
"(",
"X",
"Y",
")",
"and",
"optionally",
"its",
"gradient",
"."
] |
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric="euclidean")
else:
if eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale, metric="euclidean")
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1.0 + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1.0 + K + K ** 2 / 3.0) * np.exp(-K)
elif self.nu == np.inf:
K = np.exp(-(dists ** 2) / 2.0)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = math.sqrt(2 * self.nu) * K
K.fill((2 ** (1.0 - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (
length_scale ** 2
)
else:
D = squareform(dists ** 2)[:, :, np.newaxis]
if self.nu == 0.5:
denominator = np.sqrt(D.sum(axis=2))[:, :, np.newaxis]
K_gradient = K[..., np.newaxis] * np.divide(
D, denominator, where=denominator != 0
)
elif self.nu == 1.5:
K_gradient = 3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
elif self.nu == np.inf:
K_gradient = D * K[..., np.newaxis]
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
|
[
"def",
"__call__",
"(",
"self",
",",
"X",
",",
"Y",
"=",
"None",
",",
"eval_gradient",
"=",
"False",
")",
":",
"X",
"=",
"np",
".",
"atleast_2d",
"(",
"X",
")",
"length_scale",
"=",
"_check_length_scale",
"(",
"X",
",",
"self",
".",
"length_scale",
")",
"if",
"Y",
"is",
"None",
":",
"dists",
"=",
"pdist",
"(",
"X",
"/",
"length_scale",
",",
"metric",
"=",
"\"euclidean\"",
")",
"else",
":",
"if",
"eval_gradient",
":",
"raise",
"ValueError",
"(",
"\"Gradient can only be evaluated when Y is None.\"",
")",
"dists",
"=",
"cdist",
"(",
"X",
"/",
"length_scale",
",",
"Y",
"/",
"length_scale",
",",
"metric",
"=",
"\"euclidean\"",
")",
"if",
"self",
".",
"nu",
"==",
"0.5",
":",
"K",
"=",
"np",
".",
"exp",
"(",
"-",
"dists",
")",
"elif",
"self",
".",
"nu",
"==",
"1.5",
":",
"K",
"=",
"dists",
"*",
"math",
".",
"sqrt",
"(",
"3",
")",
"K",
"=",
"(",
"1.0",
"+",
"K",
")",
"*",
"np",
".",
"exp",
"(",
"-",
"K",
")",
"elif",
"self",
".",
"nu",
"==",
"2.5",
":",
"K",
"=",
"dists",
"*",
"math",
".",
"sqrt",
"(",
"5",
")",
"K",
"=",
"(",
"1.0",
"+",
"K",
"+",
"K",
"**",
"2",
"/",
"3.0",
")",
"*",
"np",
".",
"exp",
"(",
"-",
"K",
")",
"elif",
"self",
".",
"nu",
"==",
"np",
".",
"inf",
":",
"K",
"=",
"np",
".",
"exp",
"(",
"-",
"(",
"dists",
"**",
"2",
")",
"/",
"2.0",
")",
"else",
":",
"# general case; expensive to evaluate",
"K",
"=",
"dists",
"K",
"[",
"K",
"==",
"0.0",
"]",
"+=",
"np",
".",
"finfo",
"(",
"float",
")",
".",
"eps",
"# strict zeros result in nan",
"tmp",
"=",
"math",
".",
"sqrt",
"(",
"2",
"*",
"self",
".",
"nu",
")",
"*",
"K",
"K",
".",
"fill",
"(",
"(",
"2",
"**",
"(",
"1.0",
"-",
"self",
".",
"nu",
")",
")",
"/",
"gamma",
"(",
"self",
".",
"nu",
")",
")",
"K",
"*=",
"tmp",
"**",
"self",
".",
"nu",
"K",
"*=",
"kv",
"(",
"self",
".",
"nu",
",",
"tmp",
")",
"if",
"Y",
"is",
"None",
":",
"# convert from upper-triangular matrix to square matrix",
"K",
"=",
"squareform",
"(",
"K",
")",
"np",
".",
"fill_diagonal",
"(",
"K",
",",
"1",
")",
"if",
"eval_gradient",
":",
"if",
"self",
".",
"hyperparameter_length_scale",
".",
"fixed",
":",
"# Hyperparameter l kept fixed",
"K_gradient",
"=",
"np",
".",
"empty",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"0",
")",
")",
"return",
"K",
",",
"K_gradient",
"# We need to recompute the pairwise dimension-wise distances",
"if",
"self",
".",
"anisotropic",
":",
"D",
"=",
"(",
"X",
"[",
":",
",",
"np",
".",
"newaxis",
",",
":",
"]",
"-",
"X",
"[",
"np",
".",
"newaxis",
",",
":",
",",
":",
"]",
")",
"**",
"2",
"/",
"(",
"length_scale",
"**",
"2",
")",
"else",
":",
"D",
"=",
"squareform",
"(",
"dists",
"**",
"2",
")",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"if",
"self",
".",
"nu",
"==",
"0.5",
":",
"denominator",
"=",
"np",
".",
"sqrt",
"(",
"D",
".",
"sum",
"(",
"axis",
"=",
"2",
")",
")",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"K_gradient",
"=",
"K",
"[",
"...",
",",
"np",
".",
"newaxis",
"]",
"*",
"np",
".",
"divide",
"(",
"D",
",",
"denominator",
",",
"where",
"=",
"denominator",
"!=",
"0",
")",
"elif",
"self",
".",
"nu",
"==",
"1.5",
":",
"K_gradient",
"=",
"3",
"*",
"D",
"*",
"np",
".",
"exp",
"(",
"-",
"np",
".",
"sqrt",
"(",
"3",
"*",
"D",
".",
"sum",
"(",
"-",
"1",
")",
")",
")",
"[",
"...",
",",
"np",
".",
"newaxis",
"]",
"elif",
"self",
".",
"nu",
"==",
"2.5",
":",
"tmp",
"=",
"np",
".",
"sqrt",
"(",
"5",
"*",
"D",
".",
"sum",
"(",
"-",
"1",
")",
")",
"[",
"...",
",",
"np",
".",
"newaxis",
"]",
"K_gradient",
"=",
"5.0",
"/",
"3.0",
"*",
"D",
"*",
"(",
"tmp",
"+",
"1",
")",
"*",
"np",
".",
"exp",
"(",
"-",
"tmp",
")",
"elif",
"self",
".",
"nu",
"==",
"np",
".",
"inf",
":",
"K_gradient",
"=",
"D",
"*",
"K",
"[",
"...",
",",
"np",
".",
"newaxis",
"]",
"else",
":",
"# approximate gradient numerically",
"def",
"f",
"(",
"theta",
")",
":",
"# helper function",
"return",
"self",
".",
"clone_with_theta",
"(",
"theta",
")",
"(",
"X",
",",
"Y",
")",
"return",
"K",
",",
"_approx_fprime",
"(",
"self",
".",
"theta",
",",
"f",
",",
"1e-10",
")",
"if",
"not",
"self",
".",
"anisotropic",
":",
"return",
"K",
",",
"K_gradient",
"[",
":",
",",
":",
"]",
".",
"sum",
"(",
"-",
"1",
")",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"else",
":",
"return",
"K",
",",
"K_gradient",
"else",
":",
"return",
"K"
] |
https://github.com/scikit-learn/scikit-learn/blob/1d1aadd0711b87d2a11c80aad15df6f8cf156712/sklearn/gaussian_process/kernels.py#L1660-L1758
|
||
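The `Matern.__call__` row above is the public scikit-learn kernel API, so it can be exercised directly:

```python
import numpy as np
from sklearn.gaussian_process.kernels import Matern

X = np.array([[0.0], [1.0], [2.0]])
kernel = Matern(length_scale=1.0, nu=1.5)

K = kernel(X)                               # 3 x 3 kernel matrix
K2, K_grad = kernel(X, eval_gradient=True)  # gradient requires Y=None
print(K.shape, K_grad.shape)                # (3, 3) (3, 3, 1)
```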
vxgmichel/aiostream
|
7c3853b6d7bec1c00497d389dc4faec0d63a8367
|
aiostream/stream/create.py
|
python
|
throw
|
(exc)
|
Throw an exception without generating any value.
|
Throw an exception without generating any value.
|
[
"Throw",
"an",
"exception",
"without",
"generating",
"any",
"value",
"."
] |
async def throw(exc):
"""Throw an exception without generating any value."""
if False:
yield
raise exc
|
[
"async",
"def",
"throw",
"(",
"exc",
")",
":",
"if",
"False",
":",
"yield",
"raise",
"exc"
] |
https://github.com/vxgmichel/aiostream/blob/7c3853b6d7bec1c00497d389dc4faec0d63a8367/aiostream/stream/create.py#L91-L95
|
||
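A usage sketch for the `throw` row, assuming the published `aiostream` package, where this generator is exposed as the `stream.throw` operator:

```python
import asyncio
from aiostream import stream

async def main():
    try:
        # The exception is raised before any value is produced.
        await stream.list(stream.throw(RuntimeError("boom")))
    except RuntimeError as exc:
        print("caught:", exc)

asyncio.run(main())
```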
awslabs/aws-config-rules
|
8dfeacf9d9e5e5f0fbb1b8545ff702dea700ea7a
|
python/API_GW_AUTHORIZER_IN_PLACE/API_GW_AUTHORIZER_IN_PLACE.py
|
python
|
build_evaluation_from_config_item
|
(configuration_item, compliance_type, annotation=None)
|
return eval_ci
|
Form an evaluation as a dictionary. Usually suited to report on configuration change rules.
Keyword arguments:
configuration_item -- the configurationItem dictionary in the invokingEvent
compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE
annotation -- an annotation to be added to the evaluation (default None). It will be truncated to 255 if longer.
|
Form an evaluation as a dictionary. Usually suited to report on configuration change rules.
|
[
"Form",
"an",
"evaluation",
"as",
"a",
"dictionary",
".",
"Usually",
"suited",
"to",
"report",
"on",
"configuration",
"change",
"rules",
"."
] |
def build_evaluation_from_config_item(configuration_item, compliance_type, annotation=None):
"""Form an evaluation as a dictionary. Usually suited to report on configuration change rules.
Keyword arguments:
configuration_item -- the configurationItem dictionary in the invokingEvent
compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE
annotation -- an annotation to be added to the evaluation (default None). It will be truncated to 255 if longer.
"""
eval_ci = {}
if annotation:
eval_ci['Annotation'] = build_annotation(annotation)
eval_ci['ComplianceResourceType'] = configuration_item['resourceType']
eval_ci['ComplianceResourceId'] = configuration_item['resourceId']
eval_ci['ComplianceType'] = compliance_type
eval_ci['OrderingTimestamp'] = configuration_item['configurationItemCaptureTime']
return eval_ci
|
[
"def",
"build_evaluation_from_config_item",
"(",
"configuration_item",
",",
"compliance_type",
",",
"annotation",
"=",
"None",
")",
":",
"eval_ci",
"=",
"{",
"}",
"if",
"annotation",
":",
"eval_ci",
"[",
"'Annotation'",
"]",
"=",
"build_annotation",
"(",
"annotation",
")",
"eval_ci",
"[",
"'ComplianceResourceType'",
"]",
"=",
"configuration_item",
"[",
"'resourceType'",
"]",
"eval_ci",
"[",
"'ComplianceResourceId'",
"]",
"=",
"configuration_item",
"[",
"'resourceId'",
"]",
"eval_ci",
"[",
"'ComplianceType'",
"]",
"=",
"compliance_type",
"eval_ci",
"[",
"'OrderingTimestamp'",
"]",
"=",
"configuration_item",
"[",
"'configurationItemCaptureTime'",
"]",
"return",
"eval_ci"
] |
https://github.com/awslabs/aws-config-rules/blob/8dfeacf9d9e5e5f0fbb1b8545ff702dea700ea7a/python/API_GW_AUTHORIZER_IN_PLACE/API_GW_AUTHORIZER_IN_PLACE.py#L192-L207
|
|
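A sketch of the evaluation dict this helper builds; the `configurationItem` fields are placeholders, and `annotation` is left unset so the separate `build_annotation` helper is not needed:

```python
configuration_item = {
    "resourceType": "AWS::ApiGateway::RestApi",               # placeholder
    "resourceId": "abc123",                                   # placeholder
    "configurationItemCaptureTime": "2020-01-01T00:00:00.000Z",
}

evaluation = build_evaluation_from_config_item(configuration_item, "COMPLIANT")
print(evaluation)
# {'ComplianceResourceType': 'AWS::ApiGateway::RestApi',
#  'ComplianceResourceId': 'abc123',
#  'ComplianceType': 'COMPLIANT',
#  'OrderingTimestamp': '2020-01-01T00:00:00.000Z'}
```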
pandas-dev/pandas
|
5ba7d714014ae8feaccc0dd4a98890828cf2832d
|
pandas/core/arrays/categorical.py
|
python
|
Categorical.as_unordered
|
(self, inplace=False)
|
return self.set_ordered(False, inplace=inplace)
|
Set the Categorical to be unordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to False.
Returns
-------
Categorical or None
Unordered Categorical or None if ``inplace=True``.
|
Set the Categorical to be unordered.
|
[
"Set",
"the",
"Categorical",
"to",
"be",
"unordered",
"."
] |
def as_unordered(self, inplace=False):
"""
Set the Categorical to be unordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a copy of this categorical with ordered set to False.
Returns
-------
Categorical or None
Unordered Categorical or None if ``inplace=True``.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(False, inplace=inplace)
|
[
"def",
"as_unordered",
"(",
"self",
",",
"inplace",
"=",
"False",
")",
":",
"inplace",
"=",
"validate_bool_kwarg",
"(",
"inplace",
",",
"\"inplace\"",
")",
"return",
"self",
".",
"set_ordered",
"(",
"False",
",",
"inplace",
"=",
"inplace",
")"
] |
https://github.com/pandas-dev/pandas/blob/5ba7d714014ae8feaccc0dd4a98890828cf2832d/pandas/core/arrays/categorical.py#L876-L892
|
|
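`Categorical.as_unordered` is public pandas API, so the row above can be exercised directly:

```python
import pandas as pd

cat = pd.Categorical(["a", "b", "a"], categories=["a", "b"], ordered=True)
print(cat.ordered)              # True

unordered = cat.as_unordered()  # inplace=False returns a copy
print(unordered.ordered)        # False
print(cat.ordered)              # True: the original is untouched
```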
golismero/golismero
|
7d605b937e241f51c1ca4f47b20f755eeefb9d76
|
thirdparty_libs/httpparser/util.py
|
python
|
IOrderedDict.popitem
|
(self, last=True)
|
return key, value
|
od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
|
od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
|
[
"od",
".",
"popitem",
"()",
"-",
">",
"(",
"k",
"v",
")",
"return",
"and",
"remove",
"a",
"(",
"key",
"value",
")",
"pair",
".",
"Pairs",
"are",
"returned",
"in",
"LIFO",
"order",
"if",
"last",
"is",
"true",
"or",
"FIFO",
"order",
"if",
"false",
"."
] |
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
key = next(reversed(self) if last else iter(self))
value = self.pop(key)
return key, value
|
[
"def",
"popitem",
"(",
"self",
",",
"last",
"=",
"True",
")",
":",
"if",
"not",
"self",
":",
"raise",
"KeyError",
"(",
"'dictionary is empty'",
")",
"key",
"=",
"next",
"(",
"reversed",
"(",
"self",
")",
"if",
"last",
"else",
"iter",
"(",
"self",
")",
")",
"value",
"=",
"self",
".",
"pop",
"(",
"key",
")",
"return",
"key",
",",
"value"
] |
https://github.com/golismero/golismero/blob/7d605b937e241f51c1ca4f47b20f755eeefb9d76/thirdparty_libs/httpparser/util.py#L174-L183
|
|
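The vendored `IOrderedDict.popitem` above has the same contract as the stdlib `collections.OrderedDict`, which this sketch uses instead:

```python
from collections import OrderedDict

od = OrderedDict([("a", 1), ("b", 2), ("c", 3)])
print(od.popitem())             # ('c', 3): LIFO when last is true
print(od.popitem(last=False))   # ('a', 1): FIFO when last is false
```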
kwea123/VTuber_Unity
|
a8e226c5fd3f10ad4bb21b60f2fd943d375b5314
|
face_alignment/models.py
|
python
|
conv3x3
|
(in_planes, out_planes, strd=1, padding=1, bias=False)
|
return nn.Conv2d(in_planes, out_planes, kernel_size=3,
stride=strd, padding=padding, bias=bias)
|
3x3 convolution with padding
|
3x3 convolution with padding
|
[
"3x3",
"convolution",
"with",
"padding"
] |
def conv3x3(in_planes, out_planes, strd=1, padding=1, bias=False):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3,
stride=strd, padding=padding, bias=bias)
|
[
"def",
"conv3x3",
"(",
"in_planes",
",",
"out_planes",
",",
"strd",
"=",
"1",
",",
"padding",
"=",
"1",
",",
"bias",
"=",
"False",
")",
":",
"return",
"nn",
".",
"Conv2d",
"(",
"in_planes",
",",
"out_planes",
",",
"kernel_size",
"=",
"3",
",",
"stride",
"=",
"strd",
",",
"padding",
"=",
"padding",
",",
"bias",
"=",
"bias",
")"
] |
https://github.com/kwea123/VTuber_Unity/blob/a8e226c5fd3f10ad4bb21b60f2fd943d375b5314/face_alignment/models.py#L7-L10
|
|
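A usage sketch for `conv3x3`, assuming the function above and PyTorch are available:

```python
import torch
import torch.nn as nn  # conv3x3 above refers to nn.Conv2d

conv = conv3x3(in_planes=3, out_planes=16)  # stride 1, padding 1
x = torch.randn(1, 3, 64, 64)
print(conv(x).shape)                        # torch.Size([1, 16, 64, 64])
```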
Emptyset110/dHydra
|
8ec44994ff4dda8bf1ec40e38dd068b757945933
|
dHydra/Vendor/CtpTraderApi/CtpTraderApi.py
|
python
|
CtpTraderApi.OnRspQryProduct
|
(self, pProduct, pRspInfo, nRequestID, bIsLast)
|
Response to a product query request
|
Response to a product query request
|
[
"请求查询产品响应"
] |
def OnRspQryProduct(self, pProduct, pRspInfo, nRequestID, bIsLast):
"""请求查询产品响应"""
if pRspInfo.ErrorID == 0:
self.logger.info(
"OnRspQryProduct: Received"
", no operation is followed"
)
# Otherwise, push the error message
else:
self.logger.error(
"OnRspQryProduct:{}, ErrorMsg:{}"
.format(
pRspInfo.ErrorID,
pRspInfo.ErrorMsg.decode('gbk')
)
)
|
[
"def",
"OnRspQryProduct",
"(",
"self",
",",
"pProduct",
",",
"pRspInfo",
",",
"nRequestID",
",",
"bIsLast",
")",
":",
"if",
"pRspInfo",
".",
"ErrorID",
"==",
"0",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"OnRspQryProduct: Received\"",
"\", no operation is followed\"",
")",
"# 否则,推送错误信息",
"else",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"OnRspQryProduct:{}, ErrorMsg:{}\"",
".",
"format",
"(",
"pRspInfo",
".",
"ErrorID",
",",
"pRspInfo",
".",
"ErrorMsg",
".",
"decode",
"(",
"'gbk'",
")",
")",
")"
] |
https://github.com/Emptyset110/dHydra/blob/8ec44994ff4dda8bf1ec40e38dd068b757945933/dHydra/Vendor/CtpTraderApi/CtpTraderApi.py#L1776-L1791
|
||
google/timesketch
|
1ce6b60e125d104e6644947c6f1dbe1b82ac76b6
|
timesketch/models/sketch.py
|
python
|
AggregationGroup.__init__
|
(
self, name, description, user, sketch, aggregations=None,
parameters='', orientation='', view=None)
|
Initialize the AggregationGroup object.
Args:
name (str): Name of the aggregation
description (str): Description of the aggregation
user (User): The user who created the aggregation
sketch (Sketch): The sketch that the aggregation is bound to
aggregations (Aggregation): List of aggregation objects.
parameters (str): A JSON formatted dict with parameters for
charting.
orientation (str): Describes how charts should be joined together.
view (View): Optional: The view that the aggregation is bound to
|
Initialize the AggregationGroup object.
|
[
"Initialize",
"the",
"AggregationGroup",
"object",
"."
] |
def __init__(
self, name, description, user, sketch, aggregations=None,
parameters='', orientation='', view=None):
"""Initialize the AggregationGroup object.
Args:
name (str): Name of the aggregation
description (str): Description of the aggregation
user (User): The user who created the aggregation
sketch (Sketch): The sketch that the aggregation is bound to
aggregations (Aggregation): List of aggregation objects.
parameters (str): A JSON formatted dict with parameters for
charting.
orientation (str): Describes how charts should be joined together.
view (View): Optional: The view that the aggregation is bound to
"""
super().__init__()
self.name = name
self.description = description
self.aggregations = aggregations or []
self.parameters = parameters
self.orientation = orientation
self.user = user
self.sketch = sketch
self.view = view
|
[
"def",
"__init__",
"(",
"self",
",",
"name",
",",
"description",
",",
"user",
",",
"sketch",
",",
"aggregations",
"=",
"None",
",",
"parameters",
"=",
"''",
",",
"orientation",
"=",
"''",
",",
"view",
"=",
"None",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
")",
"self",
".",
"name",
"=",
"name",
"self",
".",
"description",
"=",
"description",
"self",
".",
"aggregations",
"=",
"aggregations",
"or",
"[",
"]",
"self",
".",
"parameters",
"=",
"parameters",
"self",
".",
"orientation",
"=",
"orientation",
"self",
".",
"user",
"=",
"user",
"self",
".",
"sketch",
"=",
"sketch",
"self",
".",
"view",
"=",
"view"
] |
https://github.com/google/timesketch/blob/1ce6b60e125d104e6644947c6f1dbe1b82ac76b6/timesketch/models/sketch.py#L496-L520
|
||
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/lib/python2.7/site-packages/celery/backends/database/__init__.py
|
python
|
DatabaseBackend.__reduce__
|
(self, args=(), kwargs={})
|
return super(DatabaseBackend, self).__reduce__(args, kwargs)
|
[] |
def __reduce__(self, args=(), kwargs={}):
kwargs.update(
dict(dburi=self.url,
expires=self.expires,
engine_options=self.engine_options))
return super(DatabaseBackend, self).__reduce__(args, kwargs)
|
[
"def",
"__reduce__",
"(",
"self",
",",
"args",
"=",
"(",
")",
",",
"kwargs",
"=",
"{",
"}",
")",
":",
"kwargs",
".",
"update",
"(",
"dict",
"(",
"dburi",
"=",
"self",
".",
"url",
",",
"expires",
"=",
"self",
".",
"expires",
",",
"engine_options",
"=",
"self",
".",
"engine_options",
")",
")",
"return",
"super",
"(",
"DatabaseBackend",
",",
"self",
")",
".",
"__reduce__",
"(",
"args",
",",
"kwargs",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/celery/backends/database/__init__.py#L183-L188
|
|||
omz/PythonistaAppTemplate
|
f560f93f8876d82a21d108977f90583df08d55af
|
PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/pygments/util.py
|
python
|
unirange
|
(a, b)
|
Returns a regular expression string to match the given non-BMP range.
|
Returns a regular expression string to match the given non-BMP range.
|
[
"Returns",
"a",
"regular",
"expression",
"string",
"to",
"match",
"the",
"given",
"non",
"-",
"BMP",
"range",
"."
] |
def unirange(a, b):
"""
Returns a regular expression string to match the given non-BMP range.
"""
if b < a:
raise ValueError("Bad character range")
if a < 0x10000 or b < 0x10000:
raise ValueError("unirange is only defined for non-BMP ranges")
if sys.maxunicode > 0xffff:
# wide build
return u'[%s-%s]' % (unichr(a), unichr(b))
else:
# narrow build stores surrogates, and the 're' module handles them
# (incorrectly) as characters. Since there is still ordering among
# these characters, expand the range to one that it understands. Some
# background in http://bugs.python.org/issue3665 and
# http://bugs.python.org/issue12749
#
# Additionally, the lower constants are using unichr rather than
# literals because jython [which uses the wide path] can't load this
# file if they are literals.
ah, al = _surrogatepair(a)
bh, bl = _surrogatepair(b)
if ah == bh:
return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
else:
buf = []
buf.append(u'%s[%s-%s]' %
(unichr(ah), unichr(al),
ah == bh and unichr(bl) or unichr(0xdfff)))
            if bh - ah > 1:
                buf.append(u'[%s-%s][%s-%s]' %
                           (unichr(ah+1), unichr(bh-1),
                            unichr(0xdc00), unichr(0xdfff)))
if ah != bh:
buf.append(u'%s[%s-%s]' %
(unichr(bh), unichr(0xdc00), unichr(bl)))
return u'(?:' + u'|'.join(buf) + u')'
|
[
"def",
"unirange",
"(",
"a",
",",
"b",
")",
":",
"if",
"b",
"<",
"a",
":",
"raise",
"ValueError",
"(",
"\"Bad character range\"",
")",
"if",
"a",
"<",
"0x10000",
"or",
"b",
"<",
"0x10000",
":",
"raise",
"ValueError",
"(",
"\"unirange is only defined for non-BMP ranges\"",
")",
"if",
"sys",
".",
"maxunicode",
">",
"0xffff",
":",
"# wide build",
"return",
"u'[%s-%s]'",
"%",
"(",
"unichr",
"(",
"a",
")",
",",
"unichr",
"(",
"b",
")",
")",
"else",
":",
"# narrow build stores surrogates, and the 're' module handles them",
"# (incorrectly) as characters. Since there is still ordering among",
"# these characters, expand the range to one that it understands. Some",
"# background in http://bugs.python.org/issue3665 and",
"# http://bugs.python.org/issue12749",
"#",
"# Additionally, the lower constants are using unichr rather than",
"# literals because jython [which uses the wide path] can't load this",
"# file if they are literals.",
"ah",
",",
"al",
"=",
"_surrogatepair",
"(",
"a",
")",
"bh",
",",
"bl",
"=",
"_surrogatepair",
"(",
"b",
")",
"if",
"ah",
"==",
"bh",
":",
"return",
"u'(?:%s[%s-%s])'",
"%",
"(",
"unichr",
"(",
"ah",
")",
",",
"unichr",
"(",
"al",
")",
",",
"unichr",
"(",
"bl",
")",
")",
"else",
":",
"buf",
"=",
"[",
"]",
"buf",
".",
"append",
"(",
"u'%s[%s-%s]'",
"%",
"(",
"unichr",
"(",
"ah",
")",
",",
"unichr",
"(",
"al",
")",
",",
"ah",
"==",
"bh",
"and",
"unichr",
"(",
"bl",
")",
"or",
"unichr",
"(",
"0xdfff",
")",
")",
")",
"if",
"ah",
"-",
"bh",
">",
"1",
":",
"buf",
".",
"append",
"(",
"u'[%s-%s][%s-%s]'",
"%",
"unichr",
"(",
"ah",
"+",
"1",
")",
",",
"unichr",
"(",
"bh",
"-",
"1",
")",
",",
"unichr",
"(",
"0xdc00",
")",
",",
"unichr",
"(",
"0xdfff",
")",
")",
"if",
"ah",
"!=",
"bh",
":",
"buf",
".",
"append",
"(",
"u'%s[%s-%s]'",
"%",
"(",
"unichr",
"(",
"bh",
")",
",",
"unichr",
"(",
"0xdc00",
")",
",",
"unichr",
"(",
"bl",
")",
")",
")",
"return",
"u'(?:'",
"+",
"u'|'",
".",
"join",
"(",
"buf",
")",
"+",
"u')'"
] |
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/pygments/util.py#L214-L252
|
||
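`unirange` is Python 2-era code (`unichr`, narrow/wide builds), so this sketch assumes a Python 2 interpreter with the function and its `_surrogatepair` helper in scope:

```python
import re

# Build a pattern for a non-BMP range and match an astral codepoint;
# on a narrow build the pattern spells out surrogate pairs.
pattern = unirange(0x10000, 0x10FFF)
print(re.match(pattern + u'$', u'\U00010001') is not None)  # True

# Ranges that dip below U+10000 are rejected:
# unirange(0xFFFF, 0x10000)  # raises ValueError
```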
freedombox/FreedomBox
|
335a7f92cc08f27981f838a7cddfc67740598e54
|
plinth/modules/storage/forms.py
|
python
|
DirectorySelectForm.get_initial
|
(self, choices)
|
return (initial_selection, subdir)
|
Get initial form data.
|
Get initial form data.
|
[
"Get",
"initial",
"form",
"data",
"."
] |
def get_initial(self, choices):
"""Get initial form data."""
initial_selection = ()
subdir = ''
storage_path = self.initial['storage_path']
for choice in choices:
if storage_path.startswith(choice[0]):
initial_selection = choice
subdir = storage_path.split(choice[0], 1)[1].strip('/')
if choice[0] == '/':
subdir = '/' + subdir
break
return (initial_selection, subdir)
|
[
"def",
"get_initial",
"(",
"self",
",",
"choices",
")",
":",
"initial_selection",
"=",
"(",
")",
"subdir",
"=",
"''",
"storage_path",
"=",
"self",
".",
"initial",
"[",
"'storage_path'",
"]",
"for",
"choice",
"in",
"choices",
":",
"if",
"storage_path",
".",
"startswith",
"(",
"choice",
"[",
"0",
"]",
")",
":",
"initial_selection",
"=",
"choice",
"subdir",
"=",
"storage_path",
".",
"split",
"(",
"choice",
"[",
"0",
"]",
",",
"1",
")",
"[",
"1",
"]",
".",
"strip",
"(",
"'/'",
")",
"if",
"choice",
"[",
"0",
"]",
"==",
"'/'",
":",
"subdir",
"=",
"'/'",
"+",
"subdir",
"break",
"return",
"(",
"initial_selection",
",",
"subdir",
")"
] |
https://github.com/freedombox/FreedomBox/blob/335a7f92cc08f27981f838a7cddfc67740598e54/plinth/modules/storage/forms.py#L119-L131
|
|
qibinlou/SinaWeibo-Emotion-Classification
|
f336fc104abd68b0ec4180fe2ed80fafe49cb790
|
nltk/sem/evaluate.py
|
python
|
is_rel
|
(s)
|
Check whether a set represents a relation (of any arity).
:param s: a set containing tuples of str elements
:type s: set
:rtype: bool
|
Check whether a set represents a relation (of any arity).
|
[
"Check",
"whether",
"a",
"set",
"represents",
"a",
"relation",
"(",
"of",
"any",
"arity",
")",
"."
] |
def is_rel(s):
"""
Check whether a set represents a relation (of any arity).
:param s: a set containing tuples of str elements
:type s: set
:rtype: bool
"""
# we have the empty relation, i.e. set()
if len(s) == 0:
return True
# all the elements are tuples of the same length
elif s == set([elem for elem in s if isinstance(elem, tuple)]) and\
len(max(s))==len(min(s)):
return True
else:
raise ValueError, "Set %r contains sequences of different lengths" % s
|
[
"def",
"is_rel",
"(",
"s",
")",
":",
"# we have the empty relation, i.e. set()",
"if",
"len",
"(",
"s",
")",
"==",
"0",
":",
"return",
"True",
"# all the elements are tuples of the same length",
"elif",
"s",
"==",
"set",
"(",
"[",
"elem",
"for",
"elem",
"in",
"s",
"if",
"isinstance",
"(",
"elem",
",",
"tuple",
")",
"]",
")",
"and",
"len",
"(",
"max",
"(",
"s",
")",
")",
"==",
"len",
"(",
"min",
"(",
"s",
")",
")",
":",
"return",
"True",
"else",
":",
"raise",
"ValueError",
",",
"\"Set %r contains sequences of different lengths\"",
"%",
"s"
] |
https://github.com/qibinlou/SinaWeibo-Emotion-Classification/blob/f336fc104abd68b0ec4180fe2ed80fafe49cb790/nltk/sem/evaluate.py#L44-L60
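Note for readers of this record: the raise statement above is Python 2 syntax, so the snippet below is a small Python 3 port (ours, not NLTK's) showing how the check behaves on a couple of inputs.

def is_rel(s):
    # Python 3 port of the record above; logic unchanged.
    if len(s) == 0:
        return True  # the empty relation
    elif s == {elem for elem in s if isinstance(elem, tuple)} and \
            len(max(s)) == len(min(s)):
        return True
    else:
        raise ValueError("Set %r contains sequences of different lengths" % s)

print(is_rel(set()))                       # True
print(is_rel({('a', 'b'), ('c', 'd')}))    # True
# is_rel({('a',), ('b', 'c')}) raises ValueError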
|
||
securesystemslab/zippy
|
ff0e84ac99442c2c55fe1d285332cfd4e185e089
|
zippy/benchmarks/src/benchmarks/sympy/sympy/categories/diagram_drawing.py
|
python
|
DiagramGrid._find_triangle_to_weld
|
(triangles, fringe, grid)
|
return None
|
Finds, if possible, a triangle and an edge in the fringe to
which the triangle could be attached. Returns the tuple
containing the triangle and the index of the corresponding
edge in the fringe.
This function relies on the fact that objects are unique in
the diagram.
|
Finds, if possible, a triangle and an edge in the fringe to
which the triangle could be attached. Returns the tuple
containing the triangle and the index of the corresponding
edge in the fringe.
|
[
"Finds",
"if",
"possible",
"a",
"triangle",
"and",
"an",
"edge",
"in",
"the",
"fringe",
"to",
"which",
"the",
"triangle",
"could",
"be",
"attached",
".",
"Returns",
"the",
"tuple",
"containing",
"the",
"triangle",
"and",
"the",
"index",
"of",
"the",
"corresponding",
"edge",
"in",
"the",
"fringe",
"."
] |
def _find_triangle_to_weld(triangles, fringe, grid):
"""
Finds, if possible, a triangle and an edge in the fringe to
which the triangle could be attached. Returns the tuple
containing the triangle and the index of the corresponding
edge in the fringe.
This function relies on the fact that objects are unique in
the diagram.
"""
for triangle in triangles:
for (a, b) in fringe:
if frozenset([grid[a], grid[b]]) in triangle:
return (triangle, (a, b))
return None
|
[
"def",
"_find_triangle_to_weld",
"(",
"triangles",
",",
"fringe",
",",
"grid",
")",
":",
"for",
"triangle",
"in",
"triangles",
":",
"for",
"(",
"a",
",",
"b",
")",
"in",
"fringe",
":",
"if",
"frozenset",
"(",
"[",
"grid",
"[",
"a",
"]",
",",
"grid",
"[",
"b",
"]",
"]",
")",
"in",
"triangle",
":",
"return",
"(",
"triangle",
",",
"(",
"a",
",",
"b",
")",
")",
"return",
"None"
] |
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/sympy/sympy/categories/diagram_drawing.py#L558-L572
|
|
WikidPad/WikidPad
|
558109638807bc76b4672922686e416ab2d5f79c
|
WikidPad/lib/whoosh/lang/isri.py
|
python
|
ISRIStemmer.stem
|
(self, token)
|
return self.stm
|
Stemming a word token using the ISRI stemmer.
|
Stemming a word token using the ISRI stemmer.
|
[
"Stemming",
"a",
"word",
"token",
"using",
"the",
"ISRI",
"stemmer",
"."
] |
def stem(self, token):
"""
Stemming a word token using the ISRI stemmer.
"""
self.stm = token
self.norm(1) # remove diacritics which represent Arabic short vowels
if self.stm in self.stop_words: return self.stm # exclude stop words from being processed
self.pre32() # remove length three and length two prefixes in this order
self.suf32() # remove length three and length two suffixes in this order
self.waw() # remove connective ‘و’ if it precedes a word beginning with ‘و’
self.norm(2) # normalize initial hamza to bare alif
if len(self.stm) <= 3: return self.stm # return stem if less than or equal to three
if len(self.stm) == 4: # length 4 word
self.pro_w4()
return self.stm
elif len(self.stm) == 5: # length 5 word
self.pro_w53()
self.end_w5()
return self.stm
elif len(self.stm) == 6: # length 6 word
self.pro_w6()
self.end_w6()
return self.stm
elif len(self.stm) == 7: # length 7 word
self.suf1()
if len(self.stm) == 7:
self.pre1()
if len(self.stm) == 6:
self.pro_w6()
self.end_w6()
return self.stm
return self.stm
|
[
"def",
"stem",
"(",
"self",
",",
"token",
")",
":",
"self",
".",
"stm",
"=",
"token",
"self",
".",
"norm",
"(",
"1",
")",
"# remove diacritics which representing Arabic short vowels",
"if",
"self",
".",
"stm",
"in",
"self",
".",
"stop_words",
":",
"return",
"self",
".",
"stm",
"# exclude stop words from being processed",
"self",
".",
"pre32",
"(",
")",
"# remove length three and length two prefixes in this order",
"self",
".",
"suf32",
"(",
")",
"# remove length three and length two suffixes in this order",
"self",
".",
"waw",
"(",
")",
"# remove connective ‘و’ if it precedes a word beginning with ‘و’",
"self",
".",
"norm",
"(",
"2",
")",
"# normalize initial hamza to bare alif",
"if",
"len",
"(",
"self",
".",
"stm",
")",
"<=",
"3",
":",
"return",
"self",
".",
"stm",
"# return stem if less than or equal to three",
"if",
"len",
"(",
"self",
".",
"stm",
")",
"==",
"4",
":",
"# length 4 word",
"self",
".",
"pro_w4",
"(",
")",
"return",
"self",
".",
"stm",
"elif",
"len",
"(",
"self",
".",
"stm",
")",
"==",
"5",
":",
"# length 5 word",
"self",
".",
"pro_w53",
"(",
")",
"self",
".",
"end_w5",
"(",
")",
"return",
"self",
".",
"stm",
"elif",
"len",
"(",
"self",
".",
"stm",
")",
"==",
"6",
":",
"# length 6 word",
"self",
".",
"pro_w6",
"(",
")",
"self",
".",
"end_w6",
"(",
")",
"return",
"self",
".",
"stm",
"elif",
"len",
"(",
"self",
".",
"stm",
")",
"==",
"7",
":",
"# length 7 word",
"self",
".",
"suf1",
"(",
")",
"if",
"len",
"(",
"self",
".",
"stm",
")",
"==",
"7",
":",
"self",
".",
"pre1",
"(",
")",
"if",
"len",
"(",
"self",
".",
"stm",
")",
"==",
"6",
":",
"self",
".",
"pro_w6",
"(",
")",
"self",
".",
"end_w6",
"(",
")",
"return",
"self",
".",
"stm",
"return",
"self",
".",
"stm"
] |
https://github.com/WikidPad/WikidPad/blob/558109638807bc76b4672922686e416ab2d5f79c/WikidPad/lib/whoosh/lang/isri.py#L142-L175
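If NLTK is available, the stemmer in this record can be exercised directly; a minimal smoke test (the Arabic word is an arbitrary choice, and the exact stem depends on the NLTK release):

from nltk.stem.isri import ISRIStemmer

stemmer = ISRIStemmer()
print(stemmer.stem(u'فعاليات'))   # prints the stemmed form of an arbitrary Arabic token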
|
|
fortharris/Pcode
|
147962d160a834c219e12cb456abc130826468e4
|
Xtra/autopep8.py
|
python
|
FixPEP8.fix_e251
|
(self, result)
|
Remove whitespace around parameter '=' sign.
|
Remove whitespace around parameter '=' sign.
|
[
"Remove",
"whitespace",
"around",
"parameter",
"=",
"sign",
"."
] |
def fix_e251(self, result):
"""Remove whitespace around parameter '=' sign."""
line_index = result['line'] - 1
target = self.source[line_index]
# This is necessary since pep8 sometimes reports columns that go
# past the end of the physical line. This happens in cases like,
# foo(bar\n=None)
c = min(result['column'] - 1,
len(target) - 1)
if target[c].strip():
fixed = target
else:
fixed = target[:c].rstrip() + target[c:].lstrip()
# There could be an escaped newline
#
# def foo(a=\
# 1)
if fixed.endswith(('=\\\n', '=\\\r\n', '=\\\r')):
self.source[line_index] = fixed.rstrip('\n\r \t\\')
self.source[line_index + 1] = self.source[line_index + 1].lstrip()
return [line_index + 1, line_index + 2] # Line indexed at 1
self.source[result['line'] - 1] = fixed
|
[
"def",
"fix_e251",
"(",
"self",
",",
"result",
")",
":",
"line_index",
"=",
"result",
"[",
"'line'",
"]",
"-",
"1",
"target",
"=",
"self",
".",
"source",
"[",
"line_index",
"]",
"# This is necessary since pep8 sometimes reports columns that goes",
"# past the end of the physical line. This happens in cases like,",
"# foo(bar\\n=None)",
"c",
"=",
"min",
"(",
"result",
"[",
"'column'",
"]",
"-",
"1",
",",
"len",
"(",
"target",
")",
"-",
"1",
")",
"if",
"target",
"[",
"c",
"]",
".",
"strip",
"(",
")",
":",
"fixed",
"=",
"target",
"else",
":",
"fixed",
"=",
"target",
"[",
":",
"c",
"]",
".",
"rstrip",
"(",
")",
"+",
"target",
"[",
"c",
":",
"]",
".",
"lstrip",
"(",
")",
"# There could be an escaped newline",
"#",
"# def foo(a=\\",
"# 1)",
"if",
"fixed",
".",
"endswith",
"(",
"(",
"'=\\\\\\n'",
",",
"'=\\\\\\r\\n'",
",",
"'=\\\\\\r'",
")",
")",
":",
"self",
".",
"source",
"[",
"line_index",
"]",
"=",
"fixed",
".",
"rstrip",
"(",
"'\\n\\r \\t\\\\'",
")",
"self",
".",
"source",
"[",
"line_index",
"+",
"1",
"]",
"=",
"self",
".",
"source",
"[",
"line_index",
"+",
"1",
"]",
".",
"lstrip",
"(",
")",
"return",
"[",
"line_index",
"+",
"1",
",",
"line_index",
"+",
"2",
"]",
"# Line indexed at 1",
"self",
".",
"source",
"[",
"result",
"[",
"'line'",
"]",
"-",
"1",
"]",
"=",
"fixed"
] |
https://github.com/fortharris/Pcode/blob/147962d160a834c219e12cb456abc130826468e4/Xtra/autopep8.py#L658-L683
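The same E251 fix is easiest to observe end to end through autopep8's public fix_code entry point rather than the internal method above (a sketch assuming a reasonably recent autopep8; the select option restricts it to this one check):

import autopep8

bad = "def foo(bar = 1):\n    return bar\n"
print(autopep8.fix_code(bad, options={'select': ['E251']}))
# def foo(bar=1):
#     return bar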
|
||
edisonlz/fastor
|
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
|
base/site-packages/mongoengine/django/storage.py
|
python
|
GridFSStorage.size
|
(self, name)
|
Returns the total size, in bytes, of the file specified by name.
|
Returns the total size, in bytes, of the file specified by name.
|
[
"Returns",
"the",
"total",
"size",
"in",
"bytes",
"of",
"the",
"file",
"specified",
"by",
"name",
"."
] |
def size(self, name):
"""Returns the total size, in bytes, of the file specified by name.
"""
doc = self._get_doc_with_name(name)
if doc:
return getattr(doc, self.field).length
else:
raise ValueError("No such file or directory: '%s'" % name)
|
[
"def",
"size",
"(",
"self",
",",
"name",
")",
":",
"doc",
"=",
"self",
".",
"_get_doc_with_name",
"(",
"name",
")",
"if",
"doc",
":",
"return",
"getattr",
"(",
"doc",
",",
"self",
".",
"field",
")",
".",
"length",
"else",
":",
"raise",
"ValueError",
"(",
"\"No such file or directory: '%s'\"",
"%",
"name",
")"
] |
https://github.com/edisonlz/fastor/blob/342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3/base/site-packages/mongoengine/django/storage.py#L58-L65
|
||
leo-editor/leo-editor
|
383d6776d135ef17d73d935a2f0ecb3ac0e99494
|
leo/core/leoCommands.py
|
python
|
Commands.redraw_later
|
(self)
|
Ensure that c.redraw() will be called eventually.
c.outerUpdate will call c.redraw() only if no other code calls c.redraw().
|
Ensure that c.redraw() will be called eventually.
|
[
"Ensure",
"that",
"c",
".",
"redraw",
"()",
"will",
"be",
"called",
"eventually",
"."
] |
def redraw_later(self):
"""
Ensure that c.redraw() will be called eventually.
c.outerUpdate will call c.redraw() only if no other code calls c.redraw().
"""
c = self
c.requestLaterRedraw = True
if 'drawing' in g.app.debug:
# g.trace('\n' + g.callers(8))
g.trace(g.callers())
|
[
"def",
"redraw_later",
"(",
"self",
")",
":",
"c",
"=",
"self",
"c",
".",
"requestLaterRedraw",
"=",
"True",
"if",
"'drawing'",
"in",
"g",
".",
"app",
".",
"debug",
":",
"# g.trace('\\n' + g.callers(8))",
"g",
".",
"trace",
"(",
"g",
".",
"callers",
"(",
")",
")"
] |
https://github.com/leo-editor/leo-editor/blob/383d6776d135ef17d73d935a2f0ecb3ac0e99494/leo/core/leoCommands.py#L3201-L3211
|
||
buke/GreenOdoo
|
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
|
runtime/python/lib/python2.7/pickletools.py
|
python
|
read_stringnl_noescape_pair
|
(f)
|
return "%s %s" % (read_stringnl_noescape(f), read_stringnl_noescape(f))
|
r"""
>>> import StringIO
>>> read_stringnl_noescape_pair(StringIO.StringIO("Queue\nEmpty\njunk"))
'Queue Empty'
|
r"""
>>> import StringIO
>>> read_stringnl_noescape_pair(StringIO.StringIO("Queue\nEmpty\njunk"))
'Queue Empty'
|
[
"r",
">>>",
"import",
"StringIO",
">>>",
"read_stringnl_noescape_pair",
"(",
"StringIO",
".",
"StringIO",
"(",
"Queue",
"\\",
"nEmpty",
"\\",
"njunk",
"))",
"Queue",
"Empty"
] |
def read_stringnl_noescape_pair(f):
r"""
>>> import StringIO
>>> read_stringnl_noescape_pair(StringIO.StringIO("Queue\nEmpty\njunk"))
'Queue Empty'
"""
return "%s %s" % (read_stringnl_noescape(f), read_stringnl_noescape(f))
|
[
"def",
"read_stringnl_noescape_pair",
"(",
"f",
")",
":",
"return",
"\"%s %s\"",
"%",
"(",
"read_stringnl_noescape",
"(",
"f",
")",
",",
"read_stringnl_noescape",
"(",
"f",
")",
")"
] |
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/pickletools.py#L335-L342
|
|
IronLanguages/ironpython2
|
51fdedeeda15727717fb8268a805f71b06c0b9f1
|
Src/StdLib/Lib/decimal.py
|
python
|
Context.remainder_near
|
(self, a, b)
|
return a.remainder_near(b, context=self)
|
Returns "a - b * n", where n is the integer nearest the exact
value of "x / b" (if two integers are equally near then the even one
is chosen). If the result is equal to 0 then its sign will be the
sign of a.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
Decimal('-0.9')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
Decimal('-2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
Decimal('-0.3')
>>> ExtendedContext.remainder_near(3, 11)
Decimal('3')
>>> ExtendedContext.remainder_near(Decimal(3), 11)
Decimal('3')
>>> ExtendedContext.remainder_near(3, Decimal(11))
Decimal('3')
|
Returns "a - b * n", where n is the integer nearest the exact
value of "x / b" (if two integers are equally near then the even one
is chosen). If the result is equal to 0 then its sign will be the
sign of a.
|
[
"Returns",
"to",
"be",
"a",
"-",
"b",
"*",
"n",
"where",
"n",
"is",
"the",
"integer",
"nearest",
"the",
"exact",
"value",
"of",
"x",
"/",
"b",
"(",
"if",
"two",
"integers",
"are",
"equally",
"near",
"then",
"the",
"even",
"one",
"is",
"chosen",
")",
".",
"If",
"the",
"result",
"is",
"equal",
"to",
"0",
"then",
"its",
"sign",
"will",
"be",
"the",
"sign",
"of",
"a",
"."
] |
def remainder_near(self, a, b):
"""Returns to be "a - b * n", where n is the integer nearest the exact
value of "x / b" (if two integers are equally near then the even one
is chosen). If the result is equal to 0 then its sign will be the
sign of a.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
Decimal('-0.9')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
Decimal('-2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
Decimal('-0.3')
>>> ExtendedContext.remainder_near(3, 11)
Decimal('3')
>>> ExtendedContext.remainder_near(Decimal(3), 11)
Decimal('3')
>>> ExtendedContext.remainder_near(3, Decimal(11))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
return a.remainder_near(b, context=self)
|
[
"def",
"remainder_near",
"(",
"self",
",",
"a",
",",
"b",
")",
":",
"a",
"=",
"_convert_other",
"(",
"a",
",",
"raiseit",
"=",
"True",
")",
"return",
"a",
".",
"remainder_near",
"(",
"b",
",",
"context",
"=",
"self",
")"
] |
https://github.com/IronLanguages/ironpython2/blob/51fdedeeda15727717fb8268a805f71b06c0b9f1/Src/StdLib/Lib/decimal.py#L5155-L5187
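To make the rounding rule concrete: for operands 10 and 6 the exact quotient is 10/6 ≈ 1.67, the nearest integer is n = 2, and 10 - 6*2 = -2, which is the doctest result above. The same behaviour is available from the stdlib decimal module with no extra setup:

from decimal import Decimal, Context

ctx = Context()
print(ctx.remainder_near(Decimal('10'), Decimal('6')))   # -2  (n = 2 is nearest 1.67)
print(ctx.remainder_near(Decimal('10'), Decimal('3')))   # 1   (n = 3 is nearest 3.33)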
|
|
KalleHallden/AutoTimer
|
2d954216700c4930baa154e28dbddc34609af7ce
|
env/lib/python2.7/site-packages/pip/_vendor/urllib3/__init__.py
|
python
|
add_stderr_logger
|
(level=logging.DEBUG)
|
return handler
|
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
|
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
|
[
"Helper",
"for",
"quickly",
"adding",
"a",
"StreamHandler",
"to",
"the",
"logger",
".",
"Useful",
"for",
"debugging",
"."
] |
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug('Added a stderr logging handler to logger: %s', __name__)
return handler
|
[
"def",
"add_stderr_logger",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
")",
":",
"# This method needs to be in this __init__.py to get the __name__ correct",
"# even if urllib3 is vendored within another package.",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"handler",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"'%(asctime)s %(levelname)s %(message)s'",
")",
")",
"logger",
".",
"addHandler",
"(",
"handler",
")",
"logger",
".",
"setLevel",
"(",
"level",
")",
"logger",
".",
"debug",
"(",
"'Added a stderr logging handler to logger: %s'",
",",
"__name__",
")",
"return",
"handler"
] |
https://github.com/KalleHallden/AutoTimer/blob/2d954216700c4930baa154e28dbddc34609af7ce/env/lib/python2.7/site-packages/pip/_vendor/urllib3/__init__.py#L51-L66
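Since this helper is exported at package level, enabling it is a one-liner; a usage sketch (assuming the vendored copy behaves like upstream urllib3, where the handler is attached to the urllib3 logger):

import logging
import urllib3

handler = urllib3.add_stderr_logger(logging.INFO)
# ... make requests; connection-level chatter now goes to stderr ...
logging.getLogger('urllib3').removeHandler(handler)   # detach when done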
|
|
timkpaine/pyEX
|
254acd2b0cf7cb7183100106f4ecc11d1860c46a
|
pyEX/commodities/commodities.py
|
python
|
natgasDF
|
(token="", version="stable", filter="", format="json", **timeseries_kwargs)
|
return timeSeriesDF(
id="ENERGY",
key="DHHNGSP",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
|
[] |
def natgasDF(token="", version="stable", filter="", format="json", **timeseries_kwargs):
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="ENERGY",
key="DHHNGSP",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
|
[
"def",
"natgasDF",
"(",
"token",
"=",
"\"\"",
",",
"version",
"=",
"\"stable\"",
",",
"filter",
"=",
"\"\"",
",",
"format",
"=",
"\"json\"",
",",
"*",
"*",
"timeseries_kwargs",
")",
":",
"_timeseriesWrapper",
"(",
"timeseries_kwargs",
")",
"return",
"timeSeriesDF",
"(",
"id",
"=",
"\"ENERGY\"",
",",
"key",
"=",
"\"DHHNGSP\"",
",",
"token",
"=",
"token",
",",
"version",
"=",
"version",
",",
"filter",
"=",
"filter",
",",
"format",
"=",
"format",
",",
"*",
"*",
"timeseries_kwargs",
")"
] |
https://github.com/timkpaine/pyEX/blob/254acd2b0cf7cb7183100106f4ecc11d1860c46a/pyEX/commodities/commodities.py#L204-L214
|
|||
pydata/patsy
|
5fc881104b749b720b08e393a5505d6e69d72f95
|
patsy/design_info.py
|
python
|
DesignInfo.linear_constraint
|
(self, constraint_likes)
|
return linear_constraint(constraint_likes, self.column_names)
|
Construct a linear constraint in matrix form from a (possibly
symbolic) description.
Possible inputs:
* A dictionary which is taken as a set of equality constraints. Keys
can be either string column names, or integer column indexes.
* A string giving an arithmetic expression referring to the matrix
columns by name.
* A list of such strings which are ANDed together.
* A tuple (A, b) where A and b are array_likes, and the constraint is
Ax = b. If necessary, these will be coerced to the proper
dimensionality by appending dimensions with size 1.
The string-based language has the standard arithmetic operators, / * +
- and parentheses, plus "=" is used for equality and "," is used to
AND together multiple constraint equations within a string.
If no = appears in some expression, then that expression is assumed to
be equal to zero. Division is always float-based, even if
``__future__.true_division`` isn't in effect.
Returns a :class:`LinearConstraint` object.
Examples::
di = DesignInfo(["x1", "x2", "x3"])
# Equivalent ways to write x1 == 0:
di.linear_constraint({"x1": 0}) # by name
di.linear_constraint({0: 0}) # by index
di.linear_constraint("x1 = 0") # string based
di.linear_constraint("x1") # can leave out "= 0"
di.linear_constraint("2 * x1 = (x1 + 2 * x1) / 3")
di.linear_constraint(([1, 0, 0], 0)) # constraint matrices
# Equivalent ways to write x1 == 0 and x3 == 10
di.linear_constraint({"x1": 0, "x3": 10})
di.linear_constraint({0: 0, 2: 10})
di.linear_constraint({0: 0, "x3": 10})
di.linear_constraint("x1 = 0, x3 = 10")
di.linear_constraint("x1, x3 = 10")
di.linear_constraint(["x1", "x3 = 0"]) # list of strings
di.linear_constraint("x1 = 0, x3 - 10 = x1")
di.linear_constraint([[1, 0, 0], [0, 0, 1]], [0, 10])
# You can also chain together equalities, just like Python:
di.linear_constraint("x1 = x2 = 3")
|
Construct a linear constraint in matrix form from a (possibly
symbolic) description.
|
[
"Construct",
"a",
"linear",
"constraint",
"in",
"matrix",
"form",
"from",
"a",
"(",
"possibly",
"symbolic",
")",
"description",
"."
] |
def linear_constraint(self, constraint_likes):
"""Construct a linear constraint in matrix form from a (possibly
symbolic) description.
Possible inputs:
* A dictionary which is taken as a set of equality constraints. Keys
can be either string column names, or integer column indexes.
* A string giving an arithmetic expression referring to the matrix
columns by name.
* A list of such strings which are ANDed together.
* A tuple (A, b) where A and b are array_likes, and the constraint is
Ax = b. If necessary, these will be coerced to the proper
dimensionality by appending dimensions with size 1.
The string-based language has the standard arithmetic operators, / * +
- and parentheses, plus "=" is used for equality and "," is used to
AND together multiple constraint equations within a string.
If no = appears in some expression, then that expression is assumed to
be equal to zero. Division is always float-based, even if
``__future__.true_division`` isn't in effect.
Returns a :class:`LinearConstraint` object.
Examples::
di = DesignInfo(["x1", "x2", "x3"])
# Equivalent ways to write x1 == 0:
di.linear_constraint({"x1": 0}) # by name
di.linear_constraint({0: 0}) # by index
di.linear_constraint("x1 = 0") # string based
di.linear_constraint("x1") # can leave out "= 0"
di.linear_constraint("2 * x1 = (x1 + 2 * x1) / 3")
di.linear_constraint(([1, 0, 0], 0)) # constraint matrices
# Equivalent ways to write x1 == 0 and x3 == 10
di.linear_constraint({"x1": 0, "x3": 10})
di.linear_constraint({0: 0, 2: 10})
di.linear_constraint({0: 0, "x3": 10})
di.linear_constraint("x1 = 0, x3 = 10")
di.linear_constraint("x1, x3 = 10")
di.linear_constraint(["x1", "x3 = 0"]) # list of strings
di.linear_constraint("x1 = 0, x3 - 10 = x1")
di.linear_constraint([[1, 0, 0], [0, 0, 1]], [0, 10])
# You can also chain together equalities, just like Python:
di.linear_constraint("x1 = x2 = 3")
"""
return linear_constraint(constraint_likes, self.column_names)
|
[
"def",
"linear_constraint",
"(",
"self",
",",
"constraint_likes",
")",
":",
"return",
"linear_constraint",
"(",
"constraint_likes",
",",
"self",
".",
"column_names",
")"
] |
https://github.com/pydata/patsy/blob/5fc881104b749b720b08e393a5505d6e69d72f95/patsy/design_info.py#L487-L536
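The returned object exposes the matrix form directly; a short usage sketch (assumes patsy is installed; coefs and constants are LinearConstraint's documented attributes):

from patsy import DesignInfo

di = DesignInfo(["x1", "x2", "x3"])
lc = di.linear_constraint("x1 = 0, x3 = 10")
print(lc.coefs)       # [[1. 0. 0.]
                      #  [0. 0. 1.]]
print(lc.constants)   # [[ 0.]
                      #  [10.]]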
|
|
JiYou/openstack
|
8607dd488bde0905044b303eb6e52bdea6806923
|
packages/source/quantum/quantum/openstack/common/rpc/impl_kombu.py
|
python
|
Connection.iterconsume
|
(self, limit=None, timeout=None)
|
Return an iterator that will consume from all queues/consumers
|
Return an iterator that will consume from all queues/consumers
|
[
"Return",
"an",
"iterator",
"that",
"will",
"consume",
"from",
"all",
"queues",
"/",
"consumers"
] |
def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers"""
info = {'do_consume': True}
def _error_callback(exc):
if isinstance(exc, socket.timeout):
LOG.debug(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
str(exc))
info['do_consume'] = True
def _consume():
if info['do_consume']:
queues_head = self.consumers[:-1]
queues_tail = self.consumers[-1]
for queue in queues_head:
queue.consume(nowait=True)
queues_tail.consume(nowait=False)
info['do_consume'] = False
return self.connection.drain_events(timeout=timeout)
for iteration in itertools.count(0):
if limit and iteration >= limit:
raise StopIteration
yield self.ensure(_error_callback, _consume)
|
[
"def",
"iterconsume",
"(",
"self",
",",
"limit",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"info",
"=",
"{",
"'do_consume'",
":",
"True",
"}",
"def",
"_error_callback",
"(",
"exc",
")",
":",
"if",
"isinstance",
"(",
"exc",
",",
"socket",
".",
"timeout",
")",
":",
"LOG",
".",
"debug",
"(",
"_",
"(",
"'Timed out waiting for RPC response: %s'",
")",
"%",
"str",
"(",
"exc",
")",
")",
"raise",
"rpc_common",
".",
"Timeout",
"(",
")",
"else",
":",
"LOG",
".",
"exception",
"(",
"_",
"(",
"'Failed to consume message from queue: %s'",
")",
"%",
"str",
"(",
"exc",
")",
")",
"info",
"[",
"'do_consume'",
"]",
"=",
"True",
"def",
"_consume",
"(",
")",
":",
"if",
"info",
"[",
"'do_consume'",
"]",
":",
"queues_head",
"=",
"self",
".",
"consumers",
"[",
":",
"-",
"1",
"]",
"queues_tail",
"=",
"self",
".",
"consumers",
"[",
"-",
"1",
"]",
"for",
"queue",
"in",
"queues_head",
":",
"queue",
".",
"consume",
"(",
"nowait",
"=",
"True",
")",
"queues_tail",
".",
"consume",
"(",
"nowait",
"=",
"False",
")",
"info",
"[",
"'do_consume'",
"]",
"=",
"False",
"return",
"self",
".",
"connection",
".",
"drain_events",
"(",
"timeout",
"=",
"timeout",
")",
"for",
"iteration",
"in",
"itertools",
".",
"count",
"(",
"0",
")",
":",
"if",
"limit",
"and",
"iteration",
">=",
"limit",
":",
"raise",
"StopIteration",
"yield",
"self",
".",
"ensure",
"(",
"_error_callback",
",",
"_consume",
")"
] |
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/quantum/quantum/openstack/common/rpc/impl_kombu.py#L620-L648
|
||
google/coursebuilder-core
|
08f809db3226d9269e30d5edd0edd33bd22041f4
|
coursebuilder/modules/dashboard/filer.py
|
python
|
FilesItemRESTHandler.delete
|
(self)
|
Handles REST DELETE verb.
|
Handles REST DELETE verb.
|
[
"Handles",
"REST",
"DELETE",
"verb",
"."
] |
def delete(self):
"""Handles REST DELETE verb."""
key = self.request.get('key')
if not self.assert_xsrf_token_or_fail(
self.request, 'delete-asset', {'key': key}):
return
if not FilesRights.can_delete(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
fs = self.app_context.fs.impl
path = fs.physical_to_logical(key)
if not fs.isfile(path):
transforms.send_json_response(
self, 403, 'File does not exist.', None)
return
fs.delete(path)
transforms.send_json_response(self, 200, 'Deleted.')
|
[
"def",
"delete",
"(",
"self",
")",
":",
"key",
"=",
"self",
".",
"request",
".",
"get",
"(",
"'key'",
")",
"if",
"not",
"self",
".",
"assert_xsrf_token_or_fail",
"(",
"self",
".",
"request",
",",
"'delete-asset'",
",",
"{",
"'key'",
":",
"key",
"}",
")",
":",
"return",
"if",
"not",
"FilesRights",
".",
"can_delete",
"(",
"self",
")",
":",
"transforms",
".",
"send_json_response",
"(",
"self",
",",
"401",
",",
"'Access denied.'",
",",
"{",
"'key'",
":",
"key",
"}",
")",
"return",
"fs",
"=",
"self",
".",
"app_context",
".",
"fs",
".",
"impl",
"path",
"=",
"fs",
".",
"physical_to_logical",
"(",
"key",
")",
"if",
"not",
"fs",
".",
"isfile",
"(",
"path",
")",
":",
"transforms",
".",
"send_json_response",
"(",
"self",
",",
"403",
",",
"'File does not exist.'",
",",
"None",
")",
"return",
"fs",
".",
"delete",
"(",
"path",
")",
"transforms",
".",
"send_json_response",
"(",
"self",
",",
"200",
",",
"'Deleted.'",
")"
] |
https://github.com/google/coursebuilder-core/blob/08f809db3226d9269e30d5edd0edd33bd22041f4/coursebuilder/modules/dashboard/filer.py#L499-L521
|
||
naver/sqlova
|
fc68af6008fd2fd5839210e4b06a352007f609b6
|
bert/modeling.py
|
python
|
BERTSelfAttention.forward
|
(self, hidden_states, attention_mask)
|
return context_layer
|
[] |
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
# [B, num_attention_heads, seq_len, attention_head_size] * [B, num_attention_heads, attention_head_size, seq_len]
# -> [B, num_attention_heads, seq_len, seq_len]
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask # sort of multiplication in soft-max step. It is ~ -10000
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# [B, num_attention_heads, seq_len, seq_len] * [B, num_attention_heads, seq_len, attention_head_size]
# -> [B, num_attention_heads, seq_len, attention_head_size]
context_layer = torch.matmul(attention_probs, value_layer)
# -> [B, seq_len, num_attention_heads, attention_head_size]
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
# [B, seq_len] + [all_head_size=hidden_size] -> [B, seq_len, all_head_size]
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
|
[
"def",
"forward",
"(",
"self",
",",
"hidden_states",
",",
"attention_mask",
")",
":",
"mixed_query_layer",
"=",
"self",
".",
"query",
"(",
"hidden_states",
")",
"mixed_key_layer",
"=",
"self",
".",
"key",
"(",
"hidden_states",
")",
"mixed_value_layer",
"=",
"self",
".",
"value",
"(",
"hidden_states",
")",
"query_layer",
"=",
"self",
".",
"transpose_for_scores",
"(",
"mixed_query_layer",
")",
"key_layer",
"=",
"self",
".",
"transpose_for_scores",
"(",
"mixed_key_layer",
")",
"value_layer",
"=",
"self",
".",
"transpose_for_scores",
"(",
"mixed_value_layer",
")",
"# Take the dot product between \"query\" and \"key\" to get the raw attention scores.",
"# [B, num_attention_heads, seq_len, attention_head_size] * [B, num_attention_heads, attention_head_size, seq_len]",
"# -> [B, num_attention_heads, seq_len, seq_len]",
"attention_scores",
"=",
"torch",
".",
"matmul",
"(",
"query_layer",
",",
"key_layer",
".",
"transpose",
"(",
"-",
"1",
",",
"-",
"2",
")",
")",
"attention_scores",
"=",
"attention_scores",
"/",
"math",
".",
"sqrt",
"(",
"self",
".",
"attention_head_size",
")",
"# Apply the attention mask is (precomputed for all layers in BertModel forward() function)",
"attention_scores",
"=",
"attention_scores",
"+",
"attention_mask",
"# sort of multiplication in soft-max step. It is ~ -10000",
"# Normalize the attention scores to probabilities.",
"attention_probs",
"=",
"nn",
".",
"Softmax",
"(",
"dim",
"=",
"-",
"1",
")",
"(",
"attention_scores",
")",
"# This is actually dropping out entire tokens to attend to, which might",
"# seem a bit unusual, but is taken from the original Transformer paper.",
"attention_probs",
"=",
"self",
".",
"dropout",
"(",
"attention_probs",
")",
"# [B, num_attention_heads, seq_len, seq_len] * [B, num_attention_heads, seq_len, attention_head_size]",
"# -> [B, num_attention_heads, seq_len, attention_head_size]",
"context_layer",
"=",
"torch",
".",
"matmul",
"(",
"attention_probs",
",",
"value_layer",
")",
"# -> [B, seq_len, num_attention_heads, attention_head_size]",
"context_layer",
"=",
"context_layer",
".",
"permute",
"(",
"0",
",",
"2",
",",
"1",
",",
"3",
")",
".",
"contiguous",
"(",
")",
"# [B, seq_len] + [all_head_size=hidden_size] -> [B, seq_len, all_head_size]",
"new_context_layer_shape",
"=",
"context_layer",
".",
"size",
"(",
")",
"[",
":",
"-",
"2",
"]",
"+",
"(",
"self",
".",
"all_head_size",
",",
")",
"context_layer",
"=",
"context_layer",
".",
"view",
"(",
"*",
"new_context_layer_shape",
")",
"return",
"context_layer"
] |
https://github.com/naver/sqlova/blob/fc68af6008fd2fd5839210e4b06a352007f609b6/bert/modeling.py#L214-L248
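The forward pass above is scaled dot-product attention plus head bookkeeping; the sketch below (ours, not sqlova's; requires torch) replays just the shape flow with random tensors:

import math
import torch

B, H, L, D = 2, 12, 8, 64          # batch, heads, seq_len, head_size
q = torch.randn(B, H, L, D)
k = torch.randn(B, H, L, D)
v = torch.randn(B, H, L, D)
mask = torch.zeros(B, 1, 1, L)     # 0 keeps a position; ~ -10000 would mask it

scores = q @ k.transpose(-1, -2) / math.sqrt(D)   # [B, H, L, L]
probs = torch.softmax(scores + mask, dim=-1)      # additive mask, then normalize
context = probs @ v                               # [B, H, L, D]
print(context.shape)                              # torch.Size([2, 12, 8, 64])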
|
|||
linxid/Machine_Learning_Study_Path
|
558e82d13237114bbb8152483977806fc0c222af
|
Machine Learning In Action/Chapter8-Regression/venv/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/requests/packages/urllib3/connectionpool.py
|
python
|
HTTPSConnectionPool._prepare_conn
|
(self, conn)
|
return conn
|
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
|
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
|
[
"Prepare",
"the",
"connection",
"for",
":",
"meth",
":",
"urllib3",
".",
"util",
".",
"ssl_wrap_socket",
"and",
"establish",
"the",
"tunnel",
"if",
"proxy",
"is",
"used",
"."
] |
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(key_file=self.key_file,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
conn.ssl_version = self.ssl_version
return conn
|
[
"def",
"_prepare_conn",
"(",
"self",
",",
"conn",
")",
":",
"if",
"isinstance",
"(",
"conn",
",",
"VerifiedHTTPSConnection",
")",
":",
"conn",
".",
"set_cert",
"(",
"key_file",
"=",
"self",
".",
"key_file",
",",
"cert_file",
"=",
"self",
".",
"cert_file",
",",
"cert_reqs",
"=",
"self",
".",
"cert_reqs",
",",
"ca_certs",
"=",
"self",
".",
"ca_certs",
",",
"ca_cert_dir",
"=",
"self",
".",
"ca_cert_dir",
",",
"assert_hostname",
"=",
"self",
".",
"assert_hostname",
",",
"assert_fingerprint",
"=",
"self",
".",
"assert_fingerprint",
")",
"conn",
".",
"ssl_version",
"=",
"self",
".",
"ssl_version",
"return",
"conn"
] |
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter8-Regression/venv/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/requests/packages/urllib3/connectionpool.py#L763-L779
|
|
omz/PythonistaAppTemplate
|
f560f93f8876d82a21d108977f90583df08d55af
|
PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/matrices/matrices.py
|
python
|
MatrixBase.solve_least_squares
|
(self, rhs, method='CH')
|
return (t*self).inv(method=method)*t*rhs
|
Return the least-square fit to the data.
By default the cholesky_solve routine is used (method='CH'); other
methods of matrix inversion can be used. To find out which are
available, see the docstring of the .inv() method.
Examples
========
>>> from sympy.matrices import Matrix, ones
>>> A = Matrix([1, 2, 3])
>>> B = Matrix([2, 3, 4])
>>> S = Matrix(A.row_join(B))
>>> S
Matrix([
[1, 2],
[2, 3],
[3, 4]])
If each line of S represents coefficients of Ax + By
and x and y are [2, 3] then S*xy is:
>>> r = S*Matrix([2, 3]); r
Matrix([
[ 8],
[13],
[18]])
But let's add 1 to the middle value and then solve for the
least-squares value of xy:
>>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
Matrix([
[ 5/3],
[10/3]])
The error is given by S*xy - r:
>>> S*xy - r
Matrix([
[1/3],
[1/3],
[1/3]])
>>> _.norm().n(2)
0.58
If a different xy is used, the norm will be higher:
>>> xy += ones(2, 1)/10
>>> (S*xy - r).norm().n(2)
1.5
|
Return the least-square fit to the data.
|
[
"Return",
"the",
"least",
"-",
"square",
"fit",
"to",
"the",
"data",
"."
] |
def solve_least_squares(self, rhs, method='CH'):
"""Return the least-square fit to the data.
By default the cholesky_solve routine is used (method='CH'); other
methods of matrix inversion can be used. To find out which are
available, see the docstring of the .inv() method.
Examples
========
>>> from sympy.matrices import Matrix, ones
>>> A = Matrix([1, 2, 3])
>>> B = Matrix([2, 3, 4])
>>> S = Matrix(A.row_join(B))
>>> S
Matrix([
[1, 2],
[2, 3],
[3, 4]])
If each line of S represents coefficients of Ax + By
and x and y are [2, 3] then S*xy is:
>>> r = S*Matrix([2, 3]); r
Matrix([
[ 8],
[13],
[18]])
But let's add 1 to the middle value and then solve for the
least-squares value of xy:
>>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
Matrix([
[ 5/3],
[10/3]])
The error is given by S*xy - r:
>>> S*xy - r
Matrix([
[1/3],
[1/3],
[1/3]])
>>> _.norm().n(2)
0.58
If a different xy is used, the norm will be higher:
>>> xy += ones(2, 1)/10
>>> (S*xy - r).norm().n(2)
1.5
"""
if method == 'CH':
return self.cholesky_solve(rhs)
t = self.T
return (t*self).inv(method=method)*t*rhs
|
[
"def",
"solve_least_squares",
"(",
"self",
",",
"rhs",
",",
"method",
"=",
"'CH'",
")",
":",
"if",
"method",
"==",
"'CH'",
":",
"return",
"self",
".",
"cholesky_solve",
"(",
"rhs",
")",
"t",
"=",
"self",
".",
"T",
"return",
"(",
"t",
"*",
"self",
")",
".",
"inv",
"(",
"method",
"=",
"method",
")",
"*",
"t",
"*",
"rhs"
] |
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/matrices/matrices.py#L918-L975
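When method is not 'CH', the last line is just the normal equations, x = (S^T S)^-1 S^T r. Checking it against the docstring's numbers (assumes sympy is installed):

from sympy import Matrix

S = Matrix([[1, 2], [2, 3], [3, 4]])
r = Matrix([8, 14, 18])
print((S.T * S).inv() * S.T * r)   # Matrix([[5/3], [10/3]]), matching the example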
|
|
frappe/frappe
|
b64cab6867dfd860f10ccaf41a4ec04bc890b583
|
frappe/email/doctype/newsletter/newsletter.py
|
python
|
Newsletter.get_success_recipients
|
(self)
|
return frappe.get_all("Email Queue Recipient",
filters={
"status": ("in", ["Not Sent", "Sending", "Sent"]),
"parentfield": ("in", self.get_linked_email_queue()),
},
pluck="recipient",
)
|
Recipients who have already received the newsletter.
Couldn't think of a better name ;)
|
Recipients who have already received the newsletter.
|
[
"Recipients",
"who",
"have",
"already",
"recieved",
"the",
"newsletter",
"."
] |
def get_success_recipients(self) -> List[str]:
"""Recipients who have already recieved the newsletter.
Couldn't think of a better name ;)
"""
return frappe.get_all("Email Queue Recipient",
filters={
"status": ("in", ["Not Sent", "Sending", "Sent"]),
"parentfield": ("in", self.get_linked_email_queue()),
},
pluck="recipient",
)
|
[
"def",
"get_success_recipients",
"(",
"self",
")",
"->",
"List",
"[",
"str",
"]",
":",
"return",
"frappe",
".",
"get_all",
"(",
"\"Email Queue Recipient\"",
",",
"filters",
"=",
"{",
"\"status\"",
":",
"(",
"\"in\"",
",",
"[",
"\"Not Sent\"",
",",
"\"Sending\"",
",",
"\"Sent\"",
"]",
")",
",",
"\"parentfield\"",
":",
"(",
"\"in\"",
",",
"self",
".",
"get_linked_email_queue",
"(",
")",
")",
",",
"}",
",",
"pluck",
"=",
"\"recipient\"",
",",
")"
] |
https://github.com/frappe/frappe/blob/b64cab6867dfd860f10ccaf41a4ec04bc890b583/frappe/email/doctype/newsletter/newsletter.py#L127-L138
|
|
cvxgrp/cvxportfolio
|
3985059af9341b58d3f6219280da64e2c85d2749
|
cvxportfolio/simulator.py
|
python
|
MarketSimulator.attribute
|
(self, true_results, policy,
selector=None,
delta=1,
fit="linear",
parallel=True)
|
return data
|
Attributes returns over a period to individual alpha sources.
Args:
true_results: observed results.
policy: the policy that achieved the returns.
Alpha model must be a stream.
selector: A map from SimulationResult to time series.
delta: the fractional deviation.
fit: the type of fit to perform.
Returns:
A dict of alpha source to return series.
|
Attributes returns over a period to individual alpha sources.
|
[
"Attributes",
"returns",
"over",
"a",
"period",
"to",
"individual",
"alpha",
"sources",
"."
] |
def attribute(self, true_results, policy,
selector=None,
delta=1,
fit="linear",
parallel=True):
"""Attributes returns over a period to individual alpha sources.
Args:
true_results: observed results.
policy: the policy that achieved the returns.
Alpha model must be a stream.
selector: A map from SimulationResult to time series.
delta: the fractional deviation.
fit: the type of fit to perform.
Returns:
A dict of alpha source to return series.
"""
# Default selector looks at profits.
if selector is None:
def selector(result):
return result.v - sum(result.initial_portfolio)
alpha_stream = policy.return_forecast
assert isinstance(alpha_stream, MultipleReturnsForecasts)
times = true_results.h.index
weights = alpha_stream.weights
assert np.sum(weights) == 1
alpha_sources = alpha_stream.alpha_sources
num_sources = len(alpha_sources)
Wmat = self.reduce_signal_perturb(weights, delta)
perturb_pols = []
for idx in range(len(alpha_sources)):
new_pol = copy.copy(policy)
new_pol.return_forecast = MultipleReturnsForecasts(alpha_sources,
Wmat[idx, :])
perturb_pols.append(new_pol)
# Simulate
p0 = true_results.initial_portfolio
alt_results = self.run_multiple_backtest(p0, times[0], times[-1],
perturb_pols, parallel=parallel)
# Attribute.
true_arr = selector(true_results).values
attr_times = selector(true_results).index
Rmat = np.zeros((num_sources, len(attr_times)))
for idx, result in enumerate(alt_results):
Rmat[idx, :] = selector(result).values
Pmat = cvx.Variable((num_sources, len(attr_times)))
if fit == "linear":
prob = cvx.Problem(cvx.Minimize(0), [Wmat @ Pmat == Rmat])
prob.solve()
elif fit == "least-squares":
error = cvx.sum_squares(Wmat @ Pmat - Rmat)
prob = cvx.Problem(cvx.Minimize(error),
[Pmat.T @ weights == true_arr])
prob.solve()
else:
raise Exception("Unknown fitting method.")
# Dict of results.
wmask = np.tile(weights[:, np.newaxis], (1, len(attr_times))).T
data = pd.DataFrame(columns=[s.name for s in alpha_sources],
index=attr_times,
data=Pmat.value.T * wmask)
data['residual'] = true_arr - np.asarray((weights @ Pmat).value).ravel()
data['RMS error'] = np.asarray(
cvx.norm(Wmat @ Pmat - Rmat, 2, axis=0).value).ravel()
data['RMS error'] /= np.sqrt(num_sources)
return data
|
[
"def",
"attribute",
"(",
"self",
",",
"true_results",
",",
"policy",
",",
"selector",
"=",
"None",
",",
"delta",
"=",
"1",
",",
"fit",
"=",
"\"linear\"",
",",
"parallel",
"=",
"True",
")",
":",
"# Default selector looks at profits.",
"if",
"selector",
"is",
"None",
":",
"def",
"selector",
"(",
"result",
")",
":",
"return",
"result",
".",
"v",
"-",
"sum",
"(",
"result",
".",
"initial_portfolio",
")",
"alpha_stream",
"=",
"policy",
".",
"return_forecast",
"assert",
"isinstance",
"(",
"alpha_stream",
",",
"MultipleReturnsForecasts",
")",
"times",
"=",
"true_results",
".",
"h",
".",
"index",
"weights",
"=",
"alpha_stream",
".",
"weights",
"assert",
"np",
".",
"sum",
"(",
"weights",
")",
"==",
"1",
"alpha_sources",
"=",
"alpha_stream",
".",
"alpha_sources",
"num_sources",
"=",
"len",
"(",
"alpha_sources",
")",
"Wmat",
"=",
"self",
".",
"reduce_signal_perturb",
"(",
"weights",
",",
"delta",
")",
"perturb_pols",
"=",
"[",
"]",
"for",
"idx",
"in",
"range",
"(",
"len",
"(",
"alpha_sources",
")",
")",
":",
"new_pol",
"=",
"copy",
".",
"copy",
"(",
"policy",
")",
"new_pol",
".",
"return_forecast",
"=",
"MultipleReturnsForecasts",
"(",
"alpha_sources",
",",
"Wmat",
"[",
"idx",
",",
":",
"]",
")",
"perturb_pols",
".",
"append",
"(",
"new_pol",
")",
"# Simulate",
"p0",
"=",
"true_results",
".",
"initial_portfolio",
"alt_results",
"=",
"self",
".",
"run_multiple_backtest",
"(",
"p0",
",",
"times",
"[",
"0",
"]",
",",
"times",
"[",
"-",
"1",
"]",
",",
"perturb_pols",
",",
"parallel",
"=",
"parallel",
")",
"# Attribute.",
"true_arr",
"=",
"selector",
"(",
"true_results",
")",
".",
"values",
"attr_times",
"=",
"selector",
"(",
"true_results",
")",
".",
"index",
"Rmat",
"=",
"np",
".",
"zeros",
"(",
"(",
"num_sources",
",",
"len",
"(",
"attr_times",
")",
")",
")",
"for",
"idx",
",",
"result",
"in",
"enumerate",
"(",
"alt_results",
")",
":",
"Rmat",
"[",
"idx",
",",
":",
"]",
"=",
"selector",
"(",
"result",
")",
".",
"values",
"Pmat",
"=",
"cvx",
".",
"Variable",
"(",
"(",
"num_sources",
",",
"len",
"(",
"attr_times",
")",
")",
")",
"if",
"fit",
"==",
"\"linear\"",
":",
"prob",
"=",
"cvx",
".",
"Problem",
"(",
"cvx",
".",
"Minimize",
"(",
"0",
")",
",",
"[",
"Wmat",
"@",
"Pmat",
"==",
"Rmat",
"]",
")",
"prob",
".",
"solve",
"(",
")",
"elif",
"fit",
"==",
"\"least-squares\"",
":",
"error",
"=",
"cvx",
".",
"sum_squares",
"(",
"Wmat",
"@",
"Pmat",
"-",
"Rmat",
")",
"prob",
"=",
"cvx",
".",
"Problem",
"(",
"cvx",
".",
"Minimize",
"(",
"error",
")",
",",
"[",
"Pmat",
".",
"T",
"@",
"weights",
"==",
"true_arr",
"]",
")",
"prob",
".",
"solve",
"(",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Unknown fitting method.\"",
")",
"# Dict of results.",
"wmask",
"=",
"np",
".",
"tile",
"(",
"weights",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"(",
"1",
",",
"len",
"(",
"attr_times",
")",
")",
")",
".",
"T",
"data",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"[",
"s",
".",
"name",
"for",
"s",
"in",
"alpha_sources",
"]",
",",
"index",
"=",
"attr_times",
",",
"data",
"=",
"Pmat",
".",
"value",
".",
"T",
"*",
"wmask",
")",
"data",
"[",
"'residual'",
"]",
"=",
"true_arr",
"-",
"np",
".",
"asarray",
"(",
"(",
"weights",
"@",
"Pmat",
")",
".",
"value",
")",
".",
"ravel",
"(",
")",
"data",
"[",
"'RMS error'",
"]",
"=",
"np",
".",
"asarray",
"(",
"cvx",
".",
"norm",
"(",
"Wmat",
"@",
"Pmat",
"-",
"Rmat",
",",
"2",
",",
"axis",
"=",
"0",
")",
".",
"value",
")",
".",
"ravel",
"(",
")",
"data",
"[",
"'RMS error'",
"]",
"/=",
"np",
".",
"sqrt",
"(",
"num_sources",
")",
"return",
"data"
] |
https://github.com/cvxgrp/cvxportfolio/blob/3985059af9341b58d3f6219280da64e2c85d2749/cvxportfolio/simulator.py#L184-L250
|
|
TesterlifeRaymond/doraemon
|
d5cb6e34bd5f2aa97273ce0c0c9303e32beaa333
|
venv/lib/python3.6/site-packages/pip/utils/__init__.py
|
python
|
unzip_file
|
(filename, location, flatten=True)
|
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
|
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
|
[
"Unzip",
"the",
"file",
"(",
"with",
"path",
"filename",
")",
"to",
"the",
"destination",
"location",
".",
"All",
"files",
"are",
"written",
"based",
"on",
"system",
"defaults",
"and",
"umask",
"(",
"i",
".",
"e",
".",
"permissions",
"are",
"not",
"preserved",
")",
"except",
"that",
"regular",
"file",
"members",
"with",
"any",
"execute",
"permissions",
"(",
"user",
"group",
"or",
"world",
")",
"have",
"chmod",
"+",
"x",
"applied",
"after",
"being",
"written",
".",
"Note",
"that",
"for",
"windows",
"any",
"execute",
"changes",
"using",
"os",
".",
"chmod",
"are",
"no",
"-",
"ops",
"per",
"the",
"python",
"docs",
"."
] |
def unzip_file(filename, location, flatten=True):
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
zipfp = open(filename, 'rb')
try:
zip = zipfile.ZipFile(zipfp, allowZip64=True)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
data = zip.read(name)
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if fn.endswith('/') or fn.endswith('\\'):
# A directory
ensure_dir(fn)
else:
ensure_dir(dir)
fp = open(fn, 'wb')
try:
fp.write(data)
finally:
fp.close()
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for
# user/group/world?
if mode and stat.S_ISREG(mode) and mode & 0o111:
# make dest file have execute for user/group/world
# (chmod +x) no-op on windows per python docs
os.chmod(fn, (0o777 - current_umask() | 0o111))
finally:
zipfp.close()
|
[
"def",
"unzip_file",
"(",
"filename",
",",
"location",
",",
"flatten",
"=",
"True",
")",
":",
"ensure_dir",
"(",
"location",
")",
"zipfp",
"=",
"open",
"(",
"filename",
",",
"'rb'",
")",
"try",
":",
"zip",
"=",
"zipfile",
".",
"ZipFile",
"(",
"zipfp",
",",
"allowZip64",
"=",
"True",
")",
"leading",
"=",
"has_leading_dir",
"(",
"zip",
".",
"namelist",
"(",
")",
")",
"and",
"flatten",
"for",
"info",
"in",
"zip",
".",
"infolist",
"(",
")",
":",
"name",
"=",
"info",
".",
"filename",
"data",
"=",
"zip",
".",
"read",
"(",
"name",
")",
"fn",
"=",
"name",
"if",
"leading",
":",
"fn",
"=",
"split_leading_dir",
"(",
"name",
")",
"[",
"1",
"]",
"fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"location",
",",
"fn",
")",
"dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"fn",
")",
"if",
"fn",
".",
"endswith",
"(",
"'/'",
")",
"or",
"fn",
".",
"endswith",
"(",
"'\\\\'",
")",
":",
"# A directory",
"ensure_dir",
"(",
"fn",
")",
"else",
":",
"ensure_dir",
"(",
"dir",
")",
"fp",
"=",
"open",
"(",
"fn",
",",
"'wb'",
")",
"try",
":",
"fp",
".",
"write",
"(",
"data",
")",
"finally",
":",
"fp",
".",
"close",
"(",
")",
"mode",
"=",
"info",
".",
"external_attr",
">>",
"16",
"# if mode and regular file and any execute permissions for",
"# user/group/world?",
"if",
"mode",
"and",
"stat",
".",
"S_ISREG",
"(",
"mode",
")",
"and",
"mode",
"&",
"0o111",
":",
"# make dest file have execute for user/group/world",
"# (chmod +x) no-op on windows per python docs",
"os",
".",
"chmod",
"(",
"fn",
",",
"(",
"0o777",
"-",
"current_umask",
"(",
")",
"|",
"0o111",
")",
")",
"finally",
":",
"zipfp",
".",
"close",
"(",
")"
] |
https://github.com/TesterlifeRaymond/doraemon/blob/d5cb6e34bd5f2aa97273ce0c0c9303e32beaa333/venv/lib/python3.6/site-packages/pip/utils/__init__.py#L472-L512
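The chmod logic above relies on ZIP entries storing POSIX mode bits in the high 16 bits of external_attr; a self-contained stdlib round trip (written for this note) shows the extraction of those bits:

import io
import stat
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as zf:
    zi = zipfile.ZipInfo('run.sh')
    zi.external_attr = (stat.S_IFREG | 0o755) << 16   # regular file, rwxr-xr-x
    zf.writestr(zi, '#!/bin/sh\necho hi\n')

with zipfile.ZipFile(buf) as zf:
    mode = zf.getinfo('run.sh').external_attr >> 16
    print(oct(stat.S_IMODE(mode)))   # 0o755
    print(bool(mode & 0o111))        # True -> unzip_file would apply chmod +x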
|
||
inspurer/WorkAttendanceSystem
|
1221e2d67bdf5bb15fe99517cc3ded58ccb066df
|
V2.0/venv/Lib/site-packages/pip-9.0.1-py3.5.egg/pip/utils/__init__.py
|
python
|
backup_dir
|
(dir, ext='.bak')
|
return dir + extension
|
Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)
|
Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)
|
[
"Figure",
"out",
"the",
"name",
"of",
"a",
"directory",
"to",
"back",
"up",
"the",
"given",
"dir",
"to",
"(",
"adding",
".",
"bak",
".",
"bak2",
"etc",
")"
] |
def backup_dir(dir, ext='.bak'):
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
|
[
"def",
"backup_dir",
"(",
"dir",
",",
"ext",
"=",
"'.bak'",
")",
":",
"n",
"=",
"1",
"extension",
"=",
"ext",
"while",
"os",
".",
"path",
".",
"exists",
"(",
"dir",
"+",
"extension",
")",
":",
"n",
"+=",
"1",
"extension",
"=",
"ext",
"+",
"str",
"(",
"n",
")",
"return",
"dir",
"+",
"extension"
] |
https://github.com/inspurer/WorkAttendanceSystem/blob/1221e2d67bdf5bb15fe99517cc3ded58ccb066df/V2.0/venv/Lib/site-packages/pip-9.0.1-py3.5.egg/pip/utils/__init__.py#L132-L140
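A quick demonstration of the naming scheme (note backup_dir only computes a name, it does not copy anything; the temp directory keeps the demo side-effect free):

import os
import tempfile

def backup_dir(dir, ext='.bak'):      # copy of the helper above, for the demo
    n = 1
    extension = ext
    while os.path.exists(dir + extension):
        n += 1
        extension = ext + str(n)
    return dir + extension

with tempfile.TemporaryDirectory() as tmp:
    target = os.path.join(tmp, 'build')
    os.makedirs(target + '.bak')      # pretend one backup already exists
    print(backup_dir(target))         # .../build.bak2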
|
|
CLUEbenchmark/CLUEPretrainedModels
|
b384fd41665a8261f9c689c940cf750b3bc21fce
|
run_classifier.py
|
python
|
ColaProcessor.get_test_examples
|
(self, data_dir)
|
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
|
See base class.
|
See base class.
|
[
"See",
"base",
"class",
"."
] |
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
|
[
"def",
"get_test_examples",
"(",
"self",
",",
"data_dir",
")",
":",
"return",
"self",
".",
"_create_examples",
"(",
"self",
".",
"_read_tsv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"\"test.tsv\"",
")",
")",
",",
"\"test\"",
")"
] |
https://github.com/CLUEbenchmark/CLUEPretrainedModels/blob/b384fd41665a8261f9c689c940cf750b3bc21fce/run_classifier.py#L354-L357
|
|
IronLanguages/ironpython3
|
7a7bb2a872eeab0d1009fc8a6e24dca43f65b693
|
Src/StdLib/Lib/nntplib.py
|
python
|
_NNTPBase.descriptions
|
(self, group_pattern)
|
return self._getdescriptions(group_pattern, True)
|
Get descriptions for a range of groups.
|
Get descriptions for a range of groups.
|
[
"Get",
"descriptions",
"for",
"a",
"range",
"of",
"groups",
"."
] |
def descriptions(self, group_pattern):
"""Get descriptions for a range of groups."""
return self._getdescriptions(group_pattern, True)
|
[
"def",
"descriptions",
"(",
"self",
",",
"group_pattern",
")",
":",
"return",
"self",
".",
"_getdescriptions",
"(",
"group_pattern",
",",
"True",
")"
] |
https://github.com/IronLanguages/ironpython3/blob/7a7bb2a872eeab0d1009fc8a6e24dca43f65b693/Src/StdLib/Lib/nntplib.py#L647-L649
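Exercising this needs a live NNTP server, so the sketch below is illustrative only (news.example.com is a placeholder host; note nntplib was removed from the stdlib in Python 3.13):

from nntplib import NNTP

with NNTP('news.example.com') as server:   # placeholder host
    resp, descs = server.descriptions('comp.lang.*')
    for group, desc in sorted(descs.items()):
        print(group, '-', desc)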
|
|
tp4a/teleport
|
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
|
server/www/packages/packages-windows/x86/asn1crypto/x509.py
|
python
|
Certificate.key_identifier
|
(self)
|
return self.key_identifier_value.native
|
:return:
None or a byte string of the certificate's key identifier from the
key identifier extension
|
:return:
None or a byte string of the certificate's key identifier from the
key identifier extension
|
[
":",
"return",
":",
"None",
"or",
"a",
"byte",
"string",
"of",
"the",
"certificate",
"s",
"key",
"identifier",
"from",
"the",
"key",
"identifier",
"extension"
] |
def key_identifier(self):
"""
:return:
None or a byte string of the certificate's key identifier from the
key identifier extension
"""
if not self.key_identifier_value:
return None
return self.key_identifier_value.native
|
[
"def",
"key_identifier",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"key_identifier_value",
":",
"return",
"None",
"return",
"self",
".",
"key_identifier_value",
".",
"native"
] |
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-windows/x86/asn1crypto/x509.py#L2563-L2573
|
|
trailofbits/manticore
|
b050fdf0939f6c63f503cdf87ec0ab159dd41159
|
manticore/core/manticore.py
|
python
|
ManticoreBase._all_states
|
(self)
|
return tuple(self._ready_states) + tuple(self._terminated_states)
|
Only allowed when not running.
(While running, states can be busy.)
Returns a tuple with all active state ids.
Notably the "killed" states are not included here.
|
Only allowed when not running.
(While running, states can be busy.)
Returns a tuple with all active state ids.
Notably the "killed" states are not included here.
|
[
"Only",
"allowed",
"at",
"not",
"running",
".",
"(",
"At",
"running",
"we",
"can",
"have",
"states",
"at",
"busy",
")",
"Returns",
"a",
"tuple",
"with",
"all",
"active",
"state",
"ids",
".",
"Notably",
"the",
"killed",
"states",
"are",
"not",
"included",
"here",
"."
] |
def _all_states(self):
"""Only allowed at not running.
(At running we can have states at busy)
Returns a tuple with all active state ids.
Notably the "killed" states are not included here.
"""
return tuple(self._ready_states) + tuple(self._terminated_states)
|
[
"def",
"_all_states",
"(",
"self",
")",
":",
"return",
"tuple",
"(",
"self",
".",
"_ready_states",
")",
"+",
"tuple",
"(",
"self",
".",
"_terminated_states",
")"
] |
https://github.com/trailofbits/manticore/blob/b050fdf0939f6c63f503cdf87ec0ab159dd41159/manticore/core/manticore.py#L838-L844
|
|
simons-public/protonfixes
|
24ecb378bc4e99bfe698090661d255dcbb5b677f
|
protonfixes/gamefixes/46330.py
|
python
|
main
|
()
|
Install vb6run
|
Install vb6run
|
[
"Install",
"vb6run"
] |
def main():
""" Install vb6run
"""
util.protontricks('vb6run')
|
[
"def",
"main",
"(",
")",
":",
"util",
".",
"protontricks",
"(",
"'vb6run'",
")"
] |
https://github.com/simons-public/protonfixes/blob/24ecb378bc4e99bfe698090661d255dcbb5b677f/protonfixes/gamefixes/46330.py#L8-L12
|
||
doorstop-dev/doorstop
|
03aa287e5069e29da6979274e1cb6714ee450d3a
|
doorstop/core/editor.py
|
python
|
edit
|
(path, tool=None)
|
Open a file and wait for the default editor to exit.
:param path: path of file to open
:param tool: path of alternate editor
:return: launched process
|
Open a file and wait for the default editor to exit.
|
[
"Open",
"a",
"file",
"and",
"wait",
"for",
"the",
"default",
"editor",
"to",
"exit",
"."
] |
def edit(path, tool=None):
"""Open a file and wait for the default editor to exit.
:param path: path of file to open
:param tool: path of alternate editor
:return: launched process
"""
process = launch(path, tool=tool)
if process:
try:
process.wait()
except KeyboardInterrupt:
log.debug("user cancelled")
finally:
if process.returncode is None:
process.terminate()
log.warning("force closed editor")
log.debug("process exited: {}".format(process.returncode))
|
[
"def",
"edit",
"(",
"path",
",",
"tool",
"=",
"None",
")",
":",
"process",
"=",
"launch",
"(",
"path",
",",
"tool",
"=",
"tool",
")",
"if",
"process",
":",
"try",
":",
"process",
".",
"wait",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"log",
".",
"debug",
"(",
"\"user cancelled\"",
")",
"finally",
":",
"if",
"process",
".",
"returncode",
"is",
"None",
":",
"process",
".",
"terminate",
"(",
")",
"log",
".",
"warning",
"(",
"\"force closed editor\"",
")",
"log",
".",
"debug",
"(",
"\"process exited: {}\"",
".",
"format",
"(",
"process",
".",
"returncode",
")",
")"
] |
https://github.com/doorstop-dev/doorstop/blob/03aa287e5069e29da6979274e1cb6714ee450d3a/doorstop/core/editor.py#L20-L39
|
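The edit() function above waits on the spawned editor and force-terminates it if the user interrupts with Ctrl-C. A minimal self-contained sketch of the same wait-and-terminate pattern, using subprocess directly (the editor command and messages here are illustrative stand-ins, not doorstop's launch() or log):

import subprocess

def wait_for_editor(path, editor="nano"):
    # Hypothetical stand-in for doorstop's launch(): spawn the editor directly.
    process = subprocess.Popen([editor, path])
    try:
        process.wait()
    except KeyboardInterrupt:
        print("user cancelled")
    finally:
        # Editor still running after an interrupt: close it forcibly.
        if process.returncode is None:
            process.terminate()
            print("force closed editor")
    print("process exited: {}".format(process.returncode))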
||
Instagram/LibCST
|
13370227703fe3171e94c57bdd7977f3af696b73
|
libcst/tool.py
|
python
|
_initialize_impl
|
(proc_name: str, command_args: List[str])
|
return 0
|
[] |
def _initialize_impl(proc_name: str, command_args: List[str]) -> int:
# Now, construct the full parser, parse the args and run the class.
parser = argparse.ArgumentParser(
description="Initialize a directory by writing a default LibCST config to it.",
prog=f"{proc_name} initialize",
fromfile_prefix_chars="@",
)
parser.add_argument(
"path",
metavar="PATH",
type=str,
help="Path to initialize with a default LibCST codemod configuration",
)
args = parser.parse_args(command_args)
# Get default configuration file, write it to the YAML file we
# recognize as our config.
default_config = _default_config()
# We serialize for ourselves here, since PyYAML doesn't allow
# us to control comments in the default file.
serializers: Dict[str, _SerializerBase] = {
"generated_code_marker": _StrSerializer(
"String that LibCST should look for in code which indicates "
+ "that the module is generated code."
),
"formatter": _ListSerializer(
"Command line and arguments for invoking a code formatter. "
+ "Anything specified here must be capable of taking code via "
+ "stdin and returning formatted code via stdout."
),
"blacklist_patterns": _ListSerializer(
"List of regex patterns which LibCST will evaluate against "
+ "filenames to determine if the module should be touched."
),
"modules": _ListSerializer(
"List of modules that contain codemods inside of them.", newlines=True
),
"repo_root": _StrSerializer(
"Absolute or relative path of the repository root, used for "
+ "providing full-repo metadata. Relative paths should be "
+ "specified with this file location as the base."
),
}
config_str = "".join(
serializers[key].serialize(key, val) for key, val in default_config.items()
)
# For safety, verify that it parses to the identical file.
actual_config = yaml.safe_load(config_str)
if actual_config != default_config:
raise Exception("Logic error, serialization is invalid!")
config_file = os.path.abspath(os.path.join(args.path, CONFIG_FILE_NAME))
with open(config_file, "w") as fp:
fp.write(config_str)
print(f"Successfully wrote default config file to {config_file}")
return 0
|
[
"def",
"_initialize_impl",
"(",
"proc_name",
":",
"str",
",",
"command_args",
":",
"List",
"[",
"str",
"]",
")",
"->",
"int",
":",
"# Now, construct the full parser, parse the args and run the class.",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"Initialize a directory by writing a default LibCST config to it.\"",
",",
"prog",
"=",
"f\"{proc_name} initialize\"",
",",
"fromfile_prefix_chars",
"=",
"\"@\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"path\"",
",",
"metavar",
"=",
"\"PATH\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"Path to initialize with a default LibCST codemod configuration\"",
",",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"command_args",
")",
"# Get default configuration file, write it to the YAML file we",
"# recognize as our config.",
"default_config",
"=",
"_default_config",
"(",
")",
"# We serialize for ourselves here, since PyYAML doesn't allow",
"# us to control comments in the default file.",
"serializers",
":",
"Dict",
"[",
"str",
",",
"_SerializerBase",
"]",
"=",
"{",
"\"generated_code_marker\"",
":",
"_StrSerializer",
"(",
"\"String that LibCST should look for in code which indicates \"",
"+",
"\"that the module is generated code.\"",
")",
",",
"\"formatter\"",
":",
"_ListSerializer",
"(",
"\"Command line and arguments for invoking a code formatter. \"",
"+",
"\"Anything specified here must be capable of taking code via \"",
"+",
"\"stdin and returning formatted code via stdout.\"",
")",
",",
"\"blacklist_patterns\"",
":",
"_ListSerializer",
"(",
"\"List of regex patterns which LibCST will evaluate against \"",
"+",
"\"filenames to determine if the module should be touched.\"",
")",
",",
"\"modules\"",
":",
"_ListSerializer",
"(",
"\"List of modules that contain codemods inside of them.\"",
",",
"newlines",
"=",
"True",
")",
",",
"\"repo_root\"",
":",
"_StrSerializer",
"(",
"\"Absolute or relative path of the repository root, used for \"",
"+",
"\"providing full-repo metadata. Relative paths should be \"",
"+",
"\"specified with this file location as the base.\"",
")",
",",
"}",
"config_str",
"=",
"\"\"",
".",
"join",
"(",
"serializers",
"[",
"key",
"]",
".",
"serialize",
"(",
"key",
",",
"val",
")",
"for",
"key",
",",
"val",
"in",
"default_config",
".",
"items",
"(",
")",
")",
"# For safety, verify that it parses to the identical file.",
"actual_config",
"=",
"yaml",
".",
"safe_load",
"(",
"config_str",
")",
"if",
"actual_config",
"!=",
"default_config",
":",
"raise",
"Exception",
"(",
"\"Logic error, serialization is invalid!\"",
")",
"config_file",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"path",
",",
"CONFIG_FILE_NAME",
")",
")",
"with",
"open",
"(",
"config_file",
",",
"\"w\"",
")",
"as",
"fp",
":",
"fp",
".",
"write",
"(",
"config_str",
")",
"print",
"(",
"f\"Successfully wrote default config file to {config_file}\"",
")",
"return",
"0"
] |
https://github.com/Instagram/LibCST/blob/13370227703fe3171e94c57bdd7977f3af696b73/libcst/tool.py#L641-L700
|
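A detail worth isolating from _initialize_impl() is its safety check: the config is serialized by hand (to control comments), then parsed back with yaml.safe_load and compared against the source mapping. A minimal sketch of that round-trip pattern, assuming only PyYAML and an illustrative config:

import yaml

default_config = {"formatter": ["black", "-"], "modules": ["libcst.codemod.commands"]}

# Hand-rolled serialization, then verify it still parses to the identical mapping.
config_str = "".join(f"{key}: {value!r}\n" for key, value in default_config.items())
if yaml.safe_load(config_str) != default_config:
    raise Exception("Logic error, serialization is invalid!")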
|||
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/lib/python2.7/site-packages/enum/__init__.py
|
python
|
__str__
|
(self)
|
return "%s.%s" % (self.__class__.__name__, self._name_)
|
[] |
def __str__(self):
return "%s.%s" % (self.__class__.__name__, self._name_)
|
[
"def",
"__str__",
"(",
"self",
")",
":",
"return",
"\"%s.%s\"",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"_name_",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/enum/__init__.py#L672-L673
|
|||
emesene/emesene
|
4548a4098310e21b16437bb36223a7f632a4f7bc
|
emesene/e3/xmpp/SleekXMPP/sleekxmpp/basexmpp.py
|
python
|
BaseXMPP.make_query_roster
|
(self, iq=None)
|
return ET.Element("{jabber:iq:roster}query")
|
Create a roster query element.
:param iq: Optionally use an existing stanza instead
of generating a new one.
|
Create a roster query element.
|
[
"Create",
"a",
"roster",
"query",
"element",
"."
] |
def make_query_roster(self, iq=None):
"""Create a roster query element.
:param iq: Optionally use an existing stanza instead
of generating a new one.
"""
if iq:
iq['query'] = 'jabber:iq:roster'
return ET.Element("{jabber:iq:roster}query")
|
[
"def",
"make_query_roster",
"(",
"self",
",",
"iq",
"=",
"None",
")",
":",
"if",
"iq",
":",
"iq",
"[",
"'query'",
"]",
"=",
"'jabber:iq:roster'",
"return",
"ET",
".",
"Element",
"(",
"\"{jabber:iq:roster}query\"",
")"
] |
https://github.com/emesene/emesene/blob/4548a4098310e21b16437bb36223a7f632a4f7bc/emesene/e3/xmpp/SleekXMPP/sleekxmpp/basexmpp.py#L470-L478
|
|
wrye-bash/wrye-bash
|
d495c47cfdb44475befa523438a40c4419cb386f
|
Mopy/bash/bolt.py
|
python
|
Path.list
|
(self)
|
For directory: Returns list of files.
|
For directory: Returns list of files.
|
[
"For",
"directory",
":",
"Returns",
"list",
"of",
"files",
"."
] |
def list(self):
"""For directory: Returns list of files."""
try:
return [GPath_no_norm(x) for x in os.listdir(self._s)]
except FileNotFoundError:
return []
|
[
"def",
"list",
"(",
"self",
")",
":",
"try",
":",
"return",
"[",
"GPath_no_norm",
"(",
"x",
")",
"for",
"x",
"in",
"os",
".",
"listdir",
"(",
"self",
".",
"_s",
")",
"]",
"except",
"FileNotFoundError",
":",
"return",
"[",
"]"
] |
https://github.com/wrye-bash/wrye-bash/blob/d495c47cfdb44475befa523438a40c4419cb386f/Mopy/bash/bolt.py#L752-L757
|
||
inventree/InvenTree
|
4a5e4a88ac3e91d64a21e8cab3708ecbc6e2bd8b
|
InvenTree/part/templatetags/inventree_extras.py
|
python
|
inventree_title
|
(*args, **kwargs)
|
return version.inventreeInstanceTitle()
|
Return the title for the current instance - respecting the settings
|
Return the title for the current instance - respecting the settings
|
[
"Return",
"the",
"title",
"for",
"the",
"current",
"instance",
"-",
"respecting",
"the",
"settings"
] |
def inventree_title(*args, **kwargs):
""" Return the title for the current instance - respecting the settings """
return version.inventreeInstanceTitle()
|
[
"def",
"inventree_title",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"version",
".",
"inventreeInstanceTitle",
"(",
")"
] |
https://github.com/inventree/InvenTree/blob/4a5e4a88ac3e91d64a21e8cab3708ecbc6e2bd8b/InvenTree/part/templatetags/inventree_extras.py#L130-L132
|
|
cbfinn/maml_rl
|
9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95
|
rllab/algos/cma_es_lib.py
|
python
|
NoiseHandler.indices
|
(self, fit)
|
return the set of indices to be reevaluated for noise
measurement.
Given the first values are the earliest, this is a useful policy also
with a time changing objective.
|
return the set of indices to be reevaluated for noise
measurement.
|
[
"return",
"the",
"set",
"of",
"indices",
"to",
"be",
"reevaluated",
"for",
"noise",
"measurement",
"."
] |
def indices(self, fit):
"""return the set of indices to be reevaluated for noise
measurement.
Given the first values are the earliest, this is a useful policy also
with a time changing objective.
"""
## meta_parameters.noise_reeval_multiplier == 1.0
lam_reev = 1.0 * (self.lam_reeval if self.lam_reeval
else 2 + len(fit) / 20)
lam_reev = int(lam_reev) + ((lam_reev % 1) > np.random.rand())
## meta_parameters.noise_choose_reeval == 1
choice = 1
if choice == 1:
# take n_first first and reev - n_first best of the remaining
n_first = lam_reev - lam_reev // 2
sort_idx = np.argsort(array(fit, copy=False)[n_first:]) + n_first
return np.array(list(range(0, n_first)) +
list(sort_idx[0:lam_reev - n_first]), copy=False)
elif choice == 2:
idx_sorted = np.argsort(array(fit, copy=False))
# take lam_reev equally spaced, starting with best
linsp = np.linspace(0, len(fit) - len(fit) / lam_reev, lam_reev)
return idx_sorted[[int(i) for i in linsp]]
# take the ``lam_reeval`` best from the first ``2 * lam_reeval + 2`` values.
elif choice == 3:
return np.argsort(array(fit, copy=False)[:2 * (lam_reev + 1)])[:lam_reev]
else:
raise ValueError('unrecognized choice value %d for noise reev'
% choice)
|
[
"def",
"indices",
"(",
"self",
",",
"fit",
")",
":",
"## meta_parameters.noise_reeval_multiplier == 1.0",
"lam_reev",
"=",
"1.0",
"*",
"(",
"self",
".",
"lam_reeval",
"if",
"self",
".",
"lam_reeval",
"else",
"2",
"+",
"len",
"(",
"fit",
")",
"/",
"20",
")",
"lam_reev",
"=",
"int",
"(",
"lam_reev",
")",
"+",
"(",
"(",
"lam_reev",
"%",
"1",
")",
">",
"np",
".",
"random",
".",
"rand",
"(",
")",
")",
"## meta_parameters.noise_choose_reeval == 1",
"choice",
"=",
"1",
"if",
"choice",
"==",
"1",
":",
"# take n_first first and reev - n_first best of the remaining",
"n_first",
"=",
"lam_reev",
"-",
"lam_reev",
"//",
"2",
"sort_idx",
"=",
"np",
".",
"argsort",
"(",
"array",
"(",
"fit",
",",
"copy",
"=",
"False",
")",
"[",
"n_first",
":",
"]",
")",
"+",
"n_first",
"return",
"np",
".",
"array",
"(",
"list",
"(",
"range",
"(",
"0",
",",
"n_first",
")",
")",
"+",
"list",
"(",
"sort_idx",
"[",
"0",
":",
"lam_reev",
"-",
"n_first",
"]",
")",
",",
"copy",
"=",
"False",
")",
"elif",
"choice",
"==",
"2",
":",
"idx_sorted",
"=",
"np",
".",
"argsort",
"(",
"array",
"(",
"fit",
",",
"copy",
"=",
"False",
")",
")",
"# take lam_reev equally spaced, starting with best",
"linsp",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"len",
"(",
"fit",
")",
"-",
"len",
"(",
"fit",
")",
"/",
"lam_reev",
",",
"lam_reev",
")",
"return",
"idx_sorted",
"[",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"linsp",
"]",
"]",
"# take the ``lam_reeval`` best from the first ``2 * lam_reeval + 2`` values.",
"elif",
"choice",
"==",
"3",
":",
"return",
"np",
".",
"argsort",
"(",
"array",
"(",
"fit",
",",
"copy",
"=",
"False",
")",
"[",
":",
"2",
"*",
"(",
"lam_reev",
"+",
"1",
")",
"]",
")",
"[",
":",
"lam_reev",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'unrecognized choice value %d for noise reev'",
"%",
"choice",
")"
] |
https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/rllab/algos/cma_es_lib.py#L7093-L7123
|
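One reusable piece of indices() is how it turns the fractional reevaluation count lam_reev into an integer: int(lam_reev) plus one more with probability equal to the fractional part, so the expected value is preserved. An isolated sketch of that stochastic rounding, assuming numpy:

import numpy as np

def stochastic_round(x, rng=np.random.default_rng(0)):
    # Round up with probability equal to the fractional part of x,
    # so the result is an unbiased integer estimate of x.
    return int(x) + int((x % 1) > rng.random())

print(sum(stochastic_round(2.3) for _ in range(10)))  # close to 23 on average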
||
ronreiter/interactive-tutorials
|
d026d1ae58941863d60eb30a8a94a8650d2bd4bf
|
suds/sax/enc.py
|
python
|
Encoder.encode
|
(self, s)
|
return s
|
Encode special characters found in string I{s}.
@param s: A string to encode.
@type s: str
@return: The encoded string.
@rtype: str
|
Encode special characters found in string I{s}.
|
[
"Encode",
"special",
"characters",
"found",
"in",
"string",
"I",
"{",
"s",
"}",
"."
] |
def encode(self, s):
"""
Encode special characters found in string I{s}.
@param s: A string to encode.
@type s: str
@return: The encoded string.
@rtype: str
"""
if isinstance(s, str) and self.needsEncoding(s):
for x in self.encodings:
s = re.sub(x[0], x[1], s)
return s
|
[
"def",
"encode",
"(",
"self",
",",
"s",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"str",
")",
"and",
"self",
".",
"needsEncoding",
"(",
"s",
")",
":",
"for",
"x",
"in",
"self",
".",
"encodings",
":",
"s",
"=",
"re",
".",
"sub",
"(",
"x",
"[",
"0",
"]",
",",
"x",
"[",
"1",
"]",
",",
"s",
")",
"return",
"s"
] |
https://github.com/ronreiter/interactive-tutorials/blob/d026d1ae58941863d60eb30a8a94a8650d2bd4bf/suds/sax/enc.py#L65-L76
|
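Encoder.encode() applies a table of (pattern, replacement) regex pairs; self.encodings and needsEncoding() belong to the suds class, so the sketch below substitutes an illustrative table to show the same mechanism:

import re

# Illustrative pairs in the style of suds' Encoder.encodings:
# escape '&' only when it does not already start an entity.
encodings = [(r"&(?!(amp|lt|gt|quot|apos);)", "&amp;"),
             (r"<", "&lt;"),
             (r">", "&gt;")]

def encode(s):
    for pattern, replacement in encodings:
        s = re.sub(pattern, replacement, s)
    return s

print(encode("a < b & b > c"))  # a &lt; b &amp; b &gt; c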
|
googleapis/python-dialogflow
|
e48ea001b7c8a4a5c1fe4b162bad49ea397458e9
|
google/cloud/dialogflow_v2/services/environments/pagers.py
|
python
|
ListEnvironmentsPager.__getattr__
|
(self, name: str)
|
return getattr(self._response, name)
|
[] |
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
|
[
"def",
"__getattr__",
"(",
"self",
",",
"name",
":",
"str",
")",
"->",
"Any",
":",
"return",
"getattr",
"(",
"self",
".",
"_response",
",",
"name",
")"
] |
https://github.com/googleapis/python-dialogflow/blob/e48ea001b7c8a4a5c1fe4b162bad49ea397458e9/google/cloud/dialogflow_v2/services/environments/pagers.py#L73-L74
|
|||
SUSE/DeepSea
|
9c7fad93915ba1250c40d50c855011e9fe41ed21
|
srv/salt/_modules/dg.py
|
python
|
SizeMatcher.low
|
(self, low: Tuple)
|
Setter for 'low' matchers
|
Setter for 'low' matchers
|
[
"Setter",
"for",
"low",
"matchers"
] |
def low(self, low: Tuple) -> None:
""" Setter for 'low' matchers
"""
self._low, self._low_suffix = low
|
[
"def",
"low",
"(",
"self",
",",
"low",
":",
"Tuple",
")",
"->",
"None",
":",
"self",
".",
"_low",
",",
"self",
".",
"_low_suffix",
"=",
"low"
] |
https://github.com/SUSE/DeepSea/blob/9c7fad93915ba1250c40d50c855011e9fe41ed21/srv/salt/_modules/dg.py#L382-L385
|
||
insarlab/MintPy
|
4357b8c726dec8a3f936770e3f3dda92882685b7
|
mintpy/utils/utils0.py
|
python
|
azimuth2heading_angle
|
(az_angle)
|
return head_angle
|
Convert azimuth angle from ISCE los.rdr band2 into satellite orbit heading angle
ISCE-2 los.* file band2 is azimuth angle of LOS vector from ground target to the satellite
measured from the north, with anti-clockwise as positive
Below are typical values in deg for satellites with near-polar orbit:
ascending orbit: heading angle of -12 and azimuth angle of 102
descending orbit: heading angle of -168 and azimuth angle of -102
|
Convert azimuth angle from ISCE los.rdr band2 into satellite orbit heading angle
|
[
"Convert",
"azimuth",
"angle",
"from",
"ISCE",
"los",
".",
"rdr",
"band2",
"into",
"satellite",
"orbit",
"heading",
"angle"
] |
def azimuth2heading_angle(az_angle):
"""Convert azimuth angle from ISCE los.rdr band2 into satellite orbit heading angle
ISCE-2 los.* file band2 is azimuth angle of LOS vector from ground target to the satellite
measured from the north, with anti-clockwise as positive
Below are typical values in deg for satellites with near-polar orbit:
ascending orbit: heading angle of -12 and azimuth angle of 102
descending orbit: heading angle of -168 and azimuth angle of -102
"""
head_angle = 90 - az_angle
head_angle -= np.round(head_angle / 360.) * 360.
return head_angle
|
[
"def",
"azimuth2heading_angle",
"(",
"az_angle",
")",
":",
"head_angle",
"=",
"90",
"-",
"az_angle",
"head_angle",
"-=",
"np",
".",
"round",
"(",
"head_angle",
"/",
"360.",
")",
"*",
"360.",
"return",
"head_angle"
] |
https://github.com/insarlab/MintPy/blob/4357b8c726dec8a3f936770e3f3dda92882685b7/mintpy/utils/utils0.py#L366-L378
|
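The conversion is simply head = 90 - az, wrapped back into (-180, 180]. Re-running the function on the two reference cases from its own docstring confirms both orbits:

import numpy as np

def azimuth2heading_angle(az_angle):
    head_angle = 90 - az_angle
    head_angle -= np.round(head_angle / 360.) * 360.  # wrap into (-180, 180]
    return head_angle

print(azimuth2heading_angle(102))   # -12.0  (ascending orbit)
print(azimuth2heading_angle(-102))  # -168.0 (descending orbit)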
|
aws-samples/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
src/kubernetes/client/models/v1_aws_elastic_block_store_volume_source.py
|
python
|
V1AWSElasticBlockStoreVolumeSource.partition
|
(self)
|
return self._partition
|
Gets the partition of this V1AWSElasticBlockStoreVolumeSource.
The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).
:return: The partition of this V1AWSElasticBlockStoreVolumeSource.
:rtype: int
|
Gets the partition of this V1AWSElasticBlockStoreVolumeSource.
The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).
|
[
"Gets",
"the",
"partition",
"of",
"this",
"V1AWSElasticBlockStoreVolumeSource",
".",
"The",
"partition",
"in",
"the",
"volume",
"that",
"you",
"want",
"to",
"mount",
".",
"If",
"omitted",
"the",
"default",
"is",
"to",
"mount",
"by",
"volume",
"name",
".",
"Examples",
":",
"For",
"volume",
"/",
"dev",
"/",
"sda1",
"you",
"specify",
"the",
"partition",
"as",
"\\",
"1",
"\\",
".",
"Similarly",
"the",
"volume",
"partition",
"for",
"/",
"dev",
"/",
"sda",
"is",
"\\",
"0",
"\\",
"(",
"or",
"you",
"can",
"leave",
"the",
"property",
"empty",
")",
"."
] |
def partition(self):
"""
Gets the partition of this V1AWSElasticBlockStoreVolumeSource.
The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).
:return: The partition of this V1AWSElasticBlockStoreVolumeSource.
:rtype: int
"""
return self._partition
|
[
"def",
"partition",
"(",
"self",
")",
":",
"return",
"self",
".",
"_partition"
] |
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/kubernetes/client/models/v1_aws_elastic_block_store_volume_source.py#L76-L84
|
|
mayank93/Twitter-Sentiment-Analysis
|
f095c6ca6bf69787582b5dabb140fefaf278eb37
|
front-end/web2py/gluon/contrib/markdown/markdown2.py
|
python
|
_dedentlines
|
(lines, tabsize=8, skip_first_line=False)
|
return lines
|
_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
"lines" is a list of lines to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
Same as dedent() except operates on a sequence of lines. Note: the
lines list is modified **in-place**.
|
_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
|
[
"_dedentlines",
"(",
"lines",
"tabsize",
"=",
"8",
"skip_first_line",
"=",
"False",
")",
"-",
">",
"dedented",
"lines"
] |
def _dedentlines(lines, tabsize=8, skip_first_line=False):
"""_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
"lines" is a list of lines to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
Same as dedent() except operates on a sequence of lines. Note: the
lines list is modified **in-place**.
"""
DEBUG = False
if DEBUG:
print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
% (tabsize, skip_first_line)
indents = []
margin = None
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
indent = 0
for ch in line:
if ch == ' ':
indent += 1
elif ch == '\t':
indent += tabsize - (indent % tabsize)
elif ch in '\r\n':
continue # skip all-whitespace lines
else:
break
else:
continue # skip all-whitespace lines
if DEBUG: print "dedent: indent=%d: %r" % (indent, line)
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if DEBUG: print "dedent: margin=%r" % margin
if margin is not None and margin > 0:
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
removed = 0
for j, ch in enumerate(line):
if ch == ' ':
removed += 1
elif ch == '\t':
removed += tabsize - (removed % tabsize)
elif ch in '\r\n':
if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line
lines[i] = lines[i][j:]
break
else:
raise ValueError("unexpected non-whitespace char %r in "
"line %r while removing %d-space margin"
% (ch, line, margin))
if DEBUG:
print "dedent: %r: %r -> removed %d/%d"\
% (line, ch, removed, margin)
if removed == margin:
lines[i] = lines[i][j+1:]
break
elif removed > margin:
lines[i] = ' '*(removed-margin) + lines[i][j+1:]
break
else:
if removed:
lines[i] = lines[i][removed:]
return lines
|
[
"def",
"_dedentlines",
"(",
"lines",
",",
"tabsize",
"=",
"8",
",",
"skip_first_line",
"=",
"False",
")",
":",
"DEBUG",
"=",
"False",
"if",
"DEBUG",
":",
"print",
"\"dedent: dedent(..., tabsize=%d, skip_first_line=%r)\"",
"%",
"(",
"tabsize",
",",
"skip_first_line",
")",
"indents",
"=",
"[",
"]",
"margin",
"=",
"None",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"if",
"i",
"==",
"0",
"and",
"skip_first_line",
":",
"continue",
"indent",
"=",
"0",
"for",
"ch",
"in",
"line",
":",
"if",
"ch",
"==",
"' '",
":",
"indent",
"+=",
"1",
"elif",
"ch",
"==",
"'\\t'",
":",
"indent",
"+=",
"tabsize",
"-",
"(",
"indent",
"%",
"tabsize",
")",
"elif",
"ch",
"in",
"'\\r\\n'",
":",
"continue",
"# skip all-whitespace lines",
"else",
":",
"break",
"else",
":",
"continue",
"# skip all-whitespace lines",
"if",
"DEBUG",
":",
"print",
"\"dedent: indent=%d: %r\"",
"%",
"(",
"indent",
",",
"line",
")",
"if",
"margin",
"is",
"None",
":",
"margin",
"=",
"indent",
"else",
":",
"margin",
"=",
"min",
"(",
"margin",
",",
"indent",
")",
"if",
"DEBUG",
":",
"print",
"\"dedent: margin=%r\"",
"%",
"margin",
"if",
"margin",
"is",
"not",
"None",
"and",
"margin",
">",
"0",
":",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"if",
"i",
"==",
"0",
"and",
"skip_first_line",
":",
"continue",
"removed",
"=",
"0",
"for",
"j",
",",
"ch",
"in",
"enumerate",
"(",
"line",
")",
":",
"if",
"ch",
"==",
"' '",
":",
"removed",
"+=",
"1",
"elif",
"ch",
"==",
"'\\t'",
":",
"removed",
"+=",
"tabsize",
"-",
"(",
"removed",
"%",
"tabsize",
")",
"elif",
"ch",
"in",
"'\\r\\n'",
":",
"if",
"DEBUG",
":",
"print",
"\"dedent: %r: EOL -> strip up to EOL\"",
"%",
"line",
"lines",
"[",
"i",
"]",
"=",
"lines",
"[",
"i",
"]",
"[",
"j",
":",
"]",
"break",
"else",
":",
"raise",
"ValueError",
"(",
"\"unexpected non-whitespace char %r in \"",
"\"line %r while removing %d-space margin\"",
"%",
"(",
"ch",
",",
"line",
",",
"margin",
")",
")",
"if",
"DEBUG",
":",
"print",
"\"dedent: %r: %r -> removed %d/%d\"",
"%",
"(",
"line",
",",
"ch",
",",
"removed",
",",
"margin",
")",
"if",
"removed",
"==",
"margin",
":",
"lines",
"[",
"i",
"]",
"=",
"lines",
"[",
"i",
"]",
"[",
"j",
"+",
"1",
":",
"]",
"break",
"elif",
"removed",
">",
"margin",
":",
"lines",
"[",
"i",
"]",
"=",
"' '",
"*",
"(",
"removed",
"-",
"margin",
")",
"+",
"lines",
"[",
"i",
"]",
"[",
"j",
"+",
"1",
":",
"]",
"break",
"else",
":",
"if",
"removed",
":",
"lines",
"[",
"i",
"]",
"=",
"lines",
"[",
"i",
"]",
"[",
"removed",
":",
"]",
"return",
"lines"
] |
https://github.com/mayank93/Twitter-Sentiment-Analysis/blob/f095c6ca6bf69787582b5dabb140fefaf278eb37/front-end/web2py/gluon/contrib/markdown/markdown2.py#L1592-L1660
|
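As its docstring says, _dedentlines() matches dedent() except that it mutates a sequence of lines in place and accounts for tabs. The stdlib textwrap.dedent() demonstrates the intended effect on a small block (a behavioral reference, not the markdown2 code path):

import textwrap

text = "    first\n      second\n    third\n"
print(textwrap.dedent(text))
# first
#   second
# third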
|
desaster/kippo
|
0d036350a719288f83078da8399572121f337f7e
|
kippo/dblog/mysql.py
|
python
|
DBLogger.simpleQuery
|
(self, sql, args)
|
Just run a deferred sql query, only care about errors
|
Just run a deferred sql query, only care about errors
|
[
"Just",
"run",
"a",
"deferred",
"sql",
"query",
"only",
"care",
"about",
"errors"
] |
def simpleQuery(self, sql, args):
""" Just run a deferred sql query, only care about errors """
d = self.db.runQuery(sql, args)
d.addErrback(self.sqlerror)
|
[
"def",
"simpleQuery",
"(",
"self",
",",
"sql",
",",
"args",
")",
":",
"d",
"=",
"self",
".",
"db",
".",
"runQuery",
"(",
"sql",
",",
"args",
")",
"d",
".",
"addErrback",
"(",
"self",
".",
"sqlerror",
")"
] |
https://github.com/desaster/kippo/blob/0d036350a719288f83078da8399572121f337f7e/kippo/dblog/mysql.py#L51-L54
|
||
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
WebMirror/management/rss_parser_funcs/feed_parse_extractTamingwangxianWordpressCom.py
|
python
|
extractTamingwangxianWordpressCom
|
(item)
|
return False
|
Parser for 'tamingwangxian.wordpress.com'
|
Parser for 'tamingwangxian.wordpress.com'
|
[
"Parser",
"for",
"tamingwangxian",
".",
"wordpress",
".",
"com"
] |
def extractTamingwangxianWordpressCom(item):
'''
Parser for 'tamingwangxian.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('mdzs', 'Grandmaster of Demonic Cultivation', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
[
"def",
"extractTamingwangxianWordpressCom",
"(",
"item",
")",
":",
"vol",
",",
"chp",
",",
"frag",
",",
"postfix",
"=",
"extractVolChapterFragmentPostfix",
"(",
"item",
"[",
"'title'",
"]",
")",
"if",
"not",
"(",
"chp",
"or",
"vol",
")",
"or",
"\"preview\"",
"in",
"item",
"[",
"'title'",
"]",
".",
"lower",
"(",
")",
":",
"return",
"None",
"tagmap",
"=",
"[",
"(",
"'mdzs'",
",",
"'Grandmaster of Demonic Cultivation'",
",",
"'translated'",
")",
",",
"(",
"'PRC'",
",",
"'PRC'",
",",
"'translated'",
")",
",",
"(",
"'Loiterous'",
",",
"'Loiterous'",
",",
"'oel'",
")",
",",
"]",
"for",
"tagname",
",",
"name",
",",
"tl_type",
"in",
"tagmap",
":",
"if",
"tagname",
"in",
"item",
"[",
"'tags'",
"]",
":",
"return",
"buildReleaseMessageWithType",
"(",
"item",
",",
"name",
",",
"vol",
",",
"chp",
",",
"frag",
"=",
"frag",
",",
"postfix",
"=",
"postfix",
",",
"tl_type",
"=",
"tl_type",
")",
"return",
"False"
] |
https://github.com/fake-name/ReadableWebProxy/blob/ed5c7abe38706acc2684a1e6cd80242a03c5f010/WebMirror/management/rss_parser_funcs/feed_parse_extractTamingwangxianWordpressCom.py#L1-L21
|
|
burke-software/schooldriver
|
a07262ba864aee0182548ecceb661e49c925725f
|
appy/fields/string.py
|
python
|
String.getCkLanguage
|
(self)
|
return 'en_US'
|
Gets the language for CK editor SCAYT. We will use
self.contentLanguage. If it is not supported by CK, we use
English.
|
Gets the language for CK editor SCAYT. We will use
self.contentLanguage. If it is not supported by CK, we use
English.
|
[
"Gets",
"the",
"language",
"for",
"CK",
"editor",
"SCAYT",
".",
"We",
"will",
"use",
"self",
".",
"contentLanguage",
".",
"If",
"it",
"is",
"not",
"supported",
"by",
"CK",
"we",
"use",
"english",
"."
] |
def getCkLanguage(self):
'''Gets the language for CK editor SCAYT. We will use
self.contentLanguage. If it is not supported by CK, we use
English.'''
lang = self.contentLanguage
if lang and (lang in self.ckLanguages): return self.ckLanguages[lang]
return 'en_US'
|
[
"def",
"getCkLanguage",
"(",
"self",
")",
":",
"lang",
"=",
"self",
".",
"contentLanguage",
"if",
"lang",
"and",
"(",
"lang",
"in",
"self",
".",
"ckLanguages",
")",
":",
"return",
"self",
".",
"ckLanguages",
"[",
"lang",
"]",
"return",
"'en_US'"
] |
https://github.com/burke-software/schooldriver/blob/a07262ba864aee0182548ecceb661e49c925725f/appy/fields/string.py#L682-L688
|
|
huggingface/transformers
|
623b4f7c63f60cce917677ee704d6c93ee960b4b
|
src/transformers/trainer_utils.py
|
python
|
TrainerMemoryTracker.__init__
|
(self, skip_memory_metrics=False)
|
[] |
def __init__(self, skip_memory_metrics=False):
self.skip_memory_metrics = skip_memory_metrics
if not is_psutil_available():
# soft dependency on psutil
self.skip_memory_metrics = True
if self.skip_memory_metrics:
return
import psutil # noqa
if is_torch_cuda_available():
import torch
self.torch = torch
self.gpu = {}
else:
self.torch = None
self.process = psutil.Process()
self.cur_stage = None
self.cpu = {}
self.init_reported = False
|
[
"def",
"__init__",
"(",
"self",
",",
"skip_memory_metrics",
"=",
"False",
")",
":",
"self",
".",
"skip_memory_metrics",
"=",
"skip_memory_metrics",
"if",
"not",
"is_psutil_available",
"(",
")",
":",
"# soft dependency on psutil",
"self",
".",
"skip_memory_metrics",
"=",
"True",
"if",
"self",
".",
"skip_memory_metrics",
":",
"return",
"import",
"psutil",
"# noqa",
"if",
"is_torch_cuda_available",
"(",
")",
":",
"import",
"torch",
"self",
".",
"torch",
"=",
"torch",
"self",
".",
"gpu",
"=",
"{",
"}",
"else",
":",
"self",
".",
"torch",
"=",
"None",
"self",
".",
"process",
"=",
"psutil",
".",
"Process",
"(",
")",
"self",
".",
"cur_stage",
"=",
"None",
"self",
".",
"cpu",
"=",
"{",
"}",
"self",
".",
"init_reported",
"=",
"False"
] |
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/src/transformers/trainer_utils.py#L321-L346
|
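The constructor above shows a soft-dependency guard: if psutil is missing, the tracker silently disables itself instead of raising. The same pattern in miniature, with a hypothetical helper name:

try:
    import psutil
except ImportError:  # soft dependency: degrade instead of failing
    psutil = None

def rss_mb():
    # Resident set size of the current process in MiB, or None if unavailable.
    if psutil is None:
        return None
    return psutil.Process().memory_info().rss / 2**20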
||||
JaniceWuo/MovieRecommend
|
4c86db64ca45598917d304f535413df3bc9fea65
|
movierecommend/venv1/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/ipaddress.py
|
python
|
_IPAddressBase._prefix_from_ip_string
|
(cls, ip_str)
|
Turn a netmask/hostmask string into a prefix length
Args:
ip_str: The netmask/hostmask to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask/hostmask
|
Turn a netmask/hostmask string into a prefix length
|
[
"Turn",
"a",
"netmask",
"/",
"hostmask",
"string",
"into",
"a",
"prefix",
"length"
] |
def _prefix_from_ip_string(cls, ip_str):
"""Turn a netmask/hostmask string into a prefix length
Args:
ip_str: The netmask/hostmask to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask/hostmask
"""
# Parse the netmask/hostmask like an IP address.
try:
ip_int = cls._ip_int_from_string(ip_str)
except AddressValueError:
cls._report_invalid_netmask(ip_str)
# Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
# Note that the two ambiguous cases (all-ones and all-zeroes) are
# treated as netmasks.
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
pass
# Invert the bits, and try matching a /0+1+/ hostmask instead.
ip_int ^= cls._ALL_ONES
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
cls._report_invalid_netmask(ip_str)
|
[
"def",
"_prefix_from_ip_string",
"(",
"cls",
",",
"ip_str",
")",
":",
"# Parse the netmask/hostmask like an IP address.",
"try",
":",
"ip_int",
"=",
"cls",
".",
"_ip_int_from_string",
"(",
"ip_str",
")",
"except",
"AddressValueError",
":",
"cls",
".",
"_report_invalid_netmask",
"(",
"ip_str",
")",
"# Try matching a netmask (this would be /1*0*/ as a bitwise regexp).",
"# Note that the two ambiguous cases (all-ones and all-zeroes) are",
"# treated as netmasks.",
"try",
":",
"return",
"cls",
".",
"_prefix_from_ip_int",
"(",
"ip_int",
")",
"except",
"ValueError",
":",
"pass",
"# Invert the bits, and try matching a /0+1+/ hostmask instead.",
"ip_int",
"^=",
"cls",
".",
"_ALL_ONES",
"try",
":",
"return",
"cls",
".",
"_prefix_from_ip_int",
"(",
"ip_int",
")",
"except",
"ValueError",
":",
"cls",
".",
"_report_invalid_netmask",
"(",
"ip_str",
")"
] |
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/ipaddress.py#L624-L655
|
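This vendored module mirrors the stdlib ipaddress module, where the same netmask/hostmask-to-prefix logic is reachable through ip_network(); both spellings of a /24 resolve to the same prefix length:

import ipaddress

print(ipaddress.ip_network("192.0.2.0/255.255.255.0").prefixlen)  # 24 (netmask)
print(ipaddress.ip_network("192.0.2.0/0.0.0.255").prefixlen)      # 24 (hostmask)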
||
python-diamond/Diamond
|
7000e16cfdf4508ed9291fc4b3800592557b2431
|
src/collectors/bind/bind.py
|
python
|
BindCollector.get_default_config
|
(self)
|
return config
|
Returns the default collector settings
|
Returns the default collector settings
|
[
"Returns",
"the",
"default",
"collector",
"settings"
] |
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(BindCollector, self).get_default_config()
config.update({
'host': 'localhost',
'port': 8080,
'path': 'bind',
# Available stats:
# - resolver (Per-view resolver and cache statistics)
# - server (Incoming requests and their answers)
# - zonemgmt (Requests/responses related to zone management)
# - sockets (Socket statistics)
# - memory (Global memory usage)
'publish': [
'resolver',
'server',
'zonemgmt',
'sockets',
'memory',
],
# By default we don't publish these special views
'publish_view_bind': False,
'publish_view_meta': False,
})
return config
|
[
"def",
"get_default_config",
"(",
"self",
")",
":",
"config",
"=",
"super",
"(",
"BindCollector",
",",
"self",
")",
".",
"get_default_config",
"(",
")",
"config",
".",
"update",
"(",
"{",
"'host'",
":",
"'localhost'",
",",
"'port'",
":",
"8080",
",",
"'path'",
":",
"'bind'",
",",
"# Available stats:",
"# - resolver (Per-view resolver and cache statistics)",
"# - server (Incoming requests and their answers)",
"# - zonemgmt (Requests/responses related to zone management)",
"# - sockets (Socket statistics)",
"# - memory (Global memory usage)",
"'publish'",
":",
"[",
"'resolver'",
",",
"'server'",
",",
"'zonemgmt'",
",",
"'sockets'",
",",
"'memory'",
",",
"]",
",",
"# By default we don't publish these special views",
"'publish_view_bind'",
":",
"False",
",",
"'publish_view_meta'",
":",
"False",
",",
"}",
")",
"return",
"config"
] |
https://github.com/python-diamond/Diamond/blob/7000e16cfdf4508ed9291fc4b3800592557b2431/src/collectors/bind/bind.py#L36-L62
|
|
XanaduAI/strawberryfields
|
298601e409528f22c6717c2d816ab68ae8bda1fa
|
strawberryfields/backends/bosonicbackend/bosoniccircuit.py
|
python
|
BosonicModes.squeeze
|
(self, r, phi, k)
|
r"""Squeeze mode ``k`` by the amount ``r*exp(1j*phi)``.
Args:
r (float): squeezing magnitude
phi (float): squeezing phase
k (int): mode to be squeezed
Raises:
ValueError: if the mode is not in the list of active modes
|
r"""Squeeze mode ``k`` by the amount ``r*exp(1j*phi)``.
|
[
"r",
"Squeeze",
"mode",
"k",
"by",
"the",
"amount",
"r",
"*",
"exp",
"(",
"1j",
"*",
"phi",
")",
"."
] |
def squeeze(self, r, phi, k):
r"""Squeeze mode ``k`` by the amount ``r*exp(1j*phi)``.
Args:
r (float): squeezing magnitude
phi (float): squeezing phase
k (int): mode to be squeezed
Raises:
ValueError: if the mode is not in the list of active modes
"""
if self.active[k] is None:
raise ValueError("Cannot squeeze mode, mode does not exist")
sq = symp.expand(symp.squeezing(r, phi), k, self.nlen)
self.means = update_means(self.means, sq, self.from_xp)
self.covs = update_covs(self.covs, sq, self.from_xp)
|
[
"def",
"squeeze",
"(",
"self",
",",
"r",
",",
"phi",
",",
"k",
")",
":",
"if",
"self",
".",
"active",
"[",
"k",
"]",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cannot squeeze mode, mode does not exist\"",
")",
"sq",
"=",
"symp",
".",
"expand",
"(",
"symp",
".",
"squeezing",
"(",
"r",
",",
"phi",
")",
",",
"k",
",",
"self",
".",
"nlen",
")",
"self",
".",
"means",
"=",
"update_means",
"(",
"self",
".",
"means",
",",
"sq",
",",
"self",
".",
"from_xp",
")",
"self",
".",
"covs",
"=",
"update_covs",
"(",
"self",
".",
"covs",
",",
"sq",
",",
"self",
".",
"from_xp",
")"
] |
https://github.com/XanaduAI/strawberryfields/blob/298601e409528f22c6717c2d816ab68ae8bda1fa/strawberryfields/backends/bosonicbackend/bosoniccircuit.py#L263-L279
|
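symp.squeezing(r, phi) above builds the single-mode squeezing symplectic that is then applied to the means and covariances. Under one common convention (quadrature ordering (x, p), with S = R(phi/2) diag(e^-r, e^r) R(phi/2)^T), a numpy sketch of the covariance update V -> S V S^T:

import numpy as np

def squeezing_symplectic(r, phi):
    # Single-mode squeezing symplectic matrix in (x, p) ordering.
    c, s = np.cos(phi / 2), np.sin(phi / 2)
    rot = np.array([[c, -s], [s, c]])
    return rot @ np.diag([np.exp(-r), np.exp(r)]) @ rot.T

S = squeezing_symplectic(0.5, 0.0)
cov = np.eye(2)        # illustrative vacuum covariance
print(S @ cov @ S.T)   # diag(e^-1, e^1): squeezed x, anti-squeezed p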
||
biolab/orange3
|
41685e1c7b1d1babe680113685a2d44bcc9fec0b
|
Orange/widgets/visualize/owvenndiagram.py
|
python
|
OWVennDiagram.extract_columnwise
|
(self, var_dict, columns=None)
|
return self.merge_data(domain, values)
|
[] |
def extract_columnwise(self, var_dict, columns=None):
domain = {type_ : [] for type_ in self.atr_types}
values = defaultdict(list)
renamed = []
for atr_type, vars_dict in var_dict.items():
for var_name, var_data in vars_dict.items():
is_selected = bool(columns) and var_name.name in columns
if var_data[0]:
#columns are different, copy all, rename them
for var, table_key in var_data[1]:
idx = list(self.data).index(table_key) + 1
new_atr = var.copy(name=f'{var_name.name} ({idx})')
if columns and atr_type == 'attributes':
new_atr.attributes['Selected'] = is_selected
domain[atr_type].append(new_atr)
renamed.append(var_name.name)
values[atr_type].append(getattr(self.data[table_key].table[:, var_name],
self.atr_vals[atr_type])
.reshape(-1, 1))
else:
new_atr = var_data[1][0][0].copy()
if columns and atr_type == 'attributes':
new_atr.attributes['Selected'] = is_selected
domain[atr_type].append(new_atr)
values[atr_type].append(getattr(self.data[var_data[1][0][1]].table[:, var_name],
self.atr_vals[atr_type])
.reshape(-1, 1))
if renamed:
self.Warning.renamed_vars(', '.join(renamed))
return self.merge_data(domain, values)
|
[
"def",
"extract_columnwise",
"(",
"self",
",",
"var_dict",
",",
"columns",
"=",
"None",
")",
":",
"domain",
"=",
"{",
"type_",
":",
"[",
"]",
"for",
"type_",
"in",
"self",
".",
"atr_types",
"}",
"values",
"=",
"defaultdict",
"(",
"list",
")",
"renamed",
"=",
"[",
"]",
"for",
"atr_type",
",",
"vars_dict",
"in",
"var_dict",
".",
"items",
"(",
")",
":",
"for",
"var_name",
",",
"var_data",
"in",
"vars_dict",
".",
"items",
"(",
")",
":",
"is_selected",
"=",
"bool",
"(",
"columns",
")",
"and",
"var_name",
".",
"name",
"in",
"columns",
"if",
"var_data",
"[",
"0",
"]",
":",
"#columns are different, copy all, rename them",
"for",
"var",
",",
"table_key",
"in",
"var_data",
"[",
"1",
"]",
":",
"idx",
"=",
"list",
"(",
"self",
".",
"data",
")",
".",
"index",
"(",
"table_key",
")",
"+",
"1",
"new_atr",
"=",
"var",
".",
"copy",
"(",
"name",
"=",
"f'{var_name.name} ({idx})'",
")",
"if",
"columns",
"and",
"atr_type",
"==",
"'attributes'",
":",
"new_atr",
".",
"attributes",
"[",
"'Selected'",
"]",
"=",
"is_selected",
"domain",
"[",
"atr_type",
"]",
".",
"append",
"(",
"new_atr",
")",
"renamed",
".",
"append",
"(",
"var_name",
".",
"name",
")",
"values",
"[",
"atr_type",
"]",
".",
"append",
"(",
"getattr",
"(",
"self",
".",
"data",
"[",
"table_key",
"]",
".",
"table",
"[",
":",
",",
"var_name",
"]",
",",
"self",
".",
"atr_vals",
"[",
"atr_type",
"]",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
")",
"else",
":",
"new_atr",
"=",
"var_data",
"[",
"1",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"copy",
"(",
")",
"if",
"columns",
"and",
"atr_type",
"==",
"'attributes'",
":",
"new_atr",
".",
"attributes",
"[",
"'Selected'",
"]",
"=",
"is_selected",
"domain",
"[",
"atr_type",
"]",
".",
"append",
"(",
"new_atr",
")",
"values",
"[",
"atr_type",
"]",
".",
"append",
"(",
"getattr",
"(",
"self",
".",
"data",
"[",
"var_data",
"[",
"1",
"]",
"[",
"0",
"]",
"[",
"1",
"]",
"]",
".",
"table",
"[",
":",
",",
"var_name",
"]",
",",
"self",
".",
"atr_vals",
"[",
"atr_type",
"]",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
")",
"if",
"renamed",
":",
"self",
".",
"Warning",
".",
"renamed_vars",
"(",
"', '",
".",
"join",
"(",
"renamed",
")",
")",
"return",
"self",
".",
"merge_data",
"(",
"domain",
",",
"values",
")"
] |
https://github.com/biolab/orange3/blob/41685e1c7b1d1babe680113685a2d44bcc9fec0b/Orange/widgets/visualize/owvenndiagram.py#L452-L481
|
|||
natewong1313/bird-bot
|
0a76dca2157c021c6cd5734928b1ffcf46a2b3b2
|
pages/settingspage.py
|
python
|
SettingsPage.update_settings
|
(self,settings_data)
|
[] |
def update_settings(self,settings_data):
global webhook, webhook_on_browser, webhook_on_order, webhook_on_failed, browser_on_failed
settings.webhook, settings.webhook_on_browser, settings.webhook_on_order, settings.webhook_on_failed, settings.browser_on_failed, settings.buy_one = settings_data["webhook"], settings_data["webhookonbrowser"], settings_data["webhookonorder"], settings_data["webhookonfailed"], settings_data["browseronfailed"], settings_data['onlybuyone']
|
[
"def",
"update_settings",
"(",
"self",
",",
"settings_data",
")",
":",
"global",
"webhook",
",",
"webhook_on_browser",
",",
"webhook_on_order",
",",
"webhook_on_failed",
",",
"browser_on_failed",
"settings",
".",
"webhook",
",",
"settings",
".",
"webhook_on_browser",
",",
"settings",
".",
"webhook_on_order",
",",
"settings",
".",
"webhook_on_failed",
",",
"settings",
".",
"browser_on_failed",
",",
"settings",
".",
"buy_one",
"=",
"settings_data",
"[",
"\"webhook\"",
"]",
",",
"settings_data",
"[",
"\"webhookonbrowser\"",
"]",
",",
"settings_data",
"[",
"\"webhookonorder\"",
"]",
",",
"settings_data",
"[",
"\"webhookonfailed\"",
"]",
",",
"settings_data",
"[",
"\"browseronfailed\"",
"]",
",",
"settings_data",
"[",
"'onlybuyone'",
"]"
] |
https://github.com/natewong1313/bird-bot/blob/0a76dca2157c021c6cd5734928b1ffcf46a2b3b2/pages/settingspage.py#L116-L118
|
||||
wistbean/learn_python3_spider
|
73c873f4845f4385f097e5057407d03dd37a117b
|
stackoverflow/venv/lib/python3.6/site-packages/zope/interface/interface.py
|
python
|
Element.getName
|
(self)
|
return self.__name__
|
Returns the name of the object.
|
Returns the name of the object.
|
[
"Returns",
"the",
"name",
"of",
"the",
"object",
"."
] |
def getName(self):
""" Returns the name of the object. """
return self.__name__
|
[
"def",
"getName",
"(",
"self",
")",
":",
"return",
"self",
".",
"__name__"
] |
https://github.com/wistbean/learn_python3_spider/blob/73c873f4845f4385f097e5057407d03dd37a117b/stackoverflow/venv/lib/python3.6/site-packages/zope/interface/interface.py#L69-L71
|
|
barseghyanartur/django-elasticsearch-dsl-drf
|
8fe35265d44501269b2603570773be47f20fa471
|
examples/simple/factories/books_book.py
|
python
|
BookWithoutTagsAndOrdersFactory.orders
|
(obj, created, extracted, **kwargs)
|
Dummy.
|
Dummy.
|
[
"Dummy",
"."
] |
def orders(obj, created, extracted, **kwargs):
"""Dummy."""
|
[
"def",
"orders",
"(",
"obj",
",",
"created",
",",
"extracted",
",",
"*",
"*",
"kwargs",
")",
":"
] |
https://github.com/barseghyanartur/django-elasticsearch-dsl-drf/blob/8fe35265d44501269b2603570773be47f20fa471/examples/simple/factories/books_book.py#L161-L162
|
||
wikimedia/pywikibot
|
81a01ffaec7271bf5b4b170f85a80388420a4e78
|
pywikibot/logging.py
|
python
|
log
|
(text: object, decoder: Optional[str] = None, newline: bool = True,
**kwargs: Any)
|
Output a record to the log file.
:param text: the message which is to be logged to the log file.
:param decoder: If None, text should be a unicode string else it should
be encoded in the given encoding.
:param newline: If True, a line feed will be added after printing the text.
:param kwargs: The keyword arguments can be found in the python doc:
https://docs.python.org/3/howto/logging-cookbook.html
|
Output a record to the log file.
|
[
"Output",
"a",
"record",
"to",
"the",
"log",
"file",
"."
] |
def log(text: object, decoder: Optional[str] = None, newline: bool = True,
**kwargs: Any) -> None:
"""Output a record to the log file.
:param text: the message which is to be logged to the log file.
:param decoder: If None, text should be a unicode string else it should
be encoded in the given encoding.
:param newline: If True, a line feed will be added after printing the text.
:param kwargs: The keyword arguments can be found in the python doc:
https://docs.python.org/3/howto/logging-cookbook.html
"""
logoutput(text, decoder, newline, VERBOSE, **kwargs)
|
[
"def",
"log",
"(",
"text",
":",
"object",
",",
"decoder",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"newline",
":",
"bool",
"=",
"True",
",",
"*",
"*",
"kwargs",
":",
"Any",
")",
"->",
"None",
":",
"logoutput",
"(",
"text",
",",
"decoder",
",",
"newline",
",",
"VERBOSE",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wikimedia/pywikibot/blob/81a01ffaec7271bf5b4b170f85a80388420a4e78/pywikibot/logging.py#L182-L193
|
||
arrayfire/arrayfire-python
|
96fa9768ee02e5fb5ffcaf3d1f744c898b141637
|
arrayfire/arith.py
|
python
|
exp
|
(a)
|
return _arith_unary_func(a, backend.get().af_exp)
|
Exponential of each element in the array.
Parameters
----------
a : af.Array
Multi dimensional arrayfire array.
Returns
--------
out : af.Array
array containing the exponential of each value from `a`.
Note
-------
`a` must not be complex.
|
Exponential of each element in the array.
|
[
"Exponential",
"of",
"each",
"element",
"in",
"the",
"array",
"."
] |
def exp(a):
"""
Exponential of each element in the array.
Parameters
----------
a : af.Array
Multi dimensional arrayfire array.
Returns
--------
out : af.Array
array containing the exponential of each value from `a`.
Note
-------
`a` must not be complex.
"""
return _arith_unary_func(a, backend.get().af_exp)
|
[
"def",
"exp",
"(",
"a",
")",
":",
"return",
"_arith_unary_func",
"(",
"a",
",",
"backend",
".",
"get",
"(",
")",
".",
"af_exp",
")"
] |
https://github.com/arrayfire/arrayfire-python/blob/96fa9768ee02e5fb5ffcaf3d1f744c898b141637/arrayfire/arith.py#L779-L797
|
|
sio2project/oioioi
|
adeb6a7b278b6bed853405e525f87fd2726c06ac
|
oioioi/sinolpack/package.py
|
python
|
SinolPackage._process_attachments
|
(self)
|
Removes previously added attachments for the problem,
and saves new ones from the attachment directory.
|
Removes previously added attachments for the problem,
and saves new ones from the attachment directory.
|
[
"Removes",
"previously",
"added",
"attachments",
"for",
"the",
"problem",
"and",
"saves",
"new",
"ones",
"from",
"the",
"attachment",
"directory",
"."
] |
def _process_attachments(self):
"""Removes previously added attachments for the problem,
and saves new ones from the attachment directory.
"""
problem_attachments = ProblemAttachment.objects.filter(problem=self.problem)
if problem_attachments is not None:
problem_attachments.delete()
attachments_dir = os.path.join(self.rootdir, 'attachments')
if not os.path.isdir(attachments_dir):
return
attachments = [
attachment
for attachment in os.listdir(attachments_dir)
if os.path.isfile(os.path.join(attachments_dir, attachment))
]
if len(attachments) == 0:
return
for attachment in attachments:
path = os.path.join(attachments_dir, attachment)
instance = ProblemAttachment(problem=self.problem, description=attachment)
instance.content.save(attachment, File(open(path, 'rb')))
logger.info('%s: attachment: %s', path, attachment)
|
[
"def",
"_process_attachments",
"(",
"self",
")",
":",
"problem_attachments",
"=",
"ProblemAttachment",
".",
"objects",
".",
"filter",
"(",
"problem",
"=",
"self",
".",
"problem",
")",
"if",
"problem_attachments",
"is",
"not",
"None",
":",
"problem_attachments",
".",
"delete",
"(",
")",
"attachments_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"rootdir",
",",
"'attachments'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"attachments_dir",
")",
":",
"return",
"attachments",
"=",
"[",
"attachment",
"for",
"attachment",
"in",
"os",
".",
"listdir",
"(",
"attachments_dir",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"attachments_dir",
",",
"attachment",
")",
")",
"]",
"if",
"len",
"(",
"attachments",
")",
"==",
"0",
":",
"return",
"for",
"attachment",
"in",
"attachments",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"attachments_dir",
",",
"attachment",
")",
"instance",
"=",
"ProblemAttachment",
"(",
"problem",
"=",
"self",
".",
"problem",
",",
"description",
"=",
"attachment",
")",
"instance",
".",
"content",
".",
"save",
"(",
"attachment",
",",
"File",
"(",
"open",
"(",
"path",
",",
"'rb'",
")",
")",
")",
"logger",
".",
"info",
"(",
"'%s: attachment: %s'",
",",
"path",
",",
"attachment",
")"
] |
https://github.com/sio2project/oioioi/blob/adeb6a7b278b6bed853405e525f87fd2726c06ac/oioioi/sinolpack/package.py#L1232-L1255
|
||
ARM-DOE/pyart
|
72affe5b669f1996cd3cc39ec7d8dd29b838bd48
|
pyart/core/transforms.py
|
python
|
cartesian_to_geographic
|
(x, y, projparams)
|
return lon, lat
|
Cartesian to Geographic coordinate transform.
Transform a set of Cartesian/Cartographic coordinates (x, y) to a
geographic coordinate system (lat, lon) using pyproj or a built-in
Azimuthal equidistant projection.
Parameters
----------
x, y : array-like
Cartesian coordinates in meters unless R is defined in different units
in the projparams parameter.
projparams : dict or str
Projection parameters passed to pyproj.Proj. If this parameter is a
dictionary with a 'proj' key equal to 'pyart_aeqd' then an azimuthal
equidistant projection will be used that is native to Py-ART and
does not require pyproj to be installed. In this case a non-default
value of R can be specified by setting the 'R' key to the desired
value.
Returns
-------
lon, lat : array
Longitude and latitude of the Cartesian coordinates in degrees.
|
Cartesian to Geographic coordinate transform.
|
[
"Cartesian",
"to",
"Geographic",
"coordinate",
"transform",
"."
] |
def cartesian_to_geographic(x, y, projparams):
"""
Cartesian to Geographic coordinate transform.
Transform a set of Cartesian/Cartographic coordinates (x, y) to a
geographic coordinate system (lat, lon) using pyproj or a built-in
Azimuthal equidistant projection.
Parameters
----------
x, y : array-like
Cartesian coordinates in meters unless R is defined in different units
in the projparams parameter.
projparams : dict or str
Projection parameters passed to pyproj.Proj. If this parameter is a
dictionary with a 'proj' key equal to 'pyart_aeqd' then an azimuthal
equidistant projection will be used that is native to Py-ART and
does not require pyproj to be installed. In this case a non-default
value of R can be specified by setting the 'R' key to the desired
value.
Returns
-------
lon, lat : array
Longitude and latitude of the Cartesian coordinates in degrees.
"""
if isinstance(projparams, dict) and projparams.get('proj') == 'pyart_aeqd':
# Use Py-ART's Azimuthal equidistance projection
lon_0 = projparams['lon_0']
lat_0 = projparams['lat_0']
if 'R' in projparams:
R = projparams['R']
lon, lat = cartesian_to_geographic_aeqd(x, y, lon_0, lat_0, R)
else:
lon, lat = cartesian_to_geographic_aeqd(x, y, lon_0, lat_0)
else:
# Use pyproj for the projection
# check that pyproj is available
if not _PYPROJ_AVAILABLE:
raise MissingOptionalDependency(
"PyProj is required to use cartesian_to_geographic "
"with a projection other than pyart_aeqd but it is not "
"installed")
proj = pyproj.Proj(projparams)
lon, lat = proj(x, y, inverse=True)
return lon, lat
|
[
"def",
"cartesian_to_geographic",
"(",
"x",
",",
"y",
",",
"projparams",
")",
":",
"if",
"isinstance",
"(",
"projparams",
",",
"dict",
")",
"and",
"projparams",
".",
"get",
"(",
"'proj'",
")",
"==",
"'pyart_aeqd'",
":",
"# Use Py-ART's Azimuthal equidistance projection",
"lon_0",
"=",
"projparams",
"[",
"'lon_0'",
"]",
"lat_0",
"=",
"projparams",
"[",
"'lat_0'",
"]",
"if",
"'R'",
"in",
"projparams",
":",
"R",
"=",
"projparams",
"[",
"'R'",
"]",
"lon",
",",
"lat",
"=",
"cartesian_to_geographic_aeqd",
"(",
"x",
",",
"y",
",",
"lon_0",
",",
"lat_0",
",",
"R",
")",
"else",
":",
"lon",
",",
"lat",
"=",
"cartesian_to_geographic_aeqd",
"(",
"x",
",",
"y",
",",
"lon_0",
",",
"lat_0",
")",
"else",
":",
"# Use pyproj for the projection",
"# check that pyproj is available",
"if",
"not",
"_PYPROJ_AVAILABLE",
":",
"raise",
"MissingOptionalDependency",
"(",
"\"PyProj is required to use cartesian_to_geographic \"",
"\"with a projection other than pyart_aeqd but it is not \"",
"\"installed\"",
")",
"proj",
"=",
"pyproj",
".",
"Proj",
"(",
"projparams",
")",
"lon",
",",
"lat",
"=",
"proj",
"(",
"x",
",",
"y",
",",
"inverse",
"=",
"True",
")",
"return",
"lon",
",",
"lat"
] |
https://github.com/ARM-DOE/pyart/blob/72affe5b669f1996cd3cc39ec7d8dd29b838bd48/pyart/core/transforms.py#L462-L508
|
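For the non-'pyart_aeqd' branch, the transform is a plain pyproj inverse projection. A minimal round trip through an azimuthal equidistant projection (requires pyproj; the coordinates are illustrative):

import pyproj

proj = pyproj.Proj(proj="aeqd", lon_0=-98.0, lat_0=36.0)
x, y = proj(-97.5, 36.5)             # forward: lon/lat -> metres
lon, lat = proj(x, y, inverse=True)  # inverse: metres -> lon/lat
print(round(lon, 4), round(lat, 4))  # -97.5 36.5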
|
mlrun/mlrun
|
4c120719d64327a34b7ee1ab08fb5e01b258b00a
|
mlrun/db/httpdb.py
|
python
|
HTTPRunDB.remote_start
|
(self, func_url)
|
return schemas.BackgroundTask(**resp.json())
|
Execute a function remotely. Used for ``dask`` functions.
:param func_url: URL to the function to be executed.
:returns: A BackgroundTask object, with details on execution process and its status.
|
Execute a function remotely. Used for ``dask`` functions.
|
[
"Execute",
"a",
"function",
"remotely",
"Used",
"for",
"dask",
"functions",
"."
] |
def remote_start(self, func_url) -> schemas.BackgroundTask:
""" Execute a function remotely, Used for ``dask`` functions.
:param func_url: URL to the function to be executed.
:returns: A BackgroundTask object, with details on execution process and its status.
"""
try:
req = {"functionUrl": func_url}
resp = self.api_call(
"POST",
"start/function",
json=req,
timeout=int(config.submit_timeout) or 60,
)
except OSError as err:
logger.error(f"error starting function: {err}")
raise OSError(f"error: cannot start function, {err}")
if not resp.ok:
logger.error(f"bad resp!!\n{resp.text}")
raise ValueError("bad function start response")
return schemas.BackgroundTask(**resp.json())
|
[
"def",
"remote_start",
"(",
"self",
",",
"func_url",
")",
"->",
"schemas",
".",
"BackgroundTask",
":",
"try",
":",
"req",
"=",
"{",
"\"functionUrl\"",
":",
"func_url",
"}",
"resp",
"=",
"self",
".",
"api_call",
"(",
"\"POST\"",
",",
"\"start/function\"",
",",
"json",
"=",
"req",
",",
"timeout",
"=",
"int",
"(",
"config",
".",
"submit_timeout",
")",
"or",
"60",
",",
")",
"except",
"OSError",
"as",
"err",
":",
"logger",
".",
"error",
"(",
"f\"error starting function: {err}\"",
")",
"raise",
"OSError",
"(",
"f\"error: cannot start function, {err}\"",
")",
"if",
"not",
"resp",
".",
"ok",
":",
"logger",
".",
"error",
"(",
"f\"bad resp!!\\n{resp.text}\"",
")",
"raise",
"ValueError",
"(",
"\"bad function start response\"",
")",
"return",
"schemas",
".",
"BackgroundTask",
"(",
"*",
"*",
"resp",
".",
"json",
"(",
")",
")"
] |
https://github.com/mlrun/mlrun/blob/4c120719d64327a34b7ee1ab08fb5e01b258b00a/mlrun/db/httpdb.py#L1170-L1193
|
|
phantomcyber/playbooks
|
9e850ecc44cb98c5dde53784744213a1ed5799bd
|
zscaler_hunt_and_block_url.py
|
python
|
regular_long_description
|
(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs)
|
return
|
[] |
def regular_long_description(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug('regular_long_description() called')
template = """Endpoint as trying to access a known bad URL:
{0}
Positives from VirusTotal: {1}
Link to Phantom Incident:
https://172.16.22.128/mission/{3}
Splunk Results:
{2}"""
# parameter list for template variable replacement
parameters = [
"artifact:*.cef.requestURL",
"url_reputation_2:action_result.summary.positives",
"filtered-data:filter_3:condition_1:run_query_1:action_result.data.*._raw",
"container:id",
]
phantom.format(container=container, template=template, parameters=parameters, name="regular_long_description")
create_regular_ticket(container=container)
return
|
[
"def",
"regular_long_description",
"(",
"action",
"=",
"None",
",",
"success",
"=",
"None",
",",
"container",
"=",
"None",
",",
"results",
"=",
"None",
",",
"handle",
"=",
"None",
",",
"filtered_artifacts",
"=",
"None",
",",
"filtered_results",
"=",
"None",
",",
"custom_function",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"phantom",
".",
"debug",
"(",
"'regular_long_description() called'",
")",
"template",
"=",
"\"\"\"Endpoint as trying to access a known bad URL:\n{0}\n\nPositives from VirusTotal: {1}\n\nLink to Phantom Incident: \nhttps://172.16.22.128/mission/{3}\n\nSplunk Results: \n{2}\"\"\"",
"# parameter list for template variable replacement",
"parameters",
"=",
"[",
"\"artifact:*.cef.requestURL\"",
",",
"\"url_reputation_2:action_result.summary.positives\"",
",",
"\"filtered-data:filter_3:condition_1:run_query_1:action_result.data.*._raw\"",
",",
"\"container:id\"",
",",
"]",
"phantom",
".",
"format",
"(",
"container",
"=",
"container",
",",
"template",
"=",
"template",
",",
"parameters",
"=",
"parameters",
",",
"name",
"=",
"\"regular_long_description\"",
")",
"create_regular_ticket",
"(",
"container",
"=",
"container",
")",
"return"
] |
https://github.com/phantomcyber/playbooks/blob/9e850ecc44cb98c5dde53784744213a1ed5799bd/zscaler_hunt_and_block_url.py#L354-L380
|
|||
RhetTbull/osxphotos
|
231d13279296ee4a242d3140d8abe7b5a5bcc9c0
|
osxphotos/photoinfo.py
|
python
|
PhotoInfo.exiftool
|
(self)
|
Returns an ExifToolCaching (read-only instance of ExifTool) object for the photo.
Requires that exiftool (https://exiftool.org/) be installed
If exiftool not installed, logs warning and returns None
If photo path is missing, returns None
|
Returns an ExifToolCaching (read-only instance of ExifTool) object for the photo.
Requires that exiftool (https://exiftool.org/) be installed
If exiftool not installed, logs warning and returns None
If photo path is missing, returns None
|
[
"Returns",
"a",
"ExifToolCaching",
"(",
"read",
"-",
"only",
"instance",
"of",
"ExifTool",
")",
"object",
"for",
"the",
"photo",
".",
"Requires",
"that",
"exiftool",
"(",
"https",
":",
"//",
"exiftool",
".",
"org",
"/",
")",
"be",
"installed",
"If",
"exiftool",
"not",
"installed",
"logs",
"warning",
"and",
"returns",
"None",
"If",
"photo",
"path",
"is",
"missing",
"returns",
"None"
] |
def exiftool(self):
"""Returns a ExifToolCaching (read-only instance of ExifTool) object for the photo.
Requires that exiftool (https://exiftool.org/) be installed
If exiftool not installed, logs warning and returns None
If photo path is missing, returns None
"""
try:
# return the memoized instance if it exists
return self._exiftool
except AttributeError:
try:
exiftool_path = self._db._exiftool_path or get_exiftool_path()
if self.path is not None and os.path.isfile(self.path):
exiftool = ExifToolCaching(self.path, exiftool=exiftool_path)
else:
exiftool = None
except FileNotFoundError:
# get_exiftool_path raises FileNotFoundError if exiftool not found
exiftool = None
logging.warning(
"exiftool not in path; download and install from https://exiftool.org/"
)
self._exiftool = exiftool
return self._exiftool
|
[
"def",
"exiftool",
"(",
"self",
")",
":",
"try",
":",
"# return the memoized instance if it exists",
"return",
"self",
".",
"_exiftool",
"except",
"AttributeError",
":",
"try",
":",
"exiftool_path",
"=",
"self",
".",
"_db",
".",
"_exiftool_path",
"or",
"get_exiftool_path",
"(",
")",
"if",
"self",
".",
"path",
"is",
"not",
"None",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"path",
")",
":",
"exiftool",
"=",
"ExifToolCaching",
"(",
"self",
".",
"path",
",",
"exiftool",
"=",
"exiftool_path",
")",
"else",
":",
"exiftool",
"=",
"None",
"except",
"FileNotFoundError",
":",
"# get_exiftool_path raises FileNotFoundError if exiftool not found",
"exiftool",
"=",
"None",
"logging",
".",
"warning",
"(",
"\"exiftool not in path; download and install from https://exiftool.org/\"",
")",
"self",
".",
"_exiftool",
"=",
"exiftool",
"return",
"self",
".",
"_exiftool"
] |
https://github.com/RhetTbull/osxphotos/blob/231d13279296ee4a242d3140d8abe7b5a5bcc9c0/osxphotos/photoinfo.py#L1328-L1352
|
||
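The try/except AttributeError idiom in exiftool above is a common hand-rolled memoization pattern: the first access computes and caches the value on the instance, later accesses return it directly. A minimal sketch of the same idiom with illustrative names (not the osxphotos API):

class LazyExif:
    def _build(self):
        # stand-in for the expensive ExifToolCaching construction
        return {"Make": "Canon"}

    @property
    def exif(self):
        try:
            return self._exif           # return the memoized value if it exists
        except AttributeError:
            self._exif = self._build()  # compute once, cache on the instance
            return self._exif

photo = LazyExif()
assert photo.exif is photo.exif         # second access hits the cache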
rembo10/headphones
|
b3199605be1ebc83a7a8feab6b1e99b64014187c
|
lib/biplist/__init__.py
|
python
|
PlistWriter.writeRoot
|
(self, root)
|
Strategy is:
- write header
- wrap root object so everything is hashable
- compute size of objects which will be written
- need to do this in order to know how large the object refs
will be in the list/dict/set reference lists
- write objects
- keep objects in writtenReferences
- keep positions of object references in referencePositions
- write object references with the length computed previously
- compute object reference length
- write object reference positions
- write trailer
|
Strategy is:
- write header
- wrap root object so everything is hashable
- compute size of objects which will be written
- need to do this in order to know how large the object refs
will be in the list/dict/set reference lists
- write objects
- keep objects in writtenReferences
- keep positions of object references in referencePositions
- write object references with the length computed previously
- compute object reference length
- write object reference positions
- write trailer
|
[
"Strategy",
"is",
":",
"-",
"write",
"header",
"-",
"wrap",
"root",
"object",
"so",
"everything",
"is",
"hashable",
"-",
"compute",
"size",
"of",
"objects",
"which",
"will",
"be",
"written",
"-",
"need",
"to",
"do",
"this",
"in",
"order",
"to",
"know",
"how",
"large",
"the",
"object",
"refs",
"will",
"be",
"in",
"the",
"list",
"/",
"dict",
"/",
"set",
"reference",
"lists",
"-",
"write",
"objects",
"-",
"keep",
"objects",
"in",
"writtenReferences",
"-",
"keep",
"positions",
"of",
"object",
"references",
"in",
"referencePositions",
"-",
"write",
"object",
"references",
"with",
"the",
"length",
"computed",
"previously",
"-",
"computer",
"object",
"reference",
"length",
"-",
"write",
"object",
"reference",
"positions",
"-",
"write",
"trailer"
] |
def writeRoot(self, root):
"""
Strategy is:
- write header
- wrap root object so everything is hashable
- compute size of objects which will be written
- need to do this in order to know how large the object refs
will be in the list/dict/set reference lists
- write objects
- keep objects in writtenReferences
- keep positions of object references in referencePositions
- write object references with the length computed previously
- compute object reference length
- write object reference positions
- write trailer
"""
output = self.header
wrapped_root = self.wrapRoot(root)
should_reference_root = True#not isinstance(wrapped_root, HashableWrapper)
self.computeOffsets(wrapped_root, asReference=should_reference_root, isRoot=True)
self.trailer = self.trailer._replace(**{'objectRefSize':self.intSize(len(self.computedUniques))})
(_, output) = self.writeObjectReference(wrapped_root, output)
output = self.writeObject(wrapped_root, output, setReferencePosition=True)
# output size at this point is an upper bound on how big the
# object reference offsets need to be.
self.trailer = self.trailer._replace(**{
'offsetSize':self.intSize(len(output)),
'offsetCount':len(self.computedUniques),
'offsetTableOffset':len(output),
'topLevelObjectNumber':0
})
output = self.writeOffsetTable(output)
output += pack('!xxxxxxBBQQQ', *self.trailer)
self.file.write(output)
|
[
"def",
"writeRoot",
"(",
"self",
",",
"root",
")",
":",
"output",
"=",
"self",
".",
"header",
"wrapped_root",
"=",
"self",
".",
"wrapRoot",
"(",
"root",
")",
"should_reference_root",
"=",
"True",
"#not isinstance(wrapped_root, HashableWrapper)",
"self",
".",
"computeOffsets",
"(",
"wrapped_root",
",",
"asReference",
"=",
"should_reference_root",
",",
"isRoot",
"=",
"True",
")",
"self",
".",
"trailer",
"=",
"self",
".",
"trailer",
".",
"_replace",
"(",
"*",
"*",
"{",
"'objectRefSize'",
":",
"self",
".",
"intSize",
"(",
"len",
"(",
"self",
".",
"computedUniques",
")",
")",
"}",
")",
"(",
"_",
",",
"output",
")",
"=",
"self",
".",
"writeObjectReference",
"(",
"wrapped_root",
",",
"output",
")",
"output",
"=",
"self",
".",
"writeObject",
"(",
"wrapped_root",
",",
"output",
",",
"setReferencePosition",
"=",
"True",
")",
"# output size at this point is an upper bound on how big the",
"# object reference offsets need to be.",
"self",
".",
"trailer",
"=",
"self",
".",
"trailer",
".",
"_replace",
"(",
"*",
"*",
"{",
"'offsetSize'",
":",
"self",
".",
"intSize",
"(",
"len",
"(",
"output",
")",
")",
",",
"'offsetCount'",
":",
"len",
"(",
"self",
".",
"computedUniques",
")",
",",
"'offsetTableOffset'",
":",
"len",
"(",
"output",
")",
",",
"'topLevelObjectNumber'",
":",
"0",
"}",
")",
"output",
"=",
"self",
".",
"writeOffsetTable",
"(",
"output",
")",
"output",
"+=",
"pack",
"(",
"'!xxxxxxBBQQQ'",
",",
"*",
"self",
".",
"trailer",
")",
"self",
".",
"file",
".",
"write",
"(",
"output",
")"
] |
https://github.com/rembo10/headphones/blob/b3199605be1ebc83a7a8feab6b1e99b64014187c/lib/biplist/__init__.py#L492-L527
|
||
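The final pack('!xxxxxxBBQQQ', *self.trailer) above emits the fixed 32-byte binary-plist trailer: six pad bytes, two one-byte sizes, and three big-endian 64-bit integers. A small sketch of that layout (the trailer values are illustrative; the real field order follows the writer's trailer namedtuple):

from struct import calcsize, pack

fmt = '!xxxxxxBBQQQ'        # 6 pad bytes, 2 x uint8, 3 x big-endian uint64
assert calcsize(fmt) == 32  # a binary plist trailer is always 32 bytes

trailer = pack(fmt, 2, 1, 5, 0, 123)  # made-up size/count/offset values
print(len(trailer), trailer.hex())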
henkelis/sonospy
|
841f52010fd6e1e932d8f1a8896ad4e5a0667b8a
|
web2py/gluon/tools.py
|
python
|
Auth.del_membership
|
(self, group_id, user_id=None)
|
return self.db(membership.user_id
== user_id)(membership.group_id
== group_id).delete()
|
revokes membership from group_id to user_id
if group_id==None then user_id is that of current logged in user
|
revokes membership from group_id to user_id
if group_id==None then user_id is that of current logged in user
|
[
"revokes",
"membership",
"from",
"group_id",
"to",
"user_id",
"if",
"group_id",
"==",
"None",
"than",
"user_id",
"is",
"that",
"of",
"current",
"logged",
"in",
"user"
] |
def del_membership(self, group_id, user_id=None):
"""
revokes membership from group_id to user_id
if group_id==None then user_id is that of current logged in user
"""
if not user_id and self.user:
user_id = self.user.id
membership = self.settings.table_membership
log = self.messages.del_membership_log
if log:
self.log_event(log % dict(user_id=user_id,
group_id=group_id))
return self.db(membership.user_id
== user_id)(membership.group_id
== group_id).delete()
|
[
"def",
"del_membership",
"(",
"self",
",",
"group_id",
",",
"user_id",
"=",
"None",
")",
":",
"if",
"not",
"user_id",
"and",
"self",
".",
"user",
":",
"user_id",
"=",
"self",
".",
"user",
".",
"id",
"membership",
"=",
"self",
".",
"settings",
".",
"table_membership",
"log",
"=",
"self",
".",
"messages",
".",
"del_membership_log",
"if",
"log",
":",
"self",
".",
"log_event",
"(",
"log",
"%",
"dict",
"(",
"user_id",
"=",
"user_id",
",",
"group_id",
"=",
"group_id",
")",
")",
"return",
"self",
".",
"db",
"(",
"membership",
".",
"user_id",
"==",
"user_id",
")",
"(",
"membership",
".",
"group_id",
"==",
"group_id",
")",
".",
"delete",
"(",
")"
] |
https://github.com/henkelis/sonospy/blob/841f52010fd6e1e932d8f1a8896ad4e5a0667b8a/web2py/gluon/tools.py#L2010-L2025
|
|
TencentCloud/tencentcloud-sdk-python
|
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
|
tencentcloud/redis/v20180412/models.py
|
python
|
InquiryPriceCreateInstanceRequest.__init__
|
(self)
|
r"""
:param TypeId: 实例类型:2 – Redis2.8内存版(标准架构),3 – CKV 3.2内存版(标准架构),4 – CKV 3.2内存版(集群架构),6 – Redis4.0内存版(标准架构),7 – Redis4.0内存版(集群架构),8 – Redis5.0内存版(标准架构),9 – Redis5.0内存版(集群架构)。
:type TypeId: int
:param MemSize: 内存容量,单位为MB, 数值需为1024的整数倍,具体规格以 [查询产品售卖规格](https://cloud.tencent.com/document/api/239/30600) 返回的规格为准。
TypeId为标准架构时,MemSize是实例总内存容量;TypeId为集群架构时,MemSize是单分片内存容量。
:type MemSize: int
:param GoodsNum: 实例数量,单次购买实例数量以 [查询产品售卖规格](https://cloud.tencent.com/document/api/239/30600) 返回的规格为准。
:type GoodsNum: int
:param Period: 购买时长,在创建包年包月实例的时候需要填写,按量计费实例填1即可,单位:月,取值范围 [1,2,3,4,5,6,7,8,9,10,11,12,24,36]。
:type Period: int
:param BillingMode: 付费方式:0-按量计费,1-包年包月。
:type BillingMode: int
:param ZoneId: 实例所属的可用区ID,可参考[地域和可用区](https://cloud.tencent.com/document/product/239/4106) 。
:type ZoneId: int
:param RedisShardNum: 实例分片数量,Redis2.8标准架构、CKV标准架构和Redis2.8单机版、Redis4.0标准架构不需要填写。
:type RedisShardNum: int
:param RedisReplicasNum: 实例副本数量,Redis2.8标准架构、CKV标准架构和Redis2.8单机版不需要填写。
:type RedisReplicasNum: int
:param ReplicasReadonly: 是否支持副本只读,Redis2.8标准架构、CKV标准架构和Redis2.8单机版不需要填写。
:type ReplicasReadonly: bool
:param ZoneName: 实例所属的可用区名称,可参考[地域和可用区](https://cloud.tencent.com/document/product/239/4106) 。
:type ZoneName: str
|
r"""
:param TypeId: 实例类型:2 – Redis2.8内存版(标准架构),3 – CKV 3.2内存版(标准架构),4 – CKV 3.2内存版(集群架构),6 – Redis4.0内存版(标准架构),7 – Redis4.0内存版(集群架构),8 – Redis5.0内存版(标准架构),9 – Redis5.0内存版(集群架构)。
:type TypeId: int
:param MemSize: 内存容量,单位为MB, 数值需为1024的整数倍,具体规格以 [查询产品售卖规格](https://cloud.tencent.com/document/api/239/30600) 返回的规格为准。
TypeId为标准架构时,MemSize是实例总内存容量;TypeId为集群架构时,MemSize是单分片内存容量。
:type MemSize: int
:param GoodsNum: 实例数量,单次购买实例数量以 [查询产品售卖规格](https://cloud.tencent.com/document/api/239/30600) 返回的规格为准。
:type GoodsNum: int
:param Period: 购买时长,在创建包年包月实例的时候需要填写,按量计费实例填1即可,单位:月,取值范围 [1,2,3,4,5,6,7,8,9,10,11,12,24,36]。
:type Period: int
:param BillingMode: 付费方式:0-按量计费,1-包年包月。
:type BillingMode: int
:param ZoneId: 实例所属的可用区ID,可参考[地域和可用区](https://cloud.tencent.com/document/product/239/4106) 。
:type ZoneId: int
:param RedisShardNum: 实例分片数量,Redis2.8标准架构、CKV标准架构和Redis2.8单机版、Redis4.0标准架构不需要填写。
:type RedisShardNum: int
:param RedisReplicasNum: 实例副本数量,Redis2.8标准架构、CKV标准架构和Redis2.8单机版不需要填写。
:type RedisReplicasNum: int
:param ReplicasReadonly: 是否支持副本只读,Redis2.8标准架构、CKV标准架构和Redis2.8单机版不需要填写。
:type ReplicasReadonly: bool
:param ZoneName: 实例所属的可用区名称,可参考[地域和可用区](https://cloud.tencent.com/document/product/239/4106) 。
:type ZoneName: str
|
[
"r",
":",
"param",
"TypeId",
":",
"实例类型:2",
"–",
"Redis2",
".",
"8内存版",
"(",
"标准架构",
")",
",3",
"–",
"CKV",
"3",
".",
"2内存版",
"(",
"标准架构",
")",
",4",
"–",
"CKV",
"3",
".",
"2内存版",
"(",
"集群架构",
")",
",6",
"–",
"Redis4",
".",
"0内存版",
"(",
"标准架构",
")",
",7",
"–",
"Redis4",
".",
"0内存版",
"(",
"集群架构",
")",
",8",
"–",
"Redis5",
".",
"0内存版",
"(",
"标准架构",
")",
",9",
"–",
"Redis5",
".",
"0内存版",
"(",
"集群架构",
")",
"。",
":",
"type",
"TypeId",
":",
"int",
":",
"param",
"MemSize",
":",
"内存容量,单位为MB,",
"数值需为1024的整数倍,具体规格以",
"[",
"查询产品售卖规格",
"]",
"(",
"https",
":",
"//",
"cloud",
".",
"tencent",
".",
"com",
"/",
"document",
"/",
"api",
"/",
"239",
"/",
"30600",
")",
"返回的规格为准。",
"TypeId为标准架构时,MemSize是实例总内存容量;TypeId为集群架构时,MemSize是单分片内存容量。",
":",
"type",
"MemSize",
":",
"int",
":",
"param",
"GoodsNum",
":",
"实例数量,单次购买实例数量以",
"[",
"查询产品售卖规格",
"]",
"(",
"https",
":",
"//",
"cloud",
".",
"tencent",
".",
"com",
"/",
"document",
"/",
"api",
"/",
"239",
"/",
"30600",
")",
"返回的规格为准。",
":",
"type",
"GoodsNum",
":",
"int",
":",
"param",
"Period",
":",
"购买时长,在创建包年包月实例的时候需要填写,按量计费实例填1即可,单位:月,取值范围",
"[",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"11",
"12",
"24",
"36",
"]",
"。",
":",
"type",
"Period",
":",
"int",
":",
"param",
"BillingMode",
":",
"付费方式",
":",
"0",
"-",
"按量计费,1",
"-",
"包年包月。",
":",
"type",
"BillingMode",
":",
"int",
":",
"param",
"ZoneId",
":",
"实例所属的可用区ID,可参考",
"[",
"地域和可用区",
"]",
"(",
"https",
":",
"//",
"cloud",
".",
"tencent",
".",
"com",
"/",
"document",
"/",
"product",
"/",
"239",
"/",
"4106",
")",
"。",
":",
"type",
"ZoneId",
":",
"int",
":",
"param",
"RedisShardNum",
":",
"实例分片数量,Redis2",
".",
"8标准架构、CKV标准架构和Redis2",
".",
"8单机版、Redis4",
".",
"0标准架构不需要填写。",
":",
"type",
"RedisShardNum",
":",
"int",
":",
"param",
"RedisReplicasNum",
":",
"实例副本数量,Redis2",
".",
"8标准架构、CKV标准架构和Redis2",
".",
"8单机版不需要填写。",
":",
"type",
"RedisReplicasNum",
":",
"int",
":",
"param",
"ReplicasReadonly",
":",
"是否支持副本只读,Redis2",
".",
"8标准架构、CKV标准架构和Redis2",
".",
"8单机版不需要填写。",
":",
"type",
"ReplicasReadonly",
":",
"bool",
":",
"param",
"ZoneName",
":",
"实例所属的可用区名称,可参考",
"[",
"地域和可用区",
"]",
"(",
"https",
":",
"//",
"cloud",
".",
"tencent",
".",
"com",
"/",
"document",
"/",
"product",
"/",
"239",
"/",
"4106",
")",
"。",
":",
"type",
"ZoneName",
":",
"str"
] |
def __init__(self):
r"""
:param TypeId: Instance type: 2 – Redis 2.8 memory edition (standard architecture), 3 – CKV 3.2 memory edition (standard architecture), 4 – CKV 3.2 memory edition (cluster architecture), 6 – Redis 4.0 memory edition (standard architecture), 7 – Redis 4.0 memory edition (cluster architecture), 8 – Redis 5.0 memory edition (standard architecture), 9 – Redis 5.0 memory edition (cluster architecture).
:type TypeId: int
:param MemSize: Memory capacity in MB; the value must be an integer multiple of 1024. The exact specifications are subject to those returned by [querying the product sale specifications](https://cloud.tencent.com/document/api/239/30600).
When TypeId is a standard architecture, MemSize is the total instance memory; when TypeId is a cluster architecture, MemSize is the memory per shard.
:type MemSize: int
:param GoodsNum: Number of instances; the number that can be purchased at a time is subject to the specifications returned by [querying the product sale specifications](https://cloud.tencent.com/document/api/239/30600).
:type GoodsNum: int
:param Period: Purchase duration. Required when creating a monthly-subscription instance; pay-as-you-go instances should fill in 1. Unit: month; value range [1,2,3,4,5,6,7,8,9,10,11,12,24,36].
:type Period: int
:param BillingMode: Billing mode: 0 - pay-as-you-go, 1 - monthly subscription.
:type BillingMode: int
:param ZoneId: ID of the availability zone the instance belongs to; see [Regions and Availability Zones](https://cloud.tencent.com/document/product/239/4106).
:type ZoneId: int
:param RedisShardNum: Number of instance shards. Not required for Redis 2.8 standard architecture, CKV standard architecture, Redis 2.8 standalone edition, or Redis 4.0 standard architecture.
:type RedisShardNum: int
:param RedisReplicasNum: Number of instance replicas. Not required for Redis 2.8 standard architecture, CKV standard architecture, or Redis 2.8 standalone edition.
:type RedisReplicasNum: int
:param ReplicasReadonly: Whether read-only replicas are supported. Not required for Redis 2.8 standard architecture, CKV standard architecture, or Redis 2.8 standalone edition.
:type ReplicasReadonly: bool
:param ZoneName: Name of the availability zone the instance belongs to; see [Regions and Availability Zones](https://cloud.tencent.com/document/product/239/4106).
:type ZoneName: str
"""
self.TypeId = None
self.MemSize = None
self.GoodsNum = None
self.Period = None
self.BillingMode = None
self.ZoneId = None
self.RedisShardNum = None
self.RedisReplicasNum = None
self.ReplicasReadonly = None
self.ZoneName = None
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"TypeId",
"=",
"None",
"self",
".",
"MemSize",
"=",
"None",
"self",
".",
"GoodsNum",
"=",
"None",
"self",
".",
"Period",
"=",
"None",
"self",
".",
"BillingMode",
"=",
"None",
"self",
".",
"ZoneId",
"=",
"None",
"self",
".",
"RedisShardNum",
"=",
"None",
"self",
".",
"RedisReplicasNum",
"=",
"None",
"self",
".",
"ReplicasReadonly",
"=",
"None",
"self",
".",
"ZoneName",
"=",
"None"
] |
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/redis/v20180412/models.py#L3478-L3511
|
||
keiohta/tf2rl
|
43523930b3328b28fcf2ce64e6a9a8cf4a403044
|
tf2rl/algos/bi_res_ddpg.py
|
python
|
BiResDDPG.__init__
|
(self, eta=0.05, name="BiResDDPG", **kwargs)
|
Initialize BiResDDPG agent
Args:
eta (float): Gradients mixing factor.
name (str): Name of agent. The default is ``"BiResDDPG"``.
state_shape (iterable of int):
action_dim (int):
max_action (float): Size of maximum action. (``-max_action`` <= action <= ``max_action``). The default is ``1``.
lr_actor (float): Learning rate for actor network. The default is ``0.001``.
lr_critic (float): Learning rate for critic network. The default is ``0.001``.
actor_units (iterable of int): Number of units at hidden layers of actor.
critic_units (iterable of int): Number of units at hidden layers of critic.
sigma (float): Standard deviation of Gaussian noise. The default is ``0.1``.
tau (float): Weight update ratio for target network. ``target = (1-tau)*target + tau*network`` The default is ``0.005``.
n_warmup (int): Number of warmup steps before training. The default is ``1e4``.
memory_capacity (int): Replay Buffer size. The default is ``1e4``.
batch_size (int): Batch size. The default is ``256``.
discount (float): Discount factor. The default is ``0.99``.
max_grad (float): Maximum gradient. The default is ``10``.
gpu (int): GPU id. ``-1`` disables GPU. The default is ``0``.
|
Initialize BiResDDPG agent
|
[
"Initialize",
"BiResDDPG",
"agent"
] |
def __init__(self, eta=0.05, name="BiResDDPG", **kwargs):
"""
Initialize BiResDDPG agent
Args:
eta (float): Gradients mixing factor.
name (str): Name of agent. The default is ``"BiResDDPG"``.
state_shape (iterable of int):
action_dim (int):
max_action (float): Size of maximum action. (``-max_action`` <= action <= ``max_action``). The default is ``1``.
lr_actor (float): Learning rate for actor network. The default is ``0.001``.
lr_critic (float): Learning rate for critic network. The default is ``0.001``.
actor_units (iterable of int): Number of units at hidden layers of actor.
critic_units (iterable of int): Number of units at hidden layers of critic.
sigma (float): Standard deviation of Gaussian noise. The default is ``0.1``.
tau (float): Weight update ratio for target network. ``target = (1-tau)*target + tau*network`` The default is ``0.005``.
n_warmup (int): Number of warmup steps before training. The default is ``1e4``.
memory_capacity (int): Replay Buffer size. The default is ``1e4``.
batch_size (int): Batch size. The default is ``256``.
discount (float): Discount factor. The default is ``0.99``.
max_grad (float): Maximum gradient. The default is ``10``.
gpu (int): GPU id. ``-1`` disables GPU. The default is ``0``.
"""
kwargs["name"] = name
super().__init__(**kwargs)
self._eta = eta
|
[
"def",
"__init__",
"(",
"self",
",",
"eta",
"=",
"0.05",
",",
"name",
"=",
"\"BiResDDPG\"",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"\"name\"",
"]",
"=",
"name",
"super",
"(",
")",
".",
"__init__",
"(",
"*",
"*",
"kwargs",
")",
"self",
".",
"_eta",
"=",
"eta"
] |
https://github.com/keiohta/tf2rl/blob/43523930b3328b28fcf2ce64e6a9a8cf4a403044/tf2rl/algos/bi_res_ddpg.py#L21-L46
|
||
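The constructor above forwards everything except eta to the parent class after injecting its own name default, a common kwargs-passthrough pattern for deep config hierarchies. A self-contained sketch with toy classes (not the tf2rl class hierarchy):

class Base:
    def __init__(self, name="Base", lr_actor=0.001):
        self.name = name
        self.lr_actor = lr_actor

class Child(Base):
    def __init__(self, eta=0.05, name="Child", **kwargs):
        kwargs["name"] = name        # inject the subclass default
        super().__init__(**kwargs)   # every other option passes through
        self._eta = eta

agent = Child(lr_actor=0.01)
print(agent.name, agent.lr_actor, agent._eta)  # -> Child 0.01 0.05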
cmbruns/pyopenvr
|
ac4847a8a05cda0d4bcf7c4f243008b2a191c7a5
|
src/translate/generator.py
|
python
|
main
|
(sub_version=1)
|
[] |
def main(sub_version=1):
file_name1 = 'openvr.h'
file_string1 = pkg_resources.resource_string(__name__, file_name1)
declarations = Parser().parse_file(file_name=file_name1, file_string=file_string1)
version = get_version(declarations)
patch_version = str(version[2]).zfill(2) + str(sub_version).zfill(2)
py_version = (version[0], version[1], patch_version)
write_version(
file_out=open('../openvr/version.py', 'w'),
version=py_version,
)
generator = CTypesGenerator()
generator.generate(
declarations=declarations,
file_out=open('../openvr/__init__.py', 'w', newline=None),
version=version,
)
generator.generate_errors(
declarations=declarations,
file_out=open('../openvr/error_code/__init__.py', 'w', newline=None),
)
|
[
"def",
"main",
"(",
"sub_version",
"=",
"1",
")",
":",
"file_name1",
"=",
"'openvr.h'",
"file_string1",
"=",
"pkg_resources",
".",
"resource_string",
"(",
"__name__",
",",
"file_name1",
")",
"declarations",
"=",
"Parser",
"(",
")",
".",
"parse_file",
"(",
"file_name",
"=",
"file_name1",
",",
"file_string",
"=",
"file_string1",
")",
"version",
"=",
"get_version",
"(",
"declarations",
")",
"patch_version",
"=",
"str",
"(",
"version",
"[",
"2",
"]",
")",
".",
"zfill",
"(",
"2",
")",
"+",
"str",
"(",
"sub_version",
")",
".",
"zfill",
"(",
"2",
")",
"py_version",
"=",
"(",
"version",
"[",
"0",
"]",
",",
"version",
"[",
"1",
"]",
",",
"patch_version",
")",
"write_version",
"(",
"file_out",
"=",
"open",
"(",
"'../openvr/version.py'",
",",
"'w'",
")",
",",
"version",
"=",
"py_version",
",",
")",
"generator",
"=",
"CTypesGenerator",
"(",
")",
"generator",
".",
"generate",
"(",
"declarations",
"=",
"declarations",
",",
"file_out",
"=",
"open",
"(",
"'../openvr/__init__.py'",
",",
"'w'",
",",
"newline",
"=",
"None",
")",
",",
"version",
"=",
"version",
",",
")",
"generator",
".",
"generate_errors",
"(",
"declarations",
"=",
"declarations",
",",
"file_out",
"=",
"open",
"(",
"'../openvr/error_code/__init__.py'",
",",
"'w'",
",",
"newline",
"=",
"None",
")",
",",
")"
] |
https://github.com/cmbruns/pyopenvr/blob/ac4847a8a05cda0d4bcf7c4f243008b2a191c7a5/src/translate/generator.py#L342-L362
|
||||
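The patch_version built above zero-pads the header patch level and the generator sub-version into a four-digit string. A worked example with a hypothetical version tuple (not a real openvr.h version):

version = (1, 14, 7)   # hypothetical (major, minor, patch) from openvr.h
sub_version = 1
patch_version = str(version[2]).zfill(2) + str(sub_version).zfill(2)
py_version = (version[0], version[1], patch_version)
print(py_version)      # -> (1, 14, '0701')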
AppScale/gts
|
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
|
AppServer/google/appengine/api/taskqueue/taskqueue.py
|
python
|
Queue.modify_task_lease
|
(self, task, lease_seconds)
|
Modifies the lease of a task in this queue.
Args:
task: A task instance that will have its lease modified.
lease_seconds: Number of seconds, from the current time, that the task
lease will be modified to. If lease_seconds is 0, then the task lease
is removed and the task will be available for leasing again using
the lease_tasks method.
Raises:
TypeError: if lease_seconds is not a valid float or integer.
InvalidLeaseTimeError: if lease_seconds is outside the valid range.
Error-subclass on application errors.
|
Modifies the lease of a task in this queue.
|
[
"Modifies",
"the",
"lease",
"of",
"a",
"task",
"in",
"this",
"queue",
"."
] |
def modify_task_lease(self, task, lease_seconds):
"""Modifies the lease of a task in this queue.
Args:
task: A task instance that will have its lease modified.
lease_seconds: Number of seconds, from the current time, that the task
lease will be modified to. If lease_seconds is 0, then the task lease
is removed and the task will be available for leasing again using
the lease_tasks method.
Raises:
TypeError: if lease_seconds is not a valid float or integer.
InvalidLeaseTimeError: if lease_seconds is outside the valid range.
Error-subclass on application errors.
"""
lease_seconds = self._ValidateLeaseSeconds(lease_seconds)
request = taskqueue_service_pb.TaskQueueModifyTaskLeaseRequest()
response = taskqueue_service_pb.TaskQueueModifyTaskLeaseResponse()
request.set_queue_name(self.__name)
request.set_task_name(task.name)
request.set_eta_usec(task._eta_usec)
request.set_lease_seconds(lease_seconds)
try:
apiproxy_stub_map.MakeSyncCall('taskqueue',
'ModifyTaskLease',
request,
response)
except apiproxy_errors.ApplicationError, e:
raise _TranslateError(e.application_error, e.error_detail)
task._Task__eta_posix = response.updated_eta_usec() * 1e-6
task._Task__eta = None
|
[
"def",
"modify_task_lease",
"(",
"self",
",",
"task",
",",
"lease_seconds",
")",
":",
"lease_seconds",
"=",
"self",
".",
"_ValidateLeaseSeconds",
"(",
"lease_seconds",
")",
"request",
"=",
"taskqueue_service_pb",
".",
"TaskQueueModifyTaskLeaseRequest",
"(",
")",
"response",
"=",
"taskqueue_service_pb",
".",
"TaskQueueModifyTaskLeaseResponse",
"(",
")",
"request",
".",
"set_queue_name",
"(",
"self",
".",
"__name",
")",
"request",
".",
"set_task_name",
"(",
"task",
".",
"name",
")",
"request",
".",
"set_eta_usec",
"(",
"task",
".",
"_eta_usec",
")",
"request",
".",
"set_lease_seconds",
"(",
"lease_seconds",
")",
"try",
":",
"apiproxy_stub_map",
".",
"MakeSyncCall",
"(",
"'taskqueue'",
",",
"'ModifyTaskLease'",
",",
"request",
",",
"response",
")",
"except",
"apiproxy_errors",
".",
"ApplicationError",
",",
"e",
":",
"raise",
"_TranslateError",
"(",
"e",
".",
"application_error",
",",
"e",
".",
"error_detail",
")",
"task",
".",
"_Task__eta_posix",
"=",
"response",
".",
"updated_eta_usec",
"(",
")",
"*",
"1e-6",
"task",
".",
"_Task__eta",
"=",
"None"
] |
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/google/appengine/api/taskqueue/taskqueue.py#L2131-L2165
|
||
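The last lines above convert the service's microsecond ETA back to POSIX seconds by multiplying by 1e-6. A small worked example with a hypothetical timestamp:

import datetime

updated_eta_usec = 1700000000000000  # hypothetical microseconds since the epoch
eta_posix = updated_eta_usec * 1e-6  # seconds, as cached on the Task
print(datetime.datetime.fromtimestamp(eta_posix, tz=datetime.timezone.utc))
# -> 2023-11-14 22:13:20+00:00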
tensorflow/graphics
|
86997957324bfbdd85848daae989b4c02588faa0
|
tensorflow_graphics/nn/metric/precision.py
|
python
|
evaluate
|
(ground_truth: type_alias.TensorLike,
prediction: type_alias.TensorLike,
classes: Optional[Union[int, List[int], Tuple[int]]] = None,
reduce_average: bool = True,
prediction_to_category_function: Callable[..., Any] = _cast_to_int,
name: str = "precision_evaluate")
|
Computes the precision metric for the given ground truth and predictions.
Note:
In the following, A1 to An are optional batch dimensions, which must be
broadcast compatible.
Args:
ground_truth: A tensor of shape `[A1, ..., An, N]`, where the last axis
represents the ground truth labels. Will be cast to int32.
prediction: A tensor of shape `[A1, ..., An, N]`, where the last axis
represents the predictions (which can be continuous).
classes: An integer or a list/tuple of integers representing the classes for
which the precision will be evaluated. In case 'classes' is 'None', the
number of classes will be inferred from the given labels and the precision
will be calculated for each of the classes. Defaults to 'None'.
reduce_average: Whether to calculate the average of the precision for each
class and return a single precision value. Defaults to true.
prediction_to_category_function: A function to associate a `prediction` to a
category. Defaults to rounding down the value of the prediction to the
nearest integer value.
name: A name for this op. Defaults to "precision_evaluate".
Returns:
A tensor of shape `[A1, ..., An, C]`, where the last axis represents the
precision calculated for each of the requested classes.
Raises:
ValueError: if the shape of `ground_truth`, `prediction` is not supported.
|
Computes the precision metric for the given ground truth and predictions.
|
[
"Computes",
"the",
"precision",
"metric",
"for",
"the",
"given",
"ground",
"truth",
"and",
"predictions",
"."
] |
def evaluate(ground_truth: type_alias.TensorLike,
prediction: type_alias.TensorLike,
classes: Optional[Union[int, List[int], Tuple[int]]] = None,
reduce_average: bool = True,
prediction_to_category_function: Callable[..., Any] = _cast_to_int,
name: str = "precision_evaluate") -> tf.Tensor:
"""Computes the precision metric for the given ground truth and predictions.
Note:
In the following, A1 to An are optional batch dimensions, which must be
broadcast compatible.
Args:
ground_truth: A tensor of shape `[A1, ..., An, N]`, where the last axis
represents the ground truth labels. Will be cast to int32.
prediction: A tensor of shape `[A1, ..., An, N]`, where the last axis
represents the predictions (which can be continuous).
classes: An integer or a list/tuple of integers representing the classes for
which the precision will be evaluated. In case 'classes' is 'None', the
number of classes will be inferred from the given labels and the precision
will be calculated for each of the classes. Defaults to 'None'.
reduce_average: Whether to calculate the average of the precision for each
class and return a single precision value. Defaults to true.
prediction_to_category_function: A function to associate a `prediction` to a
category. Defaults to rounding down the value of the prediction to the
nearest integer value.
name: A name for this op. Defaults to "precision_evaluate".
Returns:
A tensor of shape `[A1, ..., An, C]`, where the last axis represents the
precision calculated for each of the requested classes.
Raises:
ValueError: if the shape of `ground_truth`, `prediction` is not supported.
"""
with tf.name_scope(name):
ground_truth = tf.cast(
x=tf.convert_to_tensor(value=ground_truth), dtype=tf.int32)
prediction = tf.convert_to_tensor(value=prediction)
shape.compare_batch_dimensions(
tensors=(ground_truth, prediction),
tensor_names=("ground_truth", "prediction"),
last_axes=-1,
broadcast_compatible=True)
prediction = prediction_to_category_function(prediction)
if classes is None:
num_classes = tf.math.maximum(
tf.math.reduce_max(input_tensor=ground_truth),
tf.math.reduce_max(input_tensor=prediction)) + 1
classes = tf.range(num_classes)
else:
classes = tf.convert_to_tensor(value=classes)
# Make sure classes is a tensor of rank 1.
classes = tf.reshape(classes, [1]) if tf.rank(classes) == 0 else classes
# Create a confusion matrix for each of the classes (with dimensions
# [A1, ..., An, C, N]).
classes = tf.expand_dims(classes, -1)
ground_truth_per_class = tf.equal(tf.expand_dims(ground_truth, -2), classes)
prediction_per_class = tf.equal(tf.expand_dims(prediction, -2), classes)
# Calculate the precision for each of the classes.
true_positives = tf.math.reduce_sum(
input_tensor=tf.cast(
x=tf.math.logical_and(ground_truth_per_class, prediction_per_class),
dtype=tf.float32),
axis=-1)
total_predicted_positives = tf.math.reduce_sum(
input_tensor=tf.cast(x=prediction_per_class, dtype=tf.float32), axis=-1)
precision_per_class = safe_ops.safe_signed_div(true_positives,
total_predicted_positives)
if reduce_average:
return tf.math.reduce_mean(input_tensor=precision_per_class, axis=-1)
else:
return precision_per_class
|
[
"def",
"evaluate",
"(",
"ground_truth",
":",
"type_alias",
".",
"TensorLike",
",",
"prediction",
":",
"type_alias",
".",
"TensorLike",
",",
"classes",
":",
"Optional",
"[",
"Union",
"[",
"int",
",",
"List",
"[",
"int",
"]",
",",
"Tuple",
"[",
"int",
"]",
"]",
"]",
"=",
"None",
",",
"reduce_average",
":",
"bool",
"=",
"True",
",",
"prediction_to_category_function",
":",
"Callable",
"[",
"...",
",",
"Any",
"]",
"=",
"_cast_to_int",
",",
"name",
":",
"str",
"=",
"\"precision_evaluate\"",
")",
"->",
"tf",
".",
"Tensor",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
":",
"ground_truth",
"=",
"tf",
".",
"cast",
"(",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"ground_truth",
")",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"prediction",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"prediction",
")",
"shape",
".",
"compare_batch_dimensions",
"(",
"tensors",
"=",
"(",
"ground_truth",
",",
"prediction",
")",
",",
"tensor_names",
"=",
"(",
"\"ground_truth\"",
",",
"\"prediction\"",
")",
",",
"last_axes",
"=",
"-",
"1",
",",
"broadcast_compatible",
"=",
"True",
")",
"prediction",
"=",
"prediction_to_category_function",
"(",
"prediction",
")",
"if",
"classes",
"is",
"None",
":",
"num_classes",
"=",
"tf",
".",
"math",
".",
"maximum",
"(",
"tf",
".",
"math",
".",
"reduce_max",
"(",
"input_tensor",
"=",
"ground_truth",
")",
",",
"tf",
".",
"math",
".",
"reduce_max",
"(",
"input_tensor",
"=",
"prediction",
")",
")",
"+",
"1",
"classes",
"=",
"tf",
".",
"range",
"(",
"num_classes",
")",
"else",
":",
"classes",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"value",
"=",
"classes",
")",
"# Make sure classes is a tensor of rank 1.",
"classes",
"=",
"tf",
".",
"reshape",
"(",
"classes",
",",
"[",
"1",
"]",
")",
"if",
"tf",
".",
"rank",
"(",
"classes",
")",
"==",
"0",
"else",
"classes",
"# Create a confusion matrix for each of the classes (with dimensions",
"# [A1, ..., An, C, N]).",
"classes",
"=",
"tf",
".",
"expand_dims",
"(",
"classes",
",",
"-",
"1",
")",
"ground_truth_per_class",
"=",
"tf",
".",
"equal",
"(",
"tf",
".",
"expand_dims",
"(",
"ground_truth",
",",
"-",
"2",
")",
",",
"classes",
")",
"prediction_per_class",
"=",
"tf",
".",
"equal",
"(",
"tf",
".",
"expand_dims",
"(",
"prediction",
",",
"-",
"2",
")",
",",
"classes",
")",
"# Calculate the precision for each of the classes.",
"true_positives",
"=",
"tf",
".",
"math",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"tf",
".",
"cast",
"(",
"x",
"=",
"tf",
".",
"math",
".",
"logical_and",
"(",
"ground_truth_per_class",
",",
"prediction_per_class",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
",",
"axis",
"=",
"-",
"1",
")",
"total_predicted_positives",
"=",
"tf",
".",
"math",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"tf",
".",
"cast",
"(",
"x",
"=",
"prediction_per_class",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
",",
"axis",
"=",
"-",
"1",
")",
"precision_per_class",
"=",
"safe_ops",
".",
"safe_signed_div",
"(",
"true_positives",
",",
"total_predicted_positives",
")",
"if",
"reduce_average",
":",
"return",
"tf",
".",
"math",
".",
"reduce_mean",
"(",
"input_tensor",
"=",
"precision_per_class",
",",
"axis",
"=",
"-",
"1",
")",
"else",
":",
"return",
"precision_per_class"
] |
https://github.com/tensorflow/graphics/blob/86997957324bfbdd85848daae989b4c02588faa0/tensorflow_graphics/nn/metric/precision.py#L34-L111
|
||
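The per-class trick above — comparing labels against a [C, 1] column of class ids to get [C, N] boolean masks — ports directly to NumPy. A minimal sketch of the same computation (assumes NumPy; toy labels, no batch dimensions):

import numpy as np

ground_truth = np.array([0, 1, 1, 2, 2])
prediction = np.array([0, 1, 2, 2, 2])
classes = np.arange(3).reshape(-1, 1)            # shape [C, 1]

gt_per_class = ground_truth[None, :] == classes  # [C, N] boolean masks
pred_per_class = prediction[None, :] == classes
true_positives = (gt_per_class & pred_per_class).sum(axis=-1)
predicted_positives = pred_per_class.sum(axis=-1)
precision = true_positives / np.maximum(predicted_positives, 1)  # guard /0
print(precision)  # -> [1.         1.         0.66666667]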
p5py/p5
|
4ef1580b26179f1973c1669751da4522c5823f17
|
p5/core/image.py
|
python
|
PImage.__getitem__
|
(self, key)
|
return self._get_patch(key)
|
Return the color of the indexed pixel or the requested sub-region
Note :: when the specified `key` denotes a single pixel, the
color of that pixel is returned. Else, a new PImage
(constructed using the slice specified by `key`) is returned. Note
that this causes the internal buffer data to be reloaded
(when the image is in an "unclean" state) and hence, many
such operations can potentially slow things down.
:returns: a sub-image or the pixel color
:rtype: p5.Color | p5.PImage
:raises KeyError: When `key` is invalid.
|
Return the color of the indexed pixel or the requested sub-region
|
[
"Return",
"the",
"color",
"of",
"the",
"indexed",
"pixel",
"or",
"the",
"requested",
"sub",
"-",
"region"
] |
def __getitem__(self, key):
"""Return the color of the indexed pixel or the requested sub-region
Note :: when the specified `key` denotes a single pixel, the
color of that pixel is returned. Else, a new PImage
(constructed using the slice specified by `key`) is returned. Note
that this causes the internal buffer data to be reloaded
(when the image is in an "unclean" state) and hence, many
such operations can potentially slow things down.
:returns: a sub-image or the pixel color
:rtype: p5.Color | p5.PImage
:raises KeyError: When `key` is invalid.
"""
if len(key) != 2:
raise KeyError("Invalid image index")
if _is_numeric(key[0]) and _is_numeric(key[1]):
return self._get_pixel(key)
return self._get_patch(key)
|
[
"def",
"__getitem__",
"(",
"self",
",",
"key",
")",
":",
"if",
"len",
"(",
"key",
")",
"!=",
"2",
":",
"raise",
"KeyError",
"(",
"\"Invalid image index\"",
")",
"if",
"_is_numeric",
"(",
"key",
"[",
"0",
"]",
")",
"and",
"_is_numeric",
"(",
"key",
"[",
"1",
"]",
")",
":",
"return",
"self",
".",
"_get_pixel",
"(",
"key",
")",
"return",
"self",
".",
"_get_patch",
"(",
"key",
")"
] |
https://github.com/p5py/p5/blob/4ef1580b26179f1973c1669751da4522c5823f17/p5/core/image.py#L240-L262
|
|
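The __getitem__ above dispatches on the key: two numeric indices return one pixel, slices return a sub-region. A toy stand-alone sketch of the same dispatch (illustrative Grid class, not the p5py API):

def _is_numeric(x):
    return isinstance(x, (int, float))

class Grid:
    """Toy 2D container mimicking the pixel/region dispatch above."""
    def __init__(self, data):
        self.data = data

    def __getitem__(self, key):
        if len(key) != 2:
            raise KeyError("Invalid index")
        if _is_numeric(key[0]) and _is_numeric(key[1]):
            return self.data[key[0]][key[1]]               # single element
        return [row[key[1]] for row in self.data[key[0]]]  # sub-region

g = Grid([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(g[1, 2])      # -> 6
print(g[0:2, 1:3])  # -> [[2, 3], [5, 6]]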
CalebBell/thermo
|
572a47d1b03d49fe609b8d5f826fa6a7cde00828
|
thermo/phases/phase.py
|
python
|
Phase.PIP
|
(self)
|
return phase_identification_parameter(self.V(), self.dP_dT(), self.dP_dV(),
self.d2P_dV2(), self.d2P_dTdV())
|
r'''Method to calculate and return the phase identification parameter
of the phase.
.. math::
\Pi = V \left[\frac{\frac{\partial^2 P}{\partial V \partial T}}
{\frac{\partial P }{\partial T}}- \frac{\frac{\partial^2 P}{\partial
V^2}}{\frac{\partial P}{\partial V}} \right]
Returns
-------
PIP : float
Phase identification parameter, [-]
|
r'''Method to calculate and return the phase identification parameter
of the phase.
|
[
"r",
"Method",
"to",
"calculate",
"and",
"return",
"the",
"phase",
"identification",
"parameter",
"of",
"the",
"phase",
"."
] |
def PIP(self):
r'''Method to calculate and return the phase identification parameter
of the phase.
.. math::
\Pi = V \left[\frac{\frac{\partial^2 P}{\partial V \partial T}}
{\frac{\partial P }{\partial T}}- \frac{\frac{\partial^2 P}{\partial
V^2}}{\frac{\partial P}{\partial V}} \right]
Returns
-------
PIP : float
Phase identification parameter, [-]
'''
return phase_identification_parameter(self.V(), self.dP_dT(), self.dP_dV(),
self.d2P_dV2(), self.d2P_dTdV())
|
[
"def",
"PIP",
"(",
"self",
")",
":",
"return",
"phase_identification_parameter",
"(",
"self",
".",
"V",
"(",
")",
",",
"self",
".",
"dP_dT",
"(",
")",
",",
"self",
".",
"dP_dV",
"(",
")",
",",
"self",
".",
"d2P_dV2",
"(",
")",
",",
"self",
".",
"d2P_dTdV",
"(",
")",
")"
] |
https://github.com/CalebBell/thermo/blob/572a47d1b03d49fe609b8d5f826fa6a7cde00828/thermo/phases/phase.py#L2585-L2600
|
|
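The docstring formula above translates directly to PIP = V*(d2P/dTdV / dP/dT - d2P/dV2 / dP/dV), commonly read as liquid-like when above ~1. A toy numeric sketch of that expression (made-up derivative values, not a real fluid):

def pip(V, dP_dT, dP_dV, d2P_dV2, d2P_dTdV):
    # Phase identification parameter from the partial derivatives of P
    return V*(d2P_dTdV/dP_dT - d2P_dV2/dP_dV)

print(pip(V=1.0, dP_dT=2.0, dP_dV=-4.0, d2P_dV2=8.0, d2P_dTdV=1.0))  # -> 2.5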
d2l-ai/d2l-en
|
39a7d4174534740b2387b0dc5eb22f409b82ee10
|
d2l/torch.py
|
python
|
load_data_snli
|
(batch_size, num_steps=50)
|
return train_iter, test_iter, train_set.vocab
|
Download the SNLI dataset and return data iterators and vocabulary.
Defined in :numref:`sec_natural-language-inference-and-dataset`
|
Download the SNLI dataset and return data iterators and vocabulary.
|
[
"Download",
"the",
"SNLI",
"dataset",
"and",
"return",
"data",
"iterators",
"and",
"vocabulary",
"."
] |
def load_data_snli(batch_size, num_steps=50):
"""Download the SNLI dataset and return data iterators and vocabulary.
Defined in :numref:`sec_natural-language-inference-and-dataset`"""
num_workers = d2l.get_dataloader_workers()
data_dir = d2l.download_extract('SNLI')
train_data = read_snli(data_dir, True)
test_data = read_snli(data_dir, False)
train_set = SNLIDataset(train_data, num_steps)
test_set = SNLIDataset(test_data, num_steps, train_set.vocab)
train_iter = torch.utils.data.DataLoader(train_set, batch_size,
shuffle=True,
num_workers=num_workers)
test_iter = torch.utils.data.DataLoader(test_set, batch_size,
shuffle=False,
num_workers=num_workers)
return train_iter, test_iter, train_set.vocab
|
[
"def",
"load_data_snli",
"(",
"batch_size",
",",
"num_steps",
"=",
"50",
")",
":",
"num_workers",
"=",
"d2l",
".",
"get_dataloader_workers",
"(",
")",
"data_dir",
"=",
"d2l",
".",
"download_extract",
"(",
"'SNLI'",
")",
"train_data",
"=",
"read_snli",
"(",
"data_dir",
",",
"True",
")",
"test_data",
"=",
"read_snli",
"(",
"data_dir",
",",
"False",
")",
"train_set",
"=",
"SNLIDataset",
"(",
"train_data",
",",
"num_steps",
")",
"test_set",
"=",
"SNLIDataset",
"(",
"test_data",
",",
"num_steps",
",",
"train_set",
".",
"vocab",
")",
"train_iter",
"=",
"torch",
".",
"utils",
".",
"data",
".",
"DataLoader",
"(",
"train_set",
",",
"batch_size",
",",
"shuffle",
"=",
"True",
",",
"num_workers",
"=",
"num_workers",
")",
"test_iter",
"=",
"torch",
".",
"utils",
".",
"data",
".",
"DataLoader",
"(",
"test_set",
",",
"batch_size",
",",
"shuffle",
"=",
"False",
",",
"num_workers",
"=",
"num_workers",
")",
"return",
"train_iter",
",",
"test_iter",
",",
"train_set",
".",
"vocab"
] |
https://github.com/d2l-ai/d2l-en/blob/39a7d4174534740b2387b0dc5eb22f409b82ee10/d2l/torch.py#L2454-L2470
|
|
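The iterators returned above are plain torch DataLoaders over fixed-length token-id tensors. A minimal stand-in sketch (assumes PyTorch; random tensors instead of the SNLI premise/hypothesis pairs):

import torch
from torch.utils.data import DataLoader, TensorDataset

X = torch.randint(0, 100, (8, 50))  # 8 samples, num_steps=50 token ids
y = torch.randint(0, 3, (8,))       # 3 NLI classes
train_iter = DataLoader(TensorDataset(X, y), batch_size=4, shuffle=True)
for xb, yb in train_iter:
    print(xb.shape, yb.shape)       # -> torch.Size([4, 50]) torch.Size([4])
    break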
mfrister/pushproxy
|
55f9386420986ba3cf61b61a9f5a78f73e5a82f2
|
setup/osx/extractkeychain/extractkeychain.py
|
python
|
getitemkey
|
( f )
|
[] |
def getitemkey( f ):
global keys
# 0 0xfade0711 - magic number
# 4 version
# 8 crypto-offset - offset of the interesting data
# 12 total len
# 16 iv (8 bytes)
# 24 CSSM header (large, we don't care)
# ... stuff here not used for now
# 156 the name of the key (ends null-terminated, there's probably another way
# to figure the length, we don't care)
# ...
# ??? 'ssgp................' - 20 byte label, starting with 'ssgp'. Use this
# to match up the later record - this is at totallen + 8
pos = f.tell() - 4
# IV
f.seek( pos + 16 )
iv = f.read( IVLEN )
# total len
f.seek( pos + 12 )
str = f.read(4)
totallen = unpack(">I", str)[0]
# label
f.seek( pos + totallen + 8 )
label = f.read( LABELLEN )
if label[0:4] == 'SYSK':
# don't care about system keys
return
if label[0:4] != 'ssgp':
# TODO - we mightn't care about this, but warn during testing
print "Unknown label %s after %d" % ( hexlify(label), pos)
# ciphertext offset
f.seek( pos + 8 )
str = f.read(4)
cipheroff = unpack(">I", str)[0]
cipherlen = totallen - cipheroff
if cipherlen % BLOCKSIZE != 0:
raise "Bad ciphertext len after %d" % pos
# ciphertext
f.seek( pos + cipheroff )
ciphertext = f.read( cipherlen )
import pdb; pdb.set_trace()
# we're unwrapping it, so there's a magic IV we use.
plain = kcdecrypt( dbkey, magicCmsIV, ciphertext )
# now we handle the unwrapping. we need to take the first 32 bytes,
# and reverse them.
revplain = ''
for i in range(32):
revplain += plain[31-i]
# now the real key gets found. */
plain = kcdecrypt( dbkey, iv, revplain )
itemkey = plain[4:]
if len(itemkey) != KEYLEN:
raise Exception("Bad decrypted keylen!")
keys[label] = itemkey
|
[
"def",
"getitemkey",
"(",
"f",
")",
":",
"global",
"keys",
"# 0 0xfade0711 - magic number",
"# 4 version",
"# 8 crypto-offset - offset of the interesting data",
"# 12 total len",
"# 16 iv (8 bytes)",
"# 24 CSSM header (large, we don't care)",
"# ... stuff here not used for now",
"# 156 the name of the key (ends null-terminated, there's probably another way",
"# to figure the length, we don't care)",
"# ...",
"# ??? 'ssgp................' - 20 byte label, starting with 'ssgp'. Use this",
"# to match up the later record - this is at totallen + 8",
"pos",
"=",
"f",
".",
"tell",
"(",
")",
"-",
"4",
"# IV",
"f",
".",
"seek",
"(",
"pos",
"+",
"16",
")",
"iv",
"=",
"f",
".",
"read",
"(",
"IVLEN",
")",
"# total len",
"f",
".",
"seek",
"(",
"pos",
"+",
"12",
")",
"str",
"=",
"f",
".",
"read",
"(",
"4",
")",
"totallen",
"=",
"unpack",
"(",
"\">I\"",
",",
"str",
")",
"[",
"0",
"]",
"# label",
"f",
".",
"seek",
"(",
"pos",
"+",
"totallen",
"+",
"8",
")",
"label",
"=",
"f",
".",
"read",
"(",
"LABELLEN",
")",
"if",
"label",
"[",
"0",
":",
"4",
"]",
"==",
"'SYSK'",
":",
"# don't care about system keys",
"return",
"if",
"label",
"[",
"0",
":",
"4",
"]",
"!=",
"'ssgp'",
":",
"# TODO - we mightn't care about this, but warn during testing",
"print",
"\"Unknown label %s after %d\"",
"%",
"(",
"hexlify",
"(",
"label",
")",
",",
"pos",
")",
"# ciphertext offset",
"f",
".",
"seek",
"(",
"pos",
"+",
"8",
")",
"str",
"=",
"f",
".",
"read",
"(",
"4",
")",
"cipheroff",
"=",
"unpack",
"(",
"\">I\"",
",",
"str",
")",
"[",
"0",
"]",
"cipherlen",
"=",
"totallen",
"-",
"cipheroff",
"if",
"cipherlen",
"%",
"BLOCKSIZE",
"!=",
"0",
":",
"raise",
"\"Bad ciphertext len after %d\"",
"%",
"pos",
"# ciphertext",
"f",
".",
"seek",
"(",
"pos",
"+",
"cipheroff",
")",
"ciphertext",
"=",
"f",
".",
"read",
"(",
"cipherlen",
")",
"import",
"pdb",
"pdb",
".",
"set_trace",
"(",
")",
"# we're unwrapping it, so there's a magic IV we use.",
"plain",
"=",
"kcdecrypt",
"(",
"dbkey",
",",
"magicCmsIV",
",",
"ciphertext",
")",
"# now we handle the unwrapping. we need to take the first 32 bytes,",
"# and reverse them.",
"revplain",
"=",
"''",
"for",
"i",
"in",
"range",
"(",
"32",
")",
":",
"revplain",
"+=",
"plain",
"[",
"31",
"-",
"i",
"]",
"# now the real key gets found. */",
"plain",
"=",
"kcdecrypt",
"(",
"dbkey",
",",
"iv",
",",
"revplain",
")",
"itemkey",
"=",
"plain",
"[",
"4",
":",
"]",
"if",
"len",
"(",
"itemkey",
")",
"!=",
"KEYLEN",
":",
"raise",
"Exception",
"(",
"\"Bad decrypted keylen!\"",
")",
"keys",
"[",
"label",
"]",
"=",
"itemkey"
] |
https://github.com/mfrister/pushproxy/blob/55f9386420986ba3cf61b61a9f5a78f73e5a82f2/setup/osx/extractkeychain/extractkeychain.py#L96-L167
|
||||
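The explicit 32-iteration loop above just reverses the first 32 bytes of the plaintext; with Python 3 bytes the same unwrap step is a single slice. A small sketch (toy bytes, not keychain data):

plain = bytes(range(40))    # stand-in for the decrypted key blob
revplain = plain[:32][::-1] # reverse the first 32 bytes in one slice
assert revplain == bytes(plain[31 - i] for i in range(32))  # matches the loop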
tjweir/liftbook
|
e977a7face13ade1a4558e1909a6951d2f8928dd
|
elyxer.py
|
python
|
FormulaNumber.parsebit
|
(self, pos)
|
Parse a bunch of digits
|
Parse a bunch of digits
|
[
"Parse",
"a",
"bunch",
"of",
"digits"
] |
def parsebit(self, pos):
"Parse a bunch of digits"
digits = pos.glob(lambda current: current.isdigit())
self.add(FormulaConstant(digits))
self.type = 'number'
|
[
"def",
"parsebit",
"(",
"self",
",",
"pos",
")",
":",
"digits",
"=",
"pos",
".",
"glob",
"(",
"lambda",
"current",
":",
"current",
".",
"isdigit",
"(",
")",
")",
"self",
".",
"add",
"(",
"FormulaConstant",
"(",
"digits",
")",
")",
"self",
".",
"type",
"=",
"'number'"
] |
https://github.com/tjweir/liftbook/blob/e977a7face13ade1a4558e1909a6951d2f8928dd/elyxer.py#L4046-L4050
|
||
ales-tsurko/cells
|
4cf7e395cd433762bea70cdc863a346f3a6fe1d0
|
packaging/macos/python/lib/python3.7/datetime.py
|
python
|
timedelta.days
|
(self)
|
return self._days
|
days
|
days
|
[
"days"
] |
def days(self):
"""days"""
return self._days
|
[
"def",
"days",
"(",
"self",
")",
":",
"return",
"self",
".",
"_days"
] |
https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/datetime.py#L607-L609
|
|
cloudera/hue
|
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
|
desktop/libs/libsentry/gen-py/sentry_policy_service/SentryPolicyService.py
|
python
|
Client.create_sentry_role
|
(self, request)
|
return self.recv_create_sentry_role()
|
Parameters:
- request
|
Parameters:
- request
|
[
"Parameters",
":",
"-",
"request"
] |
def create_sentry_role(self, request):
"""
Parameters:
- request
"""
self.send_create_sentry_role(request)
return self.recv_create_sentry_role()
|
[
"def",
"create_sentry_role",
"(",
"self",
",",
"request",
")",
":",
"self",
".",
"send_create_sentry_role",
"(",
"request",
")",
"return",
"self",
".",
"recv_create_sentry_role",
"(",
")"
] |
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/libs/libsentry/gen-py/sentry_policy_service/SentryPolicyService.py#L123-L129
|
|
andyet/thoonk.py
|
4535ad05975a6410fe3448ace28d591ba1452f02
|
thoonk/pubsub.py
|
python
|
Thoonk.feed_exists
|
(self, feed)
|
return self.redis.sismember('feeds', feed)
|
Check if a given feed exists.
Arguments:
feed -- The name of the feed.
|
Check if a given feed exists.
|
[
"Check",
"if",
"a",
"given",
"feed",
"exists",
"."
] |
def feed_exists(self, feed):
"""
Check if a given feed exists.
Arguments:
feed -- The name of the feed.
"""
return self.redis.sismember('feeds', feed)
|
[
"def",
"feed_exists",
"(",
"self",
",",
"feed",
")",
":",
"return",
"self",
".",
"redis",
".",
"sismember",
"(",
"'feeds'",
",",
"feed",
")"
] |
https://github.com/andyet/thoonk.py/blob/4535ad05975a6410fe3448ace28d591ba1452f02/thoonk/pubsub.py#L256-L263
|
|
mchristopher/PokemonGo-DesktopMap
|
ec37575f2776ee7d64456e2a1f6b6b78830b4fe0
|
app/pywin/Lib/lib2to3/pytree.py
|
python
|
Node.__repr__
|
(self)
|
return "%s(%s, %r)" % (self.__class__.__name__,
type_repr(self.type),
self.children)
|
Return a canonical string representation.
|
Return a canonical string representation.
|
[
"Return",
"a",
"canonical",
"string",
"representation",
"."
] |
def __repr__(self):
"""Return a canonical string representation."""
return "%s(%s, %r)" % (self.__class__.__name__,
type_repr(self.type),
self.children)
|
[
"def",
"__repr__",
"(",
"self",
")",
":",
"return",
"\"%s(%s, %r)\"",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"type_repr",
"(",
"self",
".",
"type",
")",
",",
"self",
".",
"children",
")"
] |
https://github.com/mchristopher/PokemonGo-DesktopMap/blob/ec37575f2776ee7d64456e2a1f6b6b78830b4fe0/app/pywin/Lib/lib2to3/pytree.py#L268-L272
|
|
brean/python-pathfinding
|
18c54b14f98c042d298b9fcc3092bdcaa0c8f4e7
|
pathfinding/finder/a_star.py
|
python
|
AStarFinder.check_neighbors
|
(self, start, end, grid, open_list,
open_value=True, backtrace_by=None)
|
return None
|
find next path segment based on given node
(or return path if we found the end)
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:param open_list: stores nodes that will be processed next
|
find next path segment based on given node
(or return path if we found the end)
|
[
"find",
"next",
"path",
"segment",
"based",
"on",
"given",
"node",
"(",
"or",
"return",
"path",
"if",
"we",
"found",
"the",
"end",
")"
] |
def check_neighbors(self, start, end, grid, open_list,
open_value=True, backtrace_by=None):
"""
find next path segment based on given node
(or return path if we found the end)
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:param open_list: stores nodes that will be processed next
"""
# pop node with minimum 'f' value
node = heapq.nsmallest(1, open_list)[0]
open_list.remove(node)
node.closed = True
# if reached the end position, construct the path and return it
# (ignored for bi-directional a*, there we look for a neighbor that is
# part of the oncoming path)
if not backtrace_by and node == end:
return backtrace(end)
# get neighbors of the current node
neighbors = self.find_neighbors(grid, node)
for neighbor in neighbors:
if neighbor.closed:
# already visited last minimum f value
continue
if backtrace_by and neighbor.opened == backtrace_by:
# found the oncoming path
if backtrace_by == BY_END:
return bi_backtrace(node, neighbor)
else:
return bi_backtrace(neighbor, node)
# check if the neighbor has not been inspected yet, or
# can be reached with smaller cost from the current node
self.process_node(neighbor, node, end, open_list, open_value)
# the end has not been reached (yet) keep the find_path loop running
return None
|
[
"def",
"check_neighbors",
"(",
"self",
",",
"start",
",",
"end",
",",
"grid",
",",
"open_list",
",",
"open_value",
"=",
"True",
",",
"backtrace_by",
"=",
"None",
")",
":",
"# pop node with minimum 'f' value",
"node",
"=",
"heapq",
".",
"nsmallest",
"(",
"1",
",",
"open_list",
")",
"[",
"0",
"]",
"open_list",
".",
"remove",
"(",
"node",
")",
"node",
".",
"closed",
"=",
"True",
"# if reached the end position, construct the path and return it",
"# (ignored for bi-directional a*, there we look for a neighbor that is",
"# part of the oncoming path)",
"if",
"not",
"backtrace_by",
"and",
"node",
"==",
"end",
":",
"return",
"backtrace",
"(",
"end",
")",
"# get neighbors of the current node",
"neighbors",
"=",
"self",
".",
"find_neighbors",
"(",
"grid",
",",
"node",
")",
"for",
"neighbor",
"in",
"neighbors",
":",
"if",
"neighbor",
".",
"closed",
":",
"# already visited last minimum f value",
"continue",
"if",
"backtrace_by",
"and",
"neighbor",
".",
"opened",
"==",
"backtrace_by",
":",
"# found the oncoming path",
"if",
"backtrace_by",
"==",
"BY_END",
":",
"return",
"bi_backtrace",
"(",
"node",
",",
"neighbor",
")",
"else",
":",
"return",
"bi_backtrace",
"(",
"neighbor",
",",
"node",
")",
"# check if the neighbor has not been inspected yet, or",
"# can be reached with smaller cost from the current node",
"self",
".",
"process_node",
"(",
"neighbor",
",",
"node",
",",
"end",
",",
"open_list",
",",
"open_value",
")",
"# the end has not been reached (yet) keep the find_path loop running",
"return",
"None"
] |
https://github.com/brean/python-pathfinding/blob/18c54b14f98c042d298b9fcc3092bdcaa0c8f4e7/pathfinding/finder/a_star.py#L42-L82
|
|
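heapq.nsmallest(1, open_list)[0] followed by list.remove, as used above, is an O(n) way to pop the minimum-f node; heapq.heappop does the same in O(log n) when entries stay heap-ordered. A minimal sketch with (f, node) tuples (illustrative values, not the library's node objects):

import heapq

open_list = []
heapq.heappush(open_list, (7.2, "B"))
heapq.heappush(open_list, (3.1, "A"))
heapq.heappush(open_list, (9.9, "C"))

f, node = heapq.heappop(open_list)  # O(log n) pop of the minimum-f entry
print(f, node)                      # -> 3.1 A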
bnpy/bnpy
|
d5b311e8f58ccd98477f4a0c8a4d4982e3fca424
|
bnpy/datasets/zzz_unsupported/ToyARK13.py
|
python
|
showEachSetOfStatesIn3D
|
()
|
Make a 3D plot in a separate figure for each of the 3 states in a "set"
These three states just vary the speed of rotation and scale of noise,
from slow and large to fast and smaller.
|
Make a 3D plot in a separate figure for each of the 3 states in a "set"
|
[
"Make",
"a",
"3D",
"plot",
"in",
"separate",
"figure",
"for",
"each",
"of",
"the",
"3",
"states",
"in",
"a",
"set"
] |
def showEachSetOfStatesIn3D():
''' Make a 3D plot in a separate figure for each of the 3 states in a "set"
These three states just vary the speed of rotation and scale of noise,
from slow and large to fast and smaller.
'''
from matplotlib import pylab
from mpl_toolkits.mplot3d import Axes3D
L = len(degPerSteps)
for ii in range(L):
plotSequenceForRotatingState3D(-1 * degPerSteps[ii], sigma2s[ii], 2)
|
[
"def",
"showEachSetOfStatesIn3D",
"(",
")",
":",
"from",
"matplotlib",
"import",
"pylab",
"from",
"mpl_toolkits",
".",
"mplot3d",
"import",
"Axes3D",
"L",
"=",
"len",
"(",
"degPerSteps",
")",
"for",
"ii",
"in",
"range",
"(",
"L",
")",
":",
"plotSequenceForRotatingState3D",
"(",
"-",
"1",
"*",
"degPerSteps",
"[",
"ii",
"]",
",",
"sigma2s",
"[",
"ii",
"]",
",",
"2",
")"
] |
https://github.com/bnpy/bnpy/blob/d5b311e8f58ccd98477f4a0c8a4d4982e3fca424/bnpy/datasets/zzz_unsupported/ToyARK13.py#L213-L223
|
||
makerbot/ReplicatorG
|
d6f2b07785a5a5f1e172fb87cb4303b17c575d5d
|
skein_engines/skeinforge-35/fabmetheus_utilities/geometry/creation/teardrop.py
|
python
|
addNegativesByDerivation
|
(end, extrudeDerivation, negatives, radius, start, xmlElement)
|
Add teardrop drill hole to negatives.
|
Add teardrop drill hole to negatives.
|
[
"Add",
"teardrop",
"drill",
"hole",
"to",
"negatives",
"."
] |
def addNegativesByDerivation(end, extrudeDerivation, negatives, radius, start, xmlElement):
"Add teardrop drill hole to negatives."
extrudeDerivation.offsetAlongDefault = [start, end]
extrudeDerivation.tiltFollow = True
extrudeDerivation.tiltTop = Vector3(0.0, 0.0, 1.0)
extrudeDerivation.setToXMLElement(xmlElement.getCopyShallow())
extrude.addNegatives(extrudeDerivation, negatives, [getTeardropPathByEndStart(end, radius, start, xmlElement)])
|
[
"def",
"addNegativesByDerivation",
"(",
"end",
",",
"extrudeDerivation",
",",
"negatives",
",",
"radius",
",",
"start",
",",
"xmlElement",
")",
":",
"extrudeDerivation",
".",
"offsetAlongDefault",
"=",
"[",
"start",
",",
"end",
"]",
"extrudeDerivation",
".",
"tiltFollow",
"=",
"True",
"extrudeDerivation",
".",
"tiltTop",
"=",
"Vector3",
"(",
"0.0",
",",
"0.0",
",",
"1.0",
")",
"extrudeDerivation",
".",
"setToXMLElement",
"(",
"xmlElement",
".",
"getCopyShallow",
"(",
")",
")",
"extrude",
".",
"addNegatives",
"(",
"extrudeDerivation",
",",
"negatives",
",",
"[",
"getTeardropPathByEndStart",
"(",
"end",
",",
"radius",
",",
"start",
",",
"xmlElement",
")",
"]",
")"
] |
https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-35/fabmetheus_utilities/geometry/creation/teardrop.py#L24-L30
|
||
cloudera/hue
|
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
|
desktop/core/ext-py/Django-1.11.29/django/contrib/staticfiles/finders.py
|
python
|
FileSystemFinder.find
|
(self, path, all=False)
|
return matches
|
Looks for files in the extra locations
as defined in ``STATICFILES_DIRS``.
|
Looks for files in the extra locations
as defined in ``STATICFILES_DIRS``.
|
[
"Looks",
"for",
"files",
"in",
"the",
"extra",
"locations",
"as",
"defined",
"in",
"STATICFILES_DIRS",
"."
] |
def find(self, path, all=False):
"""
Looks for files in the extra locations
as defined in ``STATICFILES_DIRS``.
"""
matches = []
for prefix, root in self.locations:
if root not in searched_locations:
searched_locations.append(root)
matched_path = self.find_location(root, path, prefix)
if matched_path:
if not all:
return matched_path
matches.append(matched_path)
return matches
|
[
"def",
"find",
"(",
"self",
",",
"path",
",",
"all",
"=",
"False",
")",
":",
"matches",
"=",
"[",
"]",
"for",
"prefix",
",",
"root",
"in",
"self",
".",
"locations",
":",
"if",
"root",
"not",
"in",
"searched_locations",
":",
"searched_locations",
".",
"append",
"(",
"root",
")",
"matched_path",
"=",
"self",
".",
"find_location",
"(",
"root",
",",
"path",
",",
"prefix",
")",
"if",
"matched_path",
":",
"if",
"not",
"all",
":",
"return",
"matched_path",
"matches",
".",
"append",
"(",
"matched_path",
")",
"return",
"matches"
] |
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/Django-1.11.29/django/contrib/staticfiles/finders.py#L76-L90
|
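The first-match-or-collect-all shape of find above is a reusable pattern: return the first hit unless the caller asks for every hit. A generic sketch (illustrative names, not the Django API):

def find_matches(predicate, items, all=False):
    matches = []
    for item in items:
        if predicate(item):
            if not all:
                return item  # first match wins
            matches.append(item)
    return matches

files = ["app.js", "base.css", "theme.css"]
print(find_matches(lambda f: f.endswith(".css"), files))            # -> base.css
print(find_matches(lambda f: f.endswith(".css"), files, all=True))  # -> ['base.css', 'theme.css']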