Dataset columns:

| column | type |
|---|---|
| identifier | string (length 1 to 155) |
| parameters | string (length 2 to 6.09k) |
| docstring | string (length 11 to 63.4k) |
| docstring_summary | string (length 0 to 63.4k) |
| function | string (length 29 to 99.8k) |
| function_tokens | sequence |
| start_point | sequence |
| end_point | sequence |
| language | string (1 class) |
| docstring_language | string (length 2 to 7) |
| docstring_language_predictions | string (length 18 to 23) |
| is_langid_reliable | string (2 classes) |

Each row of the dataset is shown below as one record; `function_tokens` is the `function` text split into tokens, and `docstring` / `docstring_summary` are contained verbatim in the function source.

identifier: GetArrayViewFromVnlVector
parameters: (vnlVector)
docstring_summary: Get an array view of vnlVector
function:
```python
def GetArrayViewFromVnlVector(vnlVector):
    """Get an array view of vnlVector
    """
    return _GetArrayFromVnlObject(vnlVector, "GetArrayViewFromVnlVector")
```
start_point: [304, 0] | end_point: [307, 73] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: GetArrayFromVnlMatrix
parameters: (vnlMatrix)
docstring_summary: Get an array with the content of vnlMatrix
function:
```python
def GetArrayFromVnlMatrix(vnlMatrix):
    """Get an array with the content of vnlMatrix
    """
    return _GetArrayFromVnlObject(vnlMatrix, "GetArrayFromVnlMatrix")
```
start_point: [309, 0] | end_point: [312, 69] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: GetArrayViewFromVnlMatrix
parameters: (vnlMatrix)
docstring_summary: Get an array view of vnlMatrix
function:
```python
def GetArrayViewFromVnlMatrix(vnlMatrix):
    """Get an array view of vnlMatrix
    """
    return _GetArrayFromVnlObject(vnlMatrix, "GetArrayViewFromVnlMatrix")
```
start_point: [314, 0] | end_point: [317, 73] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'lb', 'en'] | is_langid_reliable: True

identifier: _GetVnlObjectFromArray
parameters: (arr, function)
docstring_summary: Get a vnl object from a Python array.
function:
```python
def _GetVnlObjectFromArray(arr, function):
    """Get a vnl object from a Python array.
    """
    if not HAVE_NUMPY:
        raise ImportError('Numpy not available.')
    import itk
    PixelType = _get_itk_pixelid(arr)
    templatedFunction = getattr(itk.PyVnl[PixelType], function)
    return templatedFunction(arr)
```
start_point: [319, 0] | end_point: [327, 33] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: GetVnlVectorFromArray
parameters: (arr)
docstring_summary: Get a vnl vector from a Python array.
function:
```python
def GetVnlVectorFromArray(arr):
    """Get a vnl vector from a Python array.
    """
    return _GetVnlObjectFromArray(arr, "GetVnlVectorFromArray")
```
start_point: [329, 0] | end_point: [332, 63] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'lb', 'en'] | is_langid_reliable: True

identifier: GetVnlMatrixFromArray
parameters: (arr)
docstring_summary: Get a vnl matrix from a Python array.
function:
```python
def GetVnlMatrixFromArray(arr):
    """Get a vnl matrix from a Python array.
    """
    return _GetVnlObjectFromArray(arr, "GetVnlMatrixFromArray")
```
start_point: [334, 0] | end_point: [337, 63] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'lb', 'en'] | is_langid_reliable: True

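A brief usage sketch for the vnl conversion helpers above (this and the later sketches are not dataset rows); it assumes ITK's Python wrapping with NumPy support is installed, and the array values are illustrative:

```python
import numpy as np
import itk

arr = np.array([[1.0, 2.0], [3.0, 4.0]])
vnl_matrix = itk.GetVnlMatrixFromArray(arr)       # copy the 2-D array into a vnl_matrix
vnl_vector = itk.GetVnlVectorFromArray(arr[0])    # copy the first row into a vnl_vector
back = itk.GetArrayFromVnlMatrix(vnl_matrix)      # round-trip back to a NumPy array
```
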
identifier: template
parameters: (cl)
docstring_summary: Return the template of a class (or of the class of an object) and its parameters
function:
```python
def template(cl):
    """Return the template of a class (or of the class of an object) and
    its parameters
    template() returns a tuple with 2 elements:
        - the first one is the itkTemplate object
        - the second is a tuple containing the template parameters
    """
    from itkTemplate import itkTemplate
    return itkTemplate.__class_to_template__[class_(cl)]
```
start_point: [343, 0] | end_point: [352, 56] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

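A short sketch of `template()` applied to a wrapped ITK class, assuming the `itk` package is importable; the types used are illustrative:

```python
import itk

ImageType = itk.Image[itk.F, 3]
tpl, params = itk.template(ImageType)
# tpl is the itk.Image template object, params is the tuple (itk.F, 3)
```
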
identifier: ctype
parameters: (s)
docstring_summary: Return the c type corresponding to the string passed in parameter
function:
```python
def ctype(s):
    """Return the c type corresponding to the string passed in parameter
    The string can contain some extra spaces.
    see also itkCType
    """
    from itkTypes import itkCType
    ret = itkCType.GetCType(" ".join(s.split()))
    if ret is None:
        raise KeyError("Unrecognized C type '%s'" % s)
    return ret
```
start_point: [355, 0] | end_point: [366, 14] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

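A short sketch of `ctype()`, assuming the `itk` package is importable:

```python
import itk

uc = itk.ctype("unsigned   char")    # extra spaces are collapsed before the lookup
print(uc)                            # the itkCType instance for unsigned char
try:
    itk.ctype("no such type")
except KeyError as err:              # unrecognized names raise KeyError
    print(err)
```
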
identifier: class_
parameters: (obj)
docstring_summary: Return a class from an object
function:
```python
def class_(obj):
    """Return a class from an object
    Often in itk, the __class__ is not what the user is expecting.
    class_() should do a better job
    """
    import inspect
    if inspect.isclass(obj):
        # obj is already a class !
        return obj
    else:
        return obj.__class__
```
start_point: [369, 0] | end_point: [380, 28] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'lb', 'en'] | is_langid_reliable: True

identifier: range
parameters: (imageOrFilter)
docstring_summary: Return the range of values in a image of in the output image of a filter
function:
```python
def range(imageOrFilter):
    """Return the range of values in a image of in the output image of a filter
    The minimum and maximum values are returned in a tuple: (min, max)
    range() take care of updating the pipeline
    """
    import itk
    img = output(imageOrFilter)
    img.UpdateOutputInformation()
    img.Update()
    # don't put that calculator in the automatic pipeline
    tmp_auto_pipeline = auto_pipeline.current
    auto_pipeline.current = None
    comp = itk.MinimumMaximumImageCalculator[img].New(Image=img)
    auto_pipeline.current = tmp_auto_pipeline
    comp.Compute()
    return (comp.GetMinimum(), comp.GetMaximum())
```
start_point: [383, 0] | end_point: [399, 49] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

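A short usage sketch for `range()` above, assuming the `itk` package is importable and that a readable file `image.png` exists:

```python
import itk

image = itk.imread("image.png")
lo, hi = itk.range(image)     # (minimum pixel value, maximum pixel value)
print(lo, hi)
```
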
identifier: imwrite
parameters: (imageOrFilter, fileName, compression=False)
docstring_summary: Write a image or the output image of a filter to a file.
function:
```python
def imwrite(imageOrFilter, fileName, compression=False):
    """Write a image or the output image of a filter to a file.
    The writer is instantiated with the image type of the image in
    parameter (or, again, with the output image of the filter in parameter).
    """
    import itk
    img = output(imageOrFilter)
    img.UpdateOutputInformation()
    # don't put that writer in the automatic pipeline
    tmp_auto_pipeline = auto_pipeline.current
    auto_pipeline.current = None
    writer = itk.ImageFileWriter[img].New(
        Input=img,
        FileName=fileName,
        UseCompression=compression)
    auto_pipeline.current = tmp_auto_pipeline
    writer.Update()
```
start_point: [402, 0] | end_point: [419, 19] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: imread
parameters: (fileName, pixelType=None)
docstring_summary: Read an image from a file and return an itk.Image.
function:
```python
def imread(fileName, pixelType=None):
    """Read an image from a file and return an itk.Image.
    The reader is instantiated with the image type of the image file.
    """
    import itk
    if pixelType:
        imageIO = itk.ImageIOFactory.CreateImageIO( fileName, itk.ImageIOFactory.ReadMode )
        if not imageIO:
            raise RuntimeError("No ImageIO is registered to handle the given file.")
        imageIO.SetFileName( fileName )
        imageIO.ReadImageInformation()
        dimension = imageIO.GetNumberOfDimensions()
        ImageType = itk.Image[pixelType, dimension]
        reader = itk.ImageFileReader[ImageType].New(FileName=fileName)
    else:
        reader = itk.ImageFileReader.New(FileName=fileName)
    reader.Update()
    return reader.GetOutput()
```
start_point: [428, 0] | end_point: [446, 29] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

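A short read/write round trip using `imread` above and `imwrite` from the earlier row; it assumes the `itk` package is importable and that a readable file `input.png` exists:

```python
import itk

image = itk.imread("input.png")              # pixel type deduced from the file header
image_f = itk.imread("input.png", itk.F)     # force a float pixel type
itk.imwrite(image, "copy.png", compression=True)
```
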
identifier: search
parameters: (s, case_sensitive=False)
docstring_summary: Search for a class name in the itk module.
function:
```python
def search(s, case_sensitive=False):  # , fuzzy=True):
    """Search for a class name in the itk module.
    """
    s = s.replace(" ", "")
    if not case_sensitive:
        s = s.lower()
    import itk
    names = sorted(dir(itk))
    # exact match first
    if case_sensitive:
        res = [n for n in names if s == n]
    else:
        res = [n for n in names if s == n.lower()]
    # then exact match inside the name
    if case_sensitive:
        res += [n for n in names if s in n and s != n]
    else:
        res += [n for n in names if s in n.lower() and s != n.lower()]
    # if fuzzy:
    #     try:
    #         # everything now requires editdist
    #         import editdist
    #         if case_sensitive:
    #             res.sort(key=lambda x: editdist.distance(x, s))
    #         else:
    #             res.sort(key=lambda x: (editdist.distance(x.lower(), s), x))
    #     except:
    #         pass
    return res
```
start_point: [448, 0] | end_point: [476, 14] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

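A short sketch of `search()`, assuming the `itk` package is importable; the query strings are illustrative:

```python
import itk

print(itk.search("MedianImageFilter"))     # exact matches first, then substring matches
print(itk.search("median image filter"))   # spaces are stripped; case-insensitive by default
```
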
identifier: set_inputs
parameters: (newItkObject, args=[], kargs={})
docstring_summary: Set the inputs of the given objects, according to the non named or the named parameters in args and kargs
function:
```python
def set_inputs(newItkObject, args=[], kargs={}):
    """Set the inputs of the given objects, according to the non named or the
    named parameters in args and kargs
    This function tries to assign all the non named parameters in the input of
    the newItkObject
    - the first non named parameter in the first input, etc.
    The named parameters are used by calling the method with the same name
    prefixed by 'Set'.
    set_inputs( obj, kargs={'Threshold': 10} ) calls obj.SetThreshold(10)
    This is the function use in the enhanced New() method to manage the inputs.
    It can be used to produce a similar behavior:
    def SetInputs(self, *args, **kargs):
        import itk
        itk.set_inputs(self, *args, **kargs)
    """
    # try to get the images from the filters in args
    args = [output(arg) for arg in args]
    # args without name are filter used to set input image
    #
    # count SetInput calls to call SetInput, SetInput2, SetInput3, ...
    # usefull with filter which take 2 input (or more) like SubstractImageFiler
    # Ex: substract image2.png to image1.png and save the result in result.png
    # r1 = itk.ImageFileReader.US2.New(FileName='image1.png')
    # r2 = itk.ImageFileReader.US2.New(FileName='image2.png')
    # s = itk.SubtractImageFilter.US2US2US2.New(r1, r2)
    # itk.ImageFileWriter.US2.New(s, FileName='result.png').Update()
    try:
        for setInputNb, arg in enumerate(args):
            methodName = 'SetInput%i' % (setInputNb + 1)
            if methodName in dir(newItkObject):
                # first try to use methods called SetInput1, SetInput2, ...
                # those method should have more chances to work in case of
                # multiple input types
                getattr(newItkObject, methodName)(arg)
            else:
                # no method called SetInput?
                # try with the standard SetInput(nb, input)
                newItkObject.SetInput(setInputNb, arg)
    except TypeError as e:
        # the exception have (at least) to possible reasons:
        # + the filter don't take the input number as first argument
        # + arg is an object of wrong type
        #
        # if it's not the first input, re-raise the exception
        if setInputNb != 0:
            raise e
        # it's the first input, try to use the SetInput() method without input
        # number
        newItkObject.SetInput(args[0])
        # but raise an exception if there is more than 1 argument
        if len(args) > 1:
            raise TypeError('Object accept only 1 input.')
    except AttributeError:
        # There is no SetInput() method, try SetImage
        # but before, check the number of inputs
        if len(args) > 1:
            raise TypeError('Object accept only 1 input.')
        methodList = ['SetImage', 'SetInputImage']
        methodName = None
        for m in methodList:
            if m in dir(newItkObject):
                methodName = m
        if methodName:
            getattr(newItkObject, methodName)(args[0])
        else:
            raise AttributeError('No method found to set the input.')
    # named args : name is the function name, value is argument(s)
    for attribName, value in kargs.items():
        # use Set as prefix. It allow to use a shorter and more intuitive
        # call (Ex: itk.ImageFileReader.UC2.New(FileName='image.png')) than
        # with the full name
        # (Ex: itk.ImageFileReader.UC2.New(SetFileName='image.png'))
        if attribName not in ["auto_progress", "template_parameters"]:
            attrib = getattr(newItkObject, 'Set' + attribName)
            attrib(output(value))
```
start_point: [479, 0] | end_point: [559, 33] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: show
parameters: (input, **kargs)
docstring_summary: display an image
function:
```python
def show(input, **kargs):
    """display an image
    """
    import itk
    img = output(input)
    if img.GetImageDimension() == 3 and "show3D" in dir(itk):
        return itk.show3D(input, **kargs)
    else:
        # print("2D not supported yet, use the 3D viewer.")
        return show2D(input, **kargs)
```
start_point: [562, 0] | end_point: [571, 37] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'ceb', 'en'] | is_langid_reliable: True

identifier: down_cast
parameters: (obj)
docstring_summary: Down cast an itkLightObject (or a object of a subclass) to its most specialized type.
function:
```python
def down_cast(obj):
    """Down cast an itkLightObject (or a object of a subclass) to its most
    specialized type.
    """
    import itk
    import itkTemplate
    className = obj.GetNameOfClass()
    t = getattr(itk, className)
    if isinstance(t, itkTemplate.itkTemplate):
        for c in t.values():
            try:
                return c.cast(obj)
            except:
                # fail silently for now
                pass
        raise RuntimeError(
            "Can't downcast to a specialization of %s" %
            className)
    else:
        return t.cast(obj)
```
start_point: [1008, 0] | end_point: [1027, 26] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

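A hedged sketch of `down_cast()`; it assumes the `itk` package is importable, that a readable file `image.png` exists, and that the reader's `GetImageIO()` accessor is wrapped and returns a base-class pointer:

```python
import itk

reader = itk.ImageFileReader.New(FileName="image.png")
reader.UpdateOutputInformation()
io = itk.down_cast(reader.GetImageIO())   # concrete ImageIO subclass instead of the base class
print(io.GetNameOfClass())
```
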
identifier: attribute_list
parameters: (i, name)
docstring_summary: Returns a list of the specified attributes for the objects in the image.
function:
```python
def attribute_list(i, name):
    """Returns a list of the specified attributes for the objects in the image.
    i: the input LabelImage
    name: the attribute name
    """
    import itk
    i = itk.output(i)
    relabel = itk.StatisticsRelabelLabelMapFilter[i].New(
        i,
        Attribute=name,
        ReverseOrdering=True,
        InPlace=False)
    relabel.UpdateLargestPossibleRegion()
    r = relabel.GetOutput()
    l = []
    for i in range(1, r.GetNumberOfLabelObjects() + 1):
        l.append(r.GetLabelObject(i).__getattribute__("Get" + name)())
    return l
```
start_point: [1030, 0] | end_point: [1048, 12] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: attributes_list
parameters: (i, names)
docstring_summary: Returns a list of the specified attributes for the objects in the image.
function:
```python
def attributes_list(i, names):
    """Returns a list of the specified attributes for the objects in the image.
    i: the input LabelImage
    name: the attribute name
    """
    import itk
    i = itk.output(i)
    relabel = itk.StatisticsRelabelLabelMapFilter[i].New(
        i,
        Attribute=names[0],
        ReverseOrdering=True,
        InPlace=False)
    relabel.UpdateLargestPossibleRegion()
    r = relabel.GetOutput()
    l = []
    for i in range(1, r.GetNumberOfLabelObjects() + 1):
        attrs = []
        for name in names:
            attrs.append(r.GetLabelObject(i).__getattribute__("Get" + name)())
        l.append(tuple(attrs))
    return l
```
start_point: [1051, 0] | end_point: [1072, 12] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: attribute_dict
parameters: (i, name)
docstring_summary: Returns a dict with the attribute values in keys and a list of the corresponding objects in value
function:
```python
def attribute_dict(i, name):
    """Returns a dict with the attribute values in keys and a list of the
    corresponding objects in value
    i: the input LabelImage
    name: the name of the attribute
    """
    import itk
    i = itk.output(i)
    relabel = itk.StatisticsRelabelLabelMapFilter[i].New(
        i,
        Attribute=name,
        ReverseOrdering=True,
        InPlace=False)
    relabel.UpdateLargestPossibleRegion()
    r = relabel.GetOutput()
    d = {}
    for i in range(1, r.GetNumberOfLabelObjects() + 1):
        lo = r.GetLabelObject(i)
        v = lo.__getattribute__("Get" + name)()
        l = d.get(v, [])
        l.append(lo)
        d[v] = l
    return d
```
start_point: [1075, 0] | end_point: [1098, 12] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: number_of_objects
parameters: (i)
docstring_summary: Returns the number of objets in the image.
function:
```python
def number_of_objects(i):
    """Returns the number of objets in the image.
    i: the input LabelImage
    """
    import itk
    i.UpdateLargestPossibleRegion()
    i = itk.output(i)
    return i.GetNumberOfLabelObjects()
```
start_point: [1101, 0] | end_point: [1109, 38] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: ipython_kw_matches
parameters: (text)
docstring_summary: Match named ITK object's named parameters
function:
```python
def ipython_kw_matches(text):
    """Match named ITK object's named parameters"""
    import IPython
    import itk
    import re
    import inspect
    import itkTemplate
    regexp = re.compile(r'''
        '.*?' |    # single quoted strings or
        ".*?" |    # double quoted strings or
        \w+   |    # identifier
        \S         # other characters
        ''', re.VERBOSE | re.DOTALL)
    ip = IPython.get_ipython()
    if "." in text:  # a parameter cannot be dotted
        return []
    # 1. Find the nearest identifier that comes before an unclosed
    # parenthesis e.g. for "foo (1+bar(x), pa", the candidate is "foo".
    if ip.Completer.readline:
        textUntilCursor = ip.Completer.readline.get_line_buffer()[:ip.Completer.readline.get_endidx()]
    else:
        # IPython >= 5.0.0, which is based on the Python Prompt Toolkit
        textUntilCursor = ip.Completer.text_until_cursor
    tokens = regexp.findall(textUntilCursor)
    tokens.reverse()
    iterTokens = iter(tokens)
    openPar = 0
    for token in iterTokens:
        if token == ')':
            openPar -= 1
        elif token == '(':
            openPar += 1
            if openPar > 0:
                # found the last unclosed parenthesis
                break
    else:
        return []
    # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
    ids = []
    isId = re.compile(r'\w+$').match
    while True:
        try:
            ids.append(iterTokens.next())
            if not isId(ids[-1]):
                ids.pop()
                break
            if not iterTokens.next() == '.':
                break
        except StopIteration:
            break
    # lookup the candidate callable matches either using global_matches
    # or attr_matches for dotted names
    if len(ids) == 1:
        callableMatches = ip.Completer.global_matches(ids[0])
    else:
        callableMatches = ip.Completer.attr_matches('.'.join(ids[::-1]))
    argMatches = []
    for callableMatch in callableMatches:
        # drop the .New at this end, so we can search in the class members
        if callableMatch.endswith(".New"):
            callableMatch = callableMatch[:-4]
        try:
            object = eval(callableMatch, ip.Completer.namespace)
            if isinstance(object, itkTemplate.itkTemplate):
                # this is a template - lets grab the first entry to search for
                # the methods
                object = object.values()[0]
            namedArgs = []
            isin = isinstance(object, itk.LightObject)
            if inspect.isclass(object):
                issub = issubclass(object, itk.LightObject)
            if isin or (inspect.isclass(object) and issub):
                namedArgs = [n[3:] for n in dir(object) if n.startswith("Set")]
        except Exception as e:
            print(e)
            continue
        for namedArg in namedArgs:
            if namedArg.startswith(text):
                argMatches.append(u"%s=" % namedArg)
    return argMatches
```
start_point: [1112, 0] | end_point: [1192, 21] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: templated_class.__init__
parameters: (self, cls)
docstring_summary: cls is the custom class
function:
```python
def __init__(self, cls):
    """cls is the custom class
    """
    self.__cls__ = cls
    self.__templates__ = {}
```
start_point: [700, 4] | end_point: [704, 31] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: templated_class.New
parameters: (self, *args, **kargs)
docstring_summary: Use the parameters to infer the types of the template parameters.
function:
```python
def New(self, *args, **kargs):
    """Use the parameters to infer the types of the template parameters.
    """
    # extract the types from the arguments to instantiate the class
    import itk
    types = tuple(itk.class_(o) for o in args)
    return self[types].New(*args, **kargs)
```
start_point: [706, 4] | end_point: [712, 46] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: templated_class.__getitem__
parameters: (self, template_parameters)
docstring_summary: Return a pair class-template parameters ready to be instantiated.
function:
```python
def __getitem__(self, template_parameters):
    """Return a pair class-template parameters ready to be instantiated.
    The template parameters may be validated if the custom class provide
    the static method check_template_parameters(parameters).
    """
    if not isinstance(template_parameters, tuple):
        template_parameters = (template_parameters,)
    return (
        templated_class.__templated_class_and_parameters__(
            self,
            template_parameters)
    )
```
start_point: [714, 4] | end_point: [726, 9] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: templated_class.check_template_parameters
parameters: (self, template_parameters)
docstring_summary: Check the template parameters passed in parameter.
function:
```python
def check_template_parameters(self, template_parameters):
    """Check the template parameters passed in parameter.
    """
    # this method is there mainly to make possible to reuse it in the
    # custom class constructor after having used templated_class().
    # Without that, the following example doesn't work:
    #
    # class CustomClass:
    #     def __init__(self, *args, **kargs):
    #         template_parameters = kargs["template_parameters"]
    #         CustomClass.check_template_parameters(template_parameters)
    #         other init stuff
    #     def check_template_parameters(template_parameters):
    #         check, really
    #         pass
    # CustomClass = templated_class(CustomClass)
    #
    self.__cls__.check_template_parameters(template_parameters)
```
start_point: [728, 4] | end_point: [745, 67] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: pipeline.connect
parameters: (self, filter)
docstring_summary: Connect a new filter to the pipeline
function:
```python
def connect(self, filter):
    """Connect a new filter to the pipeline
    The output of the first filter will be used as the input of this
    one and the filter passed as parameter will be added to the list
    """
    if self.GetOutput() is not None:
        set_inputs(filter, [self.GetOutput()])
    self.append(filter)
```
start_point: [871, 4] | end_point: [879, 27] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: pipeline.append
parameters: (self, filter)
docstring_summary: Add a new filter to the pipeline
function:
```python
def append(self, filter):
    """Add a new filter to the pipeline
    The new filter will not be connected. The user must connect it.
    """
    self.filters.append(filter)
```
start_point: [881, 4] | end_point: [886, 35] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: pipeline.clear
parameters: (self)
docstring_summary: Clear the filter list
function:
```python
def clear(self):
    """Clear the filter list
    """
    self.filters = []
```
start_point: [888, 4] | end_point: [891, 25] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: pipeline.GetOutput
parameters: (self, index=0)
docstring_summary: Return the output of the pipeline
function:
```python
def GetOutput(self, index=0):
    """Return the output of the pipeline
    If another output is needed, use
    pipeline.filters[-1].GetAnotherOutput() instead of this method,
    subclass pipeline to implement another GetOutput() method, or use
    expose()
    """
    if len(self.filters) == 0:
        return self.GetInput()
    else:
        filter = self.filters[-1]
        if hasattr(filter, "__getitem__"):
            return filter[index]
        try:
            return filter.GetOutput(index)
        except:
            if index == 0:
                return filter.GetOutput()
            else:
                raise ValueError("Index can only be 0 on that object")
```
start_point: [893, 4] | end_point: [913, 74] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: pipeline.SetInput
parameters: (self, input)
docstring_summary: Set the input of the pipeline
function:
```python
def SetInput(self, input):
    """Set the input of the pipeline
    """
    if len(self.filters) != 0:
        set_inputs(self.filters[0], [input])
    self.input = input
```
start_point: [915, 4] | end_point: [920, 26] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: pipeline.GetInput
parameters: (self)
docstring_summary: Get the input of the pipeline
function:
```python
def GetInput(self):
    """Get the input of the pipeline
    """
    return self.input
```
start_point: [922, 4] | end_point: [925, 25] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: pipeline.Update
parameters: (self)
docstring_summary: Update the pipeline
function:
```python
def Update(self):
    """Update the pipeline
    """
    if len(self.filters) > 0:
        return self.filters[-1].Update()
```
start_point: [927, 4] | end_point: [931, 44] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: pipeline.UpdateLargestPossibleRegion
parameters: (self)
docstring_summary: Update the pipeline
function:
```python
def UpdateLargestPossibleRegion(self):
    """Update the pipeline
    """
    if len(self.filters) > 0:
        return self.filters[-1].UpdateLargestPossibleRegion()
```
start_point: [933, 4] | end_point: [937, 65] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: pipeline.expose
parameters: (self, name, new_name=None, position=-1)
docstring_summary: Expose an attribute from a filter of the minipeline.
function:
```python
def expose(self, name, new_name=None, position=-1):
    """Expose an attribute from a filter of the minipeline.
    Once called, the pipeline instance has a new Set/Get set of methods to
    access directly the corresponding method of one of the filter of the
    pipeline.
    Ex: p.expose( "Radius" )
        p.SetRadius( 5 )
        p.GetRadius( 5 )
    By default, the attribute usable on the pipeline instance has the same
    name than the one of the filter, but it can be changed by providing a
    value to new_name.
    The last filter of the pipeline is used by default, but another one may
    be used by giving its position.
    Ex: p.expose("Radius", "SmoothingNeighborhood", 2)
        p.GetSmoothingNeighborhood()
    """
    if new_name is None:
        new_name = name
    src = self.filters[position]
    ok = False
    set_name = "Set" + name
    if set_name in dir(src):
        setattr(self, "Set" + new_name, getattr(src, set_name))
        ok = True
    get_name = "Get" + name
    if get_name in dir(src):
        setattr(self, "Get" + new_name, getattr(src, get_name))
        ok = True
    if not ok:
        raise RuntimeError(
            "No attribute %s at position %s." %
            (name, position))
```
start_point: [959, 4] | end_point: [991, 33] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: display_map
parameters: (latitude = None, longitude = None, resolution = None)
docstring_summary: Generates a folium map with a lat-lon bounded rectangle drawn on it.
function:
```python
def display_map(latitude = None, longitude = None, resolution = None):
    """ Generates a folium map with a lat-lon bounded rectangle drawn on it. Folium maps can be
    Args:
        latitude (float,float): a tuple of latitude bounds in (min,max) format
        longitude ((float, float)): a tuple of longitude bounds in (min,max) format
        resolution ((float, float)): tuple in (lat,lon) format used to draw a grid on your map. Values denote
                                     spacing of latitude and longitude lines. Gridding starts at top left
                                     corner. Default displays no grid at all.
    Returns:
        folium.Map: A map centered on the lat lon bounds. A rectangle is drawn on this map detailing the
        perimeter of the lat,lon bounds. A zoom level is calculated such that the resulting viewport is the
        closest it can possibly get to the centered bounding rectangle without clipping it. An
        optional grid can be overlaid with primitive interpolation.
    .. _Folium
        https://github.com/python-visualization/folium
    """
    assert latitude is not None
    assert longitude is not None
    ###### ###### ######   CALC ZOOM LEVEL     ###### ###### ######
    margin = -0.5
    zoom_bias = 0
    lat_zoom_level = _degree_to_zoom_level(margin = margin, *latitude ) + zoom_bias
    lon_zoom_level = _degree_to_zoom_level(margin = margin, *longitude) + zoom_bias
    zoom_level = min(lat_zoom_level, lon_zoom_level)
    ###### ###### ######   CENTER POINT        ###### ###### ######
    center = [np.mean(latitude), np.mean(longitude)]
    ###### ###### ######   CREATE MAP          ###### ###### ######
    map_hybrid = folium.Map(
        location=center,
        zoom_start=zoom_level,
        tiles=" http://mt1.google.com/vt/lyrs=y&z={z}&x={x}&y={y}",
        attr="Google"
    )
    ###### ###### ######   RESOLUTION GRID     ###### ###### ######
    if resolution is not None:
        res_lat, res_lon = resolution
        lats = np.arange(abs(res_lat), *latitude)
        lons = np.arange(abs(res_lon), *longitude)
        vertical_grid = map(lambda x: ([x[0][0], x[1]], [x[0][1], x[1]]), itertools.product([latitude], lons))
        horizontal_grid = map(lambda x: ([x[1], x[0][0]], [x[1], x[0][1]]), itertools.product([longitude], lats))
        for segment in vertical_grid:
            folium.features.PolyLine(segment, color = 'white', opacity = 0.3).add_to(map_hybrid)
        for segment in horizontal_grid:
            folium.features.PolyLine(segment, color = 'white', opacity = 0.3).add_to(map_hybrid)
    ###### ###### ######   BOUNDING BOX        ###### ###### ######
    line_segments = [(latitude[0], longitude[0]),
                     (latitude[0], longitude[1]),
                     (latitude[1], longitude[1]),
                     (latitude[1], longitude[0]),
                     (latitude[0], longitude[0])
                     ]
    map_hybrid.add_child(
        folium.features.PolyLine(
            locations=line_segments,
            color='red',
            opacity=0.8)
    )
    map_hybrid.add_child(folium.features.LatLngPopup())
    return map_hybrid
```
start_point: [16, 0] | end_point: [99, 21] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

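A short sketch of calling `display_map` above; it assumes the module-level names the function relies on (`folium`, `numpy as np`, `itertools`, `_degree_to_zoom_level`) are available, and the coordinates are illustrative:

```python
m = display_map(latitude=(36.3, 36.5),
                longitude=(-118.7, -118.5),
                resolution=(0.05, 0.05))
m   # in a Jupyter notebook, evaluating the folium.Map renders it
```
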
identifier: SuiteProfileNotebookRenderer.render_to_disk
parameters: (self, notebook_file_path: str)
docstring_summary: Render a notebook to disk from an expectation suite.
function:
```python
def render_to_disk(self, notebook_file_path: str):
    """
    Render a notebook to disk from an expectation suite.
    """
    self.render()
    self.write_notebook_to_disk(
        notebook=self._notebook, notebook_file_path=notebook_file_path
    )
```
start_point: [156, 4] | end_point: [163, 9] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

merge_errors | (errors1, errors2) | Deeply merge two error messages.
The format of ``errors1`` and ``errors2`` matches the ``message``
parameter of :exc:`marshmallow.exceptions.ValidationError`.
| Deeply merge two error messages. | def merge_errors(errors1, errors2):
"""Deeply merge two error messages.
The format of ``errors1`` and ``errors2`` matches the ``message``
parameter of :exc:`marshmallow.exceptions.ValidationError`.
"""
if not errors1:
return errors2
if not errors2:
return errors1
if isinstance(errors1, list):
if isinstance(errors2, list):
return errors1 + errors2
if isinstance(errors2, dict):
return dict(errors2, **{SCHEMA: merge_errors(errors1, errors2.get(SCHEMA))})
return errors1 + [errors2]
if isinstance(errors1, dict):
if isinstance(errors2, list):
return dict(errors1, **{SCHEMA: merge_errors(errors1.get(SCHEMA), errors2)})
if isinstance(errors2, dict):
errors = dict(errors1)
for key, val in errors2.items():
if key in errors:
errors[key] = merge_errors(errors[key], val)
else:
errors[key] = val
return errors
return dict(errors1, **{SCHEMA: merge_errors(errors1.get(SCHEMA), errors2)})
if isinstance(errors2, list):
return [errors1] + errors2
if isinstance(errors2, dict):
return dict(errors2, **{SCHEMA: merge_errors(errors1, errors2.get(SCHEMA))})
return [errors1, errors2] | [
"def",
"merge_errors",
"(",
"errors1",
",",
"errors2",
")",
":",
"if",
"not",
"errors1",
":",
"return",
"errors2",
"if",
"not",
"errors2",
":",
"return",
"errors1",
"if",
"isinstance",
"(",
"errors1",
",",
"list",
")",
":",
"if",
"isinstance",
"(",
"errors2",
",",
"list",
")",
":",
"return",
"errors1",
"+",
"errors2",
"if",
"isinstance",
"(",
"errors2",
",",
"dict",
")",
":",
"return",
"dict",
"(",
"errors2",
",",
"*",
"*",
"{",
"SCHEMA",
":",
"merge_errors",
"(",
"errors1",
",",
"errors2",
".",
"get",
"(",
"SCHEMA",
")",
")",
"}",
")",
"return",
"errors1",
"+",
"[",
"errors2",
"]",
"if",
"isinstance",
"(",
"errors1",
",",
"dict",
")",
":",
"if",
"isinstance",
"(",
"errors2",
",",
"list",
")",
":",
"return",
"dict",
"(",
"errors1",
",",
"*",
"*",
"{",
"SCHEMA",
":",
"merge_errors",
"(",
"errors1",
".",
"get",
"(",
"SCHEMA",
")",
",",
"errors2",
")",
"}",
")",
"if",
"isinstance",
"(",
"errors2",
",",
"dict",
")",
":",
"errors",
"=",
"dict",
"(",
"errors1",
")",
"for",
"key",
",",
"val",
"in",
"errors2",
".",
"items",
"(",
")",
":",
"if",
"key",
"in",
"errors",
":",
"errors",
"[",
"key",
"]",
"=",
"merge_errors",
"(",
"errors",
"[",
"key",
"]",
",",
"val",
")",
"else",
":",
"errors",
"[",
"key",
"]",
"=",
"val",
"return",
"errors",
"return",
"dict",
"(",
"errors1",
",",
"*",
"*",
"{",
"SCHEMA",
":",
"merge_errors",
"(",
"errors1",
".",
"get",
"(",
"SCHEMA",
")",
",",
"errors2",
")",
"}",
")",
"if",
"isinstance",
"(",
"errors2",
",",
"list",
")",
":",
"return",
"[",
"errors1",
"]",
"+",
"errors2",
"if",
"isinstance",
"(",
"errors2",
",",
"dict",
")",
":",
"return",
"dict",
"(",
"errors2",
",",
"*",
"*",
"{",
"SCHEMA",
":",
"merge_errors",
"(",
"errors1",
",",
"errors2",
".",
"get",
"(",
"SCHEMA",
")",
")",
"}",
")",
"return",
"[",
"errors1",
",",
"errors2",
"]"
] | [
27,
0
] | [
59,
29
] | python | en | ['en', 'en', 'en'] | True |
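
A quick, hedged illustration of the deep-merge behaviour documented above. The docstring points at marshmallow, so this sketch assumes the function can be imported from marshmallow.error_store and that SCHEMA is the schema-level key "_schema"; adjust the import if your copy lives elsewhere.

```python
# Sketch only: the import path and the value of SCHEMA ("_schema") are
# assumptions based on the marshmallow reference in the docstring.
from marshmallow.error_store import merge_errors

# Two field-keyed error dicts are merged key by key.
e1 = {"name": ["Missing data for required field."]}
e2 = {"name": ["Invalid value."], "age": ["Not a valid integer."]}
print(merge_errors(e1, e2))
# {'name': ['Missing data for required field.', 'Invalid value.'],
#  'age': ['Not a valid integer.']}

# Mixing a list with a dict nests the list under the schema-level key.
print(merge_errors(["Invalid input."], {"name": ["Required."]}))
# {'name': ['Required.'], '_schema': ['Invalid input.']}
```
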
Doxy2SWIG.__init__ | (self, src) | Initialize the instance given a source object (file or
filename).
| Initialize the instance given a source object (file or
filename). | def __init__(self, src):
"""Initialize the instance given a source object (file or
filename).
"""
f = my_open_read(src)
self.my_dir = os.path.dirname(f.name)
self.xmldoc = minidom.parse(f).documentElement
f.close()
self.pieces = []
self.pieces.append('\n// File: %s\n'%\
os.path.basename(f.name))
self.space_re = re.compile(r'\s+')
self.lead_spc = re.compile(r'^(%feature\S+\s+\S+\s*?)"\s+(\S)')
self.multi = 0
self.ignores = ('inheritancegraph', 'param', 'listofallmembers',
'innerclass', 'name', 'declname', 'incdepgraph',
'invincdepgraph', 'programlisting', 'type',
'references', 'referencedby', 'location',
'collaborationgraph', 'reimplements',
'reimplementedby', 'derivedcompoundref',
'basecompoundref') | [
"def",
"__init__",
"(",
"self",
",",
"src",
")",
":",
"f",
"=",
"my_open_read",
"(",
"src",
")",
"self",
".",
"my_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"f",
".",
"name",
")",
"self",
".",
"xmldoc",
"=",
"minidom",
".",
"parse",
"(",
"f",
")",
".",
"documentElement",
"f",
".",
"close",
"(",
")",
"self",
".",
"pieces",
"=",
"[",
"]",
"self",
".",
"pieces",
".",
"append",
"(",
"'\\n// File: %s\\n'",
"%",
"os",
".",
"path",
".",
"basename",
"(",
"f",
".",
"name",
")",
")",
"self",
".",
"space_re",
"=",
"re",
".",
"compile",
"(",
"r'\\s+'",
")",
"self",
".",
"lead_spc",
"=",
"re",
".",
"compile",
"(",
"r'^(%feature\\S+\\s+\\S+\\s*?)\"\\s+(\\S)'",
")",
"self",
".",
"multi",
"=",
"0",
"self",
".",
"ignores",
"=",
"(",
"'inheritancegraph'",
",",
"'param'",
",",
"'listofallmembers'",
",",
"'innerclass'",
",",
"'name'",
",",
"'declname'",
",",
"'incdepgraph'",
",",
"'invincdepgraph'",
",",
"'programlisting'",
",",
"'type'",
",",
"'references'",
",",
"'referencedby'",
",",
"'location'",
",",
"'collaborationgraph'",
",",
"'reimplements'",
",",
"'reimplementedby'",
",",
"'derivedcompoundref'",
",",
"'basecompoundref'",
")"
] | [
54,
4
] | [
77,
42
] | python | en | ['en', 'en', 'en'] | True |
Doxy2SWIG.generate | (self) | Parses the file set in the initialization. The resulting
data is stored in `self.pieces`.
| Parses the file set in the initialization. The resulting
data is stored in `self.pieces`. | def generate(self):
"""Parses the file set in the initialization. The resulting
data is stored in `self.pieces`.
"""
self.parse(self.xmldoc) | [
"def",
"generate",
"(",
"self",
")",
":",
"self",
".",
"parse",
"(",
"self",
".",
"xmldoc",
")"
] | [
80,
4
] | [
85,
31
] | python | en | ['en', 'en', 'en'] | True |
Doxy2SWIG.parse | (self, node) | Parse a given node. This function in turn calls the
`parse_<nodeType>` functions which handle the respective
nodes.
| Parse a given node. This function in turn calls the
`parse_<nodeType>` functions which handle the respective
nodes. | def parse(self, node):
"""Parse a given node. This function in turn calls the
`parse_<nodeType>` functions which handle the respective
nodes.
"""
pm = getattr(self, "parse_%s"%node.__class__.__name__)
pm(node) | [
"def",
"parse",
"(",
"self",
",",
"node",
")",
":",
"pm",
"=",
"getattr",
"(",
"self",
",",
"\"parse_%s\"",
"%",
"node",
".",
"__class__",
".",
"__name__",
")",
"pm",
"(",
"node",
")"
] | [
87,
4
] | [
94,
16
] | python | en | ['en', 'en', 'en'] | True |
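
The getattr-based dispatch in parse() is a small pattern worth seeing in isolation. Below is a self-contained sketch (not part of the original class) that applies the same idiom to xml.dom.minidom nodes:

```python
# Handler names are derived from the node's class name at runtime,
# mirroring the parse()/parse_<nodeType> scheme above.
import xml.dom.minidom as minidom

class TinyVisitor:
    def parse(self, node):
        handler = getattr(self, "parse_%s" % node.__class__.__name__)
        handler(node)

    def parse_Document(self, node):      # xml.dom.minidom.Document
        self.parse(node.documentElement)

    def parse_Element(self, node):       # xml.dom.minidom.Element
        print("element:", node.tagName)
        for child in node.childNodes:
            self.parse(child)

    def parse_Text(self, node):          # xml.dom.minidom.Text
        text = node.data.strip()
        if text:
            print("text:", text)

TinyVisitor().parse(minidom.parseString("<a>hi<b>there</b></a>"))
```
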
Doxy2SWIG.parse_Element | (self, node) | Parse an `ELEMENT_NODE`. This calls specific
`do_<tagName>` handlers for different elements. If no handler
is available the `generic_parse` method is called. All
tagNames specified in `self.ignores` are simply ignored.
| Parse an `ELEMENT_NODE`. This calls specific
`do_<tagName>` handlers for different elements. If no handler
is available the `generic_parse` method is called. All
tagNames specified in `self.ignores` are simply ignored. | def parse_Element(self, node):
"""Parse an `ELEMENT_NODE`. This calls specific
`do_<tagName>` handlers for different elements. If no handler
is available the `generic_parse` method is called. All
tagNames specified in `self.ignores` are simply ignored.
"""
name = node.tagName
ignores = self.ignores
if name in ignores:
return
attr = "do_%s" % name
if hasattr(self, attr):
handlerMethod = getattr(self, attr)
handlerMethod(node)
else:
self.generic_parse(node) | [
"def",
"parse_Element",
"(",
"self",
",",
"node",
")",
":",
"name",
"=",
"node",
".",
"tagName",
"ignores",
"=",
"self",
".",
"ignores",
"if",
"name",
"in",
"ignores",
":",
"return",
"attr",
"=",
"\"do_%s\"",
"%",
"name",
"if",
"hasattr",
"(",
"self",
",",
"attr",
")",
":",
"handlerMethod",
"=",
"getattr",
"(",
"self",
",",
"attr",
")",
"handlerMethod",
"(",
"node",
")",
"else",
":",
"self",
".",
"generic_parse",
"(",
"node",
")"
] | [
110,
4
] | [
126,
36
] | python | en | ['en', 'en', 'en'] | True |
Doxy2SWIG.add_text | (self, value) | Adds text corresponding to `value` into `self.pieces`. | Adds text corresponding to `value` into `self.pieces`. | def add_text(self, value):
"""Adds text corresponding to `value` into `self.pieces`."""
if sys.version_info >= (3,0):
listTypes = (list, tuple)
else:
listTypes = (types.ListType, types.TupleType)
if type(value) in listTypes:
self.pieces.extend(value)
else:
self.pieces.append(value) | [
"def",
"add_text",
"(",
"self",
",",
"value",
")",
":",
"if",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
"0",
")",
":",
"listTypes",
"=",
"(",
"list",
",",
"tuple",
")",
"else",
":",
"listTypes",
"=",
"(",
"types",
".",
"ListType",
",",
"types",
".",
"TupleType",
")",
"if",
"type",
"(",
"value",
")",
"in",
"listTypes",
":",
"self",
".",
"pieces",
".",
"extend",
"(",
"value",
")",
"else",
":",
"self",
".",
"pieces",
".",
"append",
"(",
"value",
")"
] | [
129,
4
] | [
138,
37
] | python | en | ['en', 'en', 'en'] | True |
Doxy2SWIG.get_specific_nodes | (self, node, names) | Given a node and a sequence of strings in `names`, return a
dictionary containing the names as keys and child
`ELEMENT_NODEs` that have a `tagName` equal to the name.
| Given a node and a sequence of strings in `names`, return a
dictionary containing the names as keys and child
`ELEMENT_NODEs` that have a `tagName` equal to the name. | def get_specific_nodes(self, node, names):
"""Given a node and a sequence of strings in `names`, return a
dictionary containing the names as keys and child
`ELEMENT_NODEs` that have a `tagName` equal to the name.
"""
nodes = [(x.tagName, x) for x in node.childNodes \
if x.nodeType == x.ELEMENT_NODE and \
x.tagName in names]
return dict(nodes) | [
"def",
"get_specific_nodes",
"(",
"self",
",",
"node",
",",
"names",
")",
":",
"nodes",
"=",
"[",
"(",
"x",
".",
"tagName",
",",
"x",
")",
"for",
"x",
"in",
"node",
".",
"childNodes",
"if",
"x",
".",
"nodeType",
"==",
"x",
".",
"ELEMENT_NODE",
"and",
"x",
".",
"tagName",
"in",
"names",
"]",
"return",
"dict",
"(",
"nodes",
")"
] | [
140,
4
] | [
149,
26
] | python | en | ['en', 'en', 'en'] | True |
Doxy2SWIG.generic_parse | (self, node, pad=0) | A Generic parser for arbitrary tags in a node.
Parameters:
- node: A node in the DOM.
- pad: `int` (default: 0)
If 0 the node data is not padded with newlines. If 1 it
appends a newline after parsing the childNodes. If 2 it
pads before and after the nodes are processed. Defaults to
0.
| A Generic parser for arbitrary tags in a node. | def generic_parse(self, node, pad=0):
"""A Generic parser for arbitrary tags in a node.
Parameters:
- node: A node in the DOM.
- pad: `int` (default: 0)
If 0 the node data is not padded with newlines. If 1 it
appends a newline after parsing the childNodes. If 2 it
pads before and after the nodes are processed. Defaults to
0.
"""
npiece = 0
if pad:
npiece = len(self.pieces)
if pad == 2:
self.add_text('\n')
for n in node.childNodes:
self.parse(n)
if pad:
if len(self.pieces) > npiece:
self.add_text('\n') | [
"def",
"generic_parse",
"(",
"self",
",",
"node",
",",
"pad",
"=",
"0",
")",
":",
"npiece",
"=",
"0",
"if",
"pad",
":",
"npiece",
"=",
"len",
"(",
"self",
".",
"pieces",
")",
"if",
"pad",
"==",
"2",
":",
"self",
".",
"add_text",
"(",
"'\\n'",
")",
"for",
"n",
"in",
"node",
".",
"childNodes",
":",
"self",
".",
"parse",
"(",
"n",
")",
"if",
"pad",
":",
"if",
"len",
"(",
"self",
".",
"pieces",
")",
">",
"npiece",
":",
"self",
".",
"add_text",
"(",
"'\\n'",
")"
] | [
151,
4
] | [
174,
35
] | python | en | ['en', 'en', 'it'] | True |
Doxy2SWIG.clean_pieces | (self, pieces) | Cleans the list of strings given as `pieces`. It replaces
multiple newlines by a maximum of 2 and returns a new list.
It also wraps the paragraphs nicely.
| Cleans the list of strings given as `pieces`. It replaces
multiple newlines by a maximum of 2 and returns a new list.
It also wraps the paragraphs nicely. | def clean_pieces(self, pieces):
"""Cleans the list of strings given as `pieces`. It replaces
multiple newlines by a maximum of 2 and returns a new list.
It also wraps the paragraphs nicely.
"""
ret = []
count = 0
for i in pieces:
if i == '\n':
count = count + 1
else:
if i == '";':
if count:
ret.append('\n')
elif count > 2:
ret.append('\n\n')
elif count:
ret.append('\n'*count)
count = 0
ret.append(i)
_data = "".join(ret)
ret = []
for i in _data.split('\n\n'):
if i == 'Parameters:':
ret.extend(['Parameters:\n-----------', '\n\n'])
elif i.find('// File:') > -1: # leave comments alone.
ret.extend([i, '\n'])
else:
_tmp = textwrap.fill(i.strip())
_tmp = self.lead_spc.sub(r'\1"\2', _tmp)
ret.extend([_tmp, '\n\n'])
return ret | [
"def",
"clean_pieces",
"(",
"self",
",",
"pieces",
")",
":",
"ret",
"=",
"[",
"]",
"count",
"=",
"0",
"for",
"i",
"in",
"pieces",
":",
"if",
"i",
"==",
"'\\n'",
":",
"count",
"=",
"count",
"+",
"1",
"else",
":",
"if",
"i",
"==",
"'\";'",
":",
"if",
"count",
":",
"ret",
".",
"append",
"(",
"'\\n'",
")",
"elif",
"count",
">",
"2",
":",
"ret",
".",
"append",
"(",
"'\\n\\n'",
")",
"elif",
"count",
":",
"ret",
".",
"append",
"(",
"'\\n'",
"*",
"count",
")",
"count",
"=",
"0",
"ret",
".",
"append",
"(",
"i",
")",
"_data",
"=",
"\"\"",
".",
"join",
"(",
"ret",
")",
"ret",
"=",
"[",
"]",
"for",
"i",
"in",
"_data",
".",
"split",
"(",
"'\\n\\n'",
")",
":",
"if",
"i",
"==",
"'Parameters:'",
":",
"ret",
".",
"extend",
"(",
"[",
"'Parameters:\\n-----------'",
",",
"'\\n\\n'",
"]",
")",
"elif",
"i",
".",
"find",
"(",
"'// File:'",
")",
">",
"-",
"1",
":",
"# leave comments alone.",
"ret",
".",
"extend",
"(",
"[",
"i",
",",
"'\\n'",
"]",
")",
"else",
":",
"_tmp",
"=",
"textwrap",
".",
"fill",
"(",
"i",
".",
"strip",
"(",
")",
")",
"_tmp",
"=",
"self",
".",
"lead_spc",
".",
"sub",
"(",
"r'\\1\"\\2'",
",",
"_tmp",
")",
"ret",
".",
"extend",
"(",
"[",
"_tmp",
",",
"'\\n\\n'",
"]",
")",
"return",
"ret"
] | [
329,
4
] | [
362,
18
] | python | en | ['en', 'en', 'en'] | True |
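
clean_pieces leans on textwrap.fill to re-wrap each paragraph to the default 70-column width; a minimal standalone illustration of that call (the sample text is made up):

```python
import textwrap

paragraph = ("This is a long docstring paragraph that was glued together "
             "from several XML text nodes and now needs to be re-wrapped "
             "so the generated SWIG feature strings stay readable.")
print(textwrap.fill(paragraph.strip()))   # wrapped to <= 70 columns
```
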
shapefile_mask | (dataset: xr.Dataset, shapefile) | Extracts a mask from a shapefile using dataset latitude and longitude extents.
Args:
dataset (xarray.Dataset): The dataset with the latitude and longitude extents.
shapefile (string): The shapefile to be used for extraction.
Returns:
A boolean mask array.
| Extracts a mask from a shapefile using dataset latitude and longitude extents. | def shapefile_mask(dataset: xr.Dataset, shapefile) -> np.array:
"""Extracts a mask from a shapefile using dataset latitude and longitude extents.
Args:
dataset (xarray.Dataset): The dataset with the latitude and longitude extents.
shapefile (string): The shapefile to be used for extraction.
Returns:
A boolean mask array.
"""
with fiona.open(shapefile, 'r') as source:
collection = list(source)
geometries = []
for feature in collection:
geom = shape(feature['geometry'])
project = partial(
pyproj.transform,
pyproj.Proj(init=source.crs['init']), # source crs
pyproj.Proj(init='epsg:4326')) # destination crs
geom = transform(project, geom) # apply projection
geometries.append(geom)
geobox = dataset.geobox
mask = geometry_mask(
geometries,
out_shape=geobox.shape,
transform=geobox.affine,
all_touched=True,
invert=True)
return mask | [
"def",
"shapefile_mask",
"(",
"dataset",
":",
"xr",
".",
"Dataset",
",",
"shapefile",
")",
"->",
"np",
".",
"array",
":",
"with",
"fiona",
".",
"open",
"(",
"shapefile",
",",
"'r'",
")",
"as",
"source",
":",
"collection",
"=",
"list",
"(",
"source",
")",
"geometries",
"=",
"[",
"]",
"for",
"feature",
"in",
"collection",
":",
"geom",
"=",
"shape",
"(",
"feature",
"[",
"'geometry'",
"]",
")",
"project",
"=",
"partial",
"(",
"pyproj",
".",
"transform",
",",
"pyproj",
".",
"Proj",
"(",
"init",
"=",
"source",
".",
"crs",
"[",
"'init'",
"]",
")",
",",
"# source crs",
"pyproj",
".",
"Proj",
"(",
"init",
"=",
"'epsg:4326'",
")",
")",
"# destination crs",
"geom",
"=",
"transform",
"(",
"project",
",",
"geom",
")",
"# apply projection",
"geometries",
".",
"append",
"(",
"geom",
")",
"geobox",
"=",
"dataset",
".",
"geobox",
"mask",
"=",
"geometry_mask",
"(",
"geometries",
",",
"out_shape",
"=",
"geobox",
".",
"shape",
",",
"transform",
"=",
"geobox",
".",
"affine",
",",
"all_touched",
"=",
"True",
",",
"invert",
"=",
"True",
")",
"return",
"mask"
] | [
27,
0
] | [
55,
15
] | python | en | ['en', 'en', 'en'] | True |
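
The core of shapefile_mask is the call to rasterio.features.geometry_mask. The sketch below exercises that step with an in-memory shapely polygon instead of a shapefile, so it only needs rasterio, shapely and affine (the original additionally needs fiona, pyproj and a datacube-style dataset exposing a geobox):

```python
from affine import Affine
from rasterio.features import geometry_mask
from shapely.geometry import box

# A 0.1-degree grid covering lon 0..1, lat 1..0 (10 x 10 pixels).
transform = Affine(0.1, 0.0, 0.0,
                   0.0, -0.1, 1.0)
geometries = [box(0.25, 0.25, 0.75, 0.75)]       # lon/lat rectangle

mask = geometry_mask(geometries,
                     out_shape=(10, 10),
                     transform=transform,
                     all_touched=True,
                     invert=True)                 # True inside the polygon
print(mask.sum(), "of", mask.size, "pixels fall inside the geometry")
```
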
step_impl | (context) |
:type context: behave.runner.Context
|
:type context: behave.runner.Context
| def step_impl(context):
"""
:type context: behave.runner.Context
"""
print("Hello") | [
"def",
"step_impl",
"(",
"context",
")",
":",
"print",
"(",
"\"Hello\"",
")"
] | [
7,
0
] | [
11,
18
] | python | en | ['en', 'error', 'th'] | False |
apply_visitor | (visitor, decl_inst) |
Applies a visitor on declaration instance.
:param visitor: instance
:type visitor: :class:`type_visitor_t` or :class:`decl_visitor_t`
|
Applies a visitor on declaration instance. | def apply_visitor(visitor, decl_inst):
"""
Applies a visitor on declaration instance.
:param visitor: instance
:type visitor: :class:`type_visitor_t` or :class:`decl_visitor_t`
"""
fname = 'visit_' + \
decl_inst.__class__.__name__[:-2] # removing '_t' from class name
if not hasattr(visitor, fname):
raise runtime_errors.visit_function_has_not_been_found_t(
visitor, decl_inst)
return getattr(visitor, fname)() | [
"def",
"apply_visitor",
"(",
"visitor",
",",
"decl_inst",
")",
":",
"fname",
"=",
"'visit_'",
"+",
"decl_inst",
".",
"__class__",
".",
"__name__",
"[",
":",
"-",
"2",
"]",
"# removing '_t' from class name",
"if",
"not",
"hasattr",
"(",
"visitor",
",",
"fname",
")",
":",
"raise",
"runtime_errors",
".",
"visit_function_has_not_been_found_t",
"(",
"visitor",
",",
"decl_inst",
")",
"return",
"getattr",
"(",
"visitor",
",",
"fname",
")",
"(",
")"
] | [
72,
0
] | [
86,
36
] | python | en | ['en', 'error', 'th'] | False |
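
The trailing-'_t' naming convention used by apply_visitor is easiest to see in a toy example. This self-contained version re-implements the lookup with a plain RuntimeError instead of the library's visit_function_has_not_been_found_t exception:

```python
class free_function_t:                   # stands in for a declaration class
    pass

class count_visitor_t:
    def __init__(self):
        self.seen = []
    def visit_free_function(self):       # 'free_function_t' minus '_t'
        self.seen.append("free_function")

def apply_visitor(visitor, decl_inst):
    fname = "visit_" + decl_inst.__class__.__name__[:-2]
    if not hasattr(visitor, fname):
        raise RuntimeError("no handler %s on %r" % (fname, visitor))
    return getattr(visitor, fname)()

v = count_visitor_t()
apply_visitor(v, free_function_t())
print(v.seen)                            # ['free_function']
```
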
match_declaration_t.does_match_exist | (self, inst) |
Returns True if inst matches one of the specified criteria.
:param inst: declaration instance
:type inst: :class:`declaration_t`
:rtype: bool
|
Returns True if inst matches one of the specified criteria. | def does_match_exist(self, inst):
"""
Returns True if inst matches one of the specified criteria.
:param inst: declaration instance
:type inst: :class:`declaration_t`
:rtype: bool
"""
answer = True
if self._decl_type is not None:
answer &= isinstance(inst, self._decl_type)
if self.name is not None:
answer &= inst.name == self.name
if self.parent is not None:
answer &= self.parent is inst.parent
if self.fullname is not None:
if inst.name:
answer &= self.fullname == declaration_utils.full_name(inst)
else:
answer = False
return answer | [
"def",
"does_match_exist",
"(",
"self",
",",
"inst",
")",
":",
"answer",
"=",
"True",
"if",
"self",
".",
"_decl_type",
"is",
"not",
"None",
":",
"answer",
"&=",
"isinstance",
"(",
"inst",
",",
"self",
".",
"_decl_type",
")",
"if",
"self",
".",
"name",
"is",
"not",
"None",
":",
"answer",
"&=",
"inst",
".",
"name",
"==",
"self",
".",
"name",
"if",
"self",
".",
"parent",
"is",
"not",
"None",
":",
"answer",
"&=",
"self",
".",
"parent",
"is",
"inst",
".",
"parent",
"if",
"self",
".",
"fullname",
"is",
"not",
"None",
":",
"if",
"inst",
".",
"name",
":",
"answer",
"&=",
"self",
".",
"fullname",
"==",
"declaration_utils",
".",
"full_name",
"(",
"inst",
")",
"else",
":",
"answer",
"=",
"False",
"return",
"answer"
] | [
36,
4
] | [
59,
21
] | python | en | ['en', 'error', 'th'] | False |
match_declaration_t.__call__ | (self, inst) |
.. code-block:: python
return self.does_match_exist(inst)
|
.. code-block:: python | def __call__(self, inst):
"""
.. code-block:: python
return self.does_match_exist(inst)
"""
return self.does_match_exist(inst) | [
"def",
"__call__",
"(",
"self",
",",
"inst",
")",
":",
"return",
"self",
".",
"does_match_exist",
"(",
"inst",
")"
] | [
61,
4
] | [
69,
42
] | python | en | ['en', 'error', 'th'] | False |
point_confusion_matrix | (expected, observed, data=None, start=None, end=None) | Compute the confusion matrix between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of timestamps):
Ground truth passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
observed (DataFrame or list of timestamps):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
Returns:
tuple:
number of true negative, false positive, false negative, true positive.
| Compute the confusion matrix between the ground truth and the detected anomalies. | def point_confusion_matrix(expected, observed, data=None, start=None, end=None):
"""Compute the confusion matrix between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of timestamps):
Ground truth passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
observed (DataFrame or list of timestamps):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
Returns:
tuple:
number of true negative, false positive, false negative, true positive.
"""
def _ws(x, y, z, w):
return _weighted_segment(x, y, _point_partition, z, w)
if data is not None:
start = data['timestamp'].min()
end = data['timestamp'].max()
if not isinstance(expected, list):
expected = list(expected['timestamp'])
if not isinstance(observed, list):
observed = list(observed['timestamp'])
return _ws(expected, observed, start, end) | [
"def",
"point_confusion_matrix",
"(",
"expected",
",",
"observed",
",",
"data",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"def",
"_ws",
"(",
"x",
",",
"y",
",",
"z",
",",
"w",
")",
":",
"return",
"_weighted_segment",
"(",
"x",
",",
"y",
",",
"_point_partition",
",",
"z",
",",
"w",
")",
"if",
"data",
"is",
"not",
"None",
":",
"start",
"=",
"data",
"[",
"'timestamp'",
"]",
".",
"min",
"(",
")",
"end",
"=",
"data",
"[",
"'timestamp'",
"]",
".",
"max",
"(",
")",
"if",
"not",
"isinstance",
"(",
"expected",
",",
"list",
")",
":",
"expected",
"=",
"list",
"(",
"expected",
"[",
"'timestamp'",
"]",
")",
"if",
"not",
"isinstance",
"(",
"observed",
",",
"list",
")",
":",
"observed",
"=",
"list",
"(",
"observed",
"[",
"'timestamp'",
"]",
")",
"return",
"_ws",
"(",
"expected",
",",
"observed",
",",
"start",
",",
"end",
")"
] | [
29,
0
] | [
64,
46
] | python | en | ['en', 'en', 'en'] | True |
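
A usage sketch for the confusion matrix above. The import path is an assumption (these point-wise metrics look like Orion's evaluation utilities); the tuple order follows the documented return value:

```python
# Hypothetical import path -- adjust to wherever these metrics live.
import pandas as pd
from orion.evaluation.point import point_confusion_matrix

expected = pd.DataFrame({"timestamp": [3, 4, 5]})   # ground-truth anomalies
observed = pd.DataFrame({"timestamp": [4, 5, 6]})   # detected anomalies
data = pd.DataFrame({"timestamp": range(10)})       # full signal, 0..9

tn, fp, fn, tp = point_confusion_matrix(expected, observed, data=data)
print("tn", tn, "fp", fp, "fn", fn, "tp", tp)
```
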
point_accuracy | (expected, observed, data=None, start=None, end=None) | Compute an accuracy score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of timestamps):
Ground truth passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
observed (DataFrame or list of timestamps):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
Returns:
float:
Accuracy score between the ground truth and detected anomalies.
| Compute an accuracy score between the ground truth and the detected anomalies. | def point_accuracy(expected, observed, data=None, start=None, end=None):
"""Compute an accuracy score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of timestamps):
Ground truth passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
observed (DataFrame or list of timestamps):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
Returns:
float:
Accuracy score between the ground truth and detected anomalies.
"""
return _accuracy(expected, observed, data, start, end, cm=point_confusion_matrix) | [
"def",
"point_accuracy",
"(",
"expected",
",",
"observed",
",",
"data",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"return",
"_accuracy",
"(",
"expected",
",",
"observed",
",",
"data",
",",
"start",
",",
"end",
",",
"cm",
"=",
"point_confusion_matrix",
")"
] | [
67,
0
] | [
89,
85
] | python | en | ['en', 'en', 'en'] | True |
point_precision | (expected, observed, data=None, start=None, end=None) | Compute a precision score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of timestamps):
Ground truth passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
observed (DataFrame or list of timestamps):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
Returns:
float:
Precision score between the ground truth and detected anomalies.
| Compute a precision score between the ground truth and the detected anomalies. | def point_precision(expected, observed, data=None, start=None, end=None):
"""Compute an precision score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of timestamps):
Ground truth passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
observed (DataFrame or list of timestamps):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
Returns:
float:
Precision score between the ground truth and detected anomalies.
"""
return _precision(expected, observed, data, start, end, cm=point_confusion_matrix) | [
"def",
"point_precision",
"(",
"expected",
",",
"observed",
",",
"data",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"return",
"_precision",
"(",
"expected",
",",
"observed",
",",
"data",
",",
"start",
",",
"end",
",",
"cm",
"=",
"point_confusion_matrix",
")"
] | [
92,
0
] | [
114,
86
] | python | en | ['en', 'en', 'en'] | True |
point_recall | (expected, observed, data=None, start=None, end=None) | Compute a recall score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of timestamps):
Ground truth passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
observed (DataFrame or list of timestamps):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
Returns:
float:
Recall score between the ground truth and detected anomalies.
| Compute a recall score between the ground truth and the detected anomalies. | def point_recall(expected, observed, data=None, start=None, end=None):
"""Compute an recall score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of timestamps):
Ground truth passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
observed (DataFrame or list of timestamps):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
Returns:
float:
Recall score between the ground truth and detected anomalies.
"""
return _recall(expected, observed, data, start, end, cm=point_confusion_matrix) | [
"def",
"point_recall",
"(",
"expected",
",",
"observed",
",",
"data",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"return",
"_recall",
"(",
"expected",
",",
"observed",
",",
"data",
",",
"start",
",",
"end",
",",
"cm",
"=",
"point_confusion_matrix",
")"
] | [
117,
0
] | [
139,
83
] | python | en | ['en', 'en', 'en'] | True |
point_f1_score | (expected, observed, data=None, start=None, end=None) | Compute an f1 score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of timestamps):
Ground truth passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
observed (DataFrame or list of timestamps):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
Returns:
float:
F1 score between the ground truth and detected anomalies.
| Compute an f1 score between the ground truth and the detected anomalies. | def point_f1_score(expected, observed, data=None, start=None, end=None):
"""Compute an f1 score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of timestamps):
Ground truth passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
observed (DataFrame or list of timestamps):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
Returns:
float:
F1 score between the ground truth and detected anomalies.
"""
return _f1_score(expected, observed, data, start, end, cm=point_confusion_matrix) | [
"def",
"point_f1_score",
"(",
"expected",
",",
"observed",
",",
"data",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"return",
"_f1_score",
"(",
"expected",
",",
"observed",
",",
"data",
",",
"start",
",",
"end",
",",
"cm",
"=",
"point_confusion_matrix",
")"
] | [
142,
0
] | [
164,
85
] | python | en | ['en', 'en', 'en'] | True |
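
As with point_confusion_matrix, the module path here is an assumption; under that caveat, the four score functions above can be compared side by side:

```python
import pandas as pd
from orion.evaluation.point import (
    point_accuracy, point_f1_score, point_precision, point_recall)

expected = pd.DataFrame({"timestamp": [3, 4, 5]})
observed = pd.DataFrame({"timestamp": [4, 5, 6]})
data = pd.DataFrame({"timestamp": range(10)})

for metric in (point_accuracy, point_precision, point_recall, point_f1_score):
    print(metric.__name__, metric(expected, observed, data=data))
```
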
ansiformat | (attr, text) |
Format ``text`` with a color and/or some attributes::
color normal color
*color* bold color
_color_ underlined color
+color+ blinking color
|
Format ``text`` with a color and/or some attributes:: | def ansiformat(attr, text):
"""
Format ``text`` with a color and/or some attributes::
color normal color
*color* bold color
_color_ underlined color
+color+ blinking color
"""
result = []
if attr[:1] == attr[-1:] == '+':
result.append(codes['blink'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '*':
result.append(codes['bold'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '_':
result.append(codes['underline'])
attr = attr[1:-1]
result.append(codes[attr])
result.append(text)
result.append(codes['reset'])
return ''.join(result) | [
"def",
"ansiformat",
"(",
"attr",
",",
"text",
")",
":",
"result",
"=",
"[",
"]",
"if",
"attr",
"[",
":",
"1",
"]",
"==",
"attr",
"[",
"-",
"1",
":",
"]",
"==",
"'+'",
":",
"result",
".",
"append",
"(",
"codes",
"[",
"'blink'",
"]",
")",
"attr",
"=",
"attr",
"[",
"1",
":",
"-",
"1",
"]",
"if",
"attr",
"[",
":",
"1",
"]",
"==",
"attr",
"[",
"-",
"1",
":",
"]",
"==",
"'*'",
":",
"result",
".",
"append",
"(",
"codes",
"[",
"'bold'",
"]",
")",
"attr",
"=",
"attr",
"[",
"1",
":",
"-",
"1",
"]",
"if",
"attr",
"[",
":",
"1",
"]",
"==",
"attr",
"[",
"-",
"1",
":",
"]",
"==",
"'_'",
":",
"result",
".",
"append",
"(",
"codes",
"[",
"'underline'",
"]",
")",
"attr",
"=",
"attr",
"[",
"1",
":",
"-",
"1",
"]",
"result",
".",
"append",
"(",
"codes",
"[",
"attr",
"]",
")",
"result",
".",
"append",
"(",
"text",
")",
"result",
".",
"append",
"(",
"codes",
"[",
"'reset'",
"]",
")",
"return",
"''",
".",
"join",
"(",
"result",
")"
] | [
51,
0
] | [
73,
26
] | python | en | ['en', 'error', 'th'] | False |
swap_bbox_format | (bbox_tuple) | Swap between (row0, col0, row1, col1) and (x0, y0, x1, y1) formats. | Swap between (row0, col0, row1, col1) and (x0, y0, x1, y1) formats. | def swap_bbox_format(bbox_tuple):
"""Swap between (row0, col0, row1, col1) and (x0, y0, x1, y1) formats."""
assert len(bbox_tuple) >= 4
return (bbox_tuple[1], bbox_tuple[0], bbox_tuple[3], bbox_tuple[2]) | [
"def",
"swap_bbox_format",
"(",
"bbox_tuple",
")",
":",
"assert",
"len",
"(",
"bbox_tuple",
")",
">=",
"4",
"return",
"(",
"bbox_tuple",
"[",
"1",
"]",
",",
"bbox_tuple",
"[",
"0",
"]",
",",
"bbox_tuple",
"[",
"3",
"]",
",",
"bbox_tuple",
"[",
"2",
"]",
")"
] | [
12,
0
] | [
15,
71
] | python | en | ['en', 'es', 'en'] | True |
draw_on_file | (file_path : pathlib.Path, bbox_list : List, output_folder : pathlib.Path, save_images : bool = True) |
file_path: pathlib.Path to image file.
bbox_list: list of 4-tuples
output_folder: pathlib.Path to save out drawn-on image
|
file_path: pathlib.Path to image file.
bbox_list: list of 4-tuples
output_folder: pathlib.Path to save out drawn-on image
| def draw_on_file(file_path : pathlib.Path, bbox_list : List, output_folder : pathlib.Path, save_images : bool = True):
"""
file_path: pathlib.Path to image file.
bbox_list: list of 4-tuples
output_folder: pathlib.Path to save out drawn-on image
"""
# read image using PIL:
image = Image.open(file_path)
# Draw bounding boxes and save out images
draw = ImageDraw.Draw(image)
NEON_GREEN = '#39FF14'
for bbox in bbox_list:
assert len(bbox) >= 4
draw.rectangle(swap_bbox_format(bbox), outline=NEON_GREEN)
if save_images:
output_path = output_folder.joinpath(file_path.name)
print('Saving to:', output_path)
image.save(output_path) | [
"def",
"draw_on_file",
"(",
"file_path",
":",
"pathlib",
".",
"Path",
",",
"bbox_list",
":",
"List",
",",
"output_folder",
":",
"pathlib",
".",
"Path",
",",
"save_images",
":",
"bool",
"=",
"True",
")",
":",
"# read image using PIL:",
"image",
"=",
"Image",
".",
"open",
"(",
"file_path",
")",
"# Draw bounding boxes and save out images",
"draw",
"=",
"ImageDraw",
".",
"Draw",
"(",
"image",
")",
"NEON_GREEN",
"=",
"'#39FF14'",
"for",
"bbox",
"in",
"bbox_list",
":",
"assert",
"len",
"(",
"bbox",
")",
">=",
"4",
"draw",
".",
"rectangle",
"(",
"swap_bbox_format",
"(",
"bbox",
")",
",",
"outline",
"=",
"NEON_GREEN",
")",
"if",
"save_images",
":",
"output_path",
"=",
"output_folder",
".",
"joinpath",
"(",
"file_path",
".",
"name",
")",
"print",
"(",
"'Saving to:'",
",",
"output_path",
")",
"image",
".",
"save",
"(",
"output_path",
")"
] | [
45,
0
] | [
64,
31
] | python | en | ['en', 'error', 'th'] | False |
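
A runnable sketch of the drawing step in draw_on_file, using an in-memory Pillow image instead of a file on disk (the box coordinates are made up):

```python
from PIL import Image, ImageDraw

def swap_bbox_format(bbox):
    # (row0, col0, row1, col1) -> (x0, y0, x1, y1), as above
    return (bbox[1], bbox[0], bbox[3], bbox[2])

image = Image.new("RGB", (200, 120), "black")
draw = ImageDraw.Draw(image)
NEON_GREEN = "#39FF14"

for bbox in [(10, 20, 60, 90), (30, 100, 110, 180)]:   # row/col boxes
    draw.rectangle(swap_bbox_format(bbox), outline=NEON_GREEN)

image.save("boxes.png")
```
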
cli_message_list | (string_list, list_intro_string=None) | Simple util function for displaying simple lists in cli | Simple util function for displaying simple lists in cli | def cli_message_list(string_list, list_intro_string=None):
"""Simple util function for displaying simple lists in cli"""
if list_intro_string:
cli_message(list_intro_string)
for string in string_list:
cli_message(string) | [
"def",
"cli_message_list",
"(",
"string_list",
",",
"list_intro_string",
"=",
"None",
")",
":",
"if",
"list_intro_string",
":",
"cli_message",
"(",
"list_intro_string",
")",
"for",
"string",
"in",
"string_list",
":",
"cli_message",
"(",
"string",
")"
] | [
39,
0
] | [
44,
27
] | python | en | ['en', 'en', 'en'] | True |
action_list_to_string | (action_list) | Util function for turning an action list into a pretty string | Util function for turning an action list into a pretty string | def action_list_to_string(action_list):
"""Util function for turning an action list into pretty string"""
action_list_string = ""
for idx, action in enumerate(action_list):
action_list_string += "{} ({})".format(
action["name"], action["action"]["class_name"]
)
if idx == len(action_list) - 1:
continue
action_list_string += " => "
return action_list_string | [
"def",
"action_list_to_string",
"(",
"action_list",
")",
":",
"action_list_string",
"=",
"\"\"",
"for",
"idx",
",",
"action",
"in",
"enumerate",
"(",
"action_list",
")",
":",
"action_list_string",
"+=",
"\"{} ({})\"",
".",
"format",
"(",
"action",
"[",
"\"name\"",
"]",
",",
"action",
"[",
"\"action\"",
"]",
"[",
"\"class_name\"",
"]",
")",
"if",
"idx",
"==",
"len",
"(",
"action_list",
")",
"-",
"1",
":",
"continue",
"action_list_string",
"+=",
"\" => \"",
"return",
"action_list_string"
] | [
47,
0
] | [
57,
29
] | python | en | ['en', 'en', 'en'] | True |
cli_message_dict | (
dict_, indent=3, bullet_char="-", message_list=None, recursion_flag=False
) | Util function for displaying nested dicts representing ge objects in cli | Util function for displaying nested dicts representing ge objects in cli | def cli_message_dict(
dict_, indent=3, bullet_char="-", message_list=None, recursion_flag=False
):
"""Util function for displaying nested dicts representing ge objects in cli"""
if message_list is None:
message_list = []
if dict_.get("name"):
name = dict_.pop("name")
message = "{}<cyan>name:</cyan> {}".format(" " * indent, name)
message_list.append(message)
if dict_.get("module_name"):
module_name = dict_.pop("module_name")
message = "{}<cyan>module_name:</cyan> {}".format(" " * indent, module_name)
message_list.append(message)
if dict_.get("class_name"):
class_name = dict_.pop("class_name")
message = "{}<cyan>class_name:</cyan> {}".format(" " * indent, class_name)
message_list.append(message)
if dict_.get("action_list"):
action_list = dict_.pop("action_list")
action_list_string = action_list_to_string(action_list)
message = "{}<cyan>action_list:</cyan> {}".format(
" " * indent, action_list_string
)
message_list.append(message)
sorted_keys = sorted(dict_.keys())
for key in sorted_keys:
if key == "password":
message = "{}<cyan>password:</cyan> ******".format(" " * indent)
message_list.append(message)
continue
if isinstance(dict_[key], dict):
message = "{}<cyan>{}:</cyan>".format(" " * indent, key)
message_list.append(message)
cli_message_dict(
dict_[key],
indent=indent + 2,
message_list=message_list,
recursion_flag=True,
)
else:
message = "{}<cyan>{}:</cyan> {}".format(" " * indent, key, str(dict_[key]))
message_list.append(message)
if not recursion_flag:
if bullet_char and indent > 1:
first = message_list[0]
new_first = first[:1] + bullet_char + first[2:]
message_list[0] = new_first
cli_message_list(message_list) | [
"def",
"cli_message_dict",
"(",
"dict_",
",",
"indent",
"=",
"3",
",",
"bullet_char",
"=",
"\"-\"",
",",
"message_list",
"=",
"None",
",",
"recursion_flag",
"=",
"False",
")",
":",
"if",
"message_list",
"is",
"None",
":",
"message_list",
"=",
"[",
"]",
"if",
"dict_",
".",
"get",
"(",
"\"name\"",
")",
":",
"name",
"=",
"dict_",
".",
"pop",
"(",
"\"name\"",
")",
"message",
"=",
"\"{}<cyan>name:</cyan> {}\"",
".",
"format",
"(",
"\" \"",
"*",
"indent",
",",
"name",
")",
"message_list",
".",
"append",
"(",
"message",
")",
"if",
"dict_",
".",
"get",
"(",
"\"module_name\"",
")",
":",
"module_name",
"=",
"dict_",
".",
"pop",
"(",
"\"module_name\"",
")",
"message",
"=",
"\"{}<cyan>module_name:</cyan> {}\"",
".",
"format",
"(",
"\" \"",
"*",
"indent",
",",
"module_name",
")",
"message_list",
".",
"append",
"(",
"message",
")",
"if",
"dict_",
".",
"get",
"(",
"\"class_name\"",
")",
":",
"class_name",
"=",
"dict_",
".",
"pop",
"(",
"\"class_name\"",
")",
"message",
"=",
"\"{}<cyan>class_name:</cyan> {}\"",
".",
"format",
"(",
"\" \"",
"*",
"indent",
",",
"class_name",
")",
"message_list",
".",
"append",
"(",
"message",
")",
"if",
"dict_",
".",
"get",
"(",
"\"action_list\"",
")",
":",
"action_list",
"=",
"dict_",
".",
"pop",
"(",
"\"action_list\"",
")",
"action_list_string",
"=",
"action_list_to_string",
"(",
"action_list",
")",
"message",
"=",
"\"{}<cyan>action_list:</cyan> {}\"",
".",
"format",
"(",
"\" \"",
"*",
"indent",
",",
"action_list_string",
")",
"message_list",
".",
"append",
"(",
"message",
")",
"sorted_keys",
"=",
"sorted",
"(",
"dict_",
".",
"keys",
"(",
")",
")",
"for",
"key",
"in",
"sorted_keys",
":",
"if",
"key",
"==",
"\"password\"",
":",
"message",
"=",
"\"{}<cyan>password:</cyan> ******\"",
".",
"format",
"(",
"\" \"",
"*",
"indent",
")",
"message_list",
".",
"append",
"(",
"message",
")",
"continue",
"if",
"isinstance",
"(",
"dict_",
"[",
"key",
"]",
",",
"dict",
")",
":",
"message",
"=",
"\"{}<cyan>{}:</cyan>\"",
".",
"format",
"(",
"\" \"",
"*",
"indent",
",",
"key",
")",
"message_list",
".",
"append",
"(",
"message",
")",
"cli_message_dict",
"(",
"dict_",
"[",
"key",
"]",
",",
"indent",
"=",
"indent",
"+",
"2",
",",
"message_list",
"=",
"message_list",
",",
"recursion_flag",
"=",
"True",
",",
")",
"else",
":",
"message",
"=",
"\"{}<cyan>{}:</cyan> {}\"",
".",
"format",
"(",
"\" \"",
"*",
"indent",
",",
"key",
",",
"str",
"(",
"dict_",
"[",
"key",
"]",
")",
")",
"message_list",
".",
"append",
"(",
"message",
")",
"if",
"not",
"recursion_flag",
":",
"if",
"bullet_char",
"and",
"indent",
">",
"1",
":",
"first",
"=",
"message_list",
"[",
"0",
"]",
"new_first",
"=",
"first",
"[",
":",
"1",
"]",
"+",
"bullet_char",
"+",
"first",
"[",
"2",
":",
"]",
"message_list",
"[",
"0",
"]",
"=",
"new_first",
"cli_message_list",
"(",
"message_list",
")"
] | [
60,
0
] | [
108,
38
] | python | en | ['en', 'en', 'en'] | True |
multiply | (a, b) |
'multiply' multiplies two numbers and returns the result.
>>> multiply(5, 10)
50
>>> multiply(-1, 1)
-1
>>> multiply(0.5, 1.5)
0.75
|
'multiply' multiplies two numbers and returns the result. | def multiply(a, b):
"""
'multiply' multiplies two numbers and returns the result.
>>> multiply(5, 10)
50
>>> multiply(-1, 1)
-1
>>> multiply(0.5, 1.5)
0.75
"""
return a*b | [
"def",
"multiply",
"(",
"a",
",",
"b",
")",
":",
"return",
"a",
"*",
"b"
] | [
0,
0
] | [
11,
12
] | python | en | ['en', 'error', 'th'] | False |
DensePassageRetriever.__init__ | (self,
document_store: BaseDocumentStore,
query_embedding_model: Union[Path, str] = "facebook/dpr-question_encoder-single-nq-base",
passage_embedding_model: Union[Path, str] = "facebook/dpr-ctx_encoder-single-nq-base",
model_version: Optional[str] = None,
max_seq_len_query: int = 64,
max_seq_len_passage: int = 256,
use_gpu: bool = True,
batch_size: int = 16,
embed_title: bool = True,
use_fast_tokenizers: bool = True,
infer_tokenizer_classes: bool = False,
similarity_function: str = "dot_product",
progress_bar: bool = True
) |
Init the Retriever incl. the two encoder models from a local or remote model checkpoint.
The checkpoint format matches huggingface transformers' model format
**Example:**
```python
| # remote model from FAIR
| DensePassageRetriever(document_store=your_doc_store,
| query_embedding_model="facebook/dpr-question_encoder-single-nq-base",
| passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base")
| # or from local path
| DensePassageRetriever(document_store=your_doc_store,
| query_embedding_model="model_directory/question-encoder",
| passage_embedding_model="model_directory/context-encoder")
```
:param document_store: An instance of DocumentStore from which to retrieve documents.
:param query_embedding_model: Local path or remote name of question encoder checkpoint. The format equals the
one used by hugging-face transformers' modelhub models
Currently available remote names: ``"facebook/dpr-question_encoder-single-nq-base"``
:param passage_embedding_model: Local path or remote name of passage encoder checkpoint. The format equals the
one used by hugging-face transformers' modelhub models
Currently available remote names: ``"facebook/dpr-ctx_encoder-single-nq-base"``
:param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.
:param max_seq_len_query: Longest length of each query sequence. Maximum number of tokens for the query text. Longer ones will be cut down.
:param max_seq_len_passage: Longest length of each passage/context sequence. Maximum number of tokens for the passage text. Longer ones will be cut down.
:param use_gpu: Whether to use gpu or not
:param batch_size: Number of questions or passages to encode at once
:param embed_title: Whether to concatenate title and passage to a text pair that is then used to create the embedding.
This is the approach used in the original paper and is likely to improve performance if your
titles contain meaningful information for retrieval (topic, entities, etc.).
The title is expected to be present in doc.meta["name"] and can be supplied in the documents
before writing them to the DocumentStore like this:
{"text": "my text", "meta": {"name": "my title"}}.
:param use_fast_tokenizers: Whether to use fast Rust tokenizers
:param infer_tokenizer_classes: Whether to infer tokenizer class from the model config / name.
If `False`, the class always loads `DPRQuestionEncoderTokenizer` and `DPRContextEncoderTokenizer`.
:param similarity_function: Which function to apply for calculating the similarity of query and passage embeddings during training.
Options: `dot_product` (Default) or `cosine`
:param progress_bar: Whether to show a tqdm progress bar or not.
Can be helpful to disable in production deployments to keep the logs clean.
|
Init the Retriever incl. the two encoder models from a local or remote model checkpoint.
The checkpoint format matches huggingface transformers' model format | def __init__(self,
document_store: BaseDocumentStore,
query_embedding_model: Union[Path, str] = "facebook/dpr-question_encoder-single-nq-base",
passage_embedding_model: Union[Path, str] = "facebook/dpr-ctx_encoder-single-nq-base",
model_version: Optional[str] = None,
max_seq_len_query: int = 64,
max_seq_len_passage: int = 256,
use_gpu: bool = True,
batch_size: int = 16,
embed_title: bool = True,
use_fast_tokenizers: bool = True,
infer_tokenizer_classes: bool = False,
similarity_function: str = "dot_product",
progress_bar: bool = True
):
"""
Init the Retriever incl. the two encoder models from a local or remote model checkpoint.
The checkpoint format matches huggingface transformers' model format
**Example:**
```python
| # remote model from FAIR
| DensePassageRetriever(document_store=your_doc_store,
| query_embedding_model="facebook/dpr-question_encoder-single-nq-base",
| passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base")
| # or from local path
| DensePassageRetriever(document_store=your_doc_store,
| query_embedding_model="model_directory/question-encoder",
| passage_embedding_model="model_directory/context-encoder")
```
:param document_store: An instance of DocumentStore from which to retrieve documents.
:param query_embedding_model: Local path or remote name of question encoder checkpoint. The format equals the
one used by hugging-face transformers' modelhub models
Currently available remote names: ``"facebook/dpr-question_encoder-single-nq-base"``
:param passage_embedding_model: Local path or remote name of passage encoder checkpoint. The format equals the
one used by hugging-face transformers' modelhub models
Currently available remote names: ``"facebook/dpr-ctx_encoder-single-nq-base"``
:param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.
:param max_seq_len_query: Longest length of each query sequence. Maximum number of tokens for the query text. Longer ones will be cut down.
:param max_seq_len_passage: Longest length of each passage/context sequence. Maximum number of tokens for the passage text. Longer ones will be cut down.
:param use_gpu: Whether to use gpu or not
:param batch_size: Number of questions or passages to encode at once
:param embed_title: Whether to concatenate title and passage to a text pair that is then used to create the embedding.
This is the approach used in the original paper and is likely to improve performance if your
titles contain meaningful information for retrieval (topic, entities, etc.).
The title is expected to be present in doc.meta["name"] and can be supplied in the documents
before writing them to the DocumentStore like this:
{"text": "my text", "meta": {"name": "my title"}}.
:param use_fast_tokenizers: Whether to use fast Rust tokenizers
:param infer_tokenizer_classes: Whether to infer tokenizer class from the model config / name.
If `False`, the class always loads `DPRQuestionEncoderTokenizer` and `DPRContextEncoderTokenizer`.
:param similarity_function: Which function to apply for calculating the similarity of query and passage embeddings during training.
Options: `dot_product` (Default) or `cosine`
:param progress_bar: Whether to show a tqdm progress bar or not.
Can be helpful to disable in production deployments to keep the logs clean.
"""
self.document_store = document_store
self.batch_size = batch_size
self.max_seq_len_passage = max_seq_len_passage
self.max_seq_len_query = max_seq_len_query
self.progress_bar = progress_bar
if document_store is None:
logger.warning("DensePassageRetriever initialized without a document store. "
"This is fine if you are performing DPR training. "
"Otherwise, please provide a document store in the constructor.")
elif document_store.similarity != "dot_product":
logger.warning(f"You are using a Dense Passage Retriever model with the {document_store.similarity} function. "
"We recommend you use dot_product instead. "
"This can be set when initializing the DocumentStore")
if use_gpu and torch.cuda.is_available():
self.device = torch.device("cuda")
else:
self.device = torch.device("cpu")
self.embed_title = embed_title
self.infer_tokenizer_classes = infer_tokenizer_classes
tokenizers_default_classes = {
"query": "DPRQuestionEncoderTokenizer",
"passage": "DPRContextEncoderTokenizer"
}
if self.infer_tokenizer_classes:
tokenizers_default_classes["query"] = None # type: ignore
tokenizers_default_classes["passage"] = None # type: ignore
# Init & Load Encoders
self.query_tokenizer = Tokenizer.load(pretrained_model_name_or_path=query_embedding_model,
revision=model_version,
do_lower_case=True,
use_fast=use_fast_tokenizers,
tokenizer_class=tokenizers_default_classes["query"])
self.query_encoder = LanguageModel.load(pretrained_model_name_or_path=query_embedding_model,
revision=model_version,
language_model_class="DPRQuestionEncoder")
self.passage_tokenizer = Tokenizer.load(pretrained_model_name_or_path=passage_embedding_model,
revision=model_version,
do_lower_case=True,
use_fast=use_fast_tokenizers,
tokenizer_class=tokenizers_default_classes["passage"])
self.passage_encoder = LanguageModel.load(pretrained_model_name_or_path=passage_embedding_model,
revision=model_version,
language_model_class="DPRContextEncoder")
self.processor = TextSimilarityProcessor(tokenizer=self.query_tokenizer,
passage_tokenizer=self.passage_tokenizer,
max_seq_len_passage=self.max_seq_len_passage,
max_seq_len_query=self.max_seq_len_query,
label_list=["hard_negative", "positive"],
metric="text_similarity_metric",
embed_title=self.embed_title,
num_hard_negatives=0,
num_positives=1)
prediction_head = TextSimilarityHead(similarity_function=similarity_function)
self.model = BiAdaptiveModel(
language_model1=self.query_encoder,
language_model2=self.passage_encoder,
prediction_heads=[prediction_head],
embeds_dropout_prob=0.1,
lm1_output_types=["per_sequence"],
lm2_output_types=["per_sequence"],
device=self.device,
)
self.model.connect_heads_with_processor(self.processor.tasks, require_labels=False) | [
"def",
"__init__",
"(",
"self",
",",
"document_store",
":",
"BaseDocumentStore",
",",
"query_embedding_model",
":",
"Union",
"[",
"Path",
",",
"str",
"]",
"=",
"\"facebook/dpr-question_encoder-single-nq-base\"",
",",
"passage_embedding_model",
":",
"Union",
"[",
"Path",
",",
"str",
"]",
"=",
"\"facebook/dpr-ctx_encoder-single-nq-base\"",
",",
"model_version",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"max_seq_len_query",
":",
"int",
"=",
"64",
",",
"max_seq_len_passage",
":",
"int",
"=",
"256",
",",
"use_gpu",
":",
"bool",
"=",
"True",
",",
"batch_size",
":",
"int",
"=",
"16",
",",
"embed_title",
":",
"bool",
"=",
"True",
",",
"use_fast_tokenizers",
":",
"bool",
"=",
"True",
",",
"infer_tokenizer_classes",
":",
"bool",
"=",
"False",
",",
"similarity_function",
":",
"str",
"=",
"\"dot_product\"",
",",
"progress_bar",
":",
"bool",
"=",
"True",
")",
":",
"self",
".",
"document_store",
"=",
"document_store",
"self",
".",
"batch_size",
"=",
"batch_size",
"self",
".",
"max_seq_len_passage",
"=",
"max_seq_len_passage",
"self",
".",
"max_seq_len_query",
"=",
"max_seq_len_query",
"self",
".",
"progress_bar",
"=",
"progress_bar",
"if",
"document_store",
"is",
"None",
":",
"logger",
".",
"warning",
"(",
"\"DensePassageRetriever initialized without a document store. \"",
"\"This is fine if you are performing DPR training. \"",
"\"Otherwise, please provide a document store in the constructor.\"",
")",
"elif",
"document_store",
".",
"similarity",
"!=",
"\"dot_product\"",
":",
"logger",
".",
"warning",
"(",
"f\"You are using a Dense Passage Retriever model with the {document_store.similarity} function. \"",
"\"We recommend you use dot_product instead. \"",
"\"This can be set when initializing the DocumentStore\"",
")",
"if",
"use_gpu",
"and",
"torch",
".",
"cuda",
".",
"is_available",
"(",
")",
":",
"self",
".",
"device",
"=",
"torch",
".",
"device",
"(",
"\"cuda\"",
")",
"else",
":",
"self",
".",
"device",
"=",
"torch",
".",
"device",
"(",
"\"cpu\"",
")",
"self",
".",
"embed_title",
"=",
"embed_title",
"self",
".",
"infer_tokenizer_classes",
"=",
"infer_tokenizer_classes",
"tokenizers_default_classes",
"=",
"{",
"\"query\"",
":",
"\"DPRQuestionEncoderTokenizer\"",
",",
"\"passage\"",
":",
"\"DPRContextEncoderTokenizer\"",
"}",
"if",
"self",
".",
"infer_tokenizer_classes",
":",
"tokenizers_default_classes",
"[",
"\"query\"",
"]",
"=",
"None",
"# type: ignore",
"tokenizers_default_classes",
"[",
"\"passage\"",
"]",
"=",
"None",
"# type: ignore",
"# Init & Load Encoders",
"self",
".",
"query_tokenizer",
"=",
"Tokenizer",
".",
"load",
"(",
"pretrained_model_name_or_path",
"=",
"query_embedding_model",
",",
"revision",
"=",
"model_version",
",",
"do_lower_case",
"=",
"True",
",",
"use_fast",
"=",
"use_fast_tokenizers",
",",
"tokenizer_class",
"=",
"tokenizers_default_classes",
"[",
"\"query\"",
"]",
")",
"self",
".",
"query_encoder",
"=",
"LanguageModel",
".",
"load",
"(",
"pretrained_model_name_or_path",
"=",
"query_embedding_model",
",",
"revision",
"=",
"model_version",
",",
"language_model_class",
"=",
"\"DPRQuestionEncoder\"",
")",
"self",
".",
"passage_tokenizer",
"=",
"Tokenizer",
".",
"load",
"(",
"pretrained_model_name_or_path",
"=",
"passage_embedding_model",
",",
"revision",
"=",
"model_version",
",",
"do_lower_case",
"=",
"True",
",",
"use_fast",
"=",
"use_fast_tokenizers",
",",
"tokenizer_class",
"=",
"tokenizers_default_classes",
"[",
"\"passage\"",
"]",
")",
"self",
".",
"passage_encoder",
"=",
"LanguageModel",
".",
"load",
"(",
"pretrained_model_name_or_path",
"=",
"passage_embedding_model",
",",
"revision",
"=",
"model_version",
",",
"language_model_class",
"=",
"\"DPRContextEncoder\"",
")",
"self",
".",
"processor",
"=",
"TextSimilarityProcessor",
"(",
"tokenizer",
"=",
"self",
".",
"query_tokenizer",
",",
"passage_tokenizer",
"=",
"self",
".",
"passage_tokenizer",
",",
"max_seq_len_passage",
"=",
"self",
".",
"max_seq_len_passage",
",",
"max_seq_len_query",
"=",
"self",
".",
"max_seq_len_query",
",",
"label_list",
"=",
"[",
"\"hard_negative\"",
",",
"\"positive\"",
"]",
",",
"metric",
"=",
"\"text_similarity_metric\"",
",",
"embed_title",
"=",
"self",
".",
"embed_title",
",",
"num_hard_negatives",
"=",
"0",
",",
"num_positives",
"=",
"1",
")",
"prediction_head",
"=",
"TextSimilarityHead",
"(",
"similarity_function",
"=",
"similarity_function",
")",
"self",
".",
"model",
"=",
"BiAdaptiveModel",
"(",
"language_model1",
"=",
"self",
".",
"query_encoder",
",",
"language_model2",
"=",
"self",
".",
"passage_encoder",
",",
"prediction_heads",
"=",
"[",
"prediction_head",
"]",
",",
"embeds_dropout_prob",
"=",
"0.1",
",",
"lm1_output_types",
"=",
"[",
"\"per_sequence\"",
"]",
",",
"lm2_output_types",
"=",
"[",
"\"per_sequence\"",
"]",
",",
"device",
"=",
"self",
".",
"device",
",",
")",
"self",
".",
"model",
".",
"connect_heads_with_processor",
"(",
"self",
".",
"processor",
".",
"tasks",
",",
"require_labels",
"=",
"False",
")"
] | [
35,
4
] | [
162,
91
] | python | en | ['en', 'error', 'th'] | False |
DensePassageRetriever.retrieve | (self, query: str, filters: dict = None, top_k: int = 10, index: str = None) |
Scan through documents in DocumentStore and return a small number of documents
that are most relevant to the query.
:param query: The query
:param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field
:param top_k: How many documents to return per query.
:param index: The name of the index in the DocumentStore from which to retrieve documents
|
Scan through documents in DocumentStore and return a small number of documents
that are most relevant to the query. | def retrieve(self, query: str, filters: dict = None, top_k: int = 10, index: str = None) -> List[Document]:
"""
Scan through documents in DocumentStore and return a small number of documents
that are most relevant to the query.
:param query: The query
:param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field
:param top_k: How many documents to return per query.
:param index: The name of the index in the DocumentStore from which to retrieve documents
"""
if not self.document_store:
logger.error("Cannot perform retrieve() since DensePassageRetriever initialized with document_store=None")
return []
if index is None:
index = self.document_store.index
query_emb = self.embed_queries(texts=[query])
documents = self.document_store.query_by_embedding(query_emb=query_emb[0], top_k=top_k, filters=filters, index=index)
return documents | [
"def",
"retrieve",
"(",
"self",
",",
"query",
":",
"str",
",",
"filters",
":",
"dict",
"=",
"None",
",",
"top_k",
":",
"int",
"=",
"10",
",",
"index",
":",
"str",
"=",
"None",
")",
"->",
"List",
"[",
"Document",
"]",
":",
"if",
"not",
"self",
".",
"document_store",
":",
"logger",
".",
"error",
"(",
"\"Cannot perform retrieve() since DensePassageRetriever initialized with document_store=None\"",
")",
"return",
"[",
"]",
"if",
"index",
"is",
"None",
":",
"index",
"=",
"self",
".",
"document_store",
".",
"index",
"query_emb",
"=",
"self",
".",
"embed_queries",
"(",
"texts",
"=",
"[",
"query",
"]",
")",
"documents",
"=",
"self",
".",
"document_store",
".",
"query_by_embedding",
"(",
"query_emb",
"=",
"query_emb",
"[",
"0",
"]",
",",
"top_k",
"=",
"top_k",
",",
"filters",
"=",
"filters",
",",
"index",
"=",
"index",
")",
"return",
"documents"
] | [
164,
4
] | [
181,
24
] | python | en | ['en', 'error', 'th'] | False |
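For context on how the retrieve() entry above is typically used, here is a minimal, hedged usage sketch. The import paths, model names, and InMemoryDocumentStore calls are assumptions that vary across Haystack versions, and the toy document is made up.

# Hedged usage sketch for DensePassageRetriever.retrieve(); paths and names below are assumptions.
from haystack.document_store.memory import InMemoryDocumentStore  # assumed module path (Haystack 0.x)
from haystack.retriever.dense import DensePassageRetriever        # assumed module path (Haystack 0.x)

document_store = InMemoryDocumentStore()
document_store.write_documents([
    {"text": "Paris is the capital of France.", "meta": {"name": "geography"}},
])

retriever = DensePassageRetriever(
    document_store=document_store,
    query_embedding_model="facebook/dpr-question_encoder-single-nq-base",  # assumed default
    passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base",     # assumed default
    use_gpu=False,
)
document_store.update_embeddings(retriever)   # precompute passage embeddings via embed_passages()
docs = retriever.retrieve(query="What is the capital of France?", top_k=3)
for doc in docs:
    print(doc.text)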
DensePassageRetriever._get_predictions | (self, dicts) |
Feed a preprocessed dataset to the model and get the actual predictions (forward pass + formatting).
:param dicts: list of dictionaries
examples:[{'query': "where is florida?"}, {'query': "who wrote lord of the rings?"}, ...]
[{'passages': [{
"title": 'Big Little Lies (TV series)',
"text": 'series garnered several accolades. It received..',
"label": 'positive',
"external_id": '18768923'},
{"title": 'Framlingham Castle',
"text": 'Castle on the Hill "Castle on the Hill" is a song by English..',
"label": 'positive',
"external_id": '19930582'}, ...]
:return: dictionary of embeddings for "passages" and "query"
|
Feed a preprocessed dataset to the model and get the actual predictions (forward pass + formatting). | def _get_predictions(self, dicts):
"""
Feed a preprocessed dataset to the model and get the actual predictions (forward pass + formatting).
:param dicts: list of dictionaries
examples:[{'query': "where is florida?"}, {'query': "who wrote lord of the rings?"}, ...]
[{'passages': [{
"title": 'Big Little Lies (TV series)',
"text": 'series garnered several accolades. It received..',
"label": 'positive',
"external_id": '18768923'},
{"title": 'Framlingham Castle',
"text": 'Castle on the Hill "Castle on the Hill" is a song by English..',
"label": 'positive',
"external_id": '19930582'}, ...]
:return: dictionary of embeddings for "passages" and "query"
"""
dataset, tensor_names, _, baskets = self.processor.dataset_from_dicts(
dicts, indices=[i for i in range(len(dicts))], return_baskets=True
)
data_loader = NamedDataLoader(
dataset=dataset, sampler=SequentialSampler(dataset), batch_size=self.batch_size, tensor_names=tensor_names
)
all_embeddings = {"query": [], "passages": []}
self.model.eval()
# When running evaluations etc., we don't want a progress bar for every single query
if len(dataset) == 1:
disable_tqdm = True
else:
disable_tqdm = not self.progress_bar
for i, batch in enumerate(tqdm(data_loader, desc=f"Creating Embeddings", unit=" Batches", disable=disable_tqdm)):
batch = {key: batch[key].to(self.device) for key in batch}
# get logits
with torch.no_grad():
query_embeddings, passage_embeddings = self.model.forward(**batch)[0]
if query_embeddings is not None:
all_embeddings["query"].append(query_embeddings.cpu().numpy())
if passage_embeddings is not None:
all_embeddings["passages"].append(passage_embeddings.cpu().numpy())
if all_embeddings["passages"]:
all_embeddings["passages"] = np.concatenate(all_embeddings["passages"])
if all_embeddings["query"]:
all_embeddings["query"] = np.concatenate(all_embeddings["query"])
return all_embeddings | [
"def",
"_get_predictions",
"(",
"self",
",",
"dicts",
")",
":",
"dataset",
",",
"tensor_names",
",",
"_",
",",
"baskets",
"=",
"self",
".",
"processor",
".",
"dataset_from_dicts",
"(",
"dicts",
",",
"indices",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"dicts",
")",
")",
"]",
",",
"return_baskets",
"=",
"True",
")",
"data_loader",
"=",
"NamedDataLoader",
"(",
"dataset",
"=",
"dataset",
",",
"sampler",
"=",
"SequentialSampler",
"(",
"dataset",
")",
",",
"batch_size",
"=",
"self",
".",
"batch_size",
",",
"tensor_names",
"=",
"tensor_names",
")",
"all_embeddings",
"=",
"{",
"\"query\"",
":",
"[",
"]",
",",
"\"passages\"",
":",
"[",
"]",
"}",
"self",
".",
"model",
".",
"eval",
"(",
")",
"# When running evaluations etc., we don't want a progress bar for every single query",
"if",
"len",
"(",
"dataset",
")",
"==",
"1",
":",
"disable_tqdm",
"=",
"True",
"else",
":",
"disable_tqdm",
"=",
"not",
"self",
".",
"progress_bar",
"for",
"i",
",",
"batch",
"in",
"enumerate",
"(",
"tqdm",
"(",
"data_loader",
",",
"desc",
"=",
"f\"Creating Embeddings\"",
",",
"unit",
"=",
"\" Batches\"",
",",
"disable",
"=",
"disable_tqdm",
")",
")",
":",
"batch",
"=",
"{",
"key",
":",
"batch",
"[",
"key",
"]",
".",
"to",
"(",
"self",
".",
"device",
")",
"for",
"key",
"in",
"batch",
"}",
"# get logits",
"with",
"torch",
".",
"no_grad",
"(",
")",
":",
"query_embeddings",
",",
"passage_embeddings",
"=",
"self",
".",
"model",
".",
"forward",
"(",
"*",
"*",
"batch",
")",
"[",
"0",
"]",
"if",
"query_embeddings",
"is",
"not",
"None",
":",
"all_embeddings",
"[",
"\"query\"",
"]",
".",
"append",
"(",
"query_embeddings",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
")",
"if",
"passage_embeddings",
"is",
"not",
"None",
":",
"all_embeddings",
"[",
"\"passages\"",
"]",
".",
"append",
"(",
"passage_embeddings",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
")",
"if",
"all_embeddings",
"[",
"\"passages\"",
"]",
":",
"all_embeddings",
"[",
"\"passages\"",
"]",
"=",
"np",
".",
"concatenate",
"(",
"all_embeddings",
"[",
"\"passages\"",
"]",
")",
"if",
"all_embeddings",
"[",
"\"query\"",
"]",
":",
"all_embeddings",
"[",
"\"query\"",
"]",
"=",
"np",
".",
"concatenate",
"(",
"all_embeddings",
"[",
"\"query\"",
"]",
")",
"return",
"all_embeddings"
] | [
183,
4
] | [
232,
29
] | python | en | ['en', 'error', 'th'] | False |
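_get_predictions() above boils down to an accumulate-then-concatenate pattern: each batch produces one numpy array per output type, and the arrays are concatenated once at the end. The snippet below is a stripped-down, stand-alone illustration of that pattern with a dummy encoder; in the real method the batches come from a NamedDataLoader and the encoder is the BiAdaptiveModel.

import numpy as np

def collect_embeddings(batches, encode_fn):
    """Accumulate one (batch_size, dim) array per batch, then concatenate once."""
    chunks = []
    for batch in batches:
        chunks.append(np.asarray(encode_fn(batch)))
    return np.concatenate(chunks) if chunks else np.empty((0, 0))

# Toy usage: the "encoder" just averages the token ids of each example.
batches = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9]]]
embeddings = collect_embeddings(batches, lambda batch: [[sum(x) / len(x)] for x in batch])
print(embeddings.shape)  # (3, 1)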
DensePassageRetriever.embed_queries | (self, texts: List[str]) |
Create embeddings for a list of queries using the query encoder
:param texts: Queries to embed
:return: Embeddings, one per input query
|
Create embeddings for a list of queries using the query encoder | def embed_queries(self, texts: List[str]) -> List[np.ndarray]:
"""
Create embeddings for a list of queries using the query encoder
:param texts: Queries to embed
:return: Embeddings, one per input query
"""
queries = [{'query': q} for q in texts]
result = self._get_predictions(queries)["query"]
return result | [
"def",
"embed_queries",
"(",
"self",
",",
"texts",
":",
"List",
"[",
"str",
"]",
")",
"->",
"List",
"[",
"np",
".",
"ndarray",
"]",
":",
"queries",
"=",
"[",
"{",
"'query'",
":",
"q",
"}",
"for",
"q",
"in",
"texts",
"]",
"result",
"=",
"self",
".",
"_get_predictions",
"(",
"queries",
")",
"[",
"\"query\"",
"]",
"return",
"result"
] | [
234,
4
] | [
243,
21
] | python | en | ['en', 'error', 'th'] | False |
DensePassageRetriever.embed_passages | (self, docs: List[Document]) |
Create embeddings for a list of passages using the passage encoder
:param docs: List of Document objects used to represent documents / passages in a standardized way within Haystack.
:return: Embeddings of documents / passages with shape (batch_size, embedding_dim)
|
Create embeddings for a list of passages using the passage encoder | def embed_passages(self, docs: List[Document]) -> List[np.ndarray]:
"""
Create embeddings for a list of passages using the passage encoder
:param docs: List of Document objects used to represent documents / passages in a standardized way within Haystack.
:return: Embeddings of documents / passages with shape (batch_size, embedding_dim)
"""
passages = [{'passages': [{
"title": d.meta["name"] if d.meta and "name" in d.meta else "",
"text": d.text,
"label": d.meta["label"] if d.meta and "label" in d.meta else "positive",
"external_id": d.id}]
} for d in docs]
embeddings = self._get_predictions(passages)["passages"]
return embeddings | [
"def",
"embed_passages",
"(",
"self",
",",
"docs",
":",
"List",
"[",
"Document",
"]",
")",
"->",
"List",
"[",
"np",
".",
"ndarray",
"]",
":",
"passages",
"=",
"[",
"{",
"'passages'",
":",
"[",
"{",
"\"title\"",
":",
"d",
".",
"meta",
"[",
"\"name\"",
"]",
"if",
"d",
".",
"meta",
"and",
"\"name\"",
"in",
"d",
".",
"meta",
"else",
"\"\"",
",",
"\"text\"",
":",
"d",
".",
"text",
",",
"\"label\"",
":",
"d",
".",
"meta",
"[",
"\"label\"",
"]",
"if",
"d",
".",
"meta",
"and",
"\"label\"",
"in",
"d",
".",
"meta",
"else",
"\"positive\"",
",",
"\"external_id\"",
":",
"d",
".",
"id",
"}",
"]",
"}",
"for",
"d",
"in",
"docs",
"]",
"embeddings",
"=",
"self",
".",
"_get_predictions",
"(",
"passages",
")",
"[",
"\"passages\"",
"]",
"return",
"embeddings"
] | [
245,
4
] | [
260,
25
] | python | en | ['en', 'error', 'th'] | False |
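embed_passages() above mainly reshapes Haystack Document objects into the nested dict format that the TextSimilarityProcessor expects. The stand-alone sketch below shows that conversion with a hypothetical lightweight Doc class standing in for haystack.Document.

from dataclasses import dataclass, field

@dataclass
class Doc:  # hypothetical stand-in for haystack.Document, for illustration only
    id: str
    text: str
    meta: dict = field(default_factory=dict)

def to_passage_dicts(docs):
    """Mirror the dict structure built inside embed_passages()."""
    return [{"passages": [{
        "title": d.meta.get("name", ""),
        "text": d.text,
        "label": d.meta.get("label", "positive"),
        "external_id": d.id,
    }]} for d in docs]

print(to_passage_dicts([Doc(id="42", text="Some passage text.", meta={"name": "Some title"})]))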
DensePassageRetriever.train | (self,
data_dir: str,
train_filename: str,
dev_filename: str = None,
test_filename: str = None,
batch_size: int = 2,
embed_title: bool = True,
num_hard_negatives: int = 1,
num_positives: int = 1,
n_epochs: int = 3,
evaluate_every: int = 1000,
n_gpu: int = 1,
learning_rate: float = 1e-5,
epsilon: float = 1e-08,
weight_decay: float = 0.0,
num_warmup_steps: int = 100,
grad_acc_steps: int = 1,
optimizer_name: str = "TransformersAdamW",
optimizer_correct_bias: bool = True,
save_dir: str = "../saved_models/dpr",
query_encoder_save_dir: str = "query_encoder",
passage_encoder_save_dir: str = "passage_encoder"
) |
Train a DensePassageRetriever model
:param data_dir: Directory where training file, dev file and test file are present
:param train_filename: training filename
:param dev_filename: development set filename, file to be used by model in eval step of training
:param test_filename: test set filename, file to be used by model in test step after training
:param batch_size: total number of samples in 1 batch of data
:param embed_title: whether to concatenate passage title with each passage. The default setting in official DPR embeds passage title with the corresponding passage
:param num_hard_negatives: number of hard negative passages (passages which are very similar to the query (high BM25 score) but do not contain the answer)
:param num_positives: number of positive passages
:param n_epochs: number of epochs to train the model on
:param evaluate_every: number of training steps after which evaluation is run
:param n_gpu: number of gpus to train on
:param learning_rate: learning rate of optimizer
:param epsilon: epsilon parameter of optimizer
:param weight_decay: weight decay parameter of optimizer
:param grad_acc_steps: number of steps to accumulate gradient over before back-propagation is done
:param optimizer_name: what optimizer to use (default: TransformersAdamW)
:param num_warmup_steps: number of warmup steps
:param optimizer_correct_bias: Whether to correct bias in optimizer
:param save_dir: directory where models are saved
:param query_encoder_save_dir: directory inside save_dir where query_encoder model files are saved
:param passage_encoder_save_dir: directory inside save_dir where passage_encoder model files are saved
|
Train a DensePassageRetriever model
:param data_dir: Directory where training file, dev file and test file are present
:param train_filename: training filename
:param dev_filename: development set filename, file to be used by model in eval step of training
:param test_filename: test set filename, file to be used by model in test step after training
:param batch_size: total number of samples in 1 batch of data
:param embed_title: whether to concatenate passage title with each passage. The default setting in official DPR embeds passage title with the corresponding passage
:param num_hard_negatives: number of hard negative passages (passages which are very similar to the query (high BM25 score) but do not contain the answer)
:param num_positives: number of positive passages
:param n_epochs: number of epochs to train the model on
:param evaluate_every: number of training steps after which evaluation is run
:param n_gpu: number of gpus to train on
:param learning_rate: learning rate of optimizer
:param epsilon: epsilon parameter of optimizer
:param weight_decay: weight decay parameter of optimizer
:param grad_acc_steps: number of steps to accumulate gradient over before back-propagation is done
:param optimizer_name: what optimizer to use (default: TransformersAdamW)
:param num_warmup_steps: number of warmup steps
:param optimizer_correct_bias: Whether to correct bias in optimizer
:param save_dir: directory where models are saved
:param query_encoder_save_dir: directory inside save_dir where query_encoder model files are saved
:param passage_encoder_save_dir: directory inside save_dir where passage_encoder model files are saved
| def train(self,
data_dir: str,
train_filename: str,
dev_filename: str = None,
test_filename: str = None,
batch_size: int = 2,
embed_title: bool = True,
num_hard_negatives: int = 1,
num_positives: int = 1,
n_epochs: int = 3,
evaluate_every: int = 1000,
n_gpu: int = 1,
learning_rate: float = 1e-5,
epsilon: float = 1e-08,
weight_decay: float = 0.0,
num_warmup_steps: int = 100,
grad_acc_steps: int = 1,
optimizer_name: str = "TransformersAdamW",
optimizer_correct_bias: bool = True,
save_dir: str = "../saved_models/dpr",
query_encoder_save_dir: str = "query_encoder",
passage_encoder_save_dir: str = "passage_encoder"
):
"""
Train a DensePassageRetriever model
:param data_dir: Directory where training file, dev file and test file are present
:param train_filename: training filename
:param dev_filename: development set filename, file to be used by model in eval step of training
:param test_filename: test set filename, file to be used by model in test step after training
:param batch_size: total number of samples in 1 batch of data
:param embed_title: whether to concatenate passage title with each passage. The default setting in official DPR embeds passage title with the corresponding passage
:param num_hard_negatives: number of hard negative passages (passages which are very similar to the query (high BM25 score) but do not contain the answer)
:param num_positives: number of positive passages
:param n_epochs: number of epochs to train the model on
:param evaluate_every: number of training steps after which evaluation is run
:param n_gpu: number of gpus to train on
:param learning_rate: learning rate of optimizer
:param epsilon: epsilon parameter of optimizer
:param weight_decay: weight decay parameter of optimizer
:param grad_acc_steps: number of steps to accumulate gradient over before back-propagation is done
:param optimizer_name: what optimizer to use (default: TransformersAdamW)
:param num_warmup_steps: number of warmup steps
:param optimizer_correct_bias: Whether to correct bias in optimizer
:param save_dir: directory where models are saved
:param query_encoder_save_dir: directory inside save_dir where query_encoder model files are saved
:param passage_encoder_save_dir: directory inside save_dir where passage_encoder model files are saved
"""
self.embed_title = embed_title
self.processor = TextSimilarityProcessor(tokenizer=self.query_tokenizer,
passage_tokenizer=self.passage_tokenizer,
max_seq_len_passage=self.max_seq_len_passage,
max_seq_len_query=self.max_seq_len_query,
label_list=["hard_negative", "positive"],
metric="text_similarity_metric",
data_dir=data_dir,
train_filename=train_filename,
dev_filename=dev_filename,
test_filename=test_filename,
embed_title=self.embed_title,
num_hard_negatives=num_hard_negatives,
num_positives=num_positives)
self.model.connect_heads_with_processor(self.processor.tasks, require_labels=True)
data_silo = DataSilo(processor=self.processor, batch_size=batch_size, distributed=False)
# 5. Create an optimizer
self.model, optimizer, lr_schedule = initialize_optimizer(
model=self.model,
learning_rate=learning_rate,
optimizer_opts={"name": optimizer_name, "correct_bias": optimizer_correct_bias,
"weight_decay": weight_decay, "eps": epsilon},
schedule_opts={"name": "LinearWarmup", "num_warmup_steps": num_warmup_steps},
n_batches=len(data_silo.loaders["train"]),
n_epochs=n_epochs,
grad_acc_steps=grad_acc_steps,
device=self.device
)
# 6. Feed everything to the Trainer, which takes care of growing our model and evaluates it from time to time
trainer = Trainer(
model=self.model,
optimizer=optimizer,
data_silo=data_silo,
epochs=n_epochs,
n_gpu=n_gpu,
lr_schedule=lr_schedule,
evaluate_every=evaluate_every,
device=self.device,
)
# 7. Let it grow! Watch the tracked metrics live on the public mlflow server: https://public-mlflow.deepset.ai
trainer.train()
self.model.save(Path(save_dir), lm1_name=query_encoder_save_dir, lm2_name=passage_encoder_save_dir)
self.query_tokenizer.save_pretrained(f"{save_dir}/{query_encoder_save_dir}")
self.passage_tokenizer.save_pretrained(f"{save_dir}/{passage_encoder_save_dir}") | [
"def",
"train",
"(",
"self",
",",
"data_dir",
":",
"str",
",",
"train_filename",
":",
"str",
",",
"dev_filename",
":",
"str",
"=",
"None",
",",
"test_filename",
":",
"str",
"=",
"None",
",",
"batch_size",
":",
"int",
"=",
"2",
",",
"embed_title",
":",
"bool",
"=",
"True",
",",
"num_hard_negatives",
":",
"int",
"=",
"1",
",",
"num_positives",
":",
"int",
"=",
"1",
",",
"n_epochs",
":",
"int",
"=",
"3",
",",
"evaluate_every",
":",
"int",
"=",
"1000",
",",
"n_gpu",
":",
"int",
"=",
"1",
",",
"learning_rate",
":",
"float",
"=",
"1e-5",
",",
"epsilon",
":",
"float",
"=",
"1e-08",
",",
"weight_decay",
":",
"float",
"=",
"0.0",
",",
"num_warmup_steps",
":",
"int",
"=",
"100",
",",
"grad_acc_steps",
":",
"int",
"=",
"1",
",",
"optimizer_name",
":",
"str",
"=",
"\"TransformersAdamW\"",
",",
"optimizer_correct_bias",
":",
"bool",
"=",
"True",
",",
"save_dir",
":",
"str",
"=",
"\"../saved_models/dpr\"",
",",
"query_encoder_save_dir",
":",
"str",
"=",
"\"query_encoder\"",
",",
"passage_encoder_save_dir",
":",
"str",
"=",
"\"passage_encoder\"",
")",
":",
"self",
".",
"embed_title",
"=",
"embed_title",
"self",
".",
"processor",
"=",
"TextSimilarityProcessor",
"(",
"tokenizer",
"=",
"self",
".",
"query_tokenizer",
",",
"passage_tokenizer",
"=",
"self",
".",
"passage_tokenizer",
",",
"max_seq_len_passage",
"=",
"self",
".",
"max_seq_len_passage",
",",
"max_seq_len_query",
"=",
"self",
".",
"max_seq_len_query",
",",
"label_list",
"=",
"[",
"\"hard_negative\"",
",",
"\"positive\"",
"]",
",",
"metric",
"=",
"\"text_similarity_metric\"",
",",
"data_dir",
"=",
"data_dir",
",",
"train_filename",
"=",
"train_filename",
",",
"dev_filename",
"=",
"dev_filename",
",",
"test_filename",
"=",
"test_filename",
",",
"embed_title",
"=",
"self",
".",
"embed_title",
",",
"num_hard_negatives",
"=",
"num_hard_negatives",
",",
"num_positives",
"=",
"num_positives",
")",
"self",
".",
"model",
".",
"connect_heads_with_processor",
"(",
"self",
".",
"processor",
".",
"tasks",
",",
"require_labels",
"=",
"True",
")",
"data_silo",
"=",
"DataSilo",
"(",
"processor",
"=",
"self",
".",
"processor",
",",
"batch_size",
"=",
"batch_size",
",",
"distributed",
"=",
"False",
")",
"# 5. Create an optimizer",
"self",
".",
"model",
",",
"optimizer",
",",
"lr_schedule",
"=",
"initialize_optimizer",
"(",
"model",
"=",
"self",
".",
"model",
",",
"learning_rate",
"=",
"learning_rate",
",",
"optimizer_opts",
"=",
"{",
"\"name\"",
":",
"optimizer_name",
",",
"\"correct_bias\"",
":",
"optimizer_correct_bias",
",",
"\"weight_decay\"",
":",
"weight_decay",
",",
"\"eps\"",
":",
"epsilon",
"}",
",",
"schedule_opts",
"=",
"{",
"\"name\"",
":",
"\"LinearWarmup\"",
",",
"\"num_warmup_steps\"",
":",
"num_warmup_steps",
"}",
",",
"n_batches",
"=",
"len",
"(",
"data_silo",
".",
"loaders",
"[",
"\"train\"",
"]",
")",
",",
"n_epochs",
"=",
"n_epochs",
",",
"grad_acc_steps",
"=",
"grad_acc_steps",
",",
"device",
"=",
"self",
".",
"device",
")",
"# 6. Feed everything to the Trainer, which keeps care of growing our model and evaluates it from time to time",
"trainer",
"=",
"Trainer",
"(",
"model",
"=",
"self",
".",
"model",
",",
"optimizer",
"=",
"optimizer",
",",
"data_silo",
"=",
"data_silo",
",",
"epochs",
"=",
"n_epochs",
",",
"n_gpu",
"=",
"n_gpu",
",",
"lr_schedule",
"=",
"lr_schedule",
",",
"evaluate_every",
"=",
"evaluate_every",
",",
"device",
"=",
"self",
".",
"device",
",",
")",
"# 7. Let it grow! Watch the tracked metrics live on the public mlflow server: https://public-mlflow.deepset.ai",
"trainer",
".",
"train",
"(",
")",
"self",
".",
"model",
".",
"save",
"(",
"Path",
"(",
"save_dir",
")",
",",
"lm1_name",
"=",
"query_encoder_save_dir",
",",
"lm2_name",
"=",
"passage_encoder_save_dir",
")",
"self",
".",
"query_tokenizer",
".",
"save_pretrained",
"(",
"f\"{save_dir}/{query_encoder_save_dir}\"",
")",
"self",
".",
"passage_tokenizer",
".",
"save_pretrained",
"(",
"f\"{save_dir}/{passage_encoder_save_dir}\"",
")"
] | [
262,
4
] | [
359,
88
] | python | en | ['en', 'error', 'th'] | False |
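A hedged sketch of invoking train() above. The JSON layout written below follows the original DPR training format as far as I know (question / answers / positive_ctxs / hard_negative_ctxs), but the exact keys, the paths, and the fact that `retriever` is reused from the earlier retrieve() sketch are all assumptions, not guarantees.

# NOTE: hedged sketch only; file format, paths and the reused `retriever` are assumptions.
import json
from pathlib import Path

sample = [{
    "question": "What is the capital of France?",
    "answers": ["Paris"],
    "positive_ctxs": [{"title": "France", "text": "Paris is the capital of France."}],
    "negative_ctxs": [],
    "hard_negative_ctxs": [{"title": "Berlin", "text": "Berlin is the capital of Germany."}],
}]
Path("data/dpr_toy").mkdir(parents=True, exist_ok=True)
with open("data/dpr_toy/train.json", "w") as f:
    json.dump(sample, f)

# `retriever` is assumed to be the DensePassageRetriever constructed in the earlier sketch.
retriever.train(
    data_dir="data/dpr_toy",
    train_filename="train.json",
    dev_filename="train.json",   # toy setup: reuse the same file
    test_filename="train.json",
    n_epochs=1,
    batch_size=2,
    num_hard_negatives=1,
    save_dir="saved_models/dpr_toy",
)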
DensePassageRetriever.save | (self, save_dir: Union[Path, str], query_encoder_dir: str = "query_encoder",
passage_encoder_dir: str = "passage_encoder") |
Save DensePassageRetriever to the specified directory.
:param save_dir: Directory to save to.
:param query_encoder_dir: Directory in save_dir that contains query encoder model.
:param passage_encoder_dir: Directory in save_dir that contains passage encoder model.
:return: None
|
Save DensePassageRetriever to the specified directory. | def save(self, save_dir: Union[Path, str], query_encoder_dir: str = "query_encoder",
passage_encoder_dir: str = "passage_encoder"):
"""
Save DensePassageRetriever to the specified directory.
:param save_dir: Directory to save to.
:param query_encoder_dir: Directory in save_dir that contains query encoder model.
:param passage_encoder_dir: Directory in save_dir that contains passage encoder model.
:return: None
"""
save_dir = Path(save_dir)
self.model.save(save_dir, lm1_name=query_encoder_dir, lm2_name=passage_encoder_dir)
save_dir = str(save_dir)
self.query_tokenizer.save_pretrained(save_dir + f"/{query_encoder_dir}")
self.passage_tokenizer.save_pretrained(save_dir + f"/{passage_encoder_dir}") | [
"def",
"save",
"(",
"self",
",",
"save_dir",
":",
"Union",
"[",
"Path",
",",
"str",
"]",
",",
"query_encoder_dir",
":",
"str",
"=",
"\"query_encoder\"",
",",
"passage_encoder_dir",
":",
"str",
"=",
"\"passage_encoder\"",
")",
":",
"save_dir",
"=",
"Path",
"(",
"save_dir",
")",
"self",
".",
"model",
".",
"save",
"(",
"save_dir",
",",
"lm1_name",
"=",
"query_encoder_dir",
",",
"lm2_name",
"=",
"passage_encoder_dir",
")",
"save_dir",
"=",
"str",
"(",
"save_dir",
")",
"self",
".",
"query_tokenizer",
".",
"save_pretrained",
"(",
"save_dir",
"+",
"f\"/{query_encoder_dir}\"",
")",
"self",
".",
"passage_tokenizer",
".",
"save_pretrained",
"(",
"save_dir",
"+",
"f\"/{passage_encoder_dir}\"",
")"
] | [
361,
4
] | [
375,
84
] | python | en | ['en', 'error', 'th'] | False |
DensePassageRetriever.load | (cls,
load_dir: Union[Path, str],
document_store: BaseDocumentStore,
max_seq_len_query: int = 64,
max_seq_len_passage: int = 256,
use_gpu: bool = True,
batch_size: int = 16,
embed_title: bool = True,
use_fast_tokenizers: bool = True,
similarity_function: str = "dot_product",
query_encoder_dir: str = "query_encoder",
passage_encoder_dir: str = "passage_encoder"
) |
Load DensePassageRetriever from the specified directory.
|
Load DensePassageRetriever from the specified directory.
| def load(cls,
load_dir: Union[Path, str],
document_store: BaseDocumentStore,
max_seq_len_query: int = 64,
max_seq_len_passage: int = 256,
use_gpu: bool = True,
batch_size: int = 16,
embed_title: bool = True,
use_fast_tokenizers: bool = True,
similarity_function: str = "dot_product",
query_encoder_dir: str = "query_encoder",
passage_encoder_dir: str = "passage_encoder"
):
"""
Load DensePassageRetriever from the specified directory.
"""
load_dir = Path(load_dir)
dpr = cls(
document_store=document_store,
query_embedding_model=Path(load_dir) / query_encoder_dir,
passage_embedding_model=Path(load_dir) / passage_encoder_dir,
max_seq_len_query=max_seq_len_query,
max_seq_len_passage=max_seq_len_passage,
use_gpu=use_gpu,
batch_size=batch_size,
embed_title=embed_title,
use_fast_tokenizers=use_fast_tokenizers,
similarity_function=similarity_function
)
logger.info(f"DPR model loaded from {load_dir}")
return dpr | [
"def",
"load",
"(",
"cls",
",",
"load_dir",
":",
"Union",
"[",
"Path",
",",
"str",
"]",
",",
"document_store",
":",
"BaseDocumentStore",
",",
"max_seq_len_query",
":",
"int",
"=",
"64",
",",
"max_seq_len_passage",
":",
"int",
"=",
"256",
",",
"use_gpu",
":",
"bool",
"=",
"True",
",",
"batch_size",
":",
"int",
"=",
"16",
",",
"embed_title",
":",
"bool",
"=",
"True",
",",
"use_fast_tokenizers",
":",
"bool",
"=",
"True",
",",
"similarity_function",
":",
"str",
"=",
"\"dot_product\"",
",",
"query_encoder_dir",
":",
"str",
"=",
"\"query_encoder\"",
",",
"passage_encoder_dir",
":",
"str",
"=",
"\"passage_encoder\"",
")",
":",
"load_dir",
"=",
"Path",
"(",
"load_dir",
")",
"dpr",
"=",
"cls",
"(",
"document_store",
"=",
"document_store",
",",
"query_embedding_model",
"=",
"Path",
"(",
"load_dir",
")",
"/",
"query_encoder_dir",
",",
"passage_embedding_model",
"=",
"Path",
"(",
"load_dir",
")",
"/",
"passage_encoder_dir",
",",
"max_seq_len_query",
"=",
"max_seq_len_query",
",",
"max_seq_len_passage",
"=",
"max_seq_len_passage",
",",
"use_gpu",
"=",
"use_gpu",
",",
"batch_size",
"=",
"batch_size",
",",
"embed_title",
"=",
"embed_title",
",",
"use_fast_tokenizers",
"=",
"use_fast_tokenizers",
",",
"similarity_function",
"=",
"similarity_function",
")",
"logger",
".",
"info",
"(",
"f\"DPR model loaded from {load_dir}\"",
")",
"return",
"dpr"
] | [
378,
4
] | [
410,
18
] | python | en | ['en', 'error', 'th'] | False |
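save() and load() above are symmetric: save() writes the two encoders and their tokenizers into query_encoder/ and passage_encoder/ sub-directories, and load() rebuilds a retriever from the same layout. A short, hedged round-trip sketch, continuing the earlier sketches; directory names are placeholders.

# NOTE: continues the earlier sketches; `retriever` and `document_store` are assumed to exist.
retriever.save(save_dir="saved_models/dpr_toy")             # writes query_encoder/ and passage_encoder/
reloaded = DensePassageRetriever.load(
    load_dir="saved_models/dpr_toy",
    document_store=document_store,
)
print(type(reloaded).__name__)  # DensePassageRetriever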
EmbeddingRetriever.__init__ | (
self,
document_store: BaseDocumentStore,
embedding_model: str,
model_version: Optional[str] = None,
use_gpu: bool = True,
model_format: str = "farm",
pooling_strategy: str = "reduce_mean",
emb_extraction_layer: int = -1,
) |
:param document_store: An instance of DocumentStore from which to retrieve documents.
:param embedding_model: Local path or name of model in Hugging Face's model hub such as ``'deepset/sentence_bert'``
:param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.
:param use_gpu: Whether to use gpu or not
:param model_format: Name of framework that was used for saving the model. Options:
- ``'farm'``
- ``'transformers'``
- ``'sentence_transformers'``
:param pooling_strategy: Strategy for combining the embeddings from the model (for farm / transformers models only).
Options:
- ``'cls_token'`` (sentence vector)
- ``'reduce_mean'`` (sentence vector)
- ``'reduce_max'`` (sentence vector)
- ``'per_token'`` (individual token vectors)
:param emb_extraction_layer: Number of layer from which the embeddings shall be extracted (for farm / transformers models only).
Default: -1 (very last layer).
|
:param document_store: An instance of DocumentStore from which to retrieve documents.
:param embedding_model: Local path or name of model in Hugging Face's model hub such as ``'deepset/sentence_bert'``
:param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.
:param use_gpu: Whether to use gpu or not
:param model_format: Name of framework that was used for saving the model. Options: | def __init__(
self,
document_store: BaseDocumentStore,
embedding_model: str,
model_version: Optional[str] = None,
use_gpu: bool = True,
model_format: str = "farm",
pooling_strategy: str = "reduce_mean",
emb_extraction_layer: int = -1,
):
"""
:param document_store: An instance of DocumentStore from which to retrieve documents.
:param embedding_model: Local path or name of model in Hugging Face's model hub such as ``'deepset/sentence_bert'``
:param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.
:param use_gpu: Whether to use gpu or not
:param model_format: Name of framework that was used for saving the model. Options:
- ``'farm'``
- ``'transformers'``
- ``'sentence_transformers'``
:param pooling_strategy: Strategy for combining the embeddings from the model (for farm / transformers models only).
Options:
- ``'cls_token'`` (sentence vector)
- ``'reduce_mean'`` (sentence vector)
- ``'reduce_max'`` (sentence vector)
- ``'per_token'`` (individual token vectors)
:param emb_extraction_layer: Number of layer from which the embeddings shall be extracted (for farm / transformers models only).
Default: -1 (very last layer).
"""
self.document_store = document_store
self.model_format = model_format
self.pooling_strategy = pooling_strategy
self.emb_extraction_layer = emb_extraction_layer
logger.info(f"Init retriever using embeddings of model {embedding_model}")
if model_format == "farm" or model_format == "transformers":
self.embedding_model = Inferencer.load(
embedding_model, revision=model_version, task_type="embeddings", extraction_strategy=self.pooling_strategy,
extraction_layer=self.emb_extraction_layer, gpu=use_gpu, batch_size=4, max_seq_len=512, num_processes=0
)
# Check that document_store has the right similarity function
similarity = document_store.similarity
# If we are using a sentence transformer model
if "sentence" in embedding_model.lower() and similarity != "cosine":
logger.warning(f"You seem to be using a Sentence Transformer with the {similarity} function. "
f"We recommend using cosine instead. "
f"This can be set when initializing the DocumentStore")
elif "dpr" in embedding_model.lower() and similarity != "dot_product":
logger.warning(f"You seem to be using a DPR model with the {similarity} function. "
f"We recommend using dot_product instead. "
f"This can be set when initializing the DocumentStore")
elif model_format == "sentence_transformers":
try:
from sentence_transformers import SentenceTransformer
except ImportError:
raise ImportError("Can't find package `sentence-transformers` \n"
"You can install it via `pip install sentence-transformers` \n"
"For details see https://github.com/UKPLab/sentence-transformers ")
# pretrained embedding models coming from: https://github.com/UKPLab/sentence-transformers#pretrained-models
# e.g. 'roberta-base-nli-stsb-mean-tokens'
if use_gpu:
device = "cuda"
else:
device = "cpu"
self.embedding_model = SentenceTransformer(embedding_model, device=device)
if document_store.similarity != "cosine":
logger.warning(
f"You are using a Sentence Transformer with the {document_store.similarity} function. "
f"We recommend using cosine instead. "
f"This can be set when initializing the DocumentStore")
else:
raise NotImplementedError | [
"def",
"__init__",
"(",
"self",
",",
"document_store",
":",
"BaseDocumentStore",
",",
"embedding_model",
":",
"str",
",",
"model_version",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"use_gpu",
":",
"bool",
"=",
"True",
",",
"model_format",
":",
"str",
"=",
"\"farm\"",
",",
"pooling_strategy",
":",
"str",
"=",
"\"reduce_mean\"",
",",
"emb_extraction_layer",
":",
"int",
"=",
"-",
"1",
",",
")",
":",
"self",
".",
"document_store",
"=",
"document_store",
"self",
".",
"model_format",
"=",
"model_format",
"self",
".",
"pooling_strategy",
"=",
"pooling_strategy",
"self",
".",
"emb_extraction_layer",
"=",
"emb_extraction_layer",
"logger",
".",
"info",
"(",
"f\"Init retriever using embeddings of model {embedding_model}\"",
")",
"if",
"model_format",
"==",
"\"farm\"",
"or",
"model_format",
"==",
"\"transformers\"",
":",
"self",
".",
"embedding_model",
"=",
"Inferencer",
".",
"load",
"(",
"embedding_model",
",",
"revision",
"=",
"model_version",
",",
"task_type",
"=",
"\"embeddings\"",
",",
"extraction_strategy",
"=",
"self",
".",
"pooling_strategy",
",",
"extraction_layer",
"=",
"self",
".",
"emb_extraction_layer",
",",
"gpu",
"=",
"use_gpu",
",",
"batch_size",
"=",
"4",
",",
"max_seq_len",
"=",
"512",
",",
"num_processes",
"=",
"0",
")",
"# Check that document_store has the right similarity function",
"similarity",
"=",
"document_store",
".",
"similarity",
"# If we are using a sentence transformer model",
"if",
"\"sentence\"",
"in",
"embedding_model",
".",
"lower",
"(",
")",
"and",
"similarity",
"!=",
"\"cosine\"",
":",
"logger",
".",
"warning",
"(",
"f\"You seem to be using a Sentence Transformer with the {similarity} function. \"",
"f\"We recommend using cosine instead. \"",
"f\"This can be set when initializing the DocumentStore\"",
")",
"elif",
"\"dpr\"",
"in",
"embedding_model",
".",
"lower",
"(",
")",
"and",
"similarity",
"!=",
"\"dot_product\"",
":",
"logger",
".",
"warning",
"(",
"f\"You seem to be using a DPR model with the {similarity} function. \"",
"f\"We recommend using dot_product instead. \"",
"f\"This can be set when initializing the DocumentStore\"",
")",
"elif",
"model_format",
"==",
"\"sentence_transformers\"",
":",
"try",
":",
"from",
"sentence_transformers",
"import",
"SentenceTransformer",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"\"Can't find package `sentence-transformers` \\n\"",
"\"You can install it via `pip install sentence-transformers` \\n\"",
"\"For details see https://github.com/UKPLab/sentence-transformers \"",
")",
"# pretrained embedding models coming from: https://github.com/UKPLab/sentence-transformers#pretrained-models",
"# e.g. 'roberta-base-nli-stsb-mean-tokens'",
"if",
"use_gpu",
":",
"device",
"=",
"\"cuda\"",
"else",
":",
"device",
"=",
"\"cpu\"",
"self",
".",
"embedding_model",
"=",
"SentenceTransformer",
"(",
"embedding_model",
",",
"device",
"=",
"device",
")",
"if",
"document_store",
".",
"similarity",
"!=",
"\"cosine\"",
":",
"logger",
".",
"warning",
"(",
"f\"You are using a Sentence Transformer with the {document_store.similarity} function. \"",
"f\"We recommend using cosine instead. \"",
"f\"This can be set when initializing the DocumentStore\"",
")",
"else",
":",
"raise",
"NotImplementedError"
] | [
414,
4
] | [
488,
37
] | python | en | ['en', 'error', 'th'] | False |
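For the EmbeddingRetriever constructor above, here is a minimal, hedged construction sketch. The import paths, the `similarity` keyword on InMemoryDocumentStore, and the model name are assumptions tied to older Haystack versions.

# Hedged sketch; module paths, the similarity kwarg and the model name are assumptions.
from haystack.document_store.memory import InMemoryDocumentStore   # assumed module path
from haystack.retriever.dense import EmbeddingRetriever            # assumed module path

document_store = InMemoryDocumentStore(similarity="cosine")  # cosine keeps the warning above quiet
retriever = EmbeddingRetriever(
    document_store=document_store,
    embedding_model="deepset/sentence_bert",  # model name taken from the docstring above
    model_format="farm",
    use_gpu=False,
)
query_vec = retriever.embed_queries(["How does dense retrieval work?"])[0]
print(len(query_vec))  # embedding dimensionality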
EmbeddingRetriever.retrieve | (self, query: str, filters: dict = None, top_k: int = 10, index: str = None) |
Scan through documents in DocumentStore and return a small number of documents
that are most relevant to the query.
:param query: The query
:param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field
:param top_k: How many documents to return per query.
:param index: The name of the index in the DocumentStore from which to retrieve documents
|
Scan through documents in DocumentStore and return a small number of documents
that are most relevant to the query. | def retrieve(self, query: str, filters: dict = None, top_k: int = 10, index: str = None) -> List[Document]:
"""
Scan through documents in DocumentStore and return a small number of documents
that are most relevant to the query.
:param query: The query
:param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field
:param top_k: How many documents to return per query.
:param index: The name of the index in the DocumentStore from which to retrieve documents
"""
if index is None:
index = self.document_store.index
query_emb = self.embed(texts=[query])
documents = self.document_store.query_by_embedding(query_emb=query_emb[0], filters=filters,
top_k=top_k, index=index)
return documents | [
"def",
"retrieve",
"(",
"self",
",",
"query",
":",
"str",
",",
"filters",
":",
"dict",
"=",
"None",
",",
"top_k",
":",
"int",
"=",
"10",
",",
"index",
":",
"str",
"=",
"None",
")",
"->",
"List",
"[",
"Document",
"]",
":",
"if",
"index",
"is",
"None",
":",
"index",
"=",
"self",
".",
"document_store",
".",
"index",
"query_emb",
"=",
"self",
".",
"embed",
"(",
"texts",
"=",
"[",
"query",
"]",
")",
"documents",
"=",
"self",
".",
"document_store",
".",
"query_by_embedding",
"(",
"query_emb",
"=",
"query_emb",
"[",
"0",
"]",
",",
"filters",
"=",
"filters",
",",
"top_k",
"=",
"top_k",
",",
"index",
"=",
"index",
")",
"return",
"documents"
] | [
490,
4
] | [
505,
24
] | python | en | ['en', 'error', 'th'] | False |
EmbeddingRetriever.embed | (self, texts: Union[List[str], str]) |
Create embeddings for each text in a list of texts using the retriever's model (`self.embedding_model`)
:param texts: Texts to embed
:return: List of embeddings (one per input text). Each embedding is a list of floats.
|
Create embeddings for each text in a list of texts using the retriever's model (`self.embedding_model`) | def embed(self, texts: Union[List[str], str]) -> List[np.ndarray]:
"""
Create embeddings for each text in a list of texts using the retriever's model (`self.embedding_model`)
:param texts: Texts to embed
:return: List of embeddings (one per input text). Each embedding is a list of floats.
"""
# for backward compatibility: cast pure str input
if isinstance(texts, str):
texts = [texts]
assert isinstance(texts, list), "Expecting a list of texts, i.e. create_embeddings(texts=['text1',...])"
if self.model_format == "farm" or self.model_format == "transformers":
# TODO: FARM's `sample_to_features_text` need to fix following warning -
# tokenization_utils.py:460: FutureWarning: `is_pretokenized` is deprecated and will be removed in a future version, use `is_split_into_words` instead.
emb = self.embedding_model.inference_from_dicts(dicts=[{"text": t} for t in texts])
emb = [(r["vec"]) for r in emb]
elif self.model_format == "sentence_transformers":
# text is single string, sentence-transformers needs a list of strings
# get back list of numpy embedding vectors
emb = self.embedding_model.encode(texts)
emb = [r for r in emb]
return emb | [
"def",
"embed",
"(",
"self",
",",
"texts",
":",
"Union",
"[",
"List",
"[",
"str",
"]",
",",
"str",
"]",
")",
"->",
"List",
"[",
"np",
".",
"ndarray",
"]",
":",
"# for backward compatibility: cast pure str input",
"if",
"isinstance",
"(",
"texts",
",",
"str",
")",
":",
"texts",
"=",
"[",
"texts",
"]",
"assert",
"isinstance",
"(",
"texts",
",",
"list",
")",
",",
"\"Expecting a list of texts, i.e. create_embeddings(texts=['text1',...])\"",
"if",
"self",
".",
"model_format",
"==",
"\"farm\"",
"or",
"self",
".",
"model_format",
"==",
"\"transformers\"",
":",
"# TODO: FARM's `sample_to_features_text` need to fix following warning -",
"# tokenization_utils.py:460: FutureWarning: `is_pretokenized` is deprecated and will be removed in a future version, use `is_split_into_words` instead.",
"emb",
"=",
"self",
".",
"embedding_model",
".",
"inference_from_dicts",
"(",
"dicts",
"=",
"[",
"{",
"\"text\"",
":",
"t",
"}",
"for",
"t",
"in",
"texts",
"]",
")",
"emb",
"=",
"[",
"(",
"r",
"[",
"\"vec\"",
"]",
")",
"for",
"r",
"in",
"emb",
"]",
"elif",
"self",
".",
"model_format",
"==",
"\"sentence_transformers\"",
":",
"# text is single string, sentence-transformers needs a list of strings",
"# get back list of numpy embedding vectors",
"emb",
"=",
"self",
".",
"embedding_model",
".",
"encode",
"(",
"texts",
")",
"emb",
"=",
"[",
"r",
"for",
"r",
"in",
"emb",
"]",
"return",
"emb"
] | [
507,
4
] | [
530,
18
] | python | en | ['en', 'error', 'th'] | False |
EmbeddingRetriever.embed_queries | (self, texts: List[str]) |
Create embeddings for a list of queries. For this Retriever type: The same as calling .embed()
:param texts: Queries to embed
:return: Embeddings, one per input query
|
Create embeddings for a list of queries. For this Retriever type: The same as calling .embed() | def embed_queries(self, texts: List[str]) -> List[np.ndarray]:
"""
Create embeddings for a list of queries. For this Retriever type: The same as calling .embed()
:param texts: Queries to embed
:return: Embeddings, one per input query
"""
return self.embed(texts) | [
"def",
"embed_queries",
"(",
"self",
",",
"texts",
":",
"List",
"[",
"str",
"]",
")",
"->",
"List",
"[",
"np",
".",
"ndarray",
"]",
":",
"return",
"self",
".",
"embed",
"(",
"texts",
")"
] | [
532,
4
] | [
539,
32
] | python | en | ['en', 'error', 'th'] | False |
EmbeddingRetriever.embed_passages | (self, docs: List[Document]) |
Create embeddings for a list of passages. For this Retriever type: The same as calling .embed()
:param docs: List of documents to embed
:return: Embeddings, one per input passage
|
Create embeddings for a list of passages. For this Retriever type: The same as calling .embed() | def embed_passages(self, docs: List[Document]) -> List[np.ndarray]:
"""
Create embeddings for a list of passages. For this Retriever type: The same as calling .embed()
:param docs: List of documents to embed
:return: Embeddings, one per input passage
"""
texts = [d.text for d in docs]
return self.embed(texts) | [
"def",
"embed_passages",
"(",
"self",
",",
"docs",
":",
"List",
"[",
"Document",
"]",
")",
"->",
"List",
"[",
"np",
".",
"ndarray",
"]",
":",
"texts",
"=",
"[",
"d",
".",
"text",
"for",
"d",
"in",
"docs",
"]",
"return",
"self",
".",
"embed",
"(",
"texts",
")"
] | [
541,
4
] | [
550,
32
] | python | en | ['en', 'error', 'th'] | False |
ManualBatchKwargsGenerator._build_batch_kwargs | (self, batch_parameters) | Build batch kwargs from a partition id. | Build batch kwargs from a partition id. | def _build_batch_kwargs(self, batch_parameters):
"""Build batch kwargs from a partition id."""
partition_id = batch_parameters.pop("partition_id", None)
batch_kwargs = self._datasource.process_batch_parameters(batch_parameters)
if partition_id:
asset_definition = self._get_data_asset_config(
data_asset_name=batch_parameters.get("data_asset_name")
)
if isinstance(asset_definition, list):
for batch_request in asset_definition:
try:
if batch_request["partition_id"] == partition_id:
batch_kwargs = deepcopy(batch_request)
batch_kwargs.pop("partition_id")
except KeyError:
pass
elif isinstance(asset_definition, dict):
try:
if asset_definition["partition_id"] == partition_id:
batch_kwargs = deepcopy(asset_definition)
batch_kwargs.pop("partition_id")
except KeyError:
pass
else:
batch_kwargs = next(
self._get_iterator(
data_asset_name=batch_parameters.get("data_asset_name")
)
)
if batch_kwargs is not None:
return batch_kwargs
else:
raise BatchKwargsError(
"Unable to find batch_kwargs for given batch_parameters",
batch_parameters,
) | [
"def",
"_build_batch_kwargs",
"(",
"self",
",",
"batch_parameters",
")",
":",
"partition_id",
"=",
"batch_parameters",
".",
"pop",
"(",
"\"partition_id\"",
",",
"None",
")",
"batch_kwargs",
"=",
"self",
".",
"_datasource",
".",
"process_batch_parameters",
"(",
"batch_parameters",
")",
"if",
"partition_id",
":",
"asset_definition",
"=",
"self",
".",
"_get_data_asset_config",
"(",
"data_asset_name",
"=",
"batch_parameters",
".",
"get",
"(",
"\"data_asset_name\"",
")",
")",
"if",
"isinstance",
"(",
"asset_definition",
",",
"list",
")",
":",
"for",
"batch_request",
"in",
"asset_definition",
":",
"try",
":",
"if",
"batch_request",
"[",
"\"partition_id\"",
"]",
"==",
"partition_id",
":",
"batch_kwargs",
"=",
"deepcopy",
"(",
"batch_request",
")",
"batch_kwargs",
".",
"pop",
"(",
"\"partition_id\"",
")",
"except",
"KeyError",
":",
"pass",
"elif",
"isinstance",
"(",
"asset_definition",
",",
"dict",
")",
":",
"try",
":",
"if",
"asset_definition",
"[",
"\"partition_id\"",
"]",
"==",
"partition_id",
":",
"batch_kwargs",
"=",
"deepcopy",
"(",
"asset_definition",
")",
"batch_kwargs",
".",
"pop",
"(",
"\"partition_id\"",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"batch_kwargs",
"=",
"next",
"(",
"self",
".",
"_get_iterator",
"(",
"data_asset_name",
"=",
"batch_parameters",
".",
"get",
"(",
"\"data_asset_name\"",
")",
")",
")",
"if",
"batch_kwargs",
"is",
"not",
"None",
":",
"return",
"batch_kwargs",
"else",
":",
"raise",
"BatchKwargsError",
"(",
"\"Unable to find batch_kwargs for given batch_parameters\"",
",",
"batch_parameters",
",",
")"
] | [
106,
4
] | [
142,
13
] | python | en | ['en', 'en', 'sw'] | True |
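The core of _build_batch_kwargs() above is: given a partition_id, scan the configured asset definition (a dict or a list of dicts) for an entry whose "partition_id" matches, deep-copy it, and drop the key. A self-contained illustration of that matching step:

from copy import deepcopy

def match_partition(asset_definition, partition_id):
    """Return a copy of the matching batch request with its partition_id key removed."""
    requests = asset_definition if isinstance(asset_definition, list) else [asset_definition]
    for request in requests:
        if request.get("partition_id") == partition_id:
            batch_kwargs = deepcopy(request)
            batch_kwargs.pop("partition_id")
            return batch_kwargs
    return None

asset = [
    {"partition_id": "2020-01", "path": "data/jan.csv"},
    {"partition_id": "2020-02", "path": "data/feb.csv"},
]
print(match_partition(asset, "2020-02"))  # {'path': 'data/feb.csv'}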
Analysis.get_company_data | (self, mid) | Looks up stock ticker information for a company via its Freebase ID.
| Looks up stock ticker information for a company via its Freebase ID.
| def get_company_data(self, mid):
"""Looks up stock ticker information for a company via its Freebase ID.
"""
try:
ticker_bindings = self.make_wikidata_request(
MID_TO_TICKER_QUERY % mid)
crypto_bindings = self.make_wikidata_request(
MID_TO_CRYPTO_QUERY % mid)
except HTTPError as e:
self.logs.error('Wikidata request failed: %s' % e)
return None
# Collect the data from the response.
companies = []
if ticker_bindings:
for binding in ticker_bindings:
try:
name = binding['companyLabel']['value']
except KeyError:
name = None
try:
root = binding['rootLabel']['value']
except KeyError:
root = None
try:
ticker = binding['tickerLabel']['value']
except KeyError:
ticker = None
try:
exchange = binding['exchangeNameLabel']['value']
except KeyError:
exchange = None
data = {'name': name,
'ticker': ticker,
'exchange': exchange}
# Add the root if there is one.
if root and root != name:
data['root'] = root
# Add to the list unless we already have the same entry.
if data not in companies:
self.logs.debug('Adding company data: %s' % data)
companies.append(data)
else:
self.logs.warn(
'Skipping duplicate company data: %s' % data)
if crypto_bindings:
for binding in crypto_bindings:
try:
name = binding['entityLabel']['value']
except KeyError:
name = None
try:
symbol = binding['symbolLabel']['value']
except KeyError:
symbol = None
data = {'name': name,
'ticker': symbol,
'exchange': 'Crypto'}
# Add to the list unless we already have the same entry.
if data not in companies:
self.logs.debug('Adding crypto data: %s' % data)
companies.append(data)
else:
self.logs.warn('Skipping duplicate crypto data: %s' % data)
# Prefer returning None to an empty list.
if not companies:
return None
return companies | [
"def",
"get_company_data",
"(",
"self",
",",
"mid",
")",
":",
"try",
":",
"ticker_bindings",
"=",
"self",
".",
"make_wikidata_request",
"(",
"MID_TO_TICKER_QUERY",
"%",
"mid",
")",
"crypto_bindings",
"=",
"self",
".",
"make_wikidata_request",
"(",
"MID_TO_CRYPTO_QUERY",
"%",
"mid",
")",
"except",
"HTTPError",
"as",
"e",
":",
"self",
".",
"logs",
".",
"error",
"(",
"'Wikidata request failed: %s'",
"%",
"e",
")",
"return",
"None",
"# Collect the data from the response.",
"companies",
"=",
"[",
"]",
"if",
"ticker_bindings",
":",
"for",
"binding",
"in",
"ticker_bindings",
":",
"try",
":",
"name",
"=",
"binding",
"[",
"'companyLabel'",
"]",
"[",
"'value'",
"]",
"except",
"KeyError",
":",
"name",
"=",
"None",
"try",
":",
"root",
"=",
"binding",
"[",
"'rootLabel'",
"]",
"[",
"'value'",
"]",
"except",
"KeyError",
":",
"root",
"=",
"None",
"try",
":",
"ticker",
"=",
"binding",
"[",
"'tickerLabel'",
"]",
"[",
"'value'",
"]",
"except",
"KeyError",
":",
"ticker",
"=",
"None",
"try",
":",
"exchange",
"=",
"binding",
"[",
"'exchangeNameLabel'",
"]",
"[",
"'value'",
"]",
"except",
"KeyError",
":",
"exchange",
"=",
"None",
"data",
"=",
"{",
"'name'",
":",
"name",
",",
"'ticker'",
":",
"ticker",
",",
"'exchange'",
":",
"exchange",
"}",
"# Add the root if there is one.",
"if",
"root",
"and",
"root",
"!=",
"name",
":",
"data",
"[",
"'root'",
"]",
"=",
"root",
"# Add to the list unless we already have the same entry.",
"if",
"data",
"not",
"in",
"companies",
":",
"self",
".",
"logs",
".",
"debug",
"(",
"'Adding company data: %s'",
"%",
"data",
")",
"companies",
".",
"append",
"(",
"data",
")",
"else",
":",
"self",
".",
"logs",
".",
"warn",
"(",
"'Skipping duplicate company data: %s'",
"%",
"data",
")",
"if",
"crypto_bindings",
":",
"for",
"binding",
"in",
"crypto_bindings",
":",
"try",
":",
"name",
"=",
"binding",
"[",
"'entityLabel'",
"]",
"[",
"'value'",
"]",
"except",
"KeyError",
":",
"name",
"=",
"None",
"try",
":",
"symbol",
"=",
"binding",
"[",
"'symbolLabel'",
"]",
"[",
"'value'",
"]",
"except",
"KeyError",
":",
"symbol",
"=",
"None",
"data",
"=",
"{",
"'name'",
":",
"name",
",",
"'ticker'",
":",
"symbol",
",",
"'exchange'",
":",
"'Crypto'",
"}",
"# Add to the list unless we already have the same entry.",
"if",
"data",
"not",
"in",
"companies",
":",
"self",
".",
"logs",
".",
"debug",
"(",
"'Adding crypto data: %s'",
"%",
"data",
")",
"companies",
".",
"append",
"(",
"data",
")",
"else",
":",
"self",
".",
"logs",
".",
"warn",
"(",
"'Skipping duplicate crypto data: %s'",
"%",
"data",
")",
"# Prefer returning None to an empty list.",
"if",
"not",
"companies",
":",
"return",
"None",
"return",
"companies"
] | [
78,
4
] | [
157,
24
] | python | en | ['en', 'en', 'en'] | True |
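Each Wikidata SPARQL binding handled by get_company_data() above is a dict of {variable: {"value": ...}} entries, and the method defensively tolerates missing variables. A stand-alone illustration of that extraction, using a made-up binding rather than a live Wikidata response:

def binding_to_company(binding):
    """Pull company fields out of one SPARQL binding, tolerating missing keys."""
    def value(key):
        try:
            return binding[key]["value"]
        except KeyError:
            return None
    return {"name": value("companyLabel"),
            "ticker": value("tickerLabel"),
            "exchange": value("exchangeNameLabel")}

sample_binding = {"companyLabel": {"value": "Example Corp"},
                  "tickerLabel": {"value": "EXM"},
                  "exchangeNameLabel": {"value": "NASDAQ"}}
print(binding_to_company(sample_binding))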
Analysis.find_companies | (self, tweet) | Finds mentions of companies in a tweet. | Finds mentions of companies in a tweet. | def find_companies(self, tweet):
"""Finds mentions of companies in a tweet."""
if not tweet:
self.logs.warn('No tweet to find companies.')
return None
# Use the text of the tweet with any mentions expanded to improve
# entity detection.
text = self.get_expanded_text(tweet)
if not text:
self.logs.error('Failed to get text from tweet: %s' % tweet)
return None
# Run entity detection.
document = language.Document(
content=text,
type_=language.Document.Type.PLAIN_TEXT,
language='en')
entities = self.language_client.analyze_entities(
request={'document': document}).entities
self.logs.debug('Found entities: %s' %
entities)
# Collect all entities which are publicly traded companies, i.e.
# entities which have a known stock ticker symbol.
companies = []
for entity in entities:
# Use the Freebase ID of the entity to find company data. Skip any
# entity which doesn't have a Freebase ID (unless we find one via
# the Twitter handle).
name = entity.name
metadata = entity.metadata
try:
mid = metadata['mid']
except KeyError:
self.logs.debug('No MID found for entity: %s' % name)
continue
company_data = self.get_company_data(mid)
# Skip any entity for which we can't find any company data.
if not company_data:
self.logs.debug('No company data found for entity: %s (%s)' %
(name, mid))
continue
self.logs.debug('Found company data: %s' % company_data)
# Extract the sentiment from the text. This assumes that the
# sentiment is the same for all companies, which isn't always true.
sentiment = self.get_sentiment(text)
for company in company_data:
# Associate the sentiment with the company.
self.logs.debug('Using sentiment for company: %f %s' %
(sentiment, company))
company['sentiment'] = sentiment
# Add the company to the list unless we already have the same
# ticker.
tickers = [existing['ticker'] for existing in companies]
if not company['ticker'] in tickers:
companies.append(company)
else:
self.logs.warn(
'Skipping company with duplicate ticker: %s' % company)
return companies | [
"def",
"find_companies",
"(",
"self",
",",
"tweet",
")",
":",
"if",
"not",
"tweet",
":",
"self",
".",
"logs",
".",
"warn",
"(",
"'No tweet to find companies.'",
")",
"return",
"None",
"# Use the text of the tweet with any mentions expanded to improve",
"# entity detection.",
"text",
"=",
"self",
".",
"get_expanded_text",
"(",
"tweet",
")",
"if",
"not",
"text",
":",
"self",
".",
"logs",
".",
"error",
"(",
"'Failed to get text from tweet: %s'",
"%",
"tweet",
")",
"return",
"None",
"# Run entity detection.",
"document",
"=",
"language",
".",
"Document",
"(",
"content",
"=",
"text",
",",
"type_",
"=",
"language",
".",
"Document",
".",
"Type",
".",
"PLAIN_TEXT",
",",
"language",
"=",
"'en'",
")",
"entities",
"=",
"self",
".",
"language_client",
".",
"analyze_entities",
"(",
"request",
"=",
"{",
"'document'",
":",
"document",
"}",
")",
".",
"entities",
"self",
".",
"logs",
".",
"debug",
"(",
"'Found entities: %s'",
"%",
"entities",
")",
"# Collect all entities which are publicly traded companies, i.e.",
"# entities which have a known stock ticker symbol.",
"companies",
"=",
"[",
"]",
"for",
"entity",
"in",
"entities",
":",
"# Use the Freebase ID of the entity to find company data. Skip any",
"# entity which doesn't have a Freebase ID (unless we find one via",
"# the Twitter handle).",
"name",
"=",
"entity",
".",
"name",
"metadata",
"=",
"entity",
".",
"metadata",
"try",
":",
"mid",
"=",
"metadata",
"[",
"'mid'",
"]",
"except",
"KeyError",
":",
"self",
".",
"logs",
".",
"debug",
"(",
"'No MID found for entity: %s'",
"%",
"name",
")",
"continue",
"company_data",
"=",
"self",
".",
"get_company_data",
"(",
"mid",
")",
"# Skip any entity for which we can't find any company data.",
"if",
"not",
"company_data",
":",
"self",
".",
"logs",
".",
"debug",
"(",
"'No company data found for entity: %s (%s)'",
"%",
"(",
"name",
",",
"mid",
")",
")",
"continue",
"self",
".",
"logs",
".",
"debug",
"(",
"'Found company data: %s'",
"%",
"company_data",
")",
"# Extract the sentiment from the text. This assumes that the",
"# sentiment is the same for all companies, which isn't always true.",
"sentiment",
"=",
"self",
".",
"get_sentiment",
"(",
"text",
")",
"for",
"company",
"in",
"company_data",
":",
"# Associate the sentiment with the company.",
"self",
".",
"logs",
".",
"debug",
"(",
"'Using sentiment for company: %f %s'",
"%",
"(",
"sentiment",
",",
"company",
")",
")",
"company",
"[",
"'sentiment'",
"]",
"=",
"sentiment",
"# Add the company to the list unless we already have the same",
"# ticker.",
"tickers",
"=",
"[",
"existing",
"[",
"'ticker'",
"]",
"for",
"existing",
"in",
"companies",
"]",
"if",
"not",
"company",
"[",
"'ticker'",
"]",
"in",
"tickers",
":",
"companies",
".",
"append",
"(",
"company",
")",
"else",
":",
"self",
".",
"logs",
".",
"warn",
"(",
"'Skipping company with duplicate ticker: %s'",
"%",
"company",
")",
"return",
"companies"
] | [
159,
4
] | [
228,
24
] | python | en | ['en', 'en', 'en'] | True |
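The last step of find_companies() above keeps only the first company seen per ticker. An equivalent stand-alone helper that produces the same deduplicated result, shown with made-up candidates:

def dedup_by_ticker(companies):
    """Keep the first company entry seen for each ticker symbol."""
    seen, unique = set(), []
    for company in companies:
        if company["ticker"] not in seen:
            seen.add(company["ticker"])
            unique.append(company)
    return unique

candidates = [{"name": "Example Corp", "ticker": "EXM", "sentiment": 0.4},
              {"name": "Example Corporation", "ticker": "EXM", "sentiment": 0.4},
              {"name": "Other Inc", "ticker": "OTH", "sentiment": -0.1}]
print(dedup_by_ticker(candidates))  # two entries: EXM and OTH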
Analysis.get_expanded_text | (self, tweet) | Retrieves the text from a tweet with any @mentions expanded to
their full names.
| Retrieves the text from a tweet with any | def get_expanded_text(self, tweet):
"""Retrieves the text from a tweet with any @mentions expanded to
their full names.
"""
if not tweet:
self.logs.warn('No tweet to expand text.')
return None
try:
text = self.twitter.get_tweet_text(tweet)
mentions = tweet['entities']['user_mentions']
except KeyError:
self.logs.error('Malformed tweet: %s' % tweet)
return None
if not text:
self.logs.warn('Empty text.')
return None
if not mentions:
self.logs.debug('No mentions.')
return text
self.logs.debug('Using mentions: %s' % mentions)
for mention in mentions:
try:
screen_name = '@%s' % mention['screen_name']
name = mention['name']
except KeyError:
self.logs.warn('Malformed mention: %s' % mention)
continue
self.logs.debug('Expanding mention: %s %s' % (screen_name, name))
pattern = compile(screen_name, IGNORECASE)
text = pattern.sub(name, text)
return text | [
"def",
"get_expanded_text",
"(",
"self",
",",
"tweet",
")",
":",
"if",
"not",
"tweet",
":",
"self",
".",
"logs",
".",
"warn",
"(",
"'No tweet to expand text.'",
")",
"return",
"None",
"try",
":",
"text",
"=",
"self",
".",
"twitter",
".",
"get_tweet_text",
"(",
"tweet",
")",
"mentions",
"=",
"tweet",
"[",
"'entities'",
"]",
"[",
"'user_mentions'",
"]",
"except",
"KeyError",
":",
"self",
".",
"logs",
".",
"error",
"(",
"'Malformed tweet: %s'",
"%",
"tweet",
")",
"return",
"None",
"if",
"not",
"text",
":",
"self",
".",
"logs",
".",
"warn",
"(",
"'Empty text.'",
")",
"return",
"None",
"if",
"not",
"mentions",
":",
"self",
".",
"logs",
".",
"debug",
"(",
"'No mentions.'",
")",
"return",
"text",
"self",
".",
"logs",
".",
"debug",
"(",
"'Using mentions: %s'",
"%",
"mentions",
")",
"for",
"mention",
"in",
"mentions",
":",
"try",
":",
"screen_name",
"=",
"'@%s'",
"%",
"mention",
"[",
"'screen_name'",
"]",
"name",
"=",
"mention",
"[",
"'name'",
"]",
"except",
"KeyError",
":",
"self",
".",
"logs",
".",
"warn",
"(",
"'Malformed mention: %s'",
"%",
"mention",
")",
"continue",
"self",
".",
"logs",
".",
"debug",
"(",
"'Expanding mention: %s %s'",
"%",
"(",
"screen_name",
",",
"name",
")",
")",
"pattern",
"=",
"compile",
"(",
"screen_name",
",",
"IGNORECASE",
")",
"text",
"=",
"pattern",
".",
"sub",
"(",
"name",
",",
"text",
")",
"return",
"text"
] | [
230,
4
] | [
267,
19
] | python | en | ['en', 'en', 'en'] | True |
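The mention expansion in get_expanded_text() above is a case-insensitive regex substitution of each @screen_name with the user's display name. A self-contained illustration with made-up tweet data:

from re import compile, IGNORECASE

def expand_mentions(text, mentions):
    """Replace each @screen_name with the corresponding display name."""
    for mention in mentions:
        pattern = compile("@%s" % mention["screen_name"], IGNORECASE)
        text = pattern.sub(mention["name"], text)
    return text

print(expand_mentions("Big announcement from @Apple today",
                      [{"screen_name": "Apple", "name": "Apple Inc."}]))
# -> Big announcement from Apple Inc. today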
Analysis.make_wikidata_request | (self, query) | Makes a request to the Wikidata SPARQL API. | Makes a request to the Wikidata SPARQL API. | def make_wikidata_request(self, query):
"""Makes a request to the Wikidata SPARQL API."""
query_url = WIKIDATA_QUERY_URL % quote_plus(query)
self.logs.debug('Wikidata query: %s' % query_url)
response = get(query_url, headers=WIKIDATA_QUERY_HEADERS)
response.raise_for_status()
try:
response_json = response.json()
except ValueError:
self.logs.error('Failed to decode JSON response: %s' % response)
return None
self.logs.debug('Wikidata response: %s' % response_json)
try:
results = response_json['results']
bindings = results['bindings']
except KeyError:
self.logs.error('Malformed Wikidata response: %s' % response_json)
return None
return bindings | [
"def",
"make_wikidata_request",
"(",
"self",
",",
"query",
")",
":",
"query_url",
"=",
"WIKIDATA_QUERY_URL",
"%",
"quote_plus",
"(",
"query",
")",
"self",
".",
"logs",
".",
"debug",
"(",
"'Wikidata query: %s'",
"%",
"query_url",
")",
"response",
"=",
"get",
"(",
"query_url",
",",
"headers",
"=",
"WIKIDATA_QUERY_HEADERS",
")",
"response",
".",
"raise_for_status",
"(",
")",
"try",
":",
"response_json",
"=",
"response",
".",
"json",
"(",
")",
"except",
"ValueError",
":",
"self",
".",
"logs",
".",
"error",
"(",
"'Failed to decode JSON response: %s'",
"%",
"response",
")",
"return",
"None",
"self",
".",
"logs",
".",
"debug",
"(",
"'Wikidata response: %s'",
"%",
"response_json",
")",
"try",
":",
"results",
"=",
"response_json",
"[",
"'results'",
"]",
"bindings",
"=",
"results",
"[",
"'bindings'",
"]",
"except",
"KeyError",
":",
"self",
".",
"logs",
".",
"error",
"(",
"'Malformed Wikidata response: %s'",
"%",
"response_json",
")",
"return",
"None",
"return",
"bindings"
] | [
270,
4
] | [
293,
23
] | python | en | ['en', 'en', 'en'] | True |
Analysis.get_sentiment | (self, text) | Extracts a sentiment score [-1, 1] from text. | Extracts a sentiment score [-1, 1] from text. | def get_sentiment(self, text):
"""Extracts a sentiment score [-1, 1] from text."""
if not text:
self.logs.warn('No sentiment for empty text.')
return 0
document = language.Document(
content=text,
type_=language.Document.Type.PLAIN_TEXT,
language='en')
sentiment = self.language_client.analyze_sentiment(
request={'document': document}).document_sentiment
self.logs.debug(
'Sentiment score and magnitude for text: %f %f "%s"' %
(sentiment.score, sentiment.magnitude, text))
return sentiment.score | [
"def",
"get_sentiment",
"(",
"self",
",",
"text",
")",
":",
"if",
"not",
"text",
":",
"self",
".",
"logs",
".",
"warn",
"(",
"'No sentiment for empty text.'",
")",
"return",
"0",
"document",
"=",
"language",
".",
"Document",
"(",
"content",
"=",
"text",
",",
"type_",
"=",
"language",
".",
"Document",
".",
"Type",
".",
"PLAIN_TEXT",
",",
"language",
"=",
"'en'",
")",
"sentiment",
"=",
"self",
".",
"language_client",
".",
"analyze_sentiment",
"(",
"request",
"=",
"{",
"'document'",
":",
"document",
"}",
")",
".",
"document_sentiment",
"self",
".",
"logs",
".",
"debug",
"(",
"'Sentiment score and magnitude for text: %f %f \"%s\"'",
"%",
"(",
"sentiment",
".",
"score",
",",
"sentiment",
".",
"magnitude",
",",
"text",
")",
")",
"return",
"sentiment",
".",
"score"
] | [
295,
4
] | [
313,
30
] | python | en | ['en', 'en', 'en'] | True |
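A hedged sketch of the sentiment call that get_sentiment wraps; it requires Google Cloud credentials, and binding the language_v1 module to the name `language` is an assumption about the imports used upstream:

# Analyze sentiment of a short English text with the google-cloud-language
# client, mirroring the call pattern in the function above.
from google.cloud import language_v1 as language

client = language.LanguageServiceClient()
document = language.Document(
    content='What a wonderful day!',
    type_=language.Document.Type.PLAIN_TEXT,
    language='en')
sentiment = client.analyze_sentiment(
    request={'document': document}).document_sentiment
print(sentiment.score, sentiment.magnitude)  # score lies in [-1, 1]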
_filter_xpath_grouping | (xpath) |
This method removes the outer parentheses for xpath grouping.
The xpath converter will break otherwise.
Example:
"(//button[@type='submit'])[1]" becomes "//button[@type='submit'][1]"
|
This method removes the outer parentheses for xpath grouping.
The xpath converter will break otherwise.
Example:
"(//button[ | def _filter_xpath_grouping(xpath):
"""
This method removes the outer parentheses for xpath grouping.
The xpath converter will break otherwise.
Example:
"(//button[@type='submit'])[1]" becomes "//button[@type='submit'][1]"
"""
# First remove the first open parentheses
xpath = xpath[1:]
# Next remove the last closed parentheses
index = xpath.rfind(')')
if index == -1:
raise XpathException("Invalid or unsupported Xpath: %s" % xpath)
xpath = xpath[:index] + xpath[index + 1:]
return xpath | [
"def",
"_filter_xpath_grouping",
"(",
"xpath",
")",
":",
"# First remove the first open parentheses",
"xpath",
"=",
"xpath",
"[",
"1",
":",
"]",
"# Next remove the last closed parentheses",
"index",
"=",
"xpath",
".",
"rfind",
"(",
"')'",
")",
"if",
"index",
"==",
"-",
"1",
":",
"raise",
"XpathException",
"(",
"\"Invalid or unsupported Xpath: %s\"",
"%",
"xpath",
")",
"xpath",
"=",
"xpath",
"[",
":",
"index",
"]",
"+",
"xpath",
"[",
"index",
"+",
"1",
":",
"]",
"return",
"xpath"
] | [
57,
0
] | [
73,
16
] | python | en | ['en', 'error', 'th'] | False |
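A self-contained sketch of the same transformation; a plain ValueError stands in for the library's XpathException (an assumption):

# Strip the outer grouping parentheses from an XPath expression.
def filter_xpath_grouping(xpath):
    xpath = xpath[1:]            # drop the leading '('
    index = xpath.rfind(')')     # locate the matching ')'
    if index == -1:
        raise ValueError("Invalid or unsupported Xpath: %s" % xpath)
    return xpath[:index] + xpath[index + 1:]

print(filter_xpath_grouping("(//button[@type='submit'])[1]"))
# //button[@type='submit'][1]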
create_2D_mosaic_clean_mask | (clean_mask) |
The clean mask of a mosaic should be determined by the compositing function (e.g. mean
mosaic, median mosaic, etc.). This is simply supposed to be a decent approximation of a
clean mask for a mosaic that has no time dimension.
Parameters
----------
clean_mask: np.ndarray
The 3D clean mask used to construct the mosaic.
Returns
-------
mosaic_clean_mask: np.ndarray
A 2D clean mask for a mosaic.
|
The clean mask of a mosaic should be determined by the compositing function (e.g. mean
mosaic, median mosaic, etc.). This is simply supposed to be a decent approximation of a
clean mask for a mosaic that has no time dimension.
Parameters
----------
clean_mask: np.ndarray
The 3D clean mask used to construct the mosaic.
Returns
-------
mosaic_clean_mask: np.ndarray
A 2D clean mask for a mosaic.
| def create_2D_mosaic_clean_mask(clean_mask):
"""
The clean mask of a mosaic should be determined by the compositing function (e.g. mean
mosaic, median mosaic, etc.). This is simply supposed to be a decent approximation of a
clean mask for a mosaic that has no time dimension.
Parameters
----------
clean_mask: np.ndarray
The 3D clean mask used to construct the mosaic.
Returns
-------
mosaic_clean_mask: np.ndarray
A 2D clean mask for a mosaic.
"""
mosaic_clean_mask = clean_mask[0]
# Take the logical OR of clean masks through time.
for i in range(1, clean_mask.shape[0]):
mosaic_clean_mask = np.logical_or(mosaic_clean_mask, clean_mask[i])
return mosaic_clean_mask | [
"def",
"create_2D_mosaic_clean_mask",
"(",
"clean_mask",
")",
":",
"mosaic_clean_mask",
"=",
"clean_mask",
"[",
"0",
"]",
"# Take the logical OR of clean masks through time.",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"clean_mask",
".",
"shape",
"[",
"0",
"]",
")",
":",
"mosaic_clean_mask",
"=",
"np",
".",
"logical_or",
"(",
"mosaic_clean_mask",
",",
"clean_mask",
"[",
"i",
"]",
")",
"return",
"mosaic_clean_mask"
] | [
9,
0
] | [
29,
28
] | python | en | ['en', 'error', 'th'] | False |
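A minimal NumPy sketch of collapsing a (time, y, x) clean mask with a logical OR over time, as create_2D_mosaic_clean_mask does; the toy mask is an assumption:

import numpy as np

clean_mask = np.array([
    [[True, False], [False, False]],   # time 0
    [[False, False], [True, False]],   # time 1
])
mosaic_clean_mask = clean_mask[0]
for i in range(1, clean_mask.shape[0]):
    mosaic_clean_mask = np.logical_or(mosaic_clean_mask, clean_mask[i])
print(mosaic_clean_mask)  # [[ True False] [ True False]]

The same result can be computed more directly with clean_mask.any(axis=0).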
landsat_clean_mask_invalid | (dataset) |
Masks out invalid data according to the LANDSAT
surface reflectance specifications. See this document:
https://landsat.usgs.gov/sites/default/files/documents/ledaps_product_guide.pdf pages 19-20.
Parameters
----------
dataset: xarray.Dataset
An xarray `Dataset` containing bands such as 'red', 'green', or 'blue'.
Returns
-------
invalid_mask: xarray.DataArray
An xarray DataArray with the same number and order of coordinates as in `dataset`.
|
Masks out invalid data according to the LANDSAT
surface reflectance specifications. See this document:
https://landsat.usgs.gov/sites/default/files/documents/ledaps_product_guide.pdf pages 19-20. | def landsat_clean_mask_invalid(dataset):
"""
Masks out invalid data according to the LANDSAT
surface reflectance specifications. See this document:
https://landsat.usgs.gov/sites/default/files/documents/ledaps_product_guide.pdf pages 19-20.
Parameters
----------
dataset: xarray.Dataset
An xarray `Dataset` containing bands such as 'red', 'green', or 'blue'.
Returns
-------
invalid_mask: xarray.DataArray
An xarray DataArray with the same number and order of coordinates as in `dataset`.
"""
invalid_mask = None
data_arr_names = [arr_name for arr_name in list(dataset.data_vars)
if arr_name not in ['pixel_qa', 'radsat_qa', 'cloud_qa']]
# Only keep data where all bands are in the valid range.
for i, data_arr_name in enumerate(data_arr_names):
invalid_mask_arr = xr_and(0 < dataset[data_arr_name], dataset[data_arr_name] < 10000)
invalid_mask = invalid_mask_arr if i == 0 else xr_and(invalid_mask, invalid_mask_arr)
return invalid_mask | [
"def",
"landsat_clean_mask_invalid",
"(",
"dataset",
")",
":",
"invalid_mask",
"=",
"None",
"data_arr_names",
"=",
"[",
"arr_name",
"for",
"arr_name",
"in",
"list",
"(",
"dataset",
".",
"data_vars",
")",
"if",
"arr_name",
"not",
"in",
"[",
"'pixel_qa'",
",",
"'radsat_qa'",
",",
"'cloud_qa'",
"]",
"]",
"# Only keep data where all bands are in the valid range.",
"for",
"i",
",",
"data_arr_name",
"in",
"enumerate",
"(",
"data_arr_names",
")",
":",
"invalid_mask_arr",
"=",
"xr_and",
"(",
"0",
"<",
"dataset",
"[",
"data_arr_name",
"]",
",",
"dataset",
"[",
"data_arr_name",
"]",
"<",
"10000",
")",
"invalid_mask",
"=",
"invalid_mask_arr",
"if",
"i",
"==",
"0",
"else",
"xr_and",
"(",
"invalid_mask",
",",
"invalid_mask_arr",
")",
"return",
"invalid_mask"
] | [
31,
0
] | [
54,
23
] | python | en | ['en', 'error', 'th'] | False |
landsat_qa_clean_mask | (dataset, platform, cover_types=['clear', 'water']) |
Returns a clean_mask for `dataset` that masks out various types of terrain cover using the
Landsat pixel_qa band. Note that Landsat masks specify what to keep, not what to remove.
This means that using `cover_types=['clear', 'water']` should keep only clear land and water.
See "pixel_qa band" here: https://landsat.usgs.gov/landsat-surface-reflectance-quality-assessment
and Section 7 here: https://landsat.usgs.gov/sites/default/files/documents/lasrc_product_guide.pdf.
Parameters
----------
dataset: xarray.Dataset
An xarray (usually produced by `datacube.load()`) that contains a `pixel_qa` data
variable.
platform: str
A string denoting the platform to be used. Can be "LANDSAT_5", "LANDSAT_7", or
"LANDSAT_8".
cover_types: list
A list of the cover types to include. Adding a cover type allows it to remain in the masked data.
Cover types for all Landsat platforms include:
['fill', 'clear', 'water', 'shadow', 'snow', 'cloud', 'low_conf_cl', 'med_conf_cl', 'high_conf_cl'].
'fill' removes "no_data" values, which indicate an absence of data. This value is -9999 for Landsat platforms.
Generally, don't use 'fill'.
'clear' allows only clear terrain. 'water' allows only water. 'shadow' allows only cloud shadows.
'snow' allows only snow. 'cloud' allows only clouds, but note that it often only selects cloud boundaries.
'low_conf_cl', 'med_conf_cl', and 'high_conf_cl' denote low, medium, and high confidence in cloud coverage.
'low_conf_cl' is useful on its own for only removing clouds; however, 'clear' is usually better suited for this.
'med_conf_cl' is useful in combination with 'low_conf_cl' to allow slightly heavier cloud coverage.
Note that 'med_conf_cl' and 'cloud' are very similar.
'high_conf_cl' is useful in combination with both 'low_conf_cl' and 'med_conf_cl'.
For Landsat 8, there are more cover types: ['low_conf_cir', 'high_conf_cir', 'terrain_occ'].
'low_conf_cir' and 'high_conf_cir' denote low and high confidence in cirrus clouds.
'terrain_occ' allows only occluded terrain.
Returns
-------
clean_mask: xarray.DataArray
An xarray DataArray with the same number and order of coordinates as in `dataset`.
|
Returns a clean_mask for `dataset` that masks out various types of terrain cover using the
Landsat pixel_qa band. Note that Landsat masks specify what to keep, not what to remove.
This means that using `cover_types=['clear', 'water']` should keep only clear land and water. | def landsat_qa_clean_mask(dataset, platform, cover_types=['clear', 'water']):
"""
Returns a clean_mask for `dataset` that masks out various types of terrain cover using the
Landsat pixel_qa band. Note that Landsat masks specify what to keep, not what to remove.
This means that using `cover_types=['clear', 'water']` should keep only clear land and water.
See "pixel_qa band" here: https://landsat.usgs.gov/landsat-surface-reflectance-quality-assessment
and Section 7 here: https://landsat.usgs.gov/sites/default/files/documents/lasrc_product_guide.pdf.
Parameters
----------
dataset: xarray.Dataset
An xarray (usually produced by `datacube.load()`) that contains a `pixel_qa` data
variable.
platform: str
A string denoting the platform to be used. Can be "LANDSAT_5", "LANDSAT_7", or
"LANDSAT_8".
cover_types: list
A list of the cover types to include. Adding a cover type allows it to remain in the masked data.
Cover types for all Landsat platforms include:
['fill', 'clear', 'water', 'shadow', 'snow', 'cloud', 'low_conf_cl', 'med_conf_cl', 'high_conf_cl'].
'fill' removes "no_data" values, which indicate an absence of data. This value is -9999 for Landsat platforms.
Generally, don't use 'fill'.
'clear' allows only clear terrain. 'water' allows only water. 'shadow' allows only cloud shadows.
'snow' allows only snow. 'cloud' allows only clouds, but note that it often only selects cloud boundaries.
'low_conf_cl', 'med_conf_cl', and 'high_conf_cl' denote low, medium, and high confidence in cloud coverage.
'low_conf_cl' is useful on its own for only removing clouds; however, 'clear' is usually better suited for this.
'med_conf_cl' is useful in combination with 'low_conf_cl' to allow slightly heavier cloud coverage.
Note that 'med_conf_cl' and 'cloud' are very similar.
'high_conf_cl' is useful in combination with both 'low_conf_cl' and 'med_conf_cl'.
For Landsat 8, there are more cover types: ['low_conf_cir', 'high_conf_cir', 'terrain_occ'].
'low_conf_cir' and 'high_conf_cir' denote low and high confidence in cirrus clouds.
'terrain_occ' allows only occluded terrain.
Returns
-------
clean_mask: xarray.DataArray
An xarray DataArray with the same number and order of coordinates as in `dataset`.
"""
processing_options = {
"LANDSAT_5": ls5_unpack_qa,
"LANDSAT_7": ls7_unpack_qa,
"LANDSAT_8": ls8_unpack_qa
}
clean_mask = None
# Keep all specified cover types (e.g. 'clear', 'water'), so logically or the separate masks.
for i, cover_type in enumerate(cover_types):
cover_type_clean_mask = processing_options[platform](dataset.pixel_qa, cover_type)
clean_mask = cover_type_clean_mask if i == 0 else xr_or(clean_mask, cover_type_clean_mask)
return clean_mask | [
"def",
"landsat_qa_clean_mask",
"(",
"dataset",
",",
"platform",
",",
"cover_types",
"=",
"[",
"'clear'",
",",
"'water'",
"]",
")",
":",
"processing_options",
"=",
"{",
"\"LANDSAT_5\"",
":",
"ls5_unpack_qa",
",",
"\"LANDSAT_7\"",
":",
"ls7_unpack_qa",
",",
"\"LANDSAT_8\"",
":",
"ls8_unpack_qa",
"}",
"clean_mask",
"=",
"None",
"# Keep all specified cover types (e.g. 'clear', 'water'), so logically or the separate masks.",
"for",
"i",
",",
"cover_type",
"in",
"enumerate",
"(",
"cover_types",
")",
":",
"cover_type_clean_mask",
"=",
"processing_options",
"[",
"platform",
"]",
"(",
"dataset",
".",
"pixel_qa",
",",
"cover_type",
")",
"clean_mask",
"=",
"cover_type_clean_mask",
"if",
"i",
"==",
"0",
"else",
"xr_or",
"(",
"clean_mask",
",",
"cover_type_clean_mask",
")",
"return",
"clean_mask"
] | [
57,
0
] | [
109,
21
] | python | en | ['en', 'error', 'th'] | False |
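A hedged usage sketch combining the two masking helpers above on a Landsat 8 scene loaded with Open Data Cube; the product name, query bounds, and the availability of a local datacube index are assumptions:

import datacube
import numpy as np

dc = datacube.Datacube()
dataset = dc.load(product='ls8_usgs_sr_scene',
                  latitude=(0.0, 0.1), longitude=(36.0, 36.1))
# Keep pixels that are both in the valid reflectance range and flagged clear/water.
clean_mask = np.logical_and(
    landsat_clean_mask_invalid(dataset),
    landsat_qa_clean_mask(dataset, platform='LANDSAT_8',
                          cover_types=['clear', 'water']))
masked_dataset = dataset.where(clean_mask)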
xarray_values_in | (data, values, data_vars=None) |
Returns a mask for an xarray Dataset or DataArray, with `True` wherever the value is in values.
Parameters
----------
data: xarray.Dataset or xarray.DataArray
The data to check for value matches.
values: list-like
The values to check for.
data_vars: list-like
The names of the data variables to check.
Returns
-------
mask: np.ndarray
A NumPy array shaped like ``data``. The mask can be used to mask ``data``.
That is, ``data.where(mask)`` is an intended use.
|
Returns a mask for an xarray Dataset or DataArray, with `True` wherever the value is in values. | def xarray_values_in(data, values, data_vars=None):
"""
Returns a mask for an xarray Dataset or DataArray, with `True` wherever the value is in values.
Parameters
----------
data: xarray.Dataset or xarray.DataArray
The data to check for value matches.
values: list-like
The values to check for.
data_vars: list-like
The names of the data variables to check.
Returns
-------
mask: np.ndarray
A NumPy array shaped like ``data``. The mask can be used to mask ``data``.
That is, ``data.where(mask)`` is an intended use.
"""
if isinstance(data, xr.Dataset):
mask = np.full_like(list(data.data_vars.values())[0], False, dtype=np.bool)
for data_arr in data.data_vars.values():
for value in values:
mask = mask | (data_arr.values == value)
elif isinstance(data, xr.DataArray):
mask = np.full_like(data, False, dtype=np.bool)
for value in values:
mask = mask | (data.values == value)
return mask | [
"def",
"xarray_values_in",
"(",
"data",
",",
"values",
",",
"data_vars",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"xr",
".",
"Dataset",
")",
":",
"mask",
"=",
"np",
".",
"full_like",
"(",
"list",
"(",
"data",
".",
"data_vars",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
",",
"False",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"for",
"data_arr",
"in",
"data",
".",
"data_vars",
".",
"values",
"(",
")",
":",
"for",
"value",
"in",
"values",
":",
"mask",
"=",
"mask",
"|",
"(",
"data_arr",
".",
"values",
"==",
"value",
")",
"elif",
"isinstance",
"(",
"data",
",",
"xr",
".",
"DataArray",
")",
":",
"mask",
"=",
"np",
".",
"full_like",
"(",
"data",
",",
"False",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"for",
"value",
"in",
"values",
":",
"mask",
"=",
"mask",
"|",
"(",
"data",
".",
"values",
"==",
"value",
")",
"return",
"mask"
] | [
111,
0
] | [
139,
15
] | python | en | ['en', 'error', 'th'] | False |
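A short usage sketch for xarray_values_in on a toy DataArray (the data values are an assumption):

import numpy as np
import xarray as xr

da = xr.DataArray(np.array([[1, 2], [3, 1]]), dims=('y', 'x'))
mask = xarray_values_in(da, values=[1, 3])
print(da.where(mask))  # the 2 becomes NaN; the 1s and the 3 are kept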
ValidationResultIdentifier.__init__ | (self, expectation_suite_identifier, run_id, batch_identifier) | Constructs a ValidationResultIdentifier
Args:
expectation_suite_identifier (ExpectationSuiteIdentifier, list, tuple, or dict):
identifying information for the fully-qualified expectation suite used to validate
run_id (RunIdentifier): The run_id for which validation occurred
| Constructs a ValidationResultIdentifier | def __init__(self, expectation_suite_identifier, run_id, batch_identifier):
"""Constructs a ValidationResultIdentifier
Args:
expectation_suite_identifier (ExpectationSuiteIdentifier, list, tuple, or dict):
identifying information for the fully-qualified expectation suite used to validate
run_id (RunIdentifier): The run_id for which validation occurred
"""
super().__init__()
self._expectation_suite_identifier = expectation_suite_identifier
if isinstance(run_id, str):
warnings.warn(
"String run_ids will be deprecated in the future. Please provide a run_id of type "
"RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name "
"and run_time (both optional).",
DeprecationWarning,
)
try:
run_time = parse(run_id)
except (ValueError, TypeError):
run_time = None
run_id = RunIdentifier(run_name=run_id, run_time=run_time)
elif isinstance(run_id, dict):
run_id = RunIdentifier(**run_id)
elif run_id is None:
run_id = RunIdentifier()
elif not isinstance(run_id, RunIdentifier):
run_id = RunIdentifier(run_name=str(run_id))
self._run_id = run_id
self._batch_identifier = batch_identifier | [
"def",
"__init__",
"(",
"self",
",",
"expectation_suite_identifier",
",",
"run_id",
",",
"batch_identifier",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
")",
"self",
".",
"_expectation_suite_identifier",
"=",
"expectation_suite_identifier",
"if",
"isinstance",
"(",
"run_id",
",",
"str",
")",
":",
"warnings",
".",
"warn",
"(",
"\"String run_ids will be deprecated in the future. Please provide a run_id of type \"",
"\"RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name \"",
"\"and run_time (both optional).\"",
",",
"DeprecationWarning",
",",
")",
"try",
":",
"run_time",
"=",
"parse",
"(",
"run_id",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"run_time",
"=",
"None",
"run_id",
"=",
"RunIdentifier",
"(",
"run_name",
"=",
"run_id",
",",
"run_time",
"=",
"run_time",
")",
"elif",
"isinstance",
"(",
"run_id",
",",
"dict",
")",
":",
"run_id",
"=",
"RunIdentifier",
"(",
"*",
"*",
"run_id",
")",
"elif",
"run_id",
"is",
"None",
":",
"run_id",
"=",
"RunIdentifier",
"(",
")",
"elif",
"not",
"isinstance",
"(",
"run_id",
",",
"RunIdentifier",
")",
":",
"run_id",
"=",
"RunIdentifier",
"(",
"run_name",
"=",
"str",
"(",
"run_id",
")",
")",
"self",
".",
"_run_id",
"=",
"run_id",
"self",
".",
"_batch_identifier",
"=",
"batch_identifier"
] | [
102,
4
] | [
132,
49
] | python | en | ['en', 'en', 'en'] | True |
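A hedged construction sketch for ValidationResultIdentifier; the identifier values are placeholders, and the import path reflects Great Expectations' resource_identifiers module (an assumption for much older or newer releases):

from great_expectations.data_context.types.resource_identifiers import (
    ExpectationSuiteIdentifier,
    ValidationResultIdentifier,
)

# Passing run_id as a dict exercises the RunIdentifier(**run_id) branch above.
key = ValidationResultIdentifier(
    expectation_suite_identifier=ExpectationSuiteIdentifier(
        expectation_suite_name='my_suite'),
    run_id={'run_name': 'nightly_run', 'run_time': None},
    batch_identifier='batch_001')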
__remove_alias | (type_) |
Implementation detail.
Args:
type_ (type_t): type
Returns:
type_t: the type associated to the inputted type
|
Implementation detail. | def __remove_alias(type_):
"""
Implementation detail.
Args:
type_ (type_t): type
Returns:
type_t: the type associated to the inputted type
"""
if isinstance(type_, cpptypes.declarated_t) and \
isinstance(type_.declaration, typedef.typedef_t):
return __remove_alias(type_.declaration.decl_type)
if isinstance(type_, cpptypes.compound_t):
type_.base = __remove_alias(type_.base)
return type_
return type_ | [
"def",
"__remove_alias",
"(",
"type_",
")",
":",
"if",
"isinstance",
"(",
"type_",
",",
"cpptypes",
".",
"declarated_t",
")",
"and",
"isinstance",
"(",
"type_",
".",
"declaration",
",",
"typedef",
".",
"typedef_t",
")",
":",
"return",
"__remove_alias",
"(",
"type_",
".",
"declaration",
".",
"decl_type",
")",
"if",
"isinstance",
"(",
"type_",
",",
"cpptypes",
".",
"compound_t",
")",
":",
"type_",
".",
"base",
"=",
"__remove_alias",
"(",
"type_",
".",
"base",
")",
"return",
"type_",
"return",
"type_"
] | [
25,
0
] | [
41,
16
] | python | en | ['en', 'error', 'th'] | False |
remove_alias | (type_) |
Returns `type_t` without typedef
Args:
type_ (type_t | declaration_t): type or declaration
Returns:
type_t: the type associated to the inputted declaration
|
Returns `type_t` without typedef | def remove_alias(type_):
"""
Returns `type_t` without typedef
Args:
type_ (type_t | declaration_t): type or declaration
Returns:
type_t: the type associated to the inputted declaration
"""
if isinstance(type_, cpptypes.type_t):
type_ref = type_
elif isinstance(type_, typedef.typedef_t):
type_ref = type_.decl_type
else:
# Not a valid input, just return it
return type_
if type_ref.cache.remove_alias:
return type_ref.cache.remove_alias
no_alias = __remove_alias(type_ref.clone())
type_ref.cache.remove_alias = no_alias
return no_alias | [
"def",
"remove_alias",
"(",
"type_",
")",
":",
"if",
"isinstance",
"(",
"type_",
",",
"cpptypes",
".",
"type_t",
")",
":",
"type_ref",
"=",
"type_",
"elif",
"isinstance",
"(",
"type_",
",",
"typedef",
".",
"typedef_t",
")",
":",
"type_ref",
"=",
"type_",
".",
"decl_type",
"else",
":",
"# Not a valid input, just return it",
"return",
"type_",
"if",
"type_ref",
".",
"cache",
".",
"remove_alias",
":",
"return",
"type_ref",
".",
"cache",
".",
"remove_alias",
"no_alias",
"=",
"__remove_alias",
"(",
"type_ref",
".",
"clone",
"(",
")",
")",
"type_ref",
".",
"cache",
".",
"remove_alias",
"=",
"no_alias",
"return",
"no_alias"
] | [
44,
0
] | [
65,
19
] | python | en | ['en', 'error', 'th'] | False |
decompose_type | (tp) |
Implementation detail
|
Implementation detail
| def decompose_type(tp):
"""
Implementation detail
"""
if isinstance(tp, cpptypes.compound_t):
return [tp] + decompose_type(tp.base)
elif isinstance(tp, typedef.typedef_t):
return decompose_type(tp.decl_type)
elif isinstance(tp, cpptypes.declarated_t) and \
isinstance(tp.declaration, typedef.typedef_t):
return decompose_type(tp.declaration.decl_type)
else:
return [tp] | [
"def",
"decompose_type",
"(",
"tp",
")",
":",
"if",
"isinstance",
"(",
"tp",
",",
"cpptypes",
".",
"compound_t",
")",
":",
"return",
"[",
"tp",
"]",
"+",
"decompose_type",
"(",
"tp",
".",
"base",
")",
"elif",
"isinstance",
"(",
"tp",
",",
"typedef",
".",
"typedef_t",
")",
":",
"return",
"decompose_type",
"(",
"tp",
".",
"decl_type",
")",
"elif",
"isinstance",
"(",
"tp",
",",
"cpptypes",
".",
"declarated_t",
")",
"and",
"isinstance",
"(",
"tp",
".",
"declaration",
",",
"typedef",
".",
"typedef_t",
")",
":",
"return",
"decompose_type",
"(",
"tp",
".",
"declaration",
".",
"decl_type",
")",
"else",
":",
"return",
"[",
"tp",
"]"
] | [
68,
0
] | [
80,
19
] | python | en | ['en', 'error', 'th'] | False |
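A small sketch of decompose_type on a compound type built by hand with pygccxml's type objects; it assumes pygccxml is installed, and no parsed C++ sources are needed:

from pygccxml.declarations import cpptypes

t = cpptypes.const_t(cpptypes.pointer_t(cpptypes.int_t()))  # roughly 'int * const'
parts = decompose_type(t)
print([p.__class__.__name__ for p in parts])  # ['const_t', 'pointer_t', 'int_t']
print(parts[-1].__class__.__name__)           # 'int_t' -- what base_type() returns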
decompose_class | (type_) | implementation details | implementation details | def decompose_class(type_):
"""implementation details"""
types = decompose_type(type_)
return [tp.__class__ for tp in types] | [
"def",
"decompose_class",
"(",
"type_",
")",
":",
"types",
"=",
"decompose_type",
"(",
"type_",
")",
"return",
"[",
"tp",
".",
"__class__",
"for",
"tp",
"in",
"types",
"]"
] | [
83,
0
] | [
86,
41
] | python | da | ['eo', 'da', 'en'] | False |
base_type | (type_) | returns base type.
For `const int` will return `int`
| returns base type. | def base_type(type_):
"""returns base type.
For `const int` will return `int`
"""
types = decompose_type(type_)
return types[-1] | [
"def",
"base_type",
"(",
"type_",
")",
":",
"types",
"=",
"decompose_type",
"(",
"type_",
")",
"return",
"types",
"[",
"-",
"1",
"]"
] | [
89,
0
] | [
95,
20
] | python | en | ['en', 'jv', 'en'] | True |
_create_cv_types | (base) |
Implementation detail.
|
Implementation detail. | def _create_cv_types(base):
"""
Implementation detail.
"""
return (
[base,
cpptypes.const_t(base),
cpptypes.volatile_t(base),
cpptypes.volatile_t(cpptypes.const_t(base))]
) | [
"def",
"_create_cv_types",
"(",
"base",
")",
":",
"return",
"(",
"[",
"base",
",",
"cpptypes",
".",
"const_t",
"(",
"base",
")",
",",
"cpptypes",
".",
"volatile_t",
"(",
"base",
")",
",",
"cpptypes",
".",
"volatile_t",
"(",
"cpptypes",
".",
"const_t",
"(",
"base",
")",
")",
"]",
")"
] | [
98,
0
] | [
108,
5
] | python | en | ['en', 'error', 'th'] | False |
does_match_definition | (given, main, secondary) | implementation details | implementation details | def does_match_definition(given, main, secondary):
"""implementation details"""
assert isinstance(secondary, tuple)
assert len(secondary) == 2 # general solution could be provided
types = decompose_type(given)
if isinstance(types[0], main):
return True
if len(types) >= 2:
cond1 = isinstance(types[0], main)
cond2 = isinstance(types[1], secondary)
cond3 = isinstance(types[1], main)
cond4 = isinstance(types[0], secondary)
if (cond1 and cond2) or (cond3 and cond4):
return True
if len(types) >= 3:
classes = set([tp.__class__ for tp in types[:3]])
desired = set([main] + list(secondary))
diff = classes.symmetric_difference(desired)
if not diff:
return True
if len(diff) == 2:
items = list(diff)
return (
issubclass(
items[0], items[1]) or issubclass(items[1], items[0]))
else:
return False
else:
return False | [
"def",
"does_match_definition",
"(",
"given",
",",
"main",
",",
"secondary",
")",
":",
"assert",
"isinstance",
"(",
"secondary",
",",
"tuple",
")",
"assert",
"len",
"(",
"secondary",
")",
"==",
"2",
"# general solution could be provided",
"types",
"=",
"decompose_type",
"(",
"given",
")",
"if",
"isinstance",
"(",
"types",
"[",
"0",
"]",
",",
"main",
")",
":",
"return",
"True",
"if",
"len",
"(",
"types",
")",
">=",
"2",
":",
"cond1",
"=",
"isinstance",
"(",
"types",
"[",
"0",
"]",
",",
"main",
")",
"cond2",
"=",
"isinstance",
"(",
"types",
"[",
"1",
"]",
",",
"secondary",
")",
"cond3",
"=",
"isinstance",
"(",
"types",
"[",
"1",
"]",
",",
"main",
")",
"cond4",
"=",
"isinstance",
"(",
"types",
"[",
"0",
"]",
",",
"secondary",
")",
"if",
"(",
"cond1",
"and",
"cond2",
")",
"or",
"(",
"cond3",
"and",
"cond4",
")",
":",
"return",
"True",
"if",
"len",
"(",
"types",
")",
">=",
"3",
":",
"classes",
"=",
"set",
"(",
"[",
"tp",
".",
"__class__",
"for",
"tp",
"in",
"types",
"[",
":",
"3",
"]",
"]",
")",
"desired",
"=",
"set",
"(",
"[",
"main",
"]",
"+",
"list",
"(",
"secondary",
")",
")",
"diff",
"=",
"classes",
".",
"symmetric_difference",
"(",
"desired",
")",
"if",
"not",
"diff",
":",
"return",
"True",
"if",
"len",
"(",
"diff",
")",
"==",
"2",
":",
"items",
"=",
"list",
"(",
"diff",
")",
"return",
"(",
"issubclass",
"(",
"items",
"[",
"0",
"]",
",",
"items",
"[",
"1",
"]",
")",
"or",
"issubclass",
"(",
"items",
"[",
"1",
"]",
",",
"items",
"[",
"0",
"]",
")",
")",
"else",
":",
"return",
"False",
"else",
":",
"return",
"False"
] | [
138,
0
] | [
169,
20
] | python | da | ['eo', 'da', 'en'] | False |
is_bool | (type_) |
Check if type is of boolean type.
Args:
type_ (type_t): The type to be checked
Returns:
bool: True if type is a boolean, False otherwise.
|
Check if type is of boolean type. | def is_bool(type_):
"""
Check if type is of boolean type.
Args:
type_ (type_t): The type to be checked
Returns:
bool: True if type is a boolean, False otherwise.
"""
return remove_alias(type_) in _bool_def | [
"def",
"is_bool",
"(",
"type_",
")",
":",
"return",
"remove_alias",
"(",
"type_",
")",
"in",
"_bool_def"
] | [
172,
0
] | [
182,
43
] | python | en | ['en', 'error', 'th'] | False |
is_void | (type_) |
Check if type is of void type.
Args:
type_ (type_t): The type to be checked
Returns:
bool: True if type is void, False otherwise.
|
Check if type is of void type. | def is_void(type_):
"""
Check if type is of void type.
Args:
type_ (type_t): The type to be checked
Returns:
bool: True if type is void, False otherwise.
"""
return remove_alias(type_) in _void_def | [
"def",
"is_void",
"(",
"type_",
")",
":",
"return",
"remove_alias",
"(",
"type_",
")",
"in",
"_void_def"
] | [
185,
0
] | [
195,
43
] | python | en | ['en', 'error', 'th'] | False |
is_void_pointer | (type_) | returns True, if type represents `void*`, False otherwise | returns True, if type represents `void*`, False otherwise | def is_void_pointer(type_):
"""returns True, if type represents `void*`, False otherwise"""
return is_same(type_, cpptypes.pointer_t(cpptypes.void_t())) | [
"def",
"is_void_pointer",
"(",
"type_",
")",
":",
"return",
"is_same",
"(",
"type_",
",",
"cpptypes",
".",
"pointer_t",
"(",
"cpptypes",
".",
"void_t",
"(",
")",
")",
")"
] | [
198,
0
] | [
200,
64
] | python | en | ['en', 'en', 'en'] | True |
is_integral | (type_) |
Check if type is a C++ integral type
Args:
type_ (type_t): The type to be checked
Returns:
bool: True if type is a C++ integral type, False otherwise.
|
Check if type is a C++ integral type | def is_integral(type_):
"""
Check if type is a C++ integral type
Args:
type_ (type_t): The type to be checked
Returns:
bool: True if type is a C++ integral type, False otherwise.
"""
return remove_alias(type_) in _integral_def | [
"def",
"is_integral",
"(",
"type_",
")",
":",
"return",
"remove_alias",
"(",
"type_",
")",
"in",
"_integral_def"
] | [
203,
0
] | [
213,
47
] | python | en | ['en', 'error', 'th'] | False |
is_floating_point | (type_) | returns True, if type represents C++ floating point type,
False otherwise | returns True, if type represents C++ floating point type,
False otherwise | def is_floating_point(type_):
"""returns True, if type represents C++ floating point type,
False otherwise"""
return remove_alias(type_) in _float_def | [
"def",
"is_floating_point",
"(",
"type_",
")",
":",
"return",
"remove_alias",
"(",
"type_",
")",
"in",
"_float_def"
] | [
216,
0
] | [
220,
44
] | python | en | ['en', 'en', 'en'] | True |
is_arithmetic | (type_) | returns True, if type represents C++ integral or floating point type,
False otherwise | returns True, if type represents C++ integral or floating point type,
False otherwise | def is_arithmetic(type_):
"""returns True, if type represents C++ integral or floating point type,
False otherwise"""
return is_integral(type_) or is_floating_point(type_) | [
"def",
"is_arithmetic",
"(",
"type_",
")",
":",
"return",
"is_integral",
"(",
"type_",
")",
"or",
"is_floating_point",
"(",
"type_",
")"
] | [
223,
0
] | [
226,
57
] | python | en | ['en', 'en', 'en'] | True |
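A hedged sketch of these predicates on hand-built pygccxml types; importing them from pygccxml.declarations.type_traits reflects where these helpers live upstream (an assumption for other versions):

from pygccxml.declarations import cpptypes, type_traits

print(type_traits.is_bool(cpptypes.bool_t()))                              # True
print(type_traits.is_integral(cpptypes.const_t(cpptypes.int_t())))         # True
print(type_traits.is_floating_point(cpptypes.double_t()))                  # True
print(type_traits.is_arithmetic(cpptypes.float_t()))                       # True
print(type_traits.is_void_pointer(cpptypes.pointer_t(cpptypes.void_t())))  # True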