@property
def _body(self):
    """The |_Body| instance containing the content for this document."""
    if self.__body is None:
        self.__body = _Body(self._element.body, self)
    return self.__body

def clear_content(self):
    """Return this |_Body| instance after clearing it of all content.
    Section properties for the main document story, if present, are
    preserved."""
    self._body.clear_content()
    return self
def add_paragraph(self, text='', style=None):
    """Return a paragraph newly added to the end of the content in this
    container, having *text* in a single run if present, and having
    paragraph style *style*. If *style* is |None|, no paragraph style is
    applied, which has the same effect as applying the 'Normal' style."""
    paragraph = self._add_paragraph()
    if text:
        paragraph.add_run(text)
    if style is not None:
        paragraph.style = style
    return paragraph
def add_table(self, rows, cols, width):
    """Return a table of *width* having *rows* rows and *cols* columns,
    newly appended to the content in this container. *width* is evenly
    distributed between the table columns."""
    from .table import Table
    tbl = CT_Tbl.new_tbl(rows, cols, width)
    self._element._insert_tbl(tbl)
    return Table(tbl, self)
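# A minimal usage sketch (not part of the source above): the two container
# methods are normally reached through python-docx's public Document API,
# which supplies the table width for you. Assumes python-docx is installed.
from docx import Document

doc = Document()
doc.add_paragraph('Hello, world.', style='Heading 1')
table = doc.add_table(rows=2, cols=3)   # width taken from the section
doc.save('demo.docx')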
@property
def paragraphs(self):
    """A list containing the paragraphs in this container, in document
    order. Read-only."""
    return [Paragraph(p, self) for p in self._element.p_lst]

@property
def tables(self):
    """A list containing the tables in this container, in document order.
    Read-only."""
    from .table import Table
    return [Table(tbl, self) for tbl in self._element.tbl_lst]

def _add_paragraph(self):
    """Return a paragraph newly added to the end of the content in this
    container."""
    return Paragraph(self._element.add_p(), self)
def __getitem__(self, idx):
    """Provide indexed access, e.g. 'inline_shapes[idx]'."""
    try:
        inline = self._inline_lst[idx]
    except IndexError:
        msg = u'inline shape index [%d] out of range' % idx
        raise IndexError(msg)
    return InlineShape(inline)
@property
def height(self):
    """Read/write. The display height of this inline shape as an |Emu|
    instance."""
    return self._inline.extent.cy

@property
def type(self):
    """The type of this inline shape as a member of
    ``docx.enum.shape.WD_INLINE_SHAPE``, e.g. ``LINKED_PICTURE``.
    Read-only."""
    graphicData = self._inline.graphic.graphicData
    uri = graphicData.uri
    if uri == nsmap[u'pic']:
        blip = graphicData.pic.blipFill.blip
        if blip.link is not None:
            return WD_INLINE_SHAPE.LINKED_PICTURE
        return WD_INLINE_SHAPE.PICTURE
    if uri == nsmap[u'c']:
        return WD_INLINE_SHAPE.CHART
    if uri == nsmap[u'dgm']:
        return WD_INLINE_SHAPE.SMART_ART
    return WD_INLINE_SHAPE.NOT_IMPLEMENTED

@property
def width(self):
    """Read/write. The display width of this inline shape as an |Emu|
    instance."""
    return self._inline.extent.cx
def changed(self, event):
    """Notify the observers."""
    for observer in self.observers:
        observer.update(event, self)

def add_observer(self, observer):
    """Register an observer."""
    self.observers.append(observer)
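# The two methods above are a classic observer pattern. A self-contained
# sketch of how they plug together; the class and observer names here are
# hypothetical, for illustration only.
class Subject(object):
    def __init__(self):
        self.observers = []

    def add_observer(self, observer):
        self.observers.append(observer)

    def changed(self, event):
        for observer in self.observers:
            observer.update(event, self)


class PrintingObserver(object):
    def update(self, event, subject):
        print('subject %r fired event %r' % (subject, event))


subject = Subject()
subject.add_observer(PrintingObserver())
subject.changed('refit')   # each registered observer's update() runs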
def refit(self):
    """Refit the model if already fitted."""
    if self.fitted:
        self.fit()

def remove_surface(self):
    """Remove old decision surface."""
    if len(self.contours) > 0:
        for contour in self.contours:
            if isinstance(contour, ContourSet):
                for lineset in contour.collections:
                    lineset.remove()
            else:
                contour.remove()
        self.contours = []
def plot_support_vectors(self, support_vectors):
    """Plot the support vectors by placing circles over the corresponding
    data points, and add the circle collection to the contours list."""
    cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
                         s=80, edgecolors='k', facecolors='none')
    self.contours.append(cs)
def _get_link(self, cobj):
    """Get a valid link, False if not found."""
    fname_idx = None
    full_name = cobj['module_short'] + '.' + cobj['name']
    if full_name in self._searchindex['objects']:
        value = self._searchindex['objects'][full_name]
        if isinstance(value, dict):
            value = value[next(iter(value.keys()))]
        fname_idx = value[0]
    elif cobj['module_short'] in self._searchindex['objects']:
        value = self._searchindex['objects'][cobj['module_short']]
        if cobj['name'] in value.keys():
            fname_idx = value[cobj['name']][0]
    if fname_idx is not None:
        fname = self._searchindex['filenames'][fname_idx]
        fnames = [fname + '.html', os.path.splitext(fname)[0] + '.html']
        for fname in fnames:
            try:
                if self._is_windows:
                    fname = fname.replace('/', '\\')
                    link = os.path.join(self.doc_url, fname)
                else:
                    link = posixpath.join(self.doc_url, fname)
                if hasattr(link, 'decode'):
                    link = link.decode('utf-8', 'replace')
                if link in self._page_cache:
                    html = self._page_cache[link]
                else:
                    html = get_data(link, self.gallery_dir)
                    self._page_cache[link] = html
            except (HTTPError, URLError, IOError):
                pass
            else:
                break
        else:
            raise
        comb_names = [cobj['module_short'] + '.' + cobj['name']]
        if self.extra_modules_test is not None:
            for mod in self.extra_modules_test:
                comb_names.append(mod + '.' + cobj['name'])
        url = False
        if hasattr(html, 'decode'):
            html = html.decode('utf-8', 'replace')
        for comb_name in comb_names:
            if hasattr(comb_name, 'decode'):
                comb_name = comb_name.decode('utf-8', 'replace')
            if comb_name in html:
                url = link + u'#' + comb_name
        link = url
    else:
        link = False
    return link
def resolve(self, cobj, this_url):
    """Resolve the link to the documentation, returns None if not found.

    Parameters
    ----------
    cobj : dict
        Dict with information about the "code object" for which we are
        resolving a link.
        cobj['name'] : function or class name (str)
        cobj['module_short'] : shortened module name (str)
        cobj['module'] : module name (str)
    this_url : str
        URL of the current page. Needed to construct relative URLs
        (only used if relative=True in constructor).

    Returns
    -------
    link : str | None
        The link (URL) to the documentation.
    """
    full_name = cobj['module_short'] + '.' + cobj['name']
    link = self._link_cache.get(full_name, None)
    if link is None:
        link = self._get_link(cobj)
        self._link_cache[full_name] = link
    if link is False or link is None:
        return None
    if self.relative:
        link = os.path.relpath(link, start=this_url)
        if self._is_windows:
            # replace '\' with '/' so it on the web
            link = link.replace('\\', '/')
        link = link[3:]
    return link
def __getattr__(self, aname):
    """Supported attributes by name are:
    - lineno - returns the line number of the exception text
    - col - returns the column number of the exception text
    - line - returns the line containing the exception text
    """
    if aname == 'lineno':
        return lineno(self.loc, self.pstr)
    elif aname in ('col', 'column'):
        return col(self.loc, self.pstr)
    elif aname == 'line':
        return line(self.loc, self.pstr)
    else:
        raise AttributeError(aname)
def markInputline(self, markerString='>!<'):
    """Extracts the exception line from the input string, and marks the
    location of the exception with a special symbol."""
    line_str = self.line
    line_column = self.column - 1
    if markerString:
        line_str = ''.join([line_str[:line_column],
                            markerString,
                            line_str[line_column:]])
    return line_str.strip()
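# A short demonstration of the ParseException helpers above, using the
# public pyparsing API (assumes pyparsing is installed):
from pyparsing import Word, nums, ParseException

integer = Word(nums)
try:
    integer.parseString("abc")
except ParseException as err:
    print(err.lineno, err.column)   # 1 1 -- where the match failed
    print(err.markInputline())      # >!<abc -- marker at the failure point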
def keys(self):
    """Returns all named result keys."""
    return self.__tokdict.keys()

def pop(self, index=-1):
    """Removes and returns item at specified index (default=last). Will
    work with either numeric indices or dict-key indices."""
    ret = self[index]
    del self[index]
    return ret

def get(self, key, defaultValue=None):
    """Returns named result matching the given key, or if there is no
    such name, then returns the given defaultValue or None if no
    defaultValue is specified."""
    if key in self:
        return self[key]
    else:
        return defaultValue

def items(self):
    """Returns all named result keys and values as a list of tuples."""
    return [(k, self[k]) for k in self.__tokdict.keys()]

def values(self):
    """Returns all named result values."""
    return [v[-1][0] for v in self.__tokdict.values()]

def asList(self):
    """Returns the parse results as a nested list of matching tokens,
    all converted to strings."""
    out = []
    for res in self.__toklist:
        if isinstance(res, ParseResults):
            out.append(res.asList())
        else:
            out.append(res)
    return out
def asDict(self):
    """Returns the named parse results as dictionary."""
    return dict(self.items())
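# Quick demonstration of the ParseResults accessors defined above,
# again via the public pyparsing API:
from pyparsing import Word, alphas

name = Word(alphas)("first") + Word(alphas)("last")
result = name.parseString("John Smith")
print(result.asList())      # ['John', 'Smith']
print(result.asDict())      # {'first': 'John', 'last': 'Smith'}
print(result.get("first"))  # John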
def copy(self):
    """Returns a new copy of a ParseResults object."""
    ret = ParseResults(self.__toklist)
    ret.__tokdict = self.__tokdict.copy()
    ret.__parent = self.__parent
    ret.__accumNames.update(self.__accumNames)
    ret.__name = self.__name
    return ret
def asXML(self, doctag=None, namedItemsOnly=False, indent='', formatted=True):
    """Returns the parse results as XML. Tags are created for tokens and
    lists that have defined results names."""
    nl = '\n'
    out = []
    namedItems = dict([(v[1], k) for (k, vlist) in self.__tokdict.items()
                       for v in vlist])
    nextLevelIndent = indent + '  '
    if not formatted:
        indent = ''
        nextLevelIndent = ''
        nl = ''
    selfTag = None
    if doctag is not None:
        selfTag = doctag
    elif self.__name:
        selfTag = self.__name
    if not selfTag:
        if namedItemsOnly:
            return ''
        else:
            selfTag = 'ITEM'
    out += [nl, indent, '<', selfTag, '>']
    worklist = self.__toklist
    for i, res in enumerate(worklist):
        if isinstance(res, ParseResults):
            if i in namedItems:
                out += [res.asXML(namedItems[i],
                                  namedItemsOnly and doctag is None,
                                  nextLevelIndent, formatted)]
            else:
                out += [res.asXML(None,
                                  namedItemsOnly and doctag is None,
                                  nextLevelIndent, formatted)]
        else:
            resTag = None
            if i in namedItems:
                resTag = namedItems[i]
            if not resTag:
                if namedItemsOnly:
                    continue
                else:
                    resTag = 'ITEM'
            xmlBodyText = xml.sax.saxutils.escape(_ustr(res))
            out += [nl, nextLevelIndent, '<', resTag, '>',
                    xmlBodyText, '</', resTag, '>']
    out += [nl, indent, '</', selfTag, '>']
    return ''.join(out)
def getName(self):
    """Returns the results name for this token expression."""
    if self.__name:
        return self.__name
    elif self.__parent:
        par = self.__parent()
        if par:
            return par.__lookup(self)
        else:
            return None
    elif (len(self) == 1 and
          len(self.__tokdict) == 1 and
          self.__tokdict.values()[0][0][1] in (0, -1)):
        return self.__tokdict.keys()[0]
    else:
        return None
def dump(self, indent='', depth=0):
    """Diagnostic method for listing out the contents of a ParseResults.
    Accepts an optional indent argument so that this string can be
    embedded in a nested display of other data."""
    out = []
    out.append(indent + _ustr(self.asList()))
    keys = self.items()
    keys.sort()
    for k, v in keys:
        if out:
            out.append('\n')
        out.append('%s%s- %s: ' % (indent, '  ' * depth, k))
        if isinstance(v, ParseResults):
            if v.keys():
                out.append(v.dump(indent, depth + 1))
            else:
                out.append(_ustr(v))
        else:
            out.append(_ustr(v))
    return ''.join(out)
@staticmethod
def setDefaultWhitespaceChars(chars):
    """Overrides the default whitespace chars."""
    ParserElement.DEFAULT_WHITE_CHARS = chars
def copy(self):
    """Make a copy of this ParserElement. Useful for defining different
    parse actions for the same parsing pattern, using copies of the
    original parse element."""
    cpy = copy.copy(self)
    cpy.parseAction = self.parseAction[:]
    cpy.ignoreExprs = self.ignoreExprs[:]
    if self.copyDefaultWhiteChars:
        cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
    return cpy
def setName(self, name):
    """Define name for this expression, for use in debugging."""
    self.name = name
    self.errmsg = 'Expected ' + self.name
    if hasattr(self, 'exception'):
        self.exception.msg = self.errmsg
    return self
def setResultsName(self, name, listAllMatches=False):
    """Define name for referencing matching tokens as a nested attribute
    of the returned parse results. NOTE: this returns a *copy* of the
    original ParserElement object; this is so that the client can define
    a basic element, such as an integer, and reference it in multiple
    places with different names."""
    newself = self.copy()
    newself.resultsName = name
    newself.modalResults = not listAllMatches
    return newself
def setBreak(self, breakFlag=True):
    """Method to invoke the Python pdb debugger when this element is
    about to be parsed. Set breakFlag to True to enable, False to
    disable."""
    if breakFlag:
        _parseMethod = self._parse

        def breaker(instring, loc, doActions=True, callPreParse=True):
            import pdb
            pdb.set_trace()
            # return the wrapped method's result so parsing can continue
            return _parseMethod(instring, loc, doActions, callPreParse)
        breaker._originalParseMethod = _parseMethod
        self._parse = breaker
    elif hasattr(self._parse, '_originalParseMethod'):
        self._parse = self._parse._originalParseMethod
    return self
@staticmethod
def _normalizeParseActionArgs(f):
    """Internal method used to decorate parse actions that take fewer
    than 3 arguments, so that all parse actions can be called as
    f(s,l,t)."""
    # Note: uses Python 2 introspection attributes (func_code, im_self).
    STAR_ARGS = 4
    try:
        restore = None
        if isinstance(f, type):
            restore = f
            f = f.__init__
        if f.func_code.co_flags & STAR_ARGS:
            return f
        numargs = f.func_code.co_argcount
        if hasattr(f, 'im_self'):
            numargs -= 1
        if restore:
            f = restore
    except AttributeError:
        try:
            if f.__call__.im_func.func_code.co_flags & STAR_ARGS:
                return f
            numargs = f.__call__.im_func.func_code.co_argcount
            if hasattr(f.__call__, 'im_self'):
                numargs -= 1
        except AttributeError:
            if f.__call__.func_code.co_flags & STAR_ARGS:
                return f
            numargs = f.__call__.func_code.co_argcount
            if hasattr(f.__call__, 'im_self'):
                numargs -= 1
    if numargs == 3:
        return f
    else:
        if numargs == 2:
            def tmp(s, l, t):
                return f(l, t)
        elif numargs == 1:
            def tmp(s, l, t):
                return f(t)
        else:
            def tmp(s, l, t):
                return f()
        try:
            tmp.__name__ = f.__name__
        except AttributeError:
            pass
        try:
            tmp.__doc__ = f.__doc__
        except AttributeError:
            pass
        try:
            tmp.__dict__.update(f.__dict__)
        except AttributeError:
            pass
        return tmp
def setParseAction(self, *fns, **kwargs):
    """Define action to perform when successfully matching parse element
    definition. Parse action fn is a callable method with 0-3 arguments,
    called as fn(s,loc,toks), fn(loc,toks), fn(toks), or just fn(),
    where:
    - s = the original string being parsed (see note below)
    - loc = the location of the matching substring
    - toks = a list of the matched tokens, packaged as a ParseResults
      object
    If the functions in fns modify the tokens, they can return them as
    the return value from fn, and the modified list of tokens will
    replace the original. Otherwise, fn does not need to return any
    value.

    Note: the default parsing behavior is to expand tabs in the input
    string before starting the parsing process. See
    L{I{parseString}<parseString>} for more information on parsing
    strings containing <TAB>s, and suggested methods to maintain a
    consistent view of the parsed string, the parse location, and line
    and column positions within the parsed string.
    """
    self.parseAction = map(self._normalizeParseActionArgs, list(fns))
    self.callDuringTry = ('callDuringTry' in kwargs and
                          kwargs['callDuringTry'])
    return self
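# A typical parse action, shown with the public pyparsing API: the
# single-argument form fn(toks) is exactly what the normalization helper
# above wraps into the full (s, loc, toks) signature.
from pyparsing import Word, nums

integer = Word(nums)
integer.setParseAction(lambda toks: int(toks[0]))
print(integer.parseString("42")[0] + 1)   # 43 -- tokens are real ints now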
def addParseAction(self, *fns, **kwargs):
    """Add parse action to expression's list of parse actions. See
    L{I{setParseAction}<setParseAction>}."""
    self.parseAction += map(self._normalizeParseActionArgs, list(fns))
    self.callDuringTry = (self.callDuringTry or
                          ('callDuringTry' in kwargs and
                           kwargs['callDuringTry']))
    return self
def setFailAction(self, fn):
    """Define action to perform if parsing fails at this expression.
    Fail action fn is a callable function that takes the arguments
    fn(s,loc,expr,err) where:
    - s = string being parsed
    - loc = location where expression match was attempted and failed
    - expr = the parse expression that failed
    - err = the exception thrown
    The function returns no value. It may throw ParseFatalException if
    it is desired to stop parsing immediately."""
    self.failAction = fn
    return self
'Enables "packrat" parsing, which adds memoizing to the parsing logic. Repeated parse attempts at the same string location (which happens often in many complex grammars) can immediately return a cached value, instead of re-executing parsing/validating code. Memoizing is done of both valid results and parsing exceptions. This speedup may break existing programs that use parse actions that have side-effects. For this reason, packrat parsing is disabled when you first import pyparsing. To activate the packrat feature, your program must call the class method ParserElement.enablePackrat(). If your program uses psyco to "compile as you go", you must call enablePackrat before calling psyco.full(). If you do not do this, Python will crash. For best results, call enablePackrat() immediately after importing pyparsing.'
def enablePackrat():
if (not ParserElement._packratEnabled): ParserElement._packratEnabled = True ParserElement._parse = ParserElement._parseCache
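# Per the docstring above, packrat memoization is opt-in and should be
# switched on right after import:
import pyparsing

pyparsing.ParserElement.enablePackrat()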
def parseString(self, instring):
    """Execute the parse expression with the given string. This is the
    main interface to the client code, once the complete expression has
    been built.

    Note: parseString implicitly calls expandtabs() on the input string,
    in order to report proper column numbers in parse actions. If the
    input string contains tabs and the grammar uses parse actions that
    use the loc argument to index into the string being parsed, you can
    ensure you have a consistent view of the input string by:
    - calling parseWithTabs on your grammar before calling parseString
      (see L{I{parseWithTabs}<parseWithTabs>})
    - define your parse action using the full (s,loc,toks) signature,
      and reference the input string using the parse action's s argument
    - explicitly expand the tabs in your input string before calling
      parseString
    """
    ParserElement.resetCache()
    if not self.streamlined:
        self.streamline()
    for e in self.ignoreExprs:
        e.streamline()
    if self.keepTabs:
        loc, tokens = self._parse(instring, 0)
    else:
        loc, tokens = self._parse(instring.expandtabs(), 0)
    return tokens
def scanString(self, instring, maxMatches=__MAX_INT__):
    """Scan the input string for expression matches. Each match will
    return the matching tokens, start location, and end location. May be
    called with optional maxMatches argument, to clip scanning after 'n'
    matches are found. Note that the start and end locations are
    reported relative to the string being parsed. See
    L{I{parseString}<parseString>} for more information on parsing
    strings with embedded tabs."""
    if not self.streamlined:
        self.streamline()
    for e in self.ignoreExprs:
        e.streamline()
    if not self.keepTabs:
        instring = _ustr(instring).expandtabs()
    instrlen = len(instring)
    loc = 0
    preparseFn = self.preParse
    parseFn = self._parse
    ParserElement.resetCache()
    matches = 0
    while loc <= instrlen and matches < maxMatches:
        try:
            preloc = preparseFn(instring, loc)
            nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
        except ParseException:
            loc = preloc + 1
        else:
            matches += 1
            yield tokens, preloc, nextLoc
            loc = nextLoc
def transformString(self, instring):
    """Extension to scanString, to modify matching text with modified
    tokens that may be returned from a parse action. To use
    transformString, define a grammar and attach a parse action to it
    that modifies the returned token list. Invoking transformString() on
    a target string will then scan for matches, and replace the matched
    text patterns according to the logic in the parse action.
    transformString() returns the resulting transformed string."""
    out = []
    lastE = 0
    self.keepTabs = True
    for t, s, e in self.scanString(instring):
        out.append(instring[lastE:s])
        if t:
            if isinstance(t, ParseResults):
                out += t.asList()
            elif isinstance(t, list):
                out += t
            else:
                out.append(t)
        lastE = e
    out.append(instring[lastE:])
    return ''.join(map(_ustr, out))
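# transformString in action: a parse action rewrites each match, and
# unmatched text passes through untouched.
from pyparsing import Word, alphas

word = Word(alphas)
word.setParseAction(lambda toks: toks[0].upper())
print(word.transformString("make this loud"))   # MAKE THIS LOUD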
def searchString(self, instring, maxMatches=__MAX_INT__):
    """Another extension to scanString, simplifying the access to the
    tokens found to match the given parse expression. May be called with
    optional maxMatches argument, to clip searching after 'n' matches
    are found."""
    return ParseResults([t for t, s, e in
                         self.scanString(instring, maxMatches)])
def __add__(self, other):
    """Implementation of + operator - returns And."""
    if isinstance(other, __BASE_STRING__):
        other = Literal(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement'
                      % type(other), SyntaxWarning, stacklevel=2)
        return None
    return And([self, other])

def __radd__(self, other):
    """Implementation of + operator when left operand is not a
    ParserElement."""
    if isinstance(other, __BASE_STRING__):
        other = Literal(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement'
                      % type(other), SyntaxWarning, stacklevel=2)
        return None
    return other + self

def __or__(self, other):
    """Implementation of | operator - returns MatchFirst."""
    if isinstance(other, __BASE_STRING__):
        other = Literal(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement'
                      % type(other), SyntaxWarning, stacklevel=2)
        return None
    return MatchFirst([self, other])

def __ror__(self, other):
    """Implementation of | operator when left operand is not a
    ParserElement."""
    if isinstance(other, __BASE_STRING__):
        other = Literal(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement'
                      % type(other), SyntaxWarning, stacklevel=2)
        return None
    return other | self

def __xor__(self, other):
    """Implementation of ^ operator - returns Or."""
    if isinstance(other, __BASE_STRING__):
        other = Literal(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement'
                      % type(other), SyntaxWarning, stacklevel=2)
        return None
    return Or([self, other])

def __rxor__(self, other):
    """Implementation of ^ operator when left operand is not a
    ParserElement."""
    if isinstance(other, __BASE_STRING__):
        other = Literal(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement'
                      % type(other), SyntaxWarning, stacklevel=2)
        return None
    return other ^ self

def __and__(self, other):
    """Implementation of & operator - returns Each."""
    if isinstance(other, __BASE_STRING__):
        other = Literal(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement'
                      % type(other), SyntaxWarning, stacklevel=2)
        return None
    return Each([self, other])

def __rand__(self, other):
    """Implementation of & operator when left operand is not a
    ParserElement."""
    if isinstance(other, __BASE_STRING__):
        other = Literal(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement'
                      % type(other), SyntaxWarning, stacklevel=2)
        return None
    return other & self

def __invert__(self):
    """Implementation of ~ operator - returns NotAny."""
    return NotAny(self)
def __call__(self, name):
    """Shortcut for setResultsName, with listAllMatches=default::
        userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
    could be written as::
        userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
    """
    return self.setResultsName(name)
def suppress(self):
    """Suppresses the output of this ParserElement; useful to keep
    punctuation from cluttering up returned output."""
    return Suppress(self)

def leaveWhitespace(self):
    """Disables the skipping of whitespace before matching the
    characters in the ParserElement's defined pattern. This is normally
    only used internally by the pyparsing module, but may be needed in
    some whitespace-sensitive grammars."""
    self.skipWhitespace = False
    return self

def setWhitespaceChars(self, chars):
    """Overrides the default whitespace chars."""
    self.skipWhitespace = True
    self.whiteChars = chars
    self.copyDefaultWhiteChars = False
    return self

def parseWithTabs(self):
    """Overrides default behavior to expand <TAB>s to spaces before
    parsing the input string. Must be called before parseString when the
    input grammar contains elements that match <TAB> characters."""
    self.keepTabs = True
    return self

def ignore(self, other):
    """Define expression to be ignored (e.g., comments) while doing
    pattern matching; may be called repeatedly, to define multiple
    comment or other ignorable patterns."""
    if isinstance(other, Suppress):
        if other not in self.ignoreExprs:
            self.ignoreExprs.append(other)
    else:
        self.ignoreExprs.append(Suppress(other))
    return self
def setDebugActions(self, startAction, successAction, exceptionAction):
    """Enable display of debugging messages while doing pattern
    matching."""
    self.debugActions = (startAction or _defaultStartDebugAction,
                         successAction or _defaultSuccessDebugAction,
                         exceptionAction or _defaultExceptionDebugAction)
    self.debug = True
    return self

def setDebug(self, flag=True):
    """Enable display of debugging messages while doing pattern
    matching. Set flag to True to enable, False to disable."""
    if flag:
        self.setDebugActions(_defaultStartDebugAction,
                             _defaultSuccessDebugAction,
                             _defaultExceptionDebugAction)
    else:
        self.debug = False
    return self

def validate(self, validateTrace=[]):
    """Check defined expressions for valid structure, check for infinite
    recursive definitions."""
    self.checkRecursion([])

def parseFile(self, file_or_filename):
    """Execute the parse expression on the given file or filename. If a
    filename is specified (instead of a file object), the entire file is
    opened, read, and closed before parsing."""
    try:
        file_contents = file_or_filename.read()
    except AttributeError:
        f = open(file_or_filename, 'rb')
        file_contents = f.read()
        f.close()
    return self.parseString(file_contents)
@staticmethod
def setDefaultKeywordChars(chars):
    """Overrides the default Keyword chars."""
    Keyword.DEFAULT_KEYWORD_CHARS = chars
def __init__(self, pattern, flags=0):
    """The parameters pattern and flags are passed to the re.compile()
    function as-is. See the Python re module for an explanation of the
    acceptable patterns and flags."""
    super(Regex, self).__init__()
    if len(pattern) == 0:
        warnings.warn('null string passed to Regex; use Empty() instead',
                      SyntaxWarning, stacklevel=2)
    self.pattern = pattern
    self.flags = flags
    try:
        self.re = re.compile(self.pattern, self.flags)
        self.reString = self.pattern
    except sre_constants.error:
        warnings.warn('invalid pattern (%s) passed to Regex' % pattern,
                      SyntaxWarning, stacklevel=2)
        raise
    self.name = _ustr(self)
    self.errmsg = 'Expected ' + self.name
    self.mayIndexError = False
    self.mayReturnEmpty = True
def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False,
             unquoteResults=True, endQuoteChar=None):
    """Defined with the following parameters:
    - quoteChar - string of one or more characters defining the quote
      delimiting string
    - escChar - character to escape quotes, typically backslash
      (default=None)
    - escQuote - special quote sequence to escape an embedded quote
      string (such as SQL's "" to escape an embedded ") (default=None)
    - multiline - boolean indicating whether quotes can span multiple
      lines (default=False)
    - unquoteResults - boolean indicating whether the matched text
      should be unquoted (default=True)
    - endQuoteChar - string of one or more characters defining the end
      of the quote delimited string (default=None => same as quoteChar)
    """
    super(QuotedString, self).__init__()
    quoteChar = quoteChar.strip()
    if len(quoteChar) == 0:
        warnings.warn('quoteChar cannot be the empty string',
                      SyntaxWarning, stacklevel=2)
        raise SyntaxError()
    if endQuoteChar is None:
        endQuoteChar = quoteChar
    else:
        endQuoteChar = endQuoteChar.strip()
        if len(endQuoteChar) == 0:
            warnings.warn('endQuoteChar cannot be the empty string',
                          SyntaxWarning, stacklevel=2)
            raise SyntaxError()
    self.quoteChar = quoteChar
    self.quoteCharLen = len(quoteChar)
    self.firstQuoteChar = quoteChar[0]
    self.endQuoteChar = endQuoteChar
    self.endQuoteCharLen = len(endQuoteChar)
    self.escChar = escChar
    self.escQuote = escQuote
    self.unquoteResults = unquoteResults
    if multiline:
        self.flags = re.MULTILINE | re.DOTALL
        self.pattern = ('%s(?:[^%s%s]' % (
            re.escape(self.quoteChar),
            _escapeRegexRangeChars(self.endQuoteChar[0]),
            (escChar is not None and _escapeRegexRangeChars(escChar)) or ''))
    else:
        self.flags = 0
        self.pattern = ('%s(?:[^%s\\n\\r%s]' % (
            re.escape(self.quoteChar),
            _escapeRegexRangeChars(self.endQuoteChar[0]),
            (escChar is not None and _escapeRegexRangeChars(escChar)) or ''))
    if len(self.endQuoteChar) > 1:
        self.pattern += (
            '|(?:' + ')|(?:'.join(
                '%s[^%s]' % (re.escape(self.endQuoteChar[:i]),
                             _escapeRegexRangeChars(self.endQuoteChar[i]))
                for i in range(len(self.endQuoteChar) - 1, 0, -1)) + ')')
    if escQuote:
        self.pattern += ('|(?:%s)' % re.escape(escQuote))
    if escChar:
        self.pattern += ('|(?:%s.)' % re.escape(escChar))
        self.escCharReplacePattern = re.escape(self.escChar) + '(.)'
    self.pattern += (')*%s' % re.escape(self.endQuoteChar))
    try:
        self.re = re.compile(self.pattern, self.flags)
        self.reString = self.pattern
    except sre_constants.error:
        warnings.warn('invalid pattern (%s) passed to Regex' % self.pattern,
                      SyntaxWarning, stacklevel=2)
        raise
    self.name = _ustr(self)
    self.errmsg = 'Expected ' + self.name
    self.mayIndexError = False
    self.mayReturnEmpty = True
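# QuotedString example: escQuote handles SQL-style doubled quotes, and
# unquoteResults (the default) strips the delimiters from the token.
from pyparsing import QuotedString

sql_string = QuotedString("'", escQuote="''")
print(sql_string.parseString("'it''s here'")[0])   # it's here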
def leaveWhitespace(self):
    """Extends leaveWhitespace defined in base class, and also invokes
    leaveWhitespace on all contained expressions."""
    self.skipWhitespace = False
    self.exprs = [e.copy() for e in self.exprs]
    for e in self.exprs:
        e.leaveWhitespace()
    return self
@classmethod
def _get_param_names(cls):
    """Get parameter names for the estimator."""
    # introspect the constructor, unwrapping any deprecation wrapper
    init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
    if init is object.__init__:
        # no explicit constructor to introspect
        return []
    init_signature = signature(init)
    parameters = [p for p in init_signature.parameters.values()
                  if p.name != 'self' and p.kind != p.VAR_KEYWORD]
    for p in parameters:
        if p.kind == p.VAR_POSITIONAL:
            raise RuntimeError(
                "scikit-learn estimators should always specify their "
                "parameters in the signature of their __init__ "
                "(no varargs). %s with constructor %s doesn't follow "
                "this convention." % (cls, init_signature))
    return sorted([p.name for p in parameters])
def get_params(self, deep=True):
    """Get parameters for this estimator.

    Parameters
    ----------
    deep : boolean, optional
        If True, will return the parameters for this estimator and
        contained subobjects that are estimators.

    Returns
    -------
    params : mapping of string to any
        Parameter names mapped to their values.
    """
    out = dict()
    for key in self._get_param_names():
        warnings.simplefilter('always', DeprecationWarning)
        try:
            with warnings.catch_warnings(record=True) as w:
                value = getattr(self, key, None)
            if len(w) and w[0].category == DeprecationWarning:
                # skip deprecated parameters
                continue
        finally:
            warnings.filters.pop(0)
        if deep and hasattr(value, 'get_params'):
            deep_items = value.get_params().items()
            out.update((key + '__' + k, val) for k, val in deep_items)
        out[key] = value
    return out
def set_params(self, **params):
    """Set the parameters of this estimator.

    The method works on simple estimators as well as on nested objects
    (such as pipelines). The latter have parameters of the form
    ``<component>__<parameter>`` so that it's possible to update each
    component of a nested object.

    Returns
    -------
    self
    """
    if not params:
        # simple optimisation to gain speed (inspect is slow)
        return self
    valid_params = self.get_params(deep=True)
    for key, value in six.iteritems(params):
        split = key.split('__', 1)
        if len(split) > 1:
            # nested objects case
            name, sub_name = split
            if name not in valid_params:
                raise ValueError(
                    'Invalid parameter %s for estimator %s. Check the '
                    'list of available parameters with '
                    '`estimator.get_params().keys()`.' % (name, self))
            sub_object = valid_params[name]
            sub_object.set_params(**{sub_name: value})
        else:
            # simple objects case
            if key not in valid_params:
                raise ValueError(
                    'Invalid parameter %s for estimator %s. Check the '
                    'list of available parameters with '
                    '`estimator.get_params().keys()`.'
                    % (key, self.__class__.__name__))
            setattr(self, key, value)
    return self
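# Nested-parameter example: the <component>__<parameter> syntax described
# above reaches into estimators held inside a Pipeline.
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

pipe = Pipeline([('scale', StandardScaler()), ('clf', SVC())])
pipe.set_params(clf__C=10.0)          # forwarded to the nested SVC
print(pipe.get_params()['clf__C'])    # 10.0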
def score(self, X, y, sample_weight=None):
    """Returns the mean accuracy on the given test data and labels.

    In multi-label classification, this is the subset accuracy which is
    a harsh metric since you require for each sample that each label set
    be correctly predicted.

    Parameters
    ----------
    X : array-like, shape = (n_samples, n_features)
        Test samples.
    y : array-like, shape = (n_samples) or (n_samples, n_outputs)
        True labels for X.
    sample_weight : array-like, shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    score : float
        Mean accuracy of self.predict(X) wrt. y.
    """
    from .metrics import accuracy_score
    return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
def score(self, X, y, sample_weight=None):
    """Returns the coefficient of determination R^2 of the prediction.

    The coefficient R^2 is defined as (1 - u/v), where u is the residual
    sum of squares ((y_true - y_pred) ** 2).sum() and v is the total sum
    of squares ((y_true - y_true.mean()) ** 2).sum(). The best possible
    score is 1.0 and it can be negative (because the model can be
    arbitrarily worse). A constant model that always predicts the
    expected value of y, disregarding the input features, would get a
    R^2 score of 0.0.

    Parameters
    ----------
    X : array-like, shape = (n_samples, n_features)
        Test samples.
    y : array-like, shape = (n_samples) or (n_samples, n_outputs)
        True values for X.
    sample_weight : array-like, shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    score : float
        R^2 of self.predict(X) wrt. y.
    """
    from .metrics import r2_score
    return r2_score(y, self.predict(X), sample_weight=sample_weight,
                    multioutput='variance_weighted')
def fit_predict(self, X, y=None):
    """Performs clustering on X and returns cluster labels.

    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
        Input data.

    Returns
    -------
    y : ndarray, shape (n_samples,)
        cluster labels
    """
    self.fit(X)
    return self.labels_

@property
def biclusters_(self):
    """Convenient way to get row and column indicators together.

    Returns the ``rows_`` and ``columns_`` members.
    """
    return self.rows_, self.columns_
def get_indices(self, i):
    """Row and column indices of the i'th bicluster.

    Only works if ``rows_`` and ``columns_`` attributes exist.

    Parameters
    ----------
    i : int
        The index of the cluster.

    Returns
    -------
    row_ind : np.array, dtype=np.intp
        Indices of rows in the dataset that belong to the bicluster.
    col_ind : np.array, dtype=np.intp
        Indices of columns in the dataset that belong to the bicluster.
    """
    rows = self.rows_[i]
    columns = self.columns_[i]
    return np.nonzero(rows)[0], np.nonzero(columns)[0]

def get_shape(self, i):
    """Shape of the i'th bicluster.

    Parameters
    ----------
    i : int
        The index of the cluster.

    Returns
    -------
    shape : (int, int)
        Number of rows and columns (resp.) in the bicluster.
    """
    indices = self.get_indices(i)
    return tuple(len(i) for i in indices)

def get_submatrix(self, i, data):
    """Returns the submatrix corresponding to bicluster `i`.

    Parameters
    ----------
    i : int
        The index of the cluster.
    data : array
        The data.

    Returns
    -------
    submatrix : array
        The submatrix corresponding to bicluster i.

    Notes
    -----
    Works with sparse matrices. Only works if ``rows_`` and ``columns_``
    attributes exist.
    """
    from .utils.validation import check_array
    data = check_array(data, accept_sparse='csr')
    row_ind, col_ind = self.get_indices(i)
    return data[row_ind[:, np.newaxis], col_ind]
def fit_transform(self, X, y=None, **fit_params):
    """Fit to data, then transform it.

    Fits transformer to X and y with optional parameters fit_params and
    returns a transformed version of X.

    Parameters
    ----------
    X : numpy array of shape [n_samples, n_features]
        Training set.
    y : numpy array of shape [n_samples]
        Target values.

    Returns
    -------
    X_new : numpy array of shape [n_samples, n_features_new]
        Transformed array.
    """
    if y is None:
        return self.fit(X, **fit_params).transform(X)
    else:
        return self.fit(X, y, **fit_params).transform(X)
def score(self, X, y=None):
    """Returns the score of the model on the data X

    Parameters
    ----------
    X : array-like, shape = (n_samples, n_features)

    Returns
    -------
    score : float
    """
    pass
def _compute_distances(self, query, candidates):
    """Computes the cosine distance.

    Distance is from the query to points in the candidates array.
    Returns argsort of distances in the candidates array and sorted
    distances.
    """
    if candidates.shape == (0,):
        return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
    if sparse.issparse(self._fit_X):
        candidate_X = self._fit_X[candidates]
    else:
        candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
    distances = pairwise_distances(query, candidate_X,
                                   metric='cosine')[0]
    distance_positions = np.argsort(distances)
    distances = distances.take(distance_positions, mode='clip', axis=0)
    return distance_positions, distances
def _generate_masks(self):
    """Creates left and right masks for all hash lengths."""
    tri_size = MAX_HASH_SIZE + 1
    left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
    right_mask = left_mask[::-1, ::-1]
    self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
    self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
    """Performs the Synchronous ascending phase.

    Returns an array of candidates, their distance ranks and distances.
    """
    index_size = self._fit_X.shape[0]
    n_candidates = 0
    candidate_set = set()
    min_candidates = self.n_candidates * self.n_estimators
    while (max_depth > self.min_hash_match and
           (n_candidates < min_candidates or
            len(candidate_set) < n_neighbors)):
        left_mask = self._left_mask[max_depth]
        right_mask = self._right_mask[max_depth]
        for i in range(self.n_estimators):
            start, stop = _find_matching_indices(self.trees_[i],
                                                 bin_queries[i],
                                                 left_mask, right_mask)
            n_candidates += stop - start
            candidate_set.update(
                self.original_indices_[i][start:stop].tolist())
        max_depth -= 1
    candidates = np.fromiter(candidate_set, count=len(candidate_set),
                             dtype=np.intp)
    if candidates.shape[0] < n_neighbors:
        warnings.warn(
            'Number of candidates is not sufficient to retrieve %i '
            'neighbors with min_hash_match = %i. Candidates are filled '
            'up uniformly from unselected indices.'
            % (n_neighbors, self.min_hash_match))
        remaining = np.setdiff1d(np.arange(0, index_size), candidates)
        to_fill = n_neighbors - candidates.shape[0]
        candidates = np.concatenate((candidates, remaining[:to_fill]))
    ranks, distances = self._compute_distances(query,
                                               candidates.astype(int))
    return candidates[ranks[:n_neighbors]], distances[:n_neighbors]
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
    """Finds radius neighbors from the candidates obtained.

    Their distances from query are smaller than radius. Returns radius
    neighbors and distances.
    """
    ratio_within_radius = 1
    threshold = 1 - self.radius_cutoff_ratio
    total_candidates = np.array([], dtype=int)
    total_neighbors = np.array([], dtype=int)
    total_distances = np.array([], dtype=float)
    while (max_depth > self.min_hash_match and
           ratio_within_radius > threshold):
        left_mask = self._left_mask[max_depth]
        right_mask = self._right_mask[max_depth]
        candidates = []
        for i in range(self.n_estimators):
            start, stop = _find_matching_indices(self.trees_[i],
                                                 bin_queries[i],
                                                 left_mask, right_mask)
            candidates.extend(
                self.original_indices_[i][start:stop].tolist())
        candidates = np.setdiff1d(candidates, total_candidates)
        total_candidates = np.append(total_candidates, candidates)
        ranks, distances = self._compute_distances(query, candidates)
        m = np.searchsorted(distances, radius, side='right')
        positions = np.searchsorted(total_distances, distances[:m])
        total_neighbors = np.insert(total_neighbors, positions,
                                    candidates[ranks[:m]])
        total_distances = np.insert(total_distances, positions,
                                    distances[:m])
        ratio_within_radius = (total_neighbors.shape[0] /
                               float(total_candidates.shape[0]))
        max_depth = max_depth - 1
    return total_neighbors, total_distances
def fit(self, X, y=None):
    """Fit the LSH forest on the data.

    This creates binary hashes of input data points by getting the dot
    product of input points and hash_function then transforming the
    projection into a binary string array based on the sign
    (positive/negative) of the projection. A sorted array of binary
    hashes is created.

    Parameters
    ----------
    X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
        List of n_features-dimensional data points. Each row corresponds
        to a single data point.

    Returns
    -------
    self : object
        Returns self.
    """
    self._fit_X = check_array(X, accept_sparse='csr')
    self.hash_functions_ = []
    self.trees_ = []
    self.original_indices_ = []
    rng = check_random_state(self.random_state)
    int_max = np.iinfo(np.int32).max
    for i in range(self.n_estimators):
        hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
                                              rng.randint(0, int_max))
        hashes = hasher.fit_transform(self._fit_X)[:, 0]
        original_index = np.argsort(hashes)
        bin_hashes = hashes[original_index]
        self.original_indices_.append(original_index)
        self.trees_.append(bin_hashes)
        self.hash_functions_.append(hasher)
    self._generate_masks()
    return self
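# Usage sketch for the fit/query cycle above. Note that LSHForest shipped
# with scikit-learn 0.17-0.19 and was removed from later releases, so this
# assumes one of those versions is installed:
import numpy as np
from sklearn.neighbors import LSHForest

rng = np.random.RandomState(42)
X = rng.randn(100, 10)
lshf = LSHForest(n_estimators=10, random_state=42).fit(X)
distances, indices = lshf.kneighbors(X[:3], n_neighbors=5)  # approximate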
def _query(self, X):
    """Performs descending phase to find maximum depth."""
    bin_queries = np.asarray([hasher.transform(X)[:, 0]
                              for hasher in self.hash_functions_])
    bin_queries = np.rollaxis(bin_queries, 1)
    depths = [_find_longest_prefix_match(tree, tree_queries,
                                         MAX_HASH_SIZE,
                                         self._left_mask,
                                         self._right_mask)
              for tree, tree_queries in zip(self.trees_,
                                            np.rollaxis(bin_queries, 1))]
    return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
    """Returns n_neighbors of approximate nearest neighbors.

    Parameters
    ----------
    X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
        List of n_features-dimensional data points. Each row corresponds
        to a single query.
    n_neighbors : int, optional (default = None)
        Number of neighbors required. If not provided, this will return
        the number specified at the initialization.
    return_distance : boolean, optional (default = True)
        Returns the distances of neighbors if set to True.

    Returns
    -------
    dist : array, shape (n_samples, n_neighbors)
        Array representing the cosine distances to each point, only
        present if return_distance=True.
    ind : array, shape (n_samples, n_neighbors)
        Indices of the approximate nearest points in the population
        matrix.
    """
    if not hasattr(self, 'hash_functions_'):
        raise ValueError('estimator should be fitted.')
    if n_neighbors is None:
        n_neighbors = self.n_neighbors
    X = check_array(X, accept_sparse='csr')
    neighbors, distances = [], []
    bin_queries, max_depth = self._query(X)
    for i in range(X.shape[0]):
        neighs, dists = self._get_candidates(X[[i]], max_depth[i],
                                             bin_queries[i], n_neighbors)
        neighbors.append(neighs)
        distances.append(dists)
    if return_distance:
        return np.array(distances), np.array(neighbors)
    else:
        return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
    """Finds the neighbors within a given radius of a point or points.

    Return the indices and distances of some points from the dataset
    lying in a ball with size ``radius`` around the points of the query
    array. Points lying on the boundary are included in the results.

    The result points are *not* necessarily sorted by distance to their
    query point.

    LSH Forest being an approximate method, some true neighbors from the
    indexed dataset might be missing from the results.

    Parameters
    ----------
    X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
        List of n_features-dimensional data points. Each row corresponds
        to a single query.
    radius : float
        Limiting distance of neighbors to return. (default is the value
        passed to the constructor).
    return_distance : boolean, optional (default = False)
        Returns the distances of neighbors if set to True.

    Returns
    -------
    dist : array, shape (n_samples,) of arrays
        Each element is an array representing the cosine distances to
        some points found within ``radius`` of the respective query.
        Only present if ``return_distance=True``.
    ind : array, shape (n_samples,) of arrays
        Each element is an array of indices for neighbors within
        ``radius`` of the respective query.
    """
    if not hasattr(self, 'hash_functions_'):
        raise ValueError('estimator should be fitted.')
    if radius is None:
        radius = self.radius
    X = check_array(X, accept_sparse='csr')
    neighbors, distances = [], []
    bin_queries, max_depth = self._query(X)
    for i in range(X.shape[0]):
        neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
                                                   bin_queries[i], radius)
        neighbors.append(neighs)
        distances.append(dists)
    if return_distance:
        return _array_of_arrays(distances), _array_of_arrays(neighbors)
    else:
        return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
    """Inserts new data into the already fitted LSH Forest.

    Cost is proportional to new total size, so additions should be
    batched.

    Parameters
    ----------
    X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
        New data point to be inserted into the LSH Forest.
    """
    X = check_array(X, accept_sparse='csr')
    if not hasattr(self, 'hash_functions_'):
        return self.fit(X)
    if X.shape[1] != self._fit_X.shape[1]:
        raise ValueError('Number of features in X and fitted array does '
                         'not match.')
    n_samples = X.shape[0]
    n_indexed = self._fit_X.shape[0]
    for i in range(self.n_estimators):
        bin_X = self.hash_functions_[i].transform(X)[:, 0]
        positions = self.trees_[i].searchsorted(bin_X)
        self.trees_[i] = np.insert(self.trees_[i], positions, bin_X)
        self.original_indices_[i] = np.insert(
            self.original_indices_[i], positions,
            np.arange(n_indexed, n_indexed + n_samples))
    if sparse.issparse(X) or sparse.issparse(self._fit_X):
        self._fit_X = sparse.vstack((self._fit_X, X))
    else:
        self._fit_X = np.row_stack((self._fit_X, X))
    return self
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
    """Finds the K-neighbors of a point.

    Returns indices of and distances to the neighbors of each point.

    Parameters
    ----------
    X : array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == 'precomputed'
        The query point or points. If not provided, neighbors of each
        indexed point are returned. In this case, the query point is not
        considered its own neighbor.
    n_neighbors : int
        Number of neighbors to get (default is the value passed to the
        constructor).
    return_distance : boolean, optional. Defaults to True.
        If False, distances will not be returned

    Returns
    -------
    dist : array
        Array representing the lengths to points, only present if
        return_distance=True
    ind : array
        Indices of the nearest points in the population matrix.

    Examples
    --------
    In the following example, we construct a NeighborsClassifier class
    from an array representing our data set and ask who's the closest
    point to [1,1,1]

    >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
    >>> from sklearn.neighbors import NearestNeighbors
    >>> neigh = NearestNeighbors(n_neighbors=1)
    >>> neigh.fit(samples) # doctest: +ELLIPSIS
    NearestNeighbors(algorithm='auto', leaf_size=30, ...)
    >>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
    (array([[ 0.5]]), array([[2]]...))

    As you can see, it returns [[0.5]], and [[2]], which means that the
    element is at distance 0.5 and is the third element of samples
    (indexes start at 0). You can also query for multiple points:

    >>> X = [[0., 1., 0.], [1., 0., 1.]]
    >>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
    array([[1],
           [2]]...)
    """
    if self._fit_method is None:
        raise NotFittedError('Must fit neighbors before querying.')
    if n_neighbors is None:
        n_neighbors = self.n_neighbors
    if X is not None:
        query_is_train = False
        X = check_array(X, accept_sparse='csr')
    else:
        query_is_train = True
        X = self._fit_X
        # include an extra neighbor to account for the sample itself
        # being returned, which is removed later
        n_neighbors += 1
    train_size = self._fit_X.shape[0]
    if n_neighbors > train_size:
        raise ValueError('Expected n_neighbors <= n_samples, '
                         'but n_samples = %d, n_neighbors = %d'
                         % (train_size, n_neighbors))
    n_samples, _ = X.shape
    sample_range = np.arange(n_samples)[:, None]
    n_jobs = _get_n_jobs(self.n_jobs)
    if self._fit_method == 'brute':
        if self.effective_metric_ == 'euclidean':
            dist = pairwise_distances(X, self._fit_X, 'euclidean',
                                      n_jobs=n_jobs, squared=True)
        else:
            dist = pairwise_distances(X, self._fit_X,
                                      self.effective_metric_,
                                      n_jobs=n_jobs,
                                      **self.effective_metric_params_)
        neigh_ind = np.argpartition(dist, n_neighbors - 1, axis=1)
        neigh_ind = neigh_ind[:, :n_neighbors]
        # argpartition doesn't guarantee sorted order, so we sort again
        neigh_ind = neigh_ind[sample_range,
                              np.argsort(dist[sample_range, neigh_ind])]
        if return_distance:
            if self.effective_metric_ == 'euclidean':
                result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
            else:
                result = dist[sample_range, neigh_ind], neigh_ind
        else:
            result = neigh_ind
    elif self._fit_method in ['ball_tree', 'kd_tree']:
        if issparse(X):
            raise ValueError(
                "%s does not work with sparse matrices. Densify the "
                "data, or set algorithm='brute'" % self._fit_method)
        result = Parallel(n_jobs, backend='threading')(
            delayed(self._tree.query, check_pickle=False)(
                X[s], n_neighbors, return_distance)
            for s in gen_even_slices(X.shape[0], n_jobs))
        if return_distance:
            dist, neigh_ind = tuple(zip(*result))
            result = np.vstack(dist), np.vstack(neigh_ind)
        else:
            result = np.vstack(result)
    else:
        raise ValueError('internal: _fit_method not recognized')
    if not query_is_train:
        return result
    else:
        # If the query data is the same as the indexed data, we would
        # like to ignore the first nearest neighbor of every sample,
        # i.e. the sample itself.
        if return_distance:
            dist, neigh_ind = result
        else:
            neigh_ind = result
        sample_mask = neigh_ind != sample_range
        # corner case: when there are duplicates the first NN may not be
        # the sample itself; in that case mask the first duplicate
        dup_gr_nbrs = np.all(sample_mask, axis=1)
        sample_mask[:, 0][dup_gr_nbrs] = False
        neigh_ind = np.reshape(neigh_ind[sample_mask],
                               (n_samples, n_neighbors - 1))
        if return_distance:
            dist = np.reshape(dist[sample_mask],
                              (n_samples, n_neighbors - 1))
            return dist, neigh_ind
        return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None, mode='connectivity'):
    """Computes the (weighted) graph of k-Neighbors for points in X

    Parameters
    ----------
    X : array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == 'precomputed'
        The query point or points. If not provided, neighbors of each
        indexed point are returned. In this case, the query point is not
        considered its own neighbor.
    n_neighbors : int
        Number of neighbors for each sample. (default is value passed to
        the constructor).
    mode : {'connectivity', 'distance'}, optional
        Type of returned matrix: 'connectivity' will return the
        connectivity matrix with ones and zeros, in 'distance' the edges
        are Euclidean distance between points.

    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
        n_samples_fit is the number of samples in the fitted data
        A[i, j] is assigned the weight of edge that connects i to j.

    Examples
    --------
    >>> X = [[0], [3], [1]]
    >>> from sklearn.neighbors import NearestNeighbors
    >>> neigh = NearestNeighbors(n_neighbors=2)
    >>> neigh.fit(X) # doctest: +ELLIPSIS
    NearestNeighbors(algorithm='auto', leaf_size=30, ...)
    >>> A = neigh.kneighbors_graph(X)
    >>> A.toarray()
    array([[ 1.,  0.,  1.],
           [ 0.,  1.,  1.],
           [ 1.,  0.,  1.]])

    See also
    --------
    NearestNeighbors.radius_neighbors_graph
    """
    if n_neighbors is None:
        n_neighbors = self.n_neighbors
    if X is not None:
        X = check_array(X, accept_sparse='csr')
        n_samples1 = X.shape[0]
    else:
        n_samples1 = self._fit_X.shape[0]
    n_samples2 = self._fit_X.shape[0]
    n_nonzero = n_samples1 * n_neighbors
    A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
    if mode == 'connectivity':
        A_data = np.ones(n_samples1 * n_neighbors)
        A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
    elif mode == 'distance':
        A_data, A_ind = self.kneighbors(X, n_neighbors,
                                        return_distance=True)
        A_data = np.ravel(A_data)
    else:
        raise ValueError(
            'Unsupported mode, must be one of "connectivity" or '
            '"distance" but got "%s" instead' % mode)
    kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
                                  shape=(n_samples1, n_samples2))
    return kneighbors_graph
def radius_neighbors(self, X=None, radius=None, return_distance=True):
    """Finds the neighbors within a given radius of a point or points.

    Return the indices and distances of each point from the dataset
    lying in a ball with size ``radius`` around the points of the query
    array. Points lying on the boundary are included in the results.

    The result points are *not* necessarily sorted by distance to their
    query point.

    Parameters
    ----------
    X : array-like, (n_samples, n_features), optional
        The query point or points. If not provided, neighbors of each
        indexed point are returned. In this case, the query point is not
        considered its own neighbor.
    radius : float
        Limiting distance of neighbors to return. (default is the value
        passed to the constructor).
    return_distance : boolean, optional. Defaults to True.
        If False, distances will not be returned

    Returns
    -------
    dist : array, shape (n_samples,) of arrays
        Array representing the distances to each point, only present if
        return_distance=True. The distance values are computed according
        to the ``metric`` constructor parameter.
    ind : array, shape (n_samples,) of arrays
        An array of arrays of indices of the approximate nearest points
        from the population matrix that lie within a ball of size
        ``radius`` around the query points.

    Examples
    --------
    In the following example, we construct a NeighborsClassifier class
    from an array representing our data set and ask who's the closest
    point to [1, 1, 1]:

    >>> import numpy as np
    >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
    >>> from sklearn.neighbors import NearestNeighbors
    >>> neigh = NearestNeighbors(radius=1.6)
    >>> neigh.fit(samples) # doctest: +ELLIPSIS
    NearestNeighbors(algorithm='auto', leaf_size=30, ...)
    >>> rng = neigh.radius_neighbors([[1., 1., 1.]])
    >>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
    [ 1.5  0.5]
    >>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
    [1 2]

    The first array returned contains the distances to all points which
    are closer than 1.6, while the second array returned contains their
    indices. In general, multiple points can be queried at the same
    time.

    Notes
    -----
    Because the number of neighbors of each point is not necessarily
    equal, the results for multiple query points cannot be fit in a
    standard data array. For efficiency, `radius_neighbors` returns
    arrays of objects, where each object is a 1D array of indices or
    distances.
    """
    if self._fit_method is None:
        raise NotFittedError('Must fit neighbors before querying.')
    if X is not None:
        query_is_train = False
        X = check_array(X, accept_sparse='csr')
    else:
        query_is_train = True
        X = self._fit_X
    if radius is None:
        radius = self.radius
    n_samples = X.shape[0]
    if self._fit_method == 'brute':
        # for efficiency, use squared euclidean distances
        if self.effective_metric_ == 'euclidean':
            dist = pairwise_distances(X, self._fit_X, 'euclidean',
                                      n_jobs=self.n_jobs, squared=True)
            radius *= radius
        else:
            dist = pairwise_distances(X, self._fit_X,
                                      self.effective_metric_,
                                      n_jobs=self.n_jobs,
                                      **self.effective_metric_params_)
        neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
        neigh_ind = np.empty(n_samples, dtype='object')
        neigh_ind[:] = neigh_ind_list
        if return_distance:
            dist_array = np.empty(n_samples, dtype='object')
            if self.effective_metric_ == 'euclidean':
                dist_list = [np.sqrt(d[neigh_ind[i]])
                             for i, d in enumerate(dist)]
            else:
                dist_list = [d[neigh_ind[i]]
                             for i, d in enumerate(dist)]
            dist_array[:] = dist_list
            results = dist_array, neigh_ind
        else:
            results = neigh_ind
    elif self._fit_method in ['ball_tree', 'kd_tree']:
        if issparse(X):
            raise ValueError(
                "%s does not work with sparse matrices. Densify the "
                "data, or set algorithm='brute'" % self._fit_method)
        results = self._tree.query_radius(
            X, radius, return_distance=return_distance)
        if return_distance:
            results = results[::-1]
    else:
        raise ValueError('internal: _fit_method not recognized')
    if not query_is_train:
        return results
    else:
        # If the query data is the same as the indexed data, we would
        # like to ignore the first nearest neighbor of every sample,
        # i.e. the sample itself.
        if return_distance:
            dist, neigh_ind = results
        else:
            neigh_ind = results
        for ind, ind_neighbor in enumerate(neigh_ind):
            mask = ind_neighbor != ind
            neigh_ind[ind] = ind_neighbor[mask]
            if return_distance:
                dist[ind] = dist[ind][mask]
        if return_distance:
            return dist, neigh_ind
        return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
    """Computes the (weighted) graph of Neighbors for points in X

    Neighborhoods are restricted the points at a distance lower than
    radius.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features], optional
        The query point or points. If not provided, neighbors of each
        indexed point are returned. In this case, the query point is not
        considered its own neighbor.
    radius : float
        Radius of neighborhoods. (default is the value passed to the
        constructor).
    mode : {'connectivity', 'distance'}, optional
        Type of returned matrix: 'connectivity' will return the
        connectivity matrix with ones and zeros, in 'distance' the edges
        are Euclidean distance between points.

    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.

    Examples
    --------
    >>> X = [[0], [3], [1]]
    >>> from sklearn.neighbors import NearestNeighbors
    >>> neigh = NearestNeighbors(radius=1.5)
    >>> neigh.fit(X) # doctest: +ELLIPSIS
    NearestNeighbors(algorithm='auto', leaf_size=30, ...)
    >>> A = neigh.radius_neighbors_graph(X)
    >>> A.toarray()
    array([[ 1.,  0.,  1.],
           [ 0.,  1.,  0.],
           [ 1.,  0.,  1.]])

    See also
    --------
    kneighbors_graph
    """
    if X is not None:
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
    n_samples2 = self._fit_X.shape[0]
    if radius is None:
        radius = self.radius
    if mode == 'connectivity':
        A_ind = self.radius_neighbors(X, radius, return_distance=False)
        A_data = None
    elif mode == 'distance':
        dist, A_ind = self.radius_neighbors(X, radius,
                                            return_distance=True)
        A_data = np.concatenate(list(dist))
    else:
        raise ValueError(
            'Unsupported mode, must be one of "connectivity", or '
            '"distance" but got %s instead' % mode)
    n_samples1 = A_ind.shape[0]
    n_neighbors = np.array([len(a) for a in A_ind])
    A_ind = np.concatenate(list(A_ind))
    if A_data is None:
        A_data = np.ones(len(A_ind))
    A_indptr = np.concatenate((np.zeros(1, dtype=int),
                               np.cumsum(n_neighbors)))
    return csr_matrix((A_data, A_ind, A_indptr),
                      shape=(n_samples1, n_samples2))
def fit(self, X, y):
    """Fit the model using X as training data and y as target values

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree}
        Training data. If array or matrix, shape [n_samples, n_features],
        or [n_samples, n_samples] if metric='precomputed'.
    y : {array-like, sparse matrix}
        Target values, array of float values, shape = [n_samples]
        or [n_samples, n_outputs]
    """
    if not isinstance(X, (KDTree, BallTree)):
        X, y = check_X_y(X, y, 'csr', multi_output=True)
    self._y = y
    return self._fit(X)
def fit(self, X, y):
    """Fit the model using X as training data and y as target values

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree}
        Training data. If array or matrix, shape [n_samples, n_features],
        or [n_samples, n_samples] if metric='precomputed'.
    y : {array-like, sparse matrix}
        Target values of shape = [n_samples] or [n_samples, n_outputs]
    """
    if not isinstance(X, (KDTree, BallTree)):
        X, y = check_X_y(X, y, 'csr', multi_output=True)
    if y.ndim == 1 or (y.ndim == 2 and y.shape[1] == 1):
        if y.ndim != 1:
            warnings.warn('A column-vector y was passed when a 1d array '
                          'was expected. Please change the shape of y to '
                          '(n_samples, ), for example using ravel().',
                          DataConversionWarning, stacklevel=2)
        self.outputs_2d_ = False
        y = y.reshape((-1, 1))
    else:
        self.outputs_2d_ = True
    check_classification_targets(y)
    self.classes_ = []
    self._y = np.empty(y.shape, dtype=np.int)
    for k in range(self._y.shape[1]):
        classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
        self.classes_.append(classes)
    if not self.outputs_2d_:
        self.classes_ = self.classes_[0]
        self._y = self._y.ravel()
    return self._fit(X)
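# The label-encoding step above leans on np.unique(..., return_inverse=True),
# which yields the sorted class set and the integer codes in one pass:
import numpy as np

classes, encoded = np.unique(['cat', 'dog', 'cat'], return_inverse=True)
# classes -> array(['cat', 'dog'], ...); encoded -> array([0, 1, 0])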
def fit(self, X, y=None):
    """Fit the model using X as training data

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree}
        Training data. If array or matrix, shape [n_samples, n_features],
        or [n_samples, n_samples] if metric='precomputed'.
    """
    return self._fit(X)
def fit(self, X, y=None):
    """Fit the Kernel Density model on the data.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        List of n_features-dimensional data points. Each row corresponds
        to a single data point.
    """
    algorithm = self._choose_algorithm(self.algorithm, self.metric)
    X = check_array(X, order='C', dtype=DTYPE)
    kwargs = self.metric_params
    if kwargs is None:
        kwargs = {}
    self.tree_ = TREE_DICT[algorithm](X, metric=self.metric,
                                      leaf_size=self.leaf_size, **kwargs)
    return self
def score_samples(self, X):
    """Evaluate the density model on the data.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        An array of points to query. Last dimension should match
        dimension of training data (n_features).

    Returns
    -------
    density : ndarray, shape (n_samples,)
        The array of log(density) evaluations.
    """
    X = check_array(X, order='C', dtype=DTYPE)
    N = self.tree_.data.shape[0]
    atol_N = self.atol * N
    log_density = self.tree_.kernel_density(
        X, h=self.bandwidth, kernel=self.kernel, atol=atol_N,
        rtol=self.rtol, breadth_first=self.breadth_first, return_log=True)
    log_density -= np.log(N)
    return log_density
def score(self, X, y=None):
    """Compute the total log probability under the model.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        List of n_features-dimensional data points. Each row corresponds
        to a single data point.

    Returns
    -------
    logprob : float
        Total log-likelihood of the data in X.
    """
    return np.sum(self.score_samples(X))
def sample(self, n_samples=1, random_state=None):
    """Generate random samples from the model.

    Currently, this is implemented only for gaussian and tophat kernels.

    Parameters
    ----------
    n_samples : int, optional
        Number of samples to generate. Defaults to 1.
    random_state : int, RandomState instance or None. default to None
        If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random
        number generator; If None, the random number generator is the
        RandomState instance used by `np.random`.

    Returns
    -------
    X : array_like, shape (n_samples, n_features)
        List of samples.
    """
    if self.kernel not in ['gaussian', 'tophat']:
        raise NotImplementedError()
    data = np.asarray(self.tree_.data)
    rng = check_random_state(random_state)
    i = rng.randint(data.shape[0], size=n_samples)
    if self.kernel == 'gaussian':
        return np.atleast_2d(rng.normal(data[i], self.bandwidth))
    elif self.kernel == 'tophat':
        # draw points from a d-dimensional normal distribution, then use
        # an incomplete gamma function to map them onto a uniform
        # d-dimensional tophat distribution
        dim = data.shape[1]
        X = rng.normal(size=(n_samples, dim))
        s_sq = row_norms(X, squared=True)
        correction = (gammainc(0.5 * dim, 0.5 * s_sq) ** (1.0 / dim)
                      * self.bandwidth / np.sqrt(s_sq))
        return data[i] + X * correction[:, np.newaxis]
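# End-to-end sketch tying the KernelDensity fit / score_samples / sample
# methods above together (standard scikit-learn API):
import numpy as np
from sklearn.neighbors import KernelDensity

X = np.random.RandomState(0).randn(200, 1)
kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
log_density = kde.score_samples(X[:5])            # per-sample log p(x)
draws = kde.sample(n_samples=10, random_state=0)  # new points from the model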