Dataset columns (types and value ranges as reported by the dataset viewer):

    repo             string   (length 2 to 152)
    file             string   (length 15 to 239)
    code             string   (length 0 to 58.4M)
    file_length      int64    (0 to 58.4M)
    avg_line_length  float64  (0 to 1.81M)
    max_line_length  int64    (0 to 12.7M)
    extension_type   string   (364 distinct values)

Each row below lists these fields in order.
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/libfuturize/fixes/fix_execfile.py
code:

# coding: utf-8
"""
Fixer for the execfile() function on Py2, which was removed in Py3.

The Lib/lib2to3/fixes/fix_execfile.py module has some problems: see
python-future issue #37. This fixer merely imports execfile() from
past.builtins and leaves the code alone.

Adds this import line::

    from past.builtins import execfile

for the function execfile() that was removed from Py3.
"""

from __future__ import unicode_literals
from lib2to3 import fixer_base

from libfuturize.fixer_util import touch_import_top

expression = "name='execfile'"


class FixExecfile(fixer_base.BaseFix):
    BM_compatible = True
    run_order = 9

    PATTERN = """
              power< ({0}) trailer< '(' args=[any] ')' >
              rest=any* >
              """.format(expression)

    def transform(self, node, results):
        name = results["name"]
        touch_import_top(u'past.builtins', name.value, node)

file_length: 922, avg_line_length: 22.666667, max_line_length: 67, extension_type: py
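As an illustration of what this fixer is meant to produce, here is a minimal before/after sketch; the module name settings.py is hypothetical, not taken from this repository.

# Hypothetical Py2 input:
execfile('settings.py')

# Expected output after FixExecfile runs: the call itself is left alone and
# only the compatibility import is added at the top of the module.
from past.builtins import execfile
execfile('settings.py')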
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py
code:

"""
Fixer for adding:

    from __future__ import absolute_import
    from __future__ import division
    from __future__ import print_function

This is "stage 1": hopefully uncontroversial changes.

Stage 2 adds ``unicode_literals``.
"""

from lib2to3 import fixer_base
from libfuturize.fixer_util import future_import


class FixAddFutureImportsExceptUnicodeLiterals(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = "file_input"

    run_order = 9

    def transform(self, node, results):
        # Reverse order:
        future_import(u"print_function", node)
        future_import(u"division", node)
        future_import(u"absolute_import", node)

file_length: 663, avg_line_length: 22.714286, max_line_length: 67, extension_type: py
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/libfuturize/fixes/fix_division_safe.py
code:

"""
For the ``future`` package.

Adds this import line:

    from __future__ import division

at the top and changes any old-style divisions to be calls to
past.utils.old_div so the code runs as before on Py2.6/2.7 and has the
same behaviour on Py3.

If "from __future__ import division" is already in effect, this fixer does
nothing.
"""

from lib2to3 import fixer_base
from lib2to3.fixer_util import syms, does_tree_import
from libfuturize.fixer_util import (token, future_import, touch_import_top,
                                    wrap_in_fn_call)


def match_division(node):
    u"""
    __future__.division redefines the meaning of a single slash for division,
    so we match that and only that.
    """
    slash = token.SLASH
    return node.type == slash and not node.next_sibling.type == slash and \
        not node.prev_sibling.type == slash


class FixDivisionSafe(fixer_base.BaseFix):
    # BM_compatible = True
    run_order = 4    # this seems to be ignored?

    _accept_type = token.SLASH

    PATTERN = """
    term<(not('/') any)+ '/' ((not('/') any))>
    """

    def start_tree(self, tree, name):
        """
        Skip this fixer if "__future__.division" is already imported.
        """
        super(FixDivisionSafe, self).start_tree(tree, name)
        self.skip = "division" in tree.future_features

    def match(self, node):
        u"""
        Since the tree needs to be fixed once and only once if and only if it
        matches, we can start discarding matches after the first.
        """
        if (node.type == self.syms.term and
                len(node.children) == 3 and
                match_division(node.children[1])):
            expr1, expr2 = node.children[0], node.children[2]
            return expr1, expr2
        else:
            return False

    def transform(self, node, results):
        if self.skip:
            return
        future_import(u"division", node)
        touch_import_top(u'past.utils', u'old_div', node)
        expr1, expr2 = results[0].clone(), results[1].clone()
        # Strip any leading space for the first number:
        expr1.prefix = u''
        return wrap_in_fn_call("old_div", (expr1, expr2), prefix=node.prefix)

file_length: 2,242, avg_line_length: 29.726027, max_line_length: 77, extension_type: py
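A rough before/after sketch of the transformation described above; the variable names are illustrative only.

# Hypothetical Py2 input:
ratio = total / count

# Expected output: true division is enabled, and the old-style division is
# routed through past.utils.old_div so Py2 and Py3 agree on the result.
from __future__ import division
from past.utils import old_div
ratio = old_div(total, count)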
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/libfuturize/fixes/fix_bytes.py
code:

"""Optional fixer that changes all unprefixed string literals "..." to b"...".

br'abcd' is a SyntaxError on Python 2 but valid on Python 3.
ur'abcd' is a SyntaxError on Python 3 but valid on Python 2.
"""
from __future__ import unicode_literals

import re
from lib2to3.pgen2 import token
from lib2to3 import fixer_base

_literal_re = re.compile(r"[^bBuUrR]?[\'\"]")


class FixBytes(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = "STRING"

    def transform(self, node, results):
        if node.type == token.STRING:
            if _literal_re.match(node.value):
                new = node.clone()
                new.value = u'b' + new.value
                return new

file_length: 685, avg_line_length: 26.44, max_line_length: 78, extension_type: py
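A minimal sketch of this optional fixer's effect; the literal is made up.

# Input:
data = "abcd"

# Expected output: the unprefixed literal gains a b'' prefix; literals that
# already carry a b, u, or r prefix are left untouched.
data = b"abcd"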
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/libfuturize/fixes/fix_oldstr_wrap.py
code:

"""
For the ``future`` package.

Adds this import line:

    from past.builtins import str as oldstr

at the top and wraps any unadorned string literals 'abc' or explicit
byte-string literals b'abc' in oldstr() calls so the code has the same
behaviour on Py3 as on Py2.6/2.7.
"""

from __future__ import unicode_literals
import re
from lib2to3 import fixer_base
from lib2to3.pgen2 import token
from lib2to3.fixer_util import syms
from libfuturize.fixer_util import (future_import, touch_import_top,
                                    wrap_in_fn_call)


_literal_re = re.compile(r"[^uUrR]?[\'\"]")


class FixOldstrWrap(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = "STRING"

    def transform(self, node, results):
        if node.type == token.STRING:
            touch_import_top(u'past.types', u'oldstr', node)
            if _literal_re.match(node.value):
                new = node.clone()
                # Strip any leading space or comments:
                # TODO: check: do we really want to do this?
                new.prefix = u''
                new.value = u'b' + new.value
                wrapped = wrap_in_fn_call("oldstr", [new], prefix=node.prefix)
                return wrapped

file_length: 1,214, avg_line_length: 29.375, max_line_length: 80, extension_type: py
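A sketch of the expected effect, assuming the import added by touch_import_top makes oldstr available (note the docstring and the code disagree slightly on the exact import line); the literal is illustrative.

# Input:
header = b'abc'

# Expected output (roughly): the literal is wrapped so it behaves like a
# Py2 str even when the module runs on Python 3.
header = oldstr(b'abc')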
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/libfuturize/fixes/__init__.py
code:

import sys

from lib2to3 import refactor

# The following fixers are "safe": they convert Python 2 code to more
# modern Python 2 code. They should be uncontroversial to apply to most
# projects that are happy to drop support for Py2.5 and below. Applying
# them first will reduce the size of the patch set for the real porting.
lib2to3_fix_names_stage1 = set([
    'lib2to3.fixes.fix_apply',
    'lib2to3.fixes.fix_except',
    'lib2to3.fixes.fix_exec',
    'lib2to3.fixes.fix_exitfunc',
    'lib2to3.fixes.fix_funcattrs',
    'lib2to3.fixes.fix_has_key',
    'lib2to3.fixes.fix_idioms',
    # 'lib2to3.fixes.fix_import',    # makes any implicit relative imports explicit. (Use with ``from __future__ import absolute_import``)
    'lib2to3.fixes.fix_intern',
    'lib2to3.fixes.fix_isinstance',
    'lib2to3.fixes.fix_methodattrs',
    'lib2to3.fixes.fix_ne',
    # 'lib2to3.fixes.fix_next',      # would replace ``next`` method names with ``__next__``.
    'lib2to3.fixes.fix_numliterals',    # turns 1L into 1, 0755 into 0o755
    'lib2to3.fixes.fix_paren',
    # 'lib2to3.fixes.fix_print',     # see the libfuturize fixer that also adds ``from __future__ import print_function``
    # 'lib2to3.fixes.fix_raise',     # uses incompatible with_traceback() method on exceptions
    'lib2to3.fixes.fix_reduce',      # reduce is available in functools on Py2.6/Py2.7
    'lib2to3.fixes.fix_renames',     # sys.maxint -> sys.maxsize
    # 'lib2to3.fixes.fix_set_literal',    # this is unnecessary and breaks Py2.6 support
    'lib2to3.fixes.fix_repr',
    'lib2to3.fixes.fix_standarderror',
    'lib2to3.fixes.fix_sys_exc',
    'lib2to3.fixes.fix_throw',
    'lib2to3.fixes.fix_tuple_params',
    'lib2to3.fixes.fix_types',
    'lib2to3.fixes.fix_ws_comma',    # can perhaps decrease readability: see issue #58
    'lib2to3.fixes.fix_xreadlines',
])

# The following fixers add a dependency on the ``future`` package in order to
# support Python 2:
lib2to3_fix_names_stage2 = set([
    # 'lib2to3.fixes.fix_buffer',    # perhaps not safe. Test this.
    # 'lib2to3.fixes.fix_callable',  # not needed in Py3.2+
    'lib2to3.fixes.fix_dict',        # TODO: add support for utils.viewitems() etc. and move to stage2
    # 'lib2to3.fixes.fix_execfile',  # some problems: see issue #37. We use a custom fixer instead (see below)
    # 'lib2to3.fixes.fix_future',    # we don't want to remove __future__ imports
    'lib2to3.fixes.fix_getcwdu',
    # 'lib2to3.fixes.fix_imports',   # called by libfuturize.fixes.fix_future_standard_library
    # 'lib2to3.fixes.fix_imports2',  # we don't handle this yet (dbm)
    'lib2to3.fixes.fix_input',
    'lib2to3.fixes.fix_itertools',
    'lib2to3.fixes.fix_itertools_imports',
    'lib2to3.fixes.fix_filter',
    'lib2to3.fixes.fix_long',
    'lib2to3.fixes.fix_map',
    # 'lib2to3.fixes.fix_metaclass', # causes SyntaxError in Py2! Use the one from ``six`` instead
    'lib2to3.fixes.fix_next',
    'lib2to3.fixes.fix_nonzero',     # TODO: cause this to import ``object`` and/or add a decorator for mapping __bool__ to __nonzero__
    'lib2to3.fixes.fix_operator',    # we will need support for this by e.g. extending the Py2 operator module to provide those functions in Py3
    'lib2to3.fixes.fix_raw_input',
    # 'lib2to3.fixes.fix_unicode',   # strips off the u'' prefix, which removes a potentially helpful source of information for disambiguating unicode/byte strings
    # 'lib2to3.fixes.fix_urllib',    # included in libfuturize.fix_future_standard_library_urllib
    # 'lib2to3.fixes.fix_xrange',    # custom one because of a bug with Py3.3's lib2to3
    'lib2to3.fixes.fix_zip',
])

libfuturize_fix_names_stage1 = set([
    'libfuturize.fixes.fix_absolute_import',
    'libfuturize.fixes.fix_next_call',    # obj.next() -> next(obj). Unlike lib2to3.fixes.fix_next, doesn't change the ``next`` method to ``__next__``.
    'libfuturize.fixes.fix_print_with_import',
    'libfuturize.fixes.fix_raise',
    # 'libfuturize.fixes.fix_order___future__imports',    # TODO: consolidate to a single line to simplify testing
])

libfuturize_fix_names_stage2 = set([
    'libfuturize.fixes.fix_basestring',
    # 'libfuturize.fixes.fix_add__future__imports_except_unicode_literals',    # just in case
    'libfuturize.fixes.fix_cmp',
    'libfuturize.fixes.fix_division_safe',
    'libfuturize.fixes.fix_execfile',
    'libfuturize.fixes.fix_future_builtins',
    'libfuturize.fixes.fix_future_standard_library',
    'libfuturize.fixes.fix_future_standard_library_urllib',
    'libfuturize.fixes.fix_metaclass',
    'libpasteurize.fixes.fix_newstyle',
    'libfuturize.fixes.fix_object',
    # 'libfuturize.fixes.fix_order___future__imports',    # TODO: consolidate to a single line to simplify testing
    'libfuturize.fixes.fix_unicode_keep_u',
    # 'libfuturize.fixes.fix_unicode_literals_import',
    'libfuturize.fixes.fix_xrange_with_import',    # custom one because of a bug with Py3.3's lib2to3
])

file_length: 5,142, avg_line_length: 51.479592, max_line_length: 163, extension_type: py
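These name sets are normally consumed by the futurize command line, but they can also be fed straight to lib2to3. A minimal sketch, assuming python-future is installed; the source string being refactored is made up.

from lib2to3.refactor import RefactoringTool
from libfuturize.fixes import (lib2to3_fix_names_stage1,
                               libfuturize_fix_names_stage1)

# Combine the stage-1 fixer module names and run them over a Py2 snippet.
fixers = sorted(lib2to3_fix_names_stage1 | libfuturize_fix_names_stage1)
tool = RefactoringTool(fixers)
tree = tool.refactor_string(u"print 'hello'\n", '<example>')
print(tree)   # roughly: print('hello') plus the added __future__ import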
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/libfuturize/fixes/fix_remove_old__future__imports.py
code:

"""
Fixer for removing any of these lines:

    from __future__ import with_statement
    from __future__ import nested_scopes
    from __future__ import generators

The reason is that __future__ imports like these are required to be the first
line of code (after docstrings) on Python 2.6+, which can get in the way.

These imports are always enabled in Python 2.6+, which is the minimum sane
version to target for Py2/3 compatibility.
"""

from lib2to3 import fixer_base
from libfuturize.fixer_util import remove_future_import


class FixRemoveOldFutureImports(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = "file_input"
    run_order = 1

    def transform(self, node, results):
        remove_future_import(u"with_statement", node)
        remove_future_import(u"nested_scopes", node)
        remove_future_import(u"generators", node)

file_length: 852, avg_line_length: 29.464286, max_line_length: 77, extension_type: py
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/libfuturize/fixes/fix_future_standard_library.py
code:

"""
For the ``future`` package.

Changes any imports needed to reflect the standard library reorganization.
Also adds these import lines:

    from future import standard_library
    standard_library.install_aliases()

after any __future__ imports but before any other imports.
"""

from lib2to3.fixes.fix_imports import FixImports
from libfuturize.fixer_util import touch_import_top


class FixFutureStandardLibrary(FixImports):
    run_order = 8

    def transform(self, node, results):
        result = super(FixFutureStandardLibrary, self).transform(node, results)
        # TODO: add a blank line between any __future__ imports and this?
        touch_import_top(u'future', u'standard_library', node)
        return result

file_length: 735, avg_line_length: 26.259259, max_line_length: 79, extension_type: py
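A sketch of the intended rewrite; ConfigParser is just one example of a renamed stdlib module, not taken from this repository.

# Hypothetical Py2 input:
import ConfigParser

# Expected output: the import is renamed as in lib2to3's fix_imports, and the
# standard_library aliases are installed near the top of the module.
from future import standard_library
standard_library.install_aliases()
import configparser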
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/libfuturize/fixes/fix_basestring.py
code:

"""
Fixer that adds ``from past.builtins import basestring`` if there is a
reference to ``basestring``
"""

from lib2to3 import fixer_base

from libfuturize.fixer_util import touch_import_top


class FixBasestring(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = "'basestring'"

    def transform(self, node, results):
        touch_import_top(u'past.builtins', 'basestring', node)

file_length: 395, avg_line_length: 19.842105, max_line_length: 70, extension_type: py
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/libfuturize/fixes/fix_object.py
code:

"""
Fixer that adds ``from builtins import object`` if there is a line like this:

    class Foo(object):

"""

from lib2to3 import fixer_base

from libfuturize.fixer_util import touch_import_top


class FixObject(fixer_base.BaseFix):

    PATTERN = u"classdef< 'class' NAME '(' name='object' ')' colon=':' any >"

    def transform(self, node, results):
        touch_import_top(u'builtins', 'object', node)

file_length: 407, avg_line_length: 21.666667, max_line_length: 77, extension_type: py
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/libfuturize/fixes/fix_print_with_import.py
code:

"""
For the ``future`` package.

Turns any print statements into functions and adds this import line:

    from __future__ import print_function

at the top to retain compatibility with Python 2.6+.
"""

from libfuturize.fixes.fix_print import FixPrint
from libfuturize.fixer_util import future_import


class FixPrintWithImport(FixPrint):
    run_order = 7

    def transform(self, node, results):
        # Add the __future__ import first. (Otherwise any shebang or encoding
        # comment line attached as a prefix to the print statement will be
        # copied twice and appear twice.)
        future_import(u'print_function', node)
        n_stmt = super(FixPrintWithImport, self).transform(node, results)
        return n_stmt

file_length: 736, avg_line_length: 29.708333, max_line_length: 77, extension_type: py
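A minimal before/after sketch of this fixer; the statement is illustrative.

# Hypothetical Py2 input:
print 'hello', 'world'

# Expected output:
from __future__ import print_function
print('hello', 'world')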
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/_collections.py
code:
from __future__ import absolute_import try: from collections.abc import Mapping, MutableMapping except ImportError: from collections import Mapping, MutableMapping try: from threading import RLock except ImportError: # Platform-specific: No threads available class RLock: def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): pass try: # Python 2.7+ from collections import OrderedDict except ImportError: from .packages.ordered_dict import OrderedDict from .exceptions import InvalidHeader from .packages.six import iterkeys, itervalues, PY3 __all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict'] _Null = object() class RecentlyUsedContainer(MutableMapping): """ Provides a thread-safe dict-like container which maintains up to ``maxsize`` keys while throwing away the least-recently-used keys beyond ``maxsize``. :param maxsize: Maximum number of recent elements to retain. :param dispose_func: Every time an item is evicted from the container, ``dispose_func(value)`` is called. Callback which will get called """ ContainerCls = OrderedDict def __init__(self, maxsize=10, dispose_func=None): self._maxsize = maxsize self.dispose_func = dispose_func self._container = self.ContainerCls() self.lock = RLock() def __getitem__(self, key): # Re-insert the item, moving it to the end of the eviction line. with self.lock: item = self._container.pop(key) self._container[key] = item return item def __setitem__(self, key, value): evicted_value = _Null with self.lock: # Possibly evict the existing value of 'key' evicted_value = self._container.get(key, _Null) self._container[key] = value # If we didn't evict an existing value, we might have to evict the # least recently used item from the beginning of the container. if len(self._container) > self._maxsize: _key, evicted_value = self._container.popitem(last=False) if self.dispose_func and evicted_value is not _Null: self.dispose_func(evicted_value) def __delitem__(self, key): with self.lock: value = self._container.pop(key) if self.dispose_func: self.dispose_func(value) def __len__(self): with self.lock: return len(self._container) def __iter__(self): raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.') def clear(self): with self.lock: # Copy pointers to all values, then wipe the mapping values = list(itervalues(self._container)) self._container.clear() if self.dispose_func: for value in values: self.dispose_func(value) def keys(self): with self.lock: return list(iterkeys(self._container)) class HTTPHeaderDict(MutableMapping): """ :param headers: An iterable of field-value pairs. Must not contain multiple field names when compared case-insensitively. :param kwargs: Additional field-value pairs to pass in to ``dict.update``. A ``dict`` like container for storing HTTP Headers. Field names are stored and compared case-insensitively in compliance with RFC 7230. Iteration provides the first case-sensitive key seen for each case-insensitive pair. Using ``__setitem__`` syntax overwrites fields that compare equal case-insensitively in order to maintain ``dict``'s api. For fields that compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add`` in a loop. If multiple fields that are equal case-insensitively are passed to the constructor or ``.update``, the behavior is undefined and some will be lost. 
>>> headers = HTTPHeaderDict() >>> headers.add('Set-Cookie', 'foo=bar') >>> headers.add('set-cookie', 'baz=quxx') >>> headers['content-length'] = '7' >>> headers['SET-cookie'] 'foo=bar, baz=quxx' >>> headers['Content-Length'] '7' """ def __init__(self, headers=None, **kwargs): super(HTTPHeaderDict, self).__init__() self._container = OrderedDict() if headers is not None: if isinstance(headers, HTTPHeaderDict): self._copy_from(headers) else: self.extend(headers) if kwargs: self.extend(kwargs) def __setitem__(self, key, val): self._container[key.lower()] = [key, val] return self._container[key.lower()] def __getitem__(self, key): val = self._container[key.lower()] return ', '.join(val[1:]) def __delitem__(self, key): del self._container[key.lower()] def __contains__(self, key): return key.lower() in self._container def __eq__(self, other): if not isinstance(other, Mapping) and not hasattr(other, 'keys'): return False if not isinstance(other, type(self)): other = type(self)(other) return (dict((k.lower(), v) for k, v in self.itermerged()) == dict((k.lower(), v) for k, v in other.itermerged())) def __ne__(self, other): return not self.__eq__(other) if not PY3: # Python 2 iterkeys = MutableMapping.iterkeys itervalues = MutableMapping.itervalues __marker = object() def __len__(self): return len(self._container) def __iter__(self): # Only provide the originally cased names for vals in self._container.values(): yield vals[0] def pop(self, key, default=__marker): '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' # Using the MutableMapping function directly fails due to the private marker. # Using ordinary dict.pop would expose the internal structures. # So let's reinvent the wheel. try: value = self[key] except KeyError: if default is self.__marker: raise return default else: del self[key] return value def discard(self, key): try: del self[key] except KeyError: pass def add(self, key, val): """Adds a (name, value) pair, doesn't overwrite the value if it already exists. >>> headers = HTTPHeaderDict(foo='bar') >>> headers.add('Foo', 'baz') >>> headers['foo'] 'bar, baz' """ key_lower = key.lower() new_vals = [key, val] # Keep the common case aka no item present as fast as possible vals = self._container.setdefault(key_lower, new_vals) if new_vals is not vals: vals.append(val) def extend(self, *args, **kwargs): """Generic import function for any type of header-like object. Adapted version of MutableMapping.update in order to insert items with self.add instead of self.__setitem__ """ if len(args) > 1: raise TypeError("extend() takes at most 1 positional " "arguments ({0} given)".format(len(args))) other = args[0] if len(args) >= 1 else () if isinstance(other, HTTPHeaderDict): for key, val in other.iteritems(): self.add(key, val) elif isinstance(other, Mapping): for key in other: self.add(key, other[key]) elif hasattr(other, "keys"): for key in other.keys(): self.add(key, other[key]) else: for key, value in other: self.add(key, value) for key, value in kwargs.items(): self.add(key, value) def getlist(self, key, default=__marker): """Returns a list of all the values for the named field. 
Returns an empty list if the key doesn't exist.""" try: vals = self._container[key.lower()] except KeyError: if default is self.__marker: return [] return default else: return vals[1:] # Backwards compatibility for httplib getheaders = getlist getallmatchingheaders = getlist iget = getlist # Backwards compatibility for http.cookiejar get_all = getlist def __repr__(self): return "%s(%s)" % (type(self).__name__, dict(self.itermerged())) def _copy_from(self, other): for key in other: val = other.getlist(key) if isinstance(val, list): # Don't need to convert tuples val = list(val) self._container[key.lower()] = [key] + val def copy(self): clone = type(self)() clone._copy_from(self) return clone def iteritems(self): """Iterate over all header lines, including duplicate ones.""" for key in self: vals = self._container[key.lower()] for val in vals[1:]: yield vals[0], val def itermerged(self): """Iterate over all headers, merging duplicate ones together.""" for key in self: val = self._container[key.lower()] yield val[0], ', '.join(val[1:]) def items(self): return list(self.iteritems()) @classmethod def from_httplib(cls, message): # Python 2 """Read headers from a Python 2 httplib message object.""" # python2.7 does not expose a proper API for exporting multiheaders # efficiently. This function re-reads raw lines from the message # object and extracts the multiheaders properly. obs_fold_continued_leaders = (' ', '\t') headers = [] for line in message.headers: if line.startswith(obs_fold_continued_leaders): if not headers: # We received a header line that starts with OWS as described # in RFC-7230 S3.2.4. This indicates a multiline header, but # there exists no previous header to which we can attach it. raise InvalidHeader( 'Header continuation with no previous header: %s' % line ) else: key, value = headers[-1] headers[-1] = (key, value + ' ' + line.strip()) continue key, value = line.split(':', 1) headers.append((key, value.strip())) return cls(headers)
file_length: 10,841, avg_line_length: 31.558559, max_line_length: 92, extension_type: py
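The HTTPHeaderDict behaviour described in the docstring above can be exercised directly; a short usage sketch based on that doctest:

from urllib3._collections import HTTPHeaderDict

headers = HTTPHeaderDict()
headers.add('Set-Cookie', 'foo=bar')
headers.add('set-cookie', 'baz=quxx')   # same field, compared case-insensitively
print(headers['SET-cookie'])            # 'foo=bar, baz=quxx'
print(list(headers))                    # first-seen casing: ['Set-Cookie']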
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/poolmanager.py
code:
from __future__ import absolute_import import collections import functools import logging from ._collections import RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool from .connectionpool import port_by_scheme from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown from .packages.six.moves.urllib.parse import urljoin from .request import RequestMethods from .util.url import parse_url from .util.retry import Retry __all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url'] log = logging.getLogger(__name__) SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs', 'ssl_version', 'ca_cert_dir', 'ssl_context') # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( 'key_scheme', # str 'key_host', # str 'key_port', # int 'key_timeout', # int or float or Timeout 'key_retries', # int or Retry 'key_strict', # bool 'key_block', # bool 'key_source_address', # str 'key_key_file', # str 'key_cert_file', # str 'key_cert_reqs', # str 'key_ca_certs', # str 'key_ssl_version', # str 'key_ca_cert_dir', # str 'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext 'key_maxsize', # int 'key_headers', # dict 'key__proxy', # parsed proxy url 'key__proxy_headers', # dict 'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples 'key__socks_options', # dict 'key_assert_hostname', # bool or string 'key_assert_fingerprint', # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple('PoolKey', _key_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context['scheme'] = context['scheme'].lower() context['host'] = context['host'].lower() # These are both dictionaries and need to be transformed into frozensets for key in ('headers', '_proxy_headers', '_socks_options'): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get('socket_options') if socket_opts is not None: context['socket_options'] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. 
for key in list(context.keys()): context['key_' + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. #: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { 'http': functools.partial(_default_key_normalizer, PoolKey), 'https': functools.partial(_default_key_normalizer, PoolKey), } pool_classes_by_scheme = { 'http': HTTPConnectionPool, 'https': HTTPSConnectionPool, } class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \\**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example:: >>> manager = PoolManager(num_pools=2) >>> r = manager.request('GET', 'http://google.com/') >>> r = manager.request('GET', 'http://google.com/mail') >>> r = manager.request('GET', 'http://yahoo.com/') >>> len(manager.pools) 2 """ proxy = None def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close()) # Locally set the pool classes and keys so other PoolManagers can # override them. self.pool_classes_by_scheme = pool_classes_by_scheme self.key_fn_by_scheme = key_fn_by_scheme.copy() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.clear() # Return False to re-raise any potential exceptions return False def _new_pool(self, scheme, host, port, request_context=None): """ Create a new :class:`ConnectionPool` based on host, port, scheme, and any additional pool keyword arguments. If ``request_context`` is provided, it is provided as keyword arguments to the pool class used. This method is used to actually create the connection pools handed out by :meth:`connection_from_url` and companion methods. It is intended to be overridden for customization. """ pool_cls = self.pool_classes_by_scheme[scheme] if request_context is None: request_context = self.connection_pool_kw.copy() # Although the context has everything necessary to create the pool, # this function has historically only used the scheme, host, and port # in the positional args. When an API change is acceptable these can # be removed. for key in ('scheme', 'host', 'port'): request_context.pop(key, None) if scheme == 'http': for kw in SSL_KEYWORDS: request_context.pop(kw, None) return pool_cls(host, port, **request_context) def clear(self): """ Empty our store of pools and direct them all to close. This will not affect in-flight connections, but they will not be re-used after completion. """ self.pools.clear() def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None): """ Get a :class:`ConnectionPool` based on the host, port, and scheme. If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. 
If ``pool_kwargs`` is provided, it is merged with the instance's ``connection_pool_kw`` variable and used to create the new connection pool, if one is needed. """ if not host: raise LocationValueError("No host specified.") request_context = self._merge_pool_kwargs(pool_kwargs) request_context['scheme'] = scheme or 'http' if not port: port = port_by_scheme.get(request_context['scheme'].lower(), 80) request_context['port'] = port request_context['host'] = host return self.connection_from_context(request_context) def connection_from_context(self, request_context): """ Get a :class:`ConnectionPool` based on the request context. ``request_context`` must at least contain the ``scheme`` key and its value must be a key in ``key_fn_by_scheme`` instance variable. """ scheme = request_context['scheme'].lower() pool_key_constructor = self.key_fn_by_scheme[scheme] pool_key = pool_key_constructor(request_context) return self.connection_from_pool_key(pool_key, request_context=request_context) def connection_from_pool_key(self, pool_key, request_context=None): """ Get a :class:`ConnectionPool` based on the provided pool key. ``pool_key`` should be a namedtuple that only contains immutable objects. At a minimum it must have the ``scheme``, ``host``, and ``port`` fields. """ with self.pools.lock: # If the scheme, host, or port doesn't match existing open # connections, open a new ConnectionPool. pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type scheme = request_context['scheme'] host = request_context['host'] port = request_context['port'] pool = self._new_pool(scheme, host, port, request_context=request_context) self.pools[pool_key] = pool return pool def connection_from_url(self, url, pool_kwargs=None): """ Similar to :func:`urllib3.connectionpool.connection_from_url`. If ``pool_kwargs`` is not provided and a new pool needs to be constructed, ``self.connection_pool_kw`` is used to initialize the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` is provided, it is used instead. Note that if a new pool does not need to be created for the request, the provided ``pool_kwargs`` are not used. """ u = parse_url(url) return self.connection_from_host(u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs) def _merge_pool_kwargs(self, override): """ Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. """ base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs def urlopen(self, method, url, redirect=True, **kw): """ Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. 
""" u = parse_url(url) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) kw['assert_same_host'] = False kw['redirect'] = False if 'headers' not in kw: kw['headers'] = self.headers.copy() if self.proxy is not None and u.scheme == "http": response = conn.urlopen(method, url, **kw) else: response = conn.urlopen(method, u.request_uri, **kw) redirect_location = redirect and response.get_redirect_location() if not redirect_location: return response # Support relative URLs for redirecting. redirect_location = urljoin(url, redirect_location) # RFC 7231, Section 6.4.4 if response.status == 303: method = 'GET' retries = kw.get('retries') if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect) # Strip headers marked as unsafe to forward to the redirected location. # Check remove_headers_on_redirect to avoid a potential network call within # conn.is_same_host() which may use socket.gethostbyname() in the future. if (retries.remove_headers_on_redirect and not conn.is_same_host(redirect_location)): for header in retries.remove_headers_on_redirect: kw['headers'].pop(header, None) try: retries = retries.increment(method, url, response=response, _pool=conn) except MaxRetryError: if retries.raise_on_redirect: raise return response kw['retries'] = retries kw['redirect'] = redirect log.info("Redirecting %s -> %s", url, redirect_location) return self.urlopen(method, redirect_location, **kw) class ProxyManager(PoolManager): """ Behaves just like :class:`PoolManager`, but sends all requests through the defined proxy, using the CONNECT method for HTTPS URLs. :param proxy_url: The URL of the proxy to be used. :param proxy_headers: A dictionary containing headers that will be sent to the proxy. In case of HTTP they are being sent with each request, while in the HTTPS/CONNECT case they are sent only once. Could be used for proxy authentication. Example: >>> proxy = urllib3.ProxyManager('http://localhost:3128/') >>> r1 = proxy.request('GET', 'http://google.com/') >>> r2 = proxy.request('GET', 'http://httpbin.org/') >>> len(proxy.pools) 1 >>> r3 = proxy.request('GET', 'https://httpbin.org/') >>> r4 = proxy.request('GET', 'https://twitter.com/') >>> len(proxy.pools) 3 """ def __init__(self, proxy_url, num_pools=10, headers=None, proxy_headers=None, **connection_pool_kw): if isinstance(proxy_url, HTTPConnectionPool): proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host, proxy_url.port) proxy = parse_url(proxy_url) if not proxy.port: port = port_by_scheme.get(proxy.scheme, 80) proxy = proxy._replace(port=port) if proxy.scheme not in ("http", "https"): raise ProxySchemeUnknown(proxy.scheme) self.proxy = proxy self.proxy_headers = proxy_headers or {} connection_pool_kw['_proxy'] = self.proxy connection_pool_kw['_proxy_headers'] = self.proxy_headers super(ProxyManager, self).__init__( num_pools, headers, **connection_pool_kw) def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None): if scheme == "https": return super(ProxyManager, self).connection_from_host( host, port, scheme, pool_kwargs=pool_kwargs) return super(ProxyManager, self).connection_from_host( self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs) def _set_proxy_headers(self, url, headers=None): """ Sets headers needed by proxies: specifically, the Accept and Host headers. Only sets headers not provided by the user. 
""" headers_ = {'Accept': '*/*'} netloc = parse_url(url).netloc if netloc: headers_['Host'] = netloc if headers: headers_.update(headers) return headers_ def urlopen(self, method, url, redirect=True, **kw): "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." u = parse_url(url) if u.scheme == "http": # For proxied HTTPS requests, httplib sets the necessary headers # on the CONNECT to the proxy. For HTTP, we'll definitely # need to set 'Host' at the very least. headers = kw.get('headers', self.headers) kw['headers'] = self._set_proxy_headers(url, headers) return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw) def proxy_from_url(url, **kw): return ProxyManager(proxy_url=url, **kw)
file_length: 16,821, avg_line_length: 36.382222, max_line_length: 92, extension_type: py
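The PoolManager docstring example above, written out as a runnable sketch; the URLs are placeholders and the calls require network access.

import urllib3

manager = urllib3.PoolManager(num_pools=2)
r1 = manager.request('GET', 'http://google.com/')
r2 = manager.request('GET', 'http://google.com/mail')
r3 = manager.request('GET', 'http://yahoo.com/')
print(len(manager.pools))   # 2: the least recently used pool was evicted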
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/exceptions.py
code:
from __future__ import absolute_import from .packages.six.moves.http_client import ( IncompleteRead as httplib_IncompleteRead ) # Base Exceptions class HTTPError(Exception): "Base exception used by this module." pass class HTTPWarning(Warning): "Base warning used by this module." pass class PoolError(HTTPError): "Base exception for errors caused within a pool." def __init__(self, pool, message): self.pool = pool HTTPError.__init__(self, "%s: %s" % (pool, message)) def __reduce__(self): # For pickling purposes. return self.__class__, (None, None) class RequestError(PoolError): "Base exception for PoolErrors that have associated URLs." def __init__(self, pool, url, message): self.url = url PoolError.__init__(self, pool, message) def __reduce__(self): # For pickling purposes. return self.__class__, (None, self.url, None) class SSLError(HTTPError): "Raised when SSL certificate fails in an HTTPS connection." pass class ProxyError(HTTPError): "Raised when the connection to a proxy fails." pass class DecodeError(HTTPError): "Raised when automatic decoding based on Content-Type fails." pass class ProtocolError(HTTPError): "Raised when something unexpected happens mid-request/response." pass #: Renamed to ProtocolError but aliased for backwards compatibility. ConnectionError = ProtocolError # Leaf Exceptions class MaxRetryError(RequestError): """Raised when the maximum number of retries is exceeded. :param pool: The connection pool :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool` :param string url: The requested Url :param exceptions.Exception reason: The underlying error """ def __init__(self, pool, url, reason=None): self.reason = reason message = "Max retries exceeded with url: %s (Caused by %r)" % ( url, reason) RequestError.__init__(self, pool, url, message) class HostChangedError(RequestError): "Raised when an existing pool gets a request for a foreign host." def __init__(self, pool, url, retries=3): message = "Tried to open a foreign host with url: %s" % url RequestError.__init__(self, pool, url, message) self.retries = retries class TimeoutStateError(HTTPError): """ Raised when passing an invalid state to a timeout """ pass class TimeoutError(HTTPError): """ Raised when a socket timeout error occurs. Catching this error will catch both :exc:`ReadTimeoutErrors <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`. """ pass class ReadTimeoutError(TimeoutError, RequestError): "Raised when a socket timeout occurs while receiving data from a server" pass # This timeout error does not have a URL attached and needs to inherit from the # base HTTPError class ConnectTimeoutError(TimeoutError): "Raised when a socket timeout occurs while connecting to a server" pass class NewConnectionError(ConnectTimeoutError, PoolError): "Raised when we fail to establish a new connection. Usually ECONNREFUSED." pass class EmptyPoolError(PoolError): "Raised when a pool runs out of connections and no more are allowed." pass class ClosedPoolError(PoolError): "Raised when a request enters a pool after the pool has been closed." pass class LocationValueError(ValueError, HTTPError): "Raised when there is something wrong with a given URL input." pass class LocationParseError(LocationValueError): "Raised when get_host or similar fails to parse the URL input." def __init__(self, location): message = "Failed to parse: %s" % location HTTPError.__init__(self, message) self.location = location class ResponseError(HTTPError): "Used as a container for an error reason supplied in a MaxRetryError." 
GENERIC_ERROR = 'too many error responses' SPECIFIC_ERROR = 'too many {status_code} error responses' class SecurityWarning(HTTPWarning): "Warned when performing security reducing actions" pass class SubjectAltNameWarning(SecurityWarning): "Warned when connecting to a host with a certificate missing a SAN." pass class InsecureRequestWarning(SecurityWarning): "Warned when making an unverified HTTPS request." pass class SystemTimeWarning(SecurityWarning): "Warned when system time is suspected to be wrong" pass class InsecurePlatformWarning(SecurityWarning): "Warned when certain SSL configuration is not available on a platform." pass class SNIMissingWarning(HTTPWarning): "Warned when making a HTTPS request without SNI available." pass class DependencyWarning(HTTPWarning): """ Warned when an attempt is made to import a module with missing optional dependencies. """ pass class ResponseNotChunked(ProtocolError, ValueError): "Response needs to be chunked in order to read it as chunks." pass class BodyNotHttplibCompatible(HTTPError): """ Body should be httplib.HTTPResponse like (have an fp attribute which returns raw chunks) for read_chunked(). """ pass class IncompleteRead(HTTPError, httplib_IncompleteRead): """ Response length doesn't match expected Content-Length Subclass of http_client.IncompleteRead to allow int value for `partial` to avoid creating large objects on streamed reads. """ def __init__(self, partial, expected): super(IncompleteRead, self).__init__(partial, expected) def __repr__(self): return ('IncompleteRead(%i bytes read, ' '%i more expected)' % (self.partial, self.expected)) class InvalidHeader(HTTPError): "The header provided was somehow invalid." pass class ProxySchemeUnknown(AssertionError, ValueError): "ProxyManager does not support the supplied scheme" # TODO(t-8ch): Stop inheriting from AssertionError in v2.0. def __init__(self, scheme): message = "Not supported proxy scheme %s" % scheme super(ProxySchemeUnknown, self).__init__(message) class HeaderParsingError(HTTPError): "Raised by assert_header_parsing, but we convert it to a log.warning statement." def __init__(self, defects, unparsed_data): message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data) super(HeaderParsingError, self).__init__(message) class UnrewindableBodyError(HTTPError): "urllib3 encountered an error when trying to rewind a body" pass
file_length: 6,604, avg_line_length: 25.740891, max_line_length: 84, extension_type: py
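A short sketch of how these leaf exceptions typically surface to callers; the unreachable port is illustrative, not taken from this repository.

import urllib3
from urllib3.exceptions import MaxRetryError

http = urllib3.PoolManager()
try:
    http.request('GET', 'http://127.0.0.1:9', retries=1)
except MaxRetryError as err:
    # err.reason carries the underlying error, e.g. a NewConnectionError
    print(err.url, type(err.reason).__name__)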
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/connection.py
code:
from __future__ import absolute_import import datetime import logging import os import sys import socket from socket import error as SocketError, timeout as SocketTimeout import warnings from .packages import six from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection from .packages.six.moves.http_client import HTTPException # noqa: F401 try: # Compiled with SSL? import ssl BaseSSLError = ssl.SSLError except (ImportError, AttributeError): # Platform-specific: No SSL. ssl = None class BaseSSLError(BaseException): pass try: # Python 3: # Not a no-op, we're adding this to the namespace so it can be imported. ConnectionError = ConnectionError except NameError: # Python 2: class ConnectionError(Exception): pass from .exceptions import ( NewConnectionError, ConnectTimeoutError, SubjectAltNameWarning, SystemTimeWarning, ) from .packages.ssl_match_hostname import match_hostname, CertificateError from .util.ssl_ import ( resolve_cert_reqs, resolve_ssl_version, assert_fingerprint, create_urllib3_context, ssl_wrap_socket ) from .util import connection from ._collections import HTTPHeaderDict log = logging.getLogger(__name__) port_by_scheme = { 'http': 80, 'https': 443, } # When updating RECENT_DATE, move it to within two years of the current date, # and not less than 6 months ago. # Example: if Today is 2018-01-01, then RECENT_DATE should be any date on or # after 2016-01-01 (today - 2 years) AND before 2017-07-01 (today - 6 months) RECENT_DATE = datetime.date(2017, 6, 30) class DummyConnection(object): """Used to detect a failed ConnectionCls import.""" pass class HTTPConnection(_HTTPConnection, object): """ Based on httplib.HTTPConnection but provides an extra constructor backwards-compatibility layer between older and newer Pythons. Additional keyword parameters are used to configure attributes of the connection. Accepted parameters include: - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool` - ``source_address``: Set the source address for the current connection. .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x - ``socket_options``: Set specific options on the underlying socket. If not specified, then defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy. For example, if you wish to enable TCP Keep Alive in addition to the defaults, you might pass:: HTTPConnection.default_socket_options + [ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), ] Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). """ default_port = port_by_scheme['http'] #: Disable Nagle's algorithm by default. #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)] #: Whether this connection verifies the host's certificate. is_verified = False def __init__(self, *args, **kw): if six.PY3: # Python 3 kw.pop('strict', None) # Pre-set source_address in case we have an older Python like 2.6. self.source_address = kw.get('source_address') if sys.version_info < (2, 7): # Python 2.6 # _HTTPConnection on Python 2.6 will balk at this keyword arg, but # not newer versions. We can still use it when creating a # connection though, so we pop it *after* we have saved it as # self.source_address. kw.pop('source_address', None) #: The socket options provided by the user. If no options are #: provided, we use the default options. 
self.socket_options = kw.pop('socket_options', self.default_socket_options) # Superclass also sets self.source_address in Python 2.7+. _HTTPConnection.__init__(self, *args, **kw) @property def host(self): """ Getter method to remove any trailing dots that indicate the hostname is an FQDN. In general, SSL certificates don't include the trailing dot indicating a fully-qualified domain name, and thus, they don't validate properly when checked against a domain name that includes the dot. In addition, some servers may not expect to receive the trailing dot when provided. However, the hostname with trailing dot is critical to DNS resolution; doing a lookup with the trailing dot will properly only resolve the appropriate FQDN, whereas a lookup without a trailing dot will search the system's search domain list. Thus, it's important to keep the original host around for use only in those cases where it's appropriate (i.e., when doing DNS lookup to establish the actual TCP connection across which we're going to send HTTP requests). """ return self._dns_host.rstrip('.') @host.setter def host(self, value): """ Setter for the `host` property. We assume that only urllib3 uses the _dns_host attribute; httplib itself only uses `host`, and it seems reasonable that other libraries follow suit. """ self._dns_host = value def _new_conn(self): """ Establish a socket connection and set nodelay settings on it. :return: New socket connection. """ extra_kw = {} if self.source_address: extra_kw['source_address'] = self.source_address if self.socket_options: extra_kw['socket_options'] = self.socket_options try: conn = connection.create_connection( (self._dns_host, self.port), self.timeout, **extra_kw) except SocketTimeout as e: raise ConnectTimeoutError( self, "Connection to %s timed out. (connect timeout=%s)" % (self.host, self.timeout)) except SocketError as e: raise NewConnectionError( self, "Failed to establish a new connection: %s" % e) return conn def _prepare_conn(self, conn): self.sock = conn # the _tunnel_host attribute was added in python 2.6.3 (via # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do # not have them. if getattr(self, '_tunnel_host', None): # TODO: Fix tunnel so it doesn't depend on self.sock state. 
self._tunnel() # Mark this connection as not reusable self.auto_open = 0 def connect(self): conn = self._new_conn() self._prepare_conn(conn) def request_chunked(self, method, url, body=None, headers=None): """ Alternative to the common request method, which sends the body with chunked encoding and not as one block """ headers = HTTPHeaderDict(headers if headers is not None else {}) skip_accept_encoding = 'accept-encoding' in headers skip_host = 'host' in headers self.putrequest( method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host ) for header, value in headers.items(): self.putheader(header, value) if 'transfer-encoding' not in headers: self.putheader('Transfer-Encoding', 'chunked') self.endheaders() if body is not None: stringish_types = six.string_types + (six.binary_type,) if isinstance(body, stringish_types): body = (body,) for chunk in body: if not chunk: continue if not isinstance(chunk, six.binary_type): chunk = chunk.encode('utf8') len_str = hex(len(chunk))[2:] self.send(len_str.encode('utf-8')) self.send(b'\r\n') self.send(chunk) self.send(b'\r\n') # After the if clause, to always have a closed body self.send(b'0\r\n\r\n') class HTTPSConnection(HTTPConnection): default_port = port_by_scheme['https'] ssl_version = None def __init__(self, host, port=None, key_file=None, cert_file=None, strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, ssl_context=None, **kw): HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw) self.key_file = key_file self.cert_file = cert_file self.ssl_context = ssl_context # Required property for Google AppEngine 1.9.0 which otherwise causes # HTTPS requests to go out as HTTP. (See Issue #356) self._protocol = 'https' def connect(self): conn = self._new_conn() self._prepare_conn(conn) if self.ssl_context is None: self.ssl_context = create_urllib3_context( ssl_version=resolve_ssl_version(None), cert_reqs=resolve_cert_reqs(None), ) self.sock = ssl_wrap_socket( sock=conn, keyfile=self.key_file, certfile=self.cert_file, ssl_context=self.ssl_context, ) class VerifiedHTTPSConnection(HTTPSConnection): """ Based on httplib.HTTPSConnection but wraps the socket with SSL certification. """ cert_reqs = None ca_certs = None ca_cert_dir = None ssl_version = None assert_fingerprint = None def set_cert(self, key_file=None, cert_file=None, cert_reqs=None, ca_certs=None, assert_hostname=None, assert_fingerprint=None, ca_cert_dir=None): """ This method should only be called once, before the connection is used. """ # If cert_reqs is not provided, we can try to guess. If the user gave # us a cert database, we assume they want to use it: otherwise, if # they gave us an SSL Context object we should use whatever is set for # it. if cert_reqs is None: if ca_certs or ca_cert_dir: cert_reqs = 'CERT_REQUIRED' elif self.ssl_context is not None: cert_reqs = self.ssl_context.verify_mode self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint self.ca_certs = ca_certs and os.path.expanduser(ca_certs) self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) def connect(self): # Add certificate verification conn = self._new_conn() hostname = self.host if getattr(self, '_tunnel_host', None): # _tunnel_host was added in Python 2.6.3 # (See: http://hg.python.org/cpython/rev/0f57b30a152f) self.sock = conn # Calls self._set_hostport(), so self.host is # self._tunnel_host below. 
self._tunnel() # Mark this connection as not reusable self.auto_open = 0 # Override the host with the one we're requesting data from. hostname = self._tunnel_host is_time_off = datetime.date.today() < RECENT_DATE if is_time_off: warnings.warn(( 'System time is way off (before {0}). This will probably ' 'lead to SSL verification errors').format(RECENT_DATE), SystemTimeWarning ) # Wrap socket using verification with the root certs in # trusted_root_certs if self.ssl_context is None: self.ssl_context = create_urllib3_context( ssl_version=resolve_ssl_version(self.ssl_version), cert_reqs=resolve_cert_reqs(self.cert_reqs), ) context = self.ssl_context context.verify_mode = resolve_cert_reqs(self.cert_reqs) self.sock = ssl_wrap_socket( sock=conn, keyfile=self.key_file, certfile=self.cert_file, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, server_hostname=hostname, ssl_context=context) if self.assert_fingerprint: assert_fingerprint(self.sock.getpeercert(binary_form=True), self.assert_fingerprint) elif context.verify_mode != ssl.CERT_NONE \ and not getattr(context, 'check_hostname', False) \ and self.assert_hostname is not False: # While urllib3 attempts to always turn off hostname matching from # the TLS library, this cannot always be done. So we check whether # the TLS Library still thinks it's matching hostnames. cert = self.sock.getpeercert() if not cert.get('subjectAltName', ()): warnings.warn(( 'Certificate for {0} has no `subjectAltName`, falling back to check for a ' '`commonName` for now. This feature is being removed by major browsers and ' 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 ' 'for details.)'.format(hostname)), SubjectAltNameWarning ) _match_hostname(cert, self.assert_hostname or hostname) self.is_verified = ( context.verify_mode == ssl.CERT_REQUIRED or self.assert_fingerprint is not None ) def _match_hostname(cert, asserted_hostname): try: match_hostname(cert, asserted_hostname) except CertificateError as e: log.error( 'Certificate did not match expected hostname: %s. ' 'Certificate: %s', asserted_hostname, cert ) # Add cert to exception and reraise so client code can inspect # the cert when catching the exception, if they want to e._peer_cert = cert raise if ssl: # Make a copy for testing. UnverifiedHTTPSConnection = HTTPSConnection HTTPSConnection = VerifiedHTTPSConnection else: HTTPSConnection = DummyConnection
file_length: 14,485, avg_line_length: 34.856436, max_line_length: 99, extension_type: py
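The socket_options example from the HTTPConnection docstring above, written out as a runnable sketch; example.com is a placeholder host.

import socket
from urllib3 import HTTPConnectionPool
from urllib3.connection import HTTPConnection

# Keep the default TCP_NODELAY option and additionally enable TCP keep-alive.
options = HTTPConnection.default_socket_options + [
    (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]
pool = HTTPConnectionPool('example.com', socket_options=options)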
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/filepost.py
code:
from __future__ import absolute_import import binascii import codecs import os from io import BytesIO from .packages import six from .packages.six import b from .fields import RequestField writer = codecs.lookup('utf-8')[3] def choose_boundary(): """ Our embarrassingly-simple replacement for mimetools.choose_boundary. """ boundary = binascii.hexlify(os.urandom(16)) if six.PY3: boundary = boundary.decode('ascii') return boundary def iter_field_objects(fields): """ Iterate over fields. Supports list of (k, v) tuples and dicts, and lists of :class:`~urllib3.fields.RequestField`. """ if isinstance(fields, dict): i = six.iteritems(fields) else: i = iter(fields) for field in i: if isinstance(field, RequestField): yield field else: yield RequestField.from_tuples(*field) def iter_fields(fields): """ .. deprecated:: 1.6 Iterate over fields. The addition of :class:`~urllib3.fields.RequestField` makes this function obsolete. Instead, use :func:`iter_field_objects`, which returns :class:`~urllib3.fields.RequestField` objects. Supports list of (k, v) tuples and dicts. """ if isinstance(fields, dict): return ((k, v) for k, v in six.iteritems(fields)) return ((k, v) for k, v in fields) def encode_multipart_formdata(fields, boundary=None): """ Encode a dictionary of ``fields`` using the multipart/form-data MIME format. :param fields: Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`). :param boundary: If not specified, then a random boundary will be generated using :func:`urllib3.filepost.choose_boundary`. """ body = BytesIO() if boundary is None: boundary = choose_boundary() for field in iter_field_objects(fields): body.write(b('--%s\r\n' % (boundary))) writer(body).write(field.render_headers()) data = field.data if isinstance(data, int): data = str(data) # Backwards compatibility if isinstance(data, six.text_type): writer(body).write(data) else: body.write(data) body.write(b'\r\n') body.write(b('--%s--\r\n' % (boundary))) content_type = str('multipart/form-data; boundary=%s' % boundary) return body.getvalue(), content_type
2,436
23.616162
85
py
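filepost.py builds multipart/form-data bodies: iter_field_objects() normalises dicts and (key, value) tuples into RequestField objects, and encode_multipart_formdata() renders them with a random boundary from choose_boundary(). A small usage sketch, with made-up field names and file contents::

    from urllib3.filepost import encode_multipart_formdata

    fields = {
        'comment': 'hello',
        'upload': ('report.txt', b'file contents', 'text/plain'),  # (filename, data, MIME type)
    }
    body, content_type = encode_multipart_formdata(fields)
    # content_type looks like 'multipart/form-data; boundary=<32 hex chars>'
    # and body is the byte string ready to send as the request body.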
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/connectionpool.py
from __future__ import absolute_import import errno import logging import sys import warnings from socket import error as SocketError, timeout as SocketTimeout import socket from .exceptions import ( ClosedPoolError, ProtocolError, EmptyPoolError, HeaderParsingError, HostChangedError, LocationValueError, MaxRetryError, ProxyError, ReadTimeoutError, SSLError, TimeoutError, InsecureRequestWarning, NewConnectionError, ) from .packages.ssl_match_hostname import CertificateError from .packages import six from .packages.six.moves import queue from .connection import ( port_by_scheme, DummyConnection, HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection, HTTPException, BaseSSLError, ) from .request import RequestMethods from .response import HTTPResponse from .util.connection import is_connection_dropped from .util.request import set_file_position from .util.response import assert_header_parsing from .util.retry import Retry from .util.timeout import Timeout from .util.url import get_host, Url, NORMALIZABLE_SCHEMES from .util.queue import LifoQueue xrange = six.moves.xrange log = logging.getLogger(__name__) _Default = object() # Pool objects class ConnectionPool(object): """ Base class for all connection pools, such as :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. """ scheme = None QueueCls = LifoQueue def __init__(self, host, port=None): if not host: raise LocationValueError("No host specified.") self.host = _ipv6_host(host, self.scheme) self._proxy_host = host.lower() self.port = port def __str__(self): return '%s(host=%r, port=%r)' % (type(self).__name__, self.host, self.port) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() # Return False to re-raise any potential exceptions return False def close(self): """ Close all pooled connections and disable the pool. """ pass # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 _blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK]) class HTTPConnectionPool(ConnectionPool, RequestMethods): """ Thread-safe connection pool for one host. :param host: Host used for this HTTP Connection (e.g. "localhost"), passed into :class:`httplib.HTTPConnection`. :param port: Port used for this HTTP Connection (None is equivalent to 80), passed into :class:`httplib.HTTPConnection`. :param strict: Causes BadStatusLine to be raised if the status line can't be parsed as a valid HTTP/1.0 or 1.1 status line, passed into :class:`httplib.HTTPConnection`. .. note:: Only works in Python 2. This parameter is ignored in Python 3. :param timeout: Socket timeout in seconds for each individual connection. This can be a float or integer, which sets the timeout for the HTTP request, or an instance of :class:`urllib3.util.Timeout` which gives you more fine-grained control over request timeouts. After the constructor has been parsed, this is always a `urllib3.util.Timeout` object. :param maxsize: Number of connections to save that can be reused. More than 1 is useful in multithreaded situations. If ``block`` is set to False, more connections will be created but they will not be saved once they've been used. :param block: If set to True, no more than ``maxsize`` connections will be used at a time. When no free connections are available, the call will block until a connection has been released. This is a useful side effect for particular multithreaded situations where one does not want to use more than maxsize connections per host to prevent flooding. 
:param headers: Headers to include with all requests, unless other headers are given explicitly. :param retries: Retry configuration to use by default with requests in this pool. :param _proxy: Parsed proxy URL, should not be used directly, instead, see :class:`urllib3.connectionpool.ProxyManager`" :param _proxy_headers: A dictionary with proxy headers, should not be used directly, instead, see :class:`urllib3.connectionpool.ProxyManager`" :param \\**conn_kw: Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, :class:`urllib3.connection.HTTPSConnection` instances. """ scheme = 'http' ConnectionCls = HTTPConnection ResponseCls = HTTPResponse def __init__(self, host, port=None, strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, headers=None, retries=None, _proxy=None, _proxy_headers=None, **conn_kw): ConnectionPool.__init__(self, host, port) RequestMethods.__init__(self, headers) self.strict = strict if not isinstance(timeout, Timeout): timeout = Timeout.from_float(timeout) if retries is None: retries = Retry.DEFAULT self.timeout = timeout self.retries = retries self.pool = self.QueueCls(maxsize) self.block = block self.proxy = _proxy self.proxy_headers = _proxy_headers or {} # Fill the queue up so that doing get() on it will block properly for _ in xrange(maxsize): self.pool.put(None) # These are mostly for testing and debugging purposes. self.num_connections = 0 self.num_requests = 0 self.conn_kw = conn_kw if self.proxy: # Enable Nagle's algorithm for proxies, to avoid packet fragmentation. # We cannot know if the user has added default socket options, so we cannot replace the # list. self.conn_kw.setdefault('socket_options', []) def _new_conn(self): """ Return a fresh :class:`HTTPConnection`. """ self.num_connections += 1 log.debug("Starting new HTTP connection (%d): %s:%s", self.num_connections, self.host, self.port or "80") conn = self.ConnectionCls(host=self.host, port=self.port, timeout=self.timeout.connect_timeout, strict=self.strict, **self.conn_kw) return conn def _get_conn(self, timeout=None): """ Get a connection. Will return a pooled connection if one is available. If no connections are available and :prop:`.block` is ``False``, then a fresh connection is returned. :param timeout: Seconds to wait before giving up and raising :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and :prop:`.block` is ``True``. """ conn = None try: conn = self.pool.get(block=self.block, timeout=timeout) except AttributeError: # self.pool is None raise ClosedPoolError(self, "Pool is closed.") except queue.Empty: if self.block: raise EmptyPoolError(self, "Pool reached maximum size and no more " "connections are allowed.") pass # Oh well, we'll create a new connection then # If this is a persistent connection, check if it got disconnected if conn and is_connection_dropped(conn): log.debug("Resetting dropped connection: %s", self.host) conn.close() if getattr(conn, 'auto_open', 1) == 0: # This is a proxied connection that has been mutated by # httplib._tunnel() and cannot be reused (since it would # attempt to bypass the proxy) conn = None return conn or self._new_conn() def _put_conn(self, conn): """ Put a connection back into the pool. :param conn: Connection object for the current host and port as returned by :meth:`._new_conn` or :meth:`._get_conn`. If the pool is already full, the connection is closed and discarded because we exceeded maxsize. If connections are discarded frequently, then maxsize should be increased. 
If the pool is closed, then the connection will be closed and discarded. """ try: self.pool.put(conn, block=False) return # Everything is dandy, done. except AttributeError: # self.pool is None. pass except queue.Full: # This should never happen if self.block == True log.warning( "Connection pool is full, discarding connection: %s", self.host) # Connection never got put back into the pool, close it. if conn: conn.close() def _validate_conn(self, conn): """ Called right before a request is made, after the socket is created. """ pass def _prepare_proxy(self, conn): # Nothing to do for HTTP connections. pass def _get_timeout(self, timeout): """ Helper that always returns a :class:`urllib3.util.Timeout` """ if timeout is _Default: return self.timeout.clone() if isinstance(timeout, Timeout): return timeout.clone() else: # User passed us an int/float. This is for backwards compatibility, # can be removed later return Timeout.from_float(timeout) def _raise_timeout(self, err, url, timeout_value): """Is the error actually a timeout? Will raise a ReadTimeout or pass""" if isinstance(err, SocketTimeout): raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) # See the above comment about EAGAIN in Python 3. In Python 2 we have # to specifically catch it and throw the timeout error if hasattr(err, 'errno') and err.errno in _blocking_errnos: raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) # Catch possible read timeouts thrown as SSL errors. If not the # case, rethrow the original. We need to do this because of: # http://bugs.python.org/issue10272 if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6 raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) def _make_request(self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw): """ Perform a request on a given urllib connection object taken from our pool. :param conn: a connection from one of our connection pools :param timeout: Socket timeout in seconds for the request. This can be a float or integer, which will set the same timeout value for the socket connect and the socket read, or an instance of :class:`urllib3.util.Timeout`, which gives you more fine-grained control over your timeouts. """ self.num_requests += 1 timeout_obj = self._get_timeout(timeout) timeout_obj.start_connect() conn.timeout = timeout_obj.connect_timeout # Trigger any extra validation we need to do. try: self._validate_conn(conn) except (SocketTimeout, BaseSSLError) as e: # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout. self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) raise # conn.request() calls httplib.*.request, not the method in # urllib3.request. It also calls makefile (recv) on the socket. if chunked: conn.request_chunked(method, url, **httplib_request_kw) else: conn.request(method, url, **httplib_request_kw) # Reset the timeout for the recv() on the socket read_timeout = timeout_obj.read_timeout # App Engine doesn't have a sock attr if getattr(conn, 'sock', None): # In Python 3 socket.py will catch EAGAIN and return None when you # try and read into the file pointer created by http.client, which # instead raises a BadStatusLine exception. Instead of catching # the exception and assuming all BadStatusLine exceptions are read # timeouts, check for a zero timeout before making the request. if read_timeout == 0: raise ReadTimeoutError( self, url, "Read timed out. 
(read timeout=%s)" % read_timeout) if read_timeout is Timeout.DEFAULT_TIMEOUT: conn.sock.settimeout(socket.getdefaulttimeout()) else: # None or a value conn.sock.settimeout(read_timeout) # Receive the response from the server try: try: # Python 2.7, use buffering of HTTP responses httplib_response = conn.getresponse(buffering=True) except TypeError: # Python 2.6 and older, Python 3 try: httplib_response = conn.getresponse() except Exception as e: # Remove the TypeError from the exception chain in Python 3; # otherwise it looks like a programming error was the cause. six.raise_from(e, None) except (SocketTimeout, BaseSSLError, SocketError) as e: self._raise_timeout(err=e, url=url, timeout_value=read_timeout) raise # AppEngine doesn't have a version attr. http_version = getattr(conn, '_http_vsn_str', 'HTTP/?') log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port, method, url, http_version, httplib_response.status, httplib_response.length) try: assert_header_parsing(httplib_response.msg) except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3 log.warning( 'Failed to parse headers (url=%s): %s', self._absolute_url(url), hpe, exc_info=True) return httplib_response def _absolute_url(self, path): return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url def close(self): """ Close all pooled connections and disable the pool. """ if self.pool is None: return # Disable access to the pool old_pool, self.pool = self.pool, None try: while True: conn = old_pool.get(block=False) if conn: conn.close() except queue.Empty: pass # Done. def is_same_host(self, url): """ Check if the given ``url`` is a member of the same host as this connection pool. """ if url.startswith('/'): return True # TODO: Add optional support for socket.gethostbyname checking. scheme, host, port = get_host(url) host = _ipv6_host(host, self.scheme) # Use explicit default port for comparison when none is given if self.port and not port: port = port_by_scheme.get(scheme) elif not self.port and port == port_by_scheme.get(scheme): port = None return (scheme, host, port) == (self.scheme, self.host, self.port) def urlopen(self, method, url, body=None, headers=None, retries=None, redirect=True, assert_same_host=True, timeout=_Default, pool_timeout=None, release_conn=None, chunked=False, body_pos=None, **response_kw): """ Get a connection from the pool and perform an HTTP request. This is the lowest level call for making a request, so you'll need to specify all the raw details. .. note:: More commonly, it's appropriate to use a convenience method provided by :class:`.RequestMethods`, such as :meth:`request`. .. note:: `release_conn` will only behave as expected if `preload_content=False` because we want to make `preload_content=False` the default behaviour someday soon without breaking backwards compatibility. :param method: HTTP request method (such as GET, POST, PUT, etc.) :param body: Data to send in the request body (useful for creating POST requests, see HTTPConnectionPool.post_url for more convenience). :param headers: Dictionary of custom headers to send, such as User-Agent, If-None-Match, etc. If None, pool headers are used. If provided, these headers completely replace any pool-specific headers. :param retries: Configure the number of retries to allow before raising a :class:`~urllib3.exceptions.MaxRetryError` exception. Pass ``None`` to retry until you receive a response. 
Pass a :class:`~urllib3.util.retry.Retry` object for fine-grained control over different types of retries. Pass an integer number to retry connection errors that many times, but no other types of errors. Pass zero to never retry. If ``False``, then retries are disabled and any exception is raised immediately. Also, instead of raising a MaxRetryError on redirects, the redirect response will be returned. :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. :param redirect: If True, automatically handle redirects (status codes 301, 302, 303, 307, 308). Each redirect counts as a retry. Disabling retries will disable redirect, too. :param assert_same_host: If ``True``, will make sure that the host of the pool requests is consistent else will raise HostChangedError. When False, you can use the pool on an HTTP proxy and request foreign hosts. :param timeout: If specified, overrides the default timeout for this one request. It may be a float (in seconds) or an instance of :class:`urllib3.util.Timeout`. :param pool_timeout: If set and the pool is set to block=True, then this method will block for ``pool_timeout`` seconds and raise EmptyPoolError if no connection is available within the time period. :param release_conn: If False, then the urlopen call will not release the connection back into the pool once a response is received (but will release if you read the entire contents of the response such as when `preload_content=True`). This is useful if you're not preloading the response's content immediately. You will need to call ``r.release_conn()`` on the response ``r`` to return the connection back into the pool. If None, it takes the value of ``response_kw.get('preload_content', True)``. :param chunked: If True, urllib3 will send the body using chunked transfer encoding. Otherwise, urllib3 will send the body using the standard content-length form. Defaults to False. :param int body_pos: Position to seek to in file-like body in the event of a retry or redirect. Typically this won't need to be set because urllib3 will auto-populate the value when needed. :param \\**response_kw: Additional parameters are passed to :meth:`urllib3.response.HTTPResponse.from_httplib` """ if headers is None: headers = self.headers if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect, default=self.retries) if release_conn is None: release_conn = response_kw.get('preload_content', True) # Check host if assert_same_host and not self.is_same_host(url): raise HostChangedError(self, url, retries) conn = None # Track whether `conn` needs to be released before # returning/raising/recursing. Update this variable if necessary, and # leave `release_conn` constant throughout the function. That way, if # the function recurses, the original value of `release_conn` will be # passed down into the recursive call, and its value will be respected. # # See issue #651 [1] for details. # # [1] <https://github.com/shazow/urllib3/issues/651> release_this_conn = release_conn # Merge the proxy headers. Only do this in HTTP. We have to copy the # headers dict so we can safely change it without those changes being # reflected in anyone else's copy. if self.scheme == 'http': headers = headers.copy() headers.update(self.proxy_headers) # Must keep the exception bound to a separate variable or else Python 3 # complains about UnboundLocalError. err = None # Keep track of whether we cleanly exited the except block. This # ensures we do proper cleanup in finally. 
clean_exit = False # Rewind body position, if needed. Record current position # for future rewinds in the event of a redirect/retry. body_pos = set_file_position(body, body_pos) try: # Request a connection from the queue. timeout_obj = self._get_timeout(timeout) conn = self._get_conn(timeout=pool_timeout) conn.timeout = timeout_obj.connect_timeout is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None) if is_new_proxy_conn: self._prepare_proxy(conn) # Make the request on the httplib connection object. httplib_response = self._make_request(conn, method, url, timeout=timeout_obj, body=body, headers=headers, chunked=chunked) # If we're going to release the connection in ``finally:``, then # the response doesn't need to know about the connection. Otherwise # it will also try to release it and we'll have a double-release # mess. response_conn = conn if not release_conn else None # Pass method to Response for length checking response_kw['request_method'] = method # Import httplib's response into our own wrapper object response = self.ResponseCls.from_httplib(httplib_response, pool=self, connection=response_conn, retries=retries, **response_kw) # Everything went great! clean_exit = True except queue.Empty: # Timed out by queue. raise EmptyPoolError(self, "No pool connections are available.") except (TimeoutError, HTTPException, SocketError, ProtocolError, BaseSSLError, SSLError, CertificateError) as e: # Discard the connection for these exceptions. It will be # replaced during the next _get_conn() call. clean_exit = False if isinstance(e, (BaseSSLError, CertificateError)): e = SSLError(e) elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy: e = ProxyError('Cannot connect to proxy.', e) elif isinstance(e, (SocketError, HTTPException)): e = ProtocolError('Connection aborted.', e) retries = retries.increment(method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]) retries.sleep() # Keep track of the error for the retry warning. err = e finally: if not clean_exit: # We hit some kind of exception, handled or otherwise. We need # to throw the connection away unless explicitly told not to. # Close the connection, set the variable to None, and make sure # we put the None back in the pool to avoid leaking it. conn = conn and conn.close() release_this_conn = True if release_this_conn: # Put the connection back to be reused. If the connection is # expired then it will be None, which will get replaced with a # fresh connection during _get_conn. self._put_conn(conn) if not conn: # Try again log.warning("Retrying (%r) after connection " "broken by '%r': %s", retries, err, url) return self.urlopen(method, url, body, headers, retries, redirect, assert_same_host, timeout=timeout, pool_timeout=pool_timeout, release_conn=release_conn, body_pos=body_pos, **response_kw) def drain_and_release_conn(response): try: # discard any remaining response body, the connection will be # released back to the pool once the entire response is read response.read() except (TimeoutError, HTTPException, SocketError, ProtocolError, BaseSSLError, SSLError) as e: pass # Handle redirect? redirect_location = redirect and response.get_redirect_location() if redirect_location: if response.status == 303: method = 'GET' try: retries = retries.increment(method, url, response=response, _pool=self) except MaxRetryError: if retries.raise_on_redirect: # Drain and release the connection for this response, since # we're not returning it to be released manually. 
drain_and_release_conn(response) raise return response # drain and return the connection to the pool before recursing drain_and_release_conn(response) retries.sleep_for_retry(response) log.debug("Redirecting %s -> %s", url, redirect_location) return self.urlopen( method, redirect_location, body, headers, retries=retries, redirect=redirect, assert_same_host=assert_same_host, timeout=timeout, pool_timeout=pool_timeout, release_conn=release_conn, body_pos=body_pos, **response_kw) # Check if we should retry the HTTP response. has_retry_after = bool(response.getheader('Retry-After')) if retries.is_retry(method, response.status, has_retry_after): try: retries = retries.increment(method, url, response=response, _pool=self) except MaxRetryError: if retries.raise_on_status: # Drain and release the connection for this response, since # we're not returning it to be released manually. drain_and_release_conn(response) raise return response # drain and return the connection to the pool before recursing drain_and_release_conn(response) retries.sleep(response) log.debug("Retry: %s", url) return self.urlopen( method, url, body, headers, retries=retries, redirect=redirect, assert_same_host=assert_same_host, timeout=timeout, pool_timeout=pool_timeout, release_conn=release_conn, body_pos=body_pos, **response_kw) return response class HTTPSConnectionPool(HTTPConnectionPool): """ Same as :class:`.HTTPConnectionPool`, but HTTPS. When Python is compiled with the :mod:`ssl` module, then :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates, instead of :class:`.HTTPSConnection`. :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``, ``assert_hostname`` and ``host`` in this order to verify connections. If ``assert_hostname`` is False, no verification is done. The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket into an SSL socket. """ scheme = 'https' ConnectionCls = HTTPSConnection def __init__(self, host, port=None, strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, headers=None, retries=None, _proxy=None, _proxy_headers=None, key_file=None, cert_file=None, cert_reqs=None, ca_certs=None, ssl_version=None, assert_hostname=None, assert_fingerprint=None, ca_cert_dir=None, **conn_kw): HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize, block, headers, retries, _proxy, _proxy_headers, **conn_kw) if ca_certs and cert_reqs is None: cert_reqs = 'CERT_REQUIRED' self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs self.ca_certs = ca_certs self.ca_cert_dir = ca_cert_dir self.ssl_version = ssl_version self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint def _prepare_conn(self, conn): """ Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket` and establish the tunnel if proxy is used. """ if isinstance(conn, VerifiedHTTPSConnection): conn.set_cert(key_file=self.key_file, cert_file=self.cert_file, cert_reqs=self.cert_reqs, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, assert_hostname=self.assert_hostname, assert_fingerprint=self.assert_fingerprint) conn.ssl_version = self.ssl_version return conn def _prepare_proxy(self, conn): """ Establish tunnel connection early, because otherwise httplib would improperly set Host: header to proxy's IP:port. 
""" # Python 2.7+ try: set_tunnel = conn.set_tunnel except AttributeError: # Platform-specific: Python 2.6 set_tunnel = conn._set_tunnel if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older set_tunnel(self._proxy_host, self.port) else: set_tunnel(self._proxy_host, self.port, self.proxy_headers) conn.connect() def _new_conn(self): """ Return a fresh :class:`httplib.HTTPSConnection`. """ self.num_connections += 1 log.debug("Starting new HTTPS connection (%d): %s:%s", self.num_connections, self.host, self.port or "443") if not self.ConnectionCls or self.ConnectionCls is DummyConnection: raise SSLError("Can't connect to HTTPS URL because the SSL " "module is not available.") actual_host = self.host actual_port = self.port if self.proxy is not None: actual_host = self.proxy.host actual_port = self.proxy.port conn = self.ConnectionCls(host=actual_host, port=actual_port, timeout=self.timeout.connect_timeout, strict=self.strict, **self.conn_kw) return self._prepare_conn(conn) def _validate_conn(self, conn): """ Called right before a request is made, after the socket is created. """ super(HTTPSConnectionPool, self)._validate_conn(conn) # Force connect early to allow us to validate the connection. if not getattr(conn, 'sock', None): # AppEngine might not have `.sock` conn.connect() if not conn.is_verified: warnings.warn(( 'Unverified HTTPS request is being made. ' 'Adding certificate verification is strongly advised. See: ' 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' '#ssl-warnings'), InsecureRequestWarning) def connection_from_url(url, **kw): """ Given a url, return an :class:`.ConnectionPool` instance of its host. This is a shortcut for not having to parse out the scheme, host, and port of the url before creating an :class:`.ConnectionPool` instance. :param url: Absolute URL string that must include the scheme. Port is optional. :param \\**kw: Passes additional parameters to the constructor of the appropriate :class:`.ConnectionPool`. Useful for specifying things like timeout, maxsize, headers, etc. Example:: >>> conn = connection_from_url('http://google.com/') >>> r = conn.request('GET', '/') """ scheme, host, port = get_host(url) port = port or port_by_scheme.get(scheme, 80) if scheme == 'https': return HTTPSConnectionPool(host, port=port, **kw) else: return HTTPConnectionPool(host, port=port, **kw) def _ipv6_host(host, scheme): """ Process IPv6 address literals """ # httplib doesn't like it when we include brackets in IPv6 addresses # Specifically, if we include brackets but also pass the port then # httplib crazily doubles up the square brackets on the Host header. # Instead, we need to make sure we never pass ``None`` as the port. # However, for backward compatibility reasons we can't actually # *assert* that. See http://bugs.python.org/issue28539 # # Also if an IPv6 address literal has a zone identifier, the # percent sign might be URIencoded, convert it back into ASCII if host.startswith('[') and host.endswith(']'): host = host.replace('%25', '%').strip('[]') if scheme in NORMALIZABLE_SCHEMES: host = host.lower() return host
35,464
38.101433
99
py
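connectionpool.py is the core of the library: HTTPConnectionPool.urlopen() checks the host, borrows a connection from the LifoQueue, performs the request, then applies the Retry object to connection errors, redirects and retryable status codes before returning an HTTPResponse. A hedged sketch of typical use; the host name and numbers are arbitrary examples, not taken from the source::

    from urllib3 import HTTPConnectionPool, Retry, Timeout

    pool = HTTPConnectionPool('example.org', port=80, maxsize=4, block=True,
                              timeout=Timeout(connect=2.0, read=5.0),
                              retries=Retry(total=3, redirect=2))
    r = pool.request('GET', '/')       # provided by the RequestMethods mixin
    print(r.status, len(r.data))
    pool.close()                       # drain the queue and close idle sockets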
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/response.py
from __future__ import absolute_import from contextlib import contextmanager import zlib import io import logging from socket import timeout as SocketTimeout from socket import error as SocketError from ._collections import HTTPHeaderDict from .exceptions import ( BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError, ResponseNotChunked, IncompleteRead, InvalidHeader ) from .packages.six import string_types as basestring, binary_type, PY3 from .packages.six.moves import http_client as httplib from .connection import HTTPException, BaseSSLError from .util.response import is_fp_closed, is_response_to_head log = logging.getLogger(__name__) class DeflateDecoder(object): def __init__(self): self._first_try = True self._data = binary_type() self._obj = zlib.decompressobj() def __getattr__(self, name): return getattr(self._obj, name) def decompress(self, data): if not data: return data if not self._first_try: return self._obj.decompress(data) self._data += data try: decompressed = self._obj.decompress(data) if decompressed: self._first_try = False self._data = None return decompressed except zlib.error: self._first_try = False self._obj = zlib.decompressobj(-zlib.MAX_WBITS) try: return self.decompress(self._data) finally: self._data = None class GzipDecoderState(object): FIRST_MEMBER = 0 OTHER_MEMBERS = 1 SWALLOW_DATA = 2 class GzipDecoder(object): def __init__(self): self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) self._state = GzipDecoderState.FIRST_MEMBER def __getattr__(self, name): return getattr(self._obj, name) def decompress(self, data): ret = binary_type() if self._state == GzipDecoderState.SWALLOW_DATA or not data: return ret while True: try: ret += self._obj.decompress(data) except zlib.error: previous_state = self._state # Ignore data after the first error self._state = GzipDecoderState.SWALLOW_DATA if previous_state == GzipDecoderState.OTHER_MEMBERS: # Allow trailing garbage acceptable in other gzip clients return ret raise data = self._obj.unused_data if not data: return ret self._state = GzipDecoderState.OTHER_MEMBERS self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) def _get_decoder(mode): if mode == 'gzip': return GzipDecoder() return DeflateDecoder() class HTTPResponse(io.IOBase): """ HTTP Response container. Backwards-compatible to httplib's HTTPResponse but the response ``body`` is loaded and decoded on-demand when the ``data`` property is accessed. This class is also compatible with the Python standard library's :mod:`io` module, and can hence be treated as a readable object in the context of that framework. Extra parameters for behaviour not present in httplib.HTTPResponse: :param preload_content: If True, the response's body will be preloaded during construction. :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. :param original_response: When this HTTPResponse wrapper is generated from an httplib.HTTPResponse object, it's convenient to include the original for debug purposes. It's otherwise unused. :param retries: The retries contains the last :class:`~urllib3.util.retry.Retry` that was used during the request. :param enforce_content_length: Enforce content length checking. Body returned by server must match value of Content-Length header, if present. Otherwise, raise error. 
""" CONTENT_DECODERS = ['gzip', 'deflate'] REDIRECT_STATUSES = [301, 302, 303, 307, 308] def __init__(self, body='', headers=None, status=0, version=0, reason=None, strict=0, preload_content=True, decode_content=True, original_response=None, pool=None, connection=None, msg=None, retries=None, enforce_content_length=False, request_method=None, request_url=None): if isinstance(headers, HTTPHeaderDict): self.headers = headers else: self.headers = HTTPHeaderDict(headers) self.status = status self.version = version self.reason = reason self.strict = strict self.decode_content = decode_content self.retries = retries self.enforce_content_length = enforce_content_length self._decoder = None self._body = None self._fp = None self._original_response = original_response self._fp_bytes_read = 0 self.msg = msg self._request_url = request_url if body and isinstance(body, (basestring, binary_type)): self._body = body self._pool = pool self._connection = connection if hasattr(body, 'read'): self._fp = body # Are we using the chunked-style of transfer encoding? self.chunked = False self.chunk_left = None tr_enc = self.headers.get('transfer-encoding', '').lower() # Don't incur the penalty of creating a list and then discarding it encodings = (enc.strip() for enc in tr_enc.split(",")) if "chunked" in encodings: self.chunked = True # Determine length of response self.length_remaining = self._init_length(request_method) # If requested, preload the body. if preload_content and not self._body: self._body = self.read(decode_content=decode_content) def get_redirect_location(self): """ Should we redirect and where to? :returns: Truthy redirect location string if we got a redirect status code and valid location. ``None`` if redirect status and no location. ``False`` if not a redirect status code. """ if self.status in self.REDIRECT_STATUSES: return self.headers.get('location') return False def release_conn(self): if not self._pool or not self._connection: return self._pool._put_conn(self._connection) self._connection = None @property def data(self): # For backwords-compat with earlier urllib3 0.4 and earlier. if self._body: return self._body if self._fp: return self.read(cache_content=True) @property def connection(self): return self._connection def isclosed(self): return is_fp_closed(self._fp) def tell(self): """ Obtain the number of bytes pulled over the wire so far. May differ from the amount of content returned by :meth:``HTTPResponse.read`` if bytes are encoded on the wire (e.g, compressed). """ return self._fp_bytes_read def _init_length(self, request_method): """ Set initial length value for Response content if available. """ length = self.headers.get('content-length') if length is not None: if self.chunked: # This Response will fail with an IncompleteRead if it can't be # received as chunked. This method falls back to attempt reading # the response before raising an exception. log.warning("Received response with both Content-Length and " "Transfer-Encoding set. This is expressly forbidden " "by RFC 7230 sec 3.3.2. Ignoring Content-Length and " "attempting to process response as Transfer-Encoding: " "chunked.") return None try: # RFC 7230 section 3.3.2 specifies multiple content lengths can # be sent in a single Content-Length header # (e.g. Content-Length: 42, 42). This line ensures the values # are all valid ints and that as long as the `set` length is 1, # all values are the same. Otherwise, the header is invalid. 
lengths = set([int(val) for val in length.split(',')]) if len(lengths) > 1: raise InvalidHeader("Content-Length contained multiple " "unmatching values (%s)" % length) length = lengths.pop() except ValueError: length = None else: if length < 0: length = None # Convert status to int for comparison # In some cases, httplib returns a status of "_UNKNOWN" try: status = int(self.status) except ValueError: status = 0 # Check for responses that shouldn't include a body if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD': length = 0 return length def _init_decoder(self): """ Set-up the _decoder attribute if necessary. """ # Note: content-encoding value should be case-insensitive, per RFC 7230 # Section 3.2 content_encoding = self.headers.get('content-encoding', '').lower() if self._decoder is None and content_encoding in self.CONTENT_DECODERS: self._decoder = _get_decoder(content_encoding) def _decode(self, data, decode_content, flush_decoder): """ Decode the data passed in and potentially flush the decoder. """ try: if decode_content and self._decoder: data = self._decoder.decompress(data) except (IOError, zlib.error) as e: content_encoding = self.headers.get('content-encoding', '').lower() raise DecodeError( "Received response with content-encoding: %s, but " "failed to decode it." % content_encoding, e) if flush_decoder and decode_content: data += self._flush_decoder() return data def _flush_decoder(self): """ Flushes the decoder. Should only be called if the decoder is actually being used. """ if self._decoder: buf = self._decoder.decompress(b'') return buf + self._decoder.flush() return b'' @contextmanager def _error_catcher(self): """ Catch low-level python exceptions, instead re-raising urllib3 variants, so that low-level exceptions are not leaked in the high-level api. On exit, release the connection back to the pool. """ clean_exit = False try: try: yield except SocketTimeout: # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but # there is yet no clean way to get at it from this context. raise ReadTimeoutError(self._pool, None, 'Read timed out.') except BaseSSLError as e: # FIXME: Is there a better way to differentiate between SSLErrors? if 'read operation timed out' not in str(e): # Defensive: # This shouldn't happen but just in case we're missing an edge # case, let's avoid swallowing SSL errors. raise raise ReadTimeoutError(self._pool, None, 'Read timed out.') except (HTTPException, SocketError) as e: # This includes IncompleteRead. raise ProtocolError('Connection broken: %r' % e, e) # If no exception is thrown, we should avoid cleaning up # unnecessarily. clean_exit = True finally: # If we didn't terminate cleanly, we need to throw away our # connection. if not clean_exit: # The response may not be closed but we're not going to use it # anymore so close it now to ensure that the connection is # released back to the pool. if self._original_response: self._original_response.close() # Closing the response may not actually be sufficient to close # everything, so if we have a hold of the connection close that # too. if self._connection: self._connection.close() # If we hold the original response but it's closed now, we should # return the connection back to the pool. if self._original_response and self._original_response.isclosed(): self.release_conn() def read(self, amt=None, decode_content=None, cache_content=False): """ Similar to :meth:`httplib.HTTPResponse.read`, but with two additional parameters: ``decode_content`` and ``cache_content``. 
:param amt: How much of the content to read. If specified, caching is skipped because it doesn't make sense to cache partial content as the full response. :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. :param cache_content: If True, will save the returned data such that the same result is returned despite of the state of the underlying file object. This is useful if you want the ``.data`` property to continue working after having ``.read()`` the file object. (Overridden if ``amt`` is set.) """ self._init_decoder() if decode_content is None: decode_content = self.decode_content if self._fp is None: return flush_decoder = False data = None with self._error_catcher(): if amt is None: # cStringIO doesn't like amt=None data = self._fp.read() flush_decoder = True else: cache_content = False data = self._fp.read(amt) if amt != 0 and not data: # Platform-specific: Buggy versions of Python. # Close the connection when no data is returned # # This is redundant to what httplib/http.client _should_ # already do. However, versions of python released before # December 15, 2012 (http://bugs.python.org/issue16298) do # not properly close the connection in all cases. There is # no harm in redundantly calling close. self._fp.close() flush_decoder = True if self.enforce_content_length and self.length_remaining not in (0, None): # This is an edge case that httplib failed to cover due # to concerns of backward compatibility. We're # addressing it here to make sure IncompleteRead is # raised during streaming, so all calls with incorrect # Content-Length are caught. raise IncompleteRead(self._fp_bytes_read, self.length_remaining) if data: self._fp_bytes_read += len(data) if self.length_remaining is not None: self.length_remaining -= len(data) data = self._decode(data, decode_content, flush_decoder) if cache_content: self._body = data return data def stream(self, amt=2**16, decode_content=None): """ A generator wrapper for the read() method. A call will block until ``amt`` bytes have been read from the connection or until the connection is closed. :param amt: How much of the content to read. The generator will return up to much data per iteration, but may return less. This is particularly likely when using compressed data. However, the empty string will never be returned. :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. """ if self.chunked and self.supports_chunked_reads(): for line in self.read_chunked(amt, decode_content=decode_content): yield line else: while not is_fp_closed(self._fp): data = self.read(amt=amt, decode_content=decode_content) if data: yield data @classmethod def from_httplib(ResponseCls, r, **response_kw): """ Given an :class:`httplib.HTTPResponse` instance ``r``, return a corresponding :class:`urllib3.response.HTTPResponse` object. Remaining parameters are passed to the HTTPResponse constructor, along with ``original_response=r``. 
""" headers = r.msg if not isinstance(headers, HTTPHeaderDict): if PY3: # Python 3 headers = HTTPHeaderDict(headers.items()) else: # Python 2 headers = HTTPHeaderDict.from_httplib(headers) # HTTPResponse objects in Python 3 don't have a .strict attribute strict = getattr(r, 'strict', 0) resp = ResponseCls(body=r, headers=headers, status=r.status, version=r.version, reason=r.reason, strict=strict, original_response=r, **response_kw) return resp # Backwards-compatibility methods for httplib.HTTPResponse def getheaders(self): return self.headers def getheader(self, name, default=None): return self.headers.get(name, default) # Backwards compatibility for http.cookiejar def info(self): return self.headers # Overrides from io.IOBase def close(self): if not self.closed: self._fp.close() if self._connection: self._connection.close() @property def closed(self): if self._fp is None: return True elif hasattr(self._fp, 'isclosed'): return self._fp.isclosed() elif hasattr(self._fp, 'closed'): return self._fp.closed else: return True def fileno(self): if self._fp is None: raise IOError("HTTPResponse has no file to get a fileno from") elif hasattr(self._fp, "fileno"): return self._fp.fileno() else: raise IOError("The file-like object this HTTPResponse is wrapped " "around has no file descriptor") def flush(self): if self._fp is not None and hasattr(self._fp, 'flush'): return self._fp.flush() def readable(self): # This method is required for `io` module compatibility. return True def readinto(self, b): # This method is required for `io` module compatibility. temp = self.read(len(b)) if len(temp) == 0: return 0 else: b[:len(temp)] = temp return len(temp) def supports_chunked_reads(self): """ Checks if the underlying file-like object looks like a httplib.HTTPResponse object. We do this by testing for the fp attribute. If it is present we assume it returns raw chunks as processed by read_chunked(). """ return hasattr(self._fp, 'fp') def _update_chunk_length(self): # First, we'll figure out length of a chunk and then # we'll try to read it from socket. if self.chunk_left is not None: return line = self._fp.fp.readline() line = line.split(b';', 1)[0] try: self.chunk_left = int(line, 16) except ValueError: # Invalid chunked protocol response, abort. self.close() raise httplib.IncompleteRead(line) def _handle_chunk(self, amt): returned_chunk = None if amt is None: chunk = self._fp._safe_read(self.chunk_left) returned_chunk = chunk self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. self.chunk_left = None elif amt < self.chunk_left: value = self._fp._safe_read(amt) self.chunk_left = self.chunk_left - amt returned_chunk = value elif amt == self.chunk_left: value = self._fp._safe_read(amt) self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. self.chunk_left = None returned_chunk = value else: # amt > self.chunk_left returned_chunk = self._fp._safe_read(self.chunk_left) self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. self.chunk_left = None return returned_chunk def read_chunked(self, amt=None, decode_content=None): """ Similar to :meth:`HTTPResponse.read`, but with an additional parameter: ``decode_content``. :param amt: How much of the content to read. If specified, caching is skipped because it doesn't make sense to cache partial content as the full response. :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. """ self._init_decoder() # FIXME: Rewrite this method and make it a class with a better structured logic. 
if not self.chunked: raise ResponseNotChunked( "Response is not chunked. " "Header 'transfer-encoding: chunked' is missing.") if not self.supports_chunked_reads(): raise BodyNotHttplibCompatible( "Body should be httplib.HTTPResponse like. " "It should have have an fp attribute which returns raw chunks.") with self._error_catcher(): # Don't bother reading the body of a HEAD request. if self._original_response and is_response_to_head(self._original_response): self._original_response.close() return # If a response is already read and closed # then return immediately. if self._fp.fp is None: return while True: self._update_chunk_length() if self.chunk_left == 0: break chunk = self._handle_chunk(amt) decoded = self._decode(chunk, decode_content=decode_content, flush_decoder=False) if decoded: yield decoded if decode_content: # On CPython and PyPy, we should never need to flush the # decoder. However, on Jython we *might* need to, so # lets defensively do it anyway. decoded = self._flush_decoder() if decoded: # Platform-specific: Jython. yield decoded # Chunk content ends with \r\n: discard it. while True: line = self._fp.fp.readline() if not line: # Some sites may not end with '\r\n'. break if line == b'\r\n': break # We read everything; close the "file". if self._original_response: self._original_response.close() def geturl(self): """ Returns the URL that was the source of this response. If the request that generated this response redirected, this method will return the final redirect location. """ if self.retries is not None and len(self.retries.history): return self.retries.history[-1].redirect_location else: return self._request_url
24,667
35.437223
94
py
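response.py wraps httplib's response object: content is decoded on demand (gzip/deflate), Content-Length is enforced when enforce_content_length is set, and stream()/read_chunked() allow incremental reads while _error_catcher() maps low-level socket errors to urllib3 exceptions. A sketch of streaming a body instead of preloading it; PoolManager lives in poolmanager.py (not shown here) and the URL is an assumption::

    import urllib3

    http = urllib3.PoolManager()
    r = http.request('GET', 'http://example.org/', preload_content=False)
    buf = bytearray()
    for chunk in r.stream(1024, decode_content=True):   # yields decoded chunks
        buf.extend(chunk)
    r.release_conn()            # hand the socket back to its pool
    print(r.status, len(buf))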
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/__init__.py
""" urllib3 - Thread-safe connection pooling and re-using. """ from __future__ import absolute_import import warnings from .connectionpool import ( HTTPConnectionPool, HTTPSConnectionPool, connection_from_url ) from . import exceptions from .filepost import encode_multipart_formdata from .poolmanager import PoolManager, ProxyManager, proxy_from_url from .response import HTTPResponse from .util.request import make_headers from .util.url import get_host from .util.timeout import Timeout from .util.retry import Retry # Set default logging handler to avoid "No handler found" warnings. import logging try: # Python 2.7+ from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass __author__ = 'Andrey Petrov ([email protected])' __license__ = 'MIT' __version__ = '1.23' __all__ = ( 'HTTPConnectionPool', 'HTTPSConnectionPool', 'PoolManager', 'ProxyManager', 'HTTPResponse', 'Retry', 'Timeout', 'add_stderr_logger', 'connection_from_url', 'disable_warnings', 'encode_multipart_formdata', 'get_host', 'make_headers', 'proxy_from_url', ) logging.getLogger(__name__).addHandler(NullHandler()) def add_stderr_logger(level=logging.DEBUG): """ Helper for quickly adding a StreamHandler to the logger. Useful for debugging. Returns the handler after adding it. """ # This method needs to be in this __init__.py to get the __name__ correct # even if urllib3 is vendored within another package. logger = logging.getLogger(__name__) handler = logging.StreamHandler() handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) logger.addHandler(handler) logger.setLevel(level) logger.debug('Added a stderr logging handler to logger: %s', __name__) return handler # ... Clean up. del NullHandler # All warning filters *must* be appended unless you're really certain that they # shouldn't be: otherwise, it's very hard for users to use most Python # mechanisms to silence them. # SecurityWarning's always go off by default. warnings.simplefilter('always', exceptions.SecurityWarning, append=True) # SubjectAltNameWarning's should go off once per host warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True) # InsecurePlatformWarning's don't vary between requests, so we keep it default. warnings.simplefilter('default', exceptions.InsecurePlatformWarning, append=True) # SNIMissingWarnings should go off only once. warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True) def disable_warnings(category=exceptions.HTTPWarning): """ Helper for quickly disabling all urllib3 warnings. """ warnings.simplefilter('ignore', category)
2,853
28.122449
84
py
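The package __init__ wires up a NullHandler, the default warning filters, and two small helpers for debugging and for silencing warnings. For example (logging level and warning category chosen arbitrarily)::

    import logging
    import urllib3

    urllib3.add_stderr_logger(logging.DEBUG)   # attach a StreamHandler for debugging
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)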
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/fields.py
from __future__ import absolute_import import email.utils import mimetypes from .packages import six def guess_content_type(filename, default='application/octet-stream'): """ Guess the "Content-Type" of a file. :param filename: The filename to guess the "Content-Type" of using :mod:`mimetypes`. :param default: If no "Content-Type" can be guessed, default to `default`. """ if filename: return mimetypes.guess_type(filename)[0] or default return default def format_header_param(name, value): """ Helper function to format and quote a single header parameter. Particularly useful for header parameters which might contain non-ASCII values, like file names. This follows RFC 2231, as suggested by RFC 2388 Section 4.4. :param name: The name of the parameter, a string expected to be ASCII only. :param value: The value of the parameter, provided as a unicode string. """ if not any(ch in value for ch in '"\\\r\n'): result = '%s="%s"' % (name, value) try: result.encode('ascii') except (UnicodeEncodeError, UnicodeDecodeError): pass else: return result if not six.PY3 and isinstance(value, six.text_type): # Python 2: value = value.encode('utf-8') value = email.utils.encode_rfc2231(value, 'utf-8') value = '%s*=%s' % (name, value) return value class RequestField(object): """ A data container for request body parameters. :param name: The name of this request field. :param data: The data/value body. :param filename: An optional filename of the request field. :param headers: An optional dict-like object of headers to initially use for the field. """ def __init__(self, name, data, filename=None, headers=None): self._name = name self._filename = filename self.data = data self.headers = {} if headers: self.headers = dict(headers) @classmethod def from_tuples(cls, fieldname, value): """ A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters. Supports constructing :class:`~urllib3.fields.RequestField` from parameter of key/value strings AND key/filetuple. A filetuple is a (filename, data, MIME type) tuple where the MIME type is optional. For example:: 'foo': 'bar', 'fakefile': ('foofile.txt', 'contents of foofile'), 'realfile': ('barfile.txt', open('realfile').read()), 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'), 'nonamefile': 'contents of nonamefile field', Field names and filenames must be unicode. """ if isinstance(value, tuple): if len(value) == 3: filename, data, content_type = value else: filename, data = value content_type = guess_content_type(filename) else: filename = None content_type = None data = value request_param = cls(fieldname, data, filename=filename) request_param.make_multipart(content_type=content_type) return request_param def _render_part(self, name, value): """ Overridable helper function to format a single header parameter. :param name: The name of the parameter, a string expected to be ASCII only. :param value: The value of the parameter, provided as a unicode string. """ return format_header_param(name, value) def _render_parts(self, header_parts): """ Helper function to format and quote a single header. Useful for single headers that are composed of multiple items. E.g., 'Content-Disposition' fields. :param header_parts: A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format as `k1="v1"; k2="v2"; ...`. 
""" parts = [] iterable = header_parts if isinstance(header_parts, dict): iterable = header_parts.items() for name, value in iterable: if value is not None: parts.append(self._render_part(name, value)) return '; '.join(parts) def render_headers(self): """ Renders the headers for this request field. """ lines = [] sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location'] for sort_key in sort_keys: if self.headers.get(sort_key, False): lines.append('%s: %s' % (sort_key, self.headers[sort_key])) for header_name, header_value in self.headers.items(): if header_name not in sort_keys: if header_value: lines.append('%s: %s' % (header_name, header_value)) lines.append('\r\n') return '\r\n'.join(lines) def make_multipart(self, content_disposition=None, content_type=None, content_location=None): """ Makes this request field into a multipart request field. This method overrides "Content-Disposition", "Content-Type" and "Content-Location" headers to the request parameter. :param content_type: The 'Content-Type' of the request body. :param content_location: The 'Content-Location' of the request body. """ self.headers['Content-Disposition'] = content_disposition or 'form-data' self.headers['Content-Disposition'] += '; '.join([ '', self._render_parts( (('name', self._name), ('filename', self._filename)) ) ]) self.headers['Content-Type'] = content_type self.headers['Content-Location'] = content_location
5,943
32.206704
88
py
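fields.py models a single multipart field: from_tuples() accepts the same (filename, data, MIME type) tuples as encode_multipart_formdata(), and make_multipart()/render_headers() produce the per-part headers, quoting non-ASCII parameter values per RFC 2231. An illustrative sketch with an invented field name and payload::

    from urllib3.fields import RequestField

    rf = RequestField.from_tuples('avatar', ('cat.png', b'<binary data>', 'image/png'))
    print(rf.render_headers())
    # Expected output, roughly:
    # Content-Disposition: form-data; name="avatar"; filename="cat.png"
    # Content-Type: image/png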
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/request.py
from __future__ import absolute_import from .filepost import encode_multipart_formdata from .packages.six.moves.urllib.parse import urlencode __all__ = ['RequestMethods'] class RequestMethods(object): """ Convenience mixin for classes who implement a :meth:`urlopen` method, such as :class:`~urllib3.connectionpool.HTTPConnectionPool` and :class:`~urllib3.poolmanager.PoolManager`. Provides behavior for making common types of HTTP request methods and decides which type of request field encoding to use. Specifically, :meth:`.request_encode_url` is for sending requests whose fields are encoded in the URL (such as GET, HEAD, DELETE). :meth:`.request_encode_body` is for sending requests whose fields are encoded in the *body* of the request using multipart or www-form-urlencoded (such as for POST, PUT, PATCH). :meth:`.request` is for making any kind of request, it will look up the appropriate encoding format and use one of the above two methods to make the request. Initializer parameters: :param headers: Headers to include with all requests, unless other headers are given explicitly. """ _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS']) def __init__(self, headers=None): self.headers = headers or {} def urlopen(self, method, url, body=None, headers=None, encode_multipart=True, multipart_boundary=None, **kw): # Abstract raise NotImplementedError("Classes extending RequestMethods must implement " "their own ``urlopen`` method.") def request(self, method, url, fields=None, headers=None, **urlopen_kw): """ Make a request using :meth:`urlopen` with the appropriate encoding of ``fields`` based on the ``method`` used. This is a convenience method that requires the least amount of manual effort. It can be used in most situations, while still having the option to drop down to more specific methods when necessary, such as :meth:`request_encode_url`, :meth:`request_encode_body`, or even the lowest level :meth:`urlopen`. """ method = method.upper() urlopen_kw['request_url'] = url if method in self._encode_url_methods: return self.request_encode_url(method, url, fields=fields, headers=headers, **urlopen_kw) else: return self.request_encode_body(method, url, fields=fields, headers=headers, **urlopen_kw) def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw): """ Make a request using :meth:`urlopen` with the ``fields`` encoded in the url. This is useful for request methods like GET, HEAD, DELETE, etc. """ if headers is None: headers = self.headers extra_kw = {'headers': headers} extra_kw.update(urlopen_kw) if fields: url += '?' + urlencode(fields) return self.urlopen(method, url, **extra_kw) def request_encode_body(self, method, url, fields=None, headers=None, encode_multipart=True, multipart_boundary=None, **urlopen_kw): """ Make a request using :meth:`urlopen` with the ``fields`` encoded in the body. This is useful for request methods like POST, PUT, PATCH, etc. When ``encode_multipart=True`` (default), then :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode the payload with the appropriate content type. Otherwise :meth:`urllib.urlencode` is used with the 'application/x-www-form-urlencoded' content type. Multipart encoding must be used when posting files, and it's reasonably safe to use it in other times too. However, it may break request signing, such as with OAuth. Supports an optional ``fields`` parameter of key/value strings AND key/filetuple. A filetuple is a (filename, data, MIME type) tuple where the MIME type is optional. 
For example:: fields = { 'foo': 'bar', 'fakefile': ('foofile.txt', 'contents of foofile'), 'realfile': ('barfile.txt', open('realfile').read()), 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'), 'nonamefile': 'contents of nonamefile field', } When uploading a file, providing a filename (the first parameter of the tuple) is optional but recommended to best mimic behavior of browsers. Note that if ``headers`` are supplied, the 'Content-Type' header will be overwritten because it depends on the dynamic random boundary string which is used to compose the body of the request. The random boundary string can be explicitly set with the ``multipart_boundary`` parameter. """ if headers is None: headers = self.headers extra_kw = {'headers': {}} if fields: if 'body' in urlopen_kw: raise TypeError( "request got values for both 'fields' and 'body', can only specify one.") if encode_multipart: body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary) else: body, content_type = urlencode(fields), 'application/x-www-form-urlencoded' extra_kw['body'] = body extra_kw['headers'] = {'Content-Type': content_type} extra_kw['headers'].update(headers) extra_kw.update(urlopen_kw) return self.urlopen(method, url, **extra_kw)
5,996
38.715232
99
py
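As a rough illustration of how the RequestMethods mixin above decides between URL-encoding and body-encoding, the sketch below subclasses it with a stand-in urlopen() that only echoes what it was handed; DummyRequester and the example.com URLs are made-up names for illustration, not part of urllib3.

from urllib3.request import RequestMethods


class DummyRequester(RequestMethods):
    """Hypothetical subclass: records the prepared call instead of sending it."""

    def urlopen(self, method, url, body=None, headers=None, **kw):
        return {'method': method, 'url': url, 'body': body, 'headers': headers}


r = DummyRequester()

# GET/HEAD/DELETE/OPTIONS: fields are urlencoded into the URL.
get_call = r.request('GET', 'http://example.com/search', fields={'q': 'urllib3'})
print(get_call['url'])                       # http://example.com/search?q=urllib3

# Everything else: fields are encoded into the body, multipart by default.
post_call = r.request('POST', 'http://example.com/upload',
                      fields={'file': ('report.txt', b'data', 'text/plain')})
print(post_call['headers']['Content-Type'])  # multipart/form-data; boundary=...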
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/util/url.py
from __future__ import absolute_import from collections import namedtuple from ..exceptions import LocationParseError url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'] # We only want to normalize urls with an HTTP(S) scheme. # urllib3 infers URLs without a scheme (None) to be http. NORMALIZABLE_SCHEMES = ('http', 'https', None) class Url(namedtuple('Url', url_attrs)): """ Datastructure for representing an HTTP URL. Used as a return value for :func:`parse_url`. Both the scheme and host are normalized as they are both case-insensitive according to RFC 3986. """ __slots__ = () def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None): if path and not path.startswith('/'): path = '/' + path if scheme: scheme = scheme.lower() if host and scheme in NORMALIZABLE_SCHEMES: host = host.lower() return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment) @property def hostname(self): """For backwards-compatibility with urlparse. We're nice like that.""" return self.host @property def request_uri(self): """Absolute path including the query string.""" uri = self.path or '/' if self.query is not None: uri += '?' + self.query return uri @property def netloc(self): """Network location including host and port""" if self.port: return '%s:%d' % (self.host, self.port) return self.host @property def url(self): """ Convert self into a url This function should more or less round-trip with :func:`.parse_url`. The returned url may not be exactly the same as the url inputted to :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls with a blank port will have : removed). Example: :: >>> U = parse_url('http://google.com/mail/') >>> U.url 'http://google.com/mail/' >>> Url('http', 'username:password', 'host.com', 80, ... '/path', 'query', 'fragment').url 'http://username:[email protected]:80/path?query#fragment' """ scheme, auth, host, port, path, query, fragment = self url = '' # We use "is not None" we want things to happen with empty strings (or 0 port) if scheme is not None: url += scheme + '://' if auth is not None: url += auth + '@' if host is not None: url += host if port is not None: url += ':' + str(port) if path is not None: url += path if query is not None: url += '?' + query if fragment is not None: url += '#' + fragment return url def __str__(self): return self.url def split_first(s, delims): """ Given a string and an iterable of delimiters, split on the first found delimiter. Return two split parts and the matched delimiter. If not found, then the first part is the full input string. Example:: >>> split_first('foo/bar?baz', '?/=') ('foo', 'bar?baz', '/') >>> split_first('foo/bar?baz', '123') ('foo/bar?baz', '', None) Scales linearly with number of delims. Not ideal for large number of delims. """ min_idx = None min_delim = None for d in delims: idx = s.find(d) if idx < 0: continue if min_idx is None or idx < min_idx: min_idx = idx min_delim = d if min_idx is None or min_idx < 0: return s, '', None return s[:min_idx], s[min_idx + 1:], min_delim def parse_url(url): """ Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is performed to parse incomplete urls. Fields not provided will be None. Partly backwards-compatible with :mod:`urlparse`. Example:: >>> parse_url('http://google.com/mail/') Url(scheme='http', host='google.com', port=None, path='/mail/', ...) >>> parse_url('google.com:80') Url(scheme=None, host='google.com', port=80, path=None, ...) 
>>> parse_url('/foo?bar') Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...) """ # While this code has overlap with stdlib's urlparse, it is much # simplified for our needs and less annoying. # Additionally, this implementations does silly things to be optimal # on CPython. if not url: # Empty return Url() scheme = None auth = None host = None port = None path = None fragment = None query = None # Scheme if '://' in url: scheme, url = url.split('://', 1) # Find the earliest Authority Terminator # (http://tools.ietf.org/html/rfc3986#section-3.2) url, path_, delim = split_first(url, ['/', '?', '#']) if delim: # Reassemble the path path = delim + path_ # Auth if '@' in url: # Last '@' denotes end of auth part auth, url = url.rsplit('@', 1) # IPv6 if url and url[0] == '[': host, url = url.split(']', 1) host += ']' # Port if ':' in url: _host, port = url.split(':', 1) if not host: host = _host if port: # If given, ports must be integers. No whitespace, no plus or # minus prefixes, no non-integer digits such as ^2 (superscript). if not port.isdigit(): raise LocationParseError(url) try: port = int(port) except ValueError: raise LocationParseError(url) else: # Blank ports are cool, too. (rfc3986#section-3.2.3) port = None elif not host and url: host = url if not path: return Url(scheme, auth, host, port, path, query, fragment) # Fragment if '#' in path: path, fragment = path.split('#', 1) # Query if '?' in path: path, query = path.split('?', 1) return Url(scheme, auth, host, port, path, query, fragment) def get_host(url): """ Deprecated. Use :func:`parse_url` instead. """ p = parse_url(url) return p.scheme or 'http', p.hostname, p.port
6,487
27.08658
86
py
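To make the parsing behaviour above concrete, here is a small sketch that runs a URL through parse_url() and builds a Url by hand; the user:pw and example.com values are placeholders.

from urllib3.util.url import Url, parse_url

u = parse_url('https://user:[email protected]:8443/mail/?folder=inbox#top')
print(u.scheme)       # 'https'
print(u.auth)         # 'user:pw'
print(u.host)         # 'example.com'
print(u.port)         # 8443
print(u.request_uri)  # '/mail/?folder=inbox'
print(u.netloc)       # 'example.com:8443'

# Building a Url directly; scheme/host are lowercased and a missing leading
# slash on the path is added for us.
print(Url(scheme='HTTP', host='Example.COM', path='mail').url)  # 'http://example.com/mail'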
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/util/ssl_.py
from __future__ import absolute_import import errno import warnings import hmac import socket from binascii import hexlify, unhexlify from hashlib import md5, sha1, sha256 from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning from ..packages import six SSLContext = None HAS_SNI = False IS_PYOPENSSL = False IS_SECURETRANSPORT = False # Maps the length of a digest to a possible hash function producing this digest HASHFUNC_MAP = { 32: md5, 40: sha1, 64: sha256, } def _const_compare_digest_backport(a, b): """ Compare two digests of equal length in constant time. The digests must be of type str/bytes. Returns True if the digests match, and False otherwise. """ result = abs(len(a) - len(b)) for l, r in zip(bytearray(a), bytearray(b)): result |= l ^ r return result == 0 _const_compare_digest = getattr(hmac, 'compare_digest', _const_compare_digest_backport) try: # Test for SSL features import ssl from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23 from ssl import HAS_SNI # Has SNI? except ImportError: pass try: from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION except ImportError: OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000 OP_NO_COMPRESSION = 0x20000 # Python 2.7 and earlier didn't have inet_pton on non-Linux # so we fallback on inet_aton in those cases. This means that # we can only detect IPv4 addresses in this case. if hasattr(socket, 'inet_pton'): inet_pton = socket.inet_pton else: # Maybe we can use ipaddress if the user has urllib3[secure]? try: import ipaddress def inet_pton(_, host): if isinstance(host, six.binary_type): host = host.decode('ascii') return ipaddress.ip_address(host) except ImportError: # Platform-specific: Non-Linux def inet_pton(_, host): return socket.inet_aton(host) # A secure default. # Sources for more information on TLS ciphers: # # - https://wiki.mozilla.org/Security/Server_Side_TLS # - https://www.ssllabs.com/projects/best-practices/index.html # - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ # # The general intent is: # - Prefer TLS 1.3 cipher suites # - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE), # - prefer ECDHE over DHE for better performance, # - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and # security, # - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common, # - disable NULL authentication, MD5 MACs and DSS for security reasons. DEFAULT_CIPHERS = ':'.join([ 'TLS13-AES-256-GCM-SHA384', 'TLS13-CHACHA20-POLY1305-SHA256', 'TLS13-AES-128-GCM-SHA256', 'ECDH+AESGCM', 'ECDH+CHACHA20', 'DH+AESGCM', 'DH+CHACHA20', 'ECDH+AES256', 'DH+AES256', 'ECDH+AES128', 'DH+AES', 'RSA+AESGCM', 'RSA+AES', '!aNULL', '!eNULL', '!MD5', ]) try: from ssl import SSLContext # Modern SSL? 
except ImportError: import sys class SSLContext(object): # Platform-specific: Python 2 & 3.1 supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or (3, 2) <= sys.version_info) def __init__(self, protocol_version): self.protocol = protocol_version # Use default values from a real SSLContext self.check_hostname = False self.verify_mode = ssl.CERT_NONE self.ca_certs = None self.options = 0 self.certfile = None self.keyfile = None self.ciphers = None def load_cert_chain(self, certfile, keyfile): self.certfile = certfile self.keyfile = keyfile def load_verify_locations(self, cafile=None, capath=None): self.ca_certs = cafile if capath is not None: raise SSLError("CA directories not supported in older Pythons") def set_ciphers(self, cipher_suite): if not self.supports_set_ciphers: raise TypeError( 'Your version of Python does not support setting ' 'a custom cipher suite. Please upgrade to Python ' '2.7, 3.2, or later if you need this functionality.' ) self.ciphers = cipher_suite def wrap_socket(self, socket, server_hostname=None, server_side=False): warnings.warn( 'A true SSLContext object is not available. This prevents ' 'urllib3 from configuring SSL appropriately and may cause ' 'certain SSL connections to fail. You can upgrade to a newer ' 'version of Python to solve this. For more information, see ' 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' '#ssl-warnings', InsecurePlatformWarning ) kwargs = { 'keyfile': self.keyfile, 'certfile': self.certfile, 'ca_certs': self.ca_certs, 'cert_reqs': self.verify_mode, 'ssl_version': self.protocol, 'server_side': server_side, } if self.supports_set_ciphers: # Platform-specific: Python 2.7+ return wrap_socket(socket, ciphers=self.ciphers, **kwargs) else: # Platform-specific: Python 2.6 return wrap_socket(socket, **kwargs) def assert_fingerprint(cert, fingerprint): """ Checks if given fingerprint matches the supplied certificate. :param cert: Certificate as bytes object. :param fingerprint: Fingerprint as string of hexdigits, can be interspersed by colons. """ fingerprint = fingerprint.replace(':', '').lower() digest_length = len(fingerprint) hashfunc = HASHFUNC_MAP.get(digest_length) if not hashfunc: raise SSLError( 'Fingerprint of invalid length: {0}'.format(fingerprint)) # We need encode() here for py32; works on py2 and p33. fingerprint_bytes = unhexlify(fingerprint.encode()) cert_digest = hashfunc(cert).digest() if not _const_compare_digest(cert_digest, fingerprint_bytes): raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".' .format(fingerprint, hexlify(cert_digest))) def resolve_cert_reqs(candidate): """ Resolves the argument to a numeric constant, which can be passed to the wrap_socket function/method from the ssl module. Defaults to :data:`ssl.CERT_NONE`. If given a string it is assumed to be the name of the constant in the :mod:`ssl` module or its abbreviation. (So you can specify `REQUIRED` instead of `CERT_REQUIRED`. If it's neither `None` nor a string we assume it is already the numeric constant which can directly be passed to wrap_socket. 
""" if candidate is None: return CERT_NONE if isinstance(candidate, str): res = getattr(ssl, candidate, None) if res is None: res = getattr(ssl, 'CERT_' + candidate) return res return candidate def resolve_ssl_version(candidate): """ like resolve_cert_reqs """ if candidate is None: return PROTOCOL_SSLv23 if isinstance(candidate, str): res = getattr(ssl, candidate, None) if res is None: res = getattr(ssl, 'PROTOCOL_' + candidate) return res return candidate def create_urllib3_context(ssl_version=None, cert_reqs=None, options=None, ciphers=None): """All arguments have the same meaning as ``ssl_wrap_socket``. By default, this function does a lot of the same work that ``ssl.create_default_context`` does on Python 3.4+. It: - Disables SSLv2, SSLv3, and compression - Sets a restricted set of server ciphers If you wish to enable SSLv3, you can do:: from urllib3.util import ssl_ context = ssl_.create_urllib3_context() context.options &= ~ssl_.OP_NO_SSLv3 You can do the same to enable compression (substituting ``COMPRESSION`` for ``SSLv3`` in the last line above). :param ssl_version: The desired protocol version to use. This will default to PROTOCOL_SSLv23 which will negotiate the highest protocol that both the server and your installation of OpenSSL support. :param cert_reqs: Whether to require the certificate verification. This defaults to ``ssl.CERT_REQUIRED``. :param options: Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``, ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``. :param ciphers: Which cipher suites to allow the server to select. :returns: Constructed SSLContext object with specified options :rtype: SSLContext """ context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23) # Setting the default here, as we may have no ssl module on import cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs if options is None: options = 0 # SSLv2 is easily broken and is considered harmful and dangerous options |= OP_NO_SSLv2 # SSLv3 has several problems and is now dangerous options |= OP_NO_SSLv3 # Disable compression to prevent CRIME attacks for OpenSSL 1.0+ # (issue #309) options |= OP_NO_COMPRESSION context.options |= options if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6 context.set_ciphers(ciphers or DEFAULT_CIPHERS) context.verify_mode = cert_reqs if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2 # We do our own verification, including fingerprints and alternative # hostnames. So disable it here context.check_hostname = False return context def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, ca_certs=None, server_hostname=None, ssl_version=None, ciphers=None, ssl_context=None, ca_cert_dir=None): """ All arguments except for server_hostname, ssl_context, and ca_cert_dir have the same meaning as they do when using :func:`ssl.wrap_socket`. :param server_hostname: When SNI is supported, the expected hostname of the certificate :param ssl_context: A pre-made :class:`SSLContext` object. If none is provided, one will be created using :func:`create_urllib3_context`. :param ciphers: A string of ciphers we wish the client to support. This is not supported on Python 2.6 as the ssl module does not support it. :param ca_cert_dir: A directory containing CA certificates in multiple separate files, as supported by OpenSSL's -CApath flag or the capath argument to SSLContext.load_verify_locations(). 
""" context = ssl_context if context is None: # Note: This branch of code and all the variables in it are no longer # used by urllib3 itself. We should consider deprecating and removing # this code. context = create_urllib3_context(ssl_version, cert_reqs, ciphers=ciphers) if ca_certs or ca_cert_dir: try: context.load_verify_locations(ca_certs, ca_cert_dir) except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2 raise SSLError(e) # Py33 raises FileNotFoundError which subclasses OSError # These are not equivalent unless we check the errno attribute except OSError as e: # Platform-specific: Python 3.3 and beyond if e.errno == errno.ENOENT: raise SSLError(e) raise elif getattr(context, 'load_default_certs', None) is not None: # try to load OS default certs; works well on Windows (require Python3.4+) context.load_default_certs() if certfile: context.load_cert_chain(certfile, keyfile) # If we detect server_hostname is an IP address then the SNI # extension should not be used according to RFC3546 Section 3.1 # We shouldn't warn the user if SNI isn't available but we would # not be using SNI anyways due to IP address for server_hostname. if ((server_hostname is not None and not is_ipaddress(server_hostname)) or IS_SECURETRANSPORT): if HAS_SNI and server_hostname is not None: return context.wrap_socket(sock, server_hostname=server_hostname) warnings.warn( 'An HTTPS request has been made, but the SNI (Server Name ' 'Indication) extension to TLS is not available on this platform. ' 'This may cause the server to present an incorrect TLS ' 'certificate, which can cause validation failures. You can upgrade to ' 'a newer version of Python to solve this. For more information, see ' 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' '#ssl-warnings', SNIMissingWarning ) return context.wrap_socket(sock) def is_ipaddress(hostname): """Detects whether the hostname given is an IP address. :param str hostname: Hostname to examine. :return: True if the hostname is an IP address, False otherwise. """ if six.PY3 and isinstance(hostname, six.binary_type): # IDN A-label bytes are ASCII compatible. hostname = hostname.decode('ascii') families = [socket.AF_INET] if hasattr(socket, 'AF_INET6'): families.append(socket.AF_INET6) for af in families: try: inet_pton(af, hostname) except (socket.error, ValueError, OSError): pass else: return True return False
13,993
34.24937
93
py
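A brief sketch of the context helpers above; which ciphers the resulting context actually accepts depends on the local OpenSSL build, so treat the printed values as indicative rather than guaranteed.

import ssl

from urllib3.util.ssl_ import (create_urllib3_context, resolve_cert_reqs,
                               resolve_ssl_version)

# Hardened defaults: SSLv2/SSLv3/compression disabled, restricted cipher list.
ctx = create_urllib3_context(cert_reqs=ssl.CERT_REQUIRED)
print(bool(ctx.options & ssl.OP_NO_SSLv3))   # True
print(ctx.verify_mode == ssl.CERT_REQUIRED)  # True

# The string shorthands used elsewhere in urllib3 resolve to ssl constants.
print(resolve_cert_reqs('REQUIRED') == ssl.CERT_REQUIRED)  # True
print(resolve_ssl_version(None) == ssl.PROTOCOL_SSLv23)    # True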
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/util/retry.py
from __future__ import absolute_import import time import logging from collections import namedtuple from itertools import takewhile import email import re from ..exceptions import ( ConnectTimeoutError, MaxRetryError, ProtocolError, ReadTimeoutError, ResponseError, InvalidHeader, ) from ..packages import six log = logging.getLogger(__name__) # Data structure for representing the metadata of requests that result in a retry. RequestHistory = namedtuple('RequestHistory', ["method", "url", "error", "status", "redirect_location"]) class Retry(object): """ Retry configuration. Each retry attempt will create a new Retry object with updated values, so they can be safely reused. Retries can be defined as a default for a pool:: retries = Retry(connect=5, read=2, redirect=5) http = PoolManager(retries=retries) response = http.request('GET', 'http://example.com/') Or per-request (which overrides the default for the pool):: response = http.request('GET', 'http://example.com/', retries=Retry(10)) Retries can be disabled by passing ``False``:: response = http.request('GET', 'http://example.com/', retries=False) Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless retries are disabled, in which case the causing exception will be raised. :param int total: Total number of retries to allow. Takes precedence over other counts. Set to ``None`` to remove this constraint and fall back on other counts. It's a good idea to set this to some sensibly-high value to account for unexpected edge cases and avoid infinite retry loops. Set to ``0`` to fail on the first retry. Set to ``False`` to disable and imply ``raise_on_redirect=False``. :param int connect: How many connection-related errors to retry on. These are errors raised before the request is sent to the remote server, which we assume has not triggered the server to process the request. Set to ``0`` to fail on the first retry of this type. :param int read: How many times to retry on read errors. These errors are raised after the request was sent to the server, so the request may have side-effects. Set to ``0`` to fail on the first retry of this type. :param int redirect: How many redirects to perform. Limit this to avoid infinite redirect loops. A redirect is a HTTP response with a status code 301, 302, 303, 307 or 308. Set to ``0`` to fail on the first retry of this type. Set to ``False`` to disable and imply ``raise_on_redirect=False``. :param int status: How many times to retry on bad status codes. These are retries made on responses, where status code matches ``status_forcelist``. Set to ``0`` to fail on the first retry of this type. :param iterable method_whitelist: Set of uppercased HTTP method verbs that we should retry on. By default, we only retry on methods which are considered to be idempotent (multiple requests with the same parameters end with the same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`. Set to a ``False`` value to retry on any verb. :param iterable status_forcelist: A set of integer HTTP status codes that we should force a retry on. A retry is initiated if the request method is in ``method_whitelist`` and the response status code is in ``status_forcelist``. By default, this is disabled with ``None``. :param float backoff_factor: A backoff factor to apply between attempts after the second try (most errors are resolved immediately by a second try without a delay). urllib3 will sleep for:: {backoff factor} * (2 ^ ({number of total retries} - 1)) seconds. 
If the backoff_factor is 0.1, then :func:`.sleep` will sleep for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer than :attr:`Retry.BACKOFF_MAX`. By default, backoff is disabled (set to 0). :param bool raise_on_redirect: Whether, if the number of redirects is exhausted, to raise a MaxRetryError, or to return a response with a response code in the 3xx range. :param bool raise_on_status: Similar meaning to ``raise_on_redirect``: whether we should raise an exception, or return a response, if status falls in ``status_forcelist`` range and retries have been exhausted. :param tuple history: The history of the request encountered during each call to :meth:`~Retry.increment`. The list is in the order the requests occurred. Each list item is of class :class:`RequestHistory`. :param bool respect_retry_after_header: Whether to respect Retry-After header on status codes defined as :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not. :param iterable remove_headers_on_redirect: Sequence of headers to remove from the request when a response indicating a redirect is returned before firing off the redirected request. """ DEFAULT_METHOD_WHITELIST = frozenset([ 'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE']) RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503]) DEFAULT_REDIRECT_HEADERS_BLACKLIST = frozenset(['Authorization']) #: Maximum backoff time. BACKOFF_MAX = 120 def __init__(self, total=10, connect=None, read=None, redirect=None, status=None, method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None, backoff_factor=0, raise_on_redirect=True, raise_on_status=True, history=None, respect_retry_after_header=True, remove_headers_on_redirect=DEFAULT_REDIRECT_HEADERS_BLACKLIST): self.total = total self.connect = connect self.read = read self.status = status if redirect is False or total is False: redirect = 0 raise_on_redirect = False self.redirect = redirect self.status_forcelist = status_forcelist or set() self.method_whitelist = method_whitelist self.backoff_factor = backoff_factor self.raise_on_redirect = raise_on_redirect self.raise_on_status = raise_on_status self.history = history or tuple() self.respect_retry_after_header = respect_retry_after_header self.remove_headers_on_redirect = remove_headers_on_redirect def new(self, **kw): params = dict( total=self.total, connect=self.connect, read=self.read, redirect=self.redirect, status=self.status, method_whitelist=self.method_whitelist, status_forcelist=self.status_forcelist, backoff_factor=self.backoff_factor, raise_on_redirect=self.raise_on_redirect, raise_on_status=self.raise_on_status, history=self.history, remove_headers_on_redirect=self.remove_headers_on_redirect ) params.update(kw) return type(self)(**params) @classmethod def from_int(cls, retries, redirect=True, default=None): """ Backwards-compatibility for the old retries format.""" if retries is None: retries = default if default is not None else cls.DEFAULT if isinstance(retries, Retry): return retries redirect = bool(redirect) and None new_retries = cls(retries, redirect=redirect) log.debug("Converted retries value: %r -> %r", retries, new_retries) return new_retries def get_backoff_time(self): """ Formula for computing the current backoff :rtype: float """ # We want to consider only the last consecutive errors sequence (Ignore redirects). 
consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None, reversed(self.history)))) if consecutive_errors_len <= 1: return 0 backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1)) return min(self.BACKOFF_MAX, backoff_value) def parse_retry_after(self, retry_after): # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4 if re.match(r"^\s*[0-9]+\s*$", retry_after): seconds = int(retry_after) else: retry_date_tuple = email.utils.parsedate(retry_after) if retry_date_tuple is None: raise InvalidHeader("Invalid Retry-After header: %s" % retry_after) retry_date = time.mktime(retry_date_tuple) seconds = retry_date - time.time() if seconds < 0: seconds = 0 return seconds def get_retry_after(self, response): """ Get the value of Retry-After in seconds. """ retry_after = response.getheader("Retry-After") if retry_after is None: return None return self.parse_retry_after(retry_after) def sleep_for_retry(self, response=None): retry_after = self.get_retry_after(response) if retry_after: time.sleep(retry_after) return True return False def _sleep_backoff(self): backoff = self.get_backoff_time() if backoff <= 0: return time.sleep(backoff) def sleep(self, response=None): """ Sleep between retry attempts. This method will respect a server's ``Retry-After`` response header and sleep the duration of the time requested. If that is not present, it will use an exponential backoff. By default, the backoff factor is 0 and this method will return immediately. """ if response: slept = self.sleep_for_retry(response) if slept: return self._sleep_backoff() def _is_connection_error(self, err): """ Errors when we're fairly sure that the server did not receive the request, so it should be safe to retry. """ return isinstance(err, ConnectTimeoutError) def _is_read_error(self, err): """ Errors that occur after the request has been started, so we should assume that the server began processing it. """ return isinstance(err, (ReadTimeoutError, ProtocolError)) def _is_method_retryable(self, method): """ Checks if a given HTTP method should be retried upon, depending if it is included on the method whitelist. """ if self.method_whitelist and method.upper() not in self.method_whitelist: return False return True def is_retry(self, method, status_code, has_retry_after=False): """ Is this method/status code retryable? (Based on whitelists and control variables such as the number of total retries to allow, whether to respect the Retry-After header, whether this header is present, and whether the returned status code is on the list of status codes to be retried upon on the presence of the aforementioned header) """ if not self._is_method_retryable(method): return False if self.status_forcelist and status_code in self.status_forcelist: return True return (self.total and self.respect_retry_after_header and has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES)) def is_exhausted(self): """ Are we out of retries? """ retry_counts = (self.total, self.connect, self.read, self.redirect, self.status) retry_counts = list(filter(None, retry_counts)) if not retry_counts: return False return min(retry_counts) < 0 def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None): """ Return a new Retry object with incremented retry counters. :param response: A response object, or None, if the server did not return a response. 
:type response: :class:`~urllib3.response.HTTPResponse` :param Exception error: An error encountered during the request, or None if the response was received successfully. :return: A new ``Retry`` object. """ if self.total is False and error: # Disabled, indicate to re-raise the error. raise six.reraise(type(error), error, _stacktrace) total = self.total if total is not None: total -= 1 connect = self.connect read = self.read redirect = self.redirect status_count = self.status cause = 'unknown' status = None redirect_location = None if error and self._is_connection_error(error): # Connect retry? if connect is False: raise six.reraise(type(error), error, _stacktrace) elif connect is not None: connect -= 1 elif error and self._is_read_error(error): # Read retry? if read is False or not self._is_method_retryable(method): raise six.reraise(type(error), error, _stacktrace) elif read is not None: read -= 1 elif response and response.get_redirect_location(): # Redirect retry? if redirect is not None: redirect -= 1 cause = 'too many redirects' redirect_location = response.get_redirect_location() status = response.status else: # Incrementing because of a server error like a 500 in # status_forcelist and a the given method is in the whitelist cause = ResponseError.GENERIC_ERROR if response and response.status: if status_count is not None: status_count -= 1 cause = ResponseError.SPECIFIC_ERROR.format( status_code=response.status) status = response.status history = self.history + (RequestHistory(method, url, error, status, redirect_location),) new_retry = self.new( total=total, connect=connect, read=read, redirect=redirect, status=status_count, history=history) if new_retry.is_exhausted(): raise MaxRetryError(_pool, url, error or ResponseError(cause)) log.debug("Incremented Retry for (url='%s'): %r", url, new_retry) return new_retry def __repr__(self): return ('{cls.__name__}(total={self.total}, connect={self.connect}, ' 'read={self.read}, redirect={self.redirect}, status={self.status})').format( cls=type(self), self=self) # For backwards compatibility (equivalent to pre-v1.9): Retry.DEFAULT = Retry(3)
15,104
35.662621
97
py
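The counters and backoff described above can be exercised without any network traffic, since increment() simply returns a new Retry object; the method and URL below are placeholders.

from urllib3.exceptions import ConnectTimeoutError
from urllib3.util.retry import Retry

retries = Retry(total=3, connect=2, backoff_factor=0.5, status_forcelist=[502, 503])

# Each retryable error produces a *new* Retry object with decremented counters.
r1 = retries.increment(method='GET', url='/index', error=ConnectTimeoutError())
r2 = r1.increment(method='GET', url='/index', error=ConnectTimeoutError())

print(r2.total, r2.connect)     # 1 0
print(r2.get_backoff_time())    # 1.0  (0.5 * 2 ** (2 - 1), capped at BACKOFF_MAX)
print(r2.is_exhausted())        # False
print(r2.is_retry('GET', 503))  # True: GET is whitelisted and 503 is in the forcelist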
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/util/connection.py
from __future__ import absolute_import import socket from .wait import NoWayToWaitForSocketError, wait_for_read def is_connection_dropped(conn): # Platform-specific """ Returns True if the connection is dropped and should be closed. :param conn: :class:`httplib.HTTPConnection` object. Note: For platforms like AppEngine, this will always return ``False`` to let the platform handle connection recycling transparently for us. """ sock = getattr(conn, 'sock', False) if sock is False: # Platform-specific: AppEngine return False if sock is None: # Connection already closed (such as by httplib). return True try: # Returns True if readable, which here means it's been dropped return wait_for_read(sock, timeout=0.0) except NoWayToWaitForSocketError: # Platform-specific: AppEngine return False # This function is copied from socket.py in the Python 2.7 standard # library test suite. Added to its signature is only `socket_options`. # One additional modification is that we avoid binding to IPv6 servers # discovered in DNS if the system doesn't have IPv6 functionality. def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None, socket_options=None): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`getdefaulttimeout` is used. If *source_address* is set it must be a tuple of (host, port) for the socket to bind as a source address before making the connection. An host of '' or port 0 tells the OS to use the default. """ host, port = address if host.startswith('['): host = host.strip('[]') err = None # Using the value from allowed_gai_family() in the context of getaddrinfo lets # us select whether to work with IPv4 DNS records, IPv6 records, or both. # The original create_connection function always returns all records. family = allowed_gai_family() for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res sock = None try: sock = socket.socket(af, socktype, proto) # If provided, set socket level options before connecting. _set_socket_options(sock, socket_options) if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(timeout) if source_address: sock.bind(source_address) sock.connect(sa) return sock except socket.error as e: err = e if sock is not None: sock.close() sock = None if err is not None: raise err raise socket.error("getaddrinfo returns an empty list") def _set_socket_options(sock, options): if options is None: return for opt in options: sock.setsockopt(*opt) def allowed_gai_family(): """This function is designed to work in the context of getaddrinfo, where family=socket.AF_UNSPEC is the default and will perform a DNS search for both IPv6 and IPv4 records.""" family = socket.AF_INET if HAS_IPV6: family = socket.AF_UNSPEC return family def _has_ipv6(host): """ Returns True if the system can bind an IPv6 address. """ sock = None has_ipv6 = False if socket.has_ipv6: # has_ipv6 returns true if cPython was compiled with IPv6 support. # It does not tell us if the system has IPv6 support enabled. To # determine that we must bind to an IPv6 address. 
# https://github.com/shazow/urllib3/pull/611 # https://bugs.python.org/issue658327 try: sock = socket.socket(socket.AF_INET6) sock.bind((host, 0)) has_ipv6 = True except Exception: pass if sock: sock.close() return has_ipv6 HAS_IPV6 = _has_ipv6('::1')
4,279
32.700787
82
py
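A sketch of create_connection() with per-socket options, mirroring the kind of option urllib3's connection classes set by default; it needs outbound network access, and example.com is used purely as a placeholder host.

import socket

from urllib3.util.connection import create_connection

sock = create_connection(
    ('example.com', 80),
    timeout=5.0,
    socket_options=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
)
try:
    sock.sendall(b'HEAD / HTTP/1.1\r\nHost: example.com\r\nConnection: close\r\n\r\n')
    print(sock.recv(32))  # first bytes of the status line, e.g. b'HTTP/1.1 200 OK...'
finally:
    sock.close()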
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/util/queue.py
import collections from ..packages import six from ..packages.six.moves import queue if six.PY2: # Queue is imported for side effects on MS Windows. See issue #229. import Queue as _unused_module_Queue # noqa: F401 class LifoQueue(queue.Queue): def _init(self, _): self.queue = collections.deque() def _qsize(self, len=len): return len(self.queue) def _put(self, item): self.queue.append(item) def _get(self): return self.queue.pop()
497
21.636364
71
py
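The overrides above turn the FIFO Queue into a LIFO stack, which is what lets connection pools hand back the most recently returned (still warm) connection first; a tiny offline sketch with made-up connection names:

from urllib3.util.queue import LifoQueue

pool = LifoQueue(maxsize=3)
for name in ('conn-1', 'conn-2', 'conn-3'):
    pool.put(name)

print(pool.get())    # 'conn-3'  -- last in, first out
print(pool.get())    # 'conn-2'
print(pool.qsize())  # 1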
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/util/timeout.py
from __future__ import absolute_import # The default socket timeout, used by httplib to indicate that no timeout was # specified by the user from socket import _GLOBAL_DEFAULT_TIMEOUT import time from ..exceptions import TimeoutStateError # A sentinel value to indicate that no timeout was specified by the user in # urllib3 _Default = object() # Use time.monotonic if available. current_time = getattr(time, "monotonic", time.time) class Timeout(object): """ Timeout configuration. Timeouts can be defined as a default for a pool:: timeout = Timeout(connect=2.0, read=7.0) http = PoolManager(timeout=timeout) response = http.request('GET', 'http://example.com/') Or per-request (which overrides the default for the pool):: response = http.request('GET', 'http://example.com/', timeout=Timeout(10)) Timeouts can be disabled by setting all the parameters to ``None``:: no_timeout = Timeout(connect=None, read=None) response = http.request('GET', 'http://example.com/, timeout=no_timeout) :param total: This combines the connect and read timeouts into one; the read timeout will be set to the time leftover from the connect attempt. In the event that both a connect timeout and a total are specified, or a read timeout and a total are specified, the shorter timeout will be applied. Defaults to None. :type total: integer, float, or None :param connect: The maximum amount of time to wait for a connection attempt to a server to succeed. Omitting the parameter will default the connect timeout to the system default, probably `the global default timeout in socket.py <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_. None will set an infinite timeout for connection attempts. :type connect: integer, float, or None :param read: The maximum amount of time to wait between consecutive read operations for a response from the server. Omitting the parameter will default the read timeout to the system default, probably `the global default timeout in socket.py <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_. None will set an infinite timeout. :type read: integer, float, or None .. note:: Many factors can affect the total amount of time for urllib3 to return an HTTP response. For example, Python's DNS resolver does not obey the timeout specified on the socket. Other factors that can affect total request time include high CPU load, high swap, the program running at a low priority level, or other behaviors. In addition, the read and total timeouts only measure the time between read operations on the socket connecting the client and the server, not the total amount of time for the request to return a complete response. For most requests, the timeout is raised because the server has not sent the first byte in the specified time. This is not always the case; if a server streams one byte every fifteen seconds, a timeout of 20 seconds will not trigger, even though the request will take several minutes to complete. If your goal is to cut off any request after a set amount of wall clock time, consider having a second "watcher" thread to cut off a slow request. 
""" #: A sentinel object representing the default timeout value DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT def __init__(self, total=None, connect=_Default, read=_Default): self._connect = self._validate_timeout(connect, 'connect') self._read = self._validate_timeout(read, 'read') self.total = self._validate_timeout(total, 'total') self._start_connect = None def __str__(self): return '%s(connect=%r, read=%r, total=%r)' % ( type(self).__name__, self._connect, self._read, self.total) @classmethod def _validate_timeout(cls, value, name): """ Check that a timeout attribute is valid. :param value: The timeout value to validate :param name: The name of the timeout attribute to validate. This is used to specify in error messages. :return: The validated and casted version of the given value. :raises ValueError: If it is a numeric value less than or equal to zero, or the type is not an integer, float, or None. """ if value is _Default: return cls.DEFAULT_TIMEOUT if value is None or value is cls.DEFAULT_TIMEOUT: return value if isinstance(value, bool): raise ValueError("Timeout cannot be a boolean value. It must " "be an int, float or None.") try: float(value) except (TypeError, ValueError): raise ValueError("Timeout value %s was %s, but it must be an " "int, float or None." % (name, value)) try: if value <= 0: raise ValueError("Attempted to set %s timeout to %s, but the " "timeout cannot be set to a value less " "than or equal to 0." % (name, value)) except TypeError: # Python 3 raise ValueError("Timeout value %s was %s, but it must be an " "int, float or None." % (name, value)) return value @classmethod def from_float(cls, timeout): """ Create a new Timeout from a legacy timeout value. The timeout value used by httplib.py sets the same timeout on the connect(), and recv() socket requests. This creates a :class:`Timeout` object that sets the individual timeouts to the ``timeout`` value passed to this function. :param timeout: The legacy timeout value. :type timeout: integer, float, sentinel default object, or None :return: Timeout object :rtype: :class:`Timeout` """ return Timeout(read=timeout, connect=timeout) def clone(self): """ Create a copy of the timeout object Timeout properties are stored per-pool but each request needs a fresh Timeout object to ensure each one has its own start/stop configured. :return: a copy of the timeout object :rtype: :class:`Timeout` """ # We can't use copy.deepcopy because that will also create a new object # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to # detect the user default. return Timeout(connect=self._connect, read=self._read, total=self.total) def start_connect(self): """ Start the timeout clock, used during a connect() attempt :raises urllib3.exceptions.TimeoutStateError: if you attempt to start a timer that has been started already. """ if self._start_connect is not None: raise TimeoutStateError("Timeout timer has already been started.") self._start_connect = current_time() return self._start_connect def get_connect_duration(self): """ Gets the time elapsed since the call to :meth:`start_connect`. :return: Elapsed time. :rtype: float :raises urllib3.exceptions.TimeoutStateError: if you attempt to get duration for a timer that hasn't been started. """ if self._start_connect is None: raise TimeoutStateError("Can't get connect duration for timer " "that has not started.") return current_time() - self._start_connect @property def connect_timeout(self): """ Get the value to use when setting a connection timeout. 
This will be a positive float or integer, the value None (never timeout), or the default system timeout. :return: Connect timeout. :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None """ if self.total is None: return self._connect if self._connect is None or self._connect is self.DEFAULT_TIMEOUT: return self.total return min(self._connect, self.total) @property def read_timeout(self): """ Get the value for the read timeout. This assumes some time has elapsed in the connection timeout and computes the read timeout appropriately. If self.total is set, the read timeout is dependent on the amount of time taken by the connect timeout. If the connection time has not been established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be raised. :return: Value to use for the read timeout. :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect` has not yet been called on this object. """ if (self.total is not None and self.total is not self.DEFAULT_TIMEOUT and self._read is not None and self._read is not self.DEFAULT_TIMEOUT): # In case the connect timeout has not yet been established. if self._start_connect is None: return self._read return max(0, min(self.total - self.get_connect_duration(), self._read)) elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT: return max(0, self.total - self.get_connect_duration()) else: return self._read
9,757
39.156379
82
py
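A sketch of the connect/read/total interaction documented above; no sockets are involved, the object only does the bookkeeping, and the sleep stands in for a handshake.

import time

from urllib3.util.timeout import Timeout

t = Timeout(connect=2.0, read=7.0, total=10.0).clone()
print(t.connect_timeout)  # 2.0 -- the smaller of connect and total

t.start_connect()         # the connection code starts the clock here
time.sleep(0.2)           # pretend the TCP handshake took a moment
print(t.get_connect_duration() >= 0.2)  # True
print(t.read_timeout)     # 7.0 -- still below what is left of the 10s total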
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/util/wait.py
import errno from functools import partial import select import sys try: from time import monotonic except ImportError: from time import time as monotonic __all__ = ["NoWayToWaitForSocketError", "wait_for_read", "wait_for_write"] class NoWayToWaitForSocketError(Exception): pass # How should we wait on sockets? # # There are two types of APIs you can use for waiting on sockets: the fancy # modern stateful APIs like epoll/kqueue, and the older stateless APIs like # select/poll. The stateful APIs are more efficient when you have a lots of # sockets to keep track of, because you can set them up once and then use them # lots of times. But we only ever want to wait on a single socket at a time # and don't want to keep track of state, so the stateless APIs are actually # more efficient. So we want to use select() or poll(). # # Now, how do we choose between select() and poll()? On traditional Unixes, # select() has a strange calling convention that makes it slow, or fail # altogether, for high-numbered file descriptors. The point of poll() is to fix # that, so on Unixes, we prefer poll(). # # On Windows, there is no poll() (or at least Python doesn't provide a wrapper # for it), but that's OK, because on Windows, select() doesn't have this # strange calling convention; plain select() works fine. # # So: on Windows we use select(), and everywhere else we use poll(). We also # fall back to select() in case poll() is somehow broken or missing. if sys.version_info >= (3, 5): # Modern Python, that retries syscalls by default def _retry_on_intr(fn, timeout): return fn(timeout) else: # Old and broken Pythons. def _retry_on_intr(fn, timeout): if timeout is not None and timeout <= 0: return fn(timeout) if timeout is None: deadline = float("inf") else: deadline = monotonic() + timeout while True: try: return fn(timeout) # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7 except (OSError, select.error) as e: # 'e.args[0]' incantation works for both OSError and select.error if e.args[0] != errno.EINTR: raise else: timeout = deadline - monotonic() if timeout < 0: timeout = 0 if timeout == float("inf"): timeout = None continue def select_wait_for_socket(sock, read=False, write=False, timeout=None): if not read and not write: raise RuntimeError("must specify at least one of read=True, write=True") rcheck = [] wcheck = [] if read: rcheck.append(sock) if write: wcheck.append(sock) # When doing a non-blocking connect, most systems signal success by # marking the socket writable. Windows, though, signals success by marked # it as "exceptional". We paper over the difference by checking the write # sockets for both conditions. (The stdlib selectors module does the same # thing.) 
fn = partial(select.select, rcheck, wcheck, wcheck) rready, wready, xready = _retry_on_intr(fn, timeout) return bool(rready or wready or xready) def poll_wait_for_socket(sock, read=False, write=False, timeout=None): if not read and not write: raise RuntimeError("must specify at least one of read=True, write=True") mask = 0 if read: mask |= select.POLLIN if write: mask |= select.POLLOUT poll_obj = select.poll() poll_obj.register(sock, mask) # For some reason, poll() takes timeout in milliseconds def do_poll(t): if t is not None: t *= 1000 return poll_obj.poll(t) return bool(_retry_on_intr(do_poll, timeout)) def null_wait_for_socket(*args, **kwargs): raise NoWayToWaitForSocketError("no select-equivalent available") def _have_working_poll(): # Apparently some systems have a select.poll that fails as soon as you try # to use it, either due to strange configuration or broken monkeypatching # from libraries like eventlet/greenlet. try: poll_obj = select.poll() poll_obj.poll(0) except (AttributeError, OSError): return False else: return True def wait_for_socket(*args, **kwargs): # We delay choosing which implementation to use until the first time we're # called. We could do it at import time, but then we might make the wrong # decision if someone goes wild with monkeypatching select.poll after # we're imported. global wait_for_socket if _have_working_poll(): wait_for_socket = poll_wait_for_socket elif hasattr(select, "select"): wait_for_socket = select_wait_for_socket else: # Platform-specific: Appengine. wait_for_socket = null_wait_for_socket return wait_for_socket(*args, **kwargs) def wait_for_read(sock, timeout=None): """ Waits for reading to be available on a given socket. Returns True if the socket is readable, or False if the timeout expired. """ return wait_for_socket(sock, read=True, timeout=timeout) def wait_for_write(sock, timeout=None): """ Waits for writing to be available on a given socket. Returns True if the socket is readable, or False if the timeout expired. """ return wait_for_socket(sock, write=True, timeout=timeout)
5,468
34.512987
81
py
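The selector shims above can be seen end-to-end with a local socket pair: a fresh socket is immediately writable, and becomes readable only once the peer has sent something. socketpair() is used so the sketch needs no network.

import socket

from urllib3.util.wait import wait_for_read, wait_for_write

a, b = socket.socketpair()
try:
    print(wait_for_write(a, timeout=1.0))  # True: the send buffer has room
    print(wait_for_read(a, timeout=0.1))   # False: nothing has arrived yet

    b.sendall(b'ping')
    print(wait_for_read(a, timeout=1.0))   # True: data is waiting to be read
finally:
    a.close()
    b.close()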
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/util/response.py
from __future__ import absolute_import from ..packages.six.moves import http_client as httplib from ..exceptions import HeaderParsingError def is_fp_closed(obj): """ Checks whether a given file-like object is closed. :param obj: The file-like object to check. """ try: # Check `isclosed()` first, in case Python3 doesn't set `closed`. # GH Issue #928 return obj.isclosed() except AttributeError: pass try: # Check via the official file-like-object way. return obj.closed except AttributeError: pass try: # Check if the object is a container for another file-like object that # gets released on exhaustion (e.g. HTTPResponse). return obj.fp is None except AttributeError: pass raise ValueError("Unable to determine whether fp is closed.") def assert_header_parsing(headers): """ Asserts whether all headers have been successfully parsed. Extracts encountered errors from the result of parsing headers. Only works on Python 3. :param headers: Headers to verify. :type headers: `httplib.HTTPMessage`. :raises urllib3.exceptions.HeaderParsingError: If parsing errors are found. """ # This will fail silently if we pass in the wrong kind of parameter. # To make debugging easier add an explicit check. if not isinstance(headers, httplib.HTTPMessage): raise TypeError('expected httplib.Message, got {0}.'.format( type(headers))) defects = getattr(headers, 'defects', None) get_payload = getattr(headers, 'get_payload', None) unparsed_data = None if get_payload: # Platform-specific: Python 3. unparsed_data = get_payload() if defects or unparsed_data: raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data) def is_response_to_head(response): """ Checks whether the request of a response has been a HEAD-request. Handles the quirks of AppEngine. :param conn: :type conn: :class:`httplib.HTTPResponse` """ # FIXME: Can we do this somehow without accessing private httplib _method? method = response._method if isinstance(method, int): # Platform-specific: Appengine return method == 3 return method.upper() == 'HEAD'
2,343
27.585366
78
py
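is_fp_closed() above probes several file-like conventions in turn; a BytesIO object (standing in for a response body) exercises the plain ``.closed`` branch:

import io

from urllib3.util.response import is_fp_closed

body = io.BytesIO(b'payload')
print(is_fp_closed(body))  # False: the buffer is still open
body.close()
print(is_fp_closed(body))  # True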
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/util/__init__.py
from __future__ import absolute_import # For backwards compatibility, provide imports that used to be here. from .connection import is_connection_dropped from .request import make_headers from .response import is_fp_closed from .ssl_ import ( SSLContext, HAS_SNI, IS_PYOPENSSL, IS_SECURETRANSPORT, assert_fingerprint, resolve_cert_reqs, resolve_ssl_version, ssl_wrap_socket, ) from .timeout import ( current_time, Timeout, ) from .retry import Retry from .url import ( get_host, parse_url, split_first, Url, ) from .wait import ( wait_for_read, wait_for_write ) __all__ = ( 'HAS_SNI', 'IS_PYOPENSSL', 'IS_SECURETRANSPORT', 'SSLContext', 'Retry', 'Timeout', 'Url', 'assert_fingerprint', 'current_time', 'is_connection_dropped', 'is_fp_closed', 'get_host', 'parse_url', 'make_headers', 'resolve_cert_reqs', 'resolve_ssl_version', 'split_first', 'ssl_wrap_socket', 'wait_for_read', 'wait_for_write' )
1,044
18
68
py
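Because of the re-exports above, a common pattern is to build the policy objects straight from urllib3.util and hand them to a pool, echoing the docstrings earlier in this dump; the actual request is left commented out since it would need network access.

from urllib3 import PoolManager
from urllib3.util import Retry, Timeout, parse_url

http = PoolManager(timeout=Timeout(connect=2.0, read=5.0),
                   retries=Retry(total=2, backoff_factor=0.3))

print(parse_url('http://example.com:8080/a').port)  # 8080
# http.request('GET', 'http://example.com/')  # would apply both policies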
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/util/request.py
from __future__ import absolute_import from base64 import b64encode from ..packages.six import b, integer_types from ..exceptions import UnrewindableBodyError ACCEPT_ENCODING = 'gzip,deflate' _FAILEDTELL = object() def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, basic_auth=None, proxy_basic_auth=None, disable_cache=None): """ Shortcuts for generating request headers. :param keep_alive: If ``True``, adds 'connection: keep-alive' header. :param accept_encoding: Can be a boolean, list, or string. ``True`` translates to 'gzip,deflate'. List will get joined by comma. String will be used as provided. :param user_agent: String representing the user-agent you want, such as "python-urllib3/0.6" :param basic_auth: Colon-separated username:password string for 'authorization: basic ...' auth header. :param proxy_basic_auth: Colon-separated username:password string for 'proxy-authorization: basic ...' auth header. :param disable_cache: If ``True``, adds 'cache-control: no-cache' header. Example:: >>> make_headers(keep_alive=True, user_agent="Batman/1.0") {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'} >>> make_headers(accept_encoding=True) {'accept-encoding': 'gzip,deflate'} """ headers = {} if accept_encoding: if isinstance(accept_encoding, str): pass elif isinstance(accept_encoding, list): accept_encoding = ','.join(accept_encoding) else: accept_encoding = ACCEPT_ENCODING headers['accept-encoding'] = accept_encoding if user_agent: headers['user-agent'] = user_agent if keep_alive: headers['connection'] = 'keep-alive' if basic_auth: headers['authorization'] = 'Basic ' + \ b64encode(b(basic_auth)).decode('utf-8') if proxy_basic_auth: headers['proxy-authorization'] = 'Basic ' + \ b64encode(b(proxy_basic_auth)).decode('utf-8') if disable_cache: headers['cache-control'] = 'no-cache' return headers def set_file_position(body, pos): """ If a position is provided, move file to that point. Otherwise, we'll attempt to record a position for future use. """ if pos is not None: rewind_body(body, pos) elif getattr(body, 'tell', None) is not None: try: pos = body.tell() except (IOError, OSError): # This differentiates from None, allowing us to catch # a failed `tell()` later when trying to rewind the body. pos = _FAILEDTELL return pos def rewind_body(body, body_pos): """ Attempt to rewind body to a certain position. Primarily used for request redirects and retries. :param body: File-like object that supports seek. :param int pos: Position to seek to in file. """ body_seek = getattr(body, 'seek', None) if body_seek is not None and isinstance(body_pos, integer_types): try: body_seek(body_pos) except (IOError, OSError): raise UnrewindableBodyError("An error occurred when rewinding request " "body for redirect/retry.") elif body_pos is _FAILEDTELL: raise UnrewindableBodyError("Unable to record file position for rewinding " "request body during a redirect/retry.") else: raise ValueError("body_pos must be of type integer, " "instead it was %s." % type(body_pos))
3,705
30.142857
85
py
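The helpers above can be combined as in this sketch; the field data is made up, and the Base64 value shown is simply the encoding of the placeholder credentials 'user:pw'.

import io

from urllib3.util.request import make_headers, rewind_body, set_file_position

headers = make_headers(keep_alive=True, accept_encoding=True, basic_auth='user:pw')
print(headers['connection'])       # 'keep-alive'
print(headers['accept-encoding'])  # 'gzip,deflate'
print(headers['authorization'])    # 'Basic dXNlcjpwdw=='

# Record the body position before a request, then rewind it for a retry.
body = io.BytesIO(b'field=value')
pos = set_file_position(body, None)  # remembers position 0
body.read()                          # the first attempt consumes the body
rewind_body(body, pos)               # seek back before re-sending
print(body.read())                   # b'field=value'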
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/packages/six.py
"""Utilities for writing code that runs on Python 2 and 3""" # Copyright (c) 2010-2015 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import absolute_import import functools import itertools import operator import sys import types __author__ = "Benjamin Peterson <[email protected]>" __version__ = "1.10.0" # Useful for very coarse version differentiation. PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 PY34 = sys.version_info[0:2] >= (3, 4) if PY3: string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str if sys.platform.startswith("java"): # Jython always uses 32 bits. MAXSIZE = int((1 << 31) - 1) else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). class X(object): def __len__(self): return 1 << 31 try: len(X()) except OverflowError: # 32-bit MAXSIZE = int((1 << 31) - 1) else: # 64-bit MAXSIZE = int((1 << 63) - 1) del X def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc def _import_module(name): """Import module, returning the module after the last dot.""" __import__(name) return sys.modules[name] class _LazyDescr(object): def __init__(self, name): self.name = name def __get__(self, obj, tp): result = self._resolve() setattr(obj, self.name, result) # Invokes __set__. try: # This is a bit ugly, but it avoids running this again by # removing this descriptor. 
delattr(obj.__class__, self.name) except AttributeError: pass return result class MovedModule(_LazyDescr): def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: if new is None: new = name self.mod = new else: self.mod = old def _resolve(self): return _import_module(self.mod) def __getattr__(self, attr): _module = self._resolve() value = getattr(_module, attr) setattr(self, attr, value) return value class _LazyModule(types.ModuleType): def __init__(self, name): super(_LazyModule, self).__init__(name) self.__doc__ = self.__class__.__doc__ def __dir__(self): attrs = ["__doc__", "__name__"] attrs += [attr.name for attr in self._moved_attributes] return attrs # Subclasses should override this _moved_attributes = [] class MovedAttribute(_LazyDescr): def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: if new_mod is None: new_mod = name self.mod = new_mod if new_attr is None: if old_attr is None: new_attr = name else: new_attr = old_attr self.attr = new_attr else: self.mod = old_mod if old_attr is None: old_attr = name self.attr = old_attr def _resolve(self): module = _import_module(self.mod) return getattr(module, self.attr) class _SixMetaPathImporter(object): """ A meta path importer to import six.moves and its submodules. This class implements a PEP302 finder and loader. It should be compatible with Python 2.5 and all existing versions of Python3 """ def __init__(self, six_module_name): self.name = six_module_name self.known_modules = {} def _add_module(self, mod, *fullnames): for fullname in fullnames: self.known_modules[self.name + "." + fullname] = mod def _get_module(self, fullname): return self.known_modules[self.name + "." + fullname] def find_module(self, fullname, path=None): if fullname in self.known_modules: return self return None def __get_module(self, fullname): try: return self.known_modules[fullname] except KeyError: raise ImportError("This loader does not know module " + fullname) def load_module(self, fullname): try: # in case of a reload return sys.modules[fullname] except KeyError: pass mod = self.__get_module(fullname) if isinstance(mod, MovedModule): mod = mod._resolve() else: mod.__loader__ = self sys.modules[fullname] = mod return mod def is_package(self, fullname): """ Return true, if the named module is a package. 
We need this method to get correct spec objects with Python 3.4 (see PEP451) """ return hasattr(self.__get_module(fullname), "__path__") def get_code(self, fullname): """Return None Required, if is_package is implemented""" self.__get_module(fullname) # eventually raises ImportError return None get_source = get_code # same as get_code _importer = _SixMetaPathImporter(__name__) class _MovedItems(_LazyModule): """Lazy loading of moved objects""" __path__ = [] # mark as package _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("intern", "__builtin__", "sys"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), MovedAttribute("UserDict", "UserDict", "collections"), MovedAttribute("UserList", "UserList", "collections"), MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), MovedModule("cPickle", "cPickle", "pickle"), MovedModule("queue", "Queue"), MovedModule("reprlib", "repr"), MovedModule("socketserver", "SocketServer"), MovedModule("_thread", "thread", "_thread"), MovedModule("tkinter", "Tkinter"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"), MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_colorchooser", 
"tkColorChooser", "tkinter.colorchooser"), MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), ] # Add windows specific modules. if sys.platform == "win32": _moved_attributes += [ MovedModule("winreg", "_winreg"), ] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) if isinstance(attr, MovedModule): _importer._add_module(attr, "moves." + attr.name) del attr _MovedItems._moved_attributes = _moved_attributes moves = _MovedItems(__name__ + ".moves") _importer._add_module(moves, "moves") class Module_six_moves_urllib_parse(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_parse""" _urllib_parse_moved_attributes = [ MovedAttribute("ParseResult", "urlparse", "urllib.parse"), MovedAttribute("SplitResult", "urlparse", "urllib.parse"), MovedAttribute("parse_qs", "urlparse", "urllib.parse"), MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), MovedAttribute("urldefrag", "urlparse", "urllib.parse"), MovedAttribute("urljoin", "urlparse", "urllib.parse"), MovedAttribute("urlparse", "urlparse", "urllib.parse"), MovedAttribute("urlsplit", "urlparse", "urllib.parse"), MovedAttribute("urlunparse", "urlparse", "urllib.parse"), MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), MovedAttribute("quote", "urllib", "urllib.parse"), MovedAttribute("quote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("splitquery", "urllib", "urllib.parse"), MovedAttribute("splittag", "urllib", "urllib.parse"), MovedAttribute("splituser", "urllib", "urllib.parse"), MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), MovedAttribute("uses_params", "urlparse", "urllib.parse"), MovedAttribute("uses_query", "urlparse", "urllib.parse"), MovedAttribute("uses_relative", "urlparse", "urllib.parse"), ] for attr in _urllib_parse_moved_attributes: setattr(Module_six_moves_urllib_parse, attr.name, attr) del attr Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes _importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), "moves.urllib_parse", "moves.urllib.parse") class Module_six_moves_urllib_error(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_error""" _urllib_error_moved_attributes = [ MovedAttribute("URLError", "urllib2", "urllib.error"), MovedAttribute("HTTPError", "urllib2", "urllib.error"), MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), ] for attr in _urllib_error_moved_attributes: setattr(Module_six_moves_urllib_error, attr.name, attr) del attr Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes 
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), "moves.urllib_error", "moves.urllib.error") class Module_six_moves_urllib_request(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_request""" _urllib_request_moved_attributes = [ MovedAttribute("urlopen", "urllib2", "urllib.request"), MovedAttribute("install_opener", "urllib2", "urllib.request"), MovedAttribute("build_opener", "urllib2", "urllib.request"), MovedAttribute("pathname2url", "urllib", "urllib.request"), MovedAttribute("url2pathname", "urllib", "urllib.request"), MovedAttribute("getproxies", "urllib", "urllib.request"), MovedAttribute("Request", "urllib2", "urllib.request"), MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), MovedAttribute("BaseHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), MovedAttribute("FileHandler", "urllib2", "urllib.request"), MovedAttribute("FTPHandler", "urllib2", "urllib.request"), MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), MovedAttribute("urlretrieve", "urllib", "urllib.request"), MovedAttribute("urlcleanup", "urllib", "urllib.request"), MovedAttribute("URLopener", "urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("proxy_bypass", "urllib", "urllib.request"), ] for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) del attr Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes _importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), "moves.urllib_request", "moves.urllib.request") class Module_six_moves_urllib_response(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_response""" _urllib_response_moved_attributes = [ MovedAttribute("addbase", "urllib", "urllib.response"), MovedAttribute("addclosehook", "urllib", "urllib.response"), MovedAttribute("addinfo", "urllib", "urllib.response"), MovedAttribute("addinfourl", "urllib", "urllib.response"), ] for attr in _urllib_response_moved_attributes: setattr(Module_six_moves_urllib_response, attr.name, attr) del attr Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes _importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), "moves.urllib_response", "moves.urllib.response") class Module_six_moves_urllib_robotparser(_LazyModule): """Lazy loading of moved objects 
in six.moves.urllib_robotparser""" _urllib_robotparser_moved_attributes = [ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), ] for attr in _urllib_robotparser_moved_attributes: setattr(Module_six_moves_urllib_robotparser, attr.name, attr) del attr Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes _importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), "moves.urllib_robotparser", "moves.urllib.robotparser") class Module_six_moves_urllib(types.ModuleType): """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" __path__ = [] # mark as package parse = _importer._get_module("moves.urllib_parse") error = _importer._get_module("moves.urllib_error") request = _importer._get_module("moves.urllib_request") response = _importer._get_module("moves.urllib_response") robotparser = _importer._get_module("moves.urllib_robotparser") def __dir__(self): return ['parse', 'error', 'request', 'response', 'robotparser'] _importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), "moves.urllib") def add_move(move): """Add an item to six.moves.""" setattr(_MovedItems, move.name, move) def remove_move(name): """Remove item from six.moves.""" try: delattr(_MovedItems, name) except AttributeError: try: del moves.__dict__[name] except KeyError: raise AttributeError("no such move, %r" % (name,)) if PY3: _meth_func = "__func__" _meth_self = "__self__" _func_closure = "__closure__" _func_code = "__code__" _func_defaults = "__defaults__" _func_globals = "__globals__" else: _meth_func = "im_func" _meth_self = "im_self" _func_closure = "func_closure" _func_code = "func_code" _func_defaults = "func_defaults" _func_globals = "func_globals" try: advance_iterator = next except NameError: def advance_iterator(it): return it.next() next = advance_iterator try: callable = callable except NameError: def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) if PY3: def get_unbound_function(unbound): return unbound create_bound_method = types.MethodType def create_unbound_method(func, cls): return func Iterator = object else: def get_unbound_function(unbound): return unbound.im_func def create_bound_method(func, obj): return types.MethodType(func, obj, obj.__class__) def create_unbound_method(func, cls): return types.MethodType(func, None, cls) class Iterator(object): def next(self): return type(self).__next__(self) callable = callable _add_doc(get_unbound_function, """Get the function out of a possibly unbound function""") get_method_function = operator.attrgetter(_meth_func) get_method_self = operator.attrgetter(_meth_self) get_function_closure = operator.attrgetter(_func_closure) get_function_code = operator.attrgetter(_func_code) get_function_defaults = operator.attrgetter(_func_defaults) get_function_globals = operator.attrgetter(_func_globals) if PY3: def iterkeys(d, **kw): return iter(d.keys(**kw)) def itervalues(d, **kw): return iter(d.values(**kw)) def iteritems(d, **kw): return iter(d.items(**kw)) def iterlists(d, **kw): return iter(d.lists(**kw)) viewkeys = operator.methodcaller("keys") viewvalues = operator.methodcaller("values") viewitems = operator.methodcaller("items") else: def iterkeys(d, **kw): return d.iterkeys(**kw) def itervalues(d, **kw): return d.itervalues(**kw) def iteritems(d, **kw): return d.iteritems(**kw) def iterlists(d, **kw): return d.iterlists(**kw) viewkeys = operator.methodcaller("viewkeys") viewvalues = 
operator.methodcaller("viewvalues") viewitems = operator.methodcaller("viewitems") _add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") _add_doc(itervalues, "Return an iterator over the values of a dictionary.") _add_doc(iteritems, "Return an iterator over the (key, value) pairs of a dictionary.") _add_doc(iterlists, "Return an iterator over the (key, [values]) pairs of a dictionary.") if PY3: def b(s): return s.encode("latin-1") def u(s): return s unichr = chr import struct int2byte = struct.Struct(">B").pack del struct byte2int = operator.itemgetter(0) indexbytes = operator.getitem iterbytes = iter import io StringIO = io.StringIO BytesIO = io.BytesIO _assertCountEqual = "assertCountEqual" if sys.version_info[1] <= 1: _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" else: _assertRaisesRegex = "assertRaisesRegex" _assertRegex = "assertRegex" else: def b(s): return s # Workaround for standalone backslash def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") unichr = unichr int2byte = chr def byte2int(bs): return ord(bs[0]) def indexbytes(buf, i): return ord(buf[i]) iterbytes = functools.partial(itertools.imap, ord) import StringIO StringIO = BytesIO = StringIO.StringIO _assertCountEqual = "assertItemsEqual" _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") def assertCountEqual(self, *args, **kwargs): return getattr(self, _assertCountEqual)(*args, **kwargs) def assertRaisesRegex(self, *args, **kwargs): return getattr(self, _assertRaisesRegex)(*args, **kwargs) def assertRegex(self, *args, **kwargs): return getattr(self, _assertRegex)(*args, **kwargs) if PY3: exec_ = getattr(moves.builtins, "exec") def reraise(tp, value, tb=None): if value is None: value = tp() if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value else: def exec_(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" if _globs_ is None: frame = sys._getframe(1) _globs_ = frame.f_globals if _locs_ is None: _locs_ = frame.f_locals del frame elif _locs_ is None: _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") exec_("""def reraise(tp, value, tb=None): raise tp, value, tb """) if sys.version_info[:2] == (3, 2): exec_("""def raise_from(value, from_value): if from_value is None: raise value raise value from from_value """) elif sys.version_info[:2] > (3, 2): exec_("""def raise_from(value, from_value): raise value from from_value """) else: def raise_from(value, from_value): raise value print_ = getattr(moves.builtins, "print", None) if print_ is None: def print_(*args, **kwargs): """The new-style print function for Python 2.4 and 2.5.""" fp = kwargs.pop("file", sys.stdout) if fp is None: return def write(data): if not isinstance(data, basestring): data = str(data) # If the file has an encoding, encode unicode with it. 
if (isinstance(fp, file) and isinstance(data, unicode) and fp.encoding is not None): errors = getattr(fp, "errors", None) if errors is None: errors = "strict" data = data.encode(fp.encoding, errors) fp.write(data) want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: if isinstance(sep, unicode): want_unicode = True elif not isinstance(sep, str): raise TypeError("sep must be None or a string") end = kwargs.pop("end", None) if end is not None: if isinstance(end, unicode): want_unicode = True elif not isinstance(end, str): raise TypeError("end must be None or a string") if kwargs: raise TypeError("invalid keyword arguments to print()") if not want_unicode: for arg in args: if isinstance(arg, unicode): want_unicode = True break if want_unicode: newline = unicode("\n") space = unicode(" ") else: newline = "\n" space = " " if sep is None: sep = space if end is None: end = newline for i, arg in enumerate(args): if i: write(sep) write(arg) write(end) if sys.version_info[:2] < (3, 3): _print = print_ def print_(*args, **kwargs): fp = kwargs.get("file", sys.stdout) flush = kwargs.pop("flush", False) _print(*args, **kwargs) if flush and fp is not None: fp.flush() _add_doc(reraise, """Reraise an exception.""") if sys.version_info[0:2] < (3, 4): def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): def wrapper(f): f = functools.wraps(wrapped, assigned, updated)(f) f.__wrapped__ = wrapped return f return wrapper else: wraps = functools.wraps def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. class metaclass(meta): def __new__(cls, name, this_bases, d): return meta(name, bases, d) return type.__new__(metaclass, 'temporary_class', (), {}) def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" def wrapper(cls): orig_vars = cls.__dict__.copy() slots = orig_vars.get('__slots__') if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper def python_2_unicode_compatible(klass): """ A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class. """ if PY2: if '__str__' not in klass.__dict__: raise ValueError("@python_2_unicode_compatible cannot be applied " "to %s because it doesn't define __str__()." % klass.__name__) klass.__unicode__ = klass.__str__ klass.__str__ = lambda self: self.__unicode__().encode('utf-8') return klass # Complete the moves implementation. # This code is at the end of this module to speed up module loading. # Turn this module into a package. __path__ = [] # required for PEP 302 and PEP 451 __package__ = __name__ # see PEP 366 @ReservedAssignment if globals().get("__spec__") is not None: __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable # Remove other six meta path importers, since they cause problems. This can # happen if six is removed from sys.modules and then reloaded. (Setuptools does # this for some reason.) 
if sys.meta_path: for i, importer in enumerate(sys.meta_path): # Here's some real nastiness: Another "instance" of the six module might # be floating around. Therefore, we can't use isinstance() to check for # the six meta path importer, since the other six instance will have # inserted an importer with different class. if (type(importer).__name__ == "_SixMetaPathImporter" and importer.name == __name__): del sys.meta_path[i] break del i, importer # Finally, add the importer to the meta path import hook. sys.meta_path.append(_importer)
30098
33.636364
98
py
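Editorial sketch (not part of the record above): the vendored six module exists so urllib3 can run unmodified on Python 2 and 3. A few representative helpers, assuming the vendored import path:

from urllib3.packages import six
from urllib3.packages.six import b, u, iteritems, with_metaclass

# Dict iteration that maps to iteritems() on Python 2 and items() on Python 3.
totals = {'a': 1, 'b': 2}
for key, value in iteritems(totals):
    print(key, value)

# Literals that are bytes / text on both major versions.
raw = b('latin-1 payload')
text = u('some text')
assert isinstance(raw, six.binary_type)
assert isinstance(text, six.text_type)

# Metaclass declaration that parses under both syntaxes.
class Meta(type):
    pass

class Base(with_metaclass(Meta, object)):
    pass

assert type(Base) is Meta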
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/packages/__init__.py
from __future__ import absolute_import

from . import ssl_match_hostname

__all__ = ('ssl_match_hostname', )
109
17.333333
38
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/packages/ordered_dict.py
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. # Passes Python2.7's test suite and incorporates all the latest updates. # Copyright 2009 Raymond Hettinger, released under the MIT License. # http://code.activestate.com/recipes/576693/ try: from thread import get_ident as _get_ident except ImportError: from dummy_thread import get_ident as _get_ident try: from _abcoll import KeysView, ValuesView, ItemsView except ImportError: pass class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. # Big-O running times for all methods are the same as for regular dictionaries. # The internal self.__map dictionary maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # Each link is stored as a list of length three: [PREV, NEXT, KEY]. def __init__(self, *args, **kwds): '''Initialize an ordered dictionary. Signature is the same as for regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary. ''' if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__root except AttributeError: self.__root = root = [] # sentinel node root[:] = [root, root, None] self.__map = {} self.__update(*args, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link which goes at the end of the linked # list, and the inherited dictionary is updated with the new key/value pair. if key not in self: root = self.__root last = root[0] last[1] = root[0] = self.__map[key] = [last, root, key] dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which is # then removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link_prev, link_next, key = self.__map.pop(key) link_prev[1] = link_next link_next[0] = link_prev def __iter__(self): 'od.__iter__() <==> iter(od)' root = self.__root curr = root[1] while curr is not root: yield curr[2] curr = curr[1] def __reversed__(self): 'od.__reversed__() <==> reversed(od)' root = self.__root curr = root[0] while curr is not root: yield curr[2] curr = curr[0] def clear(self): 'od.clear() -> None. Remove all items from od.' try: for node in self.__map.itervalues(): del node[:] root = self.__root root[:] = [root, root, None] self.__map.clear() except AttributeError: pass dict.clear(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. 
''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root[0] link_prev = link[0] link_prev[1] = root root[0] = link_prev else: link = root[1] link_next = link[1] root[1] = link_next link_next[0] = root key = link[2] del self.__map[key] value = dict.pop(self, key) return key, value # -- the following methods do not depend on the internal structure -- def keys(self): 'od.keys() -> list of keys in od' return list(self) def values(self): 'od.values() -> list of values in od' return [self[key] for key in self] def items(self): 'od.items() -> list of (key, value) pairs in od' return [(key, self[key]) for key in self] def iterkeys(self): 'od.iterkeys() -> an iterator over the keys in od' return iter(self) def itervalues(self): 'od.itervalues -> an iterator over the values in od' for k in self: yield self[k] def iteritems(self): 'od.iteritems -> an iterator over the (key, value) items in od' for k in self: yield (k, self[k]) def update(*args, **kwds): '''od.update(E, **F) -> None. Update od from dict/iterable E and F. If E is a dict instance, does: for k in E: od[k] = E[k] If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] Or if E is an iterable of items, does: for k, v in E: od[k] = v In either case, this is followed by: for k, v in F.items(): od[k] = v ''' if len(args) > 2: raise TypeError('update() takes at most 2 positional ' 'arguments (%d given)' % (len(args),)) elif not args: raise TypeError('update() takes at least 1 argument (0 given)') self = args[0] # Make progressively weaker assumptions about "other" other = () if len(args) == 2: other = args[1] if isinstance(other, dict): for key in other: self[key] = other[key] elif hasattr(other, 'keys'): for key in other.keys(): self[key] = other[key] else: for key, value in other: self[key] = value for key, value in kwds.items(): self[key] = value __update = update # let subclasses override update without breaking __init__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' if key in self: return self[key] self[key] = default return default def __repr__(self, _repr_running={}): 'od.__repr__() <==> repr(od)' call_key = id(self), _get_ident() if call_key in _repr_running: return '...' _repr_running[call_key] = 1 try: if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) finally: del _repr_running[call_key] def __reduce__(self): 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S and values equal to v (which defaults to None). ''' d = cls() for key in iterable: d[key] = value return d def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. 
''' if isinstance(other, OrderedDict): return len(self)==len(other) and self.items() == other.items() return dict.__eq__(self, other) def __ne__(self, other): return not self == other # -- the following methods are only used in Python 2.7 -- def viewkeys(self): "od.viewkeys() -> a set-like object providing a view on od's keys" return KeysView(self) def viewvalues(self): "od.viewvalues() -> an object providing a view on od's values" return ValuesView(self) def viewitems(self): "od.viewitems() -> a set-like object providing a view on od's items" return ItemsView(self)
8935
33.369231
87
py
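Editorial sketch: the record above backports OrderedDict for Python 2.4-2.6. The order-aware behaviour it provides is shown below using the stdlib collections.OrderedDict, which exposes the same interface on the Python 3.6 environment this tree targets.

from collections import OrderedDict

od = OrderedDict()
od['first'] = 1
od['second'] = 2
od['third'] = 3

assert list(od.keys()) == ['first', 'second', 'third']   # insertion order is kept

key, value = od.popitem(last=False)                       # FIFO pop from the front
assert (key, value) == ('first', 1)

key, value = od.popitem()                                 # LIFO pop from the end
assert (key, value) == ('third', 3)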
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/packages/ssl_match_hostname/__init__.py
import sys

try:
    # Our match_hostname function is the same as 3.5's, so we only want to
    # import the match_hostname function if it's at least that good.
    if sys.version_info < (3, 5):
        raise ImportError("Fallback to vendored code")

    from ssl import CertificateError, match_hostname
except ImportError:
    try:
        # Backport of the function from a pypi module
        from backports.ssl_match_hostname import CertificateError, match_hostname
    except ImportError:
        # Our vendored copy
        from ._implementation import CertificateError, match_hostname

# Not needed, but documenting what we provide.
__all__ = ('CertificateError', 'match_hostname')
688
33.45
81
py
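Editorial sketch: whichever branch of the import fallback above wins (stdlib ssl on 3.5+, the backports package, or the vendored _implementation), callers consume the same two names from this package.

from urllib3.packages.ssl_match_hostname import CertificateError, match_hostname

cert = {'subjectAltName': (('DNS', 'example.com'),)}

match_hostname(cert, 'example.com')      # silent on success (returns None)

try:
    match_hostname(cert, 'attacker.test')
except CertificateError as exc:
    print('rejected:', exc)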
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/packages/ssl_match_hostname/_implementation.py
"""The match_hostname() function from Python 3.3.3, essential when using SSL.""" # Note: This file is under the PSF license as the code comes from the python # stdlib. http://docs.python.org/3/license.html import re import sys # ipaddress has been backported to 2.6+ in pypi. If it is installed on the # system, use it to handle IPAddress ServerAltnames (this was added in # python-3.5) otherwise only do DNS matching. This allows # backports.ssl_match_hostname to continue to be used all the way back to # python-2.4. try: import ipaddress except ImportError: ipaddress = None __version__ = '3.5.0.1' class CertificateError(ValueError): pass def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 """ pats = [] if not dn: return False # Ported from python3-syntax: # leftmost, *remainder = dn.split(r'.') parts = dn.split(r'.') leftmost = parts[0] remainder = parts[1:] wildcards = leftmost.count('*') if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. A survey of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( "too many wildcards in certificate DNS name: " + repr(dn)) # speed up common case w/o wildcards if not wildcards: return dn.lower() == hostname.lower() # RFC 6125, section 6.4.3, subitem 1. # The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. if leftmost == '*': # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. pats.append('[^.]+') elif leftmost.startswith('xn--') or hostname.startswith('xn--'): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or # U-label of an internationalized domain name. pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. www* pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) return pat.match(hostname) def _to_unicode(obj): if isinstance(obj, str) and sys.version_info < (3,): obj = unicode(obj, encoding='ascii', errors='strict') return obj def _ipaddress_match(ipname, host_ip): """Exact matching of IP addresses. RFC 6125 explicitly doesn't define an algorithm for this (section 1.7.2 - "Out of Scope"). """ # OpenSSL may add a trailing newline to a subjectAltName's IP address # Divergence from upstream: ipaddress can't handle byte str ip = ipaddress.ip_address(_to_unicode(ipname).rstrip()) return ip == host_ip def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. 
""" if not cert: raise ValueError("empty or no certificate, match_hostname needs a " "SSL socket or SSL context with either " "CERT_OPTIONAL or CERT_REQUIRED") try: # Divergence from upstream: ipaddress can't handle byte str host_ip = ipaddress.ip_address(_to_unicode(hostname)) except ValueError: # Not an IP address (common case) host_ip = None except UnicodeError: # Divergence from upstream: Have to deal with ipaddress not taking # byte strings. addresses should be all ascii, so we consider it not # an ipaddress in this case host_ip = None except AttributeError: # Divergence from upstream: Make ipaddress library optional if ipaddress is None: host_ip = None else: raise dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': if host_ip is None and _dnsname_match(value, hostname): return dnsnames.append(value) elif key == 'IP Address': if host_ip is not None and _ipaddress_match(value, host_ip): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get('subject', ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. if key == 'commonName': if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError("hostname %r " "doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError("hostname %r " "doesn't match %r" % (hostname, dnsnames[0])) else: raise CertificateError("no appropriate commonName or " "subjectAltName fields were found")
5702
35.094937
80
py
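Editorial sketch of the RFC 6125 wildcard rules implemented by _dnsname_match above: a single left-most wildcard label is accepted and never spans more than one label.

from urllib3.packages.ssl_match_hostname._implementation import (
    CertificateError, match_hostname)

cert = {'subjectAltName': (('DNS', '*.example.com'),)}

match_hostname(cert, 'www.example.com')          # '*' matches exactly one label

try:
    match_hostname(cert, 'www.sub.example.com')  # '*' must not span a dot
except CertificateError as exc:
    print('rejected:', exc)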
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/packages/backports/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/packages/backports/makefile.py
# -*- coding: utf-8 -*-
"""
backports.makefile
~~~~~~~~~~~~~~~~~~

Backports the Python 3 ``socket.makefile`` method for use with anything that
wants to create a "fake" socket object.
"""
import io

from socket import SocketIO


def backport_makefile(self, mode="r", buffering=None, encoding=None,
                      errors=None, newline=None):
    """
    Backport of ``socket.makefile`` from Python 3.5.
    """
    if not set(mode) <= set(["r", "w", "b"]):
        raise ValueError(
            "invalid mode %r (only r, w, b allowed)" % (mode,)
        )
    writing = "w" in mode
    reading = "r" in mode or not writing
    assert reading or writing
    binary = "b" in mode
    rawmode = ""
    if reading:
        rawmode += "r"
    if writing:
        rawmode += "w"
    raw = SocketIO(self, rawmode)
    self._makefile_refs += 1
    if buffering is None:
        buffering = -1
    if buffering < 0:
        buffering = io.DEFAULT_BUFFER_SIZE
    if buffering == 0:
        if not binary:
            raise ValueError("unbuffered streams must be binary")
        return raw
    if reading and writing:
        buffer = io.BufferedRWPair(raw, raw, buffering)
    elif reading:
        buffer = io.BufferedReader(raw, buffering)
    else:
        assert writing
        buffer = io.BufferedWriter(raw, buffering)
    if binary:
        return buffer
    text = io.TextIOWrapper(buffer, encoding, errors, newline)
    text.mode = mode
    return text
1461
26.074074
76
py
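Editorial sketch: backport_makefile above is meant to be bound as the makefile() method of a socket wrapper (urllib3.contrib.pyopenssl does exactly that on Python 3). The wrapper below is a simplified, hypothetical stand-in used only to show the calling convention.

import socket

from urllib3.packages.backports.makefile import backport_makefile


class MiniWrappedSocket(object):
    """Hypothetical minimal wrapper exposing what backport_makefile relies on."""
    makefile = backport_makefile          # bound exactly as urllib3.contrib.pyopenssl does

    def __init__(self, sock):
        self._sock = sock
        self._makefile_refs = 0           # reference counter the backport increments

    # SocketIO performs raw I/O through these methods.
    def fileno(self):
        return self._sock.fileno()

    def recv_into(self, buf):
        return self._sock.recv_into(buf)

    def send(self, data):
        return self._sock.send(data)

    def _decref_socketios(self):
        if self._makefile_refs > 0:
            self._makefile_refs -= 1


left, right = socket.socketpair()
wrapped = MiniWrappedSocket(left)
fp = wrapped.makefile('rwb', buffering=0)   # unbuffered binary file over the socket
fp.write(b'ping')
assert right.recv(4) == b'ping'
left.close()
right.close()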
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/contrib/appengine.py
""" This module provides a pool manager that uses Google App Engine's `URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_. Example usage:: from urllib3 import PoolManager from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox if is_appengine_sandbox(): # AppEngineManager uses AppEngine's URLFetch API behind the scenes http = AppEngineManager() else: # PoolManager uses a socket-level API behind the scenes http = PoolManager() r = http.request('GET', 'https://google.com/') There are `limitations <https://cloud.google.com/appengine/docs/python/\ urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be the best choice for your application. There are three options for using urllib3 on Google App Engine: 1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is cost-effective in many circumstances as long as your usage is within the limitations. 2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets. Sockets also have `limitations and restrictions <https://cloud.google.com/appengine/docs/python/sockets/\ #limitations-and-restrictions>`_ and have a lower free quota than URLFetch. To use sockets, be sure to specify the following in your ``app.yaml``:: env_variables: GAE_USE_SOCKETS_HTTPLIB : 'true' 3. If you are using `App Engine Flexible <https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard :class:`PoolManager` without any configuration or special environment variables. """ from __future__ import absolute_import import logging import os import warnings from ..packages.six.moves.urllib.parse import urljoin from ..exceptions import ( HTTPError, HTTPWarning, MaxRetryError, ProtocolError, TimeoutError, SSLError ) from ..packages.six import BytesIO from ..request import RequestMethods from ..response import HTTPResponse from ..util.timeout import Timeout from ..util.retry import Retry try: from google.appengine.api import urlfetch except ImportError: urlfetch = None log = logging.getLogger(__name__) class AppEnginePlatformWarning(HTTPWarning): pass class AppEnginePlatformError(HTTPError): pass class AppEngineManager(RequestMethods): """ Connection manager for Google App Engine sandbox applications. This manager uses the URLFetch service directly instead of using the emulated httplib, and is subject to URLFetch limitations as described in the App Engine documentation `here <https://cloud.google.com/appengine/docs/python/urlfetch>`_. Notably it will raise an :class:`AppEnginePlatformError` if: * URLFetch is not available. * If you attempt to use this on App Engine Flexible, as full socket support is available. * If a request size is more than 10 megabytes. * If a response size is more than 32 megabtyes. * If you use an unsupported request method such as OPTIONS. Beyond those cases, it will raise normal urllib3 errors. """ def __init__(self, headers=None, retries=None, validate_certificate=True, urlfetch_retries=True): if not urlfetch: raise AppEnginePlatformError( "URLFetch is not available in this environment.") if is_prod_appengine_mvms(): raise AppEnginePlatformError( "Use normal urllib3.PoolManager instead of AppEngineManager" "on Managed VMs, as using URLFetch is not necessary in " "this environment.") warnings.warn( "urllib3 is using URLFetch on Google App Engine sandbox instead " "of sockets. 
To use sockets directly instead of URLFetch see " "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.", AppEnginePlatformWarning) RequestMethods.__init__(self, headers) self.validate_certificate = validate_certificate self.urlfetch_retries = urlfetch_retries self.retries = retries or Retry.DEFAULT def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): # Return False to re-raise any potential exceptions return False def urlopen(self, method, url, body=None, headers=None, retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT, **response_kw): retries = self._get_retries(retries, redirect) try: follow_redirects = ( redirect and retries.redirect != 0 and retries.total) response = urlfetch.fetch( url, payload=body, method=method, headers=headers or {}, allow_truncated=False, follow_redirects=self.urlfetch_retries and follow_redirects, deadline=self._get_absolute_timeout(timeout), validate_certificate=self.validate_certificate, ) except urlfetch.DeadlineExceededError as e: raise TimeoutError(self, e) except urlfetch.InvalidURLError as e: if 'too large' in str(e): raise AppEnginePlatformError( "URLFetch request too large, URLFetch only " "supports requests up to 10mb in size.", e) raise ProtocolError(e) except urlfetch.DownloadError as e: if 'Too many redirects' in str(e): raise MaxRetryError(self, url, reason=e) raise ProtocolError(e) except urlfetch.ResponseTooLargeError as e: raise AppEnginePlatformError( "URLFetch response too large, URLFetch only supports" "responses up to 32mb in size.", e) except urlfetch.SSLCertificateError as e: raise SSLError(e) except urlfetch.InvalidMethodError as e: raise AppEnginePlatformError( "URLFetch does not support method: %s" % method, e) http_response = self._urlfetch_response_to_http_response( response, retries=retries, **response_kw) # Handle redirect? redirect_location = redirect and http_response.get_redirect_location() if redirect_location: # Check for redirect response if (self.urlfetch_retries and retries.raise_on_redirect): raise MaxRetryError(self, url, "too many redirects") else: if http_response.status == 303: method = 'GET' try: retries = retries.increment(method, url, response=http_response, _pool=self) except MaxRetryError: if retries.raise_on_redirect: raise MaxRetryError(self, url, "too many redirects") return http_response retries.sleep_for_retry(http_response) log.debug("Redirecting %s -> %s", url, redirect_location) redirect_url = urljoin(url, redirect_location) return self.urlopen( method, redirect_url, body, headers, retries=retries, redirect=redirect, timeout=timeout, **response_kw) # Check if we should retry the HTTP response. has_retry_after = bool(http_response.getheader('Retry-After')) if retries.is_retry(method, http_response.status, has_retry_after): retries = retries.increment( method, url, response=http_response, _pool=self) log.debug("Retry: %s", url) retries.sleep(http_response) return self.urlopen( method, url, body=body, headers=headers, retries=retries, redirect=redirect, timeout=timeout, **response_kw) return http_response def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw): if is_prod_appengine(): # Production GAE handles deflate encoding automatically, but does # not remove the encoding header. 
content_encoding = urlfetch_resp.headers.get('content-encoding') if content_encoding == 'deflate': del urlfetch_resp.headers['content-encoding'] transfer_encoding = urlfetch_resp.headers.get('transfer-encoding') # We have a full response's content, # so let's make sure we don't report ourselves as chunked data. if transfer_encoding == 'chunked': encodings = transfer_encoding.split(",") encodings.remove('chunked') urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings) original_response = HTTPResponse( # In order for decoding to work, we must present the content as # a file-like object. body=BytesIO(urlfetch_resp.content), msg=urlfetch_resp.header_msg, headers=urlfetch_resp.headers, status=urlfetch_resp.status_code, **response_kw ) return HTTPResponse( body=BytesIO(urlfetch_resp.content), headers=urlfetch_resp.headers, status=urlfetch_resp.status_code, original_response=original_response, **response_kw ) def _get_absolute_timeout(self, timeout): if timeout is Timeout.DEFAULT_TIMEOUT: return None # Defer to URLFetch's default. if isinstance(timeout, Timeout): if timeout._read is not None or timeout._connect is not None: warnings.warn( "URLFetch does not support granular timeout settings, " "reverting to total or default URLFetch timeout.", AppEnginePlatformWarning) return timeout.total return timeout def _get_retries(self, retries, redirect): if not isinstance(retries, Retry): retries = Retry.from_int( retries, redirect=redirect, default=self.retries) if retries.connect or retries.read or retries.redirect: warnings.warn( "URLFetch only supports total retries and does not " "recognize connect, read, or redirect retry parameters.", AppEnginePlatformWarning) return retries def is_appengine(): return (is_local_appengine() or is_prod_appengine() or is_prod_appengine_mvms()) def is_appengine_sandbox(): return is_appengine() and not is_prod_appengine_mvms() def is_local_appengine(): return ('APPENGINE_RUNTIME' in os.environ and 'Development/' in os.environ['SERVER_SOFTWARE']) def is_prod_appengine(): return ('APPENGINE_RUNTIME' in os.environ and 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and not is_prod_appengine_mvms()) def is_prod_appengine_mvms(): return os.environ.get('GAE_VM', False) == 'true'
11173
35.51634
96
py
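Editorial sketch, adapted from the module docstring above: pick the manager based on the runtime. Outside the App Engine sandbox the detection helpers return False, so this falls through to the regular PoolManager; the retry and urlfetch keyword values are illustrative assumptions, not required settings.

import urllib3
from urllib3.contrib import appengine

if appengine.is_appengine_sandbox():
    # URLFetch-backed manager; only total retries are honoured (see warnings above).
    http = appengine.AppEngineManager(retries=3, urlfetch_retries=False)
else:
    # Normal socket-backed manager everywhere else.
    http = urllib3.PoolManager()

r = http.request('GET', 'http://example.com/')
print(r.status)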
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/contrib/pyopenssl.py
""" SSL with SNI_-support for Python 2. Follow these instructions if you would like to verify SSL certificates in Python 2. Note, the default libraries do *not* do certificate checking; you need to do additional work to validate certificates yourself. This needs the following packages installed: * pyOpenSSL (tested with 16.0.0) * cryptography (minimum 1.3.4, from pyopenssl) * idna (minimum 2.0, from cryptography) However, pyopenssl depends on cryptography, which depends on idna, so while we use all three directly here we end up having relatively few packages required. You can install them with the following command: pip install pyopenssl cryptography idna To activate certificate checking, call :func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code before you begin making HTTP requests. This can be done in a ``sitecustomize`` module, or at any other time before your application begins using ``urllib3``, like this:: try: import urllib3.contrib.pyopenssl urllib3.contrib.pyopenssl.inject_into_urllib3() except ImportError: pass Now you can use :mod:`urllib3` as you normally would, and it will support SNI when the required modules are installed. Activating this module also has the positive side effect of disabling SSL/TLS compression in Python 2 (see `CRIME attack`_). If you want to configure the default list of supported cipher suites, you can set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable. .. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication .. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit) """ from __future__ import absolute_import import OpenSSL.SSL from cryptography import x509 from cryptography.hazmat.backends.openssl import backend as openssl_backend from cryptography.hazmat.backends.openssl.x509 import _Certificate try: from cryptography.x509 import UnsupportedExtension except ImportError: # UnsupportedExtension is gone in cryptography >= 2.1.0 class UnsupportedExtension(Exception): pass from socket import timeout, error as SocketError from io import BytesIO try: # Platform-specific: Python 2 from socket import _fileobject except ImportError: # Platform-specific: Python 3 _fileobject = None from ..packages.backports.makefile import backport_makefile import logging import ssl from ..packages import six import sys from .. import util __all__ = ['inject_into_urllib3', 'extract_from_urllib3'] # SNI always works. HAS_SNI = True # Map from urllib3 to PyOpenSSL compatible parameter-values. 
_openssl_versions = { ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD, ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD, } if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'): _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'): _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD try: _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD}) except AttributeError: pass _stdlib_to_openssl_verify = { ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE, ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER, ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, } _openssl_to_stdlib_verify = dict( (v, k) for k, v in _stdlib_to_openssl_verify.items() ) # OpenSSL will only write 16K at a time SSL_WRITE_BLOCKSIZE = 16384 orig_util_HAS_SNI = util.HAS_SNI orig_util_SSLContext = util.ssl_.SSLContext log = logging.getLogger(__name__) def inject_into_urllib3(): 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.' _validate_dependencies_met() util.ssl_.SSLContext = PyOpenSSLContext util.HAS_SNI = HAS_SNI util.ssl_.HAS_SNI = HAS_SNI util.IS_PYOPENSSL = True util.ssl_.IS_PYOPENSSL = True def extract_from_urllib3(): 'Undo monkey-patching by :func:`inject_into_urllib3`.' util.ssl_.SSLContext = orig_util_SSLContext util.HAS_SNI = orig_util_HAS_SNI util.ssl_.HAS_SNI = orig_util_HAS_SNI util.IS_PYOPENSSL = False util.ssl_.IS_PYOPENSSL = False def _validate_dependencies_met(): """ Verifies that PyOpenSSL's package-level dependencies have been met. Throws `ImportError` if they are not met. """ # Method added in `cryptography==1.1`; not available in older versions from cryptography.x509.extensions import Extensions if getattr(Extensions, "get_extension_for_class", None) is None: raise ImportError("'cryptography' module missing required functionality. " "Try upgrading to v1.3.4 or newer.") # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509 # attribute is only present on those versions. from OpenSSL.crypto import X509 x509 = X509() if getattr(x509, "_x509", None) is None: raise ImportError("'pyOpenSSL' module missing required functionality. " "Try upgrading to v0.14 or newer.") def _dnsname_to_stdlib(name): """ Converts a dNSName SubjectAlternativeName field to the form used by the standard library on the given Python version. Cryptography produces a dNSName as a unicode string that was idna-decoded from ASCII bytes. We need to idna-encode that string to get it back, and then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8). """ def idna_encode(name): """ Borrowed wholesale from the Python Cryptography Project. It turns out that we can't just safely call `idna.encode`: it can explode for wildcard names. This avoids that problem. """ import idna for prefix in [u'*.', u'.']: if name.startswith(prefix): name = name[len(prefix):] return prefix.encode('ascii') + idna.encode(name) return idna.encode(name) name = idna_encode(name) if sys.version_info >= (3, 0): name = name.decode('utf-8') return name def get_subj_alt_name(peer_cert): """ Given an PyOpenSSL certificate, provides all the subject alternative names. """ # Pass the cert to cryptography, which has much better APIs for this. 
if hasattr(peer_cert, "to_cryptography"): cert = peer_cert.to_cryptography() else: # This is technically using private APIs, but should work across all # relevant versions before PyOpenSSL got a proper API for this. cert = _Certificate(openssl_backend, peer_cert._x509) # We want to find the SAN extension. Ask Cryptography to locate it (it's # faster than looping in Python) try: ext = cert.extensions.get_extension_for_class( x509.SubjectAlternativeName ).value except x509.ExtensionNotFound: # No such extension, return the empty list. return [] except (x509.DuplicateExtension, UnsupportedExtension, x509.UnsupportedGeneralNameType, UnicodeError) as e: # A problem has been found with the quality of the certificate. Assume # no SAN field is present. log.warning( "A problem was encountered with the certificate that prevented " "urllib3 from finding the SubjectAlternativeName field. This can " "affect certificate validation. The error was %s", e, ) return [] # We want to return dNSName and iPAddress fields. We need to cast the IPs # back to strings because the match_hostname function wants them as # strings. # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8 # decoded. This is pretty frustrating, but that's what the standard library # does with certificates, and so we need to attempt to do the same. names = [ ('DNS', _dnsname_to_stdlib(name)) for name in ext.get_values_for_type(x509.DNSName) ] names.extend( ('IP Address', str(name)) for name in ext.get_values_for_type(x509.IPAddress) ) return names class WrappedSocket(object): '''API-compatibility wrapper for Python OpenSSL's Connection-class. Note: _makefile_refs, _drop() and _reuse() are needed for the garbage collector of pypy. ''' def __init__(self, connection, socket, suppress_ragged_eofs=True): self.connection = connection self.socket = socket self.suppress_ragged_eofs = suppress_ragged_eofs self._makefile_refs = 0 self._closed = False def fileno(self): return self.socket.fileno() # Copy-pasted from Python 3.5 source code def _decref_socketios(self): if self._makefile_refs > 0: self._makefile_refs -= 1 if self._closed: self.close() def recv(self, *args, **kwargs): try: data = self.connection.recv(*args, **kwargs) except OpenSSL.SSL.SysCallError as e: if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): return b'' else: raise SocketError(str(e)) except OpenSSL.SSL.ZeroReturnError as e: if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: return b'' else: raise except OpenSSL.SSL.WantReadError: if not util.wait_for_read(self.socket, self.socket.gettimeout()): raise timeout('The read operation timed out') else: return self.recv(*args, **kwargs) else: return data def recv_into(self, *args, **kwargs): try: return self.connection.recv_into(*args, **kwargs) except OpenSSL.SSL.SysCallError as e: if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): return 0 else: raise SocketError(str(e)) except OpenSSL.SSL.ZeroReturnError as e: if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: return 0 else: raise except OpenSSL.SSL.WantReadError: if not util.wait_for_read(self.socket, self.socket.gettimeout()): raise timeout('The read operation timed out') else: return self.recv_into(*args, **kwargs) def settimeout(self, timeout): return self.socket.settimeout(timeout) def _send_until_done(self, data): while True: try: return self.connection.send(data) except OpenSSL.SSL.WantWriteError: if not util.wait_for_write(self.socket, self.socket.gettimeout()): raise timeout() continue 
except OpenSSL.SSL.SysCallError as e: raise SocketError(str(e)) def sendall(self, data): total_sent = 0 while total_sent < len(data): sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]) total_sent += sent def shutdown(self): # FIXME rethrow compatible exceptions should we ever use this self.connection.shutdown() def close(self): if self._makefile_refs < 1: try: self._closed = True return self.connection.close() except OpenSSL.SSL.Error: return else: self._makefile_refs -= 1 def getpeercert(self, binary_form=False): x509 = self.connection.get_peer_certificate() if not x509: return x509 if binary_form: return OpenSSL.crypto.dump_certificate( OpenSSL.crypto.FILETYPE_ASN1, x509) return { 'subject': ( (('commonName', x509.get_subject().CN),), ), 'subjectAltName': get_subj_alt_name(x509) } def _reuse(self): self._makefile_refs += 1 def _drop(self): if self._makefile_refs < 1: self.close() else: self._makefile_refs -= 1 if _fileobject: # Platform-specific: Python 2 def makefile(self, mode, bufsize=-1): self._makefile_refs += 1 return _fileobject(self, mode, bufsize, close=True) else: # Platform-specific: Python 3 makefile = backport_makefile WrappedSocket.makefile = makefile class PyOpenSSLContext(object): """ I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible for translating the interface of the standard library ``SSLContext`` object to calls into PyOpenSSL. """ def __init__(self, protocol): self.protocol = _openssl_versions[protocol] self._ctx = OpenSSL.SSL.Context(self.protocol) self._options = 0 self.check_hostname = False @property def options(self): return self._options @options.setter def options(self, value): self._options = value self._ctx.set_options(value) @property def verify_mode(self): return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()] @verify_mode.setter def verify_mode(self, value): self._ctx.set_verify( _stdlib_to_openssl_verify[value], _verify_callback ) def set_default_verify_paths(self): self._ctx.set_default_verify_paths() def set_ciphers(self, ciphers): if isinstance(ciphers, six.text_type): ciphers = ciphers.encode('utf-8') self._ctx.set_cipher_list(ciphers) def load_verify_locations(self, cafile=None, capath=None, cadata=None): if cafile is not None: cafile = cafile.encode('utf-8') if capath is not None: capath = capath.encode('utf-8') self._ctx.load_verify_locations(cafile, capath) if cadata is not None: self._ctx.load_verify_locations(BytesIO(cadata)) def load_cert_chain(self, certfile, keyfile=None, password=None): self._ctx.use_certificate_chain_file(certfile) if password is not None: self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password) self._ctx.use_privatekey_file(keyfile or certfile) def wrap_socket(self, sock, server_side=False, do_handshake_on_connect=True, suppress_ragged_eofs=True, server_hostname=None): cnx = OpenSSL.SSL.Connection(self._ctx, sock) if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3 server_hostname = server_hostname.encode('utf-8') if server_hostname is not None: cnx.set_tlsext_host_name(server_hostname) cnx.set_connect_state() while True: try: cnx.do_handshake() except OpenSSL.SSL.WantReadError: if not util.wait_for_read(sock, sock.gettimeout()): raise timeout('select timed out') continue except OpenSSL.SSL.Error as e: raise ssl.SSLError('bad handshake: %r' % e) break return WrappedSocket(cnx, sock) def _verify_callback(cnx, x509, err_no, err_depth, return_code): return err_no == 0
15463
32.764192
91
py
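A minimal usage sketch for the PyOpenSSL backend defined in urllib3/contrib/pyopenssl.py above. It assumes pyOpenSSL (0.14 or newer), cryptography and urllib3 are installed and importable from their upstream locations; the target URL is purely illustrative.

# Swap urllib3's SSLContext for PyOpenSSLContext, make one request, then
# restore the stdlib ssl backend.
import urllib3
import urllib3.contrib.pyopenssl as pyopenssl

pyopenssl.inject_into_urllib3()
try:
    http = urllib3.PoolManager()
    resp = http.request('GET', 'https://example.com/')  # illustrative URL
    print(resp.status)
finally:
    pyopenssl.extract_from_urllib3()  # undo the monkey-patching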
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/contrib/securetransport.py
""" SecureTranport support for urllib3 via ctypes. This makes platform-native TLS available to urllib3 users on macOS without the use of a compiler. This is an important feature because the Python Package Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL that ships with macOS is not capable of doing TLSv1.2. The only way to resolve this is to give macOS users an alternative solution to the problem, and that solution is to use SecureTransport. We use ctypes here because this solution must not require a compiler. That's because pip is not allowed to require a compiler either. This is not intended to be a seriously long-term solution to this problem. The hope is that PEP 543 will eventually solve this issue for us, at which point we can retire this contrib module. But in the short term, we need to solve the impending tire fire that is Python on Mac without this kind of contrib module. So...here we are. To use this module, simply import and inject it:: import urllib3.contrib.securetransport urllib3.contrib.securetransport.inject_into_urllib3() Happy TLSing! """ from __future__ import absolute_import import contextlib import ctypes import errno import os.path import shutil import socket import ssl import threading import weakref from .. import util from ._securetransport.bindings import ( Security, SecurityConst, CoreFoundation ) from ._securetransport.low_level import ( _assert_no_error, _cert_array_from_pem, _temporary_keychain, _load_client_cert_chain ) try: # Platform-specific: Python 2 from socket import _fileobject except ImportError: # Platform-specific: Python 3 _fileobject = None from ..packages.backports.makefile import backport_makefile __all__ = ['inject_into_urllib3', 'extract_from_urllib3'] # SNI always works HAS_SNI = True orig_util_HAS_SNI = util.HAS_SNI orig_util_SSLContext = util.ssl_.SSLContext # This dictionary is used by the read callback to obtain a handle to the # calling wrapped socket. This is a pretty silly approach, but for now it'll # do. I feel like I should be able to smuggle a handle to the wrapped socket # directly in the SSLConnectionRef, but for now this approach will work I # guess. # # We need to lock around this structure for inserts, but we don't do it for # reads/writes in the callbacks. The reasoning here goes as follows: # # 1. It is not possible to call into the callbacks before the dictionary is # populated, so once in the callback the id must be in the dictionary. # 2. The callbacks don't mutate the dictionary, they only read from it, and # so cannot conflict with any of the insertions. # # This is good: if we had to lock in the callbacks we'd drastically slow down # the performance of this code. _connection_refs = weakref.WeakValueDictionary() _connection_ref_lock = threading.Lock() # Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over # for no better reason than we need *a* limit, and this one is right there. SSL_WRITE_BLOCKSIZE = 16384 # This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to # individual cipher suites. We need to do this because this is how # SecureTransport wants them. 
CIPHER_SUITES = [ SecurityConst.TLS_AES_256_GCM_SHA384, SecurityConst.TLS_CHACHA20_POLY1305_SHA256, SecurityConst.TLS_AES_128_GCM_SHA256, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384, SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256, SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA, SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA, SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA, SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384, SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256, SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256, SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256, SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA, SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA, ] # Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of # TLSv1 and a high of TLSv1.2. For everything else, we pin to that version. _protocol_to_min_max = { ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12), } if hasattr(ssl, "PROTOCOL_SSLv2"): _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = ( SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2 ) if hasattr(ssl, "PROTOCOL_SSLv3"): _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = ( SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3 ) if hasattr(ssl, "PROTOCOL_TLSv1"): _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = ( SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1 ) if hasattr(ssl, "PROTOCOL_TLSv1_1"): _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = ( SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11 ) if hasattr(ssl, "PROTOCOL_TLSv1_2"): _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = ( SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12 ) if hasattr(ssl, "PROTOCOL_TLS"): _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23] def inject_into_urllib3(): """ Monkey-patch urllib3 with SecureTransport-backed SSL-support. """ util.ssl_.SSLContext = SecureTransportContext util.HAS_SNI = HAS_SNI util.ssl_.HAS_SNI = HAS_SNI util.IS_SECURETRANSPORT = True util.ssl_.IS_SECURETRANSPORT = True def extract_from_urllib3(): """ Undo monkey-patching by :func:`inject_into_urllib3`. """ util.ssl_.SSLContext = orig_util_SSLContext util.HAS_SNI = orig_util_HAS_SNI util.ssl_.HAS_SNI = orig_util_HAS_SNI util.IS_SECURETRANSPORT = False util.ssl_.IS_SECURETRANSPORT = False def _read_callback(connection_id, data_buffer, data_length_pointer): """ SecureTransport read callback. This is called by ST to request that data be returned from the socket. 
""" wrapped_socket = None try: wrapped_socket = _connection_refs.get(connection_id) if wrapped_socket is None: return SecurityConst.errSSLInternal base_socket = wrapped_socket.socket requested_length = data_length_pointer[0] timeout = wrapped_socket.gettimeout() error = None read_count = 0 try: while read_count < requested_length: if timeout is None or timeout >= 0: if not util.wait_for_read(base_socket, timeout): raise socket.error(errno.EAGAIN, 'timed out') remaining = requested_length - read_count buffer = (ctypes.c_char * remaining).from_address( data_buffer + read_count ) chunk_size = base_socket.recv_into(buffer, remaining) read_count += chunk_size if not chunk_size: if not read_count: return SecurityConst.errSSLClosedGraceful break except (socket.error) as e: error = e.errno if error is not None and error != errno.EAGAIN: data_length_pointer[0] = read_count if error == errno.ECONNRESET or error == errno.EPIPE: return SecurityConst.errSSLClosedAbort raise data_length_pointer[0] = read_count if read_count != requested_length: return SecurityConst.errSSLWouldBlock return 0 except Exception as e: if wrapped_socket is not None: wrapped_socket._exception = e return SecurityConst.errSSLInternal def _write_callback(connection_id, data_buffer, data_length_pointer): """ SecureTransport write callback. This is called by ST to request that data actually be sent on the network. """ wrapped_socket = None try: wrapped_socket = _connection_refs.get(connection_id) if wrapped_socket is None: return SecurityConst.errSSLInternal base_socket = wrapped_socket.socket bytes_to_write = data_length_pointer[0] data = ctypes.string_at(data_buffer, bytes_to_write) timeout = wrapped_socket.gettimeout() error = None sent = 0 try: while sent < bytes_to_write: if timeout is None or timeout >= 0: if not util.wait_for_write(base_socket, timeout): raise socket.error(errno.EAGAIN, 'timed out') chunk_sent = base_socket.send(data) sent += chunk_sent # This has some needless copying here, but I'm not sure there's # much value in optimising this data path. data = data[chunk_sent:] except (socket.error) as e: error = e.errno if error is not None and error != errno.EAGAIN: data_length_pointer[0] = sent if error == errno.ECONNRESET or error == errno.EPIPE: return SecurityConst.errSSLClosedAbort raise data_length_pointer[0] = sent if sent != bytes_to_write: return SecurityConst.errSSLWouldBlock return 0 except Exception as e: if wrapped_socket is not None: wrapped_socket._exception = e return SecurityConst.errSSLInternal # We need to keep these two objects references alive: if they get GC'd while # in use then SecureTransport could attempt to call a function that is in freed # memory. That would be...uh...bad. Yeah, that's the word. Bad. _read_callback_pointer = Security.SSLReadFunc(_read_callback) _write_callback_pointer = Security.SSLWriteFunc(_write_callback) class WrappedSocket(object): """ API-compatibility wrapper for Python's OpenSSL wrapped socket object. Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage collector of PyPy. """ def __init__(self, socket): self.socket = socket self.context = None self._makefile_refs = 0 self._closed = False self._exception = None self._keychain = None self._keychain_dir = None self._client_cert_chain = None # We save off the previously-configured timeout and then set it to # zero. 
This is done because we use select and friends to handle the # timeouts, but if we leave the timeout set on the lower socket then # Python will "kindly" call select on that socket again for us. Avoid # that by forcing the timeout to zero. self._timeout = self.socket.gettimeout() self.socket.settimeout(0) @contextlib.contextmanager def _raise_on_error(self): """ A context manager that can be used to wrap calls that do I/O from SecureTransport. If any of the I/O callbacks hit an exception, this context manager will correctly propagate the exception after the fact. This avoids silently swallowing those exceptions. It also correctly forces the socket closed. """ self._exception = None # We explicitly don't catch around this yield because in the unlikely # event that an exception was hit in the block we don't want to swallow # it. yield if self._exception is not None: exception, self._exception = self._exception, None self.close() raise exception def _set_ciphers(self): """ Sets up the allowed ciphers. By default this matches the set in util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done custom and doesn't allow changing at this time, mostly because parsing OpenSSL cipher strings is going to be a freaking nightmare. """ ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES) result = Security.SSLSetEnabledCiphers( self.context, ciphers, len(CIPHER_SUITES) ) _assert_no_error(result) def _custom_validate(self, verify, trust_bundle): """ Called when we have set custom validation. We do this in two cases: first, when cert validation is entirely disabled; and second, when using a custom trust DB. """ # If we disabled cert validation, just say: cool. if not verify: return # We want data in memory, so load it up. if os.path.isfile(trust_bundle): with open(trust_bundle, 'rb') as f: trust_bundle = f.read() cert_array = None trust = Security.SecTrustRef() try: # Get a CFArray that contains the certs we want. cert_array = _cert_array_from_pem(trust_bundle) # Ok, now the hard part. We want to get the SecTrustRef that ST has # created for this connection, shove our CAs into it, tell ST to # ignore everything else it knows, and then ask if it can build a # chain. This is a buuuunch of code. result = Security.SSLCopyPeerTrust( self.context, ctypes.byref(trust) ) _assert_no_error(result) if not trust: raise ssl.SSLError("Failed to copy trust reference") result = Security.SecTrustSetAnchorCertificates(trust, cert_array) _assert_no_error(result) result = Security.SecTrustSetAnchorCertificatesOnly(trust, True) _assert_no_error(result) trust_result = Security.SecTrustResultType() result = Security.SecTrustEvaluate( trust, ctypes.byref(trust_result) ) _assert_no_error(result) finally: if trust: CoreFoundation.CFRelease(trust) if cert_array is not None: CoreFoundation.CFRelease(cert_array) # Ok, now we can look at what the result was. successes = ( SecurityConst.kSecTrustResultUnspecified, SecurityConst.kSecTrustResultProceed ) if trust_result.value not in successes: raise ssl.SSLError( "certificate verify failed, error code: %d" % trust_result.value ) def handshake(self, server_hostname, verify, trust_bundle, min_version, max_version, client_cert, client_key, client_key_passphrase): """ Actually performs the TLS handshake. This is run automatically by wrapped socket, and shouldn't be needed in user code. """ # First, we do the initial bits of connection setup. We need to create # a context, set its I/O funcs, and set the connection reference. 
self.context = Security.SSLCreateContext( None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType ) result = Security.SSLSetIOFuncs( self.context, _read_callback_pointer, _write_callback_pointer ) _assert_no_error(result) # Here we need to compute the handle to use. We do this by taking the # id of self modulo 2**31 - 1. If this is already in the dictionary, we # just keep incrementing by one until we find a free space. with _connection_ref_lock: handle = id(self) % 2147483647 while handle in _connection_refs: handle = (handle + 1) % 2147483647 _connection_refs[handle] = self result = Security.SSLSetConnection(self.context, handle) _assert_no_error(result) # If we have a server hostname, we should set that too. if server_hostname: if not isinstance(server_hostname, bytes): server_hostname = server_hostname.encode('utf-8') result = Security.SSLSetPeerDomainName( self.context, server_hostname, len(server_hostname) ) _assert_no_error(result) # Setup the ciphers. self._set_ciphers() # Set the minimum and maximum TLS versions. result = Security.SSLSetProtocolVersionMin(self.context, min_version) _assert_no_error(result) result = Security.SSLSetProtocolVersionMax(self.context, max_version) _assert_no_error(result) # If there's a trust DB, we need to use it. We do that by telling # SecureTransport to break on server auth. We also do that if we don't # want to validate the certs at all: we just won't actually do any # authing in that case. if not verify or trust_bundle is not None: result = Security.SSLSetSessionOption( self.context, SecurityConst.kSSLSessionOptionBreakOnServerAuth, True ) _assert_no_error(result) # If there's a client cert, we need to use it. if client_cert: self._keychain, self._keychain_dir = _temporary_keychain() self._client_cert_chain = _load_client_cert_chain( self._keychain, client_cert, client_key ) result = Security.SSLSetCertificate( self.context, self._client_cert_chain ) _assert_no_error(result) while True: with self._raise_on_error(): result = Security.SSLHandshake(self.context) if result == SecurityConst.errSSLWouldBlock: raise socket.timeout("handshake timed out") elif result == SecurityConst.errSSLServerAuthCompleted: self._custom_validate(verify, trust_bundle) continue else: _assert_no_error(result) break def fileno(self): return self.socket.fileno() # Copy-pasted from Python 3.5 source code def _decref_socketios(self): if self._makefile_refs > 0: self._makefile_refs -= 1 if self._closed: self.close() def recv(self, bufsiz): buffer = ctypes.create_string_buffer(bufsiz) bytes_read = self.recv_into(buffer, bufsiz) data = buffer[:bytes_read] return data def recv_into(self, buffer, nbytes=None): # Read short on EOF. if self._closed: return 0 if nbytes is None: nbytes = len(buffer) buffer = (ctypes.c_char * nbytes).from_buffer(buffer) processed_bytes = ctypes.c_size_t(0) with self._raise_on_error(): result = Security.SSLRead( self.context, buffer, nbytes, ctypes.byref(processed_bytes) ) # There are some result codes that we want to treat as "not always # errors". Specifically, those are errSSLWouldBlock, # errSSLClosedGraceful, and errSSLClosedNoNotify. if (result == SecurityConst.errSSLWouldBlock): # If we didn't process any bytes, then this was just a time out. # However, we can get errSSLWouldBlock in situations when we *did* # read some data, and in those cases we should just read "short" # and return. if processed_bytes.value == 0: # Timed out, no data read. 
raise socket.timeout("recv timed out") elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify): # The remote peer has closed this connection. We should do so as # well. Note that we don't actually return here because in # principle this could actually be fired along with return data. # It's unlikely though. self.close() else: _assert_no_error(result) # Ok, we read and probably succeeded. We should return whatever data # was actually read. return processed_bytes.value def settimeout(self, timeout): self._timeout = timeout def gettimeout(self): return self._timeout def send(self, data): processed_bytes = ctypes.c_size_t(0) with self._raise_on_error(): result = Security.SSLWrite( self.context, data, len(data), ctypes.byref(processed_bytes) ) if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0: # Timed out raise socket.timeout("send timed out") else: _assert_no_error(result) # We sent, and probably succeeded. Tell them how much we sent. return processed_bytes.value def sendall(self, data): total_sent = 0 while total_sent < len(data): sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE]) total_sent += sent def shutdown(self): with self._raise_on_error(): Security.SSLClose(self.context) def close(self): # TODO: should I do clean shutdown here? Do I have to? if self._makefile_refs < 1: self._closed = True if self.context: CoreFoundation.CFRelease(self.context) self.context = None if self._client_cert_chain: CoreFoundation.CFRelease(self._client_cert_chain) self._client_cert_chain = None if self._keychain: Security.SecKeychainDelete(self._keychain) CoreFoundation.CFRelease(self._keychain) shutil.rmtree(self._keychain_dir) self._keychain = self._keychain_dir = None return self.socket.close() else: self._makefile_refs -= 1 def getpeercert(self, binary_form=False): # Urgh, annoying. # # Here's how we do this: # # 1. Call SSLCopyPeerTrust to get hold of the trust object for this # connection. # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf. # 3. To get the CN, call SecCertificateCopyCommonName and process that # string so that it's of the appropriate type. # 4. To get the SAN, we need to do something a bit more complex: # a. Call SecCertificateCopyValues to get the data, requesting # kSecOIDSubjectAltName. # b. Mess about with this dictionary to try to get the SANs out. # # This is gross. Really gross. It's going to be a few hundred LoC extra # just to repeat something that SecureTransport can *already do*. So my # operating assumption at this time is that what we want to do is # instead to just flag to urllib3 that it shouldn't do its own hostname # validation when using SecureTransport. if not binary_form: raise ValueError( "SecureTransport only supports dumping binary certs" ) trust = Security.SecTrustRef() certdata = None der_bytes = None try: # Grab the trust store. result = Security.SSLCopyPeerTrust( self.context, ctypes.byref(trust) ) _assert_no_error(result) if not trust: # Probably we haven't done the handshake yet. No biggie. return None cert_count = Security.SecTrustGetCertificateCount(trust) if not cert_count: # Also a case that might happen if we haven't handshaked. # Handshook? Handshaken? return None leaf = Security.SecTrustGetCertificateAtIndex(trust, 0) assert leaf # Ok, now we want the DER bytes. 
certdata = Security.SecCertificateCopyData(leaf) assert certdata data_length = CoreFoundation.CFDataGetLength(certdata) data_buffer = CoreFoundation.CFDataGetBytePtr(certdata) der_bytes = ctypes.string_at(data_buffer, data_length) finally: if certdata: CoreFoundation.CFRelease(certdata) if trust: CoreFoundation.CFRelease(trust) return der_bytes def _reuse(self): self._makefile_refs += 1 def _drop(self): if self._makefile_refs < 1: self.close() else: self._makefile_refs -= 1 if _fileobject: # Platform-specific: Python 2 def makefile(self, mode, bufsize=-1): self._makefile_refs += 1 return _fileobject(self, mode, bufsize, close=True) else: # Platform-specific: Python 3 def makefile(self, mode="r", buffering=None, *args, **kwargs): # We disable buffering with SecureTransport because it conflicts with # the buffering that ST does internally (see issue #1153 for more). buffering = 0 return backport_makefile(self, mode, buffering, *args, **kwargs) WrappedSocket.makefile = makefile class SecureTransportContext(object): """ I am a wrapper class for the SecureTransport library, to translate the interface of the standard library ``SSLContext`` object to calls into SecureTransport. """ def __init__(self, protocol): self._min_version, self._max_version = _protocol_to_min_max[protocol] self._options = 0 self._verify = False self._trust_bundle = None self._client_cert = None self._client_key = None self._client_key_passphrase = None @property def check_hostname(self): """ SecureTransport cannot have its hostname checking disabled. For more, see the comment on getpeercert() in this file. """ return True @check_hostname.setter def check_hostname(self, value): """ SecureTransport cannot have its hostname checking disabled. For more, see the comment on getpeercert() in this file. """ pass @property def options(self): # TODO: Well, crap. # # So this is the bit of the code that is the most likely to cause us # trouble. Essentially we need to enumerate all of the SSL options that # users might want to use and try to see if we can sensibly translate # them, or whether we should just ignore them. return self._options @options.setter def options(self, value): # TODO: Update in line with above. self._options = value @property def verify_mode(self): return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE @verify_mode.setter def verify_mode(self, value): self._verify = True if value == ssl.CERT_REQUIRED else False def set_default_verify_paths(self): # So, this has to do something a bit weird. Specifically, what it does # is nothing. # # This means that, if we had previously had load_verify_locations # called, this does not undo that. We need to do that because it turns # out that the rest of the urllib3 code will attempt to load the # default verify paths if it hasn't been told about any paths, even if # the context itself was sometime earlier. We resolve that by just # ignoring it. pass def load_default_certs(self): return self.set_default_verify_paths() def set_ciphers(self, ciphers): # For now, we just require the default cipher string. if ciphers != util.ssl_.DEFAULT_CIPHERS: raise ValueError( "SecureTransport doesn't support custom cipher strings" ) def load_verify_locations(self, cafile=None, capath=None, cadata=None): # OK, we only really support cadata and cafile. 
if capath is not None: raise ValueError( "SecureTransport does not support cert directories" ) self._trust_bundle = cafile or cadata def load_cert_chain(self, certfile, keyfile=None, password=None): self._client_cert = certfile self._client_key = keyfile self._client_cert_passphrase = password def wrap_socket(self, sock, server_side=False, do_handshake_on_connect=True, suppress_ragged_eofs=True, server_hostname=None): # So, what do we do here? Firstly, we assert some properties. This is a # stripped down shim, so there is some functionality we don't support. # See PEP 543 for the real deal. assert not server_side assert do_handshake_on_connect assert suppress_ragged_eofs # Ok, we're good to go. Now we want to create the wrapped socket object # and store it in the appropriate place. wrapped_socket = WrappedSocket(sock) # Now we can handshake wrapped_socket.handshake( server_hostname, self._verify, self._trust_bundle, self._min_version, self._max_version, self._client_cert, self._client_key, self._client_key_passphrase ) return wrapped_socket
30309
36.652174
96
py
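A sketch (macOS 10.8+ only) that drives SecureTransportContext directly instead of going through inject_into_urllib3(). It assumes the module is importable as urllib3.contrib.securetransport; the host name and request bytes are placeholders, and with no explicit trust bundle SecureTransport falls back to the system trust store.

import socket
import ssl

from urllib3.contrib.securetransport import SecureTransportContext

# PROTOCOL_SSLv23 maps to a TLSv1..TLSv1.2 range via _protocol_to_min_max.
ctx = SecureTransportContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED

raw = socket.create_connection(('example.com', 443))   # placeholder host
tls = ctx.wrap_socket(raw, server_hostname='example.com')
tls.sendall(b'HEAD / HTTP/1.1\r\nHost: example.com\r\nConnection: close\r\n\r\n')
print(tls.recv(4096))
tls.close()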
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/contrib/socks.py
# -*- coding: utf-8 -*- """ This module contains provisional support for SOCKS proxies from within urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and SOCKS5. To enable its functionality, either install PySocks or install this module with the ``socks`` extra. The SOCKS implementation supports the full range of urllib3 features. It also supports the following SOCKS features: - SOCKS4 - SOCKS4a - SOCKS5 - Usernames and passwords for the SOCKS proxy Known Limitations: - Currently PySocks does not support contacting remote websites via literal IPv6 addresses. Any such connection attempt will fail. You must use a domain name. - Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any such connection attempt will fail. """ from __future__ import absolute_import try: import socks except ImportError: import warnings from ..exceptions import DependencyWarning warnings.warn(( 'SOCKS support in urllib3 requires the installation of optional ' 'dependencies: specifically, PySocks. For more information, see ' 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies' ), DependencyWarning ) raise from socket import error as SocketError, timeout as SocketTimeout from ..connection import ( HTTPConnection, HTTPSConnection ) from ..connectionpool import ( HTTPConnectionPool, HTTPSConnectionPool ) from ..exceptions import ConnectTimeoutError, NewConnectionError from ..poolmanager import PoolManager from ..util.url import parse_url try: import ssl except ImportError: ssl = None class SOCKSConnection(HTTPConnection): """ A plain-text HTTP connection that connects via a SOCKS proxy. """ def __init__(self, *args, **kwargs): self._socks_options = kwargs.pop('_socks_options') super(SOCKSConnection, self).__init__(*args, **kwargs) def _new_conn(self): """ Establish a new connection via the SOCKS proxy. """ extra_kw = {} if self.source_address: extra_kw['source_address'] = self.source_address if self.socket_options: extra_kw['socket_options'] = self.socket_options try: conn = socks.create_connection( (self.host, self.port), proxy_type=self._socks_options['socks_version'], proxy_addr=self._socks_options['proxy_host'], proxy_port=self._socks_options['proxy_port'], proxy_username=self._socks_options['username'], proxy_password=self._socks_options['password'], proxy_rdns=self._socks_options['rdns'], timeout=self.timeout, **extra_kw ) except SocketTimeout as e: raise ConnectTimeoutError( self, "Connection to %s timed out. (connect timeout=%s)" % (self.host, self.timeout)) except socks.ProxyError as e: # This is fragile as hell, but it seems to be the only way to raise # useful errors here. if e.socket_err: error = e.socket_err if isinstance(error, SocketTimeout): raise ConnectTimeoutError( self, "Connection to %s timed out. (connect timeout=%s)" % (self.host, self.timeout) ) else: raise NewConnectionError( self, "Failed to establish a new connection: %s" % error ) else: raise NewConnectionError( self, "Failed to establish a new connection: %s" % e ) except SocketError as e: # Defensive: PySocks should catch all these. raise NewConnectionError( self, "Failed to establish a new connection: %s" % e) return conn # We don't need to duplicate the Verified/Unverified distinction from # urllib3/connection.py here because the HTTPSConnection will already have been # correctly set to either the Verified or Unverified form by that module. This # means the SOCKSHTTPSConnection will automatically be the correct type. 
class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection): pass class SOCKSHTTPConnectionPool(HTTPConnectionPool): ConnectionCls = SOCKSConnection class SOCKSHTTPSConnectionPool(HTTPSConnectionPool): ConnectionCls = SOCKSHTTPSConnection class SOCKSProxyManager(PoolManager): """ A version of the urllib3 ProxyManager that routes connections via the defined SOCKS proxy. """ pool_classes_by_scheme = { 'http': SOCKSHTTPConnectionPool, 'https': SOCKSHTTPSConnectionPool, } def __init__(self, proxy_url, username=None, password=None, num_pools=10, headers=None, **connection_pool_kw): parsed = parse_url(proxy_url) if username is None and password is None and parsed.auth is not None: split = parsed.auth.split(':') if len(split) == 2: username, password = split if parsed.scheme == 'socks5': socks_version = socks.PROXY_TYPE_SOCKS5 rdns = False elif parsed.scheme == 'socks5h': socks_version = socks.PROXY_TYPE_SOCKS5 rdns = True elif parsed.scheme == 'socks4': socks_version = socks.PROXY_TYPE_SOCKS4 rdns = False elif parsed.scheme == 'socks4a': socks_version = socks.PROXY_TYPE_SOCKS4 rdns = True else: raise ValueError( "Unable to determine SOCKS version from %s" % proxy_url ) self.proxy_url = proxy_url socks_options = { 'socks_version': socks_version, 'proxy_host': parsed.host, 'proxy_port': parsed.port, 'username': username, 'password': password, 'rdns': rdns } connection_pool_kw['_socks_options'] = socks_options super(SOCKSProxyManager, self).__init__( num_pools, headers, **connection_pool_kw ) self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
6391
32.119171
79
py
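A short sketch of the SOCKSProxyManager defined above. It assumes PySocks is installed; the proxy address and target URL are placeholders, and the socks5h scheme asks the proxy to resolve DNS remotely.

from urllib3.contrib.socks import SOCKSProxyManager

# Route all requests through a SOCKS5 proxy with remote DNS resolution.
proxy = SOCKSProxyManager('socks5h://127.0.0.1:1080/')   # placeholder proxy
resp = proxy.request('GET', 'https://example.com/')      # placeholder URL
print(resp.status, len(resp.data))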
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/contrib/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/contrib/ntlmpool.py
""" NTLM authenticating pool, contributed by erikcederstran Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10 """ from __future__ import absolute_import from logging import getLogger from ntlm import ntlm from .. import HTTPSConnectionPool from ..packages.six.moves.http_client import HTTPSConnection log = getLogger(__name__) class NTLMConnectionPool(HTTPSConnectionPool): """ Implements an NTLM authentication version of an urllib3 connection pool """ scheme = 'https' def __init__(self, user, pw, authurl, *args, **kwargs): """ authurl is a random URL on the server that is protected by NTLM. user is the Windows user, probably in the DOMAIN\\username format. pw is the password for the user. """ super(NTLMConnectionPool, self).__init__(*args, **kwargs) self.authurl = authurl self.rawuser = user user_parts = user.split('\\', 1) self.domain = user_parts[0].upper() self.user = user_parts[1] self.pw = pw def _new_conn(self): # Performs the NTLM handshake that secures the connection. The socket # must be kept open while requests are performed. self.num_connections += 1 log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s', self.num_connections, self.host, self.authurl) headers = {} headers['Connection'] = 'Keep-Alive' req_header = 'Authorization' resp_header = 'www-authenticate' conn = HTTPSConnection(host=self.host, port=self.port) # Send negotiation message headers[req_header] = ( 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser)) log.debug('Request headers: %s', headers) conn.request('GET', self.authurl, None, headers) res = conn.getresponse() reshdr = dict(res.getheaders()) log.debug('Response status: %s %s', res.status, res.reason) log.debug('Response headers: %s', reshdr) log.debug('Response data: %s [...]', res.read(100)) # Remove the reference to the socket, so that it can not be closed by # the response object (we want to keep the socket open) res.fp = None # Server should respond with a challenge message auth_header_values = reshdr[resp_header].split(', ') auth_header_value = None for s in auth_header_values: if s[:5] == 'NTLM ': auth_header_value = s[5:] if auth_header_value is None: raise Exception('Unexpected %s response header: %s' % (resp_header, reshdr[resp_header])) # Send authentication message ServerChallenge, NegotiateFlags = \ ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value) auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags) headers[req_header] = 'NTLM %s' % auth_msg log.debug('Request headers: %s', headers) conn.request('GET', self.authurl, None, headers) res = conn.getresponse() log.debug('Response status: %s %s', res.status, res.reason) log.debug('Response headers: %s', dict(res.getheaders())) log.debug('Response data: %s [...]', res.read()[:100]) if res.status != 200: if res.status == 401: raise Exception('Server rejected request: wrong ' 'username or password') raise Exception('Wrong server response: %s %s' % (res.status, res.reason)) res.fp = None log.debug('Connection established') return conn def urlopen(self, method, url, body=None, headers=None, retries=3, redirect=True, assert_same_host=True): if headers is None: headers = {} headers['Connection'] = 'Keep-Alive' return super(NTLMConnectionPool, self).urlopen(method, url, body, headers, retries, redirect, assert_same_host)
4478
38.637168
77
py
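A sketch of the NTLM pool above. The domain, credentials, host and paths are placeholders, and the example assumes the python-ntlm package that provides the ntlm module is installed.

from urllib3.contrib.ntlmpool import NTLMConnectionPool

# Open an NTLM-authenticated HTTPS pool and fetch a protected resource.
pool = NTLMConnectionPool(user='EXAMPLE\\alice', pw='s3cret',
                          authurl='/protected/ping',
                          host='intranet.example.com', port=443)
resp = pool.urlopen('GET', '/protected/report')
print(resp.status)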
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/contrib/_securetransport/bindings.py
""" This module uses ctypes to bind a whole bunch of functions and constants from SecureTransport. The goal here is to provide the low-level API to SecureTransport. These are essentially the C-level functions and constants, and they're pretty gross to work with. This code is a bastardised version of the code found in Will Bond's oscrypto library. An enormous debt is owed to him for blazing this trail for us. For that reason, this code should be considered to be covered both by urllib3's license and by oscrypto's: Copyright (c) 2015-2016 Will Bond <[email protected]> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import absolute_import import platform from ctypes.util import find_library from ctypes import ( c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long, c_bool ) from ctypes import CDLL, POINTER, CFUNCTYPE security_path = find_library('Security') if not security_path: raise ImportError('The library Security could not be found') core_foundation_path = find_library('CoreFoundation') if not core_foundation_path: raise ImportError('The library CoreFoundation could not be found') version = platform.mac_ver()[0] version_info = tuple(map(int, version.split('.'))) if version_info < (10, 8): raise OSError( 'Only OS X 10.8 and newer are supported, not %s.%s' % ( version_info[0], version_info[1] ) ) Security = CDLL(security_path, use_errno=True) CoreFoundation = CDLL(core_foundation_path, use_errno=True) Boolean = c_bool CFIndex = c_long CFStringEncoding = c_uint32 CFData = c_void_p CFString = c_void_p CFArray = c_void_p CFMutableArray = c_void_p CFDictionary = c_void_p CFError = c_void_p CFType = c_void_p CFTypeID = c_ulong CFTypeRef = POINTER(CFType) CFAllocatorRef = c_void_p OSStatus = c_int32 CFDataRef = POINTER(CFData) CFStringRef = POINTER(CFString) CFArrayRef = POINTER(CFArray) CFMutableArrayRef = POINTER(CFMutableArray) CFDictionaryRef = POINTER(CFDictionary) CFArrayCallBacks = c_void_p CFDictionaryKeyCallBacks = c_void_p CFDictionaryValueCallBacks = c_void_p SecCertificateRef = POINTER(c_void_p) SecExternalFormat = c_uint32 SecExternalItemType = c_uint32 SecIdentityRef = POINTER(c_void_p) SecItemImportExportFlags = c_uint32 SecItemImportExportKeyParameters = c_void_p SecKeychainRef = POINTER(c_void_p) SSLProtocol = c_uint32 SSLCipherSuite = c_uint32 SSLContextRef = POINTER(c_void_p) SecTrustRef = POINTER(c_void_p) SSLConnectionRef = c_uint32 SecTrustResultType = c_uint32 SecTrustOptionFlags = c_uint32 SSLProtocolSide = c_uint32 SSLConnectionType = c_uint32 SSLSessionOption = c_uint32 try: 
Security.SecItemImport.argtypes = [ CFDataRef, CFStringRef, POINTER(SecExternalFormat), POINTER(SecExternalItemType), SecItemImportExportFlags, POINTER(SecItemImportExportKeyParameters), SecKeychainRef, POINTER(CFArrayRef), ] Security.SecItemImport.restype = OSStatus Security.SecCertificateGetTypeID.argtypes = [] Security.SecCertificateGetTypeID.restype = CFTypeID Security.SecIdentityGetTypeID.argtypes = [] Security.SecIdentityGetTypeID.restype = CFTypeID Security.SecKeyGetTypeID.argtypes = [] Security.SecKeyGetTypeID.restype = CFTypeID Security.SecCertificateCreateWithData.argtypes = [ CFAllocatorRef, CFDataRef ] Security.SecCertificateCreateWithData.restype = SecCertificateRef Security.SecCertificateCopyData.argtypes = [ SecCertificateRef ] Security.SecCertificateCopyData.restype = CFDataRef Security.SecCopyErrorMessageString.argtypes = [ OSStatus, c_void_p ] Security.SecCopyErrorMessageString.restype = CFStringRef Security.SecIdentityCreateWithCertificate.argtypes = [ CFTypeRef, SecCertificateRef, POINTER(SecIdentityRef) ] Security.SecIdentityCreateWithCertificate.restype = OSStatus Security.SecKeychainCreate.argtypes = [ c_char_p, c_uint32, c_void_p, Boolean, c_void_p, POINTER(SecKeychainRef) ] Security.SecKeychainCreate.restype = OSStatus Security.SecKeychainDelete.argtypes = [ SecKeychainRef ] Security.SecKeychainDelete.restype = OSStatus Security.SecPKCS12Import.argtypes = [ CFDataRef, CFDictionaryRef, POINTER(CFArrayRef) ] Security.SecPKCS12Import.restype = OSStatus SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t)) SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t)) Security.SSLSetIOFuncs.argtypes = [ SSLContextRef, SSLReadFunc, SSLWriteFunc ] Security.SSLSetIOFuncs.restype = OSStatus Security.SSLSetPeerID.argtypes = [ SSLContextRef, c_char_p, c_size_t ] Security.SSLSetPeerID.restype = OSStatus Security.SSLSetCertificate.argtypes = [ SSLContextRef, CFArrayRef ] Security.SSLSetCertificate.restype = OSStatus Security.SSLSetCertificateAuthorities.argtypes = [ SSLContextRef, CFTypeRef, Boolean ] Security.SSLSetCertificateAuthorities.restype = OSStatus Security.SSLSetConnection.argtypes = [ SSLContextRef, SSLConnectionRef ] Security.SSLSetConnection.restype = OSStatus Security.SSLSetPeerDomainName.argtypes = [ SSLContextRef, c_char_p, c_size_t ] Security.SSLSetPeerDomainName.restype = OSStatus Security.SSLHandshake.argtypes = [ SSLContextRef ] Security.SSLHandshake.restype = OSStatus Security.SSLRead.argtypes = [ SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t) ] Security.SSLRead.restype = OSStatus Security.SSLWrite.argtypes = [ SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t) ] Security.SSLWrite.restype = OSStatus Security.SSLClose.argtypes = [ SSLContextRef ] Security.SSLClose.restype = OSStatus Security.SSLGetNumberSupportedCiphers.argtypes = [ SSLContextRef, POINTER(c_size_t) ] Security.SSLGetNumberSupportedCiphers.restype = OSStatus Security.SSLGetSupportedCiphers.argtypes = [ SSLContextRef, POINTER(SSLCipherSuite), POINTER(c_size_t) ] Security.SSLGetSupportedCiphers.restype = OSStatus Security.SSLSetEnabledCiphers.argtypes = [ SSLContextRef, POINTER(SSLCipherSuite), c_size_t ] Security.SSLSetEnabledCiphers.restype = OSStatus Security.SSLGetNumberEnabledCiphers.argtype = [ SSLContextRef, POINTER(c_size_t) ] Security.SSLGetNumberEnabledCiphers.restype = OSStatus Security.SSLGetEnabledCiphers.argtypes = [ SSLContextRef, POINTER(SSLCipherSuite), POINTER(c_size_t) ] Security.SSLGetEnabledCiphers.restype = 
OSStatus Security.SSLGetNegotiatedCipher.argtypes = [ SSLContextRef, POINTER(SSLCipherSuite) ] Security.SSLGetNegotiatedCipher.restype = OSStatus Security.SSLGetNegotiatedProtocolVersion.argtypes = [ SSLContextRef, POINTER(SSLProtocol) ] Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus Security.SSLCopyPeerTrust.argtypes = [ SSLContextRef, POINTER(SecTrustRef) ] Security.SSLCopyPeerTrust.restype = OSStatus Security.SecTrustSetAnchorCertificates.argtypes = [ SecTrustRef, CFArrayRef ] Security.SecTrustSetAnchorCertificates.restype = OSStatus Security.SecTrustSetAnchorCertificatesOnly.argstypes = [ SecTrustRef, Boolean ] Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus Security.SecTrustEvaluate.argtypes = [ SecTrustRef, POINTER(SecTrustResultType) ] Security.SecTrustEvaluate.restype = OSStatus Security.SecTrustGetCertificateCount.argtypes = [ SecTrustRef ] Security.SecTrustGetCertificateCount.restype = CFIndex Security.SecTrustGetCertificateAtIndex.argtypes = [ SecTrustRef, CFIndex ] Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef Security.SSLCreateContext.argtypes = [ CFAllocatorRef, SSLProtocolSide, SSLConnectionType ] Security.SSLCreateContext.restype = SSLContextRef Security.SSLSetSessionOption.argtypes = [ SSLContextRef, SSLSessionOption, Boolean ] Security.SSLSetSessionOption.restype = OSStatus Security.SSLSetProtocolVersionMin.argtypes = [ SSLContextRef, SSLProtocol ] Security.SSLSetProtocolVersionMin.restype = OSStatus Security.SSLSetProtocolVersionMax.argtypes = [ SSLContextRef, SSLProtocol ] Security.SSLSetProtocolVersionMax.restype = OSStatus Security.SecCopyErrorMessageString.argtypes = [ OSStatus, c_void_p ] Security.SecCopyErrorMessageString.restype = CFStringRef Security.SSLReadFunc = SSLReadFunc Security.SSLWriteFunc = SSLWriteFunc Security.SSLContextRef = SSLContextRef Security.SSLProtocol = SSLProtocol Security.SSLCipherSuite = SSLCipherSuite Security.SecIdentityRef = SecIdentityRef Security.SecKeychainRef = SecKeychainRef Security.SecTrustRef = SecTrustRef Security.SecTrustResultType = SecTrustResultType Security.SecExternalFormat = SecExternalFormat Security.OSStatus = OSStatus Security.kSecImportExportPassphrase = CFStringRef.in_dll( Security, 'kSecImportExportPassphrase' ) Security.kSecImportItemIdentity = CFStringRef.in_dll( Security, 'kSecImportItemIdentity' ) # CoreFoundation time! 
CoreFoundation.CFRetain.argtypes = [ CFTypeRef ] CoreFoundation.CFRetain.restype = CFTypeRef CoreFoundation.CFRelease.argtypes = [ CFTypeRef ] CoreFoundation.CFRelease.restype = None CoreFoundation.CFGetTypeID.argtypes = [ CFTypeRef ] CoreFoundation.CFGetTypeID.restype = CFTypeID CoreFoundation.CFStringCreateWithCString.argtypes = [ CFAllocatorRef, c_char_p, CFStringEncoding ] CoreFoundation.CFStringCreateWithCString.restype = CFStringRef CoreFoundation.CFStringGetCStringPtr.argtypes = [ CFStringRef, CFStringEncoding ] CoreFoundation.CFStringGetCStringPtr.restype = c_char_p CoreFoundation.CFStringGetCString.argtypes = [ CFStringRef, c_char_p, CFIndex, CFStringEncoding ] CoreFoundation.CFStringGetCString.restype = c_bool CoreFoundation.CFDataCreate.argtypes = [ CFAllocatorRef, c_char_p, CFIndex ] CoreFoundation.CFDataCreate.restype = CFDataRef CoreFoundation.CFDataGetLength.argtypes = [ CFDataRef ] CoreFoundation.CFDataGetLength.restype = CFIndex CoreFoundation.CFDataGetBytePtr.argtypes = [ CFDataRef ] CoreFoundation.CFDataGetBytePtr.restype = c_void_p CoreFoundation.CFDictionaryCreate.argtypes = [ CFAllocatorRef, POINTER(CFTypeRef), POINTER(CFTypeRef), CFIndex, CFDictionaryKeyCallBacks, CFDictionaryValueCallBacks ] CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef CoreFoundation.CFDictionaryGetValue.argtypes = [ CFDictionaryRef, CFTypeRef ] CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef CoreFoundation.CFArrayCreate.argtypes = [ CFAllocatorRef, POINTER(CFTypeRef), CFIndex, CFArrayCallBacks, ] CoreFoundation.CFArrayCreate.restype = CFArrayRef CoreFoundation.CFArrayCreateMutable.argtypes = [ CFAllocatorRef, CFIndex, CFArrayCallBacks ] CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef CoreFoundation.CFArrayAppendValue.argtypes = [ CFMutableArrayRef, c_void_p ] CoreFoundation.CFArrayAppendValue.restype = None CoreFoundation.CFArrayGetCount.argtypes = [ CFArrayRef ] CoreFoundation.CFArrayGetCount.restype = CFIndex CoreFoundation.CFArrayGetValueAtIndex.argtypes = [ CFArrayRef, CFIndex ] CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll( CoreFoundation, 'kCFAllocatorDefault' ) CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks') CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll( CoreFoundation, 'kCFTypeDictionaryKeyCallBacks' ) CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll( CoreFoundation, 'kCFTypeDictionaryValueCallBacks' ) CoreFoundation.CFTypeRef = CFTypeRef CoreFoundation.CFArrayRef = CFArrayRef CoreFoundation.CFStringRef = CFStringRef CoreFoundation.CFDictionaryRef = CFDictionaryRef except (AttributeError): raise ImportError('Error initializing ctypes') class CFConst(object): """ A class object that acts as essentially a namespace for CoreFoundation constants. """ kCFStringEncodingUTF8 = CFStringEncoding(0x08000100) class SecurityConst(object): """ A class object that acts as essentially a namespace for Security constants. """ kSSLSessionOptionBreakOnServerAuth = 0 kSSLProtocol2 = 1 kSSLProtocol3 = 2 kTLSProtocol1 = 4 kTLSProtocol11 = 7 kTLSProtocol12 = 8 kSSLClientSide = 1 kSSLStreamType = 0 kSecFormatPEMSequence = 10 kSecTrustResultInvalid = 0 kSecTrustResultProceed = 1 # This gap is present on purpose: this was kSecTrustResultConfirm, which # is deprecated. 
kSecTrustResultDeny = 3 kSecTrustResultUnspecified = 4 kSecTrustResultRecoverableTrustFailure = 5 kSecTrustResultFatalTrustFailure = 6 kSecTrustResultOtherError = 7 errSSLProtocol = -9800 errSSLWouldBlock = -9803 errSSLClosedGraceful = -9805 errSSLClosedNoNotify = -9816 errSSLClosedAbort = -9806 errSSLXCertChainInvalid = -9807 errSSLCrypto = -9809 errSSLInternal = -9810 errSSLCertExpired = -9814 errSSLCertNotYetValid = -9815 errSSLUnknownRootCert = -9812 errSSLNoRootCert = -9813 errSSLHostNameMismatch = -9843 errSSLPeerHandshakeFail = -9824 errSSLPeerUserCancelled = -9839 errSSLWeakPeerEphemeralDHKey = -9850 errSSLServerAuthCompleted = -9841 errSSLRecordOverflow = -9847 errSecVerifyFailed = -67808 errSecNoTrustSettings = -25263 errSecItemNotFound = -25300 errSecInvalidTrustSettings = -25262 # Cipher suites. We only pick the ones our default cipher string allows. TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030 TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3 TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2 TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024 TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028 TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014 TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039 TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038 TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023 TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027 TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009 TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013 TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067 TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040 TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033 TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032 TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035 TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F TLS_AES_128_GCM_SHA256 = 0x1301 TLS_AES_256_GCM_SHA384 = 0x1302 TLS_CHACHA20_POLY1305_SHA256 = 0x1303
17560
28.563973
99
py
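A sketch (macOS only) of using the raw ctypes bindings above to turn an OSStatus code into a readable message, roughly what the low-level helper module does; the status value is just an example.

import ctypes

from urllib3.contrib._securetransport.bindings import (
    CFConst, CoreFoundation, Security, SecurityConst
)

status = SecurityConst.errSSLClosedAbort          # example status code
cf_str = Security.SecCopyErrorMessageString(status, None)
buf = ctypes.create_string_buffer(1024)
if CoreFoundation.CFStringGetCString(cf_str, buf, 1024,
                                     CFConst.kCFStringEncodingUTF8):
    print(buf.value.decode('utf-8'))
CoreFoundation.CFRelease(cf_str)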
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/contrib/_securetransport/low_level.py
""" Low-level helpers for the SecureTransport bindings. These are Python functions that are not directly related to the high-level APIs but are necessary to get them to work. They include a whole bunch of low-level CoreFoundation messing about and memory management. The concerns in this module are almost entirely about trying to avoid memory leaks and providing appropriate and useful assistance to the higher-level code. """ import base64 import ctypes import itertools import re import os import ssl import tempfile from .bindings import Security, CoreFoundation, CFConst # This regular expression is used to grab PEM data out of a PEM bundle. _PEM_CERTS_RE = re.compile( b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL ) def _cf_data_from_bytes(bytestring): """ Given a bytestring, create a CFData object from it. This CFData object must be CFReleased by the caller. """ return CoreFoundation.CFDataCreate( CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring) ) def _cf_dictionary_from_tuples(tuples): """ Given a list of Python tuples, create an associated CFDictionary. """ dictionary_size = len(tuples) # We need to get the dictionary keys and values out in the same order. keys = (t[0] for t in tuples) values = (t[1] for t in tuples) cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys) cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values) return CoreFoundation.CFDictionaryCreate( CoreFoundation.kCFAllocatorDefault, cf_keys, cf_values, dictionary_size, CoreFoundation.kCFTypeDictionaryKeyCallBacks, CoreFoundation.kCFTypeDictionaryValueCallBacks, ) def _cf_string_to_unicode(value): """ Creates a Unicode string from a CFString object. Used entirely for error reporting. Yes, it annoys me quite a lot that this function is this complex. """ value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p)) string = CoreFoundation.CFStringGetCStringPtr( value_as_void_p, CFConst.kCFStringEncodingUTF8 ) if string is None: buffer = ctypes.create_string_buffer(1024) result = CoreFoundation.CFStringGetCString( value_as_void_p, buffer, 1024, CFConst.kCFStringEncodingUTF8 ) if not result: raise OSError('Error copying C string from CFStringRef') string = buffer.value if string is not None: string = string.decode('utf-8') return string def _assert_no_error(error, exception_class=None): """ Checks the return code and throws an exception if there is an error to report """ if error == 0: return cf_error_string = Security.SecCopyErrorMessageString(error, None) output = _cf_string_to_unicode(cf_error_string) CoreFoundation.CFRelease(cf_error_string) if output is None or output == u'': output = u'OSStatus %s' % error if exception_class is None: exception_class = ssl.SSLError raise exception_class(output) def _cert_array_from_pem(pem_bundle): """ Given a bundle of certs in PEM format, turns them into a CFArray of certs that can be used to validate a cert chain. """ # Normalize the PEM bundle's line endings. 
pem_bundle = pem_bundle.replace(b"\r\n", b"\n") der_certs = [ base64.b64decode(match.group(1)) for match in _PEM_CERTS_RE.finditer(pem_bundle) ] if not der_certs: raise ssl.SSLError("No root certificates specified") cert_array = CoreFoundation.CFArrayCreateMutable( CoreFoundation.kCFAllocatorDefault, 0, ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks) ) if not cert_array: raise ssl.SSLError("Unable to allocate memory!") try: for der_bytes in der_certs: certdata = _cf_data_from_bytes(der_bytes) if not certdata: raise ssl.SSLError("Unable to allocate memory!") cert = Security.SecCertificateCreateWithData( CoreFoundation.kCFAllocatorDefault, certdata ) CoreFoundation.CFRelease(certdata) if not cert: raise ssl.SSLError("Unable to build cert object!") CoreFoundation.CFArrayAppendValue(cert_array, cert) CoreFoundation.CFRelease(cert) except Exception: # We need to free the array before the exception bubbles further. # We only want to do that if an error occurs: otherwise, the caller # should free. CoreFoundation.CFRelease(cert_array) return cert_array def _is_cert(item): """ Returns True if a given CFTypeRef is a certificate. """ expected = Security.SecCertificateGetTypeID() return CoreFoundation.CFGetTypeID(item) == expected def _is_identity(item): """ Returns True if a given CFTypeRef is an identity. """ expected = Security.SecIdentityGetTypeID() return CoreFoundation.CFGetTypeID(item) == expected def _temporary_keychain(): """ This function creates a temporary Mac keychain that we can use to work with credentials. This keychain uses a one-time password and a temporary file to store the data. We expect to have one keychain per socket. The returned SecKeychainRef must be freed by the caller, including calling SecKeychainDelete. Returns a tuple of the SecKeychainRef and the path to the temporary directory that contains it. """ # Unfortunately, SecKeychainCreate requires a path to a keychain. This # means we cannot use mkstemp to use a generic temporary file. Instead, # we're going to create a temporary directory and a filename to use there. # This filename will be 8 random bytes expanded into base64. We also need # some random bytes to password-protect the keychain we're creating, so we # ask for 40 random bytes. random_bytes = os.urandom(40) filename = base64.b16encode(random_bytes[:8]).decode('utf-8') password = base64.b16encode(random_bytes[8:]) # Must be valid UTF-8 tempdirectory = tempfile.mkdtemp() keychain_path = os.path.join(tempdirectory, filename).encode('utf-8') # We now want to create the keychain itself. keychain = Security.SecKeychainRef() status = Security.SecKeychainCreate( keychain_path, len(password), password, False, None, ctypes.byref(keychain) ) _assert_no_error(status) # Having created the keychain, we want to pass it off to the caller. return keychain, tempdirectory def _load_items_from_file(keychain, path): """ Given a single file, loads all the trust objects from it into arrays and the keychain. Returns a tuple of lists: the first list is a list of identities, the second a list of certs. 
""" certificates = [] identities = [] result_array = None with open(path, 'rb') as f: raw_filedata = f.read() try: filedata = CoreFoundation.CFDataCreate( CoreFoundation.kCFAllocatorDefault, raw_filedata, len(raw_filedata) ) result_array = CoreFoundation.CFArrayRef() result = Security.SecItemImport( filedata, # cert data None, # Filename, leaving it out for now None, # What the type of the file is, we don't care None, # what's in the file, we don't care 0, # import flags None, # key params, can include passphrase in the future keychain, # The keychain to insert into ctypes.byref(result_array) # Results ) _assert_no_error(result) # A CFArray is not very useful to us as an intermediary # representation, so we are going to extract the objects we want # and then free the array. We don't need to keep hold of keys: the # keychain already has them! result_count = CoreFoundation.CFArrayGetCount(result_array) for index in range(result_count): item = CoreFoundation.CFArrayGetValueAtIndex( result_array, index ) item = ctypes.cast(item, CoreFoundation.CFTypeRef) if _is_cert(item): CoreFoundation.CFRetain(item) certificates.append(item) elif _is_identity(item): CoreFoundation.CFRetain(item) identities.append(item) finally: if result_array: CoreFoundation.CFRelease(result_array) CoreFoundation.CFRelease(filedata) return (identities, certificates) def _load_client_cert_chain(keychain, *paths): """ Load certificates and maybe keys from a number of files. Has the end goal of returning a CFArray containing one SecIdentityRef, and then zero or more SecCertificateRef objects, suitable for use as a client certificate trust chain. """ # Ok, the strategy. # # This relies on knowing that macOS will not give you a SecIdentityRef # unless you have imported a key into a keychain. This is a somewhat # artificial limitation of macOS (for example, it doesn't necessarily # affect iOS), but there is nothing inside Security.framework that lets you # get a SecIdentityRef without having a key in a keychain. # # So the policy here is we take all the files and iterate them in order. # Each one will use SecItemImport to have one or more objects loaded from # it. We will also point at a keychain that macOS can use to work with the # private key. # # Once we have all the objects, we'll check what we actually have. If we # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise, # we'll take the first certificate (which we assume to be our leaf) and # ask the keychain to give us a SecIdentityRef with that cert's associated # key. # # We'll then return a CFArray containing the trust chain: one # SecIdentityRef and then zero-or-more SecCertificateRef objects. The # responsibility for freeing this CFArray will be with the caller. This # CFArray must remain alive for the entire connection, so in practice it # will be stored with a single SSLSocket, along with the reference to the # keychain. certificates = [] identities = [] # Filter out bad paths. paths = (path for path in paths if path) try: for file_path in paths: new_identities, new_certs = _load_items_from_file( keychain, file_path ) identities.extend(new_identities) certificates.extend(new_certs) # Ok, we have everything. The question is: do we have an identity? If # not, we want to grab one from the first cert we have. 
if not identities: new_identity = Security.SecIdentityRef() status = Security.SecIdentityCreateWithCertificate( keychain, certificates[0], ctypes.byref(new_identity) ) _assert_no_error(status) identities.append(new_identity) # We now want to release the original certificate, as we no longer # need it. CoreFoundation.CFRelease(certificates.pop(0)) # We now need to build a new CFArray that holds the trust chain. trust_chain = CoreFoundation.CFArrayCreateMutable( CoreFoundation.kCFAllocatorDefault, 0, ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks), ) for item in itertools.chain(identities, certificates): # ArrayAppendValue does a CFRetain on the item. That's fine, # because the finally block will release our other refs to them. CoreFoundation.CFArrayAppendValue(trust_chain, item) return trust_chain finally: for obj in itertools.chain(identities, certificates): CoreFoundation.CFRelease(obj)
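# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of urllib3): the CoreFoundation/Security calls
# above only work on macOS, but the first step of _cert_array_from_pem() --
# pulling the base64 payloads out of a PEM bundle and decoding them to DER --
# is plain Python and can be exercised anywhere. The "certificate" below is a
# fabricated placeholder, not real DER data; only names defined above in this
# module are used.
if __name__ == "__main__":
    fake_payload = base64.b64encode(b"not-a-real-der-certificate").decode("ascii")
    pem_bundle = (
        "-----BEGIN CERTIFICATE-----\n"
        + fake_payload
        + "\n-----END CERTIFICATE-----\n"
    ).encode("ascii")

    # Mirror the normalisation and extraction steps performed by
    # _cert_array_from_pem() before each blob is handed to CoreFoundation.
    pem_bundle = pem_bundle.replace(b"\r\n", b"\n")
    der_certs = [
        base64.b64decode(match.group(1))
        for match in _PEM_CERTS_RE.finditer(pem_bundle)
    ]
    print("extracted %d DER blob(s), first one is %d bytes"
          % (len(der_certs), len(der_certs[0])))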
12,162
34.051873
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/urllib3/contrib/_securetransport/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/kernel_approximation.py
""" The :mod:`sklearn.kernel_approximation` module implements several approximate kernel feature maps base on Fourier transforms. """ # Author: Andreas Mueller <[email protected]> # # License: BSD 3 clause import warnings import numpy as np import scipy.sparse as sp from scipy.linalg import svd from .base import BaseEstimator from .base import TransformerMixin from .utils import check_array, check_random_state, as_float_array from .utils.extmath import safe_sparse_dot from .utils.validation import check_is_fitted from .metrics.pairwise import pairwise_kernels, KERNEL_PARAMS class RBFSampler(BaseEstimator, TransformerMixin): """Approximates feature map of an RBF kernel by Monte Carlo approximation of its Fourier transform. It implements a variant of Random Kitchen Sinks.[1] Read more in the :ref:`User Guide <rbf_kernel_approx>`. Parameters ---------- gamma : float Parameter of RBF kernel: exp(-gamma * x^2) n_components : int Number of Monte Carlo samples per original feature. Equals the dimensionality of the computed feature space. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Notes ----- See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and Benjamin Recht. [1] "Weighted Sums of Random Kitchen Sinks: Replacing minimization with randomization in learning" by A. Rahimi and Benjamin Recht. (http://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf) """ def __init__(self, gamma=1., n_components=100, random_state=None): self.gamma = gamma self.n_components = n_components self.random_state = random_state def fit(self, X, y=None): """Fit the model with X. Samples random projection according to n_features. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data, where n_samples in the number of samples and n_features is the number of features. Returns ------- self : object Returns the transformer. """ X = check_array(X, accept_sparse='csr') random_state = check_random_state(self.random_state) n_features = X.shape[1] self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal( size=(n_features, self.n_components))) self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components) return self def transform(self, X): """Apply the approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data, where n_samples in the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) """ check_is_fitted(self, 'random_weights_') X = check_array(X, accept_sparse='csr') projection = safe_sparse_dot(X, self.random_weights_) projection += self.random_offset_ np.cos(projection, projection) projection *= np.sqrt(2.) / np.sqrt(self.n_components) return projection class SkewedChi2Sampler(BaseEstimator, TransformerMixin): """Approximates feature map of the "skewed chi-squared" kernel by Monte Carlo approximation of its Fourier transform. Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`. Parameters ---------- skewedness : float "skewedness" parameter of the kernel. Needs to be cross-validated. n_components : int number of Monte Carlo samples per original feature. Equals the dimensionality of the computed feature space. 
random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. References ---------- See "Random Fourier Approximations for Skewed Multiplicative Histogram Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu. See also -------- AdditiveChi2Sampler : A different approach for approximating an additive variant of the chi squared kernel. sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel. """ def __init__(self, skewedness=1., n_components=100, random_state=None): self.skewedness = skewedness self.n_components = n_components self.random_state = random_state def fit(self, X, y=None): """Fit the model with X. Samples random projection according to n_features. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples in the number of samples and n_features is the number of features. Returns ------- self : object Returns the transformer. """ X = check_array(X) random_state = check_random_state(self.random_state) n_features = X.shape[1] uniform = random_state.uniform(size=(n_features, self.n_components)) # transform by inverse CDF of sech self.random_weights_ = (1. / np.pi * np.log(np.tan(np.pi / 2. * uniform))) self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components) return self def transform(self, X): """Apply the approximate feature map to X. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples in the number of samples and n_features is the number of features. All values of X must be strictly greater than "-skewedness". Returns ------- X_new : array-like, shape (n_samples, n_components) """ check_is_fitted(self, 'random_weights_') X = as_float_array(X, copy=True) X = check_array(X, copy=False) if (X <= -self.skewedness).any(): raise ValueError("X may not contain entries smaller than" " -skewedness.") X += self.skewedness np.log(X, X) projection = safe_sparse_dot(X, self.random_weights_) projection += self.random_offset_ np.cos(projection, projection) projection *= np.sqrt(2.) / np.sqrt(self.n_components) return projection class AdditiveChi2Sampler(BaseEstimator, TransformerMixin): """Approximate feature map for additive chi2 kernel. Uses sampling the fourier transform of the kernel characteristic at regular intervals. Since the kernel that is to be approximated is additive, the components of the input vectors can be treated separately. Each entry in the original space is transformed into 2*sample_steps+1 features, where sample_steps is a parameter of the method. Typical values of sample_steps include 1, 2 and 3. Optimal choices for the sampling interval for certain data ranges can be computed (see the reference). The default values should be reasonable. Read more in the :ref:`User Guide <additive_chi_kernel_approx>`. Parameters ---------- sample_steps : int, optional Gives the number of (complex) sampling points. sample_interval : float, optional Sampling interval. Must be specified when sample_steps not in {1,2,3}. Notes ----- This estimator approximates a slightly different version of the additive chi squared kernel then ``metric.additive_chi2`` computes. See also -------- SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of the chi squared kernel. 
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel. sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi squared kernel. References ---------- See `"Efficient additive kernels via explicit feature maps" <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_ A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence, 2011 """ def __init__(self, sample_steps=2, sample_interval=None): self.sample_steps = sample_steps self.sample_interval = sample_interval def fit(self, X, y=None): """Set parameters.""" X = check_array(X, accept_sparse='csr') if self.sample_interval is None: # See reference, figure 2 c) if self.sample_steps == 1: self.sample_interval_ = 0.8 elif self.sample_steps == 2: self.sample_interval_ = 0.5 elif self.sample_steps == 3: self.sample_interval_ = 0.4 else: raise ValueError("If sample_steps is not in [1, 2, 3]," " you need to provide sample_interval") else: self.sample_interval_ = self.sample_interval return self def transform(self, X): """Apply approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape = (n_samples, n_features) Returns ------- X_new : {array, sparse matrix}, \ shape = (n_samples, n_features * (2*sample_steps + 1)) Whether the return value is an array of sparse matrix depends on the type of the input X. """ msg = ("%(name)s is not fitted. Call fit to set the parameters before" " calling transform") check_is_fitted(self, "sample_interval_", msg=msg) X = check_array(X, accept_sparse='csr') sparse = sp.issparse(X) # check if X has negative values. Doesn't play well with np.log. if ((X.data if sparse else X) < 0).any(): raise ValueError("Entries of X must be non-negative.") # zeroth component # 1/cosh = sech # cosh(0) = 1.0 transf = self._transform_sparse if sparse else self._transform_dense return transf(X) def _transform_dense(self, X): non_zero = (X != 0.0) X_nz = X[non_zero] X_step = np.zeros_like(X) X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_) X_new = [X_step] log_step_nz = self.sample_interval_ * np.log(X_nz) step_nz = 2 * X_nz * self.sample_interval_ for j in range(1, self.sample_steps): factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * self.sample_interval_)) X_step = np.zeros_like(X) X_step[non_zero] = factor_nz * np.cos(j * log_step_nz) X_new.append(X_step) X_step = np.zeros_like(X) X_step[non_zero] = factor_nz * np.sin(j * log_step_nz) X_new.append(X_step) return np.hstack(X_new) def _transform_sparse(self, X): indices = X.indices.copy() indptr = X.indptr.copy() data_step = np.sqrt(X.data * self.sample_interval_) X_step = sp.csr_matrix((data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False) X_new = [X_step] log_step_nz = self.sample_interval_ * np.log(X.data) step_nz = 2 * X.data * self.sample_interval_ for j in range(1, self.sample_steps): factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * self.sample_interval_)) data_step = factor_nz * np.cos(j * log_step_nz) X_step = sp.csr_matrix((data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False) X_new.append(X_step) data_step = factor_nz * np.sin(j * log_step_nz) X_step = sp.csr_matrix((data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False) X_new.append(X_step) return sp.hstack(X_new) class Nystroem(BaseEstimator, TransformerMixin): """Approximate a kernel map using a subset of the training data. Constructs an approximate feature map for an arbitrary kernel using a subset of the data as basis. Read more in the :ref:`User Guide <nystroem_kernel_approx>`. 
Parameters ---------- kernel : string or callable, default="rbf" Kernel map to be approximated. A callable should accept two arguments and the keyword arguments passed to this object as kernel_params, and should return a floating point number. n_components : int Number of features to construct. How many data points will be used to construct the mapping. gamma : float, default=None Gamma parameter for the RBF, laplacian, polynomial, exponential chi2 and sigmoid kernels. Interpretation of the default value is left to the kernel; see the documentation for sklearn.metrics.pairwise. Ignored by other kernels. degree : float, default=None Degree of the polynomial kernel. Ignored by other kernels. coef0 : float, default=None Zero coefficient for polynomial and sigmoid kernels. Ignored by other kernels. kernel_params : mapping of string to any, optional Additional parameters (keyword arguments) for kernel function passed as callable object. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- components_ : array, shape (n_components, n_features) Subset of training points used to construct the feature map. component_indices_ : array, shape (n_components) Indices of ``components_`` in the training set. normalization_ : array, shape (n_components, n_components) Normalization matrix needed for embedding. Square root of the kernel matrix on ``components_``. References ---------- * Williams, C.K.I. and Seeger, M. "Using the Nystroem method to speed up kernel machines", Advances in neural information processing systems 2001 * T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou "Nystroem Method vs Random Fourier Features: A Theoretical and Empirical Comparison", Advances in Neural Information Processing Systems 2012 See also -------- RBFSampler : An approximation to the RBF kernel using random Fourier features. sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels. """ def __init__(self, kernel="rbf", gamma=None, coef0=None, degree=None, kernel_params=None, n_components=100, random_state=None): self.kernel = kernel self.gamma = gamma self.coef0 = coef0 self.degree = degree self.kernel_params = kernel_params self.n_components = n_components self.random_state = random_state def fit(self, X, y=None): """Fit estimator to data. Samples a subset of training points, computes kernel on these and computes normalization matrix. Parameters ---------- X : array-like, shape=(n_samples, n_feature) Training data. """ X = check_array(X, accept_sparse='csr') rnd = check_random_state(self.random_state) n_samples = X.shape[0] # get basis vectors if self.n_components > n_samples: # XXX should we just bail? n_components = n_samples warnings.warn("n_components > n_samples. 
This is not possible.\n" "n_components was set to n_samples, which results" " in inefficient evaluation of the full kernel.") else: n_components = self.n_components n_components = min(n_samples, n_components) inds = rnd.permutation(n_samples) basis_inds = inds[:n_components] basis = X[basis_inds] basis_kernel = pairwise_kernels(basis, metric=self.kernel, filter_params=True, **self._get_kernel_params()) # sqrt of kernel matrix on basis vectors U, S, V = svd(basis_kernel) S = np.maximum(S, 1e-12) self.normalization_ = np.dot(U / np.sqrt(S), V) self.components_ = basis self.component_indices_ = inds return self def transform(self, X): """Apply feature map to X. Computes an approximate feature map using the kernel between some training points and X. Parameters ---------- X : array-like, shape=(n_samples, n_features) Data to transform. Returns ------- X_transformed : array, shape=(n_samples, n_components) Transformed data. """ check_is_fitted(self, 'components_') X = check_array(X, accept_sparse='csr') kernel_params = self._get_kernel_params() embedded = pairwise_kernels(X, self.components_, metric=self.kernel, filter_params=True, **kernel_params) return np.dot(embedded, self.normalization_.T) def _get_kernel_params(self): params = self.kernel_params if params is None: params = {} if not callable(self.kernel): for param in (KERNEL_PARAMS[self.kernel]): if getattr(self, param) is not None: params[param] = getattr(self, param) else: if (self.gamma is not None or self.coef0 is not None or self.degree is not None): warnings.warn( "Passing gamma, coef0 or degree to Nystroem when using a" " callable kernel is deprecated in version 0.19 and will" " raise an error in 0.21, as they are ignored. Use " "kernel_params instead.", DeprecationWarning) return params
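# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of scikit-learn): a quick numerical check of
# the approximations defined above against the exact RBF kernel. The data is
# random and purely illustrative; only numpy and classes from this module are
# used.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.normal(size=(60, 5))
    gamma = 0.5

    # Exact Gram matrix: K[i, j] = exp(-gamma * ||x_i - x_j||^2)
    sq_dists = ((X_demo[:, None, :] - X_demo[None, :, :]) ** 2).sum(axis=-1)
    K_exact = np.exp(-gamma * sq_dists)

    # Random Fourier features: the kernel is approximated by Z.dot(Z.T)
    Z = RBFSampler(gamma=gamma, n_components=1000,
                   random_state=0).fit_transform(X_demo)
    err_rff = np.abs(K_exact - Z.dot(Z.T)).mean()

    # Nystroem embedding using a subset of the data as basis
    Z_nys = Nystroem(kernel="rbf", gamma=gamma, n_components=30,
                     random_state=0).fit_transform(X_demo)
    err_nys = np.abs(K_exact - Z_nys.dot(Z_nys.T)).mean()

    print("mean abs error, random Fourier features: %.4f" % err_rff)
    print("mean abs error, Nystroem: %.4f" % err_nys)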
19,022
34.358736
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/base.py
"""Base classes for all estimators.""" # Author: Gael Varoquaux <[email protected]> # License: BSD 3 clause import copy import warnings from collections import defaultdict import numpy as np from scipy import sparse from .externals import six from .utils.fixes import signature from . import __version__ ############################################################################## def _first_and_last_element(arr): """Returns first and last element of numpy array or sparse matrix.""" if isinstance(arr, np.ndarray) or hasattr(arr, 'data'): # numpy array or sparse matrix with .data attribute data = arr.data if sparse.issparse(arr) else arr return data.flat[0], data.flat[-1] else: # Sparse matrices without .data attribute. Only dok_matrix at # the time of writing, in this case indexing is fast return arr[0, 0], arr[-1, -1] def clone(estimator, safe=True): """Constructs a new estimator with the same parameters. Clone does a deep copy of the model in an estimator without actually copying attached data. It yields a new estimator with the same parameters that has not been fit on any data. Parameters ---------- estimator : estimator object, or list, tuple or set of objects The estimator or group of estimators to be cloned safe : boolean, optional If safe is false, clone will fall back to a deep copy on objects that are not estimators. """ estimator_type = type(estimator) # XXX: not handling dictionaries if estimator_type in (list, tuple, set, frozenset): return estimator_type([clone(e, safe=safe) for e in estimator]) elif not hasattr(estimator, 'get_params'): if not safe: return copy.deepcopy(estimator) else: raise TypeError("Cannot clone object '%s' (type %s): " "it does not seem to be a scikit-learn estimator " "as it does not implement a 'get_params' methods." % (repr(estimator), type(estimator))) klass = estimator.__class__ new_object_params = estimator.get_params(deep=False) for name, param in six.iteritems(new_object_params): new_object_params[name] = clone(param, safe=False) new_object = klass(**new_object_params) params_set = new_object.get_params(deep=False) # quick sanity check of the parameters of the clone for name in new_object_params: param1 = new_object_params[name] param2 = params_set[name] if param1 is param2: # this should always happen continue if isinstance(param1, np.ndarray): # For most ndarrays, we do not test for complete equality if not isinstance(param2, type(param1)): equality_test = False elif (param1.ndim > 0 and param1.shape[0] > 0 and isinstance(param2, np.ndarray) and param2.ndim > 0 and param2.shape[0] > 0): equality_test = ( param1.shape == param2.shape and param1.dtype == param2.dtype and (_first_and_last_element(param1) == _first_and_last_element(param2)) ) else: equality_test = np.all(param1 == param2) elif sparse.issparse(param1): # For sparse matrices equality doesn't work if not sparse.issparse(param2): equality_test = False elif param1.size == 0 or param2.size == 0: equality_test = ( param1.__class__ == param2.__class__ and param1.size == 0 and param2.size == 0 ) else: equality_test = ( param1.__class__ == param2.__class__ and (_first_and_last_element(param1) == _first_and_last_element(param2)) and param1.nnz == param2.nnz and param1.shape == param2.shape ) else: # fall back on standard equality equality_test = param1 == param2 if equality_test: warnings.warn("Estimator %s modifies parameters in __init__." " This behavior is deprecated as of 0.18 and " "support for this behavior will be removed in 0.20." 
% type(estimator).__name__, DeprecationWarning) else: raise RuntimeError('Cannot clone object %s, as the constructor ' 'does not seem to set parameter %s' % (estimator, name)) return new_object ############################################################################### def _pprint(params, offset=0, printer=repr): """Pretty print the dictionary 'params' Parameters ---------- params : dict The dictionary to pretty print offset : int The offset in characters to add at the begin of each line. printer : callable The function to convert entries to strings, typically the builtin str or repr """ # Do a multi-line justified repr: options = np.get_printoptions() np.set_printoptions(precision=5, threshold=64, edgeitems=2) params_list = list() this_line_length = offset line_sep = ',\n' + (1 + offset // 2) * ' ' for i, (k, v) in enumerate(sorted(six.iteritems(params))): if type(v) is float: # use str for representing floating point numbers # this way we get consistent representation across # architectures and versions. this_repr = '%s=%s' % (k, str(v)) else: # use repr of the rest this_repr = '%s=%s' % (k, printer(v)) if len(this_repr) > 500: this_repr = this_repr[:300] + '...' + this_repr[-100:] if i > 0: if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr): params_list.append(line_sep) this_line_length = len(line_sep) else: params_list.append(', ') this_line_length += 2 params_list.append(this_repr) this_line_length += len(this_repr) np.set_printoptions(**options) lines = ''.join(params_list) # Strip trailing space to avoid nightmare in doctests lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n')) return lines ############################################################################### class BaseEstimator(object): """Base class for all estimators in scikit-learn Notes ----- All estimators should specify all the parameters that can be set at the class level in their ``__init__`` as explicit keyword arguments (no ``*args`` or ``**kwargs``). """ @classmethod def _get_param_names(cls): """Get parameter names for the estimator""" # fetch the constructor or the original constructor before # deprecation wrapping if any init = getattr(cls.__init__, 'deprecated_original', cls.__init__) if init is object.__init__: # No explicit constructor to introspect return [] # introspect the constructor arguments to find the model parameters # to represent init_signature = signature(init) # Consider the constructor parameters excluding 'self' parameters = [p for p in init_signature.parameters.values() if p.name != 'self' and p.kind != p.VAR_KEYWORD] for p in parameters: if p.kind == p.VAR_POSITIONAL: raise RuntimeError("scikit-learn estimators should always " "specify their parameters in the signature" " of their __init__ (no varargs)." " %s with constructor %s doesn't " " follow this convention." % (cls, init_signature)) # Extract and sort argument names excluding 'self' return sorted([p.name for p in parameters]) def get_params(self, deep=True): """Get parameters for this estimator. Parameters ---------- deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values. """ out = dict() for key in self._get_param_names(): # We need deprecation warnings to always be on in order to # catch deprecated param values. # This is set in utils/__init__.py but it gets overwritten # when running under python3 somehow. 
warnings.simplefilter("always", DeprecationWarning) try: with warnings.catch_warnings(record=True) as w: value = getattr(self, key, None) if len(w) and w[0].category == DeprecationWarning: # if the parameter is deprecated, don't show it continue finally: warnings.filters.pop(0) # XXX: should we rather test if instance of estimator? if deep and hasattr(value, 'get_params'): deep_items = value.get_params().items() out.update((key + '__' + k, val) for k, val in deep_items) out[key] = value return out def set_params(self, **params): """Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form ``<component>__<parameter>`` so that it's possible to update each component of a nested object. Returns ------- self """ if not params: # Simple optimization to gain speed (inspect is slow) return self valid_params = self.get_params(deep=True) nested_params = defaultdict(dict) # grouped by prefix for key, value in params.items(): key, delim, sub_key = key.partition('__') if key not in valid_params: raise ValueError('Invalid parameter %s for estimator %s. ' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.' % (key, self)) if delim: nested_params[key][sub_key] = value else: setattr(self, key, value) for key, sub_params in nested_params.items(): valid_params[key].set_params(**sub_params) return self def __repr__(self): class_name = self.__class__.__name__ return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False), offset=len(class_name),),) def __getstate__(self): try: state = super(BaseEstimator, self).__getstate__() except AttributeError: state = self.__dict__.copy() if type(self).__module__.startswith('sklearn.'): return dict(state.items(), _sklearn_version=__version__) else: return state def __setstate__(self, state): if type(self).__module__.startswith('sklearn.'): pickle_version = state.pop("_sklearn_version", "pre-0.18") if pickle_version != __version__: warnings.warn( "Trying to unpickle estimator {0} from version {1} when " "using version {2}. This might lead to breaking code or " "invalid results. Use at your own risk.".format( self.__class__.__name__, pickle_version, __version__), UserWarning) try: super(BaseEstimator, self).__setstate__(state) except AttributeError: self.__dict__.update(state) ############################################################################### class ClassifierMixin(object): """Mixin class for all classifiers in scikit-learn.""" _estimator_type = "classifier" def score(self, X, y, sample_weight=None): """Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters ---------- X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns ------- score : float Mean accuracy of self.predict(X) wrt. y. 
""" from .metrics import accuracy_score return accuracy_score(y, self.predict(X), sample_weight=sample_weight) ############################################################################### class RegressorMixin(object): """Mixin class for all regression estimators in scikit-learn.""" _estimator_type = "regressor" def score(self, X, y, sample_weight=None): """Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) ** 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) ** 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters ---------- X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns ------- score : float R^2 of self.predict(X) wrt. y. """ from .metrics import r2_score return r2_score(y, self.predict(X), sample_weight=sample_weight, multioutput='variance_weighted') ############################################################################### class ClusterMixin(object): """Mixin class for all cluster estimators in scikit-learn.""" _estimator_type = "clusterer" def fit_predict(self, X, y=None): """Performs clustering on X and returns cluster labels. Parameters ---------- X : ndarray, shape (n_samples, n_features) Input data. Returns ------- y : ndarray, shape (n_samples,) cluster labels """ # non-optimized default implementation; override when a better # method is possible for a given clustering algorithm self.fit(X) return self.labels_ class BiclusterMixin(object): """Mixin class for all bicluster estimators in scikit-learn""" @property def biclusters_(self): """Convenient way to get row and column indicators together. Returns the ``rows_`` and ``columns_`` members. """ return self.rows_, self.columns_ def get_indices(self, i): """Row and column indices of the i'th bicluster. Only works if ``rows_`` and ``columns_`` attributes exist. Parameters ---------- i : int The index of the cluster. Returns ------- row_ind : np.array, dtype=np.intp Indices of rows in the dataset that belong to the bicluster. col_ind : np.array, dtype=np.intp Indices of columns in the dataset that belong to the bicluster. """ rows = self.rows_[i] columns = self.columns_[i] return np.nonzero(rows)[0], np.nonzero(columns)[0] def get_shape(self, i): """Shape of the i'th bicluster. Parameters ---------- i : int The index of the cluster. Returns ------- shape : (int, int) Number of rows and columns (resp.) in the bicluster. """ indices = self.get_indices(i) return tuple(len(i) for i in indices) def get_submatrix(self, i, data): """Returns the submatrix corresponding to bicluster `i`. Parameters ---------- i : int The index of the cluster. data : array The data. Returns ------- submatrix : array The submatrix corresponding to bicluster i. Notes ----- Works with sparse matrices. Only works if ``rows_`` and ``columns_`` attributes exist. 
""" from .utils.validation import check_array data = check_array(data, accept_sparse='csr') row_ind, col_ind = self.get_indices(i) return data[row_ind[:, np.newaxis], col_ind] ############################################################################### class TransformerMixin(object): """Mixin class for all transformers in scikit-learn.""" def fit_transform(self, X, y=None, **fit_params): """Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters ---------- X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns ------- X_new : numpy array of shape [n_samples, n_features_new] Transformed array. """ # non-optimized default implementation; override when a better # method is possible for a given clustering algorithm if y is None: # fit method of arity 1 (unsupervised transformation) return self.fit(X, **fit_params).transform(X) else: # fit method of arity 2 (supervised transformation) return self.fit(X, y, **fit_params).transform(X) class DensityMixin(object): """Mixin class for all density estimators in scikit-learn.""" _estimator_type = "DensityEstimator" def score(self, X, y=None): """Returns the score of the model on the data X Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- score : float """ pass ############################################################################### class MetaEstimatorMixin(object): """Mixin class for all meta estimators in scikit-learn.""" # this is just a tag for the moment ############################################################################### def is_classifier(estimator): """Returns True if the given estimator is (probably) a classifier. Parameters ---------- estimator : object Estimator object to test. Returns ------- out : bool True if estimator is a classifier and False otherwise. """ return getattr(estimator, "_estimator_type", None) == "classifier" def is_regressor(estimator): """Returns True if the given estimator is (probably) a regressor. Parameters ---------- estimator : object Estimator object to test. Returns ------- out : bool True if estimator is a regressor and False otherwise. """ return getattr(estimator, "_estimator_type", None) == "regressor"
20,250
33.915517
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/naive_bayes.py
# -*- coding: utf-8 -*- """ The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These are supervised learning methods based on applying Bayes' theorem with strong (naive) feature independence assumptions. """ # Author: Vincent Michel <[email protected]> # Minor fixes by Fabian Pedregosa # Amit Aides <[email protected]> # Yehuda Finkelstein <[email protected]> # Lars Buitinck # Jan Hendrik Metzen <[email protected]> # (parts based on earlier work by Mathieu Blondel) # # License: BSD 3 clause import warnings from abc import ABCMeta, abstractmethod import numpy as np from scipy.sparse import issparse from .base import BaseEstimator, ClassifierMixin from .preprocessing import binarize from .preprocessing import LabelBinarizer from .preprocessing import label_binarize from .utils import check_X_y, check_array, check_consistent_length from .utils.extmath import safe_sparse_dot from .utils.fixes import logsumexp from .utils.multiclass import _check_partial_fit_first_call from .utils.validation import check_is_fitted from .externals import six __all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB'] class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)): """Abstract base class for naive Bayes estimators""" @abstractmethod def _joint_log_likelihood(self, X): """Compute the unnormalized posterior log probability of X I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of shape [n_classes, n_samples]. Input is passed to _joint_log_likelihood as-is by predict, predict_proba and predict_log_proba. """ def predict(self, X): """ Perform classification on an array of test vectors X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = [n_samples] Predicted target values for X """ jll = self._joint_log_likelihood(X) return self.classes_[np.argmax(jll, axis=1)] def predict_log_proba(self, X): """ Return log-probability estimates for the test vector X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array-like, shape = [n_samples, n_classes] Returns the log-probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. """ jll = self._joint_log_likelihood(X) # normalize by P(x) = P(f_1, ..., f_n) log_prob_x = logsumexp(jll, axis=1) return jll - np.atleast_2d(log_prob_x).T def predict_proba(self, X): """ Return probability estimates for the test vector X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array-like, shape = [n_samples, n_classes] Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. """ return np.exp(self.predict_log_proba(X)) class GaussianNB(BaseNB): """ Gaussian Naive Bayes (GaussianNB) Can perform online updates to model parameters via `partial_fit` method. For details on algorithm used to update feature means and variance online, see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf Read more in the :ref:`User Guide <gaussian_naive_bayes>`. Parameters ---------- priors : array-like, shape (n_classes,) Prior probabilities of the classes. If specified the priors are not adjusted according to the data. Attributes ---------- class_prior_ : array, shape (n_classes,) probability of each class. 
class_count_ : array, shape (n_classes,) number of training samples observed in each class. theta_ : array, shape (n_classes, n_features) mean of each feature per class sigma_ : array, shape (n_classes, n_features) variance of each feature per class Examples -------- >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> Y = np.array([1, 1, 1, 2, 2, 2]) >>> from sklearn.naive_bayes import GaussianNB >>> clf = GaussianNB() >>> clf.fit(X, Y) GaussianNB(priors=None) >>> print(clf.predict([[-0.8, -1]])) [1] >>> clf_pf = GaussianNB() >>> clf_pf.partial_fit(X, Y, np.unique(Y)) GaussianNB(priors=None) >>> print(clf_pf.predict([[-0.8, -1]])) [1] """ def __init__(self, priors=None): self.priors = priors def fit(self, X, y, sample_weight=None): """Fit Gaussian Naive Bayes according to X, y Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target values. sample_weight : array-like, shape (n_samples,), optional (default=None) Weights applied to individual samples (1. for unweighted). .. versionadded:: 0.17 Gaussian Naive Bayes supports fitting with *sample_weight*. Returns ------- self : object Returns self. """ X, y = check_X_y(X, y) return self._partial_fit(X, y, np.unique(y), _refit=True, sample_weight=sample_weight) @staticmethod def _update_mean_variance(n_past, mu, var, X, sample_weight=None): """Compute online update of Gaussian mean and variance. Given starting sample count, mean, and variance, a new set of points X, and optionally sample weights, return the updated mean and variance. (NB - each dimension (column) in X is treated as independent -- you get variance, not covariance). Can take scalar mean and variance, or vector mean and variance to simultaneously update a number of independent Gaussians. See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf Parameters ---------- n_past : int Number of samples represented in old mean and variance. If sample weights were given, this should contain the sum of sample weights represented in old mean and variance. mu : array-like, shape (number of Gaussians,) Means for Gaussians in original set. var : array-like, shape (number of Gaussians,) Variances for Gaussians in original set. sample_weight : array-like, shape (n_samples,), optional (default=None) Weights applied to individual samples (1. for unweighted). Returns ------- total_mu : array-like, shape (number of Gaussians,) Updated mean for each Gaussian over the combined set. total_var : array-like, shape (number of Gaussians,) Updated variance for each Gaussian over the combined set. """ if X.shape[0] == 0: return mu, var # Compute (potentially weighted) mean and variance of new datapoints if sample_weight is not None: n_new = float(sample_weight.sum()) new_mu = np.average(X, axis=0, weights=sample_weight / n_new) new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weight / n_new) else: n_new = X.shape[0] new_var = np.var(X, axis=0) new_mu = np.mean(X, axis=0) if n_past == 0: return new_mu, new_var n_total = float(n_past + n_new) # Combine mean of old and new data, taking into consideration # (weighted) number of observations total_mu = (n_new * new_mu + n_past * mu) / n_total # Combine variance of old and new data, taking into consideration # (weighted) number of observations. 
This is achieved by combining # the sum-of-squared-differences (ssd) old_ssd = n_past * var new_ssd = n_new * new_var total_ssd = (old_ssd + new_ssd + (n_past / float(n_new * n_total)) * (n_new * mu - n_new * new_mu) ** 2) total_var = total_ssd / n_total return total_mu, total_var def partial_fit(self, X, y, classes=None, sample_weight=None): """Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different chunks of a dataset so as to implement out-of-core or online learning. This is especially useful when the whole dataset is too big to fit in memory at once. This method has some performance and numerical stability overhead, hence it is better to call partial_fit on chunks of data that are as large as possible (as long as fitting in the memory budget) to hide the overhead. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target values. classes : array-like, shape (n_classes,), optional (default=None) List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. sample_weight : array-like, shape (n_samples,), optional (default=None) Weights applied to individual samples (1. for unweighted). .. versionadded:: 0.17 Returns ------- self : object Returns self. """ return self._partial_fit(X, y, classes, _refit=False, sample_weight=sample_weight) def _partial_fit(self, X, y, classes=None, _refit=False, sample_weight=None): """Actual implementation of Gaussian NB fitting. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target values. classes : array-like, shape (n_classes,), optional (default=None) List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. _refit: bool, optional (default=False) If true, act as though this were the first time we called _partial_fit (ie, throw away any past fitting and start over). sample_weight : array-like, shape (n_samples,), optional (default=None) Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns self. """ X, y = check_X_y(X, y) if sample_weight is not None: sample_weight = check_array(sample_weight, ensure_2d=False) check_consistent_length(y, sample_weight) # If the ratio of data variance between dimensions is too small, it # will cause numerical errors. To address this, we artificially # boost the variance by epsilon, a small fraction of the standard # deviation of the largest dimension. 
epsilon = 1e-9 * np.var(X, axis=0).max() if _refit: self.classes_ = None if _check_partial_fit_first_call(self, classes): # This is the first call to partial_fit: # initialize various cumulative counters n_features = X.shape[1] n_classes = len(self.classes_) self.theta_ = np.zeros((n_classes, n_features)) self.sigma_ = np.zeros((n_classes, n_features)) self.class_count_ = np.zeros(n_classes, dtype=np.float64) # Initialise the class prior n_classes = len(self.classes_) # Take into account the priors if self.priors is not None: priors = np.asarray(self.priors) # Check that the provide prior match the number of classes if len(priors) != n_classes: raise ValueError('Number of priors must match number of' ' classes.') # Check that the sum is 1 if priors.sum() != 1.0: raise ValueError('The sum of the priors should be 1.') # Check that the prior are non-negative if (priors < 0).any(): raise ValueError('Priors must be non-negative.') self.class_prior_ = priors else: # Initialize the priors to zeros for each class self.class_prior_ = np.zeros(len(self.classes_), dtype=np.float64) else: if X.shape[1] != self.theta_.shape[1]: msg = "Number of features %d does not match previous data %d." raise ValueError(msg % (X.shape[1], self.theta_.shape[1])) # Put epsilon back in each time self.sigma_[:, :] -= epsilon classes = self.classes_ unique_y = np.unique(y) unique_y_in_classes = np.in1d(unique_y, classes) if not np.all(unique_y_in_classes): raise ValueError("The target label(s) %s in y do not exist in the " "initial classes %s" % (unique_y[~unique_y_in_classes], classes)) for y_i in unique_y: i = classes.searchsorted(y_i) X_i = X[y == y_i, :] if sample_weight is not None: sw_i = sample_weight[y == y_i] N_i = sw_i.sum() else: sw_i = None N_i = X_i.shape[0] new_theta, new_sigma = self._update_mean_variance( self.class_count_[i], self.theta_[i, :], self.sigma_[i, :], X_i, sw_i) self.theta_[i, :] = new_theta self.sigma_[i, :] = new_sigma self.class_count_[i] += N_i self.sigma_[:, :] += epsilon # Update if only no priors is provided if self.priors is None: # Empirical prior, with sample_weight taken into account self.class_prior_ = self.class_count_ / self.class_count_.sum() return self def _joint_log_likelihood(self, X): check_is_fitted(self, "classes_") X = check_array(X) joint_log_likelihood = [] for i in range(np.size(self.classes_)): jointi = np.log(self.class_prior_[i]) n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :])) n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) / (self.sigma_[i, :]), 1) joint_log_likelihood.append(jointi + n_ij) joint_log_likelihood = np.array(joint_log_likelihood).T return joint_log_likelihood _ALPHA_MIN = 1e-10 class BaseDiscreteNB(BaseNB): """Abstract base class for naive Bayes on discrete/categorical data Any estimator based on this class should provide: __init__ _joint_log_likelihood(X) as per BaseNB """ def _update_class_log_prior(self, class_prior=None): n_classes = len(self.classes_) if class_prior is not None: if len(class_prior) != n_classes: raise ValueError("Number of priors must match number of" " classes.") self.class_log_prior_ = np.log(class_prior) elif self.fit_prior: # empirical prior, with sample_weight taken into account self.class_log_prior_ = (np.log(self.class_count_) - np.log(self.class_count_.sum())) else: self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes) def _check_alpha(self): if self.alpha < 0: raise ValueError('Smoothing parameter alpha = %.1e. ' 'alpha should be > 0.' 
% self.alpha) if self.alpha < _ALPHA_MIN: warnings.warn('alpha too small will result in numeric errors, ' 'setting alpha = %.1e' % _ALPHA_MIN) return _ALPHA_MIN return self.alpha def partial_fit(self, X, y, classes=None, sample_weight=None): """Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different chunks of a dataset so as to implement out-of-core or online learning. This is especially useful when the whole dataset is too big to fit in memory at once. This method has some performance overhead hence it is better to call partial_fit on chunks of data that are as large as possible (as long as fitting in the memory budget) to hide the overhead. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. classes : array-like, shape = [n_classes] (default=None) List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. sample_weight : array-like, shape = [n_samples] (default=None) Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns self. """ X = check_array(X, accept_sparse='csr', dtype=np.float64) _, n_features = X.shape if _check_partial_fit_first_call(self, classes): # This is the first call to partial_fit: # initialize various cumulative counters n_effective_classes = len(classes) if len(classes) > 1 else 2 self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64) self.feature_count_ = np.zeros((n_effective_classes, n_features), dtype=np.float64) elif n_features != self.coef_.shape[1]: msg = "Number of features %d does not match previous data %d." raise ValueError(msg % (n_features, self.coef_.shape[-1])) Y = label_binarize(y, classes=self.classes_) if Y.shape[1] == 1: Y = np.concatenate((1 - Y, Y), axis=1) n_samples, n_classes = Y.shape if X.shape[0] != Y.shape[0]: msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible." raise ValueError(msg % (X.shape[0], y.shape[0])) # label_binarize() returns arrays with dtype=np.int64. # We convert it to np.float64 to support sample_weight consistently Y = Y.astype(np.float64) if sample_weight is not None: sample_weight = np.atleast_2d(sample_weight) Y *= check_array(sample_weight).T class_prior = self.class_prior # Count raw events from data before updating the class log prior # and feature log probas self._count(X, Y) # XXX: OPTIM: we could introduce a public finalization method to # be called by the user explicitly just once after several consecutive # calls to partial_fit and prior any call to predict[_[log_]proba] # to avoid computing the smooth log probas at each call to partial fit alpha = self._check_alpha() self._update_feature_log_prob(alpha) self._update_class_log_prior(class_prior=class_prior) return self def fit(self, X, y, sample_weight=None): """Fit Naive Bayes classifier according to X, y Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], (default=None) Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns self. 
""" X, y = check_X_y(X, y, 'csr') _, n_features = X.shape labelbin = LabelBinarizer() Y = labelbin.fit_transform(y) self.classes_ = labelbin.classes_ if Y.shape[1] == 1: Y = np.concatenate((1 - Y, Y), axis=1) # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64. # We convert it to np.float64 to support sample_weight consistently; # this means we also don't have to cast X to floating point Y = Y.astype(np.float64) if sample_weight is not None: sample_weight = np.atleast_2d(sample_weight) Y *= check_array(sample_weight).T class_prior = self.class_prior # Count raw events from data before updating the class log prior # and feature log probas n_effective_classes = Y.shape[1] self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64) self.feature_count_ = np.zeros((n_effective_classes, n_features), dtype=np.float64) self._count(X, Y) alpha = self._check_alpha() self._update_feature_log_prob(alpha) self._update_class_log_prior(class_prior=class_prior) return self # XXX The following is a stopgap measure; we need to set the dimensions # of class_log_prior_ and feature_log_prob_ correctly. def _get_coef(self): return (self.feature_log_prob_[1:] if len(self.classes_) == 2 else self.feature_log_prob_) def _get_intercept(self): return (self.class_log_prior_[1:] if len(self.classes_) == 2 else self.class_log_prior_) coef_ = property(_get_coef) intercept_ = property(_get_intercept) class MultinomialNB(BaseDiscreteNB): """ Naive Bayes classifier for multinomial models The multinomial Naive Bayes classifier is suitable for classification with discrete features (e.g., word counts for text classification). The multinomial distribution normally requires integer feature counts. However, in practice, fractional counts such as tf-idf may also work. Read more in the :ref:`User Guide <multinomial_naive_bayes>`. Parameters ---------- alpha : float, optional (default=1.0) Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing). fit_prior : boolean, optional (default=True) Whether to learn class prior probabilities or not. If false, a uniform prior will be used. class_prior : array-like, size (n_classes,), optional (default=None) Prior probabilities of the classes. If specified the priors are not adjusted according to the data. Attributes ---------- class_log_prior_ : array, shape (n_classes, ) Smoothed empirical log probability for each class. intercept_ : property Mirrors ``class_log_prior_`` for interpreting MultinomialNB as a linear model. feature_log_prob_ : array, shape (n_classes, n_features) Empirical log probability of features given a class, ``P(x_i|y)``. coef_ : property Mirrors ``feature_log_prob_`` for interpreting MultinomialNB as a linear model. class_count_ : array, shape (n_classes,) Number of samples encountered for each class during fitting. This value is weighted by the sample weight when provided. feature_count_ : array, shape (n_classes, n_features) Number of samples encountered for each (class, feature) during fitting. This value is weighted by the sample weight when provided. Examples -------- >>> import numpy as np >>> X = np.random.randint(5, size=(6, 100)) >>> y = np.array([1, 2, 3, 4, 5, 6]) >>> from sklearn.naive_bayes import MultinomialNB >>> clf = MultinomialNB() >>> clf.fit(X, y) MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True) >>> print(clf.predict(X[2:3])) [3] Notes ----- For the rationale behind the names `coef_` and `intercept_`, i.e. naive Bayes as a linear classifier, see J. Rennie et al. 
(2003), Tackling the poor assumptions of naive Bayes text classifiers, ICML. References ---------- C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to Information Retrieval. Cambridge University Press, pp. 234-265. http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html """ def __init__(self, alpha=1.0, fit_prior=True, class_prior=None): self.alpha = alpha self.fit_prior = fit_prior self.class_prior = class_prior def _count(self, X, Y): """Count and smooth feature occurrences.""" if np.any((X.data if issparse(X) else X) < 0): raise ValueError("Input X must be non-negative") self.feature_count_ += safe_sparse_dot(Y.T, X) self.class_count_ += Y.sum(axis=0) def _update_feature_log_prob(self, alpha): """Apply smoothing to raw counts and recompute log probabilities""" smoothed_fc = self.feature_count_ + alpha smoothed_cc = smoothed_fc.sum(axis=1) self.feature_log_prob_ = (np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1))) def _joint_log_likelihood(self, X): """Calculate the posterior log probability of the samples X""" check_is_fitted(self, "classes_") X = check_array(X, accept_sparse='csr') return (safe_sparse_dot(X, self.feature_log_prob_.T) + self.class_log_prior_) class BernoulliNB(BaseDiscreteNB): """Naive Bayes classifier for multivariate Bernoulli models. Like MultinomialNB, this classifier is suitable for discrete data. The difference is that while MultinomialNB works with occurrence counts, BernoulliNB is designed for binary/boolean features. Read more in the :ref:`User Guide <bernoulli_naive_bayes>`. Parameters ---------- alpha : float, optional (default=1.0) Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing). binarize : float or None, optional (default=0.0) Threshold for binarizing (mapping to booleans) of sample features. If None, input is presumed to already consist of binary vectors. fit_prior : boolean, optional (default=True) Whether to learn class prior probabilities or not. If false, a uniform prior will be used. class_prior : array-like, size=[n_classes,], optional (default=None) Prior probabilities of the classes. If specified the priors are not adjusted according to the data. Attributes ---------- class_log_prior_ : array, shape = [n_classes] Log probability of each class (smoothed). feature_log_prob_ : array, shape = [n_classes, n_features] Empirical log probability of features given a class, P(x_i|y). class_count_ : array, shape = [n_classes] Number of samples encountered for each class during fitting. This value is weighted by the sample weight when provided. feature_count_ : array, shape = [n_classes, n_features] Number of samples encountered for each (class, feature) during fitting. This value is weighted by the sample weight when provided. Examples -------- >>> import numpy as np >>> X = np.random.randint(2, size=(6, 100)) >>> Y = np.array([1, 2, 3, 4, 4, 5]) >>> from sklearn.naive_bayes import BernoulliNB >>> clf = BernoulliNB() >>> clf.fit(X, Y) BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True) >>> print(clf.predict(X[2:3])) [3] References ---------- C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to Information Retrieval. Cambridge University Press, pp. 234-265. http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html A. McCallum and K. Nigam (1998). A comparison of event models for naive Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for Text Categorization, pp. 41-48. V. Metsis, I. Androutsopoulos and G. 
Paliouras (2006). Spam filtering with naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS). """ def __init__(self, alpha=1.0, binarize=.0, fit_prior=True, class_prior=None): self.alpha = alpha self.binarize = binarize self.fit_prior = fit_prior self.class_prior = class_prior def _count(self, X, Y): """Count and smooth feature occurrences.""" if self.binarize is not None: X = binarize(X, threshold=self.binarize) self.feature_count_ += safe_sparse_dot(Y.T, X) self.class_count_ += Y.sum(axis=0) def _update_feature_log_prob(self, alpha): """Apply smoothing to raw counts and recompute log probabilities""" smoothed_fc = self.feature_count_ + alpha smoothed_cc = self.class_count_ + alpha * 2 self.feature_log_prob_ = (np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1))) def _joint_log_likelihood(self, X): """Calculate the posterior log probability of the samples X""" check_is_fitted(self, "classes_") X = check_array(X, accept_sparse='csr') if self.binarize is not None: X = binarize(X, threshold=self.binarize) n_classes, n_features = self.feature_log_prob_.shape n_samples, n_features_X = X.shape if n_features_X != n_features: raise ValueError("Expected input with %d features, got %d instead" % (n_features, n_features_X)) neg_prob = np.log(1 - np.exp(self.feature_log_prob_)) # Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T) jll += self.class_log_prior_ + neg_prob.sum(axis=1) return jll
31,330
36.122038
91
py
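A brief usage sketch for the naive Bayes code in the record above, assuming this scikit-learn copy (or an equivalent release) is importable; the toy count matrix, labels, and per-sample weights are made up purely for illustration.

# Minimal sketch: MultinomialNB/BernoulliNB usage, including the sample_weight
# path handled in fit() and the "linear classifier" view mentioned in the Notes.
import numpy as np
from sklearn.naive_bayes import MultinomialNB, BernoulliNB

rng = np.random.RandomState(0)
X = rng.randint(5, size=(6, 20))                     # small count matrix (6 docs, 20 terms)
y = np.array([0, 0, 1, 1, 2, 2])
weights = np.array([1.0, 2.0, 1.0, 1.0, 0.5, 1.0])   # illustrative per-sample weights

clf = MultinomialNB(alpha=1.0).fit(X, y, sample_weight=weights)

# The joint log likelihood used by predict() is an affine function of X built
# from the fitted log probabilities (see _joint_log_likelihood above).
jll = np.dot(X, clf.feature_log_prob_.T) + clf.class_log_prior_
assert np.array_equal(clf.predict(X), clf.classes_[jll.argmax(axis=1)])

# BernoulliNB binarizes its input first (threshold given by `binarize`).
bnb = BernoulliNB(binarize=2.0).fit(X, y)
print(bnb.predict(X[:2]))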
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/learning_curve.py
"""Utilities to evaluate models with respect to a variable """ # Author: Alexander Fabisch <[email protected]> # # License: BSD 3 clause import warnings import numpy as np from .base import is_classifier, clone from .cross_validation import check_cv from .externals.joblib import Parallel, delayed from .cross_validation import _safe_split, _score, _fit_and_score from .metrics.scorer import check_scoring from .utils import indexable warnings.warn("This module was deprecated in version 0.18 in favor of the " "model_selection module into which all the functions are moved." " This module will be removed in 0.20", DeprecationWarning) __all__ = ['learning_curve', 'validation_curve'] def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None, exploit_incremental_learning=False, n_jobs=1, pre_dispatch="all", verbose=0, error_score='raise'): """Learning curve. .. deprecated:: 0.18 This module will be removed in 0.20. Use :func:`sklearn.model_selection.learning_curve` instead. Determines cross-validated training and test scores for different training set sizes. A cross-validation generator splits the whole dataset k times in training and test data. Subsets of the training set with varying sizes will be used to train the estimator and a score for each training subset size and the test set will be computed. Afterwards, the scores will be averaged over all k runs for each training subset size. Read more in the :ref:`User Guide <learning_curves>`. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples) or (n_samples, n_features), optional Target relative to X for classification or regression; None for unsupervised learning. train_sizes : array-like, shape (n_ticks,), dtype float or int Relative or absolute numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of the maximum size of the training set (that is determined by the selected validation method), i.e. it has to be within (0, 1]. Otherwise it is interpreted as absolute sizes of the training sets. Note that for classification the number of samples usually have to be big enough to contain at least one sample from each class. (default: np.linspace(0.1, 1.0, 5)) cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`sklearn.model_selection.StratifiedKFold` is used. In all other cases, :class:`sklearn.model_selection.KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. 
exploit_incremental_learning : boolean, optional, default: False If the estimator supports incremental learning, this will be used to speed up fitting for different training set sizes. n_jobs : integer, optional Number of jobs to run in parallel (default 1). pre_dispatch : integer or string, optional Number of predispatched jobs for parallel execution (default is all). The option can reduce the allocated memory. The string can be an expression like '2*n_jobs'. verbose : integer, optional Controls the verbosity: the higher, the more messages. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Returns ------- train_sizes_abs : array, shape = (n_unique_ticks,), dtype int Numbers of training examples that has been used to generate the learning curve. Note that the number of ticks might be less than n_ticks because duplicate entries will be removed. train_scores : array, shape (n_ticks, n_cv_folds) Scores on training sets. test_scores : array, shape (n_ticks, n_cv_folds) Scores on test set. Notes ----- See :ref:`examples/model_selection/plot_learning_curve.py <sphx_glr_auto_examples_model_selection_plot_learning_curve.py>` """ if exploit_incremental_learning and not hasattr(estimator, "partial_fit"): raise ValueError("An estimator must support the partial_fit interface " "to exploit incremental learning") X, y = indexable(X, y) # Make a list since we will be iterating multiple times over the folds cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator))) scorer = check_scoring(estimator, scoring=scoring) # HACK as long as boolean indices are allowed in cv generators if cv[0][0].dtype == bool: new_cv = [] for i in range(len(cv)): new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0])) cv = new_cv n_max_training_samples = len(cv[0][0]) # Because the lengths of folds can be significantly different, it is # not guaranteed that we use all of the available training data when we # use the first 'n_max_training_samples' samples. train_sizes_abs = _translate_train_sizes(train_sizes, n_max_training_samples) n_unique_ticks = train_sizes_abs.shape[0] if verbose > 0: print("[learning_curve] Training set sizes: " + str(train_sizes_abs)) parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) if exploit_incremental_learning: classes = np.unique(y) if is_classifier(estimator) else None out = parallel(delayed(_incremental_fit_estimator)( clone(estimator), X, y, classes, train, test, train_sizes_abs, scorer, verbose) for train, test in cv) else: out = parallel(delayed(_fit_and_score)( clone(estimator), X, y, scorer, train[:n_train_samples], test, verbose, parameters=None, fit_params=None, return_train_score=True, error_score=error_score) for train, test in cv for n_train_samples in train_sizes_abs) out = np.array(out)[:, :2] n_cv_folds = out.shape[0] // n_unique_ticks out = out.reshape(n_cv_folds, n_unique_ticks, 2) out = np.asarray(out).transpose((2, 1, 0)) return train_sizes_abs, out[0], out[1] def _translate_train_sizes(train_sizes, n_max_training_samples): """Determine absolute sizes of training subsets and validate 'train_sizes'. 
Examples: _translate_train_sizes([0.5, 1.0], 10) -> [5, 10] _translate_train_sizes([5, 10], 10) -> [5, 10] Parameters ---------- train_sizes : array-like, shape (n_ticks,), dtype float or int Numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of 'n_max_training_samples', i.e. it has to be within (0, 1]. n_max_training_samples : int Maximum number of training samples (upper bound of 'train_sizes'). Returns ------- train_sizes_abs : array, shape (n_unique_ticks,), dtype int Numbers of training examples that will be used to generate the learning curve. Note that the number of ticks might be less than n_ticks because duplicate entries will be removed. """ train_sizes_abs = np.asarray(train_sizes) n_ticks = train_sizes_abs.shape[0] n_min_required_samples = np.min(train_sizes_abs) n_max_required_samples = np.max(train_sizes_abs) if np.issubdtype(train_sizes_abs.dtype, np.floating): if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0: raise ValueError("train_sizes has been interpreted as fractions " "of the maximum number of training samples and " "must be within (0, 1], but is within [%f, %f]." % (n_min_required_samples, n_max_required_samples)) train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype( dtype=np.int, copy=False) train_sizes_abs = np.clip(train_sizes_abs, 1, n_max_training_samples) else: if (n_min_required_samples <= 0 or n_max_required_samples > n_max_training_samples): raise ValueError("train_sizes has been interpreted as absolute " "numbers of training samples and must be within " "(0, %d], but is within [%d, %d]." % (n_max_training_samples, n_min_required_samples, n_max_required_samples)) train_sizes_abs = np.unique(train_sizes_abs) if n_ticks > train_sizes_abs.shape[0]: warnings.warn("Removed duplicate entries from 'train_sizes'. Number " "of ticks will be less than the size of " "'train_sizes' %d instead of %d)." % (train_sizes_abs.shape[0], n_ticks), RuntimeWarning) return train_sizes_abs def _incremental_fit_estimator(estimator, X, y, classes, train, test, train_sizes, scorer, verbose): """Train estimator on training subsets incrementally and compute scores.""" train_scores, test_scores = [], [] partitions = zip(train_sizes, np.split(train, train_sizes)[:-1]) for n_train_samples, partial_train in partitions: train_subset = train[:n_train_samples] X_train, y_train = _safe_split(estimator, X, y, train_subset) X_partial_train, y_partial_train = _safe_split(estimator, X, y, partial_train) X_test, y_test = _safe_split(estimator, X, y, test, train_subset) if y_partial_train is None: estimator.partial_fit(X_partial_train, classes=classes) else: estimator.partial_fit(X_partial_train, y_partial_train, classes=classes) train_scores.append(_score(estimator, X_train, y_train, scorer)) test_scores.append(_score(estimator, X_test, y_test, scorer)) return np.array((train_scores, test_scores)).T def validation_curve(estimator, X, y, param_name, param_range, cv=None, scoring=None, n_jobs=1, pre_dispatch="all", verbose=0): """Validation curve. .. deprecated:: 0.18 This module will be removed in 0.20. Use :func:`sklearn.model_selection.validation_curve` instead. Determine training and test scores for varying parameter values. Compute scores for an estimator with different values of a specified parameter. This is similar to grid search with one parameter. However, this will also compute training scores and is merely a utility for plotting the results. 
Read more in the :ref:`User Guide <validation_curve>`. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples) or (n_samples, n_features), optional Target relative to X for classification or regression; None for unsupervised learning. param_name : string Name of the parameter that will be varied. param_range : array-like, shape (n_values,) The values of the parameter that will be evaluated. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`sklearn.model_selection.StratifiedKFold` is used. In all other cases, :class:`sklearn.model_selection.KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. n_jobs : integer, optional Number of jobs to run in parallel (default 1). pre_dispatch : integer or string, optional Number of predispatched jobs for parallel execution (default is all). The option can reduce the allocated memory. The string can be an expression like '2*n_jobs'. verbose : integer, optional Controls the verbosity: the higher, the more messages. Returns ------- train_scores : array, shape (n_ticks, n_cv_folds) Scores on training sets. test_scores : array, shape (n_ticks, n_cv_folds) Scores on test set. Notes ----- See :ref:`examples/model_selection/plot_validation_curve.py <sphx_glr_auto_examples_model_selection_plot_validation_curve.py>` """ X, y = indexable(X, y) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) out = parallel(delayed(_fit_and_score)( clone(estimator), X, y, scorer, train, test, verbose, parameters={param_name: v}, fit_params=None, return_train_score=True) for train, test in cv for v in param_range) out = np.asarray(out)[:, :2] n_params = len(param_range) n_cv_folds = out.shape[0] // n_params out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0)) return out[0], out[1]
15,421
41.720222
79
py
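A brief usage sketch for the learning/validation curve utilities in the record above; per the deprecation notice it imports from sklearn.model_selection, and the make_classification data is synthetic and only illustrative.

# Sketch: compute a learning curve and a validation curve with a small SVC.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import learning_curve, validation_curve
from sklearn.svm import SVC

X, y = make_classification(n_samples=200, n_features=10, random_state=0)

train_sizes_abs, train_scores, test_scores = learning_curve(
    SVC(kernel='linear'), X, y,
    train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
# Average over the CV folds, as described in the docstring.
print(train_sizes_abs, test_scores.mean(axis=1))

# validation_curve varies a single hyper-parameter instead of the training size.
train_scores, test_scores = validation_curve(
    SVC(), X, y, param_name='gamma', param_range=np.logspace(-4, 0, 5), cv=5)
print(test_scores.mean(axis=1))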
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/random_projection.py
# -*- coding: utf8 """Random Projection transformers Random Projections are a simple and computationally efficient way to reduce the dimensionality of the data by trading a controlled amount of accuracy (as additional variance) for faster processing times and smaller model sizes. The dimensions and distribution of Random Projections matrices are controlled so as to preserve the pairwise distances between any two samples of the dataset. The main theoretical result behind the efficiency of random projection is the `Johnson-Lindenstrauss lemma (quoting Wikipedia) <https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma>`_: In mathematics, the Johnson-Lindenstrauss lemma is a result concerning low-distortion embeddings of points from high-dimensional into low-dimensional Euclidean space. The lemma states that a small set of points in a high-dimensional space can be embedded into a space of much lower dimension in such a way that distances between the points are nearly preserved. The map used for the embedding is at least Lipschitz, and can even be taken to be an orthogonal projection. """ # Authors: Olivier Grisel <[email protected]>, # Arnaud Joly <[email protected]> # License: BSD 3 clause from __future__ import division import warnings from abc import ABCMeta, abstractmethod import numpy as np from numpy.testing import assert_equal import scipy.sparse as sp from .base import BaseEstimator, TransformerMixin from .externals import six from .externals.six.moves import xrange from .utils import check_random_state from .utils.extmath import safe_sparse_dot from .utils.random import sample_without_replacement from .utils.validation import check_array, check_is_fitted from .exceptions import DataDimensionalityWarning __all__ = ["SparseRandomProjection", "GaussianRandomProjection", "johnson_lindenstrauss_min_dim"] def johnson_lindenstrauss_min_dim(n_samples, eps=0.1): """Find a 'safe' number of components to randomly project to The distortion introduced by a random projection `p` only changes the distance between two points by a factor (1 +- eps) in an euclidean space with good probability. The projection `p` is an eps-embedding as defined by: (1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2 Where u and v are any rows taken from a dataset of shape [n_samples, n_features], eps is in ]0, 1[ and p is a projection by a random Gaussian N(0, 1) matrix with shape [n_components, n_features] (or a sparse Achlioptas matrix). The minimum number of components to guarantee the eps-embedding is given by: n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3) Note that the number of dimensions is independent of the original number of features but instead depends on the size of the dataset: the larger the dataset, the higher is the minimal dimensionality of an eps-embedding. Read more in the :ref:`User Guide <johnson_lindenstrauss>`. Parameters ---------- n_samples : int or numpy array of int greater than 0, Number of samples. If an array is given, it will compute a safe number of components array-wise. eps : float or numpy array of float in ]0,1[, optional (default=0.1) Maximum distortion rate as defined by the Johnson-Lindenstrauss lemma. If an array is given, it will compute a safe number of components array-wise. Returns ------- n_components : int or numpy array of int, The minimal number of components to guarantee with good probability an eps-embedding with n_samples. 
Examples -------- >>> johnson_lindenstrauss_min_dim(1e6, eps=0.5) 663 >>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01]) array([ 663, 11841, 1112658]) >>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1) array([ 7894, 9868, 11841]) References ---------- .. [1] https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma .. [2] Sanjoy Dasgupta and Anupam Gupta, 1999, "An elementary proof of the Johnson-Lindenstrauss Lemma." http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.3654 """ eps = np.asarray(eps) n_samples = np.asarray(n_samples) if np.any(eps <= 0.0) or np.any(eps >= 1): raise ValueError( "The JL bound is defined for eps in ]0, 1[, got %r" % eps) if np.any(n_samples) <= 0: raise ValueError( "The JL bound is defined for n_samples greater than zero, got %r" % n_samples) denominator = (eps ** 2 / 2) - (eps ** 3 / 3) return (4 * np.log(n_samples) / denominator).astype(np.int) def _check_density(density, n_features): """Factorize density check according to Li et al.""" if density == 'auto': density = 1 / np.sqrt(n_features) elif density <= 0 or density > 1: raise ValueError("Expected density in range ]0, 1], got: %r" % density) return density def _check_input_size(n_components, n_features): """Factorize argument checking for random matrix generation""" if n_components <= 0: raise ValueError("n_components must be strictly positive, got %d" % n_components) if n_features <= 0: raise ValueError("n_features must be strictly positive, got %d" % n_components) def gaussian_random_matrix(n_components, n_features, random_state=None): """Generate a dense Gaussian random matrix. The components of the random matrix are drawn from N(0, 1.0 / n_components). Read more in the :ref:`User Guide <gaussian_random_matrix>`. Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. random_state : int, RandomState instance or None, optional (default=None) Control the pseudo random number generator used to generate the matrix at fit time. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- components : numpy array of shape [n_components, n_features] The generated Gaussian random matrix. See Also -------- GaussianRandomProjection sparse_random_matrix """ _check_input_size(n_components, n_features) rng = check_random_state(random_state) components = rng.normal(loc=0.0, scale=1.0 / np.sqrt(n_components), size=(n_components, n_features)) return components def sparse_random_matrix(n_components, n_features, density='auto', random_state=None): """Generalized Achlioptas random sparse matrix for random projection Setting density to 1 / 3 will yield the original matrix by Dimitris Achlioptas while setting a lower value will yield the generalization by Ping Li et al. If we note :math:`s = 1 / density`, the components of the random matrix are drawn from: - -sqrt(s) / sqrt(n_components) with probability 1 / 2s - 0 with probability 1 - 1 / s - +sqrt(s) / sqrt(n_components) with probability 1 / 2s Read more in the :ref:`User Guide <sparse_random_matrix>`. Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. 
density : float in range ]0, 1] or 'auto', optional (default='auto') Ratio of non-zero component in the random projection matrix. If density = 'auto', the value is set to the minimum density as recommended by Ping Li et al.: 1 / sqrt(n_features). Use density = 1 / 3.0 if you want to reproduce the results from Achlioptas, 2001. random_state : int, RandomState instance or None, optional (default=None) Control the pseudo random number generator used to generate the matrix at fit time. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- components : array or CSR matrix with shape [n_components, n_features] The generated Gaussian random matrix. See Also -------- SparseRandomProjection gaussian_random_matrix References ---------- .. [1] Ping Li, T. Hastie and K. W. Church, 2006, "Very Sparse Random Projections". http://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf .. [2] D. Achlioptas, 2001, "Database-friendly random projections", http://www.cs.ucsc.edu/~optas/papers/jl.pdf """ _check_input_size(n_components, n_features) density = _check_density(density, n_features) rng = check_random_state(random_state) if density == 1: # skip index generation if totally dense components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1 return 1 / np.sqrt(n_components) * components else: # Generate location of non zero elements indices = [] offset = 0 indptr = [offset] for i in xrange(n_components): # find the indices of the non-zero components for row i n_nonzero_i = rng.binomial(n_features, density) indices_i = sample_without_replacement(n_features, n_nonzero_i, random_state=rng) indices.append(indices_i) offset += n_nonzero_i indptr.append(offset) indices = np.concatenate(indices) # Among non zero components the probability of the sign is 50%/50% data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1 # build the CSR structure by concatenating the rows components = sp.csr_matrix((data, indices, indptr), shape=(n_components, n_features)) return np.sqrt(1 / density) / np.sqrt(n_components) * components class BaseRandomProjection(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)): """Base class for random projections. Warning: This class should not be used directly. Use derived classes instead. """ @abstractmethod def __init__(self, n_components='auto', eps=0.1, dense_output=False, random_state=None): self.n_components = n_components self.eps = eps self.dense_output = dense_output self.random_state = random_state @abstractmethod def _make_random_matrix(self, n_components, n_features): """ Generate the random projection matrix Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. Returns ------- components : numpy array or CSR matrix [n_components, n_features] The generated random matrix. """ def fit(self, X, y=None): """Generate a sparse random projection matrix Parameters ---------- X : numpy array or scipy.sparse of shape [n_samples, n_features] Training set: only the shape is used to find optimal random matrix dimensions based on the theory referenced in the afore mentioned papers. y : is not used: placeholder to allow for usage in a Pipeline. 
Returns ------- self """ X = check_array(X, accept_sparse=['csr', 'csc']) n_samples, n_features = X.shape if self.n_components == 'auto': self.n_components_ = johnson_lindenstrauss_min_dim( n_samples=n_samples, eps=self.eps) if self.n_components_ <= 0: raise ValueError( 'eps=%f and n_samples=%d lead to a target dimension of ' '%d which is invalid' % ( self.eps, n_samples, self.n_components_)) elif self.n_components_ > n_features: raise ValueError( 'eps=%f and n_samples=%d lead to a target dimension of ' '%d which is larger than the original space with ' 'n_features=%d' % (self.eps, n_samples, self.n_components_, n_features)) else: if self.n_components <= 0: raise ValueError("n_components must be greater than 0, got %s" % self.n_components) elif self.n_components > n_features: warnings.warn( "The number of components is higher than the number of" " features: n_features < n_components (%s < %s)." "The dimensionality of the problem will not be reduced." % (n_features, self.n_components), DataDimensionalityWarning) self.n_components_ = self.n_components # Generate a projection matrix of size [n_components, n_features] self.components_ = self._make_random_matrix(self.n_components_, n_features) # Check contract assert_equal( self.components_.shape, (self.n_components_, n_features), err_msg=('An error has occurred the self.components_ matrix has ' ' not the proper shape.')) return self def transform(self, X): """Project the data by using matrix product with the random matrix Parameters ---------- X : numpy array or scipy.sparse of shape [n_samples, n_features] The input data to project into a smaller dimensional space. Returns ------- X_new : numpy array or scipy sparse of shape [n_samples, n_components] Projected array. """ X = check_array(X, accept_sparse=['csr', 'csc']) check_is_fitted(self, 'components_') if X.shape[1] != self.components_.shape[1]: raise ValueError( 'Impossible to perform projection:' 'X at fit stage had a different number of features. ' '(%s != %s)' % (X.shape[1], self.components_.shape[1])) X_new = safe_sparse_dot(X, self.components_.T, dense_output=self.dense_output) return X_new class GaussianRandomProjection(BaseRandomProjection): """Reduce dimensionality through Gaussian random projection The components of the random matrix are drawn from N(0, 1 / n_components). Read more in the :ref:`User Guide <gaussian_random_matrix>`. Parameters ---------- n_components : int or 'auto', optional (default = 'auto') Dimensionality of the target projection space. n_components can be automatically adjusted according to the number of samples in the dataset and the bound given by the Johnson-Lindenstrauss lemma. In that case the quality of the embedding is controlled by the ``eps`` parameter. It should be noted that Johnson-Lindenstrauss lemma can yield very conservative estimated of the required number of components as it makes no assumption on the structure of the dataset. eps : strictly positive float, optional (default=0.1) Parameter to control the quality of the embedding according to the Johnson-Lindenstrauss lemma when n_components is set to 'auto'. Smaller values lead to better embedding and higher number of dimensions (n_components) in the target projection space. random_state : int, RandomState instance or None, optional (default=None) Control the pseudo random number generator used to generate the matrix at fit time. 
If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- n_component_ : int Concrete number of components computed when n_components="auto". components_ : numpy array of shape [n_components, n_features] Random matrix used for the projection. See Also -------- SparseRandomProjection """ def __init__(self, n_components='auto', eps=0.1, random_state=None): super(GaussianRandomProjection, self).__init__( n_components=n_components, eps=eps, dense_output=True, random_state=random_state) def _make_random_matrix(self, n_components, n_features): """ Generate the random projection matrix Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. Returns ------- components : numpy array or CSR matrix [n_components, n_features] The generated random matrix. """ random_state = check_random_state(self.random_state) return gaussian_random_matrix(n_components, n_features, random_state=random_state) class SparseRandomProjection(BaseRandomProjection): """Reduce dimensionality through sparse random projection Sparse random matrix is an alternative to dense random projection matrix that guarantees similar embedding quality while being much more memory efficient and allowing faster computation of the projected data. If we note `s = 1 / density` the components of the random matrix are drawn from: - -sqrt(s) / sqrt(n_components) with probability 1 / 2s - 0 with probability 1 - 1 / s - +sqrt(s) / sqrt(n_components) with probability 1 / 2s Read more in the :ref:`User Guide <sparse_random_matrix>`. Parameters ---------- n_components : int or 'auto', optional (default = 'auto') Dimensionality of the target projection space. n_components can be automatically adjusted according to the number of samples in the dataset and the bound given by the Johnson-Lindenstrauss lemma. In that case the quality of the embedding is controlled by the ``eps`` parameter. It should be noted that Johnson-Lindenstrauss lemma can yield very conservative estimated of the required number of components as it makes no assumption on the structure of the dataset. density : float in range ]0, 1], optional (default='auto') Ratio of non-zero component in the random projection matrix. If density = 'auto', the value is set to the minimum density as recommended by Ping Li et al.: 1 / sqrt(n_features). Use density = 1 / 3.0 if you want to reproduce the results from Achlioptas, 2001. eps : strictly positive float, optional, (default=0.1) Parameter to control the quality of the embedding according to the Johnson-Lindenstrauss lemma when n_components is set to 'auto'. Smaller values lead to better embedding and higher number of dimensions (n_components) in the target projection space. dense_output : boolean, optional (default=False) If True, ensure that the output of the random projection is a dense numpy array even if the input and random projection matrix are both sparse. In practice, if the number of components is small the number of zero components in the projected data will be very small and it will be more CPU and memory efficient to use a dense representation. If False, the projected data uses a sparse representation if the input is sparse. 
random_state : int, RandomState instance or None, optional (default=None) Control the pseudo random number generator used to generate the matrix at fit time. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- n_component_ : int Concrete number of components computed when n_components="auto". components_ : CSR matrix with shape [n_components, n_features] Random matrix used for the projection. density_ : float in range 0.0 - 1.0 Concrete density computed from when density = "auto". See Also -------- GaussianRandomProjection References ---------- .. [1] Ping Li, T. Hastie and K. W. Church, 2006, "Very Sparse Random Projections". http://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf .. [2] D. Achlioptas, 2001, "Database-friendly random projections", https://users.soe.ucsc.edu/~optas/papers/jl.pdf """ def __init__(self, n_components='auto', density='auto', eps=0.1, dense_output=False, random_state=None): super(SparseRandomProjection, self).__init__( n_components=n_components, eps=eps, dense_output=dense_output, random_state=random_state) self.density = density def _make_random_matrix(self, n_components, n_features): """ Generate the random projection matrix Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. Returns ------- components : numpy array or CSR matrix [n_components, n_features] The generated random matrix. """ random_state = check_random_state(self.random_state) self.density_ = _check_density(self.density, n_features) return sparse_random_matrix(n_components, n_features, density=self.density_, random_state=random_state)
22,894
35.515152
79
py
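A brief sketch for the random projection record above, assuming scikit-learn is importable; the random matrix X is made up and only demonstrates the eps-embedding idea described in the module docstrings.

# Sketch: use the JL bound to size the projection, then check that pairwise
# distances are roughly preserved after a sparse random projection.
import numpy as np
from sklearn.random_projection import (SparseRandomProjection,
                                       johnson_lindenstrauss_min_dim)
from sklearn.metrics.pairwise import euclidean_distances

rng = np.random.RandomState(42)
X = rng.rand(100, 10000)

# Minimum safe target dimensionality for eps=0.5 with 100 samples.
print(johnson_lindenstrauss_min_dim(n_samples=100, eps=0.5))

# With n_components='auto' the transformer applies the same bound at fit time.
srp = SparseRandomProjection(n_components='auto', eps=0.5, random_state=42)
X_new = srp.fit_transform(X)
print(X_new.shape, srp.density_)

# Distances are approximately preserved (within the 1 +/- eps factor with good
# probability, per the docstring), so the ratio below should be close to 1.
d_orig = euclidean_distances(X)[0, 1:]
d_proj = euclidean_distances(X_new)[0, 1:]
print(np.median(d_proj / d_orig))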
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/exceptions.py
""" The :mod:`sklearn.exceptions` module includes all custom warnings and error classes used across scikit-learn. """ __all__ = ['NotFittedError', 'ChangedBehaviorWarning', 'ConvergenceWarning', 'DataConversionWarning', 'DataDimensionalityWarning', 'EfficiencyWarning', 'FitFailedWarning', 'NonBLASDotWarning', 'SkipTestWarning', 'UndefinedMetricWarning'] class NotFittedError(ValueError, AttributeError): """Exception class to raise if estimator is used before fitting. This class inherits from both ValueError and AttributeError to help with exception handling and backward compatibility. Examples -------- >>> from sklearn.svm import LinearSVC >>> from sklearn.exceptions import NotFittedError >>> try: ... LinearSVC().predict([[1, 2], [2, 3], [3, 4]]) ... except NotFittedError as e: ... print(repr(e)) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS NotFittedError('This LinearSVC instance is not fitted yet',) .. versionchanged:: 0.18 Moved from sklearn.utils.validation. """ class ChangedBehaviorWarning(UserWarning): """Warning class used to notify the user of any change in the behavior. .. versionchanged:: 0.18 Moved from sklearn.base. """ class ConvergenceWarning(UserWarning): """Custom warning to capture convergence problems .. versionchanged:: 0.18 Moved from sklearn.utils. """ class DataConversionWarning(UserWarning): """Warning used to notify implicit data conversions happening in the code. This warning occurs when some input data needs to be converted or interpreted in a way that may not match the user's expectations. For example, this warning may occur when the user - passes an integer array to a function which expects float input and will convert the input - requests a non-copying operation, but a copy is required to meet the implementation's data-type expectations; - passes an input whose shape can be interpreted ambiguously. .. versionchanged:: 0.18 Moved from sklearn.utils.validation. """ class DataDimensionalityWarning(UserWarning): """Custom warning to notify potential issues with data dimensionality. For example, in random projection, this warning is raised when the number of components, which quantifies the dimensionality of the target projection space, is higher than the number of features, which quantifies the dimensionality of the original source space, to imply that the dimensionality of the problem will not be reduced. .. versionchanged:: 0.18 Moved from sklearn.utils. """ class EfficiencyWarning(UserWarning): """Warning used to notify the user of inefficient computation. This warning notifies the user that the efficiency may not be optimal due to some reason which may be included as a part of the warning message. This may be subclassed into a more specific Warning class. .. versionadded:: 0.18 """ class FitFailedWarning(RuntimeWarning): """Warning class used if there is an error while fitting the estimator. This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV and the cross-validation helper function cross_val_score to warn when there is an error while fitting the estimator. Examples -------- >>> from sklearn.model_selection import GridSearchCV >>> from sklearn.svm import LinearSVC >>> from sklearn.exceptions import FitFailedWarning >>> import warnings >>> warnings.simplefilter('always', FitFailedWarning) >>> gs = GridSearchCV(LinearSVC(), {'C': [-1, -2]}, error_score=0) >>> X, y = [[1, 2], [3, 4], [5, 6], [7, 8], [8, 9]], [0, 0, 0, 1, 1] >>> with warnings.catch_warnings(record=True) as w: ... try: ... 
gs.fit(X, y) # This will raise a ValueError since C is < 0 ... except ValueError: ... pass ... print(repr(w[-1].message)) ... # doctest: +NORMALIZE_WHITESPACE FitFailedWarning("Classifier fit failed. The score on this train-test partition for these parameters will be set to 0.000000. Details: \\nValueError('Penalty term must be positive; got (C=-2)',)",) .. versionchanged:: 0.18 Moved from sklearn.cross_validation. """ class NonBLASDotWarning(EfficiencyWarning): """Warning used when the dot operation does not use BLAS. This warning is used to notify the user that BLAS was not used for dot operation and hence the efficiency may be affected. .. versionchanged:: 0.18 Moved from sklearn.utils.validation, extends EfficiencyWarning. """ class SkipTestWarning(UserWarning): """Warning class used to notify the user of a test that was skipped. For example, one of the estimator checks requires a pandas import. If the pandas package cannot be imported, the test will be skipped rather than register as a failure. """ class UndefinedMetricWarning(UserWarning): """Warning used when the metric is invalid .. versionchanged:: 0.18 Moved from sklearn.base. """
5,276
32.611465
79
py
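A brief sketch of two exception classes from the record above, assuming scikit-learn is importable; the array shapes are chosen only to trigger the documented checks, using the random projection estimator from the earlier record.

# Sketch: NotFittedError on transform-before-fit, and DataDimensionalityWarning
# when more components than features are requested (see BaseRandomProjection.fit).
import warnings
import numpy as np
from sklearn.exceptions import NotFittedError, DataDimensionalityWarning
from sklearn.random_projection import GaussianRandomProjection

rng = np.random.RandomState(0)
X = rng.rand(20, 5)

# Calling transform() before fit() raises NotFittedError (a ValueError subclass).
try:
    GaussianRandomProjection(n_components=3).transform(X)
except NotFittedError as exc:
    print(type(exc).__name__)

# Asking for more components than features triggers DataDimensionalityWarning.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    GaussianRandomProjection(n_components=10).fit(X)
print([w.category.__name__ for w in caught])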
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/setup.py
import os from os.path import join import warnings from sklearn._build_utils import maybe_cythonize_extensions def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration from numpy.distutils.system_info import get_info, BlasNotFoundError import numpy libraries = [] if os.name == 'posix': libraries.append('m') config = Configuration('sklearn', parent_package, top_path) # submodules with build utilities config.add_subpackage('__check_build') config.add_subpackage('_build_utils') # submodules which do not have their own setup.py # we must manually add sub-submodules & tests config.add_subpackage('covariance') config.add_subpackage('covariance/tests') config.add_subpackage('cross_decomposition') config.add_subpackage('cross_decomposition/tests') config.add_subpackage('feature_selection') config.add_subpackage('feature_selection/tests') config.add_subpackage('gaussian_process') config.add_subpackage('gaussian_process/tests') config.add_subpackage('mixture') config.add_subpackage('mixture/tests') config.add_subpackage('model_selection') config.add_subpackage('model_selection/tests') config.add_subpackage('neural_network') config.add_subpackage('neural_network/tests') config.add_subpackage('preprocessing') config.add_subpackage('preprocessing/tests') config.add_subpackage('semi_supervised') config.add_subpackage('semi_supervised/tests') # submodules which have their own setup.py # leave out "linear_model" and "utils" for now; add them after cblas below config.add_subpackage('cluster') config.add_subpackage('datasets') config.add_subpackage('decomposition') config.add_subpackage('ensemble') config.add_subpackage('externals') config.add_subpackage('feature_extraction') config.add_subpackage('manifold') config.add_subpackage('metrics') config.add_subpackage('metrics/cluster') config.add_subpackage('neighbors') config.add_subpackage('tree') config.add_subpackage('svm') # add cython extension module for isotonic regression config.add_extension('_isotonic', sources=['_isotonic.pyx'], include_dirs=[numpy.get_include()], libraries=libraries, ) # some libs needs cblas, fortran-compiled BLAS will not be sufficient blas_info = get_info('blas_opt', 0) if (not blas_info) or ( ('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])): config.add_library('cblas', sources=[join('src', 'cblas', '*.c')]) warnings.warn(BlasNotFoundError.__doc__) # the following packages depend on cblas, so they have to be build # after the above. config.add_subpackage('linear_model') config.add_subpackage('utils') # add the test directory config.add_subpackage('tests') maybe_cythonize_extensions(top_path, config) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
3,201
34.977528
78
py
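A minimal sketch of the numpy.distutils pattern used by the setup.py record above; the package name "mypkg" and extension "_fastops" are hypothetical placeholders and not part of the original build.

# Sketch: a Configuration with one sub-package and one compiled extension,
# mirroring the add_subpackage / add_extension calls shown above.
import os

def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    import numpy

    libraries = []
    if os.name == 'posix':
        libraries.append('m')          # link libm on POSIX, as in the file above

    config = Configuration('mypkg', parent_package, top_path)
    config.add_subpackage('tests')     # plain sub-package with its own modules
    config.add_extension('_fastops',   # compiled extension (C or Cython output)
                         sources=['_fastops.c'],
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)
    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())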
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/discriminant_analysis.py
""" Linear Discriminant Analysis and Quadratic Discriminant Analysis """ # Authors: Clemens Brunner # Martin Billinger # Matthieu Perrot # Mathieu Blondel # License: BSD 3-Clause from __future__ import print_function import warnings import numpy as np from .utils import deprecated from scipy import linalg from .externals.six import string_types from .externals.six.moves import xrange from .base import BaseEstimator, TransformerMixin, ClassifierMixin from .linear_model.base import LinearClassifierMixin from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance from .utils.multiclass import unique_labels from .utils import check_array, check_X_y from .utils.validation import check_is_fitted from .utils.multiclass import check_classification_targets from .preprocessing import StandardScaler __all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis'] def _cov(X, shrinkage=None): """Estimate covariance matrix (using optional shrinkage). Parameters ---------- X : array-like, shape (n_samples, n_features) Input data. shrinkage : string or float, optional Shrinkage parameter, possible values: - None or 'empirical': no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Returns ------- s : array, shape (n_features, n_features) Estimated covariance matrix. """ shrinkage = "empirical" if shrinkage is None else shrinkage if isinstance(shrinkage, string_types): if shrinkage == 'auto': sc = StandardScaler() # standardize features X = sc.fit_transform(X) s = ledoit_wolf(X)[0] # rescale s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] elif shrinkage == 'empirical': s = empirical_covariance(X) else: raise ValueError('unknown shrinkage parameter') elif isinstance(shrinkage, float) or isinstance(shrinkage, int): if shrinkage < 0 or shrinkage > 1: raise ValueError('shrinkage parameter must be between 0 and 1') s = shrunk_covariance(empirical_covariance(X), shrinkage) else: raise TypeError('shrinkage must be of string or int type') return s def _class_means(X, y): """Compute class means. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. Returns ------- means : array-like, shape (n_features,) Class means. """ means = [] classes = np.unique(y) for group in classes: Xg = X[y == group, :] means.append(Xg.mean(0)) return np.asarray(means) def _class_cov(X, y, priors=None, shrinkage=None): """Compute class covariance matrix. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. priors : array-like, shape (n_classes,) Class priors. shrinkage : string or float, optional Shrinkage parameter, possible values: - None: no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Returns ------- cov : array-like, shape (n_features, n_features) Class covariance matrix. """ classes = np.unique(y) covs = [] for group in classes: Xg = X[y == group, :] covs.append(np.atleast_2d(_cov(Xg, shrinkage))) return np.average(covs, axis=0, weights=priors) class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin, TransformerMixin): """Linear Discriminant Analysis A classifier with a linear decision boundary, generated by fitting class conditional densities to the data and using Bayes' rule. 
The model fits a Gaussian density to each class, assuming that all classes share the same covariance matrix. The fitted model can also be used to reduce the dimensionality of the input by projecting it to the most discriminative directions. .. versionadded:: 0.17 *LinearDiscriminantAnalysis*. Read more in the :ref:`User Guide <lda_qda>`. Parameters ---------- solver : string, optional Solver to use, possible values: - 'svd': Singular value decomposition (default). Does not compute the covariance matrix, therefore this solver is recommended for data with a large number of features. - 'lsqr': Least squares solution, can be combined with shrinkage. - 'eigen': Eigenvalue decomposition, can be combined with shrinkage. shrinkage : string or float, optional Shrinkage parameter, possible values: - None: no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Note that shrinkage works only with 'lsqr' and 'eigen' solvers. priors : array, optional, shape (n_classes,) Class priors. n_components : int, optional Number of components (< n_classes - 1) for dimensionality reduction. store_covariance : bool, optional Additionally compute class covariance matrix (default False), used only in 'svd' solver. .. versionadded:: 0.17 tol : float, optional, (default 1.0e-4) Threshold used for rank estimation in SVD solver. .. versionadded:: 0.17 Attributes ---------- coef_ : array, shape (n_features,) or (n_classes, n_features) Weight vector(s). intercept_ : array, shape (n_features,) Intercept term. covariance_ : array-like, shape (n_features, n_features) Covariance matrix (shared by all classes). explained_variance_ratio_ : array, shape (n_components,) Percentage of variance explained by each of the selected components. If ``n_components`` is not set then all components are stored and the sum of explained variances is equal to 1.0. Only available when eigen or svd solver is used. means_ : array-like, shape (n_classes, n_features) Class means. priors_ : array-like, shape (n_classes,) Class priors (sum to 1). scalings_ : array-like, shape (rank, n_classes - 1) Scaling of the features in the space spanned by the class centroids. xbar_ : array-like, shape (n_features,) Overall mean. classes_ : array-like, shape (n_classes,) Unique class labels. See also -------- sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic Discriminant Analysis Notes ----- The default solver is 'svd'. It can perform both classification and transform, and it does not rely on the calculation of the covariance matrix. This can be an advantage in situations where the number of features is large. However, the 'svd' solver cannot be used with shrinkage. The 'lsqr' solver is an efficient algorithm that only works for classification. It supports shrinkage. The 'eigen' solver is based on the optimization of the between class scatter to within class scatter ratio. It can be used for both classification and transform, and it supports shrinkage. However, the 'eigen' solver needs to compute the covariance matrix, so it might not be suitable for situations with a high number of features. 
Examples -------- >>> import numpy as np >>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> clf = LinearDiscriminantAnalysis() >>> clf.fit(X, y) LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None, solver='svd', store_covariance=False, tol=0.0001) >>> print(clf.predict([[-0.8, -1]])) [1] """ def __init__(self, solver='svd', shrinkage=None, priors=None, n_components=None, store_covariance=False, tol=1e-4): self.solver = solver self.shrinkage = shrinkage self.priors = priors self.n_components = n_components self.store_covariance = store_covariance # used only in svd solver self.tol = tol # used only in svd solver def _solve_lsqr(self, X, y, shrinkage): """Least squares solver. The least squares solver computes a straightforward solution of the optimal decision rule based directly on the discriminant functions. It can only be used for classification (with optional shrinkage), because estimation of eigenvectors is not performed. Therefore, dimensionality reduction with the transform is not supported. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_classes) Target values. shrinkage : string or float, optional Shrinkage parameter, possible values: - None: no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Notes ----- This solver is based on [1]_, section 2.6.2, pp. 39-41. References ---------- .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN 0-471-05669-3. """ self.means_ = _class_means(X, y) self.covariance_ = _class_cov(X, y, self.priors_, shrinkage) self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(self.priors_)) def _solve_eigen(self, X, y, shrinkage): """Eigenvalue solver. The eigenvalue solver computes the optimal solution of the Rayleigh coefficient (basically the ratio of between class scatter to within class scatter). This solver supports both classification and dimensionality reduction (with optional shrinkage). Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. shrinkage : string or float, optional Shrinkage parameter, possible values: - None: no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage constant. Notes ----- This solver is based on [1]_, section 3.8.3, pp. 121-124. References ---------- .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN 0-471-05669-3. 
""" self.means_ = _class_means(X, y) self.covariance_ = _class_cov(X, y, self.priors_, shrinkage) Sw = self.covariance_ # within scatter St = _cov(X, shrinkage) # total scatter Sb = St - Sw # between scatter evals, evecs = linalg.eigh(Sb, Sw) self.explained_variance_ratio_ = np.sort(evals / np.sum(evals) )[::-1][:self._max_components] evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors evecs /= np.linalg.norm(evecs, axis=0) self.scalings_ = evecs self.coef_ = np.dot(self.means_, evecs).dot(evecs.T) self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(self.priors_)) def _solve_svd(self, X, y): """SVD solver. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. """ n_samples, n_features = X.shape n_classes = len(self.classes_) self.means_ = _class_means(X, y) if self.store_covariance: self.covariance_ = _class_cov(X, y, self.priors_) Xc = [] for idx, group in enumerate(self.classes_): Xg = X[y == group, :] Xc.append(Xg - self.means_[idx]) self.xbar_ = np.dot(self.priors_, self.means_) Xc = np.concatenate(Xc, axis=0) # 1) within (univariate) scaling by with classes std-dev std = Xc.std(axis=0) # avoid division by zero in normalization std[std == 0] = 1. fac = 1. / (n_samples - n_classes) # 2) Within variance scaling X = np.sqrt(fac) * (Xc / std) # SVD of centered (within)scaled data U, S, V = linalg.svd(X, full_matrices=False) rank = np.sum(S > self.tol) if rank < n_features: warnings.warn("Variables are collinear.") # Scaling of within covariance is: V' 1/S scalings = (V[:rank] / std).T / S[:rank] # 3) Between variance scaling # Scale weighted centers X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) * (self.means_ - self.xbar_).T).T, scalings) # Centers are living in a space with n_classes-1 dim (maximum) # Use SVD to find projection in the space spanned by the # (n_classes) centers _, S, V = linalg.svd(X, full_matrices=0) self.explained_variance_ratio_ = (S**2 / np.sum( S**2))[:self._max_components] rank = np.sum(S > self.tol * S[0]) self.scalings_ = np.dot(scalings, V.T[:, :rank]) coef = np.dot(self.means_ - self.xbar_, self.scalings_) self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1) + np.log(self.priors_)) self.coef_ = np.dot(coef, self.scalings_.T) self.intercept_ -= np.dot(self.xbar_, self.coef_.T) def fit(self, X, y): """Fit LinearDiscriminantAnalysis model according to the given training data and parameters. .. versionchanged:: 0.19 *store_covariance* has been moved to main constructor. .. versionchanged:: 0.19 *tol* has been moved to main constructor. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array, shape (n_samples,) Target values. """ X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self) self.classes_ = unique_labels(y) if self.priors is None: # estimate priors from sample _, y_t = np.unique(y, return_inverse=True) # non-negative ints self.priors_ = np.bincount(y_t) / float(len(y)) else: self.priors_ = np.asarray(self.priors) if (self.priors_ < 0).any(): raise ValueError("priors must be non-negative") if self.priors_.sum() != 1: warnings.warn("The priors do not sum to 1. 
Renormalizing", UserWarning) self.priors_ = self.priors_ / self.priors_.sum() # Get the maximum number of components if self.n_components is None: self._max_components = len(self.classes_) - 1 else: self._max_components = min(len(self.classes_) - 1, self.n_components) if self.solver == 'svd': if self.shrinkage is not None: raise NotImplementedError('shrinkage not supported') self._solve_svd(X, y) elif self.solver == 'lsqr': self._solve_lsqr(X, y, shrinkage=self.shrinkage) elif self.solver == 'eigen': self._solve_eigen(X, y, shrinkage=self.shrinkage) else: raise ValueError("unknown solver {} (valid solvers are 'svd', " "'lsqr', and 'eigen').".format(self.solver)) if self.classes_.size == 2: # treat binary case as a special case self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2) self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0], ndmin=1) return self def transform(self, X): """Project data to maximize class separation. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data. Returns ------- X_new : array, shape (n_samples, n_components) Transformed data. """ if self.solver == 'lsqr': raise NotImplementedError("transform not implemented for 'lsqr' " "solver (use 'svd' or 'eigen').") check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any) X = check_array(X) if self.solver == 'svd': X_new = np.dot(X - self.xbar_, self.scalings_) elif self.solver == 'eigen': X_new = np.dot(X, self.scalings_) return X_new[:, :self._max_components] def predict_proba(self, X): """Estimate probability. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data. Returns ------- C : array, shape (n_samples, n_classes) Estimated probabilities. """ prob = self.decision_function(X) prob *= -1 np.exp(prob, prob) prob += 1 np.reciprocal(prob, prob) if len(self.classes_) == 2: # binary case return np.column_stack([1 - prob, prob]) else: # OvR normalization, like LibLinear's predict_probability prob /= prob.sum(axis=1).reshape((prob.shape[0], -1)) return prob def predict_log_proba(self, X): """Estimate log probability. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data. Returns ------- C : array, shape (n_samples, n_classes) Estimated log probabilities. """ return np.log(self.predict_proba(X)) class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin): """Quadratic Discriminant Analysis A classifier with a quadratic decision boundary, generated by fitting class conditional densities to the data and using Bayes' rule. The model fits a Gaussian density to each class. .. versionadded:: 0.17 *QuadraticDiscriminantAnalysis* Read more in the :ref:`User Guide <lda_qda>`. Parameters ---------- priors : array, optional, shape = [n_classes] Priors on classes reg_param : float, optional Regularizes the covariance estimate as ``(1-reg_param)*Sigma + reg_param*np.eye(n_features)`` store_covariance : boolean If True the covariance matrices are computed and stored in the `self.covariance_` attribute. .. versionadded:: 0.17 tol : float, optional, default 1.0e-4 Threshold used for rank estimation. .. versionadded:: 0.17 Attributes ---------- covariance_ : list of array-like, shape = [n_features, n_features] Covariance matrices of each class. means_ : array-like, shape = [n_classes, n_features] Class means. priors_ : array-like, shape = [n_classes] Class priors (sum to 1). 
rotations_ : list of arrays For each class k an array of shape [n_features, n_k], with ``n_k = min(n_features, number of elements in class k)`` It is the rotation of the Gaussian distribution, i.e. its principal axis. scalings_ : list of arrays For each class k an array of shape [n_k]. It contains the scaling of the Gaussian distributions along its principal axes, i.e. the variance in the rotated coordinate system. Examples -------- >>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> clf = QuadraticDiscriminantAnalysis() >>> clf.fit(X, y) ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0, store_covariance=False, store_covariances=None, tol=0.0001) >>> print(clf.predict([[-0.8, -1]])) [1] See also -------- sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear Discriminant Analysis """ def __init__(self, priors=None, reg_param=0., store_covariance=False, tol=1.0e-4, store_covariances=None): self.priors = np.asarray(priors) if priors is not None else None self.reg_param = reg_param self.store_covariances = store_covariances self.store_covariance = store_covariance self.tol = tol @property @deprecated("Attribute covariances_ was deprecated in version" " 0.19 and will be removed in 0.21. Use " "covariance_ instead") def covariances_(self): return self.covariance_ def fit(self, X, y): """Fit the model according to the given training data and parameters. .. versionchanged:: 0.19 ``store_covariances`` has been moved to main constructor as ``store_covariance`` .. versionchanged:: 0.19 ``tol`` has been moved to main constructor. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples is the number of samples and n_features is the number of features. y : array, shape = [n_samples] Target values (integers) """ X, y = check_X_y(X, y) check_classification_targets(y) self.classes_, y = np.unique(y, return_inverse=True) n_samples, n_features = X.shape n_classes = len(self.classes_) if n_classes < 2: raise ValueError('y has less than 2 classes') if self.priors is None: self.priors_ = np.bincount(y) / float(n_samples) else: self.priors_ = self.priors cov = None store_covariance = self.store_covariance or self.store_covariances if self.store_covariances: warnings.warn("'store_covariances' was renamed to store_covariance" " in version 0.19 and will be removed in 0.21.", DeprecationWarning) if store_covariance: cov = [] means = [] scalings = [] rotations = [] for ind in xrange(n_classes): Xg = X[y == ind, :] meang = Xg.mean(0) means.append(meang) if len(Xg) == 1: raise ValueError('y has only 1 sample in class %s, covariance ' 'is ill defined.' 
% str(self.classes_[ind])) Xgc = Xg - meang # Xgc = U * S * V.T U, S, Vt = np.linalg.svd(Xgc, full_matrices=False) rank = np.sum(S > self.tol) if rank < n_features: warnings.warn("Variables are collinear") S2 = (S ** 2) / (len(Xg) - 1) S2 = ((1 - self.reg_param) * S2) + self.reg_param if self.store_covariance or store_covariance: # cov = V * (S^2 / (n-1)) * V.T cov.append(np.dot(S2 * Vt.T, Vt)) scalings.append(S2) rotations.append(Vt.T) if self.store_covariance or store_covariance: self.covariance_ = cov self.means_ = np.asarray(means) self.scalings_ = scalings self.rotations_ = rotations return self def _decision_function(self, X): check_is_fitted(self, 'classes_') X = check_array(X) norm2 = [] for i in range(len(self.classes_)): R = self.rotations_[i] S = self.scalings_[i] Xm = X - self.means_[i] X2 = np.dot(Xm, R * (S ** (-0.5))) norm2.append(np.sum(X2 ** 2, 1)) norm2 = np.array(norm2).T # shape = [len(X), n_classes] u = np.asarray([np.sum(np.log(s)) for s in self.scalings_]) return (-0.5 * (norm2 + u) + np.log(self.priors_)) def decision_function(self, X): """Apply decision function to an array of samples. Parameters ---------- X : array-like, shape = [n_samples, n_features] Array of samples (test vectors). Returns ------- C : array, shape = [n_samples, n_classes] or [n_samples,] Decision function values related to each class, per sample. In the two-class case, the shape is [n_samples,], giving the log likelihood ratio of the positive class. """ dec_func = self._decision_function(X) # handle special case of two classes if len(self.classes_) == 2: return dec_func[:, 1] - dec_func[:, 0] return dec_func def predict(self, X): """Perform classification on an array of test vectors X. The predicted class C for each sample in X is returned. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = [n_samples] """ d = self._decision_function(X) y_pred = self.classes_.take(d.argmax(1)) return y_pred def predict_proba(self, X): """Return posterior probabilities of classification. Parameters ---------- X : array-like, shape = [n_samples, n_features] Array of samples/test vectors. Returns ------- C : array, shape = [n_samples, n_classes] Posterior probabilities of classification per class. """ values = self._decision_function(X) # compute the likelihood of the underlying gaussian models # up to a multiplicative constant. likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis]) # compute posterior probabilities return likelihood / likelihood.sum(axis=1)[:, np.newaxis] def predict_log_proba(self, X): """Return posterior probabilities of classification. Parameters ---------- X : array-like, shape = [n_samples, n_features] Array of samples/test vectors. Returns ------- C : array, shape = [n_samples, n_classes] Posterior log-probabilities of classification per class. """ # XXX : can do better to avoid precision overflows probas_ = self.predict_proba(X) return np.log(probas_)
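A minimal usage sketch for the two estimators defined above, kept separate from the module source: the toy two-class data matches the doctest in the QDA docstring, while the solver, n_components and reg_param values are illustrative assumptions rather than anything prescribed by this file.

import numpy as np
from sklearn.discriminant_analysis import (LinearDiscriminantAnalysis,
                                           QuadraticDiscriminantAnalysis)

# Toy two-class data (same points as the docstring example).
X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
y = np.array([1, 1, 1, 2, 2, 2])

# LDA with the SVD solver doubles as a supervised projection (transform).
lda = LinearDiscriminantAnalysis(solver='svd', n_components=1).fit(X, y)
print(lda.transform(X).shape)           # (6, 1): one discriminant axis for 2 classes
print(lda.predict([[-0.8, -1]]))        # [1]

# QDA fits one Gaussian per class; reg_param shrinks each covariance estimate.
qda = QuadraticDiscriminantAnalysis(reg_param=0.1).fit(X, y)
print(qda.predict_proba([[-0.8, -1]]))  # posterior probability per class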
27,513
34.005089
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/kernel_ridge.py
"""Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression.""" # Authors: Mathieu Blondel <[email protected]> # Jan Hendrik Metzen <[email protected]> # License: BSD 3 clause import numpy as np from .base import BaseEstimator, RegressorMixin from .metrics.pairwise import pairwise_kernels from .linear_model.ridge import _solve_cholesky_kernel from .utils import check_array, check_X_y from .utils.validation import check_is_fitted class KernelRidge(BaseEstimator, RegressorMixin): """Kernel ridge regression. Kernel ridge regression (KRR) combines ridge regression (linear least squares with l2-norm regularization) with the kernel trick. It thus learns a linear function in the space induced by the respective kernel and the data. For non-linear kernels, this corresponds to a non-linear function in the original space. The form of the model learned by KRR is identical to support vector regression (SVR). However, different loss functions are used: KRR uses squared error loss while support vector regression uses epsilon-insensitive loss, both combined with l2 regularization. In contrast to SVR, fitting a KRR model can be done in closed-form and is typically faster for medium-sized datasets. On the other hand, the learned model is non-sparse and thus slower than SVR, which learns a sparse model for epsilon > 0, at prediction-time. This estimator has built-in support for multi-variate regression (i.e., when y is a 2d-array of shape [n_samples, n_targets]). Read more in the :ref:`User Guide <kernel_ridge>`. Parameters ---------- alpha : {float, array-like}, shape = [n_targets] Small positive values of alpha improve the conditioning of the problem and reduce the variance of the estimates. Alpha corresponds to ``(2*C)^-1`` in other linear models such as LogisticRegression or LinearSVC. If an array is passed, penalties are assumed to be specific to the targets. Hence they must correspond in number. kernel : string or callable, default="linear" Kernel mapping used internally. A callable should accept two arguments and the keyword arguments passed to this object as kernel_params, and should return a floating point number. gamma : float, default=None Gamma parameter for the RBF, laplacian, polynomial, exponential chi2 and sigmoid kernels. Interpretation of the default value is left to the kernel; see the documentation for sklearn.metrics.pairwise. Ignored by other kernels. degree : float, default=3 Degree of the polynomial kernel. Ignored by other kernels. coef0 : float, default=1 Zero coefficient for polynomial and sigmoid kernels. Ignored by other kernels. kernel_params : mapping of string to any, optional Additional parameters (keyword arguments) for kernel function passed as callable object. Attributes ---------- dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets] Representation of weight vector(s) in kernel space X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data, which is also required for prediction References ---------- * Kevin P. Murphy "Machine Learning: A Probabilistic Perspective", The MIT Press chapter 14.4.3, pp. 492-493 See also -------- Ridge Linear ridge regression. SVR Support Vector Regression implemented using libsvm. 
Examples -------- >>> from sklearn.kernel_ridge import KernelRidge >>> import numpy as np >>> n_samples, n_features = 10, 5 >>> rng = np.random.RandomState(0) >>> y = rng.randn(n_samples) >>> X = rng.randn(n_samples, n_features) >>> clf = KernelRidge(alpha=1.0) >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear', kernel_params=None) """ def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1, kernel_params=None): self.alpha = alpha self.kernel = kernel self.gamma = gamma self.degree = degree self.coef0 = coef0 self.kernel_params = kernel_params def _get_kernel(self, X, Y=None): if callable(self.kernel): params = self.kernel_params or {} else: params = {"gamma": self.gamma, "degree": self.degree, "coef0": self.coef0} return pairwise_kernels(X, Y, metric=self.kernel, filter_params=True, **params) @property def _pairwise(self): return self.kernel == "precomputed" def fit(self, X, y=None, sample_weight=None): """Fit Kernel Ridge regression model Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values sample_weight : float or array-like of shape [n_samples] Individual weights for each sample, ignored if None is passed. Returns ------- self : returns an instance of self. """ # Convert data X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True, y_numeric=True) if sample_weight is not None and not isinstance(sample_weight, float): sample_weight = check_array(sample_weight, ensure_2d=False) K = self._get_kernel(X) alpha = np.atleast_1d(self.alpha) ravel = False if len(y.shape) == 1: y = y.reshape(-1, 1) ravel = True copy = self.kernel == "precomputed" self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha, sample_weight, copy) if ravel: self.dual_coef_ = self.dual_coef_.ravel() self.X_fit_ = X return self def predict(self, X): """Predict using the kernel ridge model Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Samples. Returns ------- C : array, shape = [n_samples] or [n_samples, n_targets] Returns predicted values. """ check_is_fitted(self, ["X_fit_", "dual_coef_"]) K = self._get_kernel(X, self.X_fit_) return np.dot(K, self.dual_coef_)
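A short, hedged sketch of fitting the KernelRidge estimator above with a non-linear kernel; the synthetic sine data and the alpha/gamma settings are assumptions chosen only to show the fit/predict flow.

import numpy as np
from sklearn.kernel_ridge import KernelRidge

rng = np.random.RandomState(0)
X = rng.uniform(-3., 3., size=(50, 1))        # 50 one-dimensional samples
y = np.sin(X).ravel() + 0.1 * rng.randn(50)   # noisy sine target

# RBF kernel ridge: alpha is the l2 penalty, gamma the RBF kernel width.
model = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.5).fit(X, y)
y_pred = model.predict(X)
print(y_pred.shape)                           # (50,)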
6,731
35.193548
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/calibration.py
"""Calibration of predicted probabilities.""" # Author: Alexandre Gramfort <[email protected]> # Balazs Kegl <[email protected]> # Jan Hendrik Metzen <[email protected]> # Mathieu Blondel <[email protected]> # # License: BSD 3 clause from __future__ import division import warnings from math import log import numpy as np from scipy.optimize import fmin_bfgs from sklearn.preprocessing import LabelEncoder from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone from .preprocessing import label_binarize, LabelBinarizer from .utils import check_X_y, check_array, indexable, column_or_1d from .utils.validation import check_is_fitted, check_consistent_length from .utils.fixes import signature from .isotonic import IsotonicRegression from .svm import LinearSVC from .model_selection import check_cv from .metrics.classification import _check_binary_probabilistic_predictions class CalibratedClassifierCV(BaseEstimator, ClassifierMixin): """Probability calibration with isotonic regression or sigmoid. With this class, the base_estimator is fit on the train set of the cross-validation generator and the test set is used for calibration. The probabilities for each of the folds are then averaged for prediction. In case that cv="prefit" is passed to __init__, it is assumed that base_estimator has been fitted already and all data is used for calibration. Note that data for fitting the classifier and for calibrating it must be disjoint. Read more in the :ref:`User Guide <calibration>`. Parameters ---------- base_estimator : instance BaseEstimator The classifier whose output decision function needs to be calibrated to offer more accurate predict_proba outputs. If cv=prefit, the classifier must have been fit already on data. method : 'sigmoid' or 'isotonic' The method to use for calibration. Can be 'sigmoid' which corresponds to Platt's method or 'isotonic' which is a non-parametric approach. It is not advised to use isotonic calibration with too few calibration samples ``(<<1000)`` since it tends to overfit. Use sigmoids (Platt's calibration) in this case. cv : integer, cross-validation generator, iterable or "prefit", optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`sklearn.model_selection.StratifiedKFold` is used. If ``y`` is neither binary nor multiclass, :class:`sklearn.model_selection.KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. If "prefit" is passed, it is assumed that base_estimator has been fitted already and all data is used for calibration. Attributes ---------- classes_ : array, shape (n_classes) The class labels. calibrated_classifiers_ : list (len() equal to cv or 1 if cv == "prefit") The list of calibrated classifiers, one for each crossvalidation fold, which has been fitted on all but the validation fold and calibrated on the validation fold. References ---------- .. [1] Obtaining calibrated probability estimates from decision trees and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001 .. [2] Transforming Classifier Scores into Accurate Multiclass Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002) .. 
[3] Probabilistic Outputs for Support Vector Machines and Comparisons to Regularized Likelihood Methods, J. Platt, (1999) .. [4] Predicting Good Probabilities with Supervised Learning, A. Niculescu-Mizil & R. Caruana, ICML 2005 """ def __init__(self, base_estimator=None, method='sigmoid', cv=3): self.base_estimator = base_estimator self.method = method self.cv = cv def fit(self, X, y, sample_weight=None): """Fit the calibrated model Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) Target values. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Returns ------- self : object Returns an instance of self. """ X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'], force_all_finite=False) X, y = indexable(X, y) le = LabelBinarizer().fit(y) self.classes_ = le.classes_ # Check that each cross-validation fold can have at least one # example per class n_folds = self.cv if isinstance(self.cv, int) \ else self.cv.n_folds if hasattr(self.cv, "n_folds") else None if n_folds and \ np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]): raise ValueError("Requesting %d-fold cross-validation but provided" " less than %d examples for at least one class." % (n_folds, n_folds)) self.calibrated_classifiers_ = [] if self.base_estimator is None: # we want all classifiers that don't expose a random_state # to be deterministic (and we don't want to expose this one). base_estimator = LinearSVC(random_state=0) else: base_estimator = self.base_estimator if self.cv == "prefit": calibrated_classifier = _CalibratedClassifier( base_estimator, method=self.method) if sample_weight is not None: calibrated_classifier.fit(X, y, sample_weight) else: calibrated_classifier.fit(X, y) self.calibrated_classifiers_.append(calibrated_classifier) else: cv = check_cv(self.cv, y, classifier=True) fit_parameters = signature(base_estimator.fit).parameters estimator_name = type(base_estimator).__name__ if (sample_weight is not None and "sample_weight" not in fit_parameters): warnings.warn("%s does not support sample_weight. Samples" " weights are only used for the calibration" " itself." % estimator_name) base_estimator_sample_weight = None else: if sample_weight is not None: sample_weight = check_array(sample_weight, ensure_2d=False) check_consistent_length(y, sample_weight) base_estimator_sample_weight = sample_weight for train, test in cv.split(X, y): this_estimator = clone(base_estimator) if base_estimator_sample_weight is not None: this_estimator.fit( X[train], y[train], sample_weight=base_estimator_sample_weight[train]) else: this_estimator.fit(X[train], y[train]) calibrated_classifier = _CalibratedClassifier( this_estimator, method=self.method, classes=self.classes_) if sample_weight is not None: calibrated_classifier.fit(X[test], y[test], sample_weight[test]) else: calibrated_classifier.fit(X[test], y[test]) self.calibrated_classifiers_.append(calibrated_classifier) return self def predict_proba(self, X): """Posterior probabilities of classification This function returns posterior probabilities of classification according to each class on an array of test vectors X. Parameters ---------- X : array-like, shape (n_samples, n_features) The samples. Returns ------- C : array, shape (n_samples, n_classes) The predicted probas. 
""" check_is_fitted(self, ["classes_", "calibrated_classifiers_"]) X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], force_all_finite=False) # Compute the arithmetic mean of the predictions of the calibrated # classifiers mean_proba = np.zeros((X.shape[0], len(self.classes_))) for calibrated_classifier in self.calibrated_classifiers_: proba = calibrated_classifier.predict_proba(X) mean_proba += proba mean_proba /= len(self.calibrated_classifiers_) return mean_proba def predict(self, X): """Predict the target of new samples. Can be different from the prediction of the uncalibrated classifier. Parameters ---------- X : array-like, shape (n_samples, n_features) The samples. Returns ------- C : array, shape (n_samples,) The predicted class. """ check_is_fitted(self, ["classes_", "calibrated_classifiers_"]) return self.classes_[np.argmax(self.predict_proba(X), axis=1)] class _CalibratedClassifier(object): """Probability calibration with isotonic regression or sigmoid. It assumes that base_estimator has already been fit, and trains the calibration on the input set of the fit function. Note that this class should not be used as an estimator directly. Use CalibratedClassifierCV with cv="prefit" instead. Parameters ---------- base_estimator : instance BaseEstimator The classifier whose output decision function needs to be calibrated to offer more accurate predict_proba outputs. No default value since it has to be an already fitted estimator. method : 'sigmoid' | 'isotonic' The method to use for calibration. Can be 'sigmoid' which corresponds to Platt's method or 'isotonic' which is a non-parametric approach based on isotonic regression. classes : array-like, shape (n_classes,), optional Contains unique classes used to fit the base estimator. if None, then classes is extracted from the given target values in fit(). References ---------- .. [1] Obtaining calibrated probability estimates from decision trees and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001 .. [2] Transforming Classifier Scores into Accurate Multiclass Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002) .. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to Regularized Likelihood Methods, J. Platt, (1999) .. [4] Predicting Good Probabilities with Supervised Learning, A. Niculescu-Mizil & R. Caruana, ICML 2005 """ def __init__(self, base_estimator, method='sigmoid', classes=None): self.base_estimator = base_estimator self.method = method self.classes = classes def _preproc(self, X): n_classes = len(self.classes_) if hasattr(self.base_estimator, "decision_function"): df = self.base_estimator.decision_function(X) if df.ndim == 1: df = df[:, np.newaxis] elif hasattr(self.base_estimator, "predict_proba"): df = self.base_estimator.predict_proba(X) if n_classes == 2: df = df[:, 1:] else: raise RuntimeError('classifier has no decision_function or ' 'predict_proba method.') idx_pos_class = self.label_encoder_.\ transform(self.base_estimator.classes_) return df, idx_pos_class def fit(self, X, y, sample_weight=None): """Calibrate the fitted model Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) Target values. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Returns ------- self : object Returns an instance of self. 
""" self.label_encoder_ = LabelEncoder() if self.classes is None: self.label_encoder_.fit(y) else: self.label_encoder_.fit(self.classes) self.classes_ = self.label_encoder_.classes_ Y = label_binarize(y, self.classes_) df, idx_pos_class = self._preproc(X) self.calibrators_ = [] for k, this_df in zip(idx_pos_class, df.T): if self.method == 'isotonic': calibrator = IsotonicRegression(out_of_bounds='clip') elif self.method == 'sigmoid': calibrator = _SigmoidCalibration() else: raise ValueError('method should be "sigmoid" or ' '"isotonic". Got %s.' % self.method) calibrator.fit(this_df, Y[:, k], sample_weight) self.calibrators_.append(calibrator) return self def predict_proba(self, X): """Posterior probabilities of classification This function returns posterior probabilities of classification according to each class on an array of test vectors X. Parameters ---------- X : array-like, shape (n_samples, n_features) The samples. Returns ------- C : array, shape (n_samples, n_classes) The predicted probas. Can be exact zeros. """ n_classes = len(self.classes_) proba = np.zeros((X.shape[0], n_classes)) df, idx_pos_class = self._preproc(X) for k, this_df, calibrator in \ zip(idx_pos_class, df.T, self.calibrators_): if n_classes == 2: k += 1 proba[:, k] = calibrator.predict(this_df) # Normalize the probabilities if n_classes == 2: proba[:, 0] = 1. - proba[:, 1] else: proba /= np.sum(proba, axis=1)[:, np.newaxis] # XXX : for some reason all probas can be 0 proba[np.isnan(proba)] = 1. / n_classes # Deal with cases where the predicted probability minimally exceeds 1.0 proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0 return proba def _sigmoid_calibration(df, y, sample_weight=None): """Probability Calibration with sigmoid method (Platt 2000) Parameters ---------- df : ndarray, shape (n_samples,) The decision function or predict proba for the samples. y : ndarray, shape (n_samples,) The targets. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Returns ------- a : float The slope. b : float The intercept. References ---------- Platt, "Probabilistic Outputs for Support Vector Machines" """ df = column_or_1d(df) y = column_or_1d(y) F = df # F follows Platt's notations tiny = np.finfo(np.float).tiny # to avoid division by 0 warning # Bayesian priors (see Platt end of section 2.2) prior0 = float(np.sum(y <= 0)) prior1 = y.shape[0] - prior0 T = np.zeros(y.shape) T[y > 0] = (prior1 + 1.) / (prior1 + 2.) T[y <= 0] = 1. / (prior0 + 2.) T1 = 1. - T def objective(AB): # From Platt (beginning of Section 2.2) E = np.exp(AB[0] * F + AB[1]) P = 1. / (1. + E) l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny)) if sample_weight is not None: return (sample_weight * l).sum() else: return l.sum() def grad(AB): # gradient of the objective function E = np.exp(AB[0] * F + AB[1]) P = 1. / (1. + E) TEP_minus_T1P = P * (T * E - T1) if sample_weight is not None: TEP_minus_T1P *= sample_weight dA = np.dot(TEP_minus_T1P, F) dB = np.sum(TEP_minus_T1P) return np.array([dA, dB]) AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))]) AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False) return AB_[0], AB_[1] class _SigmoidCalibration(BaseEstimator, RegressorMixin): """Sigmoid regression model. Attributes ---------- a_ : float The slope. b_ : float The intercept. """ def fit(self, X, y, sample_weight=None): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape (n_samples,) Training data. 
y : array-like, shape (n_samples,) Training target. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Returns ------- self : object Returns an instance of self. """ X = column_or_1d(X) y = column_or_1d(y) X, y = indexable(X, y) self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight) return self def predict(self, T): """Predict new data by linear interpolation. Parameters ---------- T : array-like, shape (n_samples,) Data to predict from. Returns ------- T_ : array, shape (n_samples,) The predicted data. """ T = column_or_1d(T) return 1. / (1. + np.exp(self.a_ * T + self.b_)) def calibration_curve(y_true, y_prob, normalize=False, n_bins=5): """Compute true and predicted probabilities for a calibration curve. Calibration curves may also be referred to as reliability diagrams. Read more in the :ref:`User Guide <calibration>`. Parameters ---------- y_true : array, shape (n_samples,) True targets. y_prob : array, shape (n_samples,) Probabilities of the positive class. normalize : bool, optional, default=False Whether y_prob needs to be normalized into the bin [0, 1], i.e. is not a proper probability. If True, the smallest value in y_prob is mapped onto 0 and the largest one onto 1. n_bins : int Number of bins. A bigger number requires more data. Returns ------- prob_true : array, shape (n_bins,) The true probability in each bin (fraction of positives). prob_pred : array, shape (n_bins,) The mean predicted probability in each bin. References ---------- Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good Probabilities With Supervised Learning, in Proceedings of the 22nd International Conference on Machine Learning (ICML). See section 4 (Qualitative Analysis of Predictions). """ y_true = column_or_1d(y_true) y_prob = column_or_1d(y_prob) if normalize: # Normalize predicted values into interval [0, 1] y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min()) elif y_prob.min() < 0 or y_prob.max() > 1: raise ValueError("y_prob has values outside [0, 1] and normalize is " "set to False.") y_true = _check_binary_probabilistic_predictions(y_true, y_prob) bins = np.linspace(0., 1. + 1e-8, n_bins + 1) binids = np.digitize(y_prob, bins) - 1 bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins)) bin_true = np.bincount(binids, weights=y_true, minlength=len(bins)) bin_total = np.bincount(binids, minlength=len(bins)) nonzero = bin_total != 0 prob_true = (bin_true[nonzero] / bin_total[nonzero]) prob_pred = (bin_sums[nonzero] / bin_total[nonzero]) return prob_true, prob_pred
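A hedged end-to-end sketch tying together the utilities defined above: an uncalibrated LinearSVC is wrapped in CalibratedClassifierCV and the result is inspected with calibration_curve. The synthetic dataset, the sigmoid method and the 3-fold cv are illustrative assumptions, not values taken from this module.

from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=1000, n_features=20, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Platt-style sigmoid calibration fitted on 3 cross-validation folds.
clf = CalibratedClassifierCV(LinearSVC(random_state=0), method='sigmoid', cv=3)
clf.fit(X_train, y_train)

prob_pos = clf.predict_proba(X_test)[:, 1]                  # calibrated P(y = 1)
frac_pos, mean_pred = calibration_curve(y_test, prob_pos, n_bins=5)
print(frac_pos, mean_pred)                                  # reliability-diagram points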
20,332
34.423345
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/multiclass.py
""" Multiclass and multilabel classification strategies =================================================== This module implements multiclass learning algorithms: - one-vs-the-rest / one-vs-all - one-vs-one - error correcting output codes The estimators provided in this module are meta-estimators: they require a base estimator to be provided in their constructor. For example, it is possible to use these estimators to turn a binary classifier or a regressor into a multiclass classifier. It is also possible to use these estimators with multiclass estimators in the hope that their accuracy or runtime performance improves. All classifiers in scikit-learn implement multiclass classification; you only need to use this module if you want to experiment with custom multiclass strategies. The one-vs-the-rest meta-classifier also implements a `predict_proba` method, so long as such a method is implemented by the base classifier. This method returns probabilities of class membership in both the single label and multilabel case. Note that in the multilabel case, probabilities are the marginal probability that a given sample falls in the given class. As such, in the multilabel case the sum of these probabilities over all possible labels for a given sample *will not* sum to unity, as they do in the single label case. """ # Author: Mathieu Blondel <[email protected]> # Author: Hamzeh Alsalhi <[email protected]> # # License: BSD 3 clause import array import numpy as np import warnings import scipy.sparse as sp import itertools from .base import BaseEstimator, ClassifierMixin, clone, is_classifier from .base import MetaEstimatorMixin, is_regressor from .preprocessing import LabelBinarizer from .metrics.pairwise import euclidean_distances from .utils import check_random_state from .utils.validation import _num_samples from .utils.validation import check_is_fitted from .utils.validation import check_X_y, check_array from .utils.multiclass import (_check_partial_fit_first_call, check_classification_targets, _ovr_decision_function) from .utils.metaestimators import _safe_split, if_delegate_has_method from .externals.joblib import Parallel from .externals.joblib import delayed from .externals.six.moves import zip as izip __all__ = [ "OneVsRestClassifier", "OneVsOneClassifier", "OutputCodeClassifier", ] def _fit_binary(estimator, X, y, classes=None): """Fit a single binary estimator.""" unique_y = np.unique(y) if len(unique_y) == 1: if classes is not None: if y[0] == -1: c = 0 else: c = y[0] warnings.warn("Label %s is present in all training examples." 
% str(classes[c])) estimator = _ConstantPredictor().fit(X, unique_y) else: estimator = clone(estimator) estimator.fit(X, y) return estimator def _partial_fit_binary(estimator, X, y): """Partially fit a single binary estimator.""" estimator.partial_fit(X, y, np.array((0, 1))) return estimator def _predict_binary(estimator, X): """Make predictions using a single binary estimator.""" if is_regressor(estimator): return estimator.predict(X) try: score = np.ravel(estimator.decision_function(X)) except (AttributeError, NotImplementedError): # probabilities of the positive class score = estimator.predict_proba(X)[:, 1] return score def _check_estimator(estimator): """Make sure that an estimator implements the necessary methods.""" if (not hasattr(estimator, "decision_function") and not hasattr(estimator, "predict_proba")): raise ValueError("The base estimator should implement " "decision_function or predict_proba!") class _ConstantPredictor(BaseEstimator): def fit(self, X, y): self.y_ = y return self def predict(self, X): check_is_fitted(self, 'y_') return np.repeat(self.y_, X.shape[0]) def decision_function(self, X): check_is_fitted(self, 'y_') return np.repeat(self.y_, X.shape[0]) def predict_proba(self, X): check_is_fitted(self, 'y_') return np.repeat([np.hstack([1 - self.y_, self.y_])], X.shape[0], axis=0) class OneVsRestClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin): """One-vs-the-rest (OvR) multiclass/multilabel strategy Also known as one-vs-all, this strategy consists in fitting one classifier per class. For each classifier, the class is fitted against all the other classes. In addition to its computational efficiency (only `n_classes` classifiers are needed), one advantage of this approach is its interpretability. Since each class is represented by one and one classifier only, it is possible to gain knowledge about the class by inspecting its corresponding classifier. This is the most commonly used strategy for multiclass classification and is a fair default choice. This strategy can also be used for multilabel learning, where a classifier is used to predict multiple labels for instance, by fitting on a 2-d matrix in which cell [i, j] is 1 if sample i has label j and 0 otherwise. In the multilabel learning literature, OvR is also known as the binary relevance method. Read more in the :ref:`User Guide <ovr_classification>`. Parameters ---------- estimator : estimator object An estimator object implementing `fit` and one of `decision_function` or `predict_proba`. n_jobs : int, optional, default: 1 The number of jobs to use for the computation. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. Attributes ---------- estimators_ : list of `n_classes` estimators Estimators used for predictions. classes_ : array, shape = [`n_classes`] Class labels. label_binarizer_ : LabelBinarizer object Object used to transform multiclass labels to binary labels and vice-versa. multilabel_ : boolean Whether a OneVsRestClassifier is a multilabel classifier. """ def __init__(self, estimator, n_jobs=1): self.estimator = estimator self.n_jobs = n_jobs def fit(self, X, y): """Fit underlying estimators. Parameters ---------- X : (sparse) array-like, shape = [n_samples, n_features] Data. y : (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes] Multi-class targets. An indicator matrix turns on multilabel classification. 
Returns ------- self """ # A sparse LabelBinarizer, with sparse_output=True, has been shown to # outpreform or match a dense label binarizer in all cases and has also # resulted in less or equal memory consumption in the fit_ovr function # overall. self.label_binarizer_ = LabelBinarizer(sparse_output=True) Y = self.label_binarizer_.fit_transform(y) Y = Y.tocsc() self.classes_ = self.label_binarizer_.classes_ columns = (col.toarray().ravel() for col in Y.T) # In cases where individual estimators are very fast to train setting # n_jobs > 1 in can results in slower performance due to the overhead # of spawning threads. See joblib issue #112. self.estimators_ = Parallel(n_jobs=self.n_jobs)(delayed(_fit_binary)( self.estimator, X, column, classes=[ "not %s" % self.label_binarizer_.classes_[i], self.label_binarizer_.classes_[i]]) for i, column in enumerate(columns)) return self @if_delegate_has_method('estimator') def partial_fit(self, X, y, classes=None): """Partially fit underlying estimators Should be used when memory is inefficient to train all data. Chunks of data can be passed in several iteration. Parameters ---------- X : (sparse) array-like, shape = [n_samples, n_features] Data. y : (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes] Multi-class targets. An indicator matrix turns on multilabel classification. classes : array, shape (n_classes, ) Classes across all calls to partial_fit. Can be obtained via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is only required in the first call of partial_fit and can be omitted in the subsequent calls. Returns ------- self """ if _check_partial_fit_first_call(self, classes): if not hasattr(self.estimator, "partial_fit"): raise ValueError(("Base estimator {0}, doesn't have " "partial_fit method").format(self.estimator)) self.estimators_ = [clone(self.estimator) for _ in range (self.n_classes_)] # A sparse LabelBinarizer, with sparse_output=True, has been # shown to outperform or match a dense label binarizer in all # cases and has also resulted in less or equal memory consumption # in the fit_ovr function overall. self.label_binarizer_ = LabelBinarizer(sparse_output=True) self.label_binarizer_.fit(self.classes_) if len(np.setdiff1d(y, self.classes_)): raise ValueError(("Mini-batch contains {0} while classes " + "must be subset of {1}").format(np.unique(y), self.classes_)) Y = self.label_binarizer_.transform(y) Y = Y.tocsc() columns = (col.toarray().ravel() for col in Y.T) self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_partial_fit_binary)(estimator, X, column) for estimator, column in izip(self.estimators_, columns)) return self def predict(self, X): """Predict multi-class targets using underlying estimators. Parameters ---------- X : (sparse) array-like, shape = [n_samples, n_features] Data. Returns ------- y : (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]. Predicted multi-class targets. 
""" check_is_fitted(self, 'estimators_') if (hasattr(self.estimators_[0], "decision_function") and is_classifier(self.estimators_[0])): thresh = 0 else: thresh = .5 n_samples = _num_samples(X) if self.label_binarizer_.y_type_ == "multiclass": maxima = np.empty(n_samples, dtype=float) maxima.fill(-np.inf) argmaxima = np.zeros(n_samples, dtype=int) for i, e in enumerate(self.estimators_): pred = _predict_binary(e, X) np.maximum(maxima, pred, out=maxima) argmaxima[maxima == pred] = i return self.classes_[np.array(argmaxima.T)] else: indices = array.array('i') indptr = array.array('i', [0]) for e in self.estimators_: indices.extend(np.where(_predict_binary(e, X) > thresh)[0]) indptr.append(len(indices)) data = np.ones(len(indices), dtype=int) indicator = sp.csc_matrix((data, indices, indptr), shape=(n_samples, len(self.estimators_))) return self.label_binarizer_.inverse_transform(indicator) @if_delegate_has_method(['_first_estimator', 'estimator']) def predict_proba(self, X): """Probability estimates. The returned estimates for all classes are ordered by label of classes. Note that in the multilabel case, each sample can have any number of labels. This returns the marginal probability that the given sample has the label in question. For example, it is entirely consistent that two labels both have a 90% probability of applying to a given sample. In the single label multiclass case, the rows of the returned matrix sum to 1. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- T : (sparse) array-like, shape = [n_samples, n_classes] Returns the probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. """ check_is_fitted(self, 'estimators_') # Y[i, j] gives the probability that sample i has the label j. # In the multi-label case, these are not disjoint. Y = np.array([e.predict_proba(X)[:, 1] for e in self.estimators_]).T if len(self.estimators_) == 1: # Only one estimator, but we still want to return probabilities # for two classes. Y = np.concatenate(((1 - Y), Y), axis=1) if not self.multilabel_: # Then, probabilities should be normalized to 1. Y /= np.sum(Y, axis=1)[:, np.newaxis] return Y @if_delegate_has_method(['_first_estimator', 'estimator']) def decision_function(self, X): """Returns the distance of each sample from the decision boundary for each class. This can only be used with estimators which implement the decision_function method. 
Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- T : array-like, shape = [n_samples, n_classes] """ check_is_fitted(self, 'estimators_') if len(self.estimators_) == 1: return self.estimators_[0].decision_function(X) return np.array([est.decision_function(X).ravel() for est in self.estimators_]).T @property def multilabel_(self): """Whether this is a multilabel classifier""" return self.label_binarizer_.y_type_.startswith('multilabel') @property def n_classes_(self): return len(self.classes_) @property def coef_(self): check_is_fitted(self, 'estimators_') if not hasattr(self.estimators_[0], "coef_"): raise AttributeError( "Base estimator doesn't have a coef_ attribute.") coefs = [e.coef_ for e in self.estimators_] if sp.issparse(coefs[0]): return sp.vstack(coefs) return np.vstack(coefs) @property def intercept_(self): check_is_fitted(self, 'estimators_') if not hasattr(self.estimators_[0], "intercept_"): raise AttributeError( "Base estimator doesn't have an intercept_ attribute.") return np.array([e.intercept_.ravel() for e in self.estimators_]) @property def _pairwise(self): """Indicate if wrapped estimator is using a precomputed Gram matrix""" return getattr(self.estimator, "_pairwise", False) @property def _first_estimator(self): return self.estimators_[0] def _fit_ovo_binary(estimator, X, y, i, j): """Fit a single binary estimator (one-vs-one).""" cond = np.logical_or(y == i, y == j) y = y[cond] y_binary = np.empty(y.shape, np.int) y_binary[y == i] = 0 y_binary[y == j] = 1 indcond = np.arange(X.shape[0])[cond] return _fit_binary(estimator, _safe_split(estimator, X, None, indices=indcond)[0], y_binary, classes=[i, j]), indcond def _partial_fit_ovo_binary(estimator, X, y, i, j): """Partially fit a single binary estimator(one-vs-one).""" cond = np.logical_or(y == i, y == j) y = y[cond] if len(y) != 0: y_binary = np.zeros_like(y) y_binary[y == j] = 1 return _partial_fit_binary(estimator, X[cond], y_binary) return estimator class OneVsOneClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin): """One-vs-one multiclass strategy This strategy consists in fitting one classifier per class pair. At prediction time, the class which received the most votes is selected. Since it requires to fit `n_classes * (n_classes - 1) / 2` classifiers, this method is usually slower than one-vs-the-rest, due to its O(n_classes^2) complexity. However, this method may be advantageous for algorithms such as kernel algorithms which don't scale well with `n_samples`. This is because each individual learning problem only involves a small subset of the data whereas, with one-vs-the-rest, the complete dataset is used `n_classes` times. Read more in the :ref:`User Guide <ovo_classification>`. Parameters ---------- estimator : estimator object An estimator object implementing `fit` and one of `decision_function` or `predict_proba`. n_jobs : int, optional, default: 1 The number of jobs to use for the computation. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. Attributes ---------- estimators_ : list of `n_classes * (n_classes - 1) / 2` estimators Estimators used for predictions. classes_ : numpy array of shape [n_classes] Array containing labels. """ def __init__(self, estimator, n_jobs=1): self.estimator = estimator self.n_jobs = n_jobs def fit(self, X, y): """Fit underlying estimators. 
Parameters ---------- X : (sparse) array-like, shape = [n_samples, n_features] Data. y : array-like, shape = [n_samples] Multi-class targets. Returns ------- self """ X, y = check_X_y(X, y, accept_sparse=['csr', 'csc']) check_classification_targets(y) self.classes_ = np.unique(y) if len(self.classes_) == 1: raise ValueError("OneVsOneClassifier can not be fit when only one" " class is present.") n_classes = self.classes_.shape[0] estimators_indices = list(zip(*(Parallel(n_jobs=self.n_jobs)( delayed(_fit_ovo_binary) (self.estimator, X, y, self.classes_[i], self.classes_[j]) for i in range(n_classes) for j in range(i + 1, n_classes))))) self.estimators_ = estimators_indices[0] try: self.pairwise_indices_ = ( estimators_indices[1] if self._pairwise else None) except AttributeError: self.pairwise_indices_ = None return self @if_delegate_has_method(delegate='estimator') def partial_fit(self, X, y, classes=None): """Partially fit underlying estimators Should be used when memory is inefficient to train all data. Chunks of data can be passed in several iteration, where the first call should have an array of all target variables. Parameters ---------- X : (sparse) array-like, shape = [n_samples, n_features] Data. y : array-like, shape = [n_samples] Multi-class targets. classes : array, shape (n_classes, ) Classes across all calls to partial_fit. Can be obtained via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is only required in the first call of partial_fit and can be omitted in the subsequent calls. Returns ------- self """ if _check_partial_fit_first_call(self, classes): self.estimators_ = [clone(self.estimator) for i in range(self.n_classes_ * (self.n_classes_ - 1) // 2)] if len(np.setdiff1d(y, self.classes_)): raise ValueError("Mini-batch contains {0} while it " "must be subset of {1}".format(np.unique(y), self.classes_)) X, y = check_X_y(X, y, accept_sparse=['csr', 'csc']) check_classification_targets(y) combinations = itertools.combinations(range(self.n_classes_), 2) self.estimators_ = Parallel( n_jobs=self.n_jobs)( delayed(_partial_fit_ovo_binary)( estimator, X, y, self.classes_[i], self.classes_[j]) for estimator, (i, j) in izip(self.estimators_, (combinations))) self.pairwise_indices_ = None return self def predict(self, X): """Estimate the best class label for each sample in X. This is implemented as ``argmax(decision_function(X), axis=1)`` which will return the label of the class with most votes by estimators predicting the outcome of a decision for each possible class pair. Parameters ---------- X : (sparse) array-like, shape = [n_samples, n_features] Data. Returns ------- y : numpy array of shape [n_samples] Predicted multi-class targets. """ Y = self.decision_function(X) if self.n_classes_ == 2: return self.classes_[(Y > 0).astype(np.int)] return self.classes_[Y.argmax(axis=1)] def decision_function(self, X): """Decision function for the OneVsOneClassifier. The decision values for the samples are computed by adding the normalized sum of pair-wise classification confidence levels to the votes in order to disambiguate between the decision values when the votes for all the classes are equal leading to a tie. 
Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- Y : array-like, shape = [n_samples, n_classes] """ check_is_fitted(self, 'estimators_') indices = self.pairwise_indices_ if indices is None: Xs = [X] * len(self.estimators_) else: Xs = [X[:, idx] for idx in indices] predictions = np.vstack([est.predict(Xi) for est, Xi in zip(self.estimators_, Xs)]).T confidences = np.vstack([_predict_binary(est, Xi) for est, Xi in zip(self.estimators_, Xs)]).T Y = _ovr_decision_function(predictions, confidences, len(self.classes_)) if self.n_classes_ == 2: return Y[:, 1] return Y @property def n_classes_(self): return len(self.classes_) @property def _pairwise(self): """Indicate if wrapped estimator is using a precomputed Gram matrix""" return getattr(self.estimator, "_pairwise", False) class OutputCodeClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin): """(Error-Correcting) Output-Code multiclass strategy Output-code based strategies consist in representing each class with a binary code (an array of 0s and 1s). At fitting time, one binary classifier per bit in the code book is fitted. At prediction time, the classifiers are used to project new points in the class space and the class closest to the points is chosen. The main advantage of these strategies is that the number of classifiers used can be controlled by the user, either for compressing the model (0 < code_size < 1) or for making the model more robust to errors (code_size > 1). See the documentation for more details. Read more in the :ref:`User Guide <ecoc>`. Parameters ---------- estimator : estimator object An estimator object implementing `fit` and one of `decision_function` or `predict_proba`. code_size : float Percentage of the number of classes to be used to create the code book. A number between 0 and 1 will require fewer classifiers than one-vs-the-rest. A number greater than 1 will require more classifiers than one-vs-the-rest. random_state : int, RandomState instance or None, optional, default: None The generator used to initialize the codebook. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. n_jobs : int, optional, default: 1 The number of jobs to use for the computation. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. Attributes ---------- estimators_ : list of `int(n_classes * code_size)` estimators Estimators used for predictions. classes_ : numpy array of shape [n_classes] Array containing labels. code_book_ : numpy array of shape [n_classes, code_size] Binary array containing the code of each class. References ---------- .. [1] "Solving multiclass learning problems via error-correcting output codes", Dietterich T., Bakiri G., Journal of Artificial Intelligence Research 2, 1995. .. [2] "The error coding method and PICTs", James G., Hastie T., Journal of Computational and Graphical statistics 7, 1998. .. [3] "The Elements of Statistical Learning", Hastie T., Tibshirani R., Friedman J., page 606 (second-edition) 2008. 
""" def __init__(self, estimator, code_size=1.5, random_state=None, n_jobs=1): self.estimator = estimator self.code_size = code_size self.random_state = random_state self.n_jobs = n_jobs def fit(self, X, y): """Fit underlying estimators. Parameters ---------- X : (sparse) array-like, shape = [n_samples, n_features] Data. y : numpy array of shape [n_samples] Multi-class targets. Returns ------- self """ X, y = check_X_y(X, y) if self.code_size <= 0: raise ValueError("code_size should be greater than 0, got {0}" "".format(self.code_size)) _check_estimator(self.estimator) random_state = check_random_state(self.random_state) check_classification_targets(y) self.classes_ = np.unique(y) n_classes = self.classes_.shape[0] code_size_ = int(n_classes * self.code_size) # FIXME: there are more elaborate methods than generating the codebook # randomly. self.code_book_ = random_state.random_sample((n_classes, code_size_)) self.code_book_[self.code_book_ > 0.5] = 1 if hasattr(self.estimator, "decision_function"): self.code_book_[self.code_book_ != 1] = -1 else: self.code_book_[self.code_book_ != 1] = 0 classes_index = dict((c, i) for i, c in enumerate(self.classes_)) Y = np.array([self.code_book_[classes_index[y[i]]] for i in range(X.shape[0])], dtype=np.int) self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_fit_binary)(self.estimator, X, Y[:, i]) for i in range(Y.shape[1])) return self def predict(self, X): """Predict multi-class targets using underlying estimators. Parameters ---------- X : (sparse) array-like, shape = [n_samples, n_features] Data. Returns ------- y : numpy array of shape [n_samples] Predicted multi-class targets. """ check_is_fitted(self, 'estimators_') X = check_array(X) Y = np.array([_predict_binary(e, X) for e in self.estimators_]).T pred = euclidean_distances(Y, self.code_book_).argmin(axis=1) return self.classes_[pred]
28,695
36.074935
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/grid_search.py
""" The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters of an estimator. """ from __future__ import print_function # Author: Alexandre Gramfort <[email protected]>, # Gael Varoquaux <[email protected]> # Andreas Mueller <[email protected]> # Olivier Grisel <[email protected]> # License: BSD 3 clause from abc import ABCMeta, abstractmethod from collections import Mapping, namedtuple, Sized from functools import partial, reduce from itertools import product import operator import warnings import numpy as np from .base import BaseEstimator, is_classifier, clone from .base import MetaEstimatorMixin from .cross_validation import check_cv from .cross_validation import _fit_and_score from .externals.joblib import Parallel, delayed from .externals import six from .utils import check_random_state from .utils.random import sample_without_replacement from .utils.validation import _num_samples, indexable from .utils.metaestimators import if_delegate_has_method from .metrics.scorer import check_scoring __all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point', 'ParameterSampler', 'RandomizedSearchCV'] warnings.warn("This module was deprecated in version 0.18 in favor of the " "model_selection module into which all the refactored classes " "and functions are moved. This module will be removed in 0.20.", DeprecationWarning) class ParameterGrid(object): """Grid of parameters with a discrete number of values for each. .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.ParameterGrid` instead. Can be used to iterate over parameter value combinations with the Python built-in function iter. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- param_grid : dict of string to sequence, or sequence of such The parameter grid to explore, as a dictionary mapping estimator parameters to sequences of allowed values. An empty dict signifies default parameters. A sequence of dicts signifies a sequence of grids to search, and is useful to avoid exploring parameter combinations that make no sense or have no effect. See the examples below. Examples -------- >>> from sklearn.grid_search import ParameterGrid >>> param_grid = {'a': [1, 2], 'b': [True, False]} >>> list(ParameterGrid(param_grid)) == ( ... [{'a': 1, 'b': True}, {'a': 1, 'b': False}, ... {'a': 2, 'b': True}, {'a': 2, 'b': False}]) True >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}] >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'}, ... {'kernel': 'rbf', 'gamma': 1}, ... {'kernel': 'rbf', 'gamma': 10}] True >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1} True See also -------- :class:`GridSearchCV`: uses ``ParameterGrid`` to perform a full parallelized parameter search. """ def __init__(self, param_grid): if isinstance(param_grid, Mapping): # wrap dictionary in a singleton list to support either dict # or list of dicts param_grid = [param_grid] self.param_grid = param_grid def __iter__(self): """Iterate over the points in the grid. Returns ------- params : iterator over dict of string to any Yields dictionaries mapping each estimator parameter to one of its allowed values. """ for p in self.param_grid: # Always sort the keys of a dictionary, for reproducibility items = sorted(p.items()) if not items: yield {} else: keys, values = zip(*items) for v in product(*values): params = dict(zip(keys, v)) yield params def __len__(self): """Number of points on the grid.""" # Product function that can handle iterables (np.product can't). 
product = partial(reduce, operator.mul) return sum(product(len(v) for v in p.values()) if p else 1 for p in self.param_grid) def __getitem__(self, ind): """Get the parameters that would be ``ind``th in iteration Parameters ---------- ind : int The iteration index Returns ------- params : dict of string to any Equal to list(self)[ind] """ # This is used to make discrete sampling without replacement memory # efficient. for sub_grid in self.param_grid: # XXX: could memoize information used here if not sub_grid: if ind == 0: return {} else: ind -= 1 continue # Reverse so most frequent cycling parameter comes first keys, values_lists = zip(*sorted(sub_grid.items())[::-1]) sizes = [len(v_list) for v_list in values_lists] total = np.product(sizes) if ind >= total: # Try the next grid ind -= total else: out = {} for key, v_list, n in zip(keys, values_lists, sizes): ind, offset = divmod(ind, n) out[key] = v_list[offset] return out raise IndexError('ParameterGrid index out of range') class ParameterSampler(object): """Generator on parameters sampled from given distributions. .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.ParameterSampler` instead. Non-deterministic iterable over random candidate combinations for hyper- parameter search. If all parameters are presented as a list, sampling without replacement is performed. If at least one parameter is given as a distribution, sampling with replacement is used. It is highly recommended to use continuous distributions for continuous parameters. Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept a custom RNG instance and always use the singleton RNG from ``numpy.random``. Hence setting ``random_state`` will not guarantee a deterministic iteration whenever ``scipy.stats`` distributions are used to define the parameter search space. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- param_distributions : dict Dictionary where the keys are parameters and values are distributions from which a parameter is to be sampled. Distributions either have to provide a ``rvs`` function to sample from them, or can be given as a list of values, where a uniform distribution is assumed. n_iter : integer Number of parameter settings that are produced. random_state : int, RandomState instance or None, optional (default=None) Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- params : dict of string to any **Yields** dictionaries mapping each estimator parameter to as sampled value. Examples -------- >>> from sklearn.grid_search import ParameterSampler >>> from scipy.stats.distributions import expon >>> import numpy as np >>> np.random.seed(0) >>> param_grid = {'a':[1, 2], 'b': expon()} >>> param_list = list(ParameterSampler(param_grid, n_iter=4)) >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items()) ... for d in param_list] >>> rounded_list == [{'b': 0.89856, 'a': 1}, ... {'b': 0.923223, 'a': 1}, ... {'b': 1.878964, 'a': 2}, ... 
{'b': 1.038159, 'a': 2}] True """ def __init__(self, param_distributions, n_iter, random_state=None): self.param_distributions = param_distributions self.n_iter = n_iter self.random_state = random_state def __iter__(self): # check if all distributions are given as lists # in this case we want to sample without replacement all_lists = np.all([not hasattr(v, "rvs") for v in self.param_distributions.values()]) rnd = check_random_state(self.random_state) if all_lists: # look up sampled parameter settings in parameter grid param_grid = ParameterGrid(self.param_distributions) grid_size = len(param_grid) if grid_size < self.n_iter: raise ValueError( "The total space of parameters %d is smaller " "than n_iter=%d." % (grid_size, self.n_iter) + " For exhaustive searches, use GridSearchCV.") for i in sample_without_replacement(grid_size, self.n_iter, random_state=rnd): yield param_grid[i] else: # Always sort the keys of a dictionary, for reproducibility items = sorted(self.param_distributions.items()) for _ in six.moves.range(self.n_iter): params = dict() for k, v in items: if hasattr(v, "rvs"): params[k] = v.rvs() else: params[k] = v[rnd.randint(len(v))] yield params def __len__(self): """Number of points that will be sampled.""" return self.n_iter def fit_grid_point(X, y, estimator, parameters, train, test, scorer, verbose, error_score='raise', **fit_params): """Run fit on one set of parameters. .. deprecated:: 0.18 This module will be removed in 0.20. Use :func:`sklearn.model_selection.fit_grid_point` instead. Parameters ---------- X : array-like, sparse matrix or list Input data. y : array-like or None Targets for input data. estimator : estimator object A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. parameters : dict Parameters to be set on estimator for this grid point. train : ndarray, dtype int or bool Boolean mask or indices for training set. test : ndarray, dtype int or bool Boolean mask or indices for test set. scorer : callable or None. If provided must be a scorer callable object / function with signature ``scorer(estimator, X, y)``. verbose : int Verbosity level. **fit_params : kwargs Additional parameter passed to the fit function of the estimator. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Returns ------- score : float Score of this parameter setting on given training / test split. parameters : dict The parameters that have been evaluated. n_samples_test : int Number of test samples in this split. 
""" score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, error_score) return score, parameters, n_samples_test def _check_param_grid(param_grid): if hasattr(param_grid, 'items'): param_grid = [param_grid] for p in param_grid: for name, v in p.items(): if isinstance(v, np.ndarray) and v.ndim > 1: raise ValueError("Parameter array should be one-dimensional.") check = [isinstance(v, k) for k in (list, tuple, np.ndarray)] if True not in check: raise ValueError("Parameter values for parameter ({0}) need " "to be a sequence.".format(name)) if len(v) == 0: raise ValueError("Parameter values for parameter ({0}) need " "to be a non-empty sequence.".format(name)) class _CVScoreTuple (namedtuple('_CVScoreTuple', ('parameters', 'mean_validation_score', 'cv_validation_scores'))): # A raw namedtuple is very memory efficient as it packs the attributes # in a struct to get rid of the __dict__ of attributes in particular it # does not copy the string for the keys on each instance. # By deriving a namedtuple class just to introduce the __repr__ method we # would also reintroduce the __dict__ on the instance. By telling the # Python interpreter that this subclass uses static __slots__ instead of # dynamic attributes. Furthermore we don't need any additional slot in the # subclass so we set __slots__ to the empty tuple. __slots__ = () def __repr__(self): """Simple custom repr to summarize the main info""" return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format( self.mean_validation_score, np.std(self.cv_validation_scores), self.parameters) class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator, MetaEstimatorMixin)): """Base class for hyper parameter search with cross-validation.""" @abstractmethod def __init__(self, estimator, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise'): self.scoring = scoring self.estimator = estimator self.n_jobs = n_jobs self.fit_params = fit_params if fit_params is not None else {} self.iid = iid self.refit = refit self.cv = cv self.verbose = verbose self.pre_dispatch = pre_dispatch self.error_score = error_score @property def _estimator_type(self): return self.estimator._estimator_type @property def classes_(self): return self.best_estimator_.classes_ def score(self, X, y=None): """Returns the score on the given data, if the estimator has been refit. This uses the score defined by ``scoring`` where provided, and the ``best_estimator_.score`` method otherwise. Parameters ---------- X : array-like, shape = [n_samples, n_features] Input data, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. Returns ------- score : float Notes ----- * The long-standing behavior of this method changed in version 0.16. * It no longer uses the metric provided by ``estimator.score`` if the ``scoring`` parameter was set when fitting. """ if self.scorer_ is None: raise ValueError("No score function explicitly defined, " "and the estimator doesn't provide one %s" % self.best_estimator_) return self.scorer_(self.best_estimator_, X, y) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def predict(self, X): """Call predict on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict``. 
Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.predict(X) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def predict_proba(self, X): """Call predict_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.predict_proba(X) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def predict_log_proba(self, X): """Call predict_log_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_log_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.predict_log_proba(X) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def decision_function(self, X): """Call decision_function on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``decision_function``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.decision_function(X) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def transform(self, X): """Call transform on the estimator with the best found parameters. Only available if the underlying estimator supports ``transform`` and ``refit=True``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.transform(X) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def inverse_transform(self, Xt): """Call inverse_transform on the estimator with the best found parameters. Only available if the underlying estimator implements ``inverse_transform`` and ``refit=True``. Parameters ----------- Xt : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. 
""" return self.best_estimator_.inverse_transform(Xt) def _fit(self, X, y, parameter_iterable): """Actual fitting, performing the search over parameters.""" estimator = self.estimator cv = self.cv self.scorer_ = check_scoring(self.estimator, scoring=self.scoring) n_samples = _num_samples(X) X, y = indexable(X, y) if y is not None: if len(y) != n_samples: raise ValueError('Target variable (y) has a different number ' 'of samples (%i) than data (X: %i samples)' % (len(y), n_samples)) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) if self.verbose > 0: if isinstance(parameter_iterable, Sized): n_candidates = len(parameter_iterable) print("Fitting {0} folds for each of {1} candidates, totalling" " {2} fits".format(len(cv), n_candidates, n_candidates * len(cv))) base_estimator = clone(self.estimator) pre_dispatch = self.pre_dispatch out = Parallel( n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=pre_dispatch )( delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_, train, test, self.verbose, parameters, self.fit_params, return_parameters=True, error_score=self.error_score) for parameters in parameter_iterable for train, test in cv) # Out is a list of triplet: score, estimator, n_test_samples n_fits = len(out) n_folds = len(cv) scores = list() grid_scores = list() for grid_start in range(0, n_fits, n_folds): n_test_samples = 0 score = 0 all_scores = [] for this_score, this_n_test_samples, _, parameters in \ out[grid_start:grid_start + n_folds]: all_scores.append(this_score) if self.iid: this_score *= this_n_test_samples n_test_samples += this_n_test_samples score += this_score if self.iid: score /= float(n_test_samples) else: score /= float(n_folds) scores.append((score, parameters)) # TODO: shall we also store the test_fold_sizes? grid_scores.append(_CVScoreTuple( parameters, score, np.array(all_scores))) # Store the computed scores self.grid_scores_ = grid_scores # Find the best parameters by comparing on the mean validation score: # note that `sorted` is deterministic in the way it breaks ties best = sorted(grid_scores, key=lambda x: x.mean_validation_score, reverse=True)[0] self.best_params_ = best.parameters self.best_score_ = best.mean_validation_score if self.refit: # fit the best estimator using the entire dataset # clone first to work around broken estimators best_estimator = clone(base_estimator).set_params( **best.parameters) if y is not None: best_estimator.fit(X, y, **self.fit_params) else: best_estimator.fit(X, **self.fit_params) self.best_estimator_ = best_estimator return self class GridSearchCV(BaseSearchCV): """Exhaustive search over specified parameter values for an estimator. .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.GridSearchCV` instead. Important members are fit, predict. GridSearchCV implements a "fit" and a "score" method. It also implements "predict", "predict_proba", "decision_function", "transform" and "inverse_transform" if they are implemented in the estimator used. The parameters of the estimator used to apply these methods are optimized by cross-validated grid-search over a parameter grid. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- estimator : estimator object. A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. 
param_grid : dict or list of dictionaries Dictionary with parameters names (string) as keys and lists of parameter settings to try as values, or a list of such dictionaries, in which case the grids spanned by each dictionary in the list are explored. This enables searching over any sequence of parameter settings. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs: int, default: 1 : The maximum number of estimators fit in parallel. - If -1 all CPUs are used. - If 1 is given, no parallel computing code is used at all, which is useful for debugging. - For ``n_jobs`` below -1, ``(n_cpus + n_jobs + 1)`` are used. For example, with ``n_jobs = -2`` all CPUs but one are used. .. versionchanged:: 0.17 Upgraded to joblib 0.9.3. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`sklearn.model_selection.StratifiedKFold` is used. In all other cases, :class:`sklearn.model_selection.KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this GridSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Examples -------- >>> from sklearn import svm, grid_search, datasets >>> iris = datasets.load_iris() >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} >>> svr = svm.SVC() >>> clf = grid_search.GridSearchCV(svr, parameters) >>> clf.fit(iris.data, iris.target) ... 
# doctest: +NORMALIZE_WHITESPACE +ELLIPSIS GridSearchCV(cv=None, error_score=..., estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=..., decision_function_shape='ovr', degree=..., gamma=..., kernel='rbf', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=..., verbose=False), fit_params={}, iid=..., n_jobs=1, param_grid=..., pre_dispatch=..., refit=..., scoring=..., verbose=...) Attributes ---------- grid_scores_ : list of named tuples Contains scores for all parameter combinations in param_grid. Each entry corresponds to one parameter setting. Each named tuple has the attributes: * ``parameters``, a dict of parameter settings * ``mean_validation_score``, the mean score over the cross-validation folds * ``cv_validation_scores``, the list of scores for each fold best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. Notes ------ The parameters selected are those that maximize the score of the left out data, unless an explicit score is passed in which case it is used instead. If `n_jobs` was set to a value higher than one, the data is copied for each point in the grid (and not `n_jobs` times). This is done for efficiency reasons if individual jobs take very little time, but may raise errors if the dataset is large and not enough memory is available. A workaround in this case is to set `pre_dispatch`. Then, the memory is copied only `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * n_jobs`. See Also --------- :class:`ParameterGrid`: generates all the combinations of a hyperparameter grid. :func:`sklearn.cross_validation.train_test_split`: utility function to split the data into a development set usable for fitting a GridSearchCV instance and an evaluation set for its final evaluation. :func:`sklearn.metrics.make_scorer`: Make a scorer from a performance metric or loss function. """ def __init__(self, estimator, param_grid, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise'): super(GridSearchCV, self).__init__( estimator, scoring, fit_params, n_jobs, iid, refit, cv, verbose, pre_dispatch, error_score) self.param_grid = param_grid _check_param_grid(param_grid) def fit(self, X, y=None): """Run fit with all sets of parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. """ return self._fit(X, y, ParameterGrid(self.param_grid)) class RandomizedSearchCV(BaseSearchCV): """Randomized search on hyper parameters. .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.RandomizedSearchCV` instead. RandomizedSearchCV implements a "fit" and a "score" method. It also implements "predict", "predict_proba", "decision_function", "transform" and "inverse_transform" if they are implemented in the estimator used. 
The parameters of the estimator used to apply these methods are optimized by cross-validated search over parameter settings. In contrast to GridSearchCV, not all parameter values are tried out, but rather a fixed number of parameter settings is sampled from the specified distributions. The number of parameter settings that are tried is given by n_iter. If all parameters are presented as a list, sampling without replacement is performed. If at least one parameter is given as a distribution, sampling with replacement is used. It is highly recommended to use continuous distributions for continuous parameters. Read more in the :ref:`User Guide <randomized_parameter_search>`. Parameters ---------- estimator : estimator object. A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. param_distributions : dict Dictionary with parameters names (string) as keys and distributions or lists of parameters to try. Distributions must provide a ``rvs`` method for sampling (such as those from scipy.stats.distributions). If a list is given, it is sampled uniformly. n_iter : int, default=10 Number of parameter settings that are sampled. n_iter trades off runtime vs quality of the solution. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs: int, default: 1 : The maximum number of estimators fit in parallel. - If -1 all CPUs are used. - If 1 is given, no parallel computing code is used at all, which is useful for debugging. - For ``n_jobs`` below -1, ``(n_cpus + n_jobs + 1)`` are used. For example, with ``n_jobs = -2`` all CPUs but one are used. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`sklearn.model_selection.StratifiedKFold` is used. In all other cases, :class:`sklearn.model_selection.KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. refit : boolean, default=True Refit the best estimator with the entire dataset. 
If "False", it is impossible to make predictions using this RandomizedSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. random_state : int, RandomState instance or None, optional, default=None Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Attributes ---------- grid_scores_ : list of named tuples Contains scores for all parameter combinations in param_grid. Each entry corresponds to one parameter setting. Each named tuple has the attributes: * ``parameters``, a dict of parameter settings * ``mean_validation_score``, the mean score over the cross-validation folds * ``cv_validation_scores``, the list of scores for each fold best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. Notes ----- The parameters selected are those that maximize the score of the held-out data, according to the scoring parameter. If `n_jobs` was set to a value higher than one, the data is copied for each parameter setting(and not `n_jobs` times). This is done for efficiency reasons if individual jobs take very little time, but may raise errors if the dataset is large and not enough memory is available. A workaround in this case is to set `pre_dispatch`. Then, the memory is copied only `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * n_jobs`. See Also -------- :class:`GridSearchCV`: Does exhaustive search over a grid of parameters. :class:`ParameterSampler`: A generator over parameter settings, constructed from param_distributions. """ def __init__(self, estimator, param_distributions, n_iter=10, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', random_state=None, error_score='raise'): self.param_distributions = param_distributions self.n_iter = n_iter self.random_state = random_state super(RandomizedSearchCV, self).__init__( estimator=estimator, scoring=scoring, fit_params=fit_params, n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose, pre_dispatch=pre_dispatch, error_score=error_score) def fit(self, X, y=None): """Run fit on the estimator with randomly drawn parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. """ sampled_params = ParameterSampler(self.param_distributions, self.n_iter, random_state=self.random_state) return self._fit(X, y, sampled_params)
40,365
37.553964
87
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/multioutput.py
""" This module implements multioutput regression and classification. The estimators provided in this module are meta-estimators: they require a base estimator to be provided in their constructor. The meta-estimator extends single output estimators to multioutput estimators. """ # Author: Tim Head <[email protected]> # Author: Hugo Bowne-Anderson <[email protected]> # Author: Chris Rivera <[email protected]> # Author: Michael Williamson # Author: James Ashton Nichols <[email protected]> # # License: BSD 3 clause import numpy as np import scipy.sparse as sp from abc import ABCMeta, abstractmethod from .base import BaseEstimator, clone, MetaEstimatorMixin from .base import RegressorMixin, ClassifierMixin, is_classifier from .model_selection import cross_val_predict from .utils import check_array, check_X_y, check_random_state from .utils.fixes import parallel_helper from .utils.metaestimators import if_delegate_has_method from .utils.validation import check_is_fitted, has_fit_parameter from .utils.multiclass import check_classification_targets from .externals.joblib import Parallel, delayed from .externals import six __all__ = ["MultiOutputRegressor", "MultiOutputClassifier", "ClassifierChain"] def _fit_estimator(estimator, X, y, sample_weight=None): estimator = clone(estimator) if sample_weight is not None: estimator.fit(X, y, sample_weight=sample_weight) else: estimator.fit(X, y) return estimator def _partial_fit_estimator(estimator, X, y, classes=None, sample_weight=None, first_time=True): if first_time: estimator = clone(estimator) if sample_weight is not None: if classes is not None: estimator.partial_fit(X, y, classes=classes, sample_weight=sample_weight) else: estimator.partial_fit(X, y, sample_weight=sample_weight) else: if classes is not None: estimator.partial_fit(X, y, classes=classes) else: estimator.partial_fit(X, y) return estimator class MultiOutputEstimator(six.with_metaclass(ABCMeta, BaseEstimator, MetaEstimatorMixin)): @abstractmethod def __init__(self, estimator, n_jobs=1): self.estimator = estimator self.n_jobs = n_jobs @if_delegate_has_method('estimator') def partial_fit(self, X, y, classes=None, sample_weight=None): """Incrementally fit the model to data. Fit a separate model for each output variable. Parameters ---------- X : (sparse) array-like, shape (n_samples, n_features) Data. y : (sparse) array-like, shape (n_samples, n_outputs) Multi-output targets. classes : list of numpy arrays, shape (n_outputs) Each array is unique classes for one output in str/int Can be obtained by via ``[np.unique(y[:, i]) for i in range(y.shape[1])]``, where y is the target matrix of the entire dataset. This argument is required for the first call to partial_fit and can be omitted in the subsequent calls. Note that y doesn't need to contain all labels in `classes`. sample_weight : array-like, shape = (n_samples) or None Sample weights. If None, then samples are equally weighted. Only supported if the underlying regressor supports sample weights. Returns ------- self : object Returns self. 
""" X, y = check_X_y(X, y, multi_output=True, accept_sparse=True) if y.ndim == 1: raise ValueError("y must have at least two dimensions for " "multi-output regression but has only one.") if (sample_weight is not None and not has_fit_parameter(self.estimator, 'sample_weight')): raise ValueError("Underlying estimator does not support" " sample weights.") first_time = not hasattr(self, 'estimators_') self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_partial_fit_estimator)( self.estimators_[i] if not first_time else self.estimator, X, y[:, i], classes[i] if classes is not None else None, sample_weight, first_time) for i in range(y.shape[1])) return self def fit(self, X, y, sample_weight=None): """ Fit the model to data. Fit a separate model for each output variable. Parameters ---------- X : (sparse) array-like, shape (n_samples, n_features) Data. y : (sparse) array-like, shape (n_samples, n_outputs) Multi-output targets. An indicator matrix turns on multilabel estimation. sample_weight : array-like, shape = (n_samples) or None Sample weights. If None, then samples are equally weighted. Only supported if the underlying regressor supports sample weights. Returns ------- self : object Returns self. """ if not hasattr(self.estimator, "fit"): raise ValueError("The base estimator should implement a fit method") X, y = check_X_y(X, y, multi_output=True, accept_sparse=True) if is_classifier(self): check_classification_targets(y) if y.ndim == 1: raise ValueError("y must have at least two dimensions for " "multi-output regression but has only one.") if (sample_weight is not None and not has_fit_parameter(self.estimator, 'sample_weight')): raise ValueError("Underlying estimator does not support" " sample weights.") self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_fit_estimator)( self.estimator, X, y[:, i], sample_weight) for i in range(y.shape[1])) return self def predict(self, X): """Predict multi-output variable using a model trained for each target variable. Parameters ---------- X : (sparse) array-like, shape (n_samples, n_features) Data. Returns ------- y : (sparse) array-like, shape (n_samples, n_outputs) Multi-output targets predicted across multiple predictors. Note: Separate models are generated for each predictor. """ check_is_fitted(self, 'estimators_') if not hasattr(self.estimator, "predict"): raise ValueError("The base estimator should implement a predict method") X = check_array(X, accept_sparse=True) y = Parallel(n_jobs=self.n_jobs)( delayed(parallel_helper)(e, 'predict', X) for e in self.estimators_) return np.asarray(y).T class MultiOutputRegressor(MultiOutputEstimator, RegressorMixin): """Multi target regression This strategy consists of fitting one regressor per target. This is a simple strategy for extending regressors that do not natively support multi-target regression. Parameters ---------- estimator : estimator object An estimator object implementing `fit` and `predict`. n_jobs : int, optional, default=1 The number of jobs to run in parallel for `fit`. If -1, then the number of jobs is set to the number of cores. When individual estimators are fast to train or predict using `n_jobs>1` can result in slower performance due to the overhead of spawning processes. """ def __init__(self, estimator, n_jobs=1): super(MultiOutputRegressor, self).__init__(estimator, n_jobs) @if_delegate_has_method('estimator') def partial_fit(self, X, y, sample_weight=None): """Incrementally fit the model to data. Fit a separate model for each output variable. 
        Parameters
        ----------
        X : (sparse) array-like, shape (n_samples, n_features)
            Data.

        y : (sparse) array-like, shape (n_samples, n_outputs)
            Multi-output targets.

        sample_weight : array-like, shape = (n_samples) or None
            Sample weights. If None, then samples are equally weighted.
            Only supported if the underlying regressor supports sample
            weights.

        Returns
        -------
        self : object
            Returns self.
        """
        super(MultiOutputRegressor, self).partial_fit(
            X, y, sample_weight=sample_weight)

    def score(self, X, y, sample_weight=None):
        """Returns the coefficient of determination R^2 of the prediction.

        The coefficient R^2 is defined as (1 - u/v), where u is the residual
        sum of squares ((y_true - y_pred) ** 2).sum() and v is the regression
        sum of squares ((y_true - y_true.mean()) ** 2).sum(). Best possible
        score is 1.0 and it can be negative (because the model can be
        arbitrarily worse). A constant model that always predicts the
        expected value of y, disregarding the input features, would get a
        R^2 score of 0.0.

        Notes
        -----
        R^2 is calculated by weighting all the targets equally using
        `multioutput='uniform_average'`.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Test samples.

        y : array-like, shape (n_samples) or (n_samples, n_outputs)
            True values for X.

        sample_weight : array-like, shape [n_samples], optional
            Sample weights.

        Returns
        -------
        score : float
            R^2 of self.predict(X) wrt. y.
        """
        # XXX remove in 0.19 when r2_score default for multioutput changes
        from .metrics import r2_score
        return r2_score(y, self.predict(X), sample_weight=sample_weight,
                        multioutput='uniform_average')


class MultiOutputClassifier(MultiOutputEstimator, ClassifierMixin):
    """Multi target classification

    This strategy consists of fitting one classifier per target. This is a
    simple strategy for extending classifiers that do not natively support
    multi-target classification.

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit`, `score` and `predict_proba`.

    n_jobs : int, optional, default=1
        The number of jobs to use for the computation. If -1 all CPUs are
        used. If 1 is given, no parallel computing code is used at all,
        which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but
        one are used. Each target variable in y is fit in parallel.

    Attributes
    ----------
    estimators_ : list of ``n_output`` estimators
        Estimators used for predictions.
    """

    def __init__(self, estimator, n_jobs=1):
        super(MultiOutputClassifier, self).__init__(estimator, n_jobs)

    def predict_proba(self, X):
        """Probability estimates.
        Returns prediction probabilities for each class of each output.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Data

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs \
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        check_is_fitted(self, 'estimators_')
        if not hasattr(self.estimator, "predict_proba"):
            raise ValueError("The base estimator should implement "
                             "a predict_proba method")
        results = [estimator.predict_proba(X) for estimator in
                   self.estimators_]
        return results

    def score(self, X, y):
        """Returns the mean accuracy on the given test data and labels.
Parameters ---------- X : array-like, shape [n_samples, n_features] Test samples y : array-like, shape [n_samples, n_outputs] True values for X Returns ------- scores : float accuracy_score of self.predict(X) versus y """ check_is_fitted(self, 'estimators_') n_outputs_ = len(self.estimators_) if y.ndim == 1: raise ValueError("y must have at least two dimensions for " "multi target classification but has only one") if y.shape[1] != n_outputs_: raise ValueError("The number of outputs of Y for fit {0} and" " score {1} should be same". format(n_outputs_, y.shape[1])) y_pred = self.predict(X) return np.mean(np.all(y == y_pred, axis=1)) class ClassifierChain(BaseEstimator, ClassifierMixin, MetaEstimatorMixin): """A multi-label model that arranges binary classifiers into a chain. Each model makes a prediction in the order specified by the chain using all of the available features provided to the model plus the predictions of models that are earlier in the chain. Parameters ---------- base_estimator : estimator The base estimator from which the classifier chain is built. order : array-like, shape=[n_outputs] or 'random', optional By default the order will be determined by the order of columns in the label matrix Y.:: order = [0, 1, 2, ..., Y.shape[1] - 1] The order of the chain can be explicitly set by providing a list of integers. For example, for a chain of length 5.:: order = [1, 3, 2, 4, 0] means that the first model in the chain will make predictions for column 1 in the Y matrix, the second model will make predictions for column 3, etc. If order is 'random' a random ordering will be used. cv : int, cross-validation generator or an iterable, optional ( default=None) Determines whether to use cross validated predictions or true labels for the results of previous estimators in the chain. If cv is None the true labels are used when fitting. Otherwise possible inputs for cv are: * integer, to specify the number of folds in a (Stratified)KFold, * An object to be used as a cross-validation generator. * An iterable yielding train, test splits. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. The random number generator is used to generate random chain orders. Attributes ---------- classes_ : list A list of arrays of length ``len(estimators_)`` containing the class labels for each estimator in the chain. estimators_ : list A list of clones of base_estimator. order_ : list The order of labels in the classifier chain. References ---------- Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank, "Classifier Chains for Multi-label Classification", 2009. """ def __init__(self, base_estimator, order=None, cv=None, random_state=None): self.base_estimator = base_estimator self.order = order self.cv = cv self.random_state = random_state def fit(self, X, Y): """Fit the model to data matrix X and targets Y. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The input data. Y : array-like, shape (n_samples, n_classes) The target values. Returns ------- self : object Returns self. 
""" X, Y = check_X_y(X, Y, multi_output=True, accept_sparse=True) random_state = check_random_state(self.random_state) check_array(X, accept_sparse=True) self.order_ = self.order if self.order_ is None: self.order_ = np.array(range(Y.shape[1])) elif isinstance(self.order_, str): if self.order_ == 'random': self.order_ = random_state.permutation(Y.shape[1]) elif sorted(self.order_) != list(range(Y.shape[1])): raise ValueError("invalid order") self.estimators_ = [clone(self.base_estimator) for _ in range(Y.shape[1])] self.classes_ = [] if self.cv is None: Y_pred_chain = Y[:, self.order_] if sp.issparse(X): X_aug = sp.hstack((X, Y_pred_chain), format='lil') X_aug = X_aug.tocsr() else: X_aug = np.hstack((X, Y_pred_chain)) elif sp.issparse(X): Y_pred_chain = sp.lil_matrix((X.shape[0], Y.shape[1])) X_aug = sp.hstack((X, Y_pred_chain), format='lil') else: Y_pred_chain = np.zeros((X.shape[0], Y.shape[1])) X_aug = np.hstack((X, Y_pred_chain)) del Y_pred_chain for chain_idx, estimator in enumerate(self.estimators_): y = Y[:, self.order_[chain_idx]] estimator.fit(X_aug[:, :(X.shape[1] + chain_idx)], y) if self.cv is not None and chain_idx < len(self.estimators_) - 1: col_idx = X.shape[1] + chain_idx cv_result = cross_val_predict( self.base_estimator, X_aug[:, :col_idx], y=y, cv=self.cv) if sp.issparse(X_aug): X_aug[:, col_idx] = np.expand_dims(cv_result, 1) else: X_aug[:, col_idx] = cv_result self.classes_.append(estimator.classes_) return self def predict(self, X): """Predict on the data matrix X using the ClassifierChain model. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The input data. Returns ------- Y_pred : array-like, shape (n_samples, n_classes) The predicted values. """ X = check_array(X, accept_sparse=True) Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_))) for chain_idx, estimator in enumerate(self.estimators_): previous_predictions = Y_pred_chain[:, :chain_idx] if sp.issparse(X): if chain_idx == 0: X_aug = X else: X_aug = sp.hstack((X, previous_predictions)) else: X_aug = np.hstack((X, previous_predictions)) Y_pred_chain[:, chain_idx] = estimator.predict(X_aug) inv_order = np.empty_like(self.order_) inv_order[self.order_] = np.arange(len(self.order_)) Y_pred = Y_pred_chain[:, inv_order] return Y_pred @if_delegate_has_method('base_estimator') def predict_proba(self, X): """Predict probability estimates. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- Y_prob : array-like, shape (n_samples, n_classes) """ X = check_array(X, accept_sparse=True) Y_prob_chain = np.zeros((X.shape[0], len(self.estimators_))) Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_))) for chain_idx, estimator in enumerate(self.estimators_): previous_predictions = Y_pred_chain[:, :chain_idx] if sp.issparse(X): X_aug = sp.hstack((X, previous_predictions)) else: X_aug = np.hstack((X, previous_predictions)) Y_prob_chain[:, chain_idx] = estimator.predict_proba(X_aug)[:, 1] Y_pred_chain[:, chain_idx] = estimator.predict(X_aug) inv_order = np.empty_like(self.order_) inv_order[self.order_] = np.arange(len(self.order_)) Y_prob = Y_prob_chain[:, inv_order] return Y_prob @if_delegate_has_method('base_estimator') def decision_function(self, X): """Evaluate the decision_function of the models in the chain. Parameters ---------- X : array-like, shape (n_samples, n_features) Returns ------- Y_decision : array-like, shape (n_samples, n_classes ) Returns the decision function of the sample for each model in the chain. 
""" Y_decision_chain = np.zeros((X.shape[0], len(self.estimators_))) Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_))) for chain_idx, estimator in enumerate(self.estimators_): previous_predictions = Y_pred_chain[:, :chain_idx] if sp.issparse(X): X_aug = sp.hstack((X, previous_predictions)) else: X_aug = np.hstack((X, previous_predictions)) Y_decision_chain[:, chain_idx] = estimator.decision_function(X_aug) Y_pred_chain[:, chain_idx] = estimator.predict(X_aug) inv_order = np.empty_like(self.order_) inv_order[self.order_] = np.arange(len(self.order_)) Y_decision = Y_decision_chain[:, inv_order] return Y_decision
21,892
35.427621
84
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/__init__.py
""" Machine learning module for Python ================================== sklearn is a Python module integrating classical machine learning algorithms in the tightly-knit world of scientific Python packages (numpy, scipy, matplotlib). It aims to provide simple and efficient solutions to learning problems that are accessible to everybody and reusable in various contexts: machine-learning as a versatile tool for science and engineering. See http://scikit-learn.org for complete documentation. """ import sys import re import warnings import os from contextlib import contextmanager as _contextmanager import logging logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler()) logger.setLevel(logging.INFO) _ASSUME_FINITE = bool(os.environ.get('SKLEARN_ASSUME_FINITE', False)) def get_config(): """Retrieve current values for configuration set by :func:`set_config` Returns ------- config : dict Keys are parameter names that can be passed to :func:`set_config`. """ return {'assume_finite': _ASSUME_FINITE} def set_config(assume_finite=None): """Set global scikit-learn configuration Parameters ---------- assume_finite : bool, optional If True, validation for finiteness will be skipped, saving time, but leading to potential crashes. If False, validation for finiteness will be performed, avoiding error. """ global _ASSUME_FINITE if assume_finite is not None: _ASSUME_FINITE = assume_finite @_contextmanager def config_context(**new_config): """Context manager for global scikit-learn configuration Parameters ---------- assume_finite : bool, optional If True, validation for finiteness will be skipped, saving time, but leading to potential crashes. If False, validation for finiteness will be performed, avoiding error. Notes ----- All settings, not just those presently modified, will be returned to their previous values when the context manager is exited. This is not thread-safe. Examples -------- >>> import sklearn >>> from sklearn.utils.validation import assert_all_finite >>> with sklearn.config_context(assume_finite=True): ... assert_all_finite([float('nan')]) >>> with sklearn.config_context(assume_finite=True): ... with sklearn.config_context(assume_finite=False): ... assert_all_finite([float('nan')]) ... # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Input contains NaN, ... """ old_config = get_config().copy() set_config(**new_config) try: yield finally: set_config(**old_config) # Make sure that DeprecationWarning within this package always gets printed warnings.filterwarnings('always', category=DeprecationWarning, module=r'^{0}\.'.format(re.escape(__name__))) # PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # # Generic release markers: # X.Y # X.Y.Z # For bugfix releases # # Admissible pre-release markers: # X.YaN # Alpha release # X.YbN # Beta release # X.YrcN # Release Candidate # X.Y # Final release # # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' # __version__ = '0.19.1' try: # This variable is injected in the __builtins__ by the build # process. It used to enable importing subpackages of sklearn when # the binaries are not built __SKLEARN_SETUP__ except NameError: __SKLEARN_SETUP__ = False if __SKLEARN_SETUP__: sys.stderr.write('Partial import of sklearn during the build process.\n') # We are not importing the rest of the scikit during the build # process, as it may not be compiled yet else: from . 
import __check_build from .base import clone __check_build # avoid flakes unused variable error __all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition', 'cross_validation', 'datasets', 'decomposition', 'dummy', 'ensemble', 'exceptions', 'externals', 'feature_extraction', 'feature_selection', 'gaussian_process', 'grid_search', 'isotonic', 'kernel_approximation', 'kernel_ridge', 'learning_curve', 'linear_model', 'manifold', 'metrics', 'mixture', 'model_selection', 'multiclass', 'multioutput', 'naive_bayes', 'neighbors', 'neural_network', 'pipeline', 'preprocessing', 'random_projection', 'semi_supervised', 'svm', 'tree', 'discriminant_analysis', # Non-modules: 'clone'] def setup_module(module): """Fixture for the tests to assure globally controllable seeding of RNGs""" import os import numpy as np import random # It could have been provided in the environment _random_seed = os.environ.get('SKLEARN_SEED', None) if _random_seed is None: _random_seed = np.random.uniform() * (2 ** 31 - 1) _random_seed = int(_random_seed) print("I: Seeding RNGs with %r" % _random_seed) np.random.seed(_random_seed) random.seed(_random_seed)
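

# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the upstream scikit-learn
# package): how the configuration helpers defined above interact. Guarded so
# that importing sklearn is unaffected; it only runs if this file itself is
# executed as the main program.
if __name__ == "__main__":
    print(get_config())              # {'assume_finite': False} unless the
                                     # SKLEARN_ASSUME_FINITE env var is set
    set_config(assume_finite=True)
    print(get_config())              # {'assume_finite': True}
    with config_context(assume_finite=False):
        print(get_config())          # temporarily {'assume_finite': False}
    print(get_config())              # restored to {'assume_finite': True}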
5,271
30.951515
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/isotonic.py
# Authors: Fabian Pedregosa <[email protected]> # Alexandre Gramfort <[email protected]> # Nelle Varoquaux <[email protected]> # License: BSD 3 clause import numpy as np from scipy import interpolate from scipy.stats import spearmanr from .base import BaseEstimator, TransformerMixin, RegressorMixin from .utils import as_float_array, check_array, check_consistent_length from .utils import deprecated from ._isotonic import _inplace_contiguous_isotonic_regression, _make_unique import warnings import math __all__ = ['check_increasing', 'isotonic_regression', 'IsotonicRegression'] def check_increasing(x, y): """Determine whether y is monotonically correlated with x. y is found increasing or decreasing with respect to x based on a Spearman correlation test. Parameters ---------- x : array-like, shape=(n_samples,) Training data. y : array-like, shape=(n_samples,) Training target. Returns ------- increasing_bool : boolean Whether the relationship is increasing or decreasing. Notes ----- The Spearman correlation coefficient is estimated from the data, and the sign of the resulting estimate is used as the result. In the event that the 95% confidence interval based on Fisher transform spans zero, a warning is raised. References ---------- Fisher transformation. Wikipedia. https://en.wikipedia.org/wiki/Fisher_transformation """ # Calculate Spearman rho estimate and set return accordingly. rho, _ = spearmanr(x, y) increasing_bool = rho >= 0 # Run Fisher transform to get the rho CI, but handle rho=+/-1 if rho not in [-1.0, 1.0] and len(x) > 3: F = 0.5 * math.log((1. + rho) / (1. - rho)) F_se = 1 / math.sqrt(len(x) - 3) # Use a 95% CI, i.e., +/-1.96 S.E. # https://en.wikipedia.org/wiki/Fisher_transformation rho_0 = math.tanh(F - 1.96 * F_se) rho_1 = math.tanh(F + 1.96 * F_se) # Warn if the CI spans zero. if np.sign(rho_0) != np.sign(rho_1): warnings.warn("Confidence interval of the Spearman " "correlation coefficient spans zero. " "Determination of ``increasing`` may be " "suspect.") return increasing_bool def isotonic_regression(y, sample_weight=None, y_min=None, y_max=None, increasing=True): """Solve the isotonic regression model:: min sum w[i] (y[i] - y_[i]) ** 2 subject to y_min = y_[1] <= y_[2] ... <= y_[n] = y_max where: - y[i] are inputs (real numbers) - y_[i] are fitted - w[i] are optional strictly positive weights (default to 1.0) Read more in the :ref:`User Guide <isotonic>`. Parameters ---------- y : iterable of floating-point values The data. sample_weight : iterable of floating-point values, optional, default: None Weights on each point of the regression. If None, weight is set to 1 (equal weights). y_min : optional, default: None If not None, set the lowest value of the fit to y_min. y_max : optional, default: None If not None, set the highest value of the fit to y_max. increasing : boolean, optional, default: True Whether to compute ``y_`` is increasing (if set to True) or decreasing (if set to False) Returns ------- y_ : list of floating-point values Isotonic fit of y. References ---------- "Active set algorithms for isotonic regression; A unifying framework" by Michael J. Best and Nilotpal Chakravarti, section 3. 
""" order = np.s_[:] if increasing else np.s_[::-1] y = np.array(y[order], dtype=np.float64) if sample_weight is None: sample_weight = np.ones(len(y), dtype=np.float64) else: sample_weight = np.array(sample_weight[order], dtype=np.float64) _inplace_contiguous_isotonic_regression(y, sample_weight) if y_min is not None or y_max is not None: # Older versions of np.clip don't accept None as a bound, so use np.inf if y_min is None: y_min = -np.inf if y_max is None: y_max = np.inf np.clip(y, y_min, y_max, y) return y[order] class IsotonicRegression(BaseEstimator, TransformerMixin, RegressorMixin): """Isotonic regression model. The isotonic regression optimization problem is defined by:: min sum w_i (y[i] - y_[i]) ** 2 subject to y_[i] <= y_[j] whenever X[i] <= X[j] and min(y_) = y_min, max(y_) = y_max where: - ``y[i]`` are inputs (real numbers) - ``y_[i]`` are fitted - ``X`` specifies the order. If ``X`` is non-decreasing then ``y_`` is non-decreasing. - ``w[i]`` are optional strictly positive weights (default to 1.0) Read more in the :ref:`User Guide <isotonic>`. Parameters ---------- y_min : optional, default: None If not None, set the lowest value of the fit to y_min. y_max : optional, default: None If not None, set the highest value of the fit to y_max. increasing : boolean or string, optional, default: True If boolean, whether or not to fit the isotonic regression with y increasing or decreasing. The string value "auto" determines whether y should increase or decrease based on the Spearman correlation estimate's sign. out_of_bounds : string, optional, default: "nan" The ``out_of_bounds`` parameter handles how x-values outside of the training domain are handled. When set to "nan", predicted y-values will be NaN. When set to "clip", predicted y-values will be set to the value corresponding to the nearest train interval endpoint. When set to "raise", allow ``interp1d`` to throw ValueError. Attributes ---------- X_min_ : float Minimum value of input array `X_` for left bound. X_max_ : float Maximum value of input array `X_` for right bound. f_ : function The stepwise interpolating function that covers the domain `X_`. Notes ----- Ties are broken using the secondary method from Leeuw, 1977. References ---------- Isotonic Median Regression: A Linear Programming Approach Nilotpal Chakravarti Mathematics of Operations Research Vol. 14, No. 2 (May, 1989), pp. 
303-308 Isotone Optimization in R : Pool-Adjacent-Violators Algorithm (PAVA) and Active Set Methods Leeuw, Hornik, Mair Journal of Statistical Software 2009 Correctness of Kruskal's algorithms for monotone regression with ties Leeuw, Psychometrica, 1977 """ def __init__(self, y_min=None, y_max=None, increasing=True, out_of_bounds='nan'): self.y_min = y_min self.y_max = y_max self.increasing = increasing self.out_of_bounds = out_of_bounds @property @deprecated("Attribute ``X_`` is deprecated in version 0.18 and will be" " removed in version 0.20.") def X_(self): return self._X_ @X_.setter def X_(self, value): self._X_ = value @X_.deleter def X_(self): del self._X_ @property @deprecated("Attribute ``y_`` is deprecated in version 0.18 and will" " be removed in version 0.20.") def y_(self): return self._y_ @y_.setter def y_(self, value): self._y_ = value @y_.deleter def y_(self): del self._y_ def _check_fit_data(self, X, y, sample_weight=None): if len(X.shape) != 1: raise ValueError("X should be a 1d array") def _build_f(self, X, y): """Build the f_ interp1d function.""" # Handle the out_of_bounds argument by setting bounds_error if self.out_of_bounds not in ["raise", "nan", "clip"]: raise ValueError("The argument ``out_of_bounds`` must be in " "'nan', 'clip', 'raise'; got {0}" .format(self.out_of_bounds)) bounds_error = self.out_of_bounds == "raise" if len(y) == 1: # single y, constant prediction self.f_ = lambda x: y.repeat(x.shape) else: self.f_ = interpolate.interp1d(X, y, kind='linear', bounds_error=bounds_error) def _build_y(self, X, y, sample_weight, trim_duplicates=True): """Build the y_ IsotonicRegression.""" check_consistent_length(X, y, sample_weight) X, y = [check_array(x, ensure_2d=False) for x in [X, y]] y = as_float_array(y) self._check_fit_data(X, y, sample_weight) # Determine increasing if auto-determination requested if self.increasing == 'auto': self.increasing_ = check_increasing(X, y) else: self.increasing_ = self.increasing # If sample_weights is passed, removed zero-weight values and clean # order if sample_weight is not None: sample_weight = check_array(sample_weight, ensure_2d=False) mask = sample_weight > 0 X, y, sample_weight = X[mask], y[mask], sample_weight[mask] else: sample_weight = np.ones(len(y)) order = np.lexsort((y, X)) X, y, sample_weight = [array[order].astype(np.float64, copy=False) for array in [X, y, sample_weight]] unique_X, unique_y, unique_sample_weight = _make_unique( X, y, sample_weight) # Store _X_ and _y_ to maintain backward compat during the deprecation # period of X_ and y_ self._X_ = X = unique_X self._y_ = y = isotonic_regression(unique_y, unique_sample_weight, self.y_min, self.y_max, increasing=self.increasing_) # Handle the left and right bounds on X self.X_min_, self.X_max_ = np.min(X), np.max(X) if trim_duplicates: # Remove unnecessary points for faster prediction keep_data = np.ones((len(y),), dtype=bool) # Aside from the 1st and last point, remove points whose y values # are equal to both the point before and the point after it. keep_data[1:-1] = np.logical_or( np.not_equal(y[1:-1], y[:-2]), np.not_equal(y[1:-1], y[2:]) ) return X[keep_data], y[keep_data] else: # The ability to turn off trim_duplicates is only used to it make # easier to unit test that removing duplicates in y does not have # any impact the resulting interpolation function (besides # prediction speed). return X, y def fit(self, X, y, sample_weight=None): """Fit the model using X, y as training data. 
Parameters ---------- X : array-like, shape=(n_samples,) Training data. y : array-like, shape=(n_samples,) Training target. sample_weight : array-like, shape=(n_samples,), optional, default: None Weights. If set to None, all weights will be set to 1 (equal weights). Returns ------- self : object Returns an instance of self. Notes ----- X is stored for future use, as `transform` needs X to interpolate new input data. """ # Transform y by running the isotonic regression algorithm and # transform X accordingly. X, y = self._build_y(X, y, sample_weight) # It is necessary to store the non-redundant part of the training set # on the model to make it possible to support model persistence via # the pickle module as the object built by scipy.interp1d is not # picklable directly. self._necessary_X_, self._necessary_y_ = X, y # Build the interpolation function self._build_f(X, y) return self def transform(self, T): """Transform new data by linear interpolation Parameters ---------- T : array-like, shape=(n_samples,) Data to transform. Returns ------- T_ : array, shape=(n_samples,) The transformed data """ T = as_float_array(T) if len(T.shape) != 1: raise ValueError("Isotonic regression input should be a 1d array") # Handle the out_of_bounds argument by clipping if needed if self.out_of_bounds not in ["raise", "nan", "clip"]: raise ValueError("The argument ``out_of_bounds`` must be in " "'nan', 'clip', 'raise'; got {0}" .format(self.out_of_bounds)) if self.out_of_bounds == "clip": T = np.clip(T, self.X_min_, self.X_max_) return self.f_(T) def predict(self, T): """Predict new data by linear interpolation. Parameters ---------- T : array-like, shape=(n_samples,) Data to transform. Returns ------- T_ : array, shape=(n_samples,) Transformed data. """ return self.transform(T) def __getstate__(self): """Pickle-protocol - return state of the estimator. """ state = super(IsotonicRegression, self).__getstate__() # remove interpolation method state.pop('f_', None) return state def __setstate__(self, state): """Pickle-protocol - set state of the estimator. We need to rebuild the interpolation function. """ super(IsotonicRegression, self).__setstate__(state) if hasattr(self, '_necessary_X_') and hasattr(self, '_necessary_y_'): self._build_f(self._necessary_X_, self._necessary_y_)
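
# ---------------------------------------------------------------------------
# Editor's note: the demo below is an added usage sketch, not part of the
# original scikit-learn source. It exercises the IsotonicRegression estimator
# defined above; the data values and variable names are illustrative only,
# and NumPy is assumed available (it is already imported as ``np`` in this
# module).
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover - illustrative example only
    X_demo = np.arange(10, dtype=np.float64)      # 1-d inputs defining the order
    y_demo = np.array([1., 2., 1., 3., 4., 4., 6., 5., 8., 9.])
    ir = IsotonicRegression(out_of_bounds='clip')
    y_fit = ir.fit_transform(X_demo, y_demo)      # monotone (non-decreasing) fit
    y_new = ir.predict(np.array([2.5, 7.5]))      # stepwise-linear interpolation
    print(y_fit)
    print(y_new)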
14061
32.401425
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/cross_validation.py
""" The :mod:`sklearn.cross_validation` module includes utilities for cross- validation and performance evaluation. """ # Author: Alexandre Gramfort <[email protected]>, # Gael Varoquaux <[email protected]>, # Olivier Grisel <[email protected]> # License: BSD 3 clause from __future__ import print_function from __future__ import division import warnings from itertools import chain, combinations from math import ceil, floor, factorial import numbers import time from abc import ABCMeta, abstractmethod import numpy as np import scipy.sparse as sp from .base import is_classifier, clone from .utils import indexable, check_random_state, safe_indexing from .utils.validation import (_is_arraylike, _num_samples, column_or_1d) from .utils.multiclass import type_of_target from .externals.joblib import Parallel, delayed, logger from .externals.six import with_metaclass from .externals.six.moves import zip from .metrics.scorer import check_scoring from .gaussian_process.kernels import Kernel as GPKernel from .exceptions import FitFailedWarning warnings.warn("This module was deprecated in version 0.18 in favor of the " "model_selection module into which all the refactored classes " "and functions are moved. Also note that the interface of the " "new CV iterators are different from that of this module. " "This module will be removed in 0.20.", DeprecationWarning) __all__ = ['KFold', 'LabelKFold', 'LeaveOneLabelOut', 'LeaveOneOut', 'LeavePLabelOut', 'LeavePOut', 'ShuffleSplit', 'StratifiedKFold', 'StratifiedShuffleSplit', 'PredefinedSplit', 'LabelShuffleSplit', 'check_cv', 'cross_val_score', 'cross_val_predict', 'permutation_test_score', 'train_test_split'] class _PartitionIterator(with_metaclass(ABCMeta)): """Base class for CV iterators where train_mask = ~test_mask Implementations must define `_iter_test_masks` or `_iter_test_indices`. Parameters ---------- n : int Total number of elements in dataset. """ def __init__(self, n): if abs(n - int(n)) >= np.finfo('f').eps: raise ValueError("n must be an integer") self.n = int(n) def __iter__(self): ind = np.arange(self.n) for test_index in self._iter_test_masks(): train_index = np.logical_not(test_index) train_index = ind[train_index] test_index = ind[test_index] yield train_index, test_index # Since subclasses must implement either _iter_test_masks or # _iter_test_indices, neither can be abstract. def _iter_test_masks(self): """Generates boolean masks corresponding to test sets. By default, delegates to _iter_test_indices() """ for test_index in self._iter_test_indices(): test_mask = self._empty_mask() test_mask[test_index] = True yield test_mask def _iter_test_indices(self): """Generates integer indices corresponding to test sets.""" raise NotImplementedError def _empty_mask(self): return np.zeros(self.n, dtype=np.bool) class LeaveOneOut(_PartitionIterator): """Leave-One-Out cross validation iterator. .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.LeaveOneOut` instead. Provides train/test indices to split data in train test sets. Each sample is used once as a test set (singleton) while the remaining samples form the training set. Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and ``LeavePOut(n, p=1)``. Due to the high number of test sets (which is the same as the number of samples) this cross validation method can be very costly. For large datasets one should favor KFold, StratifiedKFold or ShuffleSplit. Read more in the :ref:`User Guide <cross_validation>`. 
Parameters ---------- n : int Total number of elements in dataset. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4]]) >>> y = np.array([1, 2]) >>> loo = cross_validation.LeaveOneOut(2) >>> len(loo) 2 >>> print(loo) sklearn.cross_validation.LeaveOneOut(n=2) >>> for train_index, test_index in loo: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... print(X_train, X_test, y_train, y_test) TRAIN: [1] TEST: [0] [[3 4]] [[1 2]] [2] [1] TRAIN: [0] TEST: [1] [[1 2]] [[3 4]] [1] [2] See also -------- LeaveOneLabelOut for splitting the data according to explicit, domain-specific stratification of the dataset. """ def _iter_test_indices(self): return range(self.n) def __repr__(self): return '%s.%s(n=%i)' % ( self.__class__.__module__, self.__class__.__name__, self.n, ) def __len__(self): return self.n class LeavePOut(_PartitionIterator): """Leave-P-Out cross validation iterator .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.LeavePOut` instead. Provides train/test indices to split data in train test sets. This results in testing on all distinct samples of size p, while the remaining n - p samples form the training set in each iteration. Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)`` which creates non-overlapping test sets. Due to the high number of iterations which grows combinatorically with the number of samples this cross validation method can be very costly. For large datasets one should favor KFold, StratifiedKFold or ShuffleSplit. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n : int Total number of elements in dataset. p : int Size of the test sets. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 3, 4]) >>> lpo = cross_validation.LeavePOut(4, 2) >>> len(lpo) 6 >>> print(lpo) sklearn.cross_validation.LeavePOut(n=4, p=2) >>> for train_index, test_index in lpo: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... 
y_train, y_test = y[train_index], y[test_index] TRAIN: [2 3] TEST: [0 1] TRAIN: [1 3] TEST: [0 2] TRAIN: [1 2] TEST: [0 3] TRAIN: [0 3] TEST: [1 2] TRAIN: [0 2] TEST: [1 3] TRAIN: [0 1] TEST: [2 3] """ def __init__(self, n, p): super(LeavePOut, self).__init__(n) self.p = p def _iter_test_indices(self): for comb in combinations(range(self.n), self.p): yield np.array(comb) def __repr__(self): return '%s.%s(n=%i, p=%i)' % ( self.__class__.__module__, self.__class__.__name__, self.n, self.p, ) def __len__(self): return int(factorial(self.n) / factorial(self.n - self.p) / factorial(self.p)) class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)): """Base class to validate KFold approaches""" @abstractmethod def __init__(self, n, n_folds, shuffle, random_state): super(_BaseKFold, self).__init__(n) if abs(n_folds - int(n_folds)) >= np.finfo('f').eps: raise ValueError("n_folds must be an integer") self.n_folds = n_folds = int(n_folds) if n_folds <= 1: raise ValueError( "k-fold cross validation requires at least one" " train / test split by setting n_folds=2 or more," " got n_folds={0}.".format(n_folds)) if n_folds > self.n: raise ValueError( ("Cannot have number of folds n_folds={0} greater" " than the number of samples: {1}.").format(n_folds, n)) if not isinstance(shuffle, bool): raise TypeError("shuffle must be True or False;" " got {0}".format(shuffle)) self.shuffle = shuffle self.random_state = random_state class KFold(_BaseKFold): """K-Folds cross validation iterator. .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.KFold` instead. Provides train/test indices to split data in train test sets. Split dataset into k consecutive folds (without shuffling by default). Each fold is then used as a validation set once while the k - 1 remaining fold(s) form the training set. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n : int Total number of elements. n_folds : int, default=3 Number of folds. Must be at least 2. shuffle : boolean, optional Whether to shuffle the data before splitting into batches. random_state : int, RandomState instance or None, optional, default=None If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``shuffle`` == True. Examples -------- >>> from sklearn.cross_validation import KFold >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([1, 2, 3, 4]) >>> kf = KFold(4, n_folds=2) >>> len(kf) 2 >>> print(kf) # doctest: +NORMALIZE_WHITESPACE sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False, random_state=None) >>> for train_index, test_index in kf: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [2 3] TEST: [0 1] TRAIN: [0 1] TEST: [2 3] Notes ----- The first n % n_folds folds have size n // n_folds + 1, other folds have size n // n_folds. See also -------- StratifiedKFold take label information into account to avoid building folds with imbalanced class distributions (for binary or multiclass classification tasks). LabelKFold: K-fold iterator variant with non-overlapping labels. 
""" def __init__(self, n, n_folds=3, shuffle=False, random_state=None): super(KFold, self).__init__(n, n_folds, shuffle, random_state) self.idxs = np.arange(n) if shuffle: rng = check_random_state(self.random_state) rng.shuffle(self.idxs) def _iter_test_indices(self): n = self.n n_folds = self.n_folds fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int) fold_sizes[:n % n_folds] += 1 current = 0 for fold_size in fold_sizes: start, stop = current, current + fold_size yield self.idxs[start:stop] current = stop def __repr__(self): return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.n, self.n_folds, self.shuffle, self.random_state, ) def __len__(self): return self.n_folds class LabelKFold(_BaseKFold): """K-fold iterator variant with non-overlapping labels. .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.GroupKFold` instead. The same label will not appear in two different folds (the number of distinct labels has to be at least equal to the number of folds). The folds are approximately balanced in the sense that the number of distinct labels is approximately the same in each fold. .. versionadded:: 0.17 Parameters ---------- labels : array-like with shape (n_samples, ) Contains a label for each sample. The folds are built so that the same label does not appear in two different folds. n_folds : int, default=3 Number of folds. Must be at least 2. Examples -------- >>> from sklearn.cross_validation import LabelKFold >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 3, 4]) >>> labels = np.array([0, 0, 2, 2]) >>> label_kfold = LabelKFold(labels, n_folds=2) >>> len(label_kfold) 2 >>> print(label_kfold) sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2) >>> for train_index, test_index in label_kfold: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... print(X_train, X_test, y_train, y_test) ... TRAIN: [0 1] TEST: [2 3] [[1 2] [3 4]] [[5 6] [7 8]] [1 2] [3 4] TRAIN: [2 3] TEST: [0 1] [[5 6] [7 8]] [[1 2] [3 4]] [3 4] [1 2] See also -------- LeaveOneLabelOut for splitting the data according to explicit, domain-specific stratification of the dataset. 
""" def __init__(self, labels, n_folds=3): super(LabelKFold, self).__init__(len(labels), n_folds, shuffle=False, random_state=None) unique_labels, labels = np.unique(labels, return_inverse=True) n_labels = len(unique_labels) if n_folds > n_labels: raise ValueError( ("Cannot have number of folds n_folds={0} greater" " than the number of labels: {1}.").format(n_folds, n_labels)) # Weight labels by their number of occurrences n_samples_per_label = np.bincount(labels) # Distribute the most frequent labels first indices = np.argsort(n_samples_per_label)[::-1] n_samples_per_label = n_samples_per_label[indices] # Total weight of each fold n_samples_per_fold = np.zeros(n_folds) # Mapping from label index to fold index label_to_fold = np.zeros(len(unique_labels)) # Distribute samples by adding the largest weight to the lightest fold for label_index, weight in enumerate(n_samples_per_label): lightest_fold = np.argmin(n_samples_per_fold) n_samples_per_fold[lightest_fold] += weight label_to_fold[indices[label_index]] = lightest_fold self.idxs = label_to_fold[labels] def _iter_test_indices(self): for f in range(self.n_folds): yield np.where(self.idxs == f)[0] def __repr__(self): return '{0}.{1}(n_labels={2}, n_folds={3})'.format( self.__class__.__module__, self.__class__.__name__, self.n, self.n_folds, ) def __len__(self): return self.n_folds class StratifiedKFold(_BaseKFold): """Stratified K-Folds cross validation iterator .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.StratifiedKFold` instead. Provides train/test indices to split data in train test sets. This cross-validation object is a variation of KFold that returns stratified folds. The folds are made by preserving the percentage of samples for each class. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- y : array-like, [n_samples] Samples to split in K folds. n_folds : int, default=3 Number of folds. Must be at least 2. shuffle : boolean, optional Whether to shuffle each stratification of the data before splitting into batches. random_state : int, RandomState instance or None, optional, default=None If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``shuffle`` == True. Examples -------- >>> from sklearn.cross_validation import StratifiedKFold >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> skf = StratifiedKFold(y, n_folds=2) >>> len(skf) 2 >>> print(skf) # doctest: +NORMALIZE_WHITESPACE sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2, shuffle=False, random_state=None) >>> for train_index, test_index in skf: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [1 3] TEST: [0 2] TRAIN: [0 2] TEST: [1 3] Notes ----- All the folds have size trunc(n_samples / n_folds), the last one has the complementary. See also -------- LabelKFold: K-fold iterator variant with non-overlapping labels. 
""" def __init__(self, y, n_folds=3, shuffle=False, random_state=None): super(StratifiedKFold, self).__init__( len(y), n_folds, shuffle, random_state) y = np.asarray(y) n_samples = y.shape[0] unique_labels, y_inversed = np.unique(y, return_inverse=True) label_counts = np.bincount(y_inversed) min_labels = np.min(label_counts) if np.all(self.n_folds > label_counts): raise ValueError("All the n_labels for individual classes" " are less than %d folds." % (self.n_folds)) if self.n_folds > min_labels: warnings.warn(("The least populated class in y has only %d" " members, which is too few. The minimum" " number of labels for any class cannot" " be less than n_folds=%d." % (min_labels, self.n_folds)), Warning) # don't want to use the same seed in each label's shuffle if self.shuffle: rng = check_random_state(self.random_state) else: rng = self.random_state # pre-assign each sample to a test fold index using individual KFold # splitting strategies for each label so as to respect the # balance of labels per_label_cvs = [ KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle, random_state=rng) for c in label_counts] test_folds = np.zeros(n_samples, dtype=np.int) for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)): for label, (_, test_split) in zip(unique_labels, per_label_splits): label_test_folds = test_folds[y == label] # the test split can be too big because we used # KFold(max(c, self.n_folds), self.n_folds) instead of # KFold(c, self.n_folds) to make it possible to not crash even # if the data is not 100% stratifiable for all the labels # (we use a warning instead of raising an exception) # If this is the case, let's trim it: test_split = test_split[test_split < len(label_test_folds)] label_test_folds[test_split] = test_fold_idx test_folds[y == label] = label_test_folds self.test_folds = test_folds self.y = y def _iter_test_masks(self): for i in range(self.n_folds): yield self.test_folds == i def __repr__(self): return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.y, self.n_folds, self.shuffle, self.random_state, ) def __len__(self): return self.n_folds class LeaveOneLabelOut(_PartitionIterator): """Leave-One-Label_Out cross-validation iterator .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.LeaveOneGroupOut` instead. Provides train/test indices to split data according to a third-party provided label. This label information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the labels could be the year of collection of the samples and thus allow for cross-validation against time-based splits. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- labels : array-like of int with shape (n_samples,) Arbitrary domain-specific stratification of the data to be used to draw the splits. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 1, 2]) >>> labels = np.array([1, 1, 2, 2]) >>> lol = cross_validation.LeaveOneLabelOut(labels) >>> len(lol) 2 >>> print(lol) sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2]) >>> for train_index, test_index in lol: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... 
print(X_train, X_test, y_train, y_test) TRAIN: [2 3] TEST: [0 1] [[5 6] [7 8]] [[1 2] [3 4]] [1 2] [1 2] TRAIN: [0 1] TEST: [2 3] [[1 2] [3 4]] [[5 6] [7 8]] [1 2] [1 2] See also -------- LabelKFold: K-fold iterator variant with non-overlapping labels. """ def __init__(self, labels): super(LeaveOneLabelOut, self).__init__(len(labels)) # We make a copy of labels to avoid side-effects during iteration self.labels = np.array(labels, copy=True) self.unique_labels = np.unique(labels) self.n_unique_labels = len(self.unique_labels) def _iter_test_masks(self): for i in self.unique_labels: yield self.labels == i def __repr__(self): return '%s.%s(labels=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.labels, ) def __len__(self): return self.n_unique_labels class LeavePLabelOut(_PartitionIterator): """Leave-P-Label_Out cross-validation iterator .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.LeavePGroupsOut` instead. Provides train/test indices to split data according to a third-party provided label. This label information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the labels could be the year of collection of the samples and thus allow for cross-validation against time-based splits. The difference between LeavePLabelOut and LeaveOneLabelOut is that the former builds the test sets with all the samples assigned to ``p`` different values of the labels while the latter uses samples all assigned the same labels. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- labels : array-like of int with shape (n_samples,) Arbitrary domain-specific stratification of the data to be used to draw the splits. p : int Number of samples to leave out in the test split. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [5, 6]]) >>> y = np.array([1, 2, 1]) >>> labels = np.array([1, 2, 3]) >>> lpl = cross_validation.LeavePLabelOut(labels, p=2) >>> len(lpl) 3 >>> print(lpl) sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2) >>> for train_index, test_index in lpl: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... print(X_train, X_test, y_train, y_test) TRAIN: [2] TEST: [0 1] [[5 6]] [[1 2] [3 4]] [1] [1 2] TRAIN: [1] TEST: [0 2] [[3 4]] [[1 2] [5 6]] [2] [1 1] TRAIN: [0] TEST: [1 2] [[1 2]] [[3 4] [5 6]] [1] [2 1] See also -------- LabelKFold: K-fold iterator variant with non-overlapping labels. 
""" def __init__(self, labels, p): # We make a copy of labels to avoid side-effects during iteration super(LeavePLabelOut, self).__init__(len(labels)) self.labels = np.array(labels, copy=True) self.unique_labels = np.unique(labels) self.n_unique_labels = len(self.unique_labels) self.p = p def _iter_test_masks(self): comb = combinations(range(self.n_unique_labels), self.p) for idx in comb: test_index = self._empty_mask() idx = np.array(idx) for l in self.unique_labels[idx]: test_index[self.labels == l] = True yield test_index def __repr__(self): return '%s.%s(labels=%s, p=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.labels, self.p, ) def __len__(self): return int(factorial(self.n_unique_labels) / factorial(self.n_unique_labels - self.p) / factorial(self.p)) class BaseShuffleSplit(with_metaclass(ABCMeta)): """Base class for ShuffleSplit and StratifiedShuffleSplit""" def __init__(self, n, n_iter=10, test_size=0.1, train_size=None, random_state=None): self.n = n self.n_iter = n_iter self.test_size = test_size self.train_size = train_size self.random_state = random_state self.n_train, self.n_test = _validate_shuffle_split(n, test_size, train_size) def __iter__(self): for train, test in self._iter_indices(): yield train, test return @abstractmethod def _iter_indices(self): """Generate (train, test) indices""" class ShuffleSplit(BaseShuffleSplit): """Random permutation cross-validation iterator. .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.ShuffleSplit` instead. Yields indices to split data into training and test sets. Note: contrary to other cross-validation strategies, random splits do not guarantee that all folds will be different, although this is still very likely for sizeable datasets. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n : int Total number of elements in the dataset. n_iter : int (default 10) Number of re-shuffling & splitting iterations. test_size : float (default 0.1), int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int, RandomState instance or None, optional (default None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Examples -------- >>> from sklearn import cross_validation >>> rs = cross_validation.ShuffleSplit(4, n_iter=3, ... test_size=.25, random_state=0) >>> len(rs) 3 >>> print(rs) ... # doctest: +ELLIPSIS ShuffleSplit(4, n_iter=3, test_size=0.25, ...) >>> for train_index, test_index in rs: ... print("TRAIN:", train_index, "TEST:", test_index) ... TRAIN: [3 1 0] TEST: [2] TRAIN: [2 1 3] TEST: [0] TRAIN: [0 2 1] TEST: [3] >>> rs = cross_validation.ShuffleSplit(4, n_iter=3, ... train_size=0.5, test_size=.25, random_state=0) >>> for train_index, test_index in rs: ... print("TRAIN:", train_index, "TEST:", test_index) ... 
TRAIN: [3 1] TEST: [2] TRAIN: [2 1] TEST: [0] TRAIN: [0 2] TEST: [3] """ def _iter_indices(self): rng = check_random_state(self.random_state) for i in range(self.n_iter): # random partition permutation = rng.permutation(self.n) ind_test = permutation[:self.n_test] ind_train = permutation[self.n_test:self.n_test + self.n_train] yield ind_train, ind_test def __repr__(self): return ('%s(%d, n_iter=%d, test_size=%s, ' 'random_state=%s)' % ( self.__class__.__name__, self.n, self.n_iter, str(self.test_size), self.random_state, )) def __len__(self): return self.n_iter def _validate_shuffle_split(n, test_size, train_size): if test_size is None and train_size is None: raise ValueError( 'test_size and train_size can not both be None') if test_size is not None: if np.asarray(test_size).dtype.kind == 'f': if test_size >= 1.: raise ValueError( 'test_size=%f should be smaller ' 'than 1.0 or be an integer' % test_size) elif np.asarray(test_size).dtype.kind == 'i': if test_size >= n: raise ValueError( 'test_size=%d should be smaller ' 'than the number of samples %d' % (test_size, n)) else: raise ValueError("Invalid value for test_size: %r" % test_size) if train_size is not None: if np.asarray(train_size).dtype.kind == 'f': if train_size >= 1.: raise ValueError("train_size=%f should be smaller " "than 1.0 or be an integer" % train_size) elif np.asarray(test_size).dtype.kind == 'f' and \ train_size + test_size > 1.: raise ValueError('The sum of test_size and train_size = %f, ' 'should be smaller than 1.0. Reduce ' 'test_size and/or train_size.' % (train_size + test_size)) elif np.asarray(train_size).dtype.kind == 'i': if train_size >= n: raise ValueError("train_size=%d should be smaller " "than the number of samples %d" % (train_size, n)) else: raise ValueError("Invalid value for train_size: %r" % train_size) if np.asarray(test_size).dtype.kind == 'f': n_test = ceil(test_size * n) elif np.asarray(test_size).dtype.kind == 'i': n_test = float(test_size) if train_size is None: n_train = n - n_test else: if np.asarray(train_size).dtype.kind == 'f': n_train = floor(train_size * n) else: n_train = float(train_size) if test_size is None: n_test = n - n_train if n_train + n_test > n: raise ValueError('The sum of train_size and test_size = %d, ' 'should be smaller than the number of ' 'samples %d. Reduce test_size and/or ' 'train_size.' % (n_train + n_test, n)) return int(n_train), int(n_test) def _approximate_mode(class_counts, n_draws, rng): """Computes approximate mode of multivariate hypergeometric. This is an approximation to the mode of the multivariate hypergeometric given by class_counts and n_draws. It shouldn't be off by more than one. It is the mostly likely outcome of drawing n_draws many samples from the population given by class_counts. Parameters ---------- class_counts : ndarray of int Population per class. n_draws : int Number of draws (samples to draw) from the overall population. rng : random state Used to break ties. Returns ------- sampled_classes : ndarray of int Number of samples drawn from each class. 
np.sum(sampled_classes) == n_draws """ # this computes a bad approximation to the mode of the # multivariate hypergeometric given by class_counts and n_draws continuous = n_draws * class_counts / class_counts.sum() # floored means we don't overshoot n_samples, but probably undershoot floored = np.floor(continuous) # we add samples according to how much "left over" probability # they had, until we arrive at n_samples need_to_add = int(n_draws - floored.sum()) if need_to_add > 0: remainder = continuous - floored values = np.sort(np.unique(remainder))[::-1] # add according to remainder, but break ties # randomly to avoid biases for value in values: inds, = np.where(remainder == value) # if we need_to_add less than what's in inds # we draw randomly from them. # if we need to add more, we add them all and # go to the next value add_now = min(len(inds), need_to_add) inds = rng.choice(inds, size=add_now, replace=False) floored[inds] += 1 need_to_add -= add_now if need_to_add == 0: break return floored.astype(np.int) class StratifiedShuffleSplit(BaseShuffleSplit): """Stratified ShuffleSplit cross validation iterator .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.StratifiedShuffleSplit` instead. Provides train/test indices to split data in train test sets. This cross-validation object is a merge of StratifiedKFold and ShuffleSplit, which returns stratified randomized folds. The folds are made by preserving the percentage of samples for each class. Note: like the ShuffleSplit strategy, stratified random splits do not guarantee that all folds will be different, although this is still very likely for sizeable datasets. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- y : array, [n_samples] Labels of samples. n_iter : int (default 10) Number of re-shuffling & splitting iterations. test_size : float (default 0.1), int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int, RandomState instance or None, optional (default None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Examples -------- >>> from sklearn.cross_validation import StratifiedShuffleSplit >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0) >>> len(sss) 3 >>> print(sss) # doctest: +ELLIPSIS StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...) >>> for train_index, test_index in sss: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... 
y_train, y_test = y[train_index], y[test_index] TRAIN: [1 2] TEST: [3 0] TRAIN: [0 2] TEST: [1 3] TRAIN: [0 2] TEST: [3 1] """ def __init__(self, y, n_iter=10, test_size=0.1, train_size=None, random_state=None): super(StratifiedShuffleSplit, self).__init__( len(y), n_iter, test_size, train_size, random_state) self.y = np.array(y) self.classes, self.y_indices = np.unique(y, return_inverse=True) n_cls = self.classes.shape[0] if np.min(np.bincount(self.y_indices)) < 2: raise ValueError("The least populated class in y has only 1" " member, which is too few. The minimum" " number of labels for any class cannot" " be less than 2.") if self.n_train < n_cls: raise ValueError('The train_size = %d should be greater or ' 'equal to the number of classes = %d' % (self.n_train, n_cls)) if self.n_test < n_cls: raise ValueError('The test_size = %d should be greater or ' 'equal to the number of classes = %d' % (self.n_test, n_cls)) def _iter_indices(self): rng = check_random_state(self.random_state) cls_count = np.bincount(self.y_indices) for n in range(self.n_iter): # if there are ties in the class-counts, we want # to make sure to break them anew in each iteration n_i = _approximate_mode(cls_count, self.n_train, rng) class_counts_remaining = cls_count - n_i t_i = _approximate_mode(class_counts_remaining, self.n_test, rng) train = [] test = [] for i, _ in enumerate(self.classes): permutation = rng.permutation(cls_count[i]) perm_indices_class_i = np.where( (i == self.y_indices))[0][permutation] train.extend(perm_indices_class_i[:n_i[i]]) test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]]) train = rng.permutation(train) test = rng.permutation(test) yield train, test def __repr__(self): return ('%s(labels=%s, n_iter=%d, test_size=%s, ' 'random_state=%s)' % ( self.__class__.__name__, self.y, self.n_iter, str(self.test_size), self.random_state, )) def __len__(self): return self.n_iter class PredefinedSplit(_PartitionIterator): """Predefined split cross validation iterator .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.PredefinedSplit` instead. Splits the data into training/test set folds according to a predefined scheme. Each sample can be assigned to at most one test set fold, as specified by the user through the ``test_fold`` parameter. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- test_fold : "array-like, shape (n_samples,) test_fold[i] gives the test set fold of sample i. A value of -1 indicates that the corresponding sample is not part of any test set folds, but will instead always be put into the training fold. Examples -------- >>> from sklearn.cross_validation import PredefinedSplit >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1]) >>> len(ps) 2 >>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1]) >>> for train_index, test_index in ps: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... 
y_train, y_test = y[train_index], y[test_index] TRAIN: [1 2 3] TEST: [0] TRAIN: [0 2] TEST: [1 3] """ def __init__(self, test_fold): super(PredefinedSplit, self).__init__(len(test_fold)) self.test_fold = np.array(test_fold, dtype=np.int) self.test_fold = column_or_1d(self.test_fold) self.unique_folds = np.unique(self.test_fold) self.unique_folds = self.unique_folds[self.unique_folds != -1] def _iter_test_indices(self): for f in self.unique_folds: yield np.where(self.test_fold == f)[0] def __repr__(self): return '%s.%s(test_fold=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.test_fold) def __len__(self): return len(self.unique_folds) class LabelShuffleSplit(ShuffleSplit): """Shuffle-Labels-Out cross-validation iterator .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.GroupShuffleSplit` instead. Provides randomized train/test indices to split data according to a third-party provided label. This label information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the labels could be the year of collection of the samples and thus allow for cross-validation against time-based splits. The difference between LeavePLabelOut and LabelShuffleSplit is that the former generates splits using all subsets of size ``p`` unique labels, whereas LabelShuffleSplit generates a user-determined number of random test splits, each with a user-determined fraction of unique labels. For example, a less computationally intensive alternative to ``LeavePLabelOut(labels, p=10)`` would be ``LabelShuffleSplit(labels, test_size=10, n_iter=100)``. Note: The parameters ``test_size`` and ``train_size`` refer to labels, and not to samples, as in ShuffleSplit. .. versionadded:: 0.17 Parameters ---------- labels : array, [n_samples] Labels of samples n_iter : int (default 5) Number of re-shuffling and splitting iterations. test_size : float (default 0.2), int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the labels to include in the test split. If int, represents the absolute number of test labels. If None, the value is automatically set to the complement of the train size. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the labels to include in the train split. If int, represents the absolute number of train labels. If None, the value is automatically set to the complement of the test size. random_state : int, RandomState instance or None, optional (default None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. 
""" def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None, random_state=None): classes, label_indices = np.unique(labels, return_inverse=True) super(LabelShuffleSplit, self).__init__( len(classes), n_iter=n_iter, test_size=test_size, train_size=train_size, random_state=random_state) self.labels = labels self.classes = classes self.label_indices = label_indices def __repr__(self): return ('%s(labels=%s, n_iter=%d, test_size=%s, ' 'random_state=%s)' % ( self.__class__.__name__, self.labels, self.n_iter, str(self.test_size), self.random_state, )) def __len__(self): return self.n_iter def _iter_indices(self): for label_train, label_test in super(LabelShuffleSplit, self)._iter_indices(): # these are the indices of classes in the partition # invert them into data indices train = np.flatnonzero(np.in1d(self.label_indices, label_train)) test = np.flatnonzero(np.in1d(self.label_indices, label_test)) yield train, test ############################################################################## def _index_param_value(X, v, indices): """Private helper function for parameter value indexing.""" if not _is_arraylike(v) or _num_samples(v) != _num_samples(X): # pass through: skip indexing return v if sp.issparse(v): v = v.tocsr() return safe_indexing(v, indices) def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'): """Generate cross-validated estimates for each input data point .. deprecated:: 0.18 This module will be removed in 0.20. Use :func:`sklearn.model_selection.cross_val_predict` instead. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. verbose : integer, optional The verbosity level. fit_params : dict, optional Parameters to pass to the fit method of the estimator. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. 
Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' Returns ------- preds : ndarray This is the result of calling 'predict' Examples -------- >>> from sklearn import datasets, linear_model >>> from sklearn.cross_validation import cross_val_predict >>> diabetes = datasets.load_diabetes() >>> X = diabetes.data[:150] >>> y = diabetes.target[:150] >>> lasso = linear_model.Lasso() >>> y_pred = cross_val_predict(lasso, X, y) """ X, y = indexable(X, y) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y, train, test, verbose, fit_params) for train, test in cv) preds = [p for p, _ in preds_blocks] locs = np.concatenate([loc for _, loc in preds_blocks]) if not _check_is_partition(locs, _num_samples(X)): raise ValueError('cross_val_predict only works for partitions') inv_locs = np.empty(len(locs), dtype=int) inv_locs[locs] = np.arange(len(locs)) # Check for sparse predictions if sp.issparse(preds[0]): preds = sp.vstack(preds, format=preds[0].format) else: preds = np.concatenate(preds) return preds[inv_locs] def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params): """Fit estimator and predict values for a given dataset split. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. train : array-like, shape (n_train_samples,) Indices of training samples. test : array-like, shape (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. Returns ------- preds : sequence Result of calling 'estimator.predict' test : array-like This is the value of the test parameter """ # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, _index_param_value(X, v, train)) for k, v in fit_params.items()]) X_train, y_train = _safe_split(estimator, X, y, train) X_test, _ = _safe_split(estimator, X, y, test, train) if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) preds = estimator.predict(X_test) return preds, test def _check_is_partition(locs, n): """Check whether locs is a reordering of the array np.arange(n) Parameters ---------- locs : ndarray integer array to test n : int number of expected elements Returns ------- is_partition : bool True iff sorted(locs) is range(n) """ if len(locs) != n: return False hit = np.zeros(n, bool) hit[locs] = True if not np.all(hit): return False return True def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'): """Evaluate a score by cross-validation .. deprecated:: 0.18 This module will be removed in 0.20. Use :func:`sklearn.model_selection.cross_val_score` instead. Read more in the :ref:`User Guide <cross_validation>`. 
Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. verbose : integer, optional The verbosity level. fit_params : dict, optional Parameters to pass to the fit method of the estimator. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' Returns ------- scores : array of float, shape=(len(list(cv)),) Array of scores of the estimator for each run of the cross validation. Examples -------- >>> from sklearn import datasets, linear_model >>> from sklearn.cross_validation import cross_val_score >>> diabetes = datasets.load_diabetes() >>> X = diabetes.data[:150] >>> y = diabetes.target[:150] >>> lasso = linear_model.Lasso() >>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS [ 0.33150734 0.08022311 0.03531764] See Also --------- :func:`sklearn.metrics.make_scorer`: Make a scorer from a performance metric or loss function. """ X, y = indexable(X, y) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, None, fit_params) for train, test in cv) return np.array(scores)[:, 0] def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score=False, return_parameters=False, error_score='raise'): """Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. 
scorer : callable A scorer callable object / function with signature ``scorer(estimator, X, y)``. train : array-like, shape (n_train_samples,) Indices of training samples. test : array-like, shape (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. return_train_score : boolean, optional, default: False Compute and return score on training set. return_parameters : boolean, optional, default: False Return parameters that has been used for the estimator. Returns ------- train_score : float, optional Score on training set, returned only if `return_train_score` is `True`. test_score : float Score on test set. n_test_samples : int Number of test samples. scoring_time : float Time spent for fitting and scoring in seconds. parameters : dict or None, optional The parameters that have been evaluated. """ if verbose > 1: if parameters is None: msg = '' else: msg = '%s' % (', '.join('%s=%s' % (k, v) for k, v in parameters.items())) print("[CV] %s %s" % (msg, (64 - len(msg)) * '.')) # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, _index_param_value(X, v, train)) for k, v in fit_params.items()]) if parameters is not None: estimator.set_params(**parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) try: if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) except Exception as e: if error_score == 'raise': raise elif isinstance(error_score, numbers.Number): test_score = error_score if return_train_score: train_score = error_score warnings.warn("Classifier fit failed. The score on this train-test" " partition for these parameters will be set to %f. " "Details: \n%r" % (error_score, e), FitFailedWarning) else: raise ValueError("error_score must be the string 'raise' or a" " numeric value. (Hint: if using 'raise', please" " make sure that it has been spelled correctly.)" ) else: test_score = _score(estimator, X_test, y_test, scorer) if return_train_score: train_score = _score(estimator, X_train, y_train, scorer) scoring_time = time.time() - start_time if verbose > 2: msg += ", score=%f" % test_score if verbose > 1: end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time)) print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg)) ret = [train_score] if return_train_score else [] ret.extend([test_score, _num_samples(X_test), scoring_time]) if return_parameters: ret.append(parameters) return ret def _safe_split(estimator, X, y, indices, train_indices=None): """Create subset of dataset and properly handle kernels.""" if hasattr(estimator, 'kernel') and callable(estimator.kernel) \ and not isinstance(estimator.kernel, GPKernel): # cannot compute the kernel values with custom function raise ValueError("Cannot use a custom kernel function. 
" "Precompute the kernel matrix instead.") if not hasattr(X, "shape"): if getattr(estimator, "_pairwise", False): raise ValueError("Precomputed kernels or affinity matrices have " "to be passed as arrays or sparse matrices.") X_subset = [X[idx] for idx in indices] else: if getattr(estimator, "_pairwise", False): # X is a precomputed square kernel matrix if X.shape[0] != X.shape[1]: raise ValueError("X should be a square kernel matrix") if train_indices is None: X_subset = X[np.ix_(indices, indices)] else: X_subset = X[np.ix_(indices, train_indices)] else: X_subset = safe_indexing(X, indices) if y is not None: y_subset = safe_indexing(y, indices) else: y_subset = None return X_subset, y_subset def _score(estimator, X_test, y_test, scorer): """Compute the score of an estimator on a given test set.""" if y_test is None: score = scorer(estimator, X_test) else: score = scorer(estimator, X_test, y_test) if hasattr(score, 'item'): try: # e.g. unwrap memmapped scalars score = score.item() except ValueError: # non-scalar? pass if not isinstance(score, numbers.Number): raise ValueError("scoring must return a number, got %s (%s) instead." % (str(score), type(score))) return score def _permutation_test_score(estimator, X, y, cv, scorer): """Auxiliary function for permutation_test_score""" avg_score = [] for train, test in cv: X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) estimator.fit(X_train, y_train) avg_score.append(scorer(estimator, X_test, y_test)) return np.mean(avg_score) def _shuffle(y, labels, random_state): """Return a shuffled copy of y eventually shuffle among same labels.""" if labels is None: ind = random_state.permutation(len(y)) else: ind = np.arange(len(labels)) for label in np.unique(labels): this_mask = (labels == label) ind[this_mask] = random_state.permutation(ind[this_mask]) return safe_indexing(y, ind) def check_cv(cv, X=None, y=None, classifier=False): """Input checker utility for building a CV in a user friendly way. .. deprecated:: 0.18 This module will be removed in 0.20. Use :func:`sklearn.model_selection.check_cv` instead. Parameters ---------- cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if classifier is True and ``y`` is binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. X : array-like The data the cross-val object will be applied on. y : array-like The target variable for a supervised learning problem. classifier : boolean optional Whether the task is a classification task, in which case stratified KFold will be used. Returns ------- checked_cv : a cross-validation generator instance. The return value is guaranteed to be a cv generator instance, whatever the input type. 
""" is_sparse = sp.issparse(X) if cv is None: cv = 3 if isinstance(cv, numbers.Integral): if classifier: if type_of_target(y) in ['binary', 'multiclass']: cv = StratifiedKFold(y, cv) else: cv = KFold(_num_samples(y), cv) else: if not is_sparse: n_samples = len(X) else: n_samples = X.shape[0] cv = KFold(n_samples, cv) return cv def permutation_test_score(estimator, X, y, cv=None, n_permutations=100, n_jobs=1, labels=None, random_state=0, verbose=0, scoring=None): """Evaluate the significance of a cross-validated score with permutations .. deprecated:: 0.18 This module will be removed in 0.20. Use :func:`sklearn.model_selection.permutation_test_score` instead. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like The target variable to try to predict in the case of supervised learning. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. n_permutations : integer, optional Number of times to permute ``y``. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. labels : array-like of shape [n_samples] (optional) Labels constrain the permutation among groups of samples with a same label. random_state : int, RandomState instance or None, optional (default=0) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. verbose : integer, optional The verbosity level. Returns ------- score : float The true score without permuting targets. permutation_scores : array, shape (n_permutations,) The scores obtained for each permutations. pvalue : float The p-value, which approximates the probability that the score would be obtained by chance. This is calculated as: `(C + 1) / (n_permutations + 1)` Where C is the number of permutations whose score >= the true score. The best possible p-value is 1/(n_permutations + 1), the worst is 1.0. Notes ----- This function implements Test 1 in: Ojala and Garriga. Permutation Tests for Studying Classifier Performance. The Journal of Machine Learning Research (2010) vol. 11 """ X, y = indexable(X, y) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) random_state = check_random_state(random_state) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. 
score = _permutation_test_score(clone(estimator), X, y, cv, scorer) permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_permutation_test_score)( clone(estimator), X, _shuffle(y, labels, random_state), cv, scorer) for _ in range(n_permutations)) permutation_scores = np.array(permutation_scores) pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1) return score, permutation_scores, pvalue permutation_test_score.__test__ = False # to avoid a pb with nosetests def train_test_split(*arrays, **options): """Split arrays or matrices into random train and test subsets .. deprecated:: 0.18 This module will be removed in 0.20. Use :func:`sklearn.model_selection.train_test_split` instead. Quick utility that wraps input validation and ``next(iter(ShuffleSplit(n_samples)))`` and application to input data into a single call for splitting (and optionally subsampling) data in a oneliner. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- *arrays : sequence of indexables with same length / shape[0] Allowed inputs are lists, numpy arrays, scipy-sparse matrices or pandas dataframes. test_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. If train size is also None, test size is set to 0.25. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. stratify : array-like or None (default is None) If not None, data is split in a stratified fashion, using this as the labels array. .. versionadded:: 0.17 *stratify* splitting Returns ------- splitting : list, length = 2 * len(arrays), List containing train-test split of inputs. .. versionadded:: 0.16 If the input is sparse, the output will be a ``scipy.sparse.csr_matrix``. Else, output type is the same as the input type. Examples -------- >>> import numpy as np >>> from sklearn.cross_validation import train_test_split >>> X, y = np.arange(10).reshape((5, 2)), range(5) >>> X array([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]) >>> list(y) [0, 1, 2, 3, 4] >>> X_train, X_test, y_train, y_test = train_test_split( ... X, y, test_size=0.33, random_state=42) ... 
>>> X_train array([[4, 5], [0, 1], [6, 7]]) >>> y_train [2, 0, 3] >>> X_test array([[2, 3], [8, 9]]) >>> y_test [1, 4] """ n_arrays = len(arrays) if n_arrays == 0: raise ValueError("At least one array required as input") test_size = options.pop('test_size', None) train_size = options.pop('train_size', None) random_state = options.pop('random_state', None) stratify = options.pop('stratify', None) if options: raise TypeError("Invalid parameters passed: %s" % str(options)) if test_size is None and train_size is None: test_size = 0.25 arrays = indexable(*arrays) if stratify is not None: cv = StratifiedShuffleSplit(stratify, test_size=test_size, train_size=train_size, random_state=random_state) else: n_samples = _num_samples(arrays[0]) cv = ShuffleSplit(n_samples, test_size=test_size, train_size=train_size, random_state=random_state) train, test = next(iter(cv)) return list(chain.from_iterable((safe_indexing(a, train), safe_indexing(a, test)) for a in arrays)) train_test_split.__test__ = False # to avoid a pb with nosetests
73,625
34.465318
79
py
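A minimal usage sketch for the train/test splitting helper defined in the cross-validation module above. It is illustrative only: it imports from sklearn.model_selection, the replacement path named in the deprecation notes, rather than the deprecated module shown in the listing, and the toy arrays are invented for the example.

import numpy as np
from sklearn.model_selection import train_test_split

X = np.arange(20).reshape(10, 2)       # 10 samples, 2 features (made up for the demo)
y = np.arange(10) % 2                  # binary labels, 5 per class
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, stratify=y, random_state=0)
print(X_train.shape, X_test.shape)     # (7, 2) (3, 2)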
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/pipeline.py
""" The :mod:`sklearn.pipeline` module implements utilities to build a composite estimator, as a chain of transforms and estimators. """ # Author: Edouard Duchesnay # Gael Varoquaux # Virgile Fritsch # Alexandre Gramfort # Lars Buitinck # License: BSD from collections import defaultdict import numpy as np from scipy import sparse from .base import clone, TransformerMixin from .externals.joblib import Parallel, delayed, Memory from .externals import six from .utils.metaestimators import if_delegate_has_method from .utils import Bunch from .utils.validation import check_memory from .utils.metaestimators import _BaseComposition __all__ = ['Pipeline', 'FeatureUnion'] class Pipeline(_BaseComposition): """Pipeline of transforms with a final estimator. Sequentially apply a list of transforms and a final estimator. Intermediate steps of the pipeline must be 'transforms', that is, they must implement fit and transform methods. The final estimator only needs to implement fit. The transformers in the pipeline can be cached using ``memory`` argument. The purpose of the pipeline is to assemble several steps that can be cross-validated together while setting different parameters. For this, it enables setting parameters of the various steps using their names and the parameter name separated by a '__', as in the example below. A step's estimator may be replaced entirely by setting the parameter with its name to another estimator, or a transformer removed by setting to None. Read more in the :ref:`User Guide <pipeline>`. Parameters ---------- steps : list List of (name, transform) tuples (implementing fit/transform) that are chained, in the order in which they are chained, with the last object an estimator. memory : None, str or object with the joblib.Memory interface, optional Used to cache the fitted transformers of the pipeline. By default, no caching is performed. If a string is given, it is the path to the caching directory. Enabling caching triggers a clone of the transformers before fitting. Therefore, the transformer instance given to the pipeline cannot be inspected directly. Use the attribute ``named_steps`` or ``steps`` to inspect estimators within the pipeline. Caching the transformers is advantageous when fitting is time consuming. Attributes ---------- named_steps : bunch object, a dictionary with attribute access Read-only attribute to access any step parameter by user given name. Keys are step names and values are steps parameters. Examples -------- >>> from sklearn import svm >>> from sklearn.datasets import samples_generator >>> from sklearn.feature_selection import SelectKBest >>> from sklearn.feature_selection import f_regression >>> from sklearn.pipeline import Pipeline >>> # generate some data to play with >>> X, y = samples_generator.make_classification( ... n_informative=5, n_redundant=0, random_state=42) >>> # ANOVA SVM-C >>> anova_filter = SelectKBest(f_regression, k=5) >>> clf = svm.SVC(kernel='linear') >>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)]) >>> # You can set the parameters using the names issued >>> # For instance, fit using a k of 10 in the SelectKBest >>> # and a parameter 'C' of the svm >>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y) ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE Pipeline(memory=None, steps=[('anova', SelectKBest(...)), ('svc', SVC(...))]) >>> prediction = anova_svm.predict(X) >>> anova_svm.score(X, y) # doctest: +ELLIPSIS 0.829... 
>>> # getting the selected features chosen by anova_filter >>> anova_svm.named_steps['anova'].get_support() ... # doctest: +NORMALIZE_WHITESPACE array([False, False, True, True, False, False, True, True, False, True, False, True, True, False, True, False, True, True, False, False], dtype=bool) >>> # Another way to get selected features chosen by anova_filter >>> anova_svm.named_steps.anova.get_support() ... # doctest: +NORMALIZE_WHITESPACE array([False, False, True, True, False, False, True, True, False, True, False, True, True, False, True, False, True, True, False, False], dtype=bool) """ # BaseEstimator interface def __init__(self, steps, memory=None): self.steps = steps self._validate_steps() self.memory = memory def get_params(self, deep=True): """Get parameters for this estimator. Parameters ---------- deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values. """ return self._get_params('steps', deep=deep) def set_params(self, **kwargs): """Set the parameters of this estimator. Valid parameter keys can be listed with ``get_params()``. Returns ------- self """ self._set_params('steps', **kwargs) return self def _validate_steps(self): names, estimators = zip(*self.steps) # validate names self._validate_names(names) # validate estimators transformers = estimators[:-1] estimator = estimators[-1] for t in transformers: if t is None: continue if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(t, "transform")): raise TypeError("All intermediate steps should be " "transformers and implement fit and transform." " '%s' (type %s) doesn't" % (t, type(t))) # We allow last estimator to be None as an identity transformation if estimator is not None and not hasattr(estimator, "fit"): raise TypeError("Last step of Pipeline should implement fit. " "'%s' (type %s) doesn't" % (estimator, type(estimator))) @property def _estimator_type(self): return self.steps[-1][1]._estimator_type @property def named_steps(self): # Use Bunch object to improve autocomplete return Bunch(**dict(self.steps)) @property def _final_estimator(self): return self.steps[-1][1] # Estimator interface def _fit(self, X, y=None, **fit_params): # shallow copy of steps - this should really be steps_ self.steps = list(self.steps) self._validate_steps() # Setup the memory memory = check_memory(self.memory) fit_transform_one_cached = memory.cache(_fit_transform_one) fit_params_steps = dict((name, {}) for name, step in self.steps if step is not None) for pname, pval in six.iteritems(fit_params): step, param = pname.split('__', 1) fit_params_steps[step][param] = pval Xt = X for step_idx, (name, transformer) in enumerate(self.steps[:-1]): if transformer is None: pass else: if hasattr(memory, 'cachedir') and memory.cachedir is None: # we do not clone when caching is disabled to preserve # backward compatibility cloned_transformer = transformer else: cloned_transformer = clone(transformer) # Fit or load from cache the current transfomer Xt, fitted_transformer = fit_transform_one_cached( cloned_transformer, None, Xt, y, **fit_params_steps[name]) # Replace the transformer of the step with the fitted # transformer. This is necessary when loading the transformer # from the cache. 
self.steps[step_idx] = (name, fitted_transformer) if self._final_estimator is None: return Xt, {} return Xt, fit_params_steps[self.steps[-1][0]] def fit(self, X, y=None, **fit_params): """Fit the model Fit all the transforms one after the other and transform the data, then fit the transformed data using the final estimator. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **fit_params : dict of string -> object Parameters passed to the ``fit`` method of each step, where each parameter name is prefixed such that parameter ``p`` for step ``s`` has key ``s__p``. Returns ------- self : Pipeline This estimator """ Xt, fit_params = self._fit(X, y, **fit_params) if self._final_estimator is not None: self._final_estimator.fit(Xt, y, **fit_params) return self def fit_transform(self, X, y=None, **fit_params): """Fit the model and transform with the final estimator Fits all the transforms one after the other and transforms the data, then uses fit_transform on transformed data with the final estimator. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **fit_params : dict of string -> object Parameters passed to the ``fit`` method of each step, where each parameter name is prefixed such that parameter ``p`` for step ``s`` has key ``s__p``. Returns ------- Xt : array-like, shape = [n_samples, n_transformed_features] Transformed samples """ last_step = self._final_estimator Xt, fit_params = self._fit(X, y, **fit_params) if hasattr(last_step, 'fit_transform'): return last_step.fit_transform(Xt, y, **fit_params) elif last_step is None: return Xt else: return last_step.fit(Xt, y, **fit_params).transform(Xt) @if_delegate_has_method(delegate='_final_estimator') def predict(self, X): """Apply transforms to the data, and predict with the final estimator Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. Returns ------- y_pred : array-like """ Xt = X for name, transform in self.steps[:-1]: if transform is not None: Xt = transform.transform(Xt) return self.steps[-1][-1].predict(Xt) @if_delegate_has_method(delegate='_final_estimator') def fit_predict(self, X, y=None, **fit_params): """Applies fit_predict of last step in pipeline after transforms. Applies fit_transforms of a pipeline to the data, followed by the fit_predict method of the final estimator in the pipeline. Valid only if the final estimator implements fit_predict. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **fit_params : dict of string -> object Parameters passed to the ``fit`` method of each step, where each parameter name is prefixed such that parameter ``p`` for step ``s`` has key ``s__p``. Returns ------- y_pred : array-like """ Xt, fit_params = self._fit(X, y, **fit_params) return self.steps[-1][-1].fit_predict(Xt, y, **fit_params) @if_delegate_has_method(delegate='_final_estimator') def predict_proba(self, X): """Apply transforms, and predict_proba of the final estimator Parameters ---------- X : iterable Data to predict on. 
Must fulfill input requirements of first step of the pipeline. Returns ------- y_proba : array-like, shape = [n_samples, n_classes] """ Xt = X for name, transform in self.steps[:-1]: if transform is not None: Xt = transform.transform(Xt) return self.steps[-1][-1].predict_proba(Xt) @if_delegate_has_method(delegate='_final_estimator') def decision_function(self, X): """Apply transforms, and decision_function of the final estimator Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. Returns ------- y_score : array-like, shape = [n_samples, n_classes] """ Xt = X for name, transform in self.steps[:-1]: if transform is not None: Xt = transform.transform(Xt) return self.steps[-1][-1].decision_function(Xt) @if_delegate_has_method(delegate='_final_estimator') def predict_log_proba(self, X): """Apply transforms, and predict_log_proba of the final estimator Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. Returns ------- y_score : array-like, shape = [n_samples, n_classes] """ Xt = X for name, transform in self.steps[:-1]: if transform is not None: Xt = transform.transform(Xt) return self.steps[-1][-1].predict_log_proba(Xt) @property def transform(self): """Apply transforms, and transform with the final estimator This also works where final estimator is ``None``: all prior transformations are applied. Parameters ---------- X : iterable Data to transform. Must fulfill input requirements of first step of the pipeline. Returns ------- Xt : array-like, shape = [n_samples, n_transformed_features] """ # _final_estimator is None or has transform, otherwise attribute error # XXX: Handling the None case means we can't use if_delegate_has_method if self._final_estimator is not None: self._final_estimator.transform return self._transform def _transform(self, X): Xt = X for name, transform in self.steps: if transform is not None: Xt = transform.transform(Xt) return Xt @property def inverse_transform(self): """Apply inverse transformations in reverse order All estimators in the pipeline must support ``inverse_transform``. Parameters ---------- Xt : array-like, shape = [n_samples, n_transformed_features] Data samples, where ``n_samples`` is the number of samples and ``n_features`` is the number of features. Must fulfill input requirements of last step of pipeline's ``inverse_transform`` method. Returns ------- Xt : array-like, shape = [n_samples, n_features] """ # raise AttributeError if necessary for hasattr behaviour # XXX: Handling the None case means we can't use if_delegate_has_method for name, transform in self.steps: if transform is not None: transform.inverse_transform return self._inverse_transform def _inverse_transform(self, X): Xt = X for name, transform in self.steps[::-1]: if transform is not None: Xt = transform.inverse_transform(Xt) return Xt @if_delegate_has_method(delegate='_final_estimator') def score(self, X, y=None, sample_weight=None): """Apply transforms, and score with the final estimator Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Targets used for scoring. Must fulfill label requirements for all steps of the pipeline. sample_weight : array-like, default=None If not None, this argument is passed as ``sample_weight`` keyword argument to the ``score`` method of the final estimator. 
Returns ------- score : float """ Xt = X for name, transform in self.steps[:-1]: if transform is not None: Xt = transform.transform(Xt) score_params = {} if sample_weight is not None: score_params['sample_weight'] = sample_weight return self.steps[-1][-1].score(Xt, y, **score_params) @property def classes_(self): return self.steps[-1][-1].classes_ @property def _pairwise(self): # check if first estimator expects pairwise input return getattr(self.steps[0][1], '_pairwise', False) def _name_estimators(estimators): """Generate names for estimators.""" names = [type(estimator).__name__.lower() for estimator in estimators] namecount = defaultdict(int) for est, name in zip(estimators, names): namecount[name] += 1 for k, v in list(six.iteritems(namecount)): if v == 1: del namecount[k] for i in reversed(range(len(estimators))): name = names[i] if name in namecount: names[i] += "-%d" % namecount[name] namecount[name] -= 1 return list(zip(names, estimators)) def make_pipeline(*steps, **kwargs): """Construct a Pipeline from the given estimators. This is a shorthand for the Pipeline constructor; it does not require, and does not permit, naming the estimators. Instead, their names will be set to the lowercase of their types automatically. Parameters ---------- *steps : list of estimators, memory : None, str or object with the joblib.Memory interface, optional Used to cache the fitted transformers of the pipeline. By default, no caching is performed. If a string is given, it is the path to the caching directory. Enabling caching triggers a clone of the transformers before fitting. Therefore, the transformer instance given to the pipeline cannot be inspected directly. Use the attribute ``named_steps`` or ``steps`` to inspect estimators within the pipeline. Caching the transformers is advantageous when fitting is time consuming. Examples -------- >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.preprocessing import StandardScaler >>> make_pipeline(StandardScaler(), GaussianNB(priors=None)) ... # doctest: +NORMALIZE_WHITESPACE Pipeline(memory=None, steps=[('standardscaler', StandardScaler(copy=True, with_mean=True, with_std=True)), ('gaussiannb', GaussianNB(priors=None))]) Returns ------- p : Pipeline """ memory = kwargs.pop('memory', None) if kwargs: raise TypeError('Unknown keyword arguments: "{}"' .format(list(kwargs.keys())[0])) return Pipeline(_name_estimators(steps), memory=memory) def _fit_one_transformer(transformer, X, y): return transformer.fit(X, y) def _transform_one(transformer, weight, X): res = transformer.transform(X) # if we have a weight for this transformer, multiply output if weight is None: return res return res * weight def _fit_transform_one(transformer, weight, X, y, **fit_params): if hasattr(transformer, 'fit_transform'): res = transformer.fit_transform(X, y, **fit_params) else: res = transformer.fit(X, y, **fit_params).transform(X) # if we have a weight for this transformer, multiply output if weight is None: return res, transformer return res * weight, transformer class FeatureUnion(_BaseComposition, TransformerMixin): """Concatenates results of multiple transformer objects. This estimator applies a list of transformer objects in parallel to the input data, then concatenates the results. This is useful to combine several feature extraction mechanisms into a single transformer. Parameters of the transformers may be set using its name and the parameter name separated by a '__'. 
A transformer may be replaced entirely by setting the parameter with its name to another transformer, or removed by setting to ``None``. Read more in the :ref:`User Guide <feature_union>`. Parameters ---------- transformer_list : list of (string, transformer) tuples List of transformer objects to be applied to the data. The first half of each tuple is the name of the transformer. n_jobs : int, optional Number of jobs to run in parallel (default 1). transformer_weights : dict, optional Multiplicative weights for features per transformer. Keys are transformer names, values the weights. """ def __init__(self, transformer_list, n_jobs=1, transformer_weights=None): self.transformer_list = transformer_list self.n_jobs = n_jobs self.transformer_weights = transformer_weights self._validate_transformers() def get_params(self, deep=True): """Get parameters for this estimator. Parameters ---------- deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values. """ return self._get_params('transformer_list', deep=deep) def set_params(self, **kwargs): """Set the parameters of this estimator. Valid parameter keys can be listed with ``get_params()``. Returns ------- self """ self._set_params('transformer_list', **kwargs) return self def _validate_transformers(self): names, transformers = zip(*self.transformer_list) # validate names self._validate_names(names) # validate estimators for t in transformers: if t is None: continue if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(t, "transform")): raise TypeError("All estimators should implement fit and " "transform. '%s' (type %s) doesn't" % (t, type(t))) def _iter(self): """Generate (name, est, weight) tuples excluding None transformers """ get_weight = (self.transformer_weights or {}).get return ((name, trans, get_weight(name)) for name, trans in self.transformer_list if trans is not None) def get_feature_names(self): """Get feature names from all transformers. Returns ------- feature_names : list of strings Names of the features produced by transform. """ feature_names = [] for name, trans, weight in self._iter(): if not hasattr(trans, 'get_feature_names'): raise AttributeError("Transformer %s (type %s) does not " "provide get_feature_names." % (str(name), type(trans).__name__)) feature_names.extend([name + "__" + f for f in trans.get_feature_names()]) return feature_names def fit(self, X, y=None): """Fit all transformers using X. Parameters ---------- X : iterable or array-like, depending on transformers Input data, used to fit transformers. y : array-like, shape (n_samples, ...), optional Targets for supervised learning. Returns ------- self : FeatureUnion This estimator """ self.transformer_list = list(self.transformer_list) self._validate_transformers() transformers = Parallel(n_jobs=self.n_jobs)( delayed(_fit_one_transformer)(trans, X, y) for _, trans, _ in self._iter()) self._update_transformer_list(transformers) return self def fit_transform(self, X, y=None, **fit_params): """Fit all transformers, transform the data and concatenate results. Parameters ---------- X : iterable or array-like, depending on transformers Input data to be transformed. y : array-like, shape (n_samples, ...), optional Targets for supervised learning. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. 
sum_n_components is the sum of n_components (output dimension) over transformers. """ self._validate_transformers() result = Parallel(n_jobs=self.n_jobs)( delayed(_fit_transform_one)(trans, weight, X, y, **fit_params) for name, trans, weight in self._iter()) if not result: # All transformers are None return np.zeros((X.shape[0], 0)) Xs, transformers = zip(*result) self._update_transformer_list(transformers) if any(sparse.issparse(f) for f in Xs): Xs = sparse.hstack(Xs).tocsr() else: Xs = np.hstack(Xs) return Xs def transform(self, X): """Transform X separately by each transformer, concatenate results. Parameters ---------- X : iterable or array-like, depending on transformers Input data to be transformed. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers. """ Xs = Parallel(n_jobs=self.n_jobs)( delayed(_transform_one)(trans, weight, X) for name, trans, weight in self._iter()) if not Xs: # All transformers are None return np.zeros((X.shape[0], 0)) if any(sparse.issparse(f) for f in Xs): Xs = sparse.hstack(Xs).tocsr() else: Xs = np.hstack(Xs) return Xs def _update_transformer_list(self, transformers): transformers = iter(transformers) self.transformer_list[:] = [ (name, None if old is None else next(transformers)) for name, old in self.transformer_list ] def make_union(*transformers, **kwargs): """Construct a FeatureUnion from the given transformers. This is a shorthand for the FeatureUnion constructor; it does not require, and does not permit, naming the transformers. Instead, they will be given names automatically based on their types. It also does not allow weighting. Parameters ---------- *transformers : list of estimators n_jobs : int, optional Number of jobs to run in parallel (default 1). Returns ------- f : FeatureUnion Examples -------- >>> from sklearn.decomposition import PCA, TruncatedSVD >>> from sklearn.pipeline import make_union >>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE FeatureUnion(n_jobs=1, transformer_list=[('pca', PCA(copy=True, iterated_power='auto', n_components=None, random_state=None, svd_solver='auto', tol=0.0, whiten=False)), ('truncatedsvd', TruncatedSVD(algorithm='randomized', n_components=2, n_iter=5, random_state=None, tol=0.0))], transformer_weights=None) """ n_jobs = kwargs.pop('n_jobs', 1) if kwargs: # We do not currently support `transformer_weights` as we may want to # change its type spec in make_union raise TypeError('Unknown keyword arguments: "{}"' .format(list(kwargs.keys())[0])) return FeatureUnion(_name_estimators(transformers), n_jobs=n_jobs)
29,550
34.732769
79
py
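A small sketch chaining the Pipeline and FeatureUnion classes from the file above. The iris data and the chosen steps (PCA, SelectKBest, SVC) are arbitrary illustrations, not anything prescribed by the listing; the point is a parallel feature union feeding a final estimator, plus the name__param syntax for nested parameters described in the docstrings.

from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)

# Two transformers run in parallel; their outputs are concatenated column-wise.
features = FeatureUnion([("pca", PCA(n_components=2)),
                         ("kbest", SelectKBest(k=1))])

# The concatenated features feed the final estimator.
model = Pipeline([("features", features), ("svc", SVC(kernel="linear"))])

# Nested parameters are addressed with the step-name__param convention.
model.set_params(features__kbest__k=2, svc__C=1.0)
model.fit(X, y)
print(model.score(X, y))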
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/dummy.py
# Author: Mathieu Blondel <[email protected]> # Arnaud Joly <[email protected]> # Maheshakya Wijewardena <[email protected]> # License: BSD 3 clause from __future__ import division import warnings import numpy as np import scipy.sparse as sp from .base import BaseEstimator, ClassifierMixin, RegressorMixin from .utils import check_random_state from .utils.validation import check_array from .utils.validation import check_consistent_length from .utils.validation import check_is_fitted from .utils.random import random_choice_csc from .utils.stats import _weighted_percentile from .utils.multiclass import class_distribution class DummyClassifier(BaseEstimator, ClassifierMixin): """ DummyClassifier is a classifier that makes predictions using simple rules. This classifier is useful as a simple baseline to compare with other (real) classifiers. Do not use it for real problems. Read more in the :ref:`User Guide <dummy_estimators>`. Parameters ---------- strategy : str, default="stratified" Strategy to use to generate predictions. * "stratified": generates predictions by respecting the training set's class distribution. * "most_frequent": always predicts the most frequent label in the training set. * "prior": always predicts the class that maximizes the class prior (like "most_frequent") and ``predict_proba`` returns the class prior. * "uniform": generates predictions uniformly at random. * "constant": always predicts a constant label that is provided by the user. This is useful for metrics that evaluate a non-majority class .. versionadded:: 0.17 Dummy Classifier now supports prior fitting strategy using parameter *prior*. random_state : int, RandomState instance or None, optional, default=None If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. constant : int or str or array of shape = [n_outputs] The explicit constant as predicted by the "constant" strategy. This parameter is useful only for the "constant" strategy. Attributes ---------- classes_ : array or list of array of shape = [n_classes] Class labels for each output. n_classes_ : array or list of array of shape = [n_classes] Number of label for each output. class_prior_ : array or list of array of shape = [n_classes] Probability of each class for each output. n_outputs_ : int, Number of outputs. outputs_2d_ : bool, True if the output at fit is 2d, else false. sparse_output_ : bool, True if the array returned from predict is to be in sparse CSC format. Is automatically set to True if the input y is passed in sparse format. """ def __init__(self, strategy="stratified", random_state=None, constant=None): self.strategy = strategy self.random_state = random_state self.constant = constant def fit(self, X, y, sample_weight=None): """Fit the random classifier. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_outputs] Target values. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- self : object Returns self. 
""" X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], force_all_finite=False) if self.strategy not in ("most_frequent", "stratified", "uniform", "constant", "prior"): raise ValueError("Unknown strategy type.") if self.strategy == "uniform" and sp.issparse(y): y = y.toarray() warnings.warn('A local copy of the target data has been converted ' 'to a numpy array. Predicting on sparse target data ' 'with the uniform strategy would not save memory ' 'and would be slower.', UserWarning) self.sparse_output_ = sp.issparse(y) if not self.sparse_output_: y = np.atleast_1d(y) self.output_2d_ = y.ndim == 2 if y.ndim == 1: y = np.reshape(y, (-1, 1)) self.n_outputs_ = y.shape[1] if self.strategy == "constant": if self.constant is None: raise ValueError("Constant target value has to be specified " "when the constant strategy is used.") else: constant = np.reshape(np.atleast_1d(self.constant), (-1, 1)) if constant.shape[0] != self.n_outputs_: raise ValueError("Constant target value should have " "shape (%d, 1)." % self.n_outputs_) (self.classes_, self.n_classes_, self.class_prior_) = class_distribution(y, sample_weight) if (self.strategy == "constant" and any(constant[k] not in self.classes_[k] for k in range(self.n_outputs_))): # Checking in case of constant strategy if the constant # provided by the user is in y. raise ValueError("The constant target value must be " "present in training data") if self.n_outputs_ == 1 and not self.output_2d_: self.n_classes_ = self.n_classes_[0] self.classes_ = self.classes_[0] self.class_prior_ = self.class_prior_[0] return self def predict(self, X): """Perform classification on test vectors X. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Input vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- y : array, shape = [n_samples] or [n_samples, n_outputs] Predicted target values for X. 
""" check_is_fitted(self, 'classes_') X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], force_all_finite=False) # numpy random_state expects Python int and not long as size argument # under Windows n_samples = int(X.shape[0]) rs = check_random_state(self.random_state) n_classes_ = self.n_classes_ classes_ = self.classes_ class_prior_ = self.class_prior_ constant = self.constant if self.n_outputs_ == 1: # Get same type even for self.n_outputs_ == 1 n_classes_ = [n_classes_] classes_ = [classes_] class_prior_ = [class_prior_] constant = [constant] # Compute probability only once if self.strategy == "stratified": proba = self.predict_proba(X) if self.n_outputs_ == 1: proba = [proba] if self.sparse_output_: class_prob = None if self.strategy in ("most_frequent", "prior"): classes_ = [np.array([cp.argmax()]) for cp in class_prior_] elif self.strategy == "stratified": class_prob = class_prior_ elif self.strategy == "uniform": raise ValueError("Sparse target prediction is not " "supported with the uniform strategy") elif self.strategy == "constant": classes_ = [np.array([c]) for c in constant] y = random_choice_csc(n_samples, classes_, class_prob, self.random_state) else: if self.strategy in ("most_frequent", "prior"): y = np.tile([classes_[k][class_prior_[k].argmax()] for k in range(self.n_outputs_)], [n_samples, 1]) elif self.strategy == "stratified": y = np.vstack(classes_[k][proba[k].argmax(axis=1)] for k in range(self.n_outputs_)).T elif self.strategy == "uniform": ret = [classes_[k][rs.randint(n_classes_[k], size=n_samples)] for k in range(self.n_outputs_)] y = np.vstack(ret).T elif self.strategy == "constant": y = np.tile(self.constant, (n_samples, 1)) if self.n_outputs_ == 1 and not self.output_2d_: y = np.ravel(y) return y def predict_proba(self, X): """ Return probability estimates for the test vectors X. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Input vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- P : array-like or list of array-lke of shape = [n_samples, n_classes] Returns the probability of the sample for each class in the model, where classes are ordered arithmetically, for each output. 
""" check_is_fitted(self, 'classes_') X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], force_all_finite=False) # numpy random_state expects Python int and not long as size argument # under Windows n_samples = int(X.shape[0]) rs = check_random_state(self.random_state) n_classes_ = self.n_classes_ classes_ = self.classes_ class_prior_ = self.class_prior_ constant = self.constant if self.n_outputs_ == 1 and not self.output_2d_: # Get same type even for self.n_outputs_ == 1 n_classes_ = [n_classes_] classes_ = [classes_] class_prior_ = [class_prior_] constant = [constant] P = [] for k in range(self.n_outputs_): if self.strategy == "most_frequent": ind = class_prior_[k].argmax() out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64) out[:, ind] = 1.0 elif self.strategy == "prior": out = np.ones((n_samples, 1)) * class_prior_[k] elif self.strategy == "stratified": out = rs.multinomial(1, class_prior_[k], size=n_samples) elif self.strategy == "uniform": out = np.ones((n_samples, n_classes_[k]), dtype=np.float64) out /= n_classes_[k] elif self.strategy == "constant": ind = np.where(classes_[k] == constant[k]) out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64) out[:, ind] = 1.0 P.append(out) if self.n_outputs_ == 1 and not self.output_2d_: P = P[0] return P def predict_log_proba(self, X): """ Return log probability estimates for the test vectors X. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Input vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- P : array-like or list of array-like of shape = [n_samples, n_classes] Returns the log probability of the sample for each class in the model, where classes are ordered arithmetically for each output. """ proba = self.predict_proba(X) if self.n_outputs_ == 1: return np.log(proba) else: return [np.log(p) for p in proba] class DummyRegressor(BaseEstimator, RegressorMixin): """ DummyRegressor is a regressor that makes predictions using simple rules. This regressor is useful as a simple baseline to compare with other (real) regressors. Do not use it for real problems. Read more in the :ref:`User Guide <dummy_estimators>`. Parameters ---------- strategy : str Strategy to use to generate predictions. * "mean": always predicts the mean of the training set * "median": always predicts the median of the training set * "quantile": always predicts a specified quantile of the training set, provided with the quantile parameter. * "constant": always predicts a constant value that is provided by the user. constant : int or float or array of shape = [n_outputs] The explicit constant as predicted by the "constant" strategy. This parameter is useful only for the "constant" strategy. quantile : float in [0.0, 1.0] The quantile to predict using the "quantile" strategy. A quantile of 0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the maximum. Attributes ---------- constant_ : float or array of shape [n_outputs] Mean or median or quantile of the training targets or constant value given by the user. n_outputs_ : int, Number of outputs. outputs_2d_ : bool, True if the output at fit is 2d, else false. """ def __init__(self, strategy="mean", constant=None, quantile=None): self.strategy = strategy self.constant = constant self.quantile = quantile def fit(self, X, y, sample_weight=None): """Fit the random regressor. 
Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_outputs] Target values. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- self : object Returns self. """ X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], force_all_finite=False) if self.strategy not in ("mean", "median", "quantile", "constant"): raise ValueError("Unknown strategy type: %s, expected " "'mean', 'median', 'quantile' or 'constant'" % self.strategy) y = check_array(y, ensure_2d=False) if len(y) == 0: raise ValueError("y must not be empty.") self.output_2d_ = y.ndim == 2 if y.ndim == 1: y = np.reshape(y, (-1, 1)) self.n_outputs_ = y.shape[1] check_consistent_length(X, y, sample_weight) if self.strategy == "mean": self.constant_ = np.average(y, axis=0, weights=sample_weight) elif self.strategy == "median": if sample_weight is None: self.constant_ = np.median(y, axis=0) else: self.constant_ = [_weighted_percentile(y[:, k], sample_weight, percentile=50.) for k in range(self.n_outputs_)] elif self.strategy == "quantile": if self.quantile is None or not np.isscalar(self.quantile): raise ValueError("Quantile must be a scalar in the range " "[0.0, 1.0], but got %s." % self.quantile) percentile = self.quantile * 100.0 if sample_weight is None: self.constant_ = np.percentile(y, axis=0, q=percentile) else: self.constant_ = [_weighted_percentile(y[:, k], sample_weight, percentile=percentile) for k in range(self.n_outputs_)] elif self.strategy == "constant": if self.constant is None: raise TypeError("Constant target value has to be specified " "when the constant strategy is used.") self.constant = check_array(self.constant, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False, ensure_min_samples=0) if self.output_2d_ and self.constant.shape[0] != y.shape[1]: raise ValueError( "Constant target value should have " "shape (%d, 1)." % y.shape[1]) self.constant_ = self.constant self.constant_ = np.reshape(self.constant_, (1, -1)) return self def predict(self, X): """ Perform classification on test vectors X. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Input vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- y : array, shape = [n_samples] or [n_samples, n_outputs] Predicted target values for X. """ check_is_fitted(self, "constant_") X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], force_all_finite=False) n_samples = X.shape[0] y = np.ones((n_samples, 1)) * self.constant_ if self.n_outputs_ == 1 and not self.output_2d_: y = np.ravel(y) return y
17,936
35.756148
79
py
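A brief sketch of the two baseline estimators defined above. The toy targets are made up; the point is only that the features are ignored and each prediction comes from the chosen strategy.

import numpy as np
from sklearn.dummy import DummyClassifier, DummyRegressor

X = np.zeros((6, 1))                   # features are ignored by both estimators

y_clf = np.array([0, 0, 0, 1, 1, 2])
clf = DummyClassifier(strategy="most_frequent").fit(X, y_clf)
print(clf.predict(X))                  # always the majority class, here 0

y_reg = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
reg = DummyRegressor(strategy="median").fit(X, y_reg)
print(reg.predict(X))                  # constant median prediction, here 3.5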
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/feature_selection/from_model.py
# Authors: Gilles Louppe, Mathieu Blondel, Maheshakya Wijewardena # License: BSD 3 clause import numpy as np from .base import SelectorMixin from ..base import BaseEstimator, clone, MetaEstimatorMixin from ..externals import six from ..exceptions import NotFittedError from ..utils.metaestimators import if_delegate_has_method def _get_feature_importances(estimator, norm_order=1): """Retrieve or aggregate feature importances from estimator""" importances = getattr(estimator, "feature_importances_", None) if importances is None and hasattr(estimator, "coef_"): if estimator.coef_.ndim == 1: importances = np.abs(estimator.coef_) else: importances = np.linalg.norm(estimator.coef_, axis=0, ord=norm_order) elif importances is None: raise ValueError( "The underlying estimator %s has no `coef_` or " "`feature_importances_` attribute. Either pass a fitted estimator" " to SelectFromModel or call fit before calling transform." % estimator.__class__.__name__) return importances def _calculate_threshold(estimator, importances, threshold): """Interpret the threshold value""" if threshold is None: # determine default from estimator est_name = estimator.__class__.__name__ if ((hasattr(estimator, "penalty") and estimator.penalty == "l1") or "Lasso" in est_name): # the natural default threshold is 0 when l1 penalty was used threshold = 1e-5 else: threshold = "mean" if isinstance(threshold, six.string_types): if "*" in threshold: scale, reference = threshold.split("*") scale = float(scale.strip()) reference = reference.strip() if reference == "median": reference = np.median(importances) elif reference == "mean": reference = np.mean(importances) else: raise ValueError("Unknown reference: " + reference) threshold = scale * reference elif threshold == "median": threshold = np.median(importances) elif threshold == "mean": threshold = np.mean(importances) else: raise ValueError("Expected threshold='mean' or threshold='median' " "got %s" % threshold) else: threshold = float(threshold) return threshold class SelectFromModel(BaseEstimator, SelectorMixin, MetaEstimatorMixin): """Meta-transformer for selecting features based on importance weights. .. versionadded:: 0.17 Parameters ---------- estimator : object The base estimator from which the transformer is built. This can be both a fitted (if ``prefit`` is set to True) or a non-fitted estimator. The estimator must have either a ``feature_importances_`` or ``coef_`` attribute after fitting. threshold : string, float, optional default None The threshold value to use for feature selection. Features whose importance is greater or equal are kept while the others are discarded. If "median" (resp. "mean"), then the ``threshold`` value is the median (resp. the mean) of the feature importances. A scaling factor (e.g., "1.25*mean") may also be used. If None and if the estimator has a parameter penalty set to l1, either explicitly or implicitly (e.g, Lasso), the threshold used is 1e-5. Otherwise, "mean" is used by default. prefit : bool, default False Whether a prefit model is expected to be passed into the constructor directly or not. If True, ``transform`` must be called directly and SelectFromModel cannot be used with ``cross_val_score``, ``GridSearchCV`` and similar utilities that clone the estimator. Otherwise train the model using ``fit`` and then ``transform`` to do feature selection. 
norm_order : non-zero int, inf, -inf, default 1 Order of the norm used to filter the vectors of coefficients below ``threshold`` in the case where the ``coef_`` attribute of the estimator is of dimension 2. Attributes ---------- estimator_ : an estimator The base estimator from which the transformer is built. This is stored only when a non-fitted estimator is passed to the ``SelectFromModel``, i.e when prefit is False. threshold_ : float The threshold value used for feature selection. """ def __init__(self, estimator, threshold=None, prefit=False, norm_order=1): self.estimator = estimator self.threshold = threshold self.prefit = prefit self.norm_order = norm_order def _get_support_mask(self): # SelectFromModel can directly call on transform. if self.prefit: estimator = self.estimator elif hasattr(self, 'estimator_'): estimator = self.estimator_ else: raise ValueError( 'Either fit SelectFromModel before transform or set "prefit=' 'True" and pass a fitted estimator to the constructor.') scores = _get_feature_importances(estimator, self.norm_order) threshold = _calculate_threshold(estimator, scores, self.threshold) return scores >= threshold def fit(self, X, y=None, **fit_params): """Fit the SelectFromModel meta-transformer. Parameters ---------- X : array-like of shape (n_samples, n_features) The training input samples. y : array-like, shape (n_samples,) The target values (integers that correspond to classes in classification, real numbers in regression). **fit_params : Other estimator specific parameters Returns ------- self : object Returns self. """ if self.prefit: raise NotFittedError( "Since 'prefit=True', call transform directly") self.estimator_ = clone(self.estimator) self.estimator_.fit(X, y, **fit_params) return self @property def threshold_(self): scores = _get_feature_importances(self.estimator_, self.norm_order) return _calculate_threshold(self.estimator, scores, self.threshold) @if_delegate_has_method('estimator') def partial_fit(self, X, y=None, **fit_params): """Fit the SelectFromModel meta-transformer only once. Parameters ---------- X : array-like of shape (n_samples, n_features) The training input samples. y : array-like, shape (n_samples,) The target values (integers that correspond to classes in classification, real numbers in regression). **fit_params : Other estimator specific parameters Returns ------- self : object Returns self. """ if self.prefit: raise NotFittedError( "Since 'prefit=True', call transform directly") if not hasattr(self, "estimator_"): self.estimator_ = clone(self.estimator) self.estimator_.partial_fit(X, y, **fit_params) return self
7,363
35.098039
79
py
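A minimal sketch of SelectFromModel as defined above, wrapping a logistic regression whose coefficients supply the feature importances. The choice of estimator and the "mean" threshold are assumptions made for the example, not requirements of the class.

from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression

X, y = load_iris(return_X_y=True)
selector = SelectFromModel(LogisticRegression(), threshold="mean")
selector.fit(X, y)
print(selector.threshold_)             # mean of the per-feature importances derived from coef_
print(selector.get_support())          # boolean mask over the four iris features
print(selector.transform(X).shape)     # only the features above the threshold remain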
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/feature_selection/base.py
# -*- coding: utf-8 -*- """Generic feature selection mixin""" # Authors: G. Varoquaux, A. Gramfort, L. Buitinck, J. Nothman # License: BSD 3 clause from abc import ABCMeta, abstractmethod from warnings import warn import numpy as np from scipy.sparse import issparse, csc_matrix from ..base import TransformerMixin from ..utils import check_array, safe_mask from ..externals import six class SelectorMixin(six.with_metaclass(ABCMeta, TransformerMixin)): """ Transformer mixin that performs feature selection given a support mask This mixin provides a feature selector implementation with `transform` and `inverse_transform` functionality given an implementation of `_get_support_mask`. """ def get_support(self, indices=False): """ Get a mask, or integer index, of the features selected Parameters ---------- indices : boolean (default False) If True, the return value will be an array of integers, rather than a boolean mask. Returns ------- support : array An index that selects the retained features from a feature vector. If `indices` is False, this is a boolean array of shape [# input features], in which an element is True iff its corresponding feature is selected for retention. If `indices` is True, this is an integer array of shape [# output features] whose values are indices into the input feature vector. """ mask = self._get_support_mask() return mask if not indices else np.where(mask)[0] @abstractmethod def _get_support_mask(self): """ Get the boolean mask indicating which features are selected Returns ------- support : boolean array of shape [# input features] An element is True iff its corresponding feature is selected for retention. """ def transform(self, X): """Reduce X to the selected features. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. Returns ------- X_r : array of shape [n_samples, n_selected_features] The input samples with only the selected features. """ X = check_array(X, accept_sparse='csr') mask = self.get_support() if not mask.any(): warn("No features were selected: either the data is" " too noisy or the selection test too strict.", UserWarning) return np.empty(0).reshape((X.shape[0], 0)) if len(mask) != X.shape[1]: raise ValueError("X has a different shape than during fitting.") return X[:, safe_mask(X, mask)] def inverse_transform(self, X): """ Reverse the transformation operation Parameters ---------- X : array of shape [n_samples, n_selected_features] The input samples. Returns ------- X_r : array of shape [n_samples, n_original_features] `X` with columns of zeros inserted where features would have been removed by `transform`. """ if issparse(X): X = X.tocsc() # insert additional entries in indptr: # e.g. if transform changed indptr from [0 2 6 7] to [0 2 3] # col_nonzeros here will be [2 0 1] so indptr becomes [0 2 2 3] it = self.inverse_transform(np.diff(X.indptr).reshape(1, -1)) col_nonzeros = it.ravel() indptr = np.concatenate([[0], np.cumsum(col_nonzeros)]) Xt = csc_matrix((X.data, X.indices, indptr), shape=(X.shape[0], len(indptr) - 1), dtype=X.dtype) return Xt support = self.get_support() X = check_array(X) if support.sum() != X.shape[1]: raise ValueError("X has a different shape than during fitting.") if X.ndim == 1: X = X[None, :] Xt = np.zeros((X.shape[0], support.size), dtype=X.dtype) Xt[:, support] = X return Xt
4,188
33.056911
79
py
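SelectorMixin itself is abstract, so this sketch exercises its get_support / transform / inverse_transform interface through SelectKBest, one concrete subclass chosen purely for illustration.

from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, f_classif

X, y = load_iris(return_X_y=True)
kbest = SelectKBest(f_classif, k=2).fit(X, y)
print(kbest.get_support())             # boolean mask over the input features
X_r = kbest.transform(X)               # keep only the selected columns
X_back = kbest.inverse_transform(X_r)  # zeros re-inserted where features were dropped
print(X_r.shape, X_back.shape)         # (150, 2) (150, 4)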
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/feature_selection/variance_threshold.py
# Author: Lars Buitinck # License: 3-clause BSD import numpy as np from ..base import BaseEstimator from .base import SelectorMixin from ..utils import check_array from ..utils.sparsefuncs import mean_variance_axis from ..utils.validation import check_is_fitted class VarianceThreshold(BaseEstimator, SelectorMixin): """Feature selector that removes all low-variance features. This feature selection algorithm looks only at the features (X), not the desired outputs (y), and can thus be used for unsupervised learning. Read more in the :ref:`User Guide <variance_threshold>`. Parameters ---------- threshold : float, optional Features with a training-set variance lower than this threshold will be removed. The default is to keep all features with non-zero variance, i.e. remove the features that have the same value in all samples. Attributes ---------- variances_ : array, shape (n_features,) Variances of individual features. Examples -------- The following dataset has integer features, two of which are the same in every sample. These are removed with the default setting for threshold:: >>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]] >>> selector = VarianceThreshold() >>> selector.fit_transform(X) array([[2, 0], [1, 4], [1, 1]]) """ def __init__(self, threshold=0.): self.threshold = threshold def fit(self, X, y=None): """Learn empirical variances from X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Sample vectors from which to compute variances. y : any Ignored. This parameter exists only for compatibility with sklearn.pipeline.Pipeline. Returns ------- self """ X = check_array(X, ('csr', 'csc'), dtype=np.float64) if hasattr(X, "toarray"): # sparse matrix _, self.variances_ = mean_variance_axis(X, axis=0) else: self.variances_ = np.var(X, axis=0) if np.all(self.variances_ <= self.threshold): msg = "No feature in X meets the variance threshold {0:.5f}" if X.shape[0] == 1: msg += " (X contains only one sample)" raise ValueError(msg.format(self.threshold)) return self def _get_support_mask(self): check_is_fitted(self, 'variances_') return self.variances_ > self.threshold
2,572
30
79
py
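A short sketch of VarianceThreshold on the same toy matrix used in its docstring: with the default threshold of 0.0 the two constant columns are removed, and the learned per-feature variances are available afterwards.

import numpy as np
from sklearn.feature_selection import VarianceThreshold

X = np.array([[0, 2, 0, 3],
              [0, 1, 4, 3],
              [0, 1, 1, 3]])
selector = VarianceThreshold()         # threshold=0.0 drops features that are constant
print(selector.fit_transform(X))       # columns 0 and 3 are removed
print(selector.variances_)             # per-feature variances learned during fit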
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/feature_selection/mutual_info_.py
# Author: Nikolay Mayorov <[email protected]> # License: 3-clause BSD from __future__ import division import numpy as np from scipy.sparse import issparse from scipy.special import digamma from ..externals.six import moves from ..metrics.cluster.supervised import mutual_info_score from ..neighbors import NearestNeighbors from ..preprocessing import scale from ..utils import check_random_state from ..utils.validation import check_X_y from ..utils.multiclass import check_classification_targets def _compute_mi_cc(x, y, n_neighbors): """Compute mutual information between two continuous variables. Parameters ---------- x, y : ndarray, shape (n_samples,) Samples of two continuous random variables, must have an identical shape. n_neighbors : int Number of nearest neighbors to search for each point, see [1]_. Returns ------- mi : float Estimated mutual information. If it turned out to be negative it is replace by 0. Notes ----- True mutual information can't be negative. If its estimate by a numerical method is negative, it means (providing the method is adequate) that the mutual information is close to 0 and replacing it by 0 is a reasonable strategy. References ---------- .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual information". Phys. Rev. E 69, 2004. """ n_samples = x.size x = x.reshape((-1, 1)) y = y.reshape((-1, 1)) xy = np.hstack((x, y)) # Here we rely on NearestNeighbors to select the fastest algorithm. nn = NearestNeighbors(metric='chebyshev', n_neighbors=n_neighbors) nn.fit(xy) radius = nn.kneighbors()[0] radius = np.nextafter(radius[:, -1], 0) # Algorithm is selected explicitly to allow passing an array as radius # later (not all algorithms support this). nn.set_params(algorithm='kd_tree') nn.fit(x) ind = nn.radius_neighbors(radius=radius, return_distance=False) nx = np.array([i.size for i in ind]) nn.fit(y) ind = nn.radius_neighbors(radius=radius, return_distance=False) ny = np.array([i.size for i in ind]) mi = (digamma(n_samples) + digamma(n_neighbors) - np.mean(digamma(nx + 1)) - np.mean(digamma(ny + 1))) return max(0, mi) def _compute_mi_cd(c, d, n_neighbors): """Compute mutual information between continuous and discrete variables. Parameters ---------- c : ndarray, shape (n_samples,) Samples of a continuous random variable. d : ndarray, shape (n_samples,) Samples of a discrete random variable. n_neighbors : int Number of nearest neighbors to search for each point, see [1]_. Returns ------- mi : float Estimated mutual information. If it turned out to be negative it is replace by 0. Notes ----- True mutual information can't be negative. If its estimate by a numerical method is negative, it means (providing the method is adequate) that the mutual information is close to 0 and replacing it by 0 is a reasonable strategy. References ---------- .. [1] B. C. Ross "Mutual Information between Discrete and Continuous Data Sets". PLoS ONE 9(2), 2014. """ n_samples = c.shape[0] c = c.reshape((-1, 1)) radius = np.empty(n_samples) label_counts = np.empty(n_samples) k_all = np.empty(n_samples) nn = NearestNeighbors() for label in np.unique(d): mask = d == label count = np.sum(mask) if count > 1: k = min(n_neighbors, count - 1) nn.set_params(n_neighbors=k) nn.fit(c[mask]) r = nn.kneighbors()[0] radius[mask] = np.nextafter(r[:, -1], 0) k_all[mask] = k label_counts[mask] = count # Ignore points with unique labels. 
mask = label_counts > 1 n_samples = np.sum(mask) label_counts = label_counts[mask] k_all = k_all[mask] c = c[mask] radius = radius[mask] nn.set_params(algorithm='kd_tree') nn.fit(c) ind = nn.radius_neighbors(radius=radius, return_distance=False) m_all = np.array([i.size for i in ind]) mi = (digamma(n_samples) + np.mean(digamma(k_all)) - np.mean(digamma(label_counts)) - np.mean(digamma(m_all + 1))) return max(0, mi) def _compute_mi(x, y, x_discrete, y_discrete, n_neighbors=3): """Compute mutual information between two variables. This is a simple wrapper which selects a proper function to call based on whether `x` and `y` are discrete or not. """ if x_discrete and y_discrete: return mutual_info_score(x, y) elif x_discrete and not y_discrete: return _compute_mi_cd(y, x, n_neighbors) elif not x_discrete and y_discrete: return _compute_mi_cd(x, y, n_neighbors) else: return _compute_mi_cc(x, y, n_neighbors) def _iterate_columns(X, columns=None): """Iterate over columns of a matrix. Parameters ---------- X : ndarray or csc_matrix, shape (n_samples, n_features) Matrix over which to iterate. columns : iterable or None, default None Indices of columns to iterate over. If None, iterate over all columns. Yields ------ x : ndarray, shape (n_samples,) Columns of `X` in dense format. """ if columns is None: columns = range(X.shape[1]) if issparse(X): for i in columns: x = np.zeros(X.shape[0]) start_ptr, end_ptr = X.indptr[i], X.indptr[i + 1] x[X.indices[start_ptr:end_ptr]] = X.data[start_ptr:end_ptr] yield x else: for i in columns: yield X[:, i] def _estimate_mi(X, y, discrete_features='auto', discrete_target=False, n_neighbors=3, copy=True, random_state=None): """Estimate mutual information between the features and the target. Parameters ---------- X : array_like or sparse matrix, shape (n_samples, n_features) Feature matrix. y : array_like, shape (n_samples,) Target vector. discrete_features : {'auto', bool, array_like}, default 'auto' If bool, then determines whether to consider all features discrete or continuous. If array, then it should be either a boolean mask with shape (n_features,) or array with indices of discrete features. If 'auto', it is assigned to False for dense `X` and to True for sparse `X`. discrete_target : bool, default False Whether to consider `y` as a discrete variable. n_neighbors : int, default 3 Number of neighbors to use for MI estimation for continuous variables, see [1]_ and [2]_. Higher values reduce variance of the estimation, but could introduce a bias. copy : bool, default True Whether to make a copy of the given data. If set to False, the initial data will be overwritten. random_state : int, RandomState instance or None, optional, default None The seed of the pseudo random number generator for adding small noise to continuous variables in order to remove repeated values. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- mi : ndarray, shape (n_features,) Estimated mutual information between each feature and the target. A negative value will be replaced by 0. References ---------- .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual information". Phys. Rev. E 69, 2004. .. [2] B. C. Ross "Mutual Information between Discrete and Continuous Data Sets". PLoS ONE 9(2), 2014. 
""" X, y = check_X_y(X, y, accept_sparse='csc', y_numeric=not discrete_target) n_samples, n_features = X.shape if discrete_features == 'auto': discrete_features = issparse(X) if isinstance(discrete_features, bool): discrete_mask = np.empty(n_features, dtype=bool) discrete_mask.fill(discrete_features) else: discrete_features = np.asarray(discrete_features) if discrete_features.dtype != 'bool': discrete_mask = np.zeros(n_features, dtype=bool) discrete_mask[discrete_features] = True else: discrete_mask = discrete_features continuous_mask = ~discrete_mask if np.any(continuous_mask) and issparse(X): raise ValueError("Sparse matrix `X` can't have continuous features.") rng = check_random_state(random_state) if np.any(continuous_mask): if copy: X = X.copy() if not discrete_target: X[:, continuous_mask] = scale(X[:, continuous_mask], with_mean=False, copy=False) # Add small noise to continuous features as advised in Kraskov et. al. X = X.astype(float) means = np.maximum(1, np.mean(np.abs(X[:, continuous_mask]), axis=0)) X[:, continuous_mask] += 1e-10 * means * rng.randn( n_samples, np.sum(continuous_mask)) if not discrete_target: y = scale(y, with_mean=False) y += 1e-10 * np.maximum(1, np.mean(np.abs(y))) * rng.randn(n_samples) mi = [_compute_mi(x, y, discrete_feature, discrete_target, n_neighbors) for x, discrete_feature in moves.zip(_iterate_columns(X), discrete_mask)] return np.array(mi) def mutual_info_regression(X, y, discrete_features='auto', n_neighbors=3, copy=True, random_state=None): """Estimate mutual information for a continuous target variable. Mutual information (MI) [1]_ between two random variables is a non-negative value, which measures the dependency between the variables. It is equal to zero if and only if two random variables are independent, and higher values mean higher dependency. The function relies on nonparametric methods based on entropy estimation from k-nearest neighbors distances as described in [2]_ and [3]_. Both methods are based on the idea originally proposed in [4]_. It can be used for univariate features selection, read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- X : array_like or sparse matrix, shape (n_samples, n_features) Feature matrix. y : array_like, shape (n_samples,) Target vector. discrete_features : {'auto', bool, array_like}, default 'auto' If bool, then determines whether to consider all features discrete or continuous. If array, then it should be either a boolean mask with shape (n_features,) or array with indices of discrete features. If 'auto', it is assigned to False for dense `X` and to True for sparse `X`. n_neighbors : int, default 3 Number of neighbors to use for MI estimation for continuous variables, see [2]_ and [3]_. Higher values reduce variance of the estimation, but could introduce a bias. copy : bool, default True Whether to make a copy of the given data. If set to False, the initial data will be overwritten. random_state : int, RandomState instance or None, optional, default None The seed of the pseudo random number generator for adding small noise to continuous variables in order to remove repeated values. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- mi : ndarray, shape (n_features,) Estimated mutual information between each feature and the target. Notes ----- 1. 
The term "discrete features" is used instead of naming them "categorical", because it describes the essence more accurately. For example, pixel intensities of an image are discrete features (but hardly categorical) and you will get better results if mark them as such. Also note, that treating a continuous variable as discrete and vice versa will usually give incorrect results, so be attentive about that. 2. True mutual information can't be negative. If its estimate turns out to be negative, it is replaced by zero. References ---------- .. [1] `Mutual Information <https://en.wikipedia.org/wiki/Mutual_information>`_ on Wikipedia. .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual information". Phys. Rev. E 69, 2004. .. [3] B. C. Ross "Mutual Information between Discrete and Continuous Data Sets". PLoS ONE 9(2), 2014. .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16 """ return _estimate_mi(X, y, discrete_features, False, n_neighbors, copy, random_state) def mutual_info_classif(X, y, discrete_features='auto', n_neighbors=3, copy=True, random_state=None): """Estimate mutual information for a discrete target variable. Mutual information (MI) [1]_ between two random variables is a non-negative value, which measures the dependency between the variables. It is equal to zero if and only if two random variables are independent, and higher values mean higher dependency. The function relies on nonparametric methods based on entropy estimation from k-nearest neighbors distances as described in [2]_ and [3]_. Both methods are based on the idea originally proposed in [4]_. It can be used for univariate features selection, read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- X : array_like or sparse matrix, shape (n_samples, n_features) Feature matrix. y : array_like, shape (n_samples,) Target vector. discrete_features : {'auto', bool, array_like}, default 'auto' If bool, then determines whether to consider all features discrete or continuous. If array, then it should be either a boolean mask with shape (n_features,) or array with indices of discrete features. If 'auto', it is assigned to False for dense `X` and to True for sparse `X`. n_neighbors : int, default 3 Number of neighbors to use for MI estimation for continuous variables, see [2]_ and [3]_. Higher values reduce variance of the estimation, but could introduce a bias. copy : bool, default True Whether to make a copy of the given data. If set to False, the initial data will be overwritten. random_state : int, RandomState instance or None, optional, default None The seed of the pseudo random number generator for adding small noise to continuous variables in order to remove repeated values. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- mi : ndarray, shape (n_features,) Estimated mutual information between each feature and the target. Notes ----- 1. The term "discrete features" is used instead of naming them "categorical", because it describes the essence more accurately. For example, pixel intensities of an image are discrete features (but hardly categorical) and you will get better results if mark them as such. 
Also note that treating a continuous variable as discrete and
       vice versa will usually give incorrect results, so be attentive about
       that.
    2. True mutual information can't be negative. If its estimate turns out
       to be negative, it is replaced by zero.

    References
    ----------
    .. [1] `Mutual Information <https://en.wikipedia.org/wiki/Mutual_information>`_
           on Wikipedia.
    .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
           information". Phys. Rev. E 69, 2004.
    .. [3] B. C. Ross "Mutual Information between Discrete and Continuous
           Data Sets". PLoS ONE 9(2), 2014.
    .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
           of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16
    """
    check_classification_targets(y)
    return _estimate_mi(X, y, discrete_features, True, n_neighbors, copy,
                        random_state)
16,914
36.505543
83
py
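A minimal usage sketch for the estimators defined in mutual_info_.py above. It is not part of the original file; it assumes a scikit-learn installation of roughly this vintage and uses only synthetic data.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.feature_selection import mutual_info_classif, mutual_info_regression

# Three informative columns out of ten; the rest are noise.
X, y = make_classification(n_samples=500, n_features=10, n_informative=3,
                           n_redundant=0, shuffle=False, random_state=0)

# Discrete target -> mutual_info_classif. Estimates are non-negative, and the
# informative columns (0-2 here, since shuffle=False) should typically score highest.
mi = mutual_info_classif(X, y, n_neighbors=3, random_state=0)
print(np.argsort(mi)[::-1][:3])  # indices of the three highest-MI features

# Continuous target -> mutual_info_regression; y_r is a noisy function of X_r[:, 0].
rng = np.random.RandomState(0)
X_r = rng.normal(size=(500, 4))
y_r = X_r[:, 0] ** 2 + 0.1 * rng.normal(size=500)
print(mutual_info_regression(X_r, y_r, random_state=0))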
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/feature_selection/rfe.py
# Authors: Alexandre Gramfort <[email protected]> # Vincent Michel <[email protected]> # Gilles Louppe <[email protected]> # # License: BSD 3 clause """Recursive feature elimination for feature ranking""" import numpy as np from ..utils import check_X_y, safe_sqr from ..utils.metaestimators import if_delegate_has_method from ..utils.validation import check_is_fitted from ..base import BaseEstimator from ..base import MetaEstimatorMixin from ..base import clone from ..base import is_classifier from ..externals.joblib import Parallel, delayed from ..model_selection import check_cv from ..model_selection._validation import _safe_split, _score from ..metrics.scorer import check_scoring from .base import SelectorMixin def _rfe_single_fit(rfe, estimator, X, y, train, test, scorer): """ Return the score for a fit across one fold. """ X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) return rfe._fit( X_train, y_train, lambda estimator, features: _score(estimator, X_test[:, features], y_test, scorer)).scores_ class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin): """Feature ranking with recursive feature elimination. Given an external estimator that assigns weights to features (e.g., the coefficients of a linear model), the goal of recursive feature elimination (RFE) is to select features by recursively considering smaller and smaller sets of features. First, the estimator is trained on the initial set of features and the importance of each feature is obtained either through a ``coef_`` attribute or through a ``feature_importances_`` attribute. Then, the least important features are pruned from current set of features. That procedure is recursively repeated on the pruned set until the desired number of features to select is eventually reached. Read more in the :ref:`User Guide <rfe>`. Parameters ---------- estimator : object A supervised learning estimator with a ``fit`` method that provides information about feature importance either through a ``coef_`` attribute or through a ``feature_importances_`` attribute. n_features_to_select : int or None (default=None) The number of features to select. If `None`, half of the features are selected. step : int or float, optional (default=1) If greater than or equal to 1, then `step` corresponds to the (integer) number of features to remove at each iteration. If within (0.0, 1.0), then `step` corresponds to the percentage (rounded down) of features to remove at each iteration. verbose : int, default=0 Controls verbosity of output. Attributes ---------- n_features_ : int The number of selected features. support_ : array of shape [n_features] The mask of selected features. ranking_ : array of shape [n_features] The feature ranking, such that ``ranking_[i]`` corresponds to the ranking position of the i-th feature. Selected (i.e., estimated best) features are assigned rank 1. estimator_ : object The external estimator fit on the reduced dataset. Examples -------- The following example shows how to retrieve the 5 right informative features in the Friedman #1 dataset. 
>>> from sklearn.datasets import make_friedman1 >>> from sklearn.feature_selection import RFE >>> from sklearn.svm import SVR >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) >>> estimator = SVR(kernel="linear") >>> selector = RFE(estimator, 5, step=1) >>> selector = selector.fit(X, y) >>> selector.support_ # doctest: +NORMALIZE_WHITESPACE array([ True, True, True, True, True, False, False, False, False, False], dtype=bool) >>> selector.ranking_ array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5]) References ---------- .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection for cancer classification using support vector machines", Mach. Learn., 46(1-3), 389--422, 2002. """ def __init__(self, estimator, n_features_to_select=None, step=1, verbose=0): self.estimator = estimator self.n_features_to_select = n_features_to_select self.step = step self.verbose = verbose @property def _estimator_type(self): return self.estimator._estimator_type def fit(self, X, y): """Fit the RFE model and then the underlying estimator on the selected features. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] The training input samples. y : array-like, shape = [n_samples] The target values. """ return self._fit(X, y) def _fit(self, X, y, step_score=None): # Parameter step_score controls the calculation of self.scores_ # step_score is not exposed to users # and is used when implementing RFECV # self.scores_ will not be calculated when calling _fit through fit X, y = check_X_y(X, y, "csc") # Initialization n_features = X.shape[1] if self.n_features_to_select is None: n_features_to_select = n_features // 2 else: n_features_to_select = self.n_features_to_select if 0.0 < self.step < 1.0: step = int(max(1, self.step * n_features)) else: step = int(self.step) if step <= 0: raise ValueError("Step must be >0") support_ = np.ones(n_features, dtype=np.bool) ranking_ = np.ones(n_features, dtype=np.int) if step_score: self.scores_ = [] # Elimination while np.sum(support_) > n_features_to_select: # Remaining features features = np.arange(n_features)[support_] # Rank the remaining features estimator = clone(self.estimator) if self.verbose > 0: print("Fitting estimator with %d features." 
% np.sum(support_)) estimator.fit(X[:, features], y) # Get coefs if hasattr(estimator, 'coef_'): coefs = estimator.coef_ else: coefs = getattr(estimator, 'feature_importances_', None) if coefs is None: raise RuntimeError('The classifier does not expose ' '"coef_" or "feature_importances_" ' 'attributes') # Get ranks if coefs.ndim > 1: ranks = np.argsort(safe_sqr(coefs).sum(axis=0)) else: ranks = np.argsort(safe_sqr(coefs)) # for sparse case ranks is matrix ranks = np.ravel(ranks) # Eliminate the worse features threshold = min(step, np.sum(support_) - n_features_to_select) # Compute step score on the previous selection iteration # because 'estimator' must use features # that have not been eliminated yet if step_score: self.scores_.append(step_score(estimator, features)) support_[features[ranks][:threshold]] = False ranking_[np.logical_not(support_)] += 1 # Set final attributes features = np.arange(n_features)[support_] self.estimator_ = clone(self.estimator) self.estimator_.fit(X[:, features], y) # Compute step score when only n_features_to_select features left if step_score: self.scores_.append(step_score(self.estimator_, features)) self.n_features_ = support_.sum() self.support_ = support_ self.ranking_ = ranking_ return self @if_delegate_has_method(delegate='estimator') def predict(self, X): """Reduce X to the selected features and then predict using the underlying estimator. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. Returns ------- y : array of shape [n_samples] The predicted target values. """ check_is_fitted(self, 'estimator_') return self.estimator_.predict(self.transform(X)) @if_delegate_has_method(delegate='estimator') def score(self, X, y): """Reduce X to the selected features and then return the score of the underlying estimator. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The target values. """ check_is_fitted(self, 'estimator_') return self.estimator_.score(self.transform(X), y) def _get_support_mask(self): check_is_fitted(self, 'support_') return self.support_ @if_delegate_has_method(delegate='estimator') def decision_function(self, X): check_is_fitted(self, 'estimator_') return self.estimator_.decision_function(self.transform(X)) @if_delegate_has_method(delegate='estimator') def predict_proba(self, X): check_is_fitted(self, 'estimator_') return self.estimator_.predict_proba(self.transform(X)) @if_delegate_has_method(delegate='estimator') def predict_log_proba(self, X): check_is_fitted(self, 'estimator_') return self.estimator_.predict_log_proba(self.transform(X)) class RFECV(RFE, MetaEstimatorMixin): """Feature ranking with recursive feature elimination and cross-validated selection of the best number of features. Read more in the :ref:`User Guide <rfe>`. Parameters ---------- estimator : object A supervised learning estimator with a ``fit`` method that provides information about feature importance either through a ``coef_`` attribute or through a ``feature_importances_`` attribute. step : int or float, optional (default=1) If greater than or equal to 1, then `step` corresponds to the (integer) number of features to remove at each iteration. If within (0.0, 1.0), then `step` corresponds to the percentage (rounded down) of features to remove at each iteration. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. 
Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`sklearn.model_selection.StratifiedKFold` is used. If the estimator is a classifier or if ``y`` is neither binary nor multiclass, :class:`sklearn.model_selection.KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. verbose : int, default=0 Controls verbosity of output. n_jobs : int, default 1 Number of cores to run in parallel while fitting across folds. Defaults to 1 core. If `n_jobs=-1`, then number of jobs is set to number of cores. Attributes ---------- n_features_ : int The number of selected features with cross-validation. support_ : array of shape [n_features] The mask of selected features. ranking_ : array of shape [n_features] The feature ranking, such that `ranking_[i]` corresponds to the ranking position of the i-th feature. Selected (i.e., estimated best) features are assigned rank 1. grid_scores_ : array of shape [n_subsets_of_features] The cross-validation scores such that ``grid_scores_[i]`` corresponds to the CV score of the i-th subset of features. estimator_ : object The external estimator fit on the reduced dataset. Notes ----- The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1, where step is the number of features removed at each iteration. Examples -------- The following example shows how to retrieve the a-priori not known 5 informative features in the Friedman #1 dataset. >>> from sklearn.datasets import make_friedman1 >>> from sklearn.feature_selection import RFECV >>> from sklearn.svm import SVR >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0) >>> estimator = SVR(kernel="linear") >>> selector = RFECV(estimator, step=1, cv=5) >>> selector = selector.fit(X, y) >>> selector.support_ # doctest: +NORMALIZE_WHITESPACE array([ True, True, True, True, True, False, False, False, False, False], dtype=bool) >>> selector.ranking_ array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5]) References ---------- .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection for cancer classification using support vector machines", Mach. Learn., 46(1-3), 389--422, 2002. """ def __init__(self, estimator, step=1, cv=None, scoring=None, verbose=0, n_jobs=1): self.estimator = estimator self.step = step self.cv = cv self.scoring = scoring self.verbose = verbose self.n_jobs = n_jobs def fit(self, X, y): """Fit the RFE model and automatically tune the number of selected features. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vector, where `n_samples` is the number of samples and `n_features` is the total number of features. y : array-like, shape = [n_samples] Target values (integers for classification, real numbers for regression). 
""" X, y = check_X_y(X, y, "csr") # Initialization cv = check_cv(self.cv, y, is_classifier(self.estimator)) scorer = check_scoring(self.estimator, scoring=self.scoring) n_features = X.shape[1] n_features_to_select = 1 if 0.0 < self.step < 1.0: step = int(max(1, self.step * n_features)) else: step = int(self.step) if step <= 0: raise ValueError("Step must be >0") rfe = RFE(estimator=self.estimator, n_features_to_select=n_features_to_select, step=self.step, verbose=self.verbose) # Determine the number of subsets of features by fitting across # the train folds and choosing the "features_to_select" parameter # that gives the least averaged error across all folds. # Note that joblib raises a non-picklable error for bound methods # even if n_jobs is set to 1 with the default multiprocessing # backend. # This branching is done so that to # make sure that user code that sets n_jobs to 1 # and provides bound methods as scorers is not broken with the # addition of n_jobs parameter in version 0.18. if self.n_jobs == 1: parallel, func = list, _rfe_single_fit else: parallel, func, = Parallel(n_jobs=self.n_jobs), delayed(_rfe_single_fit) scores = parallel( func(rfe, self.estimator, X, y, train, test, scorer) for train, test in cv.split(X, y)) scores = np.sum(scores, axis=0) n_features_to_select = max( n_features - (np.argmax(scores) * step), n_features_to_select) # Re-execute an elimination with best_k over the whole set rfe = RFE(estimator=self.estimator, n_features_to_select=n_features_to_select, step=self.step) rfe.fit(X, y) # Set final attributes self.support_ = rfe.support_ self.n_features_ = rfe.n_features_ self.ranking_ = rfe.ranking_ self.estimator_ = clone(self.estimator) self.estimator_.fit(self.transform(X), y) # Fixing a normalization error, n is equal to get_n_splits(X, y) - 1 # here, the scores are normalized by get_n_splits(X, y) self.grid_scores_ = scores[::-1] / cv.get_n_splits(X, y) return self
16,859
35.652174
84
py
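A short RFE/RFECV sketch mirroring the docstring examples in rfe.py above. It is not part of the original file and assumes the same scikit-learn environment.

from sklearn.datasets import make_friedman1
from sklearn.feature_selection import RFE, RFECV
from sklearn.svm import SVR

X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)

# Fixed target size: keep the 5 best features, dropping one per iteration.
rfe = RFE(SVR(kernel="linear"), n_features_to_select=5, step=1).fit(X, y)
print(rfe.support_)   # boolean mask of the selected columns
print(rfe.ranking_)   # rank 1 marks selected features

# Cross-validated variant: the number of features is chosen from the CV scores.
rfecv = RFECV(SVR(kernel="linear"), step=1, cv=5).fit(X, y)
print(rfecv.n_features_, rfecv.grid_scores_)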
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/feature_selection/univariate_selection.py
"""Univariate features selection.""" # Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay. # L. Buitinck, A. Joly # License: BSD 3 clause import numpy as np import warnings from scipy import special, stats from scipy.sparse import issparse from ..base import BaseEstimator from ..preprocessing import LabelBinarizer from ..utils import (as_float_array, check_array, check_X_y, safe_sqr, safe_mask) from ..utils.extmath import safe_sparse_dot, row_norms from ..utils.validation import check_is_fitted from .base import SelectorMixin def _clean_nans(scores): """ Fixes Issue #1240: NaNs can't be properly compared, so change them to the smallest value of scores's dtype. -inf seems to be unreliable. """ # XXX where should this function be called? fit? scoring functions # themselves? scores = as_float_array(scores, copy=True) scores[np.isnan(scores)] = np.finfo(scores.dtype).min return scores ###################################################################### # Scoring functions # The following function is a rewriting of scipy.stats.f_oneway # Contrary to the scipy.stats.f_oneway implementation it does not # copy the data while keeping the inputs unchanged. def f_oneway(*args): """Performs a 1-way ANOVA. The one-way ANOVA tests the null hypothesis that 2 or more groups have the same population mean. The test is applied to samples from two or more groups, possibly with differing sizes. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- sample1, sample2, ... : array_like, sparse matrices The sample measurements should be given as arguments. Returns ------- F-value : float The computed F-value of the test. p-value : float The associated p-value from the F-distribution. Notes ----- The ANOVA test has important assumptions that must be satisfied in order for the associated p-value to be valid. 1. The samples are independent 2. Each sample is from a normally distributed population 3. The population standard deviations of the groups are all equal. This property is known as homoscedasticity. If these assumptions are not true for a given set of data, it may still be possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although with some loss of power. The algorithm is from Heiman[2], pp.394-7. See ``scipy.stats.f_oneway`` that should give the same results while being less efficient. References ---------- .. [1] Lowry, Richard. "Concepts and Applications of Inferential Statistics". Chapter 14. http://faculty.vassar.edu/lowry/ch14pt1.html .. [2] Heiman, G.W. Research Methods in Statistics. 2002. """ n_classes = len(args) args = [as_float_array(a) for a in args] n_samples_per_class = np.array([a.shape[0] for a in args]) n_samples = np.sum(n_samples_per_class) ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args) sums_args = [np.asarray(a.sum(axis=0)) for a in args] square_of_sums_alldata = sum(sums_args) ** 2 square_of_sums_args = [s ** 2 for s in sums_args] sstot = ss_alldata - square_of_sums_alldata / float(n_samples) ssbn = 0. for k, _ in enumerate(args): ssbn += square_of_sums_args[k] / n_samples_per_class[k] ssbn -= square_of_sums_alldata / float(n_samples) sswn = sstot - ssbn dfbn = n_classes - 1 dfwn = n_samples - n_classes msb = ssbn / float(dfbn) msw = sswn / float(dfwn) constant_features_idx = np.where(msw == 0.)[0] if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size): warnings.warn("Features %s are constant." 
% constant_features_idx, UserWarning) f = msb / msw # flatten matrix to vector in sparse case f = np.asarray(f).ravel() prob = special.fdtrc(dfbn, dfwn, f) return f, prob def f_classif(X, y): """Compute the ANOVA F-value for the provided sample. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- X : {array-like, sparse matrix} shape = [n_samples, n_features] The set of regressors that will be tested sequentially. y : array of shape(n_samples) The data matrix. Returns ------- F : array, shape = [n_features,] The set of F values. pval : array, shape = [n_features,] The set of p-values. See also -------- chi2: Chi-squared stats of non-negative features for classification tasks. f_regression: F-value between label/feature for regression tasks. """ X, y = check_X_y(X, y, ['csr', 'csc', 'coo']) args = [X[safe_mask(X, y == k)] for k in np.unique(y)] return f_oneway(*args) def _chisquare(f_obs, f_exp): """Fast replacement for scipy.stats.chisquare. Version from https://github.com/scipy/scipy/pull/2525 with additional optimizations. """ f_obs = np.asarray(f_obs, dtype=np.float64) k = len(f_obs) # Reuse f_obs for chi-squared statistics chisq = f_obs chisq -= f_exp chisq **= 2 with np.errstate(invalid="ignore"): chisq /= f_exp chisq = chisq.sum(axis=0) return chisq, special.chdtrc(k - 1, chisq) def chi2(X, y): """Compute chi-squared stats between each non-negative feature and class. This score can be used to select the n_features features with the highest values for the test chi-squared statistic from X, which must contain only non-negative features such as booleans or frequencies (e.g., term counts in document classification), relative to the classes. Recall that the chi-square test measures dependence between stochastic variables, so using this function "weeds out" the features that are the most likely to be independent of class and therefore irrelevant for classification. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- X : {array-like, sparse matrix}, shape = (n_samples, n_features_in) Sample vectors. y : array-like, shape = (n_samples,) Target vector (class labels). Returns ------- chi2 : array, shape = (n_features,) chi2 statistics of each feature. pval : array, shape = (n_features,) p-values of each feature. Notes ----- Complexity of this algorithm is O(n_classes * n_features). See also -------- f_classif: ANOVA F-value between label/feature for classification tasks. f_regression: F-value between label/feature for regression tasks. """ # XXX: we might want to do some of the following in logspace instead for # numerical stability. X = check_array(X, accept_sparse='csr') if np.any((X.data if issparse(X) else X) < 0): raise ValueError("Input X must be non-negative.") Y = LabelBinarizer().fit_transform(y) if Y.shape[1] == 1: Y = np.append(1 - Y, Y, axis=1) observed = safe_sparse_dot(Y.T, X) # n_classes * n_features feature_count = X.sum(axis=0).reshape(1, -1) class_prob = Y.mean(axis=0).reshape(1, -1) expected = np.dot(class_prob.T, feature_count) return _chisquare(observed, expected) def f_regression(X, y, center=True): """Univariate linear regression tests. Linear model for testing the individual effect of each of many regressors. This is a scoring function to be used in a feature seletion procedure, not a free standing feature selection procedure. This is done in 2 steps: 1. The correlation between each regressor and the target is computed, that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) * std(y)). 
2. It is converted to an F score then to a p-value. For more on usage see the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- X : {array-like, sparse matrix} shape = (n_samples, n_features) The set of regressors that will be tested sequentially. y : array of shape(n_samples). The data matrix center : True, bool, If true, X and y will be centered. Returns ------- F : array, shape=(n_features,) F values of features. pval : array, shape=(n_features,) p-values of F-scores. See also -------- mutual_info_regression: Mutual information for a continuous target. f_classif: ANOVA F-value between label/feature for classification tasks. chi2: Chi-squared stats of non-negative features for classification tasks. SelectKBest: Select features based on the k highest scores. SelectFpr: Select features based on a false positive rate test. SelectFdr: Select features based on an estimated false discovery rate. SelectFwe: Select features based on family-wise error rate. SelectPercentile: Select features based on percentile of the highest scores. """ X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64) n_samples = X.shape[0] # compute centered values # note that E[(x - mean(x))*(y - mean(y))] = E[x*(y - mean(y))], so we # need not center X if center: y = y - np.mean(y) if issparse(X): X_means = X.mean(axis=0).getA1() else: X_means = X.mean(axis=0) # compute the scaled standard deviations via moments X_norms = np.sqrt(row_norms(X.T, squared=True) - n_samples * X_means ** 2) else: X_norms = row_norms(X.T) # compute the correlation corr = safe_sparse_dot(y, X) corr /= X_norms corr /= np.linalg.norm(y) # convert to p-value degrees_of_freedom = y.size - (2 if center else 1) F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom pv = stats.f.sf(F, 1, degrees_of_freedom) return F, pv ###################################################################### # Base classes class _BaseFilter(BaseEstimator, SelectorMixin): """Initialize the univariate feature selection. Parameters ---------- score_func : callable Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues) or a single array with scores. """ def __init__(self, score_func): self.score_func = score_func def fit(self, X, y): """Run score function on (X, y) and get the appropriate features. Parameters ---------- X : array-like, shape = [n_samples, n_features] The training input samples. y : array-like, shape = [n_samples] The target values (class labels in classification, real numbers in regression). Returns ------- self : object Returns self. """ X, y = check_X_y(X, y, ['csr', 'csc'], multi_output=True) if not callable(self.score_func): raise TypeError("The score function should be a callable, %s (%s) " "was passed." % (self.score_func, type(self.score_func))) self._check_params(X, y) score_func_ret = self.score_func(X, y) if isinstance(score_func_ret, (list, tuple)): self.scores_, self.pvalues_ = score_func_ret self.pvalues_ = np.asarray(self.pvalues_) else: self.scores_ = score_func_ret self.pvalues_ = None self.scores_ = np.asarray(self.scores_) return self def _check_params(self, X, y): pass ###################################################################### # Specific filters ###################################################################### class SelectPercentile(_BaseFilter): """Select features according to a percentile of the highest scores. Read more in the :ref:`User Guide <univariate_feature_selection>`. 
Parameters ---------- score_func : callable Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues) or a single array with scores. Default is f_classif (see below "See also"). The default function only works with classification tasks. percentile : int, optional, default=10 Percent of features to keep. Attributes ---------- scores_ : array-like, shape=(n_features,) Scores of features. pvalues_ : array-like, shape=(n_features,) p-values of feature scores, None if `score_func` returned only scores. Notes ----- Ties between features with equal scores will be broken in an unspecified way. See also -------- f_classif: ANOVA F-value between label/feature for classification tasks. mutual_info_classif: Mutual information for a discrete target. chi2: Chi-squared stats of non-negative features for classification tasks. f_regression: F-value between label/feature for regression tasks. mutual_info_regression: Mutual information for a continuous target. SelectKBest: Select features based on the k highest scores. SelectFpr: Select features based on a false positive rate test. SelectFdr: Select features based on an estimated false discovery rate. SelectFwe: Select features based on family-wise error rate. GenericUnivariateSelect: Univariate feature selector with configurable mode. """ def __init__(self, score_func=f_classif, percentile=10): super(SelectPercentile, self).__init__(score_func) self.percentile = percentile def _check_params(self, X, y): if not 0 <= self.percentile <= 100: raise ValueError("percentile should be >=0, <=100; got %r" % self.percentile) def _get_support_mask(self): check_is_fitted(self, 'scores_') # Cater for NaNs if self.percentile == 100: return np.ones(len(self.scores_), dtype=np.bool) elif self.percentile == 0: return np.zeros(len(self.scores_), dtype=np.bool) scores = _clean_nans(self.scores_) treshold = stats.scoreatpercentile(scores, 100 - self.percentile) mask = scores > treshold ties = np.where(scores == treshold)[0] if len(ties): max_feats = int(len(scores) * self.percentile / 100) kept_ties = ties[:max_feats - mask.sum()] mask[kept_ties] = True return mask class SelectKBest(_BaseFilter): """Select features according to the k highest scores. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- score_func : callable Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues) or a single array with scores. Default is f_classif (see below "See also"). The default function only works with classification tasks. k : int or "all", optional, default=10 Number of top features to select. The "all" option bypasses selection, for use in a parameter search. Attributes ---------- scores_ : array-like, shape=(n_features,) Scores of features. pvalues_ : array-like, shape=(n_features,) p-values of feature scores, None if `score_func` returned only scores. Notes ----- Ties between features with equal scores will be broken in an unspecified way. See also -------- f_classif: ANOVA F-value between label/feature for classification tasks. mutual_info_classif: Mutual information for a discrete target. chi2: Chi-squared stats of non-negative features for classification tasks. f_regression: F-value between label/feature for regression tasks. mutual_info_regression: Mutual information for a continuous target. SelectPercentile: Select features based on percentile of the highest scores. SelectFpr: Select features based on a false positive rate test. 
SelectFdr: Select features based on an estimated false discovery rate. SelectFwe: Select features based on family-wise error rate. GenericUnivariateSelect: Univariate feature selector with configurable mode. """ def __init__(self, score_func=f_classif, k=10): super(SelectKBest, self).__init__(score_func) self.k = k def _check_params(self, X, y): if not (self.k == "all" or 0 <= self.k <= X.shape[1]): raise ValueError("k should be >=0, <= n_features; got %r." "Use k='all' to return all features." % self.k) def _get_support_mask(self): check_is_fitted(self, 'scores_') if self.k == 'all': return np.ones(self.scores_.shape, dtype=bool) elif self.k == 0: return np.zeros(self.scores_.shape, dtype=bool) else: scores = _clean_nans(self.scores_) mask = np.zeros(scores.shape, dtype=bool) # Request a stable sort. Mergesort takes more memory (~40MB per # megafeature on x86-64). mask[np.argsort(scores, kind="mergesort")[-self.k:]] = 1 return mask class SelectFpr(_BaseFilter): """Filter: Select the pvalues below alpha based on a FPR test. FPR test stands for False Positive Rate test. It controls the total amount of false detections. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- score_func : callable Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues). Default is f_classif (see below "See also"). The default function only works with classification tasks. alpha : float, optional The highest p-value for features to be kept. Attributes ---------- scores_ : array-like, shape=(n_features,) Scores of features. pvalues_ : array-like, shape=(n_features,) p-values of feature scores. See also -------- f_classif: ANOVA F-value between label/feature for classification tasks. chi2: Chi-squared stats of non-negative features for classification tasks. mutual_info_classif: f_regression: F-value between label/feature for regression tasks. mutual_info_regression: Mutual information between features and the target. SelectPercentile: Select features based on percentile of the highest scores. SelectKBest: Select features based on the k highest scores. SelectFdr: Select features based on an estimated false discovery rate. SelectFwe: Select features based on family-wise error rate. GenericUnivariateSelect: Univariate feature selector with configurable mode. """ def __init__(self, score_func=f_classif, alpha=5e-2): super(SelectFpr, self).__init__(score_func) self.alpha = alpha def _get_support_mask(self): check_is_fitted(self, 'scores_') return self.pvalues_ < self.alpha class SelectFdr(_BaseFilter): """Filter: Select the p-values for an estimated false discovery rate This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound on the expected false discovery rate. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- score_func : callable Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues). Default is f_classif (see below "See also"). The default function only works with classification tasks. alpha : float, optional The highest uncorrected p-value for features to keep. Attributes ---------- scores_ : array-like, shape=(n_features,) Scores of features. pvalues_ : array-like, shape=(n_features,) p-values of feature scores. References ---------- https://en.wikipedia.org/wiki/False_discovery_rate See also -------- f_classif: ANOVA F-value between label/feature for classification tasks. mutual_info_classif: Mutual information for a discrete target. 
chi2: Chi-squared stats of non-negative features for classification tasks. f_regression: F-value between label/feature for regression tasks. mutual_info_regression: Mutual information for a contnuous target. SelectPercentile: Select features based on percentile of the highest scores. SelectKBest: Select features based on the k highest scores. SelectFpr: Select features based on a false positive rate test. SelectFwe: Select features based on family-wise error rate. GenericUnivariateSelect: Univariate feature selector with configurable mode. """ def __init__(self, score_func=f_classif, alpha=5e-2): super(SelectFdr, self).__init__(score_func) self.alpha = alpha def _get_support_mask(self): check_is_fitted(self, 'scores_') n_features = len(self.pvalues_) sv = np.sort(self.pvalues_) selected = sv[sv <= float(self.alpha) / n_features * np.arange(1, n_features + 1)] if selected.size == 0: return np.zeros_like(self.pvalues_, dtype=bool) return self.pvalues_ <= selected.max() class SelectFwe(_BaseFilter): """Filter: Select the p-values corresponding to Family-wise error rate Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- score_func : callable Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues). Default is f_classif (see below "See also"). The default function only works with classification tasks. alpha : float, optional The highest uncorrected p-value for features to keep. Attributes ---------- scores_ : array-like, shape=(n_features,) Scores of features. pvalues_ : array-like, shape=(n_features,) p-values of feature scores. See also -------- f_classif: ANOVA F-value between label/feature for classification tasks. chi2: Chi-squared stats of non-negative features for classification tasks. f_regression: F-value between label/feature for regression tasks. SelectPercentile: Select features based on percentile of the highest scores. SelectKBest: Select features based on the k highest scores. SelectFpr: Select features based on a false positive rate test. SelectFdr: Select features based on an estimated false discovery rate. GenericUnivariateSelect: Univariate feature selector with configurable mode. """ def __init__(self, score_func=f_classif, alpha=5e-2): super(SelectFwe, self).__init__(score_func) self.alpha = alpha def _get_support_mask(self): check_is_fitted(self, 'scores_') return (self.pvalues_ < self.alpha / len(self.pvalues_)) ###################################################################### # Generic filter ###################################################################### # TODO this class should fit on either p-values or scores, # depending on the mode. class GenericUnivariateSelect(_BaseFilter): """Univariate feature selector with configurable strategy. Read more in the :ref:`User Guide <univariate_feature_selection>`. Parameters ---------- score_func : callable Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues). For modes 'percentile' or 'kbest' it can return a single array scores. mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'} Feature selection mode. param : float or int depending on the feature selection mode Parameter of the corresponding mode. Attributes ---------- scores_ : array-like, shape=(n_features,) Scores of features. pvalues_ : array-like, shape=(n_features,) p-values of feature scores, None if `score_func` returned scores only. See also -------- f_classif: ANOVA F-value between label/feature for classification tasks. 
mutual_info_classif: Mutual information for a discrete target. chi2: Chi-squared stats of non-negative features for classification tasks. f_regression: F-value between label/feature for regression tasks. mutual_info_regression: Mutual information for a continuous target. SelectPercentile: Select features based on percentile of the highest scores. SelectKBest: Select features based on the k highest scores. SelectFpr: Select features based on a false positive rate test. SelectFdr: Select features based on an estimated false discovery rate. SelectFwe: Select features based on family-wise error rate. """ _selection_modes = {'percentile': SelectPercentile, 'k_best': SelectKBest, 'fpr': SelectFpr, 'fdr': SelectFdr, 'fwe': SelectFwe} def __init__(self, score_func=f_classif, mode='percentile', param=1e-5): super(GenericUnivariateSelect, self).__init__(score_func) self.mode = mode self.param = param def _make_selector(self): selector = self._selection_modes[self.mode](score_func=self.score_func) # Now perform some acrobatics to set the right named parameter in # the selector possible_params = selector._get_param_names() possible_params.remove('score_func') selector.set_params(**{possible_params[0]: self.param}) return selector def _check_params(self, X, y): if self.mode not in self._selection_modes: raise ValueError("The mode passed should be one of %s, %r," " (type %s) was passed." % (self._selection_modes.keys(), self.mode, type(self.mode))) self._make_selector()._check_params(X, y) def _get_support_mask(self): check_is_fitted(self, 'scores_') selector = self._make_selector() selector.pvalues_ = self.pvalues_ selector.scores_ = self.scores_ return selector._get_support_mask()
26,302
33.838411
80
py
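A brief sketch of the univariate filters from univariate_selection.py above applied to iris. It is not part of the original file; it assumes the standard scikit-learn datasets are available.

from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, SelectPercentile, chi2, f_classif

X, y = load_iris(return_X_y=True)

# chi2 requires non-negative features; the iris measurements satisfy that.
X_k2 = SelectKBest(chi2, k=2).fit_transform(X, y)
print(X_k2.shape)  # (150, 2)

# Same idea with the ANOVA F-test and a percentile criterion.
selector = SelectPercentile(f_classif, percentile=50).fit(X, y)
print(selector.scores_)
print(selector.pvalues_)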
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/feature_selection/__init__.py
""" The :mod:`sklearn.feature_selection` module implements feature selection algorithms. It currently includes univariate filter selection methods and the recursive feature elimination algorithm. """ from .univariate_selection import chi2 from .univariate_selection import f_classif from .univariate_selection import f_oneway from .univariate_selection import f_regression from .univariate_selection import SelectPercentile from .univariate_selection import SelectKBest from .univariate_selection import SelectFpr from .univariate_selection import SelectFdr from .univariate_selection import SelectFwe from .univariate_selection import GenericUnivariateSelect from .variance_threshold import VarianceThreshold from .rfe import RFE from .rfe import RFECV from .from_model import SelectFromModel from .mutual_info_ import mutual_info_regression, mutual_info_classif __all__ = ['GenericUnivariateSelect', 'RFE', 'RFECV', 'SelectFdr', 'SelectFpr', 'SelectFwe', 'SelectKBest', 'SelectFromModel', 'SelectPercentile', 'VarianceThreshold', 'chi2', 'f_classif', 'f_oneway', 'f_regression', 'mutual_info_classif', 'mutual_info_regression']
1,302
28.613636
77
py
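The names re-exported in __init__.py above are typically combined with an estimator inside a Pipeline so that selection is refitted on each training fold. A hedged sketch follows; Pipeline, cross_val_score and LinearSVC are standard scikit-learn components, not part of this module.

from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC

X, y = load_iris(return_X_y=True)

# Feature selection happens inside the pipeline, so each CV split refits it.
clf = Pipeline([("select", SelectKBest(f_classif, k=2)),
                ("svm", LinearSVC(random_state=0))])
print(cross_val_score(clf, X, y, cv=5).mean())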
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/feature_selection/tests/test_variance_threshold.py
from sklearn.utils.testing import (assert_array_equal, assert_equal, assert_raises) from scipy.sparse import bsr_matrix, csc_matrix, csr_matrix from sklearn.feature_selection import VarianceThreshold data = [[0, 1, 2, 3, 4], [0, 2, 2, 3, 5], [1, 1, 2, 4, 0]] def test_zero_variance(): # Test VarianceThreshold with default setting, zero variance. for X in [data, csr_matrix(data), csc_matrix(data), bsr_matrix(data)]: sel = VarianceThreshold().fit(X) assert_array_equal([0, 1, 3, 4], sel.get_support(indices=True)) assert_raises(ValueError, VarianceThreshold().fit, [[0, 1, 2, 3]]) assert_raises(ValueError, VarianceThreshold().fit, [[0, 1], [0, 1]]) def test_variance_threshold(): # Test VarianceThreshold with custom variance. for X in [data, csr_matrix(data)]: X = VarianceThreshold(threshold=.4).fit_transform(X) assert_equal((len(data), 1), X.shape)
971
32.517241
74
py
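The behaviour exercised by test_variance_threshold.py above, restated as a standalone sketch. It is not part of the original file and reuses the same toy matrix as the tests.

from scipy.sparse import csr_matrix
from sklearn.feature_selection import VarianceThreshold

data = [[0, 1, 2, 3, 4],
        [0, 2, 2, 3, 5],
        [1, 1, 2, 4, 0]]

# Default threshold (0.0) drops only the constant column (index 2).
print(VarianceThreshold().fit(data).get_support(indices=True))  # [0 1 3 4]

# A stricter threshold keeps only the last column, whose variance exceeds 0.4;
# sparse input is handled the same way.
print(VarianceThreshold(threshold=.4).fit_transform(csr_matrix(data)).shape)  # (3, 1)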
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/feature_selection/tests/test_from_model.py
import numpy as np from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import skip_if_32bit from sklearn import datasets from sklearn.linear_model import LogisticRegression, SGDClassifier, Lasso from sklearn.svm import LinearSVC from sklearn.feature_selection import SelectFromModel from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import PassiveAggressiveClassifier iris = datasets.load_iris() data, y = iris.data, iris.target rng = np.random.RandomState(0) def test_invalid_input(): clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=None, tol=None) for threshold in ["gobbledigook", ".5 * gobbledigook"]: model = SelectFromModel(clf, threshold=threshold) model.fit(data, y) assert_raises(ValueError, model.transform, data) def test_input_estimator_unchanged(): # Test that SelectFromModel fits on a clone of the estimator. est = RandomForestClassifier() transformer = SelectFromModel(estimator=est) transformer.fit(data, y) assert_true(transformer.estimator is est) def test_feature_importances(): X, y = datasets.make_classification( n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) est = RandomForestClassifier(n_estimators=50, random_state=0) for threshold, func in zip(["mean", "median"], [np.mean, np.median]): transformer = SelectFromModel(estimator=est, threshold=threshold) transformer.fit(X, y) assert_true(hasattr(transformer.estimator_, 'feature_importances_')) X_new = transformer.transform(X) assert_less(X_new.shape[1], X.shape[1]) importances = transformer.estimator_.feature_importances_ feature_mask = np.abs(importances) > func(importances) assert_array_almost_equal(X_new, X[:, feature_mask]) def test_sample_weight(): # Ensure sample weights are passed to underlying estimator X, y = datasets.make_classification( n_samples=100, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) # Check with sample weights sample_weight = np.ones(y.shape) sample_weight[y == 1] *= 100 est = LogisticRegression(random_state=0, fit_intercept=False) transformer = SelectFromModel(estimator=est) transformer.fit(X, y, sample_weight=None) mask = transformer._get_support_mask() transformer.fit(X, y, sample_weight=sample_weight) weighted_mask = transformer._get_support_mask() assert not np.all(weighted_mask == mask) transformer.fit(X, y, sample_weight=3 * sample_weight) reweighted_mask = transformer._get_support_mask() assert np.all(weighted_mask == reweighted_mask) def test_coef_default_threshold(): X, y = datasets.make_classification( n_samples=100, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) # For the Lasso and related models, the threshold defaults to 1e-5 transformer = SelectFromModel(estimator=Lasso(alpha=0.1)) transformer.fit(X, y) X_new = transformer.transform(X) mask = np.abs(transformer.estimator_.coef_) > 1e-5 assert_array_equal(X_new, X[:, mask]) @skip_if_32bit def test_2d_coef(): X, y = datasets.make_classification( n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, 
shuffle=False, random_state=0, n_classes=4) est = LogisticRegression() for threshold, func in zip(["mean", "median"], [np.mean, np.median]): for order in [1, 2, np.inf]: # Fit SelectFromModel a multi-class problem transformer = SelectFromModel(estimator=LogisticRegression(), threshold=threshold, norm_order=order) transformer.fit(X, y) assert_true(hasattr(transformer.estimator_, 'coef_')) X_new = transformer.transform(X) assert_less(X_new.shape[1], X.shape[1]) # Manually check that the norm is correctly performed est.fit(X, y) importances = np.linalg.norm(est.coef_, axis=0, ord=order) feature_mask = importances > func(importances) assert_array_equal(X_new, X[:, feature_mask]) def test_partial_fit(): est = PassiveAggressiveClassifier(random_state=0, shuffle=False, max_iter=5, tol=None) transformer = SelectFromModel(estimator=est) transformer.partial_fit(data, y, classes=np.unique(y)) old_model = transformer.estimator_ transformer.partial_fit(data, y, classes=np.unique(y)) new_model = transformer.estimator_ assert_true(old_model is new_model) X_transform = transformer.transform(data) transformer.fit(np.vstack((data, data)), np.concatenate((y, y))) assert_array_equal(X_transform, transformer.transform(data)) # check that if est doesn't have partial_fit, neither does SelectFromModel transformer = SelectFromModel(estimator=RandomForestClassifier()) assert_false(hasattr(transformer, "partial_fit")) def test_calling_fit_reinitializes(): est = LinearSVC(random_state=0) transformer = SelectFromModel(estimator=est) transformer.fit(data, y) transformer.set_params(estimator__C=100) transformer.fit(data, y) assert_equal(transformer.estimator_.C, 100) def test_prefit(): # Test all possible combinations of the prefit parameter. # Passing a prefit parameter with the selected model # and fitting a unfit model with prefit=False should give same results. clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None) model = SelectFromModel(clf) model.fit(data, y) X_transform = model.transform(data) clf.fit(data, y) model = SelectFromModel(clf, prefit=True) assert_array_equal(model.transform(data), X_transform) # Check that the model is rewritten if prefit=False and a fitted model is # passed model = SelectFromModel(clf, prefit=False) model.fit(data, y) assert_array_equal(model.transform(data), X_transform) # Check that prefit=True and calling fit raises a ValueError model = SelectFromModel(clf, prefit=True) assert_raises(ValueError, model.fit, data, y) def test_threshold_string(): est = RandomForestClassifier(n_estimators=50, random_state=0) model = SelectFromModel(est, threshold="0.5*mean") model.fit(data, y) X_transform = model.transform(data) # Calculate the threshold from the estimator directly. est.fit(data, y) threshold = 0.5 * np.mean(est.feature_importances_) mask = est.feature_importances_ > threshold assert_array_equal(X_transform, data[:, mask]) def test_threshold_without_refitting(): # Test that the threshold can be set without refitting the model. clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None) model = SelectFromModel(clf, threshold="0.1 * mean") model.fit(data, y) X_transform = model.transform(data) # Set a higher threshold to filter out more features. model.threshold = "1.0 * mean" assert_greater(X_transform.shape[1], model.transform(data).shape[1])
7,845
38.034826
78
py
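A minimal, illustrative sketch (not part of the file above) of the SelectFromModel workflow these tests exercise; the data-generation parameters mirror test_coef_default_threshold, and the variable names are placeholders.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Lasso

# Same toy regression-style problem as in test_coef_default_threshold above.
X, y = make_classification(n_samples=100, n_features=10, n_informative=3,
                           n_redundant=0, n_repeated=0, shuffle=False,
                           random_state=0)

# For L1-penalised models the default threshold is 1e-5, so features whose
# |coef_| stays below that value are dropped.
selector = SelectFromModel(Lasso(alpha=0.1))
X_reduced = selector.fit_transform(X, y)
print(X_reduced.shape, selector.get_support())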
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/feature_selection/tests/test_rfe.py
""" Testing Recursive feature elimination """ import numpy as np from numpy.testing import assert_array_almost_equal, assert_array_equal from scipy import sparse from sklearn.feature_selection.rfe import RFE, RFECV from sklearn.datasets import load_iris, make_friedman1 from sklearn.metrics import zero_one_loss from sklearn.svm import SVC, SVR from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_val_score from sklearn.utils import check_random_state from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_greater, assert_equal, assert_true from sklearn.metrics import make_scorer from sklearn.metrics import get_scorer class MockClassifier(object): """ Dummy classifier to test recursive feature elimination """ def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert_true(len(X) == len(Y)) self.coef_ = np.ones(X.shape[1], dtype=np.float64) return self def predict(self, T): return T.shape[0] predict_proba = predict decision_function = predict transform = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. return score def get_params(self, deep=True): return {'foo_param': self.foo_param} def set_params(self, **params): return self def test_rfe_features_importance(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = iris.target clf = RandomForestClassifier(n_estimators=20, random_state=generator, max_depth=2) rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) rfe.fit(X, y) assert_equal(len(rfe.ranking_), X.shape[1]) clf_svc = SVC(kernel="linear") rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1) rfe_svc.fit(X, y) # Check if the supports are equal assert_array_equal(rfe.get_support(), rfe_svc.get_support()) def test_rfe(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] X_sparse = sparse.csr_matrix(X) y = iris.target # dense model clf = SVC(kernel="linear") rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) rfe.fit(X, y) X_r = rfe.transform(X) clf.fit(X_r, y) assert_equal(len(rfe.ranking_), X.shape[1]) # sparse model clf_sparse = SVC(kernel="linear") rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1) rfe_sparse.fit(X_sparse, y) X_r_sparse = rfe_sparse.transform(X_sparse) assert_equal(X_r.shape, iris.data.shape) assert_array_almost_equal(X_r[:10], iris.data[:10]) assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data)) assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target)) assert_array_almost_equal(X_r, X_r_sparse.toarray()) def test_rfe_mockclassifier(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = iris.target # dense model clf = MockClassifier() rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) rfe.fit(X, y) X_r = rfe.transform(X) clf.fit(X_r, y) assert_equal(len(rfe.ranking_), X.shape[1]) assert_equal(X_r.shape, iris.data.shape) def test_rfecv(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = list(iris.target) # regression test: list should be supported # Test using the score function rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5) rfecv.fit(X, y) # non-regression test for missing worst feature: assert_equal(len(rfecv.grid_scores_), X.shape[1]) 
assert_equal(len(rfecv.ranking_), X.shape[1]) X_r = rfecv.transform(X) # All the noisy variable were filtered out assert_array_equal(X_r, iris.data) # same in sparse rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5) X_sparse = sparse.csr_matrix(X) rfecv_sparse.fit(X_sparse, y) X_r_sparse = rfecv_sparse.transform(X_sparse) assert_array_equal(X_r_sparse.toarray(), iris.data) # Test using a customized loss function scoring = make_scorer(zero_one_loss, greater_is_better=False) rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, scoring=scoring) ignore_warnings(rfecv.fit)(X, y) X_r = rfecv.transform(X) assert_array_equal(X_r, iris.data) # Test using a scorer scorer = get_scorer('accuracy') rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, scoring=scorer) rfecv.fit(X, y) X_r = rfecv.transform(X) assert_array_equal(X_r, iris.data) # Test fix on grid_scores def test_scorer(estimator, X, y): return 1.0 rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, scoring=test_scorer) rfecv.fit(X, y) assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_))) # Same as the first two tests, but with step=2 rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5) rfecv.fit(X, y) assert_equal(len(rfecv.grid_scores_), 6) assert_equal(len(rfecv.ranking_), X.shape[1]) X_r = rfecv.transform(X) assert_array_equal(X_r, iris.data) rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5) X_sparse = sparse.csr_matrix(X) rfecv_sparse.fit(X_sparse, y) X_r_sparse = rfecv_sparse.transform(X_sparse) assert_array_equal(X_r_sparse.toarray(), iris.data) # Verifying that steps < 1 don't blow up. rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=.2, cv=5) X_sparse = sparse.csr_matrix(X) rfecv_sparse.fit(X_sparse, y) X_r_sparse = rfecv_sparse.transform(X_sparse) assert_array_equal(X_r_sparse.toarray(), iris.data) def test_rfecv_mockclassifier(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = list(iris.target) # regression test: list should be supported # Test using the score function rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5) rfecv.fit(X, y) # non-regression test for missing worst feature: assert_equal(len(rfecv.grid_scores_), X.shape[1]) assert_equal(len(rfecv.ranking_), X.shape[1]) def test_rfecv_verbose_output(): # Check verbose=1 is producing an output. 
from sklearn.externals.six.moves import cStringIO as StringIO import sys sys.stdout = StringIO() generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = list(iris.target) rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, verbose=1) rfecv.fit(X, y) verbose_output = sys.stdout verbose_output.seek(0) assert_greater(len(verbose_output.readline()), 0) def test_rfe_estimator_tags(): rfe = RFE(SVC(kernel='linear')) assert_equal(rfe._estimator_type, "classifier") # make sure that cross-validation is stratified iris = load_iris() score = cross_val_score(rfe, iris.data, iris.target) assert_greater(score.min(), .7) def test_rfe_min_step(): n_features = 10 X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0) n_samples, n_features = X.shape estimator = SVR(kernel="linear") # Test when floor(step * n_features) <= 0 selector = RFE(estimator, step=0.01) sel = selector.fit(X, y) assert_equal(sel.support_.sum(), n_features // 2) # Test when step is between (0,1) and floor(step * n_features) > 0 selector = RFE(estimator, step=0.20) sel = selector.fit(X, y) assert_equal(sel.support_.sum(), n_features // 2) # Test when step is an integer selector = RFE(estimator, step=5) sel = selector.fit(X, y) assert_equal(sel.support_.sum(), n_features // 2) def test_number_of_subsets_of_features(): # In RFE, 'number_of_subsets_of_features' # = the number of iterations in '_fit' # = max(ranking_) # = 1 + (n_features + step - n_features_to_select - 1) // step # After optimization #4534, this number # = 1 + np.ceil((n_features - n_features_to_select) / float(step)) # This test case is to test their equivalence, refer to #4534 and #3824 def formula1(n_features, n_features_to_select, step): return 1 + ((n_features + step - n_features_to_select - 1) // step) def formula2(n_features, n_features_to_select, step): return 1 + np.ceil((n_features - n_features_to_select) / float(step)) # RFE # Case 1, n_features - n_features_to_select is divisible by step # Case 2, n_features - n_features_to_select is not divisible by step n_features_list = [11, 11] n_features_to_select_list = [3, 3] step_list = [2, 3] for n_features, n_features_to_select, step in zip( n_features_list, n_features_to_select_list, step_list): generator = check_random_state(43) X = generator.normal(size=(100, n_features)) y = generator.rand(100).round() rfe = RFE(estimator=SVC(kernel="linear"), n_features_to_select=n_features_to_select, step=step) rfe.fit(X, y) # this number also equals to the maximum of ranking_ assert_equal(np.max(rfe.ranking_), formula1(n_features, n_features_to_select, step)) assert_equal(np.max(rfe.ranking_), formula2(n_features, n_features_to_select, step)) # In RFECV, 'fit' calls 'RFE._fit' # 'number_of_subsets_of_features' of RFE # = the size of 'grid_scores' of RFECV # = the number of iterations of the for loop before optimization #4534 # RFECV, n_features_to_select = 1 # Case 1, n_features - 1 is divisible by step # Case 2, n_features - 1 is not divisible by step n_features_to_select = 1 n_features_list = [11, 10] step_list = [2, 2] for n_features, step in zip(n_features_list, step_list): generator = check_random_state(43) X = generator.normal(size=(100, n_features)) y = generator.rand(100).round() rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5) rfecv.fit(X, y) assert_equal(rfecv.grid_scores_.shape[0], formula1(n_features, n_features_to_select, step)) assert_equal(rfecv.grid_scores_.shape[0], formula2(n_features, 
n_features_to_select, step)) def test_rfe_cv_n_jobs(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = iris.target rfecv = RFECV(estimator=SVC(kernel='linear')) rfecv.fit(X, y) rfecv_ranking = rfecv.ranking_ rfecv_grid_scores = rfecv.grid_scores_ rfecv.set_params(n_jobs=2) rfecv.fit(X, y) assert_array_almost_equal(rfecv.ranking_, rfecv_ranking) assert_array_almost_equal(rfecv.grid_scores_, rfecv_grid_scores)
11,274
33.063444
78
py
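A short, hedged sketch (not taken from the repository) of the basic RFE call that test_rfe above verifies; the estimator choice and n_features_to_select are illustrative.

from sklearn.datasets import load_iris
from sklearn.feature_selection import RFE
from sklearn.svm import SVC

iris = load_iris()
# Recursively drop one feature per step until two remain.
rfe = RFE(estimator=SVC(kernel="linear"), n_features_to_select=2, step=1)
rfe.fit(iris.data, iris.target)
print(rfe.support_)   # boolean mask of the retained features
print(rfe.ranking_)   # rank 1 marks the selected features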
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/feature_selection/tests/test_mutual_info.py
from __future__ import division import numpy as np from numpy.testing import run_module_suite from scipy.sparse import csr_matrix from sklearn.utils import check_random_state from sklearn.utils.testing import (assert_array_equal, assert_almost_equal, assert_false, assert_raises, assert_equal, assert_greater) from sklearn.feature_selection.mutual_info_ import ( mutual_info_regression, mutual_info_classif, _compute_mi) def test_compute_mi_dd(): # In discrete case computations are straightforward and can be done # by hand on given vectors. x = np.array([0, 1, 1, 0, 0]) y = np.array([1, 0, 0, 0, 1]) H_x = H_y = -(3/5) * np.log(3/5) - (2/5) * np.log(2/5) H_xy = -1/5 * np.log(1/5) - 2/5 * np.log(2/5) - 2/5 * np.log(2/5) I_xy = H_x + H_y - H_xy assert_almost_equal(_compute_mi(x, y, True, True), I_xy) def test_compute_mi_cc(): # For two continuous variables a good approach is to test on bivariate # normal distribution, where mutual information is known. # Mean of the distribution, irrelevant for mutual information. mean = np.zeros(2) # Setup covariance matrix with correlation coeff. equal 0.5. sigma_1 = 1 sigma_2 = 10 corr = 0.5 cov = np.array([ [sigma_1**2, corr * sigma_1 * sigma_2], [corr * sigma_1 * sigma_2, sigma_2**2] ]) # True theoretical mutual information. I_theory = (np.log(sigma_1) + np.log(sigma_2) - 0.5 * np.log(np.linalg.det(cov))) rng = check_random_state(0) Z = rng.multivariate_normal(mean, cov, size=1000) x, y = Z[:, 0], Z[:, 1] # Theory and computed values won't be very close, assert that the # first figures after decimal point match. for n_neighbors in [3, 5, 7]: I_computed = _compute_mi(x, y, False, False, n_neighbors) assert_almost_equal(I_computed, I_theory, 1) def test_compute_mi_cd(): # To test define a joint distribution as follows: # p(x, y) = p(x) p(y | x) # X ~ Bernoulli(p) # (Y | x = 0) ~ Uniform(-1, 1) # (Y | x = 1) ~ Uniform(0, 2) # Use the following formula for mutual information: # I(X; Y) = H(Y) - H(Y | X) # Two entropies can be computed by hand: # H(Y) = -(1-p)/2 * ln((1-p)/2) - p/2*log(p/2) - 1/2*log(1/2) # H(Y | X) = ln(2) # Now we need to implement sampling from out distribution, which is # done easily using conditional distribution logic. n_samples = 1000 rng = check_random_state(0) for p in [0.3, 0.5, 0.7]: x = rng.uniform(size=n_samples) > p y = np.empty(n_samples) mask = x == 0 y[mask] = rng.uniform(-1, 1, size=np.sum(mask)) y[~mask] = rng.uniform(0, 2, size=np.sum(~mask)) I_theory = -0.5 * ((1 - p) * np.log(0.5 * (1 - p)) + p * np.log(0.5 * p) + np.log(0.5)) - np.log(2) # Assert the same tolerance. for n_neighbors in [3, 5, 7]: I_computed = _compute_mi(x, y, True, False, n_neighbors) assert_almost_equal(I_computed, I_theory, 1) def test_compute_mi_cd_unique_label(): # Test that adding unique label doesn't change MI. n_samples = 100 x = np.random.uniform(size=n_samples) > 0.5 y = np.empty(n_samples) mask = x == 0 y[mask] = np.random.uniform(-1, 1, size=np.sum(mask)) y[~mask] = np.random.uniform(0, 2, size=np.sum(~mask)) mi_1 = _compute_mi(x, y, True, False) x = np.hstack((x, 2)) y = np.hstack((y, 10)) mi_2 = _compute_mi(x, y, True, False) assert_equal(mi_1, mi_2) # We are going test that feature ordering by MI matches our expectations. def test_mutual_info_classif_discrete(): X = np.array([[0, 0, 0], [1, 1, 0], [2, 0, 1], [2, 0, 1], [2, 0, 1]]) y = np.array([0, 1, 2, 2, 1]) # Here X[:, 0] is the most informative feature, and X[:, 1] is weakly # informative. 
mi = mutual_info_classif(X, y, discrete_features=True) assert_array_equal(np.argsort(-mi), np.array([0, 2, 1])) def test_mutual_info_regression(): # We generate sample from multivariate normal distribution, using # transformation from initially uncorrelated variables. The zero # variables after transformation is selected as the target vector, # it has the strongest correlation with the variable 2, and # the weakest correlation with the variable 1. T = np.array([ [1, 0.5, 2, 1], [0, 1, 0.1, 0.0], [0, 0.1, 1, 0.1], [0, 0.1, 0.1, 1] ]) cov = T.dot(T.T) mean = np.zeros(4) rng = check_random_state(0) Z = rng.multivariate_normal(mean, cov, size=1000) X = Z[:, 1:] y = Z[:, 0] mi = mutual_info_regression(X, y, random_state=0) assert_array_equal(np.argsort(-mi), np.array([1, 2, 0])) def test_mutual_info_classif_mixed(): # Here the target is discrete and there are two continuous and one # discrete feature. The idea of this test is clear from the code. rng = check_random_state(0) X = rng.rand(1000, 3) X[:, 1] += X[:, 0] y = ((0.5 * X[:, 0] + X[:, 2]) > 0.5).astype(int) X[:, 2] = X[:, 2] > 0.5 mi = mutual_info_classif(X, y, discrete_features=[2], n_neighbors=3, random_state=0) assert_array_equal(np.argsort(-mi), [2, 0, 1]) for n_neighbors in [5, 7, 9]: mi_nn = mutual_info_classif(X, y, discrete_features=[2], n_neighbors=n_neighbors, random_state=0) # Check that the continuous values have an higher MI with greater # n_neighbors assert_greater(mi_nn[0], mi[0]) assert_greater(mi_nn[1], mi[1]) # The n_neighbors should not have any effect on the discrete value # The MI should be the same assert_equal(mi_nn[2], mi[2]) def test_mutual_info_options(): X = np.array([[0, 0, 0], [1, 1, 0], [2, 0, 1], [2, 0, 1], [2, 0, 1]], dtype=float) y = np.array([0, 1, 2, 2, 1], dtype=float) X_csr = csr_matrix(X) for mutual_info in (mutual_info_regression, mutual_info_classif): assert_raises(ValueError, mutual_info_regression, X_csr, y, discrete_features=False) mi_1 = mutual_info(X, y, discrete_features='auto', random_state=0) mi_2 = mutual_info(X, y, discrete_features=False, random_state=0) mi_3 = mutual_info(X_csr, y, discrete_features='auto', random_state=0) mi_4 = mutual_info(X_csr, y, discrete_features=True, random_state=0) assert_array_equal(mi_1, mi_2) assert_array_equal(mi_3, mi_4) assert_false(np.allclose(mi_1, mi_3)) if __name__ == '__main__': run_module_suite()
6,913
32.400966
77
py
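The discrete example from test_mutual_info_classif_discrete can be reproduced directly with the public API; this is an illustrative sketch, and the expected ordering [0, 2, 1] is the one asserted in the test above.

import numpy as np
from sklearn.feature_selection import mutual_info_classif

X = np.array([[0, 0, 0],
              [1, 1, 0],
              [2, 0, 1],
              [2, 0, 1],
              [2, 0, 1]], dtype=float)
y = np.array([0, 1, 2, 2, 1])

# discrete_features=True treats every column as categorical, as in the test.
mi = mutual_info_classif(X, y, discrete_features=True)
print(np.argsort(-mi))   # most informative feature first: [0, 2, 1]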
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/feature_selection/tests/test_chi2.py
""" Tests for chi2, currently the only feature selection function designed specifically to work with sparse matrices. """ import warnings import numpy as np from scipy.sparse import coo_matrix, csr_matrix import scipy.stats from sklearn.feature_selection import SelectKBest, chi2 from sklearn.feature_selection.univariate_selection import _chisquare from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import clean_warning_registry # Feature 0 is highly informative for class 1; # feature 1 is the same everywhere; # feature 2 is a bit informative for class 2. X = [[2, 1, 2], [9, 1, 1], [6, 1, 2], [0, 1, 2]] y = [0, 1, 2, 2] def mkchi2(k): """Make k-best chi2 selector""" return SelectKBest(chi2, k=k) def test_chi2(): # Test Chi2 feature extraction chi2 = mkchi2(k=1).fit(X, y) chi2 = mkchi2(k=1).fit(X, y) assert_array_equal(chi2.get_support(indices=True), [0]) assert_array_equal(chi2.transform(X), np.array(X)[:, [0]]) chi2 = mkchi2(k=2).fit(X, y) assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2]) Xsp = csr_matrix(X, dtype=np.float64) chi2 = mkchi2(k=2).fit(Xsp, y) assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2]) Xtrans = chi2.transform(Xsp) assert_array_equal(Xtrans.shape, [Xsp.shape[0], 2]) # == doesn't work on scipy.sparse matrices Xtrans = Xtrans.toarray() Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray() assert_array_equal(Xtrans, Xtrans2) def test_chi2_coo(): # Check that chi2 works with a COO matrix # (as returned by CountVectorizer, DictVectorizer) Xcoo = coo_matrix(X) mkchi2(k=2).fit_transform(Xcoo, y) # if we got here without an exception, we're safe def test_chi2_negative(): # Check for proper error on negative numbers in the input X. X, y = [[0, 1], [-1e-20, 1]], [0, 1] for X in (X, np.array(X), csr_matrix(X)): assert_raises(ValueError, chi2, X, y) def test_chi2_unused_feature(): # Unused feature should evaluate to NaN # and should issue no runtime warning clean_warning_registry() with warnings.catch_warnings(record=True) as warned: warnings.simplefilter('always') chi, p = chi2([[1, 0], [0, 0]], [1, 0]) for w in warned: if 'divide by zero' in repr(w): raise AssertionError('Found unexpected warning %s' % w) assert_array_equal(chi, [1, np.nan]) assert_array_equal(p[1], np.nan) def test_chisquare(): # Test replacement for scipy.stats.chisquare against the original. obs = np.array([[2., 2.], [1., 1.]]) exp = np.array([[1.5, 1.5], [1.5, 1.5]]) # call SciPy first because our version overwrites obs chi_scp, p_scp = scipy.stats.chisquare(obs, exp) chi_our, p_our = _chisquare(obs, exp) assert_array_almost_equal(chi_scp, chi_our) assert_array_almost_equal(p_scp, p_our)
3,080
30.438776
71
py
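A brief sketch (not part of the test module above) of the chi2 + SelectKBest combination it covers, reusing the same toy X and y; per the tests, features 0 and 2 are the ones retained.

from sklearn.feature_selection import SelectKBest, chi2

X = [[2, 1, 2], [9, 1, 1], [6, 1, 2], [0, 1, 2]]
y = [0, 1, 2, 2]

selector = SelectKBest(chi2, k=2).fit(X, y)
print(selector.get_support(indices=True))   # indices of the two highest-scoring features
print(selector.transform(X))                # X restricted to those columns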
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/feature_selection/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/feature_selection/tests/test_base.py
import numpy as np from scipy import sparse as sp from numpy.testing import assert_array_equal from sklearn.base import BaseEstimator from sklearn.feature_selection.base import SelectorMixin from sklearn.utils import check_array from sklearn.utils.testing import assert_raises, assert_equal class StepSelector(SelectorMixin, BaseEstimator): """Retain every `step` features (beginning with 0)""" def __init__(self, step=2): self.step = step def fit(self, X, y=None): X = check_array(X, 'csc') self.n_input_feats = X.shape[1] return self def _get_support_mask(self): mask = np.zeros(self.n_input_feats, dtype=bool) mask[::self.step] = True return mask support = [True, False] * 5 support_inds = [0, 2, 4, 6, 8] X = np.arange(20).reshape(2, 10) Xt = np.arange(0, 20, 2).reshape(2, 5) Xinv = X.copy() Xinv[:, 1::2] = 0 y = [0, 1] feature_names = list('ABCDEFGHIJ') feature_names_t = feature_names[::2] feature_names_inv = np.array(feature_names) feature_names_inv[1::2] = '' def test_transform_dense(): sel = StepSelector() Xt_actual = sel.fit(X, y).transform(X) Xt_actual2 = StepSelector().fit_transform(X, y) assert_array_equal(Xt, Xt_actual) assert_array_equal(Xt, Xt_actual2) # Check dtype matches assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype) assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype) # Check 1d list and other dtype: names_t_actual = sel.transform([feature_names]) assert_array_equal(feature_names_t, names_t_actual.ravel()) # Check wrong shape raises error assert_raises(ValueError, sel.transform, np.array([[1], [2]])) def test_transform_sparse(): sparse = sp.csc_matrix sel = StepSelector() Xt_actual = sel.fit(sparse(X)).transform(sparse(X)) Xt_actual2 = sel.fit_transform(sparse(X)) assert_array_equal(Xt, Xt_actual.toarray()) assert_array_equal(Xt, Xt_actual2.toarray()) # Check dtype matches assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype) assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype) # Check wrong shape raises error assert_raises(ValueError, sel.transform, np.array([[1], [2]])) def test_inverse_transform_dense(): sel = StepSelector() Xinv_actual = sel.fit(X, y).inverse_transform(Xt) assert_array_equal(Xinv, Xinv_actual) # Check dtype matches assert_equal(np.int32, sel.inverse_transform(Xt.astype(np.int32)).dtype) assert_equal(np.float32, sel.inverse_transform(Xt.astype(np.float32)).dtype) # Check 1d list and other dtype: names_inv_actual = sel.inverse_transform([feature_names_t]) assert_array_equal(feature_names_inv, names_inv_actual.ravel()) # Check wrong shape raises error assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]])) def test_inverse_transform_sparse(): sparse = sp.csc_matrix sel = StepSelector() Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt)) assert_array_equal(Xinv, Xinv_actual.toarray()) # Check dtype matches assert_equal(np.int32, sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype) assert_equal(np.float32, sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype) # Check wrong shape raises error assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]])) def test_get_support(): sel = StepSelector() sel.fit(X, y) assert_array_equal(support, sel.get_support()) assert_array_equal(support_inds, sel.get_support(indices=True))
3,681
30.741379
79
py
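A hedged sketch of how a custom selector can be built on SelectorMixin, mirroring the StepSelector helper defined in the test file above; the class name and the n_features_ attribute are invented for illustration.

import numpy as np
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin

class EvenColumnSelector(SelectorMixin, BaseEstimator):
    """Toy selector that keeps every second column."""
    def fit(self, X, y=None):
        self.n_features_ = X.shape[1]
        return self

    def _get_support_mask(self):
        # The mixin derives transform/inverse_transform/get_support from this mask.
        mask = np.zeros(self.n_features_, dtype=bool)
        mask[::2] = True
        return mask

X = np.arange(20).reshape(2, 10)
print(EvenColumnSelector().fit_transform(X))   # columns 0, 2, 4, 6, 8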
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/feature_selection/tests/test_feature_select.py
""" Todo: cross-check the F-value with stats model """ from __future__ import division import itertools import warnings import numpy as np from scipy import stats, sparse from numpy.testing import run_module_suite from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_not_in from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils import safe_mask from sklearn.datasets.samples_generator import (make_classification, make_regression) from sklearn.feature_selection import ( chi2, f_classif, f_oneway, f_regression, mutual_info_classif, mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr, SelectFdr, SelectFwe, GenericUnivariateSelect) ############################################################################## # Test the score functions def test_f_oneway_vs_scipy_stats(): # Test that our f_oneway gives the same result as scipy.stats rng = np.random.RandomState(0) X1 = rng.randn(10, 3) X2 = 1 + rng.randn(10, 3) f, pv = stats.f_oneway(X1, X2) f2, pv2 = f_oneway(X1, X2) assert_true(np.allclose(f, f2)) assert_true(np.allclose(pv, pv2)) def test_f_oneway_ints(): # Smoke test f_oneway on integers: that it does raise casting errors # with recent numpys rng = np.random.RandomState(0) X = rng.randint(10, size=(10, 10)) y = np.arange(10) fint, pint = f_oneway(X, y) # test that is gives the same result as with float f, p = f_oneway(X.astype(np.float), y) assert_array_almost_equal(f, fint, decimal=4) assert_array_almost_equal(p, pint, decimal=4) def test_f_classif(): # Test whether the F test yields meaningful results # on a simple simulated classification problem X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) F, pv = f_classif(X, y) F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y) assert_true((F > 0).all()) assert_true((pv > 0).all()) assert_true((pv < 1).all()) assert_true((pv[:5] < 0.05).all()) assert_true((pv[5:] > 1.e-4).all()) assert_array_almost_equal(F_sparse, F) assert_array_almost_equal(pv_sparse, pv) def test_f_regression(): # Test whether the F test yields meaningful results # on a simple simulated regression problem X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0) F, pv = f_regression(X, y) assert_true((F > 0).all()) assert_true((pv > 0).all()) assert_true((pv < 1).all()) assert_true((pv[:5] < 0.05).all()) assert_true((pv[5:] > 1.e-4).all()) # with centering, compare with sparse F, pv = f_regression(X, y, center=True) F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=True) assert_array_almost_equal(F_sparse, F) assert_array_almost_equal(pv_sparse, pv) # again without centering, compare with sparse F, pv = f_regression(X, y, center=False) F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False) assert_array_almost_equal(F_sparse, F) assert_array_almost_equal(pv_sparse, pv) def 
test_f_regression_input_dtype(): # Test whether f_regression returns the same value # for any numeric data_type rng = np.random.RandomState(0) X = rng.rand(10, 20) y = np.arange(10).astype(np.int) F1, pv1 = f_regression(X, y) F2, pv2 = f_regression(X, y.astype(np.float)) assert_array_almost_equal(F1, F2, 5) assert_array_almost_equal(pv1, pv2, 5) def test_f_regression_center(): # Test whether f_regression preserves dof according to 'center' argument # We use two centered variates so we have a simple relationship between # F-score with variates centering and F-score without variates centering. # Create toy example X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean n_samples = X.size Y = np.ones(n_samples) Y[::2] *= -1. Y[0] = 0. # have Y mean being null F1, _ = f_regression(X, Y, center=True) F2, _ = f_regression(X, Y, center=False) assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2) assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS def test_f_classif_multi_class(): # Test whether the F test yields meaningful results # on a simple simulated classification problem X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) F, pv = f_classif(X, y) assert_true((F > 0).all()) assert_true((pv > 0).all()) assert_true((pv < 1).all()) assert_true((pv[:5] < 0.05).all()) assert_true((pv[5:] > 1.e-4).all()) def test_select_percentile_classif(): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the percentile heuristic X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) univariate_filter = SelectPercentile(f_classif, percentile=25) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect(f_classif, mode='percentile', param=25).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) def test_select_percentile_classif_sparse(): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the percentile heuristic X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) X = sparse.csr_matrix(X) univariate_filter = SelectPercentile(f_classif, percentile=25) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect(f_classif, mode='percentile', param=25).fit(X, y).transform(X) assert_array_equal(X_r.toarray(), X_r2.toarray()) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) X_r2inv = univariate_filter.inverse_transform(X_r2) assert_true(sparse.issparse(X_r2inv)) support_mask = safe_mask(X_r2inv, support) assert_equal(X_r2inv.shape, X.shape) assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray()) # Check other columns are empty assert_equal(X_r2inv.getnnz(), X_r.getnnz()) ############################################################################## # Test univariate selection in classification settings def test_select_kbest_classif(): # Test whether the relative univariate feature selection 
# gets the correct items in a simple classification problem # with the k best heuristic X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) univariate_filter = SelectKBest(f_classif, k=5) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( f_classif, mode='k_best', param=5).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) def test_select_kbest_all(): # Test whether k="all" correctly returns all features. X, y = make_classification(n_samples=20, n_features=10, shuffle=False, random_state=0) univariate_filter = SelectKBest(f_classif, k='all') X_r = univariate_filter.fit(X, y).transform(X) assert_array_equal(X, X_r) def test_select_kbest_zero(): # Test whether k=0 correctly returns no features. X, y = make_classification(n_samples=20, n_features=10, shuffle=False, random_state=0) univariate_filter = SelectKBest(f_classif, k=0) univariate_filter.fit(X, y) support = univariate_filter.get_support() gtruth = np.zeros(10, dtype=bool) assert_array_equal(support, gtruth) X_selected = assert_warns_message(UserWarning, 'No features were selected', univariate_filter.transform, X) assert_equal(X_selected.shape, (20, 0)) def test_select_heuristics_classif(): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the fdr, fwe and fpr heuristics X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) univariate_filter = SelectFwe(f_classif, alpha=0.01) X_r = univariate_filter.fit(X, y).transform(X) gtruth = np.zeros(20) gtruth[:5] = 1 for mode in ['fdr', 'fpr', 'fwe']: X_r2 = GenericUnivariateSelect( f_classif, mode=mode, param=0.01).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() assert_array_almost_equal(support, gtruth) ############################################################################## # Test univariate selection in regression settings def assert_best_scores_kept(score_filter): scores = score_filter.scores_ support = score_filter.get_support() assert_array_equal(np.sort(scores[support]), np.sort(scores)[-support.sum():]) def test_select_percentile_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the percentile heuristic X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0) univariate_filter = SelectPercentile(f_regression, percentile=25) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = GenericUnivariateSelect( f_regression, mode='percentile', param=25).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) X_2 = X.copy() X_2[:, np.logical_not(support)] = 0 assert_array_equal(X_2, univariate_filter.inverse_transform(X_r)) # Check inverse_transform respects dtype assert_array_equal(X_2.astype(bool), univariate_filter.inverse_transform(X_r.astype(bool))) def test_select_percentile_regression_full(): # Test whether the relative univariate feature selection # selects all 
features when '100%' is asked. X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0) univariate_filter = SelectPercentile(f_regression, percentile=100) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = GenericUnivariateSelect( f_regression, mode='percentile', param=100).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.ones(20) assert_array_equal(support, gtruth) def test_invalid_percentile(): X, y = make_regression(n_samples=10, n_features=20, n_informative=2, shuffle=False, random_state=0) assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y) assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y) assert_raises(ValueError, GenericUnivariateSelect(mode='percentile', param=-1).fit, X, y) assert_raises(ValueError, GenericUnivariateSelect(mode='percentile', param=101).fit, X, y) def test_select_kbest_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the k best heuristic X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0, noise=10) univariate_filter = SelectKBest(f_regression, k=5) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = GenericUnivariateSelect( f_regression, mode='k_best', param=5).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) def test_select_heuristics_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the fpr, fdr or fwe heuristics X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0, noise=10) univariate_filter = SelectFpr(f_regression, alpha=0.01) X_r = univariate_filter.fit(X, y).transform(X) gtruth = np.zeros(20) gtruth[:5] = 1 for mode in ['fdr', 'fpr', 'fwe']: X_r2 = GenericUnivariateSelect( f_regression, mode=mode, param=0.01).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool)) assert_less(np.sum(support[5:] == 1), 3) def test_boundary_case_ch2(): # Test boundary case, and always aim to select 1 feature. 
X = np.array([[10, 20], [20, 20], [20, 30]]) y = np.array([[1], [0], [0]]) scores, pvalues = chi2(X, y) assert_array_almost_equal(scores, np.array([4., 0.71428571])) assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472])) filter_fdr = SelectFdr(chi2, alpha=0.1) filter_fdr.fit(X, y) support_fdr = filter_fdr.get_support() assert_array_equal(support_fdr, np.array([True, False])) filter_kbest = SelectKBest(chi2, k=1) filter_kbest.fit(X, y) support_kbest = filter_kbest.get_support() assert_array_equal(support_kbest, np.array([True, False])) filter_percentile = SelectPercentile(chi2, percentile=50) filter_percentile.fit(X, y) support_percentile = filter_percentile.get_support() assert_array_equal(support_percentile, np.array([True, False])) filter_fpr = SelectFpr(chi2, alpha=0.1) filter_fpr.fit(X, y) support_fpr = filter_fpr.get_support() assert_array_equal(support_fpr, np.array([True, False])) filter_fwe = SelectFwe(chi2, alpha=0.1) filter_fwe.fit(X, y) support_fwe = filter_fwe.get_support() assert_array_equal(support_fwe, np.array([True, False])) def test_select_fdr_regression(): # Test that fdr heuristic actually has low FDR. def single_fdr(alpha, n_informative, random_state): X, y = make_regression(n_samples=150, n_features=20, n_informative=n_informative, shuffle=False, random_state=random_state, noise=10) with warnings.catch_warnings(record=True): # Warnings can be raised when no features are selected # (low alpha or very noisy data) univariate_filter = SelectFdr(f_regression, alpha=alpha) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( f_regression, mode='fdr', param=alpha).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() num_false_positives = np.sum(support[n_informative:] == 1) num_true_positives = np.sum(support[:n_informative] == 1) if num_false_positives == 0: return 0. false_discovery_rate = (num_false_positives / (num_true_positives + num_false_positives)) return false_discovery_rate for alpha in [0.001, 0.01, 0.1]: for n_informative in [1, 5, 10]: # As per Benjamini-Hochberg, the expected false discovery rate # should be lower than alpha: # FDR = E(FP / (TP + FP)) <= alpha false_discovery_rate = np.mean([single_fdr(alpha, n_informative, random_state) for random_state in range(100)]) assert_greater_equal(alpha, false_discovery_rate) # Make sure that the empirical false discovery rate increases # with alpha: if false_discovery_rate != 0: assert_greater(false_discovery_rate, alpha / 10) def test_select_fwe_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the fwe heuristic X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0) univariate_filter = SelectFwe(f_regression, alpha=0.01) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( f_regression, mode='fwe', param=0.01).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool)) assert_less(np.sum(support[5:] == 1), 2) def test_selectkbest_tiebreaking(): # Test whether SelectKBest actually selects k features in case of ties. # Prior to 0.11, SelectKBest would return more features than requested. 
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]] y = [1] dummy_score = lambda X, y: (X[0], X[0]) for X in Xs: sel = SelectKBest(dummy_score, k=1) X1 = ignore_warnings(sel.fit_transform)([X], y) assert_equal(X1.shape[1], 1) assert_best_scores_kept(sel) sel = SelectKBest(dummy_score, k=2) X2 = ignore_warnings(sel.fit_transform)([X], y) assert_equal(X2.shape[1], 2) assert_best_scores_kept(sel) def test_selectpercentile_tiebreaking(): # Test if SelectPercentile selects the right n_features in case of ties. Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]] y = [1] dummy_score = lambda X, y: (X[0], X[0]) for X in Xs: sel = SelectPercentile(dummy_score, percentile=34) X1 = ignore_warnings(sel.fit_transform)([X], y) assert_equal(X1.shape[1], 1) assert_best_scores_kept(sel) sel = SelectPercentile(dummy_score, percentile=67) X2 = ignore_warnings(sel.fit_transform)([X], y) assert_equal(X2.shape[1], 2) assert_best_scores_kept(sel) def test_tied_pvalues(): # Test whether k-best and percentiles work with tied pvalues from chi2. # chi2 will return the same p-values for the following features, but it # will return different scores. X0 = np.array([[10000, 9999, 9998], [1, 1, 1]]) y = [0, 1] for perm in itertools.permutations((0, 1, 2)): X = X0[:, perm] Xt = SelectKBest(chi2, k=2).fit_transform(X, y) assert_equal(Xt.shape, (2, 2)) assert_not_in(9998, Xt) Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y) assert_equal(Xt.shape, (2, 2)) assert_not_in(9998, Xt) def test_scorefunc_multilabel(): # Test whether k-best and percentiles works with multilabels with chi2. X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]]) y = [[1, 1], [0, 1], [1, 0]] Xt = SelectKBest(chi2, k=2).fit_transform(X, y) assert_equal(Xt.shape, (3, 2)) assert_not_in(0, Xt) Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y) assert_equal(Xt.shape, (3, 2)) assert_not_in(0, Xt) def test_tied_scores(): # Test for stable sorting in k-best with tied scores. X_train = np.array([[0, 0, 0], [1, 1, 1]]) y_train = [0, 1] for n_features in [1, 2, 3]: sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train) X_test = sel.transform([[0, 1, 2]]) assert_array_equal(X_test[0], np.arange(3)[-n_features:]) def test_nans(): # Assert that SelectKBest and SelectPercentile can handle NaNs. # First feature has zero variance to confuse f_classif (ANOVA) and # make it return a NaN. X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]] y = [1, 0, 1] for select in (SelectKBest(f_classif, 2), SelectPercentile(f_classif, percentile=67)): ignore_warnings(select.fit)(X, y) assert_array_equal(select.get_support(indices=True), np.array([1, 2])) def test_score_func_error(): X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]] y = [1, 0, 1] for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe, SelectFdr, SelectFpr, GenericUnivariateSelect]: assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y) def test_invalid_k(): X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]] y = [1, 0, 1] assert_raises(ValueError, SelectKBest(k=-1).fit, X, y) assert_raises(ValueError, SelectKBest(k=4).fit, X, y) assert_raises(ValueError, GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y) assert_raises(ValueError, GenericUnivariateSelect(mode='k_best', param=4).fit, X, y) def test_f_classif_constant_feature(): # Test that f_classif warns if a feature is constant throughout. 
X, y = make_classification(n_samples=10, n_features=5) X[:, 0] = 2.0 assert_warns(UserWarning, f_classif, X, y) def test_no_feature_selected(): rng = np.random.RandomState(0) # Generate random uncorrelated data: a strict univariate test should # rejects all the features X = rng.rand(40, 10) y = rng.randint(0, 4, size=40) strict_selectors = [ SelectFwe(alpha=0.01).fit(X, y), SelectFdr(alpha=0.01).fit(X, y), SelectFpr(alpha=0.01).fit(X, y), SelectPercentile(percentile=0).fit(X, y), SelectKBest(k=0).fit(X, y), ] for selector in strict_selectors: assert_array_equal(selector.get_support(), np.zeros(10)) X_selected = assert_warns_message( UserWarning, 'No features were selected', selector.transform, X) assert_equal(X_selected.shape, (40, 0)) def test_mutual_info_classif(): X, y = make_classification(n_samples=100, n_features=5, n_informative=1, n_redundant=1, n_repeated=0, n_classes=2, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) # Test in KBest mode. univariate_filter = SelectKBest(mutual_info_classif, k=2) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( mutual_info_classif, mode='k_best', param=2).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(5) gtruth[:2] = 1 assert_array_equal(support, gtruth) # Test in Percentile mode. univariate_filter = SelectPercentile(mutual_info_classif, percentile=40) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( mutual_info_classif, mode='percentile', param=40).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(5) gtruth[:2] = 1 assert_array_equal(support, gtruth) def test_mutual_info_regression(): X, y = make_regression(n_samples=100, n_features=10, n_informative=2, shuffle=False, random_state=0, noise=10) # Test in KBest mode. univariate_filter = SelectKBest(mutual_info_regression, k=2) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = GenericUnivariateSelect( mutual_info_regression, mode='k_best', param=2).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(10) gtruth[:2] = 1 assert_array_equal(support, gtruth) # Test in Percentile mode. univariate_filter = SelectPercentile(mutual_info_regression, percentile=20) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect(mutual_info_regression, mode='percentile', param=20).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(10) gtruth[:2] = 1 assert_array_equal(support, gtruth) if __name__ == '__main__': run_module_suite()
26,651
38.367799
80
py
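An illustrative usage sketch of the univariate filters exercised above; the synthetic-data parameters copy test_select_percentile_classif, so the 25th percentile keeps the five leading informative/redundant columns.

from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectPercentile, f_classif

X, y = make_classification(n_samples=200, n_features=20, n_informative=3,
                           n_redundant=2, n_repeated=0, n_classes=8,
                           n_clusters_per_class=1, flip_y=0.0, class_sep=10,
                           shuffle=False, random_state=0)

# Keep the top 25% of features ranked by the ANOVA F-score.
selector = SelectPercentile(f_classif, percentile=25).fit(X, y)
print(selector.get_support(indices=True))   # [0, 1, 2, 3, 4] for this problem
print(selector.transform(X).shape)          # (200, 5)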
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/gaussian_process/correlation_models.py
# -*- coding: utf-8 -*- # Author: Vincent Dubourg <[email protected]> # (mostly translation, see implementation details) # License: BSD 3 clause """ The built-in correlation models submodule for the gaussian_process module. """ import numpy as np from ..utils import deprecated @deprecated("The function absolute_exponential of correlation_models is " "deprecated in version 0.19.1 and will be removed in 0.22.") def absolute_exponential(theta, d): """ Absolute exponential autocorrelation model. (Ornstein-Uhlenbeck stochastic process):: n theta, d --> r(theta, d) = exp( sum - theta_i * |d_i| ) i = 1 Parameters ---------- theta : array_like An array with shape 1 (isotropic) or n (anisotropic) giving the autocorrelation parameter(s). d : array_like An array with shape (n_eval, n_features) giving the componentwise distances between locations x and x' at which the correlation model should be evaluated. Returns ------- r : array_like An array with shape (n_eval, ) containing the values of the autocorrelation model. """ theta = np.asarray(theta, dtype=np.float64) d = np.abs(np.asarray(d, dtype=np.float64)) if d.ndim > 1: n_features = d.shape[1] else: n_features = 1 if theta.size == 1: return np.exp(- theta[0] * np.sum(d, axis=1)) elif theta.size != n_features: raise ValueError("Length of theta must be 1 or %s" % n_features) else: return np.exp(- np.sum(theta.reshape(1, n_features) * d, axis=1)) @deprecated("The function squared_exponential of correlation_models is " "deprecated in version 0.19.1 and will be removed in 0.22.") def squared_exponential(theta, d): """ Squared exponential correlation model (Radial Basis Function). (Infinitely differentiable stochastic process, very smooth):: n theta, d --> r(theta, d) = exp( sum - theta_i * (d_i)^2 ) i = 1 Parameters ---------- theta : array_like An array with shape 1 (isotropic) or n (anisotropic) giving the autocorrelation parameter(s). d : array_like An array with shape (n_eval, n_features) giving the componentwise distances between locations x and x' at which the correlation model should be evaluated. Returns ------- r : array_like An array with shape (n_eval, ) containing the values of the autocorrelation model. """ theta = np.asarray(theta, dtype=np.float64) d = np.asarray(d, dtype=np.float64) if d.ndim > 1: n_features = d.shape[1] else: n_features = 1 if theta.size == 1: return np.exp(-theta[0] * np.sum(d ** 2, axis=1)) elif theta.size != n_features: raise ValueError("Length of theta must be 1 or %s" % n_features) else: return np.exp(-np.sum(theta.reshape(1, n_features) * d ** 2, axis=1)) @deprecated("The function generalized_exponential of correlation_models is " "deprecated in version 0.19.1 and will be removed in 0.22.") def generalized_exponential(theta, d): """ Generalized exponential correlation model. (Useful when one does not know the smoothness of the function to be predicted.):: n theta, d --> r(theta, d) = exp( sum - theta_i * |d_i|^p ) i = 1 Parameters ---------- theta : array_like An array with shape 1+1 (isotropic) or n+1 (anisotropic) giving the autocorrelation parameter(s) (theta, p). d : array_like An array with shape (n_eval, n_features) giving the componentwise distances between locations x and x' at which the correlation model should be evaluated. Returns ------- r : array_like An array with shape (n_eval, ) with the values of the autocorrelation model. 
""" theta = np.asarray(theta, dtype=np.float64) d = np.asarray(d, dtype=np.float64) if d.ndim > 1: n_features = d.shape[1] else: n_features = 1 lth = theta.size if n_features > 1 and lth == 2: theta = np.hstack([np.repeat(theta[0], n_features), theta[1]]) elif lth != n_features + 1: raise Exception("Length of theta must be 2 or %s" % (n_features + 1)) else: theta = theta.reshape(1, lth) td = theta[:, 0:-1].reshape(1, n_features) * np.abs(d) ** theta[:, -1] r = np.exp(- np.sum(td, 1)) return r @deprecated("The function pure_nugget of correlation_models is " "deprecated in version 0.19.1 and will be removed in 0.22.") def pure_nugget(theta, d): """ Spatial independence correlation model (pure nugget). (Useful when one wants to solve an ordinary least squares problem!):: n theta, d --> r(theta, d) = 1 if sum |d_i| == 0 i = 1 0 otherwise Parameters ---------- theta : array_like None. d : array_like An array with shape (n_eval, n_features) giving the componentwise distances between locations x and x' at which the correlation model should be evaluated. Returns ------- r : array_like An array with shape (n_eval, ) with the values of the autocorrelation model. """ theta = np.asarray(theta, dtype=np.float64) d = np.asarray(d, dtype=np.float64) n_eval = d.shape[0] r = np.zeros(n_eval) r[np.all(d == 0., axis=1)] = 1. return r @deprecated("The function cubic of correlation_models is " "deprecated in version 0.19.1 and will be removed in 0.22.") def cubic(theta, d): """ Cubic correlation model:: theta, d --> r(theta, d) = n prod max(0, 1 - 3(theta_j*d_ij)^2 + 2(theta_j*d_ij)^3) , i = 1,...,m j = 1 Parameters ---------- theta : array_like An array with shape 1 (isotropic) or n (anisotropic) giving the autocorrelation parameter(s). d : array_like An array with shape (n_eval, n_features) giving the componentwise distances between locations x and x' at which the correlation model should be evaluated. Returns ------- r : array_like An array with shape (n_eval, ) with the values of the autocorrelation model. """ theta = np.asarray(theta, dtype=np.float64) d = np.asarray(d, dtype=np.float64) if d.ndim > 1: n_features = d.shape[1] else: n_features = 1 lth = theta.size if lth == 1: td = np.abs(d) * theta elif lth != n_features: raise Exception("Length of theta must be 1 or " + str(n_features)) else: td = np.abs(d) * theta.reshape(1, n_features) td[td > 1.] = 1. ss = 1. - td ** 2. * (3. - 2. * td) r = np.prod(ss, 1) return r @deprecated("The function linear of correlation_models is " "deprecated in version 0.19.1 and will be removed in 0.22.") def linear(theta, d): """ Linear correlation model:: theta, d --> r(theta, d) = n prod max(0, 1 - theta_j*d_ij) , i = 1,...,m j = 1 Parameters ---------- theta : array_like An array with shape 1 (isotropic) or n (anisotropic) giving the autocorrelation parameter(s). d : array_like An array with shape (n_eval, n_features) giving the componentwise distances between locations x and x' at which the correlation model should be evaluated. Returns ------- r : array_like An array with shape (n_eval, ) with the values of the autocorrelation model. """ theta = np.asarray(theta, dtype=np.float64) d = np.asarray(d, dtype=np.float64) if d.ndim > 1: n_features = d.shape[1] else: n_features = 1 lth = theta.size if lth == 1: td = np.abs(d) * theta elif lth != n_features: raise Exception("Length of theta must be 1 or %s" % n_features) else: td = np.abs(d) * theta.reshape(1, n_features) td[td > 1.] = 1. ss = 1. - td r = np.prod(ss, 1) return r
8,531
27.630872
78
py
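The squared-exponential docstring above can be checked by hand with a few lines of NumPy; this sketch only evaluates the documented formula r(theta, d) = exp(-sum_i theta_i * d_i**2) and is not part of the module.

import numpy as np

theta = np.array([0.5])                  # isotropic autocorrelation parameter
d = np.array([[0.0, 1.0],
              [2.0, 0.0]])               # componentwise distances, shape (n_eval, n_features)

r = np.exp(-theta[0] * np.sum(d ** 2, axis=1))
print(r)                                 # [exp(-0.5), exp(-2.0)]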
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/gaussian_process/kernels.py
"""Kernels for Gaussian process regression and classification. The kernels in this module allow kernel-engineering, i.e., they can be combined via the "+" and "*" operators or be exponentiated with a scalar via "**". These sum and product expressions can also contain scalar values, which are automatically converted to a constant kernel. All kernels allow (analytic) gradient-based hyperparameter optimization. The space of hyperparameters can be specified by giving lower und upper boundaries for the value of each hyperparameter (the search space is thus rectangular). Instead of specifying bounds, hyperparameters can also be declared to be "fixed", which causes these hyperparameters to be excluded from optimization. """ # Author: Jan Hendrik Metzen <[email protected]> # License: BSD 3 clause # Note: this module is strongly inspired by the kernel module of the george # package. from abc import ABCMeta, abstractmethod from collections import namedtuple import math import numpy as np from scipy.special import kv, gamma from scipy.spatial.distance import pdist, cdist, squareform from ..metrics.pairwise import pairwise_kernels from ..externals import six from ..base import clone from sklearn.externals.funcsigs import signature def _check_length_scale(X, length_scale): length_scale = np.squeeze(length_scale).astype(float) if np.ndim(length_scale) > 1: raise ValueError("length_scale cannot be of dimension greater than 1") if np.ndim(length_scale) == 1 and X.shape[1] != length_scale.shape[0]: raise ValueError("Anisotropic kernel must have the same number of " "dimensions as data (%d!=%d)" % (length_scale.shape[0], X.shape[1])) return length_scale class Hyperparameter(namedtuple('Hyperparameter', ('name', 'value_type', 'bounds', 'n_elements', 'fixed'))): """A kernel hyperparameter's specification in form of a namedtuple. .. versionadded:: 0.18 Attributes ---------- name : string The name of the hyperparameter. Note that a kernel using a hyperparameter with name "x" must have the attributes self.x and self.x_bounds value_type : string The type of the hyperparameter. Currently, only "numeric" hyperparameters are supported. bounds : pair of floats >= 0 or "fixed" The lower and upper bound on the parameter. If n_elements>1, a pair of 1d array with n_elements each may be given alternatively. If the string "fixed" is passed as bounds, the hyperparameter's value cannot be changed. n_elements : int, default=1 The number of elements of the hyperparameter value. Defaults to 1, which corresponds to a scalar hyperparameter. n_elements > 1 corresponds to a hyperparameter which is vector-valued, such as, e.g., anisotropic length-scales. fixed : bool, default: None Whether the value of this hyperparameter is fixed, i.e., cannot be changed during hyperparameter tuning. If None is passed, the "fixed" is derived based on the given bounds. """ # A raw namedtuple is very memory efficient as it packs the attributes # in a struct to get rid of the __dict__ of attributes in particular it # does not copy the string for the keys on each instance. # By deriving a namedtuple class just to introduce the __init__ method we # would also reintroduce the __dict__ on the instance. By telling the # Python interpreter that this subclass uses static __slots__ instead of # dynamic attributes. Furthermore we don't need any additional slot in the # subclass so we set __slots__ to the empty tuple. 
__slots__ = () def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None): if not isinstance(bounds, six.string_types) or bounds != "fixed": bounds = np.atleast_2d(bounds) if n_elements > 1: # vector-valued parameter if bounds.shape[0] == 1: bounds = np.repeat(bounds, n_elements, 0) elif bounds.shape[0] != n_elements: raise ValueError("Bounds on %s should have either 1 or " "%d dimensions. Given are %d" % (name, n_elements, bounds.shape[0])) if fixed is None: fixed = isinstance(bounds, six.string_types) and bounds == "fixed" return super(Hyperparameter, cls).__new__( cls, name, value_type, bounds, n_elements, fixed) # This is mainly a testing utility to check that two hyperparameters # are equal. def __eq__(self, other): return (self.name == other.name and self.value_type == other.value_type and np.all(self.bounds == other.bounds) and self.n_elements == other.n_elements and self.fixed == other.fixed) class Kernel(six.with_metaclass(ABCMeta)): """Base class for all kernels. .. versionadded:: 0.18 """ def get_params(self, deep=True): """Get parameters of this kernel. Parameters ---------- deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values. """ params = dict() # introspect the constructor arguments to find the model parameters # to represent cls = self.__class__ init = getattr(cls.__init__, 'deprecated_original', cls.__init__) init_sign = signature(init) args, varargs = [], [] for parameter in init_sign.parameters.values(): if (parameter.kind != parameter.VAR_KEYWORD and parameter.name != 'self'): args.append(parameter.name) if parameter.kind == parameter.VAR_POSITIONAL: varargs.append(parameter.name) if len(varargs) != 0: raise RuntimeError("scikit-learn kernels should always " "specify their parameters in the signature" " of their __init__ (no varargs)." " %s doesn't follow this convention." % (cls, )) for arg in args: params[arg] = getattr(self, arg, None) return params def set_params(self, **params): """Set the parameters of this kernel. The method works on simple kernels as well as on nested kernels. The latter have parameters of the form ``<component>__<parameter>`` so that it's possible to update each component of a nested object. Returns ------- self """ if not params: # Simple optimisation to gain speed (inspect is slow) return self valid_params = self.get_params(deep=True) for key, value in six.iteritems(params): split = key.split('__', 1) if len(split) > 1: # nested objects case name, sub_name = split if name not in valid_params: raise ValueError('Invalid parameter %s for kernel %s. ' 'Check the list of available parameters ' 'with `kernel.get_params().keys()`.' % (name, self)) sub_object = valid_params[name] sub_object.set_params(**{sub_name: value}) else: # simple objects case if key not in valid_params: raise ValueError('Invalid parameter %s for kernel %s. ' 'Check the list of available parameters ' 'with `kernel.get_params().keys()`.' % (key, self.__class__.__name__)) setattr(self, key, value) return self def clone_with_theta(self, theta): """Returns a clone of self with given hyperparameters theta. 
""" cloned = clone(self) cloned.theta = theta return cloned @property def n_dims(self): """Returns the number of non-fixed hyperparameters of the kernel.""" return self.theta.shape[0] @property def hyperparameters(self): """Returns a list of all hyperparameter specifications.""" r = [] for attr in dir(self): if attr.startswith("hyperparameter_"): r.append(getattr(self, attr)) return r @property def theta(self): """Returns the (flattened, log-transformed) non-fixed hyperparameters. Note that theta are typically the log-transformed values of the kernel's hyperparameters as this representation of the search space is more amenable for hyperparameter search, as hyperparameters like length-scales naturally live on a log-scale. Returns ------- theta : array, shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel """ theta = [] params = self.get_params() for hyperparameter in self.hyperparameters: if not hyperparameter.fixed: theta.append(params[hyperparameter.name]) if len(theta) > 0: return np.log(np.hstack(theta)) else: return np.array([]) @theta.setter def theta(self, theta): """Sets the (flattened, log-transformed) non-fixed hyperparameters. Parameters ---------- theta : array, shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel """ params = self.get_params() i = 0 for hyperparameter in self.hyperparameters: if hyperparameter.fixed: continue if hyperparameter.n_elements > 1: # vector-valued parameter params[hyperparameter.name] = np.exp( theta[i:i + hyperparameter.n_elements]) i += hyperparameter.n_elements else: params[hyperparameter.name] = np.exp(theta[i]) i += 1 if i != len(theta): raise ValueError("theta has not the correct number of entries." " Should be %d; given are %d" % (i, len(theta))) self.set_params(**params) @property def bounds(self): """Returns the log-transformed bounds on the theta. Returns ------- bounds : array, shape (n_dims, 2) The log-transformed bounds on the kernel's hyperparameters theta """ bounds = [] for hyperparameter in self.hyperparameters: if not hyperparameter.fixed: bounds.append(hyperparameter.bounds) if len(bounds) > 0: return np.log(np.vstack(bounds)) else: return np.array([]) def __add__(self, b): if not isinstance(b, Kernel): return Sum(self, ConstantKernel(b)) return Sum(self, b) def __radd__(self, b): if not isinstance(b, Kernel): return Sum(ConstantKernel(b), self) return Sum(b, self) def __mul__(self, b): if not isinstance(b, Kernel): return Product(self, ConstantKernel(b)) return Product(self, b) def __rmul__(self, b): if not isinstance(b, Kernel): return Product(ConstantKernel(b), self) return Product(b, self) def __pow__(self, b): return Exponentiation(self, b) def __eq__(self, b): if type(self) != type(b): return False params_a = self.get_params() params_b = b.get_params() for key in set(list(params_a.keys()) + list(params_b.keys())): if np.any(params_a.get(key, None) != params_b.get(key, None)): return False return True def __repr__(self): return "{0}({1})".format(self.__class__.__name__, ", ".join(map("{0:.3g}".format, self.theta))) @abstractmethod def __call__(self, X, Y=None, eval_gradient=False): """Evaluate the kernel.""" @abstractmethod def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. 
Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X) """ @abstractmethod def is_stationary(self): """Returns whether the kernel is stationary. """ class NormalizedKernelMixin(object): """Mixin for kernels which are normalized: k(X, X)=1. .. versionadded:: 0.18 """ def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X) """ return np.ones(X.shape[0]) class StationaryKernelMixin(object): """Mixin for kernels which are stationary: k(X, Y)= f(X-Y). .. versionadded:: 0.18 """ def is_stationary(self): """Returns whether the kernel is stationary. """ return True class CompoundKernel(Kernel): """Kernel which is composed of a set of other kernels. .. versionadded:: 0.18 """ def __init__(self, kernels): self.kernels = kernels def get_params(self, deep=True): """Get parameters of this kernel. Parameters ---------- deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values. """ return dict(kernels=self.kernels) @property def theta(self): """Returns the (flattened, log-transformed) non-fixed hyperparameters. Note that theta are typically the log-transformed values of the kernel's hyperparameters as this representation of the search space is more amenable for hyperparameter search, as hyperparameters like length-scales naturally live on a log-scale. Returns ------- theta : array, shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel """ return np.hstack([kernel.theta for kernel in self.kernels]) @theta.setter def theta(self, theta): """Sets the (flattened, log-transformed) non-fixed hyperparameters. Parameters ---------- theta : array, shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel """ k_dims = self.k1.n_dims for i, kernel in enumerate(self.kernels): kernel.theta = theta[i * k_dims:(i + 1) * k_dims] @property def bounds(self): """Returns the log-transformed bounds on the theta. Returns ------- bounds : array, shape (n_dims, 2) The log-transformed bounds on the kernel's hyperparameters theta """ return np.vstack([kernel.bounds for kernel in self.kernels]) def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Note that this compound kernel returns the results of all simple kernel stacked along an additional axis. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Returns ------- K : array, shape (n_samples_X, n_samples_Y, n_kernels) Kernel k(X, Y) K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. 
Only returned when eval_gradient is True. """ if eval_gradient: K = [] K_grad = [] for kernel in self.kernels: K_single, K_grad_single = kernel(X, Y, eval_gradient) K.append(K_single) K_grad.append(K_grad_single[..., np.newaxis]) return np.dstack(K), np.concatenate(K_grad, 3) else: return np.dstack([kernel(X, Y, eval_gradient) for kernel in self.kernels]) def __eq__(self, b): if type(self) != type(b) or len(self.kernels) != len(b.kernels): return False return np.all([self.kernels[i] == b.kernels[i] for i in range(len(self.kernels))]) def is_stationary(self): """Returns whether the kernel is stationary. """ return np.all([kernel.is_stationary() for kernel in self.kernels]) def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : array, shape (n_samples_X, n_kernels) Diagonal of kernel k(X, X) """ return np.vstack([kernel.diag(X) for kernel in self.kernels]).T class KernelOperator(Kernel): """Base class for all kernel operators. .. versionadded:: 0.18 """ def __init__(self, k1, k2): self.k1 = k1 self.k2 = k2 def get_params(self, deep=True): """Get parameters of this kernel. Parameters ---------- deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values. """ params = dict(k1=self.k1, k2=self.k2) if deep: deep_items = self.k1.get_params().items() params.update(('k1__' + k, val) for k, val in deep_items) deep_items = self.k2.get_params().items() params.update(('k2__' + k, val) for k, val in deep_items) return params @property def hyperparameters(self): """Returns a list of all hyperparameter.""" r = [] for hyperparameter in self.k1.hyperparameters: r.append(Hyperparameter("k1__" + hyperparameter.name, hyperparameter.value_type, hyperparameter.bounds, hyperparameter.n_elements)) for hyperparameter in self.k2.hyperparameters: r.append(Hyperparameter("k2__" + hyperparameter.name, hyperparameter.value_type, hyperparameter.bounds, hyperparameter.n_elements)) return r @property def theta(self): """Returns the (flattened, log-transformed) non-fixed hyperparameters. Note that theta are typically the log-transformed values of the kernel's hyperparameters as this representation of the search space is more amenable for hyperparameter search, as hyperparameters like length-scales naturally live on a log-scale. Returns ------- theta : array, shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel """ return np.append(self.k1.theta, self.k2.theta) @theta.setter def theta(self, theta): """Sets the (flattened, log-transformed) non-fixed hyperparameters. Parameters ---------- theta : array, shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel """ k1_dims = self.k1.n_dims self.k1.theta = theta[:k1_dims] self.k2.theta = theta[k1_dims:] @property def bounds(self): """Returns the log-transformed bounds on the theta. 
Returns ------- bounds : array, shape (n_dims, 2) The log-transformed bounds on the kernel's hyperparameters theta """ if self.k1.bounds.size == 0: return self.k2.bounds if self.k2.bounds.size == 0: return self.k1.bounds return np.vstack((self.k1.bounds, self.k2.bounds)) def __eq__(self, b): if type(self) != type(b): return False return (self.k1 == b.k1 and self.k2 == b.k2) \ or (self.k1 == b.k2 and self.k2 == b.k1) def is_stationary(self): """Returns whether the kernel is stationary. """ return self.k1.is_stationary() and self.k2.is_stationary() class Sum(KernelOperator): """Sum-kernel k1 + k2 of two kernels k1 and k2. The resulting kernel is defined as k_sum(X, Y) = k1(X, Y) + k2(X, Y) .. versionadded:: 0.18 Parameters ---------- k1 : Kernel object The first base-kernel of the sum-kernel k2 : Kernel object The second base-kernel of the sum-kernel """ def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Returns ------- K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True. """ if eval_gradient: K1, K1_gradient = self.k1(X, Y, eval_gradient=True) K2, K2_gradient = self.k2(X, Y, eval_gradient=True) return K1 + K2, np.dstack((K1_gradient, K2_gradient)) else: return self.k1(X, Y) + self.k2(X, Y) def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X) """ return self.k1.diag(X) + self.k2.diag(X) def __repr__(self): return "{0} + {1}".format(self.k1, self.k2) class Product(KernelOperator): """Product-kernel k1 * k2 of two kernels k1 and k2. The resulting kernel is defined as k_prod(X, Y) = k1(X, Y) * k2(X, Y) .. versionadded:: 0.18 Parameters ---------- k1 : Kernel object The first base-kernel of the product-kernel k2 : Kernel object The second base-kernel of the product-kernel """ def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Returns ------- K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True. 
""" if eval_gradient: K1, K1_gradient = self.k1(X, Y, eval_gradient=True) K2, K2_gradient = self.k2(X, Y, eval_gradient=True) return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis], K2_gradient * K1[:, :, np.newaxis])) else: return self.k1(X, Y) * self.k2(X, Y) def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X) """ return self.k1.diag(X) * self.k2.diag(X) def __repr__(self): return "{0} * {1}".format(self.k1, self.k2) class Exponentiation(Kernel): """Exponentiate kernel by given exponent. The resulting kernel is defined as k_exp(X, Y) = k(X, Y) ** exponent .. versionadded:: 0.18 Parameters ---------- kernel : Kernel object The base kernel exponent : float The exponent for the base kernel """ def __init__(self, kernel, exponent): self.kernel = kernel self.exponent = exponent def get_params(self, deep=True): """Get parameters of this kernel. Parameters ---------- deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values. """ params = dict(kernel=self.kernel, exponent=self.exponent) if deep: deep_items = self.kernel.get_params().items() params.update(('kernel__' + k, val) for k, val in deep_items) return params @property def hyperparameters(self): """Returns a list of all hyperparameter.""" r = [] for hyperparameter in self.kernel.hyperparameters: r.append(Hyperparameter("kernel__" + hyperparameter.name, hyperparameter.value_type, hyperparameter.bounds, hyperparameter.n_elements)) return r @property def theta(self): """Returns the (flattened, log-transformed) non-fixed hyperparameters. Note that theta are typically the log-transformed values of the kernel's hyperparameters as this representation of the search space is more amenable for hyperparameter search, as hyperparameters like length-scales naturally live on a log-scale. Returns ------- theta : array, shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel """ return self.kernel.theta @theta.setter def theta(self, theta): """Sets the (flattened, log-transformed) non-fixed hyperparameters. Parameters ---------- theta : array, shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel """ self.kernel.theta = theta @property def bounds(self): """Returns the log-transformed bounds on the theta. Returns ------- bounds : array, shape (n_dims, 2) The log-transformed bounds on the kernel's hyperparameters theta """ return self.kernel.bounds def __eq__(self, b): if type(self) != type(b): return False return (self.kernel == b.kernel and self.exponent == b.exponent) def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. 
Returns ------- K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True. """ if eval_gradient: K, K_gradient = self.kernel(X, Y, eval_gradient=True) K_gradient *= \ self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1) return K ** self.exponent, K_gradient else: K = self.kernel(X, Y, eval_gradient=False) return K ** self.exponent def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X) """ return self.kernel.diag(X) ** self.exponent def __repr__(self): return "{0} ** {1}".format(self.kernel, self.exponent) def is_stationary(self): """Returns whether the kernel is stationary. """ return self.kernel.is_stationary() class ConstantKernel(StationaryKernelMixin, Kernel): """Constant kernel. Can be used as part of a product-kernel where it scales the magnitude of the other factor (kernel) or as part of a sum-kernel, where it modifies the mean of the Gaussian process. k(x_1, x_2) = constant_value for all x_1, x_2 .. versionadded:: 0.18 Parameters ---------- constant_value : float, default: 1.0 The constant value which defines the covariance: k(x_1, x_2) = constant_value constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on constant_value """ def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)): self.constant_value = constant_value self.constant_value_bounds = constant_value_bounds @property def hyperparameter_constant_value(self): return Hyperparameter( "constant_value", "numeric", self.constant_value_bounds) def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. Returns ------- K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True. """ X = np.atleast_2d(X) if Y is None: Y = X elif eval_gradient: raise ValueError("Gradient can only be evaluated when Y is None.") K = self.constant_value * np.ones((X.shape[0], Y.shape[0])) if eval_gradient: if not self.hyperparameter_constant_value.fixed: return (K, self.constant_value * np.ones((X.shape[0], X.shape[0], 1))) else: return K, np.empty((X.shape[0], X.shape[0], 0)) else: return K def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. 
Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X) """ return self.constant_value * np.ones(X.shape[0]) def __repr__(self): return "{0:.3g}**2".format(np.sqrt(self.constant_value)) class WhiteKernel(StationaryKernelMixin, Kernel): """White kernel. The main use-case of this kernel is as part of a sum-kernel where it explains the noise-component of the signal. Tuning its parameter corresponds to estimating the noise-level. k(x_1, x_2) = noise_level if x_1 == x_2 else 0 .. versionadded:: 0.18 Parameters ---------- noise_level : float, default: 1.0 Parameter controlling the noise level noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on noise_level """ def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)): self.noise_level = noise_level self.noise_level_bounds = noise_level_bounds @property def hyperparameter_noise_level(self): return Hyperparameter( "noise_level", "numeric", self.noise_level_bounds) def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. Returns ------- K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True. """ X = np.atleast_2d(X) if Y is not None and eval_gradient: raise ValueError("Gradient can only be evaluated when Y is None.") if Y is None: K = self.noise_level * np.eye(X.shape[0]) if eval_gradient: if not self.hyperparameter_noise_level.fixed: return (K, self.noise_level * np.eye(X.shape[0])[:, :, np.newaxis]) else: return K, np.empty((X.shape[0], X.shape[0], 0)) else: return K else: return np.zeros((X.shape[0], Y.shape[0])) def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X) """ return self.noise_level * np.ones(X.shape[0]) def __repr__(self): return "{0}(noise_level={1:.3g})".format(self.__class__.__name__, self.noise_level) class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel): """Radial-basis function kernel (aka squared-exponential kernel). The RBF kernel is a stationary kernel. It is also known as the "squared exponential" kernel. It is parameterized by a length-scale parameter length_scale>0, which can either be a scalar (isotropic variant of the kernel) or a vector with the same number of dimensions as the inputs X (anisotropic variant of the kernel). 
The kernel is given by: k(x_i, x_j) = exp(-1 / 2 d(x_i / length_scale, x_j / length_scale)^2) This kernel is infinitely differentiable, which implies that GPs with this kernel as covariance function have mean square derivatives of all orders, and are thus very smooth. .. versionadded:: 0.18 Parameters ----------- length_scale : float or array with shape (n_features,), default: 1.0 The length scale of the kernel. If a float, an isotropic kernel is used. If an array, an anisotropic kernel is used where each dimension of l defines the length-scale of the respective feature dimension. length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on length_scale """ def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)): self.length_scale = length_scale self.length_scale_bounds = length_scale_bounds @property def anisotropic(self): return np.iterable(self.length_scale) and len(self.length_scale) > 1 @property def hyperparameter_length_scale(self): if self.anisotropic: return Hyperparameter("length_scale", "numeric", self.length_scale_bounds, len(self.length_scale)) return Hyperparameter( "length_scale", "numeric", self.length_scale_bounds) def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. Returns ------- K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True. """ X = np.atleast_2d(X) length_scale = _check_length_scale(X, self.length_scale) if Y is None: dists = pdist(X / length_scale, metric='sqeuclidean') K = np.exp(-.5 * dists) # convert from upper-triangular matrix to square matrix K = squareform(K) np.fill_diagonal(K, 1) else: if eval_gradient: raise ValueError( "Gradient can only be evaluated when Y is None.") dists = cdist(X / length_scale, Y / length_scale, metric='sqeuclidean') K = np.exp(-.5 * dists) if eval_gradient: if self.hyperparameter_length_scale.fixed: # Hyperparameter l kept fixed return K, np.empty((X.shape[0], X.shape[0], 0)) elif not self.anisotropic or length_scale.shape[0] == 1: K_gradient = \ (K * squareform(dists))[:, :, np.newaxis] return K, K_gradient elif self.anisotropic: # We need to recompute the pairwise dimension-wise distances K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \ / (length_scale ** 2) K_gradient *= K[..., np.newaxis] return K, K_gradient else: return K def __repr__(self): if self.anisotropic: return "{0}(length_scale=[{1}])".format( self.__class__.__name__, ", ".join(map("{0:.3g}".format, self.length_scale))) else: # isotropic return "{0}(length_scale={1:.3g})".format( self.__class__.__name__, np.ravel(self.length_scale)[0]) class Matern(RBF): """ Matern kernel. The class of Matern kernels is a generalization of the RBF and the absolute exponential kernel parameterized by an additional parameter nu. The smaller nu, the less smooth the approximated function is. 
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5 to the absolute exponential kernel. Important intermediate values are nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable functions). See Rasmussen and Williams 2006, pp84 for details regarding the different variants of the Matern kernel. .. versionadded:: 0.18 Parameters ----------- length_scale : float or array with shape (n_features,), default: 1.0 The length scale of the kernel. If a float, an isotropic kernel is used. If an array, an anisotropic kernel is used where each dimension of l defines the length-scale of the respective feature dimension. length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on length_scale nu: float, default: 1.5 The parameter nu controlling the smoothness of the learned function. The smaller nu, the less smooth the approximated function is. For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5 to the absolute exponential kernel. Important intermediate values are nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable functions). Note that values of nu not in [0.5, 1.5, 2.5, inf] incur a considerably higher computational cost (appr. 10 times higher) since they require to evaluate the modified Bessel function. Furthermore, in contrast to l, nu is kept fixed to its initial value and not optimized. """ def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5), nu=1.5): super(Matern, self).__init__(length_scale, length_scale_bounds) self.nu = nu def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. Returns ------- K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True. """ X = np.atleast_2d(X) length_scale = _check_length_scale(X, self.length_scale) if Y is None: dists = pdist(X / length_scale, metric='euclidean') else: if eval_gradient: raise ValueError( "Gradient can only be evaluated when Y is None.") dists = cdist(X / length_scale, Y / length_scale, metric='euclidean') if self.nu == 0.5: K = np.exp(-dists) elif self.nu == 1.5: K = dists * math.sqrt(3) K = (1. + K) * np.exp(-K) elif self.nu == 2.5: K = dists * math.sqrt(5) K = (1. + K + K ** 2 / 3.0) * np.exp(-K) else: # general case; expensive to evaluate K = dists K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan tmp = (math.sqrt(2 * self.nu) * K) K.fill((2 ** (1. 
- self.nu)) / gamma(self.nu)) K *= tmp ** self.nu K *= kv(self.nu, tmp) if Y is None: # convert from upper-triangular matrix to square matrix K = squareform(K) np.fill_diagonal(K, 1) if eval_gradient: if self.hyperparameter_length_scale.fixed: # Hyperparameter l kept fixed K_gradient = np.empty((X.shape[0], X.shape[0], 0)) return K, K_gradient # We need to recompute the pairwise dimension-wise distances if self.anisotropic: D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \ / (length_scale ** 2) else: D = squareform(dists**2)[:, :, np.newaxis] if self.nu == 0.5: K_gradient = K[..., np.newaxis] * D \ / np.sqrt(D.sum(2))[:, :, np.newaxis] K_gradient[~np.isfinite(K_gradient)] = 0 elif self.nu == 1.5: K_gradient = \ 3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis] elif self.nu == 2.5: tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis] K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp) else: # approximate gradient numerically def f(theta): # helper function return self.clone_with_theta(theta)(X, Y) return K, _approx_fprime(self.theta, f, 1e-10) if not self.anisotropic: return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis] else: return K, K_gradient else: return K def __repr__(self): if self.anisotropic: return "{0}(length_scale=[{1}], nu={2:.3g})".format( self.__class__.__name__, ", ".join(map("{0:.3g}".format, self.length_scale)), self.nu) else: return "{0}(length_scale={1:.3g}, nu={2:.3g})".format( self.__class__.__name__, np.ravel(self.length_scale)[0], self.nu) class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel): """Rational Quadratic kernel. The RationalQuadratic kernel can be seen as a scale mixture (an infinite sum) of RBF kernels with different characteristic length-scales. It is parameterized by a length-scale parameter length_scale>0 and a scale mixture parameter alpha>0. Only the isotropic variant where length_scale is a scalar is supported at the moment. The kernel given by: k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha .. versionadded:: 0.18 Parameters ---------- length_scale : float > 0, default: 1.0 The length scale of the kernel. alpha : float > 0, default: 1.0 Scale mixture parameter length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on length_scale alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on alpha """ def __init__(self, length_scale=1.0, alpha=1.0, length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)): self.length_scale = length_scale self.alpha = alpha self.length_scale_bounds = length_scale_bounds self.alpha_bounds = alpha_bounds @property def hyperparameter_length_scale(self): return Hyperparameter( "length_scale", "numeric", self.length_scale_bounds) @property def hyperparameter_alpha(self): return Hyperparameter("alpha", "numeric", self.alpha_bounds) def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. 
Returns ------- K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True. """ X = np.atleast_2d(X) if Y is None: dists = squareform(pdist(X, metric='sqeuclidean')) tmp = dists / (2 * self.alpha * self.length_scale ** 2) base = (1 + tmp) K = base ** -self.alpha np.fill_diagonal(K, 1) else: if eval_gradient: raise ValueError( "Gradient can only be evaluated when Y is None.") dists = cdist(X, Y, metric='sqeuclidean') K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \ ** -self.alpha if eval_gradient: # gradient with respect to length_scale if not self.hyperparameter_length_scale.fixed: length_scale_gradient = \ dists * K / (self.length_scale ** 2 * base) length_scale_gradient = length_scale_gradient[:, :, np.newaxis] else: # l is kept fixed length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0)) # gradient with respect to alpha if not self.hyperparameter_alpha.fixed: alpha_gradient = \ K * (-self.alpha * np.log(base) + dists / (2 * self.length_scale ** 2 * base)) alpha_gradient = alpha_gradient[:, :, np.newaxis] else: # alpha is kept fixed alpha_gradient = np.empty((K.shape[0], K.shape[1], 0)) return K, np.dstack((alpha_gradient, length_scale_gradient)) else: return K def __repr__(self): return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format( self.__class__.__name__, self.alpha, self.length_scale) class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel): """Exp-Sine-Squared kernel. The ExpSineSquared kernel allows modeling periodic functions. It is parameterized by a length-scale parameter length_scale>0 and a periodicity parameter periodicity>0. Only the isotropic variant where l is a scalar is supported at the moment. The kernel given by: k(x_i, x_j) = exp(-2 (sin(\pi / periodicity * d(x_i, x_j)) / length_scale) ^ 2) .. versionadded:: 0.18 Parameters ---------- length_scale : float > 0, default: 1.0 The length scale of the kernel. periodicity : float > 0, default: 1.0 The periodicity of the kernel. length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on length_scale periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on periodicity """ def __init__(self, length_scale=1.0, periodicity=1.0, length_scale_bounds=(1e-5, 1e5), periodicity_bounds=(1e-5, 1e5)): self.length_scale = length_scale self.periodicity = periodicity self.length_scale_bounds = length_scale_bounds self.periodicity_bounds = periodicity_bounds @property def hyperparameter_length_scale(self): return Hyperparameter( "length_scale", "numeric", self.length_scale_bounds) @property def hyperparameter_periodicity(self): return Hyperparameter( "periodicity", "numeric", self.periodicity_bounds) def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. 
Returns ------- K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True. """ X = np.atleast_2d(X) if Y is None: dists = squareform(pdist(X, metric='euclidean')) arg = np.pi * dists / self.periodicity sin_of_arg = np.sin(arg) K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2) else: if eval_gradient: raise ValueError( "Gradient can only be evaluated when Y is None.") dists = cdist(X, Y, metric='euclidean') K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists) / self.length_scale) ** 2) if eval_gradient: cos_of_arg = np.cos(arg) # gradient with respect to length_scale if not self.hyperparameter_length_scale.fixed: length_scale_gradient = \ 4 / self.length_scale**2 * sin_of_arg**2 * K length_scale_gradient = length_scale_gradient[:, :, np.newaxis] else: # length_scale is kept fixed length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0)) # gradient with respect to p if not self.hyperparameter_periodicity.fixed: periodicity_gradient = \ 4 * arg / self.length_scale**2 * cos_of_arg \ * sin_of_arg * K periodicity_gradient = periodicity_gradient[:, :, np.newaxis] else: # p is kept fixed periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0)) return K, np.dstack((length_scale_gradient, periodicity_gradient)) else: return K def __repr__(self): return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format( self.__class__.__name__, self.length_scale, self.periodicity) class DotProduct(Kernel): """Dot-Product kernel. The DotProduct kernel is non-stationary and can be obtained from linear regression by putting N(0, 1) priors on the coefficients of x_d (d = 1, . . . , D) and a prior of N(0, \sigma_0^2) on the bias. The DotProduct kernel is invariant to a rotation of the coordinates about the origin, but not translations. It is parameterized by a parameter sigma_0^2. For sigma_0^2 =0, the kernel is called the homogeneous linear kernel, otherwise it is inhomogeneous. The kernel is given by k(x_i, x_j) = sigma_0 ^ 2 + x_i \cdot x_j The DotProduct kernel is commonly combined with exponentiation. .. versionadded:: 0.18 Parameters ---------- sigma_0 : float >= 0, default: 1.0 Parameter controlling the inhomogenity of the kernel. If sigma_0=0, the kernel is homogenous. sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on l """ def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)): self.sigma_0 = sigma_0 self.sigma_0_bounds = sigma_0_bounds @property def hyperparameter_sigma_0(self): return Hyperparameter("sigma_0", "numeric", self.sigma_0_bounds) def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. Returns ------- K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True. 
""" X = np.atleast_2d(X) if Y is None: K = np.inner(X, X) + self.sigma_0 ** 2 else: if eval_gradient: raise ValueError( "Gradient can only be evaluated when Y is None.") K = np.inner(X, Y) + self.sigma_0 ** 2 if eval_gradient: if not self.hyperparameter_sigma_0.fixed: K_gradient = np.empty((K.shape[0], K.shape[1], 1)) K_gradient[..., 0] = 2 * self.sigma_0 ** 2 return K, K_gradient else: return K, np.empty((X.shape[0], X.shape[0], 0)) else: return K def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X) """ return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2 def is_stationary(self): """Returns whether the kernel is stationary. """ return False def __repr__(self): return "{0}(sigma_0={1:.3g})".format( self.__class__.__name__, self.sigma_0) # adapted from scipy/optimize/optimize.py for functions with 2d output def _approx_fprime(xk, f, epsilon, args=()): f0 = f(*((xk,) + args)) grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float) ei = np.zeros((len(xk), ), float) for k in range(len(xk)): ei[k] = 1.0 d = epsilon * ei grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k] ei[k] = 0.0 return grad class PairwiseKernel(Kernel): """Wrapper for kernels in sklearn.metrics.pairwise. A thin wrapper around the functionality of the kernels in sklearn.metrics.pairwise. Note: Evaluation of eval_gradient is not analytic but numeric and all kernels support only isotropic distances. The parameter gamma is considered to be a hyperparameter and may be optimized. The other kernel parameters are set directly at initialization and are kept fixed. .. versionadded:: 0.18 Parameters ---------- gamma: float >= 0, default: 1.0 Parameter gamma of the pairwise kernel specified by metric gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on gamma metric : string, or callable, default: "linear" The metric to use when calculating kernel between instances in a feature array. If metric is a string, it must be one of the metrics in pairwise.PAIRWISE_KERNEL_FUNCTIONS. If metric is "precomputed", X is assumed to be a kernel matrix. Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. pairwise_kernels_kwargs : dict, default: None All entries of this dict (if any) are passed as keyword arguments to the pairwise kernel function. """ def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear", pairwise_kernels_kwargs=None): self.gamma = gamma self.gamma_bounds = gamma_bounds self.metric = metric self.pairwise_kernels_kwargs = pairwise_kernels_kwargs @property def hyperparameter_gamma(self): return Hyperparameter("gamma", "numeric", self.gamma_bounds) def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. 
eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. Returns ------- K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True. """ pairwise_kernels_kwargs = self.pairwise_kernels_kwargs if self.pairwise_kernels_kwargs is None: pairwise_kernels_kwargs = {} X = np.atleast_2d(X) K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma, filter_params=True, **pairwise_kernels_kwargs) if eval_gradient: if self.hyperparameter_gamma.fixed: return K, np.empty((X.shape[0], X.shape[0], 0)) else: # approximate gradient numerically def f(gamma): # helper function return pairwise_kernels( X, Y, metric=self.metric, gamma=np.exp(gamma), filter_params=True, **pairwise_kernels_kwargs) return K, _approx_fprime(self.theta, f, 1e-10) else: return K def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X) """ # We have to fall back to slow way of computing diagonal return np.apply_along_axis(self, 1, X).ravel() def is_stationary(self): """Returns whether the kernel is stationary. """ return self.metric in ["rbf"] def __repr__(self): return "{0}(gamma={1}, metric={2})".format( self.__class__.__name__, self.gamma, self.metric)
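# Minimal usage sketch (illustrative only) of the kernel-engineering API
# defined above: "+" and "*" build Sum/Product kernels, scalars are promoted
# to ConstantKernel, and the free hyperparameters are exposed on a log scale
# through .theta and .bounds.  Assumes a scikit-learn installation (>= 0.18)
# so these public classes import as shown; the printed repr is indicative
# rather than exact.
if __name__ == "__main__":
    import numpy as np
    from sklearn.gaussian_process.kernels import (
        RBF, WhiteKernel, ConstantKernel)

    # 1**2 * RBF(2) + WhiteKernel(0.1): Product and Sum are created
    # implicitly by the overloaded operators of the Kernel base class.
    kernel = (ConstantKernel(1.0, (1e-3, 1e3)) * RBF(length_scale=2.0)
              + WhiteKernel(noise_level=0.1))

    print(kernel)         # e.g. 1**2 * RBF(length_scale=2) + WhiteKernel(noise_level=0.1)
    print(kernel.theta)   # log-transformed non-fixed hyperparameters
    print(kernel.bounds)  # log-transformed optimization bounds, shape (n_dims, 2)

    # Kernel matrix and its gradient w.r.t. the log-hyperparameters.
    X = np.array([[0.0], [1.0], [2.0]])
    K, K_gradient = kernel(X, eval_gradient=True)
    print(K.shape, K_gradient.shape)  # (3, 3) (3, 3, 3)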
67177
35.020375
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/gaussian_process/regression_models.py
# -*- coding: utf-8 -*- # Author: Vincent Dubourg <[email protected]> # (mostly translation, see implementation details) # License: BSD 3 clause """ The built-in regression models submodule for the gaussian_process module. """ import numpy as np from ..utils import deprecated @deprecated("The function constant of regression_models is " "deprecated in version 0.19.1 and will be removed in 0.22.") def constant(x): """ Zero order polynomial (constant, p = 1) regression model. x --> f(x) = 1 Parameters ---------- x : array_like An array with shape (n_eval, n_features) giving the locations x at which the regression model should be evaluated. Returns ------- f : array_like An array with shape (n_eval, p) with the values of the regression model. """ x = np.asarray(x, dtype=np.float64) n_eval = x.shape[0] f = np.ones([n_eval, 1]) return f @deprecated("The function linear of regression_models is " "deprecated in version 0.19.1 and will be removed in 0.22.") def linear(x): """ First order polynomial (linear, p = n+1) regression model. x --> f(x) = [ 1, x_1, ..., x_n ].T Parameters ---------- x : array_like An array with shape (n_eval, n_features) giving the locations x at which the regression model should be evaluated. Returns ------- f : array_like An array with shape (n_eval, p) with the values of the regression model. """ x = np.asarray(x, dtype=np.float64) n_eval = x.shape[0] f = np.hstack([np.ones([n_eval, 1]), x]) return f @deprecated("The function quadratic of regression_models is " "deprecated in version 0.19.1 and will be removed in 0.22.") def quadratic(x): """ Second order polynomial (quadratic, p = n*(n-1)/2+n+1) regression model. x --> f(x) = [ 1, { x_i, i = 1,...,n }, { x_i * x_j, (i,j) = 1,...,n } ].T i > j Parameters ---------- x : array_like An array with shape (n_eval, n_features) giving the locations x at which the regression model should be evaluated. Returns ------- f : array_like An array with shape (n_eval, p) with the values of the regression model. """ x = np.asarray(x, dtype=np.float64) n_eval, n_features = x.shape f = np.hstack([np.ones([n_eval, 1]), x]) for k in range(n_features): f = np.hstack([f, x[:, k, np.newaxis] * x[:, k:]]) return f
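# Minimal usage sketch (illustrative only) of the deprecated regression
# basis functions above.  Assumes a scikit-learn version (< 0.20) in which
# this module still exists; each call emits a DeprecationWarning but still
# returns the design matrix.
if __name__ == "__main__":
    import numpy as np
    from sklearn.gaussian_process import regression_models as rm

    X = np.array([[1., 2.], [3., 4.], [5., 6.]])  # 3 samples, 2 features
    print(rm.constant(X).shape)   # (3, 1): a column of ones
    print(rm.linear(X).shape)     # (3, 3): [1, x_1, x_2] per row
    print(rm.quadratic(X).shape)  # (3, 6): adds x_1^2, x_1*x_2, x_2^2
    print(rm.quadratic(X)[0])     # [1. 1. 2. 1. 2. 4.]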
2604
25.85567
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/gaussian_process/gaussian_process.py
# -*- coding: utf-8 -*- # Author: Vincent Dubourg <[email protected]> # (mostly translation, see implementation details) # License: BSD 3 clause from __future__ import print_function import numpy as np from scipy import linalg, optimize from ..base import BaseEstimator, RegressorMixin from ..metrics.pairwise import manhattan_distances from ..utils import check_random_state, check_array, check_X_y from ..utils.validation import check_is_fitted from . import regression_models as regression from . import correlation_models as correlation from ..utils import deprecated MACHINE_EPSILON = np.finfo(np.double).eps @deprecated("l1_cross_distances was deprecated in version 0.18 " "and will be removed in 0.20.") def l1_cross_distances(X): """ Computes the nonzero componentwise L1 cross-distances between the vectors in X. Parameters ---------- X : array_like An array with shape (n_samples, n_features) Returns ------- D : array with shape (n_samples * (n_samples - 1) / 2, n_features) The array of componentwise L1 cross-distances. ij : arrays with shape (n_samples * (n_samples - 1) / 2, 2) The indices i and j of the vectors in X associated to the cross- distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]). """ X = check_array(X) n_samples, n_features = X.shape n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2 ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int) D = np.zeros((n_nonzero_cross_dist, n_features)) ll_1 = 0 for k in range(n_samples - 1): ll_0 = ll_1 ll_1 = ll_0 + n_samples - k - 1 ij[ll_0:ll_1, 0] = k ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples) D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples]) return D, ij @deprecated("GaussianProcess was deprecated in version 0.18 and will be " "removed in 0.20. Use the GaussianProcessRegressor instead.") class GaussianProcess(BaseEstimator, RegressorMixin): """The legacy Gaussian Process model class. .. deprecated:: 0.18 This class will be removed in 0.20. Use the :class:`GaussianProcessRegressor` instead. Read more in the :ref:`User Guide <gaussian_process>`. Parameters ---------- regr : string or callable, optional A regression function returning an array of outputs of the linear regression functional basis. The number of observations n_samples should be greater than the size p of this basis. Default assumes a simple constant regression trend. Available built-in regression models are:: 'constant', 'linear', 'quadratic' corr : string or callable, optional A stationary autocorrelation function returning the autocorrelation between two points x and x'. Default assumes a squared-exponential autocorrelation model. Built-in correlation models are:: 'absolute_exponential', 'squared_exponential', 'generalized_exponential', 'cubic', 'linear' beta0 : double array_like, optional The regression weight vector to perform Ordinary Kriging (OK). Default assumes Universal Kriging (UK) so that the vector beta of regression weights is estimated using the maximum likelihood principle. storage_mode : string, optional A string specifying whether the Cholesky decomposition of the correlation matrix should be stored in the class (storage_mode = 'full') or not (storage_mode = 'light'). Default assumes storage_mode = 'full', so that the Cholesky decomposition of the correlation matrix is stored. This might be a useful parameter when one is not interested in the MSE and only plan to estimate the BLUP, for which the correlation matrix is not required. verbose : boolean, optional A boolean specifying the verbose level. Default is verbose = False. 
theta0 : double array_like, optional An array with shape (n_features, ) or (1, ). The parameters in the autocorrelation model. If thetaL and thetaU are also specified, theta0 is considered as the starting point for the maximum likelihood estimation of the best set of parameters. Default assumes isotropic autocorrelation model with theta0 = 1e-1. thetaL : double array_like, optional An array with shape matching theta0's. Lower bound on the autocorrelation parameters for maximum likelihood estimation. Default is None, so that it skips maximum likelihood estimation and it uses theta0. thetaU : double array_like, optional An array with shape matching theta0's. Upper bound on the autocorrelation parameters for maximum likelihood estimation. Default is None, so that it skips maximum likelihood estimation and it uses theta0. normalize : boolean, optional Input X and observations y are centered and reduced wrt means and standard deviations estimated from the n_samples observations provided. Default is normalize = True so that data is normalized to ease maximum likelihood estimation. nugget : double or ndarray, optional Introduce a nugget effect to allow smooth predictions from noisy data. If nugget is an ndarray, it must be the same length as the number of data points used for the fit. The nugget is added to the diagonal of the assumed training covariance; in this way it acts as a Tikhonov regularization in the problem. In the special case of the squared exponential correlation function, the nugget mathematically represents the variance of the input values. Default assumes a nugget close to machine precision for the sake of robustness (nugget = 10. * MACHINE_EPSILON). optimizer : string, optional A string specifying the optimization algorithm to be used. Default uses 'fmin_cobyla' algorithm from scipy.optimize. Available optimizers are:: 'fmin_cobyla', 'Welch' 'Welch' optimizer is dued to Welch et al., see reference [WBSWM1992]_. It consists in iterating over several one-dimensional optimizations instead of running one single multi-dimensional optimization. random_start : int, optional The number of times the Maximum Likelihood Estimation should be performed from a random starting point. The first MLE always uses the specified starting point (theta0), the next starting points are picked at random according to an exponential distribution (log-uniform on [thetaL, thetaU]). Default does not use random starting point (random_start = 1). random_state : int, RandomState instance or None, optional (default=None) The generator used to shuffle the sequence of coordinates of theta in the Welch optimizer. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- theta_ : array Specified theta OR the best set of autocorrelation parameters (the \ sought maximizer of the reduced likelihood function). reduced_likelihood_function_value_ : array The optimal reduced likelihood function value. Examples -------- >>> import numpy as np >>> from sklearn.gaussian_process import GaussianProcess >>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T >>> y = (X * np.sin(X)).ravel() >>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.) >>> gp.fit(X, y) # doctest: +ELLIPSIS GaussianProcess(beta0=None... ... Notes ----- The presentation implementation is based on a translation of the DACE Matlab toolbox, see reference [NLNS2002]_. 
References ---------- .. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J. Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002) http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf .. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell, and M.D. Morris (1992). Screening, predicting, and computer experiments. Technometrics, 34(1) 15--25.` http://www.jstor.org/stable/1269548 """ _regression_types = { 'constant': regression.constant, 'linear': regression.linear, 'quadratic': regression.quadratic} _correlation_types = { 'absolute_exponential': correlation.absolute_exponential, 'squared_exponential': correlation.squared_exponential, 'generalized_exponential': correlation.generalized_exponential, 'cubic': correlation.cubic, 'linear': correlation.linear} _optimizer_types = [ 'fmin_cobyla', 'Welch'] def __init__(self, regr='constant', corr='squared_exponential', beta0=None, storage_mode='full', verbose=False, theta0=1e-1, thetaL=None, thetaU=None, optimizer='fmin_cobyla', random_start=1, normalize=True, nugget=10. * MACHINE_EPSILON, random_state=None): self.regr = regr self.corr = corr self.beta0 = beta0 self.storage_mode = storage_mode self.verbose = verbose self.theta0 = theta0 self.thetaL = thetaL self.thetaU = thetaU self.normalize = normalize self.nugget = nugget self.optimizer = optimizer self.random_start = random_start self.random_state = random_state def fit(self, X, y): """ The Gaussian Process model fitting method. Parameters ---------- X : double array_like An array with shape (n_samples, n_features) with the input at which observations were made. y : double array_like An array with shape (n_samples, ) or shape (n_samples, n_targets) with the observations of the output to be predicted. Returns ------- gp : self A fitted Gaussian Process model object awaiting data to perform predictions. """ # Run input checks self._check_params() self.random_state = check_random_state(self.random_state) # Force data to 2D numpy.array X, y = check_X_y(X, y, multi_output=True, y_numeric=True) self.y_ndim_ = y.ndim if y.ndim == 1: y = y[:, np.newaxis] # Check shapes of DOE & observations n_samples, n_features = X.shape _, n_targets = y.shape # Run input checks self._check_params(n_samples) # Normalize data or don't if self.normalize: X_mean = np.mean(X, axis=0) X_std = np.std(X, axis=0) y_mean = np.mean(y, axis=0) y_std = np.std(y, axis=0) X_std[X_std == 0.] = 1. y_std[y_std == 0.] = 1. # center and scale X if necessary X = (X - X_mean) / X_std y = (y - y_mean) / y_std else: X_mean = np.zeros(1) X_std = np.ones(1) y_mean = np.zeros(1) y_std = np.ones(1) # Calculate matrix of distances D between samples D, ij = l1_cross_distances(X) if (np.min(np.sum(D, axis=1)) == 0. and self.corr != correlation.pure_nugget): raise Exception("Multiple input features cannot have the same" " target value.") # Regression matrix and parameters F = self.regr(X) n_samples_F = F.shape[0] if F.ndim > 1: p = F.shape[1] else: p = 1 if n_samples_F != n_samples: raise Exception("Number of rows in F and X do not match. 
Most " "likely something is going wrong with the " "regression model.") if p > n_samples_F: raise Exception(("Ordinary least squares problem is undetermined " "n_samples=%d must be greater than the " "regression model size p=%d.") % (n_samples, p)) if self.beta0 is not None: if self.beta0.shape[0] != p: raise Exception("Shapes of beta0 and F do not match.") # Set attributes self.X = X self.y = y self.D = D self.ij = ij self.F = F self.X_mean, self.X_std = X_mean, X_std self.y_mean, self.y_std = y_mean, y_std # Determine Gaussian Process model parameters if self.thetaL is not None and self.thetaU is not None: # Maximum Likelihood Estimation of the parameters if self.verbose: print("Performing Maximum Likelihood Estimation of the " "autocorrelation parameters...") self.theta_, self.reduced_likelihood_function_value_, par = \ self._arg_max_reduced_likelihood_function() if np.isinf(self.reduced_likelihood_function_value_): raise Exception("Bad parameter region. " "Try increasing upper bound") else: # Given parameters if self.verbose: print("Given autocorrelation parameters. " "Computing Gaussian Process model parameters...") self.theta_ = self.theta0 self.reduced_likelihood_function_value_, par = \ self.reduced_likelihood_function() if np.isinf(self.reduced_likelihood_function_value_): raise Exception("Bad point. Try increasing theta0.") self.beta = par['beta'] self.gamma = par['gamma'] self.sigma2 = par['sigma2'] self.C = par['C'] self.Ft = par['Ft'] self.G = par['G'] if self.storage_mode == 'light': # Delete heavy data (it will be computed again if required) # (it is required only when MSE is wanted in self.predict) if self.verbose: print("Light storage mode specified. " "Flushing autocorrelation matrix...") self.D = None self.ij = None self.F = None self.C = None self.Ft = None self.G = None return self def predict(self, X, eval_MSE=False, batch_size=None): """ This function evaluates the Gaussian Process model at x. Parameters ---------- X : array_like An array with shape (n_eval, n_features) giving the point(s) at which the prediction(s) should be made. eval_MSE : boolean, optional A boolean specifying whether the Mean Squared Error should be evaluated or not. Default assumes evalMSE = False and evaluates only the BLUP (mean prediction). batch_size : integer, optional An integer giving the maximum number of points that can be evaluated simultaneously (depending on the available memory). Default is None so that all given points are evaluated at the same time. Returns ------- y : array_like, shape (n_samples, ) or (n_samples, n_targets) An array with shape (n_eval, ) if the Gaussian Process was trained on an array of shape (n_samples, ) or an array with shape (n_eval, n_targets) if the Gaussian Process was trained on an array of shape (n_samples, n_targets) with the Best Linear Unbiased Prediction at x. MSE : array_like, optional (if eval_MSE == True) An array with shape (n_eval, ) or (n_eval, n_targets) as with y, with the Mean Squared Error at x. 
""" check_is_fitted(self, "X") # Check input shapes X = check_array(X) n_eval, _ = X.shape n_samples, n_features = self.X.shape n_samples_y, n_targets = self.y.shape # Run input checks self._check_params(n_samples) if X.shape[1] != n_features: raise ValueError(("The number of features in X (X.shape[1] = %d) " "should match the number of features used " "for fit() " "which is %d.") % (X.shape[1], n_features)) if batch_size is None: # No memory management # (evaluates all given points in a single batch run) # Normalize input X = (X - self.X_mean) / self.X_std # Initialize output y = np.zeros(n_eval) if eval_MSE: MSE = np.zeros(n_eval) # Get pairwise componentwise L1-distances to the input training set dx = manhattan_distances(X, Y=self.X, sum_over_features=False) # Get regression function and correlation f = self.regr(X) r = self.corr(self.theta_, dx).reshape(n_eval, n_samples) # Scaled predictor y_ = np.dot(f, self.beta) + np.dot(r, self.gamma) # Predictor y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets) if self.y_ndim_ == 1: y = y.ravel() # Mean Squared Error if eval_MSE: C = self.C if C is None: # Light storage mode (need to recompute C, F, Ft and G) if self.verbose: print("This GaussianProcess used 'light' storage mode " "at instantiation. Need to recompute " "autocorrelation matrix...") reduced_likelihood_function_value, par = \ self.reduced_likelihood_function() self.C = par['C'] self.Ft = par['Ft'] self.G = par['G'] rt = linalg.solve_triangular(self.C, r.T, lower=True) if self.beta0 is None: # Universal Kriging u = linalg.solve_triangular(self.G.T, np.dot(self.Ft.T, rt) - f.T, lower=True) else: # Ordinary Kriging u = np.zeros((n_targets, n_eval)) MSE = np.dot(self.sigma2.reshape(n_targets, 1), (1. - (rt ** 2.).sum(axis=0) + (u ** 2.).sum(axis=0))[np.newaxis, :]) MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets) # Mean Squared Error might be slightly negative depending on # machine precision: force to zero! MSE[MSE < 0.] = 0. if self.y_ndim_ == 1: MSE = MSE.ravel() return y, MSE else: return y else: # Memory management if type(batch_size) is not int or batch_size <= 0: raise Exception("batch_size must be a positive integer") if eval_MSE: y, MSE = np.zeros(n_eval), np.zeros(n_eval) for k in range(max(1, int(n_eval / batch_size))): batch_from = k * batch_size batch_to = min([(k + 1) * batch_size + 1, n_eval + 1]) y[batch_from:batch_to], MSE[batch_from:batch_to] = \ self.predict(X[batch_from:batch_to], eval_MSE=eval_MSE, batch_size=None) return y, MSE else: y = np.zeros(n_eval) for k in range(max(1, int(n_eval / batch_size))): batch_from = k * batch_size batch_to = min([(k + 1) * batch_size + 1, n_eval + 1]) y[batch_from:batch_to] = \ self.predict(X[batch_from:batch_to], eval_MSE=eval_MSE, batch_size=None) return y def reduced_likelihood_function(self, theta=None): """ This function determines the BLUP parameters and evaluates the reduced likelihood function for the given autocorrelation parameters theta. Maximizing this function wrt the autocorrelation parameters theta is equivalent to maximizing the likelihood of the assumed joint Gaussian distribution of the observations y evaluated onto the design of experiments X. Parameters ---------- theta : array_like, optional An array containing the autocorrelation parameters at which the Gaussian Process model parameters should be determined. Default uses the built-in autocorrelation parameters (ie ``theta = self.theta_``). 
Returns ------- reduced_likelihood_function_value : double The value of the reduced likelihood function associated to the given autocorrelation parameters theta. par : dict A dictionary containing the requested Gaussian Process model parameters: - ``sigma2`` is the Gaussian Process variance. - ``beta`` is the generalized least-squares regression weights for Universal Kriging or given beta0 for Ordinary Kriging. - ``gamma`` is the Gaussian Process weights. - ``C`` is the Cholesky decomposition of the correlation matrix [R]. - ``Ft`` is the solution of the linear equation system [R] x Ft = F - ``G`` is the QR decomposition of the matrix Ft. """ check_is_fitted(self, "X") if theta is None: # Use built-in autocorrelation parameters theta = self.theta_ # Initialize output reduced_likelihood_function_value = - np.inf par = {} # Retrieve data n_samples = self.X.shape[0] D = self.D ij = self.ij F = self.F if D is None: # Light storage mode (need to recompute D, ij and F) D, ij = l1_cross_distances(self.X) if (np.min(np.sum(D, axis=1)) == 0. and self.corr != correlation.pure_nugget): raise Exception("Multiple X are not allowed") F = self.regr(self.X) # Set up R r = self.corr(theta, D) R = np.eye(n_samples) * (1. + self.nugget) R[ij[:, 0], ij[:, 1]] = r R[ij[:, 1], ij[:, 0]] = r # Cholesky decomposition of R try: C = linalg.cholesky(R, lower=True) except linalg.LinAlgError: return reduced_likelihood_function_value, par # Get generalized least squares solution Ft = linalg.solve_triangular(C, F, lower=True) Q, G = linalg.qr(Ft, mode='economic') sv = linalg.svd(G, compute_uv=False) rcondG = sv[-1] / sv[0] if rcondG < 1e-10: # Check F sv = linalg.svd(F, compute_uv=False) condF = sv[0] / sv[-1] if condF > 1e15: raise Exception("F is too ill conditioned. Poor combination " "of regression model and observations.") else: # Ft is too ill conditioned, get out (try different theta) return reduced_likelihood_function_value, par Yt = linalg.solve_triangular(C, self.y, lower=True) if self.beta0 is None: # Universal Kriging beta = linalg.solve_triangular(G, np.dot(Q.T, Yt)) else: # Ordinary Kriging beta = np.array(self.beta0) rho = Yt - np.dot(Ft, beta) sigma2 = (rho ** 2.).sum(axis=0) / n_samples # The determinant of R is equal to the squared product of the diagonal # elements of its Cholesky decomposition C detR = (np.diag(C) ** (2. / n_samples)).prod() # Compute/Organize output reduced_likelihood_function_value = - sigma2.sum() * detR par['sigma2'] = sigma2 * self.y_std ** 2. par['beta'] = beta par['gamma'] = linalg.solve_triangular(C.T, rho) par['C'] = C par['Ft'] = Ft par['G'] = G return reduced_likelihood_function_value, par def _arg_max_reduced_likelihood_function(self): """ This function estimates the autocorrelation parameters theta as the maximizer of the reduced likelihood function. (Minimization of the opposite reduced likelihood function is used for convenience) Parameters ---------- self : All parameters are stored in the Gaussian Process model object. Returns ------- optimal_theta : array_like The best set of autocorrelation parameters (the sought maximizer of the reduced likelihood function). optimal_reduced_likelihood_function_value : double The optimal reduced likelihood function value. optimal_par : dict The BLUP parameters associated to thetaOpt. 
""" # Initialize output best_optimal_theta = [] best_optimal_rlf_value = [] best_optimal_par = [] if self.verbose: print("The chosen optimizer is: " + str(self.optimizer)) if self.random_start > 1: print(str(self.random_start) + " random starts are required.") percent_completed = 0. # Force optimizer to fmin_cobyla if the model is meant to be isotropic if self.optimizer == 'Welch' and self.theta0.size == 1: self.optimizer = 'fmin_cobyla' if self.optimizer == 'fmin_cobyla': def minus_reduced_likelihood_function(log10t): return - self.reduced_likelihood_function( theta=10. ** log10t)[0] constraints = [] for i in range(self.theta0.size): constraints.append(lambda log10t, i=i: log10t[i] - np.log10(self.thetaL[0, i])) constraints.append(lambda log10t, i=i: np.log10(self.thetaU[0, i]) - log10t[i]) for k in range(self.random_start): if k == 0: # Use specified starting point as first guess theta0 = self.theta0 else: # Generate a random starting point log10-uniformly # distributed between bounds log10theta0 = (np.log10(self.thetaL) + self.random_state.rand(*self.theta0.shape) * np.log10(self.thetaU / self.thetaL)) theta0 = 10. ** log10theta0 # Run Cobyla try: log10_optimal_theta = \ optimize.fmin_cobyla(minus_reduced_likelihood_function, np.log10(theta0).ravel(), constraints, disp=0) except ValueError as ve: print("Optimization failed. Try increasing the ``nugget``") raise ve optimal_theta = 10. ** log10_optimal_theta optimal_rlf_value, optimal_par = \ self.reduced_likelihood_function(theta=optimal_theta) # Compare the new optimizer to the best previous one if k > 0: if optimal_rlf_value > best_optimal_rlf_value: best_optimal_rlf_value = optimal_rlf_value best_optimal_par = optimal_par best_optimal_theta = optimal_theta else: best_optimal_rlf_value = optimal_rlf_value best_optimal_par = optimal_par best_optimal_theta = optimal_theta if self.verbose and self.random_start > 1: if (20 * k) / self.random_start > percent_completed: percent_completed = (20 * k) / self.random_start print("%s completed" % (5 * percent_completed)) optimal_rlf_value = best_optimal_rlf_value optimal_par = best_optimal_par optimal_theta = best_optimal_theta elif self.optimizer == 'Welch': # Backup of the given attributes theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU corr = self.corr verbose = self.verbose # This will iterate over fmin_cobyla optimizer self.optimizer = 'fmin_cobyla' self.verbose = False # Initialize under isotropy assumption if verbose: print("Initialize under isotropy assumption...") self.theta0 = check_array(self.theta0.min()) self.thetaL = check_array(self.thetaL.min()) self.thetaU = check_array(self.thetaU.max()) theta_iso, optimal_rlf_value_iso, par_iso = \ self._arg_max_reduced_likelihood_function() optimal_theta = theta_iso + np.zeros(theta0.shape) # Iterate over all dimensions of theta allowing for anisotropy if verbose: print("Now improving allowing for anisotropy...") for i in self.random_state.permutation(theta0.size): if verbose: print("Proceeding along dimension %d..." 
% (i + 1)) self.theta0 = check_array(theta_iso) self.thetaL = check_array(thetaL[0, i]) self.thetaU = check_array(thetaU[0, i]) def corr_cut(t, d): return corr(check_array(np.hstack([optimal_theta[0][0:i], t[0], optimal_theta[0][(i + 1)::]])), d) self.corr = corr_cut optimal_theta[0, i], optimal_rlf_value, optimal_par = \ self._arg_max_reduced_likelihood_function() # Restore the given attributes self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU self.corr = corr self.optimizer = 'Welch' self.verbose = verbose else: raise NotImplementedError("This optimizer ('%s') is not " "implemented yet. Please contribute!" % self.optimizer) return optimal_theta, optimal_rlf_value, optimal_par def _check_params(self, n_samples=None): # Check regression model if not callable(self.regr): if self.regr in self._regression_types: self.regr = self._regression_types[self.regr] else: raise ValueError("regr should be one of %s or callable, " "%s was given." % (self._regression_types.keys(), self.regr)) # Check regression weights if given (Ordinary Kriging) if self.beta0 is not None: self.beta0 = np.atleast_2d(self.beta0) if self.beta0.shape[1] != 1: # Force to column vector self.beta0 = self.beta0.T # Check correlation model if not callable(self.corr): if self.corr in self._correlation_types: self.corr = self._correlation_types[self.corr] else: raise ValueError("corr should be one of %s or callable, " "%s was given." % (self._correlation_types.keys(), self.corr)) # Check storage mode if self.storage_mode != 'full' and self.storage_mode != 'light': raise ValueError("Storage mode should either be 'full' or " "'light', %s was given." % self.storage_mode) # Check correlation parameters self.theta0 = np.atleast_2d(self.theta0) lth = self.theta0.size if self.thetaL is not None and self.thetaU is not None: self.thetaL = np.atleast_2d(self.thetaL) self.thetaU = np.atleast_2d(self.thetaU) if self.thetaL.size != lth or self.thetaU.size != lth: raise ValueError("theta0, thetaL and thetaU must have the " "same length.") if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL): raise ValueError("The bounds must satisfy O < thetaL <= " "thetaU.") elif self.thetaL is None and self.thetaU is None: if np.any(self.theta0 <= 0): raise ValueError("theta0 must be strictly positive.") elif self.thetaL is None or self.thetaU is None: raise ValueError("thetaL and thetaU should either be both or " "neither specified.") # Force verbose type to bool self.verbose = bool(self.verbose) # Force normalize type to bool self.normalize = bool(self.normalize) # Check nugget value self.nugget = np.asarray(self.nugget) if np.any(self.nugget) < 0.: raise ValueError("nugget must be positive or zero.") if (n_samples is not None and self.nugget.shape not in [(), (n_samples,)]): raise ValueError("nugget must be either a scalar " "or array of length n_samples.") # Check optimizer if self.optimizer not in self._optimizer_types: raise ValueError("optimizer should be one of %s" % self._optimizer_types) # Force random_start type to int self.random_start = int(self.random_start)
34,769
38.155405
125
py
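For orientation, here is a minimal usage sketch of the legacy `GaussianProcess` estimator whose source is dumped in the record above. It is not part of the repository file; the toy data is invented, and only the `fit` / `predict(..., eval_MSE=True)` signatures documented in that file are relied on.

```python
import numpy as np
from sklearn.gaussian_process import GaussianProcess  # deprecated API shown above

# Invented 1-D toy regression problem (illustration only)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
y = (X * np.sin(X)).ravel()

# theta0/thetaL/thetaU trigger maximum-likelihood estimation of the
# autocorrelation parameters, as described in the docstring above.
gp = GaussianProcess(theta0=1e-1, thetaL=1e-3, thetaU=1., nugget=1e-10)
gp.fit(X, y)

# predict() returns the BLUP; eval_MSE=True additionally returns the MSE,
# from which a pointwise standard deviation can be derived.
x_new = np.atleast_2d(np.linspace(0., 10., 50)).T
y_pred, mse = gp.predict(x_new, eval_MSE=True)
sigma = np.sqrt(mse)
```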
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/gaussian_process/gpr.py
"""Gaussian processes regression. """ # Authors: Jan Hendrik Metzen <[email protected]> # # License: BSD 3 clause import warnings from operator import itemgetter import numpy as np from scipy.linalg import cholesky, cho_solve, solve_triangular from scipy.optimize import fmin_l_bfgs_b from sklearn.base import BaseEstimator, RegressorMixin, clone from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C from sklearn.utils import check_random_state from sklearn.utils.validation import check_X_y, check_array from sklearn.utils.deprecation import deprecated class GaussianProcessRegressor(BaseEstimator, RegressorMixin): """Gaussian process regression (GPR). The implementation is based on Algorithm 2.1 of Gaussian Processes for Machine Learning (GPML) by Rasmussen and Williams. In addition to standard scikit-learn estimator API, GaussianProcessRegressor: * allows prediction without prior fitting (based on the GP prior) * provides an additional method sample_y(X), which evaluates samples drawn from the GPR (prior or posterior) at given inputs * exposes a method log_marginal_likelihood(theta), which can be used externally for other ways of selecting hyperparameters, e.g., via Markov chain Monte Carlo. Read more in the :ref:`User Guide <gaussian_process>`. .. versionadded:: 0.18 Parameters ---------- kernel : kernel object The kernel specifying the covariance function of the GP. If None is passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that the kernel's hyperparameters are optimized during fitting. alpha : float or array-like, optional (default: 1e-10) Value added to the diagonal of the kernel matrix during fitting. Larger values correspond to increased noise level in the observations. This can also prevent a potential numerical issue during fitting, by ensuring that the calculated values form a positive definite matrix. If an array is passed, it must have the same number of entries as the data used for fitting and is used as datapoint-dependent noise level. Note that this is equivalent to adding a WhiteKernel with c=alpha. Allowing to specify the noise level directly as a parameter is mainly for convenience and for consistency with Ridge. optimizer : string or callable, optional (default: "fmin_l_bfgs_b") Can either be one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable. If a callable is passed, it must have the signature:: def optimizer(obj_func, initial_theta, bounds): # * 'obj_func' is the objective function to be maximized, which # takes the hyperparameters theta as parameter and an # optional flag eval_gradient, which determines if the # gradient is returned additionally to the function value # * 'initial_theta': the initial value for theta, which can be # used by local optimizers # * 'bounds': the bounds on the values of theta .... # Returned are the best found hyperparameters theta and # the corresponding value of the target function. return theta_opt, func_min Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize is used. If None is passed, the kernel's parameters are kept fixed. Available internal optimizers are:: 'fmin_l_bfgs_b' n_restarts_optimizer : int, optional (default: 0) The number of restarts of the optimizer for finding the kernel's parameters which maximize the log-marginal likelihood. 
The first run of the optimizer is performed from the kernel's initial parameters, the remaining ones (if any) from thetas sampled log-uniform randomly from the space of allowed theta-values. If greater than 0, all bounds must be finite. Note that n_restarts_optimizer == 0 implies that one run is performed. normalize_y : boolean, optional (default: False) Whether the target values y are normalized, i.e., the mean of the observed target values become zero. This parameter should be set to True if the target values' mean is expected to differ considerable from zero. When enabled, the normalization effectively modifies the GP's prior based on the data, which contradicts the likelihood principle; normalization is thus disabled per default. copy_X_train : bool, optional (default: True) If True, a persistent copy of the training data is stored in the object. Otherwise, just a reference to the training data is stored, which might cause predictions to change if the data is modified externally. random_state : int, RandomState instance or None, optional (default: None) The generator used to initialize the centers. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- X_train_ : array-like, shape = (n_samples, n_features) Feature values in training data (also required for prediction) y_train_ : array-like, shape = (n_samples, [n_output_dims]) Target values in training data (also required for prediction) kernel_ : kernel object The kernel used for prediction. The structure of the kernel is the same as the one passed as parameter but with optimized hyperparameters L_ : array-like, shape = (n_samples, n_samples) Lower-triangular Cholesky decomposition of the kernel in ``X_train_`` alpha_ : array-like, shape = (n_samples,) Dual coefficients of training data points in kernel space log_marginal_likelihood_value_ : float The log-marginal-likelihood of ``self.kernel_.theta`` """ def __init__(self, kernel=None, alpha=1e-10, optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0, normalize_y=False, copy_X_train=True, random_state=None): self.kernel = kernel self.alpha = alpha self.optimizer = optimizer self.n_restarts_optimizer = n_restarts_optimizer self.normalize_y = normalize_y self.copy_X_train = copy_X_train self.random_state = random_state @property @deprecated("Attribute rng was deprecated in version 0.19 and " "will be removed in 0.21.") def rng(self): return self._rng @property @deprecated("Attribute y_train_mean was deprecated in version 0.19 and " "will be removed in 0.21.") def y_train_mean(self): return self._y_train_mean def fit(self, X, y): """Fit Gaussian process regression model. Parameters ---------- X : array-like, shape = (n_samples, n_features) Training data y : array-like, shape = (n_samples, [n_output_dims]) Target values Returns ------- self : returns an instance of self. 
""" if self.kernel is None: # Use an RBF kernel as default self.kernel_ = C(1.0, constant_value_bounds="fixed") \ * RBF(1.0, length_scale_bounds="fixed") else: self.kernel_ = clone(self.kernel) self._rng = check_random_state(self.random_state) X, y = check_X_y(X, y, multi_output=True, y_numeric=True) # Normalize target value if self.normalize_y: self._y_train_mean = np.mean(y, axis=0) # demean y y = y - self._y_train_mean else: self._y_train_mean = np.zeros(1) if np.iterable(self.alpha) \ and self.alpha.shape[0] != y.shape[0]: if self.alpha.shape[0] == 1: self.alpha = self.alpha[0] else: raise ValueError("alpha must be a scalar or an array" " with same number of entries as y.(%d != %d)" % (self.alpha.shape[0], y.shape[0])) self.X_train_ = np.copy(X) if self.copy_X_train else X self.y_train_ = np.copy(y) if self.copy_X_train else y if self.optimizer is not None and self.kernel_.n_dims > 0: # Choose hyperparameters based on maximizing the log-marginal # likelihood (potentially starting from several initial values) def obj_func(theta, eval_gradient=True): if eval_gradient: lml, grad = self.log_marginal_likelihood( theta, eval_gradient=True) return -lml, -grad else: return -self.log_marginal_likelihood(theta) # First optimize starting from theta specified in kernel optima = [(self._constrained_optimization(obj_func, self.kernel_.theta, self.kernel_.bounds))] # Additional runs are performed from log-uniform chosen initial # theta if self.n_restarts_optimizer > 0: if not np.isfinite(self.kernel_.bounds).all(): raise ValueError( "Multiple optimizer restarts (n_restarts_optimizer>0) " "requires that all bounds are finite.") bounds = self.kernel_.bounds for iteration in range(self.n_restarts_optimizer): theta_initial = \ self._rng.uniform(bounds[:, 0], bounds[:, 1]) optima.append( self._constrained_optimization(obj_func, theta_initial, bounds)) # Select result from run with minimal (negative) log-marginal # likelihood lml_values = list(map(itemgetter(1), optima)) self.kernel_.theta = optima[np.argmin(lml_values)][0] self.log_marginal_likelihood_value_ = -np.min(lml_values) else: self.log_marginal_likelihood_value_ = \ self.log_marginal_likelihood(self.kernel_.theta) # Precompute quantities required for predictions which are independent # of actual query points K = self.kernel_(self.X_train_) K[np.diag_indices_from(K)] += self.alpha try: self.L_ = cholesky(K, lower=True) # Line 2 except np.linalg.LinAlgError as exc: exc.args = ("The kernel, %s, is not returning a " "positive definite matrix. Try gradually " "increasing the 'alpha' parameter of your " "GaussianProcessRegressor estimator." % self.kernel_,) + exc.args raise self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3 return self def predict(self, X, return_std=False, return_cov=False): """Predict using the Gaussian process regression model We can also predict based on an unfitted model by using the GP prior. In addition to the mean of the predictive distribution, also its standard deviation (return_std=True) or covariance (return_cov=True). Note that at most one of the two can be requested. Parameters ---------- X : array-like, shape = (n_samples, n_features) Query points where the GP is evaluated return_std : bool, default: False If True, the standard-deviation of the predictive distribution at the query points is returned along with the mean. 
return_cov : bool, default: False If True, the covariance of the joint predictive distribution at the query points is returned along with the mean Returns ------- y_mean : array, shape = (n_samples, [n_output_dims]) Mean of predictive distribution a query points y_std : array, shape = (n_samples,), optional Standard deviation of predictive distribution at query points. Only returned when return_std is True. y_cov : array, shape = (n_samples, n_samples), optional Covariance of joint predictive distribution a query points. Only returned when return_cov is True. """ if return_std and return_cov: raise RuntimeError( "Not returning standard deviation of predictions when " "returning full covariance.") X = check_array(X) if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior if self.kernel is None: kernel = (C(1.0, constant_value_bounds="fixed") * RBF(1.0, length_scale_bounds="fixed")) else: kernel = self.kernel y_mean = np.zeros(X.shape[0]) if return_cov: y_cov = kernel(X) return y_mean, y_cov elif return_std: y_var = kernel.diag(X) return y_mean, np.sqrt(y_var) else: return y_mean else: # Predict based on GP posterior K_trans = self.kernel_(X, self.X_train_) y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star) y_mean = self._y_train_mean + y_mean # undo normal. if return_cov: v = cho_solve((self.L_, True), K_trans.T) # Line 5 y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6 return y_mean, y_cov elif return_std: # compute inverse K_inv of K based on its Cholesky # decomposition L and its inverse L_inv L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0])) K_inv = L_inv.dot(L_inv.T) # Compute variance of predictive distribution y_var = self.kernel_.diag(X) y_var -= np.einsum("ij,ij->i", np.dot(K_trans, K_inv), K_trans) # Check if any of the variances is negative because of # numerical issues. If yes: set the variance to 0. y_var_negative = y_var < 0 if np.any(y_var_negative): warnings.warn("Predicted variances smaller than 0. " "Setting those variances to 0.") y_var[y_var_negative] = 0.0 return y_mean, np.sqrt(y_var) else: return y_mean def sample_y(self, X, n_samples=1, random_state=0): """Draw samples from Gaussian process and evaluate at X. Parameters ---------- X : array-like, shape = (n_samples_X, n_features) Query points where the GP samples are evaluated n_samples : int, default: 1 The number of samples drawn from the Gaussian process random_state : int, RandomState instance or None, optional (default=0) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples) Values of n_samples samples drawn from Gaussian process and evaluated at query points. """ rng = check_random_state(random_state) y_mean, y_cov = self.predict(X, return_cov=True) if y_mean.ndim == 1: y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T else: y_samples = \ [rng.multivariate_normal(y_mean[:, i], y_cov, n_samples).T[:, np.newaxis] for i in range(y_mean.shape[1])] y_samples = np.hstack(y_samples) return y_samples def log_marginal_likelihood(self, theta=None, eval_gradient=False): """Returns log-marginal likelihood of theta for training data. Parameters ---------- theta : array-like, shape = (n_kernel_params,) or None Kernel hyperparameters for which the log-marginal likelihood is evaluated. 
If None, the precomputed log_marginal_likelihood of ``self.kernel_.theta`` is returned. eval_gradient : bool, default: False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. If True, theta must not be None. Returns ------- log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : array, shape = (n_kernel_params,), optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when eval_gradient is True. """ if theta is None: if eval_gradient: raise ValueError( "Gradient can only be evaluated for theta!=None") return self.log_marginal_likelihood_value_ kernel = self.kernel_.clone_with_theta(theta) if eval_gradient: K, K_gradient = kernel(self.X_train_, eval_gradient=True) else: K = kernel(self.X_train_) K[np.diag_indices_from(K)] += self.alpha try: L = cholesky(K, lower=True) # Line 2 except np.linalg.LinAlgError: return (-np.inf, np.zeros_like(theta)) \ if eval_gradient else -np.inf # Support multi-dimensional output of self.y_train_ y_train = self.y_train_ if y_train.ndim == 1: y_train = y_train[:, np.newaxis] alpha = cho_solve((L, True), y_train) # Line 3 # Compute log-likelihood (compare line 7) log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha) log_likelihood_dims -= np.log(np.diag(L)).sum() log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi) log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions if eval_gradient: # compare Equation 5.9 from GPML tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis] # Compute "0.5 * trace(tmp.dot(K_gradient))" without # constructing the full matrix tmp.dot(K_gradient) since only # its diagonal is required log_likelihood_gradient_dims = \ 0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient) log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1) if eval_gradient: return log_likelihood, log_likelihood_gradient else: return log_likelihood def _constrained_optimization(self, obj_func, initial_theta, bounds): if self.optimizer == "fmin_l_bfgs_b": theta_opt, func_min, convergence_dict = \ fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds) if convergence_dict["warnflag"] != 0: warnings.warn("fmin_l_bfgs_b terminated abnormally with the " " state: %s" % convergence_dict) elif callable(self.optimizer): theta_opt, func_min = \ self.optimizer(obj_func, initial_theta, bounds=bounds) else: raise ValueError("Unknown optimizer %s." % self.optimizer) return theta_opt, func_min
20,273
42.6
79
py
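The `gpr.py` record above describes the `GaussianProcessRegressor` API (kernel object, `alpha`, `n_restarts_optimizer`, `predict(..., return_std=True)`). A minimal sketch of how that API is typically exercised, with invented toy data and an arbitrary kernel choice, might look like this:

```python
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

# Invented toy data for illustration
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
y = (X * np.sin(X)).ravel()

# Constant * RBF kernel; hyperparameters are tuned by maximizing the
# log-marginal likelihood, restarted from 5 random initial thetas.
kernel = C(1.0, (1e-3, 1e3)) * RBF(length_scale=1.0,
                                   length_scale_bounds=(1e-2, 1e2))
gpr = GaussianProcessRegressor(kernel=kernel, alpha=1e-10,
                               n_restarts_optimizer=5)
gpr.fit(X, y)

# Posterior mean and pointwise standard deviation at new inputs
x_new = np.atleast_2d(np.linspace(0., 10., 100)).T
y_mean, y_std = gpr.predict(x_new, return_std=True)
```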
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/gaussian_process/__init__.py
# -*- coding: utf-8 -*-

# Author: Jan Hendrik Metzen <[email protected]>
#         Vincent Dubourg <[email protected]>
#         (mostly translation, see implementation details)
# License: BSD 3 clause

"""
The :mod:`sklearn.gaussian_process` module implements Gaussian Process
based regression and classification.
"""

from .gpr import GaussianProcessRegressor
from .gpc import GaussianProcessClassifier
from . import kernels

from .gaussian_process import GaussianProcess
from . import correlation_models
from . import regression_models

__all__ = ['GaussianProcess', 'correlation_models', 'regression_models',
           'GaussianProcessRegressor', 'GaussianProcessClassifier',
           'kernels']
723
29.166667
72
py
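The `__init__.py` record above also re-exports the `kernels` module. As a small illustrative sketch (assuming only the kernel classes already imported elsewhere in these files: `RBF`, `ConstantKernel`, `WhiteKernel`), kernels compose by arithmetic and expose their free hyperparameters:

```python
from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel

# Signal variance * RBF covariance, plus an independent white-noise term
kernel = ConstantKernel(1.0, (1e-3, 1e3)) * RBF(length_scale=1.0) \
    + WhiteKernel(noise_level=1e-5)

print(kernel)                  # human-readable composite kernel expression
print(kernel.theta)            # log-transformed free hyperparameters
print(kernel.hyperparameters)  # names and bounds of those hyperparameters
```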
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/gaussian_process/gpc.py
"""Gaussian processes classification.""" # Authors: Jan Hendrik Metzen <[email protected]> # # License: BSD 3 clause import warnings from operator import itemgetter import numpy as np from scipy.linalg import cholesky, cho_solve, solve from scipy.optimize import fmin_l_bfgs_b from scipy.special import erf, expit from sklearn.base import BaseEstimator, ClassifierMixin, clone from sklearn.gaussian_process.kernels \ import RBF, CompoundKernel, ConstantKernel as C from sklearn.utils.validation import check_X_y, check_is_fitted, check_array from sklearn.utils import check_random_state from sklearn.preprocessing import LabelEncoder from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier # Values required for approximating the logistic sigmoid by # error functions. coefs are obtained via: # x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf]) # b = logistic(x) # A = (erf(np.dot(x, self.lambdas)) + 1) / 2 # coefs = lstsq(A, b)[0] LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis] COEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712, 128.12323805, -2010.49422654])[:, np.newaxis] class _BinaryGaussianProcessClassifierLaplace(BaseEstimator): """Binary Gaussian process classification based on Laplace approximation. The implementation is based on Algorithm 3.1, 3.2, and 5.1 of ``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and Williams. Internally, the Laplace approximation is used for approximating the non-Gaussian posterior by a Gaussian. Currently, the implementation is restricted to using the logistic link function. .. versionadded:: 0.18 Parameters ---------- kernel : kernel object The kernel specifying the covariance function of the GP. If None is passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that the kernel's hyperparameters are optimized during fitting. optimizer : string or callable, optional (default: "fmin_l_bfgs_b") Can either be one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable. If a callable is passed, it must have the signature:: def optimizer(obj_func, initial_theta, bounds): # * 'obj_func' is the objective function to be maximized, which # takes the hyperparameters theta as parameter and an # optional flag eval_gradient, which determines if the # gradient is returned additionally to the function value # * 'initial_theta': the initial value for theta, which can be # used by local optimizers # * 'bounds': the bounds on the values of theta .... # Returned are the best found hyperparameters theta and # the corresponding value of the target function. return theta_opt, func_min Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize is used. If None is passed, the kernel's parameters are kept fixed. Available internal optimizers are:: 'fmin_l_bfgs_b' n_restarts_optimizer: int, optional (default: 0) The number of restarts of the optimizer for finding the kernel's parameters which maximize the log-marginal likelihood. The first run of the optimizer is performed from the kernel's initial parameters, the remaining ones (if any) from thetas sampled log-uniform randomly from the space of allowed theta-values. If greater than 0, all bounds must be finite. Note that n_restarts_optimizer=0 implies that one run is performed. max_iter_predict: int, optional (default: 100) The maximum number of iterations in Newton's method for approximating the posterior during predict. 
Smaller values will reduce computation time at the cost of worse results. warm_start : bool, optional (default: False) If warm-starts are enabled, the solution of the last Newton iteration on the Laplace approximation of the posterior mode is used as initialization for the next call of _posterior_mode(). This can speed up convergence when _posterior_mode is called several times on similar problems as in hyperparameter optimization. copy_X_train : bool, optional (default: True) If True, a persistent copy of the training data is stored in the object. Otherwise, just a reference to the training data is stored, which might cause predictions to change if the data is modified externally. random_state : int, RandomState instance or None, optional (default: None) The generator used to initialize the centers. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- X_train_ : array-like, shape = (n_samples, n_features) Feature values in training data (also required for prediction) y_train_ : array-like, shape = (n_samples,) Target values in training data (also required for prediction) classes_ : array-like, shape = (n_classes,) Unique class labels. kernel_ : kernel object The kernel used for prediction. The structure of the kernel is the same as the one passed as parameter but with optimized hyperparameters L_ : array-like, shape = (n_samples, n_samples) Lower-triangular Cholesky decomposition of the kernel in X_train_ pi_ : array-like, shape = (n_samples,) The probabilities of the positive class for the training points X_train_ W_sr_ : array-like, shape = (n_samples,) Square root of W, the Hessian of log-likelihood of the latent function values for the observed labels. Since W is diagonal, only the diagonal of sqrt(W) is stored. log_marginal_likelihood_value_ : float The log-marginal-likelihood of ``self.kernel_.theta`` """ def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0, max_iter_predict=100, warm_start=False, copy_X_train=True, random_state=None): self.kernel = kernel self.optimizer = optimizer self.n_restarts_optimizer = n_restarts_optimizer self.max_iter_predict = max_iter_predict self.warm_start = warm_start self.copy_X_train = copy_X_train self.random_state = random_state def fit(self, X, y): """Fit Gaussian process classification model Parameters ---------- X : array-like, shape = (n_samples, n_features) Training data y : array-like, shape = (n_samples,) Target values, must be binary Returns ------- self : returns an instance of self. """ if self.kernel is None: # Use an RBF kernel as default self.kernel_ = C(1.0, constant_value_bounds="fixed") \ * RBF(1.0, length_scale_bounds="fixed") else: self.kernel_ = clone(self.kernel) self.rng = check_random_state(self.random_state) self.X_train_ = np.copy(X) if self.copy_X_train else X # Encode class labels and check that it is a binary classification # problem label_encoder = LabelEncoder() self.y_train_ = label_encoder.fit_transform(y) self.classes_ = label_encoder.classes_ if self.classes_.size > 2: raise ValueError("%s supports only binary classification. 
" "y contains classes %s" % (self.__class__.__name__, self.classes_)) elif self.classes_.size == 1: raise ValueError("{0:s} requires 2 classes.".format( self.__class__.__name__)) if self.optimizer is not None and self.kernel_.n_dims > 0: # Choose hyperparameters based on maximizing the log-marginal # likelihood (potentially starting from several initial values) def obj_func(theta, eval_gradient=True): if eval_gradient: lml, grad = self.log_marginal_likelihood( theta, eval_gradient=True) return -lml, -grad else: return -self.log_marginal_likelihood(theta) # First optimize starting from theta specified in kernel optima = [self._constrained_optimization(obj_func, self.kernel_.theta, self.kernel_.bounds)] # Additional runs are performed from log-uniform chosen initial # theta if self.n_restarts_optimizer > 0: if not np.isfinite(self.kernel_.bounds).all(): raise ValueError( "Multiple optimizer restarts (n_restarts_optimizer>0) " "requires that all bounds are finite.") bounds = self.kernel_.bounds for iteration in range(self.n_restarts_optimizer): theta_initial = np.exp(self.rng.uniform(bounds[:, 0], bounds[:, 1])) optima.append( self._constrained_optimization(obj_func, theta_initial, bounds)) # Select result from run with minimal (negative) log-marginal # likelihood lml_values = list(map(itemgetter(1), optima)) self.kernel_.theta = optima[np.argmin(lml_values)][0] self.log_marginal_likelihood_value_ = -np.min(lml_values) else: self.log_marginal_likelihood_value_ = \ self.log_marginal_likelihood(self.kernel_.theta) # Precompute quantities required for predictions which are independent # of actual query points K = self.kernel_(self.X_train_) _, (self.pi_, self.W_sr_, self.L_, _, _) = \ self._posterior_mode(K, return_temporaries=True) return self def predict(self, X): """Perform classification on an array of test vectors X. Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- C : array, shape = (n_samples,) Predicted target values for X, values are from ``classes_`` """ check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"]) # As discussed on Section 3.4.2 of GPML, for making hard binary # decisions, it is enough to compute the MAP of the posterior and # pass it through the link function K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star) f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4 return np.where(f_star > 0, self.classes_[1], self.classes_[0]) def predict_proba(self, X): """Return probability estimates for the test vector X. Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- C : array-like, shape = (n_samples, n_classes) Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute ``classes_``. """ check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"]) # Based on Algorithm 3.2 of GPML K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star) f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4 v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5 # Line 6 (compute np.diag(v.T.dot(v)) via einsum) var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v) # Line 7: # Approximate \int log(z) * N(z | f_star, var_f_star) # Approximation is due to Williams & Barber, "Bayesian Classification # with Gaussian Processes", Appendix A: Approximate the logistic # sigmoid by a linear combination of 5 error functions. 
# For information on how this integral can be computed see # blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html alpha = 1 / (2 * var_f_star) gamma = LAMBDAS * f_star integrals = np.sqrt(np.pi / alpha) \ * erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \ / (2 * np.sqrt(var_f_star * 2 * np.pi)) pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum() return np.vstack((1 - pi_star, pi_star)).T def log_marginal_likelihood(self, theta=None, eval_gradient=False): """Returns log-marginal likelihood of theta for training data. Parameters ---------- theta : array-like, shape = (n_kernel_params,) or None Kernel hyperparameters for which the log-marginal likelihood is evaluated. If None, the precomputed log_marginal_likelihood of ``self.kernel_.theta`` is returned. eval_gradient : bool, default: False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. If True, theta must not be None. Returns ------- log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : array, shape = (n_kernel_params,), optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when eval_gradient is True. """ if theta is None: if eval_gradient: raise ValueError( "Gradient can only be evaluated for theta!=None") return self.log_marginal_likelihood_value_ kernel = self.kernel_.clone_with_theta(theta) if eval_gradient: K, K_gradient = kernel(self.X_train_, eval_gradient=True) else: K = kernel(self.X_train_) # Compute log-marginal-likelihood Z and also store some temporaries # which can be reused for computing Z's gradient Z, (pi, W_sr, L, b, a) = \ self._posterior_mode(K, return_temporaries=True) if not eval_gradient: return Z # Compute gradient based on Algorithm 5.1 of GPML d_Z = np.empty(theta.shape[0]) # XXX: Get rid of the np.diag() in the next line R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7 C = solve(L, W_sr[:, np.newaxis] * K) # Line 8 # Line 9: (use einsum to compute np.diag(C.T.dot(C)))) s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \ * (pi * (1 - pi) * (1 - 2 * pi)) # third derivative for j in range(d_Z.shape[0]): C = K_gradient[:, :, j] # Line 11 # Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C))) s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel()) b = C.dot(self.y_train_ - pi) # Line 13 s_3 = b - K.dot(R.dot(b)) # Line 14 d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15 return Z, d_Z def _posterior_mode(self, K, return_temporaries=False): """Mode-finding for binary Laplace GPC and fixed kernel. This approximates the posterior of the latent function values for given inputs and target observations with a Gaussian approximation and uses Newton's iteration to find the mode of this approximation. 
""" # Based on Algorithm 3.1 of GPML # If warm_start are enabled, we reuse the last solution for the # posterior mode as initialization; otherwise, we initialize with 0 if self.warm_start and hasattr(self, "f_cached") \ and self.f_cached.shape == self.y_train_.shape: f = self.f_cached else: f = np.zeros_like(self.y_train_, dtype=np.float64) # Use Newton's iteration method to find mode of Laplace approximation log_marginal_likelihood = -np.inf for _ in range(self.max_iter_predict): # Line 4 pi = expit(f) W = pi * (1 - pi) # Line 5 W_sr = np.sqrt(W) W_sr_K = W_sr[:, np.newaxis] * K B = np.eye(W.shape[0]) + W_sr_K * W_sr L = cholesky(B, lower=True) # Line 6 b = W * f + (self.y_train_ - pi) # Line 7 a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b)) # Line 8 f = K.dot(a) # Line 10: Compute log marginal likelihood in loop and use as # convergence criterion lml = -0.5 * a.T.dot(f) \ - np.log(1 + np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \ - np.log(np.diag(L)).sum() # Check if we have converged (log marginal likelihood does # not decrease) # XXX: more complex convergence criterion if lml - log_marginal_likelihood < 1e-10: break log_marginal_likelihood = lml self.f_cached = f # Remember solution for later warm-starts if return_temporaries: return log_marginal_likelihood, (pi, W_sr, L, b, a) else: return log_marginal_likelihood def _constrained_optimization(self, obj_func, initial_theta, bounds): if self.optimizer == "fmin_l_bfgs_b": theta_opt, func_min, convergence_dict = \ fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds) if convergence_dict["warnflag"] != 0: warnings.warn("fmin_l_bfgs_b terminated abnormally with the " " state: %s" % convergence_dict) elif callable(self.optimizer): theta_opt, func_min = \ self.optimizer(obj_func, initial_theta, bounds=bounds) else: raise ValueError("Unknown optimizer %s." % self.optimizer) return theta_opt, func_min class GaussianProcessClassifier(BaseEstimator, ClassifierMixin): """Gaussian process classification (GPC) based on Laplace approximation. The implementation is based on Algorithm 3.1, 3.2, and 5.1 of Gaussian Processes for Machine Learning (GPML) by Rasmussen and Williams. Internally, the Laplace approximation is used for approximating the non-Gaussian posterior by a Gaussian. Currently, the implementation is restricted to using the logistic link function. For multi-class classification, several binary one-versus rest classifiers are fitted. Note that this class thus does not implement a true multi-class Laplace approximation. Parameters ---------- kernel : kernel object The kernel specifying the covariance function of the GP. If None is passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that the kernel's hyperparameters are optimized during fitting. optimizer : string or callable, optional (default: "fmin_l_bfgs_b") Can either be one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable. If a callable is passed, it must have the signature:: def optimizer(obj_func, initial_theta, bounds): # * 'obj_func' is the objective function to be maximized, which # takes the hyperparameters theta as parameter and an # optional flag eval_gradient, which determines if the # gradient is returned additionally to the function value # * 'initial_theta': the initial value for theta, which can be # used by local optimizers # * 'bounds': the bounds on the values of theta .... 
# Returned are the best found hyperparameters theta and # the corresponding value of the target function. return theta_opt, func_min Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize is used. If None is passed, the kernel's parameters are kept fixed. Available internal optimizers are:: 'fmin_l_bfgs_b' n_restarts_optimizer : int, optional (default: 0) The number of restarts of the optimizer for finding the kernel's parameters which maximize the log-marginal likelihood. The first run of the optimizer is performed from the kernel's initial parameters, the remaining ones (if any) from thetas sampled log-uniform randomly from the space of allowed theta-values. If greater than 0, all bounds must be finite. Note that n_restarts_optimizer=0 implies that one run is performed. max_iter_predict : int, optional (default: 100) The maximum number of iterations in Newton's method for approximating the posterior during predict. Smaller values will reduce computation time at the cost of worse results. warm_start : bool, optional (default: False) If warm-starts are enabled, the solution of the last Newton iteration on the Laplace approximation of the posterior mode is used as initialization for the next call of _posterior_mode(). This can speed up convergence when _posterior_mode is called several times on similar problems as in hyperparameter optimization. copy_X_train : bool, optional (default: True) If True, a persistent copy of the training data is stored in the object. Otherwise, just a reference to the training data is stored, which might cause predictions to change if the data is modified externally. random_state : int, RandomState instance or None, optional (default: None) The generator used to initialize the centers. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. multi_class : string, default : "one_vs_rest" Specifies how multi-class classification problems are handled. Supported are "one_vs_rest" and "one_vs_one". In "one_vs_rest", one binary Gaussian process classifier is fitted for each class, which is trained to separate this class from the rest. In "one_vs_one", one binary Gaussian process classifier is fitted for each pair of classes, which is trained to separate these two classes. The predictions of these binary predictors are combined into multi-class predictions. Note that "one_vs_one" does not support predicting probability estimates. n_jobs : int, optional, default: 1 The number of jobs to use for the computation. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. Attributes ---------- kernel_ : kernel object The kernel used for prediction. In case of binary classification, the structure of the kernel is the same as the one passed as parameter but with optimized hyperparameters. In case of multi-class classification, a CompoundKernel is returned which consists of the different kernels used in the one-versus-rest classifiers. log_marginal_likelihood_value_ : float The log-marginal-likelihood of ``self.kernel_.theta`` classes_ : array-like, shape = (n_classes,) Unique class labels. n_classes_ : int The number of classes in the training data .. 
versionadded:: 0.18 """ def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0, max_iter_predict=100, warm_start=False, copy_X_train=True, random_state=None, multi_class="one_vs_rest", n_jobs=1): self.kernel = kernel self.optimizer = optimizer self.n_restarts_optimizer = n_restarts_optimizer self.max_iter_predict = max_iter_predict self.warm_start = warm_start self.copy_X_train = copy_X_train self.random_state = random_state self.multi_class = multi_class self.n_jobs = n_jobs def fit(self, X, y): """Fit Gaussian process classification model Parameters ---------- X : array-like, shape = (n_samples, n_features) Training data y : array-like, shape = (n_samples,) Target values, must be binary Returns ------- self : returns an instance of self. """ X, y = check_X_y(X, y, multi_output=False) self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace( self.kernel, self.optimizer, self.n_restarts_optimizer, self.max_iter_predict, self.warm_start, self.copy_X_train, self.random_state) self.classes_ = np.unique(y) self.n_classes_ = self.classes_.size if self.n_classes_ == 1: raise ValueError("GaussianProcessClassifier requires 2 or more " "distinct classes. Only class %s present." % self.classes_[0]) if self.n_classes_ > 2: if self.multi_class == "one_vs_rest": self.base_estimator_ = \ OneVsRestClassifier(self.base_estimator_, n_jobs=self.n_jobs) elif self.multi_class == "one_vs_one": self.base_estimator_ = \ OneVsOneClassifier(self.base_estimator_, n_jobs=self.n_jobs) else: raise ValueError("Unknown multi-class mode %s" % self.multi_class) self.base_estimator_.fit(X, y) if self.n_classes_ > 2: self.log_marginal_likelihood_value_ = np.mean( [estimator.log_marginal_likelihood() for estimator in self.base_estimator_.estimators_]) else: self.log_marginal_likelihood_value_ = \ self.base_estimator_.log_marginal_likelihood() return self def predict(self, X): """Perform classification on an array of test vectors X. Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- C : array, shape = (n_samples,) Predicted target values for X, values are from ``classes_`` """ check_is_fitted(self, ["classes_", "n_classes_"]) X = check_array(X) return self.base_estimator_.predict(X) def predict_proba(self, X): """Return probability estimates for the test vector X. Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- C : array-like, shape = (n_samples, n_classes) Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. """ check_is_fitted(self, ["classes_", "n_classes_"]) if self.n_classes_ > 2 and self.multi_class == "one_vs_one": raise ValueError("one_vs_one multi-class mode does not support " "predicting probability estimates. Use " "one_vs_rest mode instead.") X = check_array(X) return self.base_estimator_.predict_proba(X) @property def kernel_(self): if self.n_classes_ == 2: return self.base_estimator_.kernel_ else: return CompoundKernel( [estimator.kernel_ for estimator in self.base_estimator_.estimators_]) def log_marginal_likelihood(self, theta=None, eval_gradient=False): """Returns log-marginal likelihood of theta for training data. In the case of multi-class classification, the mean log-marginal likelihood of the one-versus-rest classifiers are returned. Parameters ---------- theta : array-like, shape = (n_kernel_params,) or none Kernel hyperparameters for which the log-marginal likelihood is evaluated. 
In the case of multi-class classification, theta may be the hyperparameters of the compound kernel or of an individual kernel. In the latter case, all individual kernel get assigned the same theta values. If None, the precomputed log_marginal_likelihood of ``self.kernel_.theta`` is returned. eval_gradient : bool, default: False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. Note that gradient computation is not supported for non-binary classification. If True, theta must not be None. Returns ------- log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : array, shape = (n_kernel_params,), optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when eval_gradient is True. """ check_is_fitted(self, ["classes_", "n_classes_"]) if theta is None: if eval_gradient: raise ValueError( "Gradient can only be evaluated for theta!=None") return self.log_marginal_likelihood_value_ theta = np.asarray(theta) if self.n_classes_ == 2: return self.base_estimator_.log_marginal_likelihood( theta, eval_gradient) else: if eval_gradient: raise NotImplementedError( "Gradient of log-marginal-likelihood not implemented for " "multi-class GPC.") estimators = self.base_estimator_.estimators_ n_dims = estimators[0].kernel_.n_dims if theta.shape[0] == n_dims: # use same theta for all sub-kernels return np.mean( [estimator.log_marginal_likelihood(theta) for i, estimator in enumerate(estimators)]) elif theta.shape[0] == n_dims * self.classes_.shape[0]: # theta for compound kernel return np.mean( [estimator.log_marginal_likelihood( theta[n_dims * i:n_dims * (i + 1)]) for i, estimator in enumerate(estimators)]) else: raise ValueError("Shape of theta must be either %d or %d. " "Obtained theta with shape %d." % (n_dims, n_dims * self.classes_.shape[0], theta.shape[0]))
31958
42.304878
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/gaussian_process/tests/test_gpr.py
"""Testing for Gaussian process regression """ # Author: Jan Hendrik Metzen <[email protected]> # License: BSD 3 clause import numpy as np from scipy.optimize import approx_fprime from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels \ import RBF, ConstantKernel as C, WhiteKernel from sklearn.gaussian_process.kernels import DotProduct from sklearn.utils.testing \ import (assert_true, assert_greater, assert_array_less, assert_almost_equal, assert_equal, assert_raise_message, assert_array_almost_equal) def f(x): return x * np.sin(x) X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T y = f(X).ravel() fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed") kernels = [RBF(length_scale=1.0), fixed_kernel, RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)), C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)), C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) + C(1e-5, (1e-5, 1e2)), C(0.1, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) + C(1e-5, (1e-5, 1e2))] def test_gpr_interpolation(): # Test the interpolating property for different kernels. for kernel in kernels: gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) y_pred, y_cov = gpr.predict(X, return_cov=True) assert_almost_equal(y_pred, y) assert_almost_equal(np.diag(y_cov), 0.) def test_lml_improving(): # Test that hyperparameter-tuning improves log-marginal likelihood. for kernel in kernels: if kernel == fixed_kernel: continue gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta), gpr.log_marginal_likelihood(kernel.theta)) def test_lml_precomputed(): # Test that lml of optimized kernel is stored correctly. for kernel in kernels: gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) assert_equal(gpr.log_marginal_likelihood(gpr.kernel_.theta), gpr.log_marginal_likelihood()) def test_converged_to_local_maximum(): # Test that we are in local maximum after hyperparameter-optimization. for kernel in kernels: if kernel == fixed_kernel: continue gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) lml, lml_gradient = \ gpr.log_marginal_likelihood(gpr.kernel_.theta, True) assert_true(np.all((np.abs(lml_gradient) < 1e-4) | (gpr.kernel_.theta == gpr.kernel_.bounds[:, 0]) | (gpr.kernel_.theta == gpr.kernel_.bounds[:, 1]))) def test_solution_inside_bounds(): # Test that hyperparameter-optimization remains in bounds# for kernel in kernels: if kernel == fixed_kernel: continue gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) bounds = gpr.kernel_.bounds max_ = np.finfo(gpr.kernel_.theta.dtype).max tiny = 1e-10 bounds[~np.isfinite(bounds[:, 1]), 1] = max_ assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny) assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny) def test_lml_gradient(): # Compare analytic and numeric gradient of log marginal likelihood. for kernel in kernels: gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True) lml_gradient_approx = \ approx_fprime(kernel.theta, lambda theta: gpr.log_marginal_likelihood(theta, False), 1e-10) assert_almost_equal(lml_gradient, lml_gradient_approx, 3) def test_prior(): # Test that GP prior has mean 0 and identical variances. 
for kernel in kernels: gpr = GaussianProcessRegressor(kernel=kernel) y_mean, y_cov = gpr.predict(X, return_cov=True) assert_almost_equal(y_mean, 0, 5) if len(gpr.kernel.theta) > 1: # XXX: quite hacky, works only for current kernels assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5) else: assert_almost_equal(np.diag(y_cov), 1, 5) def test_sample_statistics(): # Test that statistics of samples drawn from GP are correct. for kernel in kernels: gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) y_mean, y_cov = gpr.predict(X2, return_cov=True) samples = gpr.sample_y(X2, 300000) # More digits accuracy would require many more samples assert_almost_equal(y_mean, np.mean(samples, 1), 1) assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(), np.var(samples, 1) / np.diag(y_cov).max(), 1) def test_no_optimizer(): # Test that kernel parameters are unmodified when optimizer is None. kernel = RBF(1.0) gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y) assert_equal(np.exp(gpr.kernel_.theta), 1.0) def test_predict_cov_vs_std(): # Test that predicted std.-dev. is consistent with cov's diagonal. for kernel in kernels: gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) y_mean, y_cov = gpr.predict(X2, return_cov=True) y_mean, y_std = gpr.predict(X2, return_std=True) assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std) def test_anisotropic_kernel(): # Test that GPR can identify meaningful anisotropic length-scales. # We learn a function which varies in one dimension ten-times slower # than in the other. The corresponding length-scales should differ by at # least a factor 5 rng = np.random.RandomState(0) X = rng.uniform(-1, 1, (50, 2)) y = X[:, 0] + 0.1 * X[:, 1] kernel = RBF([1.0, 1.0]) gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) assert_greater(np.exp(gpr.kernel_.theta[1]), np.exp(gpr.kernel_.theta[0]) * 5) def test_random_starts(): # Test that an increasing number of random-starts of GP fitting only # increases the log marginal likelihood of the chosen theta. 
n_samples, n_features = 25, 2 rng = np.random.RandomState(0) X = rng.randn(n_samples, n_features) * 2 - 1 y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \ + rng.normal(scale=0.1, size=n_samples) kernel = C(1.0, (1e-2, 1e2)) \ * RBF(length_scale=[1.0] * n_features, length_scale_bounds=[(1e-4, 1e+2)] * n_features) \ + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1)) last_lml = -np.inf for n_restarts_optimizer in range(5): gp = GaussianProcessRegressor( kernel=kernel, n_restarts_optimizer=n_restarts_optimizer, random_state=0,).fit(X, y) lml = gp.log_marginal_likelihood(gp.kernel_.theta) assert_greater(lml, last_lml - np.finfo(np.float32).eps) last_lml = lml def test_y_normalization(): # Test normalization of the target values in GP # Fitting non-normalizing GP on normalized y and fitting normalizing GP # on unnormalized y should yield identical results y_mean = y.mean(0) y_norm = y - y_mean for kernel in kernels: # Fit non-normalizing GP on normalized y gpr = GaussianProcessRegressor(kernel=kernel) gpr.fit(X, y_norm) # Fit normalizing GP on unnormalized y gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True) gpr_norm.fit(X, y) # Compare predicted mean, std-devs and covariances y_pred, y_pred_std = gpr.predict(X2, return_std=True) y_pred = y_mean + y_pred y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True) assert_almost_equal(y_pred, y_pred_norm) assert_almost_equal(y_pred_std, y_pred_std_norm) _, y_cov = gpr.predict(X2, return_cov=True) _, y_cov_norm = gpr_norm.predict(X2, return_cov=True) assert_almost_equal(y_cov, y_cov_norm) def test_y_multioutput(): # Test that GPR can deal with multi-dimensional target values y_2d = np.vstack((y, y * 2)).T # Test for fixed kernel that first dimension of 2d GP equals the output # of 1d GP and that second dimension is twice as large kernel = RBF(length_scale=1.0) gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None, normalize_y=False) gpr.fit(X, y) gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None, normalize_y=False) gpr_2d.fit(X, y_2d) y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True) y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True) _, y_cov_1d = gpr.predict(X2, return_cov=True) _, y_cov_2d = gpr_2d.predict(X2, return_cov=True) assert_almost_equal(y_pred_1d, y_pred_2d[:, 0]) assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2) # Standard deviation and covariance do not depend on output assert_almost_equal(y_std_1d, y_std_2d) assert_almost_equal(y_cov_1d, y_cov_2d) y_sample_1d = gpr.sample_y(X2, n_samples=10) y_sample_2d = gpr_2d.sample_y(X2, n_samples=10) assert_almost_equal(y_sample_1d, y_sample_2d[:, 0]) # Test hyperparameter optimization for kernel in kernels: gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True) gpr.fit(X, y) gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True) gpr_2d.fit(X, np.vstack((y, y)).T) assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4) def test_custom_optimizer(): # Test that GPR can use externally defined optimizers. 
# Define a dummy optimizer that simply tests 50 random hyperparameters def optimizer(obj_func, initial_theta, bounds): rng = np.random.RandomState(0) theta_opt, func_min = \ initial_theta, obj_func(initial_theta, eval_gradient=False) for _ in range(50): theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]), np.minimum(1, bounds[:, 1]))) f = obj_func(theta, eval_gradient=False) if f < func_min: theta_opt, func_min = theta, f return theta_opt, func_min for kernel in kernels: if kernel == fixed_kernel: continue gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer) gpr.fit(X, y) # Checks that optimizer improved marginal likelihood assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta), gpr.log_marginal_likelihood(gpr.kernel.theta)) def test_gpr_correct_error_message(): X = np.arange(12).reshape(6, -1) y = np.ones(6) kernel = DotProduct() gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0) assert_raise_message(np.linalg.LinAlgError, "The kernel, %s, is not returning a " "positive definite matrix. Try gradually increasing " "the 'alpha' parameter of your " "GaussianProcessRegressor estimator." % kernel, gpr.fit, X, y) def test_duplicate_input(): # Test GPR can handle two different output-values for the same input. for kernel in kernels: gpr_equal_inputs = \ GaussianProcessRegressor(kernel=kernel, alpha=1e-2) gpr_similar_inputs = \ GaussianProcessRegressor(kernel=kernel, alpha=1e-2) X_ = np.vstack((X, X[0])) y_ = np.hstack((y, y[0] + 1)) gpr_equal_inputs.fit(X_, y_) X_ = np.vstack((X, X[0] + 1e-15)) y_ = np.hstack((y, y[0] + 1)) gpr_similar_inputs.fit(X_, y_) X_test = np.linspace(0, 10, 100)[:, None] y_pred_equal, y_std_equal = \ gpr_equal_inputs.predict(X_test, return_std=True) y_pred_similar, y_std_similar = \ gpr_similar_inputs.predict(X_test, return_std=True) assert_almost_equal(y_pred_equal, y_pred_similar) assert_almost_equal(y_std_equal, y_std_similar) def test_no_fit_default_predict(): # Test that GPR predictions without fit does not break by default. default_kernel = (C(1.0, constant_value_bounds="fixed") * RBF(1.0, length_scale_bounds="fixed")) gpr1 = GaussianProcessRegressor() _, y_std1 = gpr1.predict(X, return_std=True) _, y_cov1 = gpr1.predict(X, return_cov=True) gpr2 = GaussianProcessRegressor(kernel=default_kernel) _, y_std2 = gpr2.predict(X, return_std=True) _, y_cov2 = gpr2.predict(X, return_cov=True) assert_array_almost_equal(y_std1, y_std2) assert_array_almost_equal(y_cov1, y_cov2)
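The behaviours exercised by the tests above can be reproduced interactively. The following is a hedged illustration, not part of the test suite; the training data and kernel are assumptions mirroring the fixtures defined at the top of the file.

# Hedged sketch of the GPR behaviours covered above: interpolation of
# noise-free data and consistency of return_std with return_cov.
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
y = (X * np.sin(X)).ravel()

gpr = GaussianProcessRegressor(kernel=C(1.0) * RBF(length_scale=1.0),
                               n_restarts_optimizer=3,
                               random_state=0).fit(X, y)

X_test = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y_mean, y_std = gpr.predict(X_test, return_std=True)
_, y_cov = gpr.predict(X_test, return_cov=True)

# Same consistency check as test_predict_cov_vs_std (difference should be ~0)
print(np.max(np.abs(np.sqrt(np.diag(y_cov)) - y_std)))
print(gpr.kernel_, gpr.log_marginal_likelihood(gpr.kernel_.theta))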
13070
36.668588
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/gaussian_process/tests/test_gpc.py
"""Testing for Gaussian process classification """ # Author: Jan Hendrik Metzen <[email protected]> # License: BSD 3 clause import numpy as np from scipy.optimize import approx_fprime from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C from sklearn.utils.testing import (assert_true, assert_greater, assert_almost_equal, assert_array_equal) def f(x): return np.sin(x) X = np.atleast_2d(np.linspace(0, 10, 30)).T X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T y = np.array(f(X).ravel() > 0, dtype=int) fX = f(X).ravel() y_mc = np.empty(y.shape, dtype=int) # multi-class y_mc[fX < -0.35] = 0 y_mc[(fX >= -0.35) & (fX < 0.35)] = 1 y_mc[fX > 0.35] = 2 fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed") kernels = [RBF(length_scale=0.1), fixed_kernel, RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)), C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))] def test_predict_consistent(): # Check binary predict decision has also predicted probability above 0.5. for kernel in kernels: gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) assert_array_equal(gpc.predict(X), gpc.predict_proba(X)[:, 1] >= 0.5) def test_lml_improving(): # Test that hyperparameter-tuning improves log-marginal likelihood. for kernel in kernels: if kernel == fixed_kernel: continue gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta), gpc.log_marginal_likelihood(kernel.theta)) def test_lml_precomputed(): # Test that lml of optimized kernel is stored correctly. for kernel in kernels: gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) assert_almost_equal(gpc.log_marginal_likelihood(gpc.kernel_.theta), gpc.log_marginal_likelihood(), 7) def test_converged_to_local_maximum(): # Test that we are in local maximum after hyperparameter-optimization. for kernel in kernels: if kernel == fixed_kernel: continue gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) lml, lml_gradient = \ gpc.log_marginal_likelihood(gpc.kernel_.theta, True) assert_true(np.all((np.abs(lml_gradient) < 1e-4) | (gpc.kernel_.theta == gpc.kernel_.bounds[:, 0]) | (gpc.kernel_.theta == gpc.kernel_.bounds[:, 1]))) def test_lml_gradient(): # Compare analytic and numeric gradient of log marginal likelihood. for kernel in kernels: gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True) lml_gradient_approx = \ approx_fprime(kernel.theta, lambda theta: gpc.log_marginal_likelihood(theta, False), 1e-10) assert_almost_equal(lml_gradient, lml_gradient_approx, 3) def test_random_starts(): # Test that an increasing number of random-starts of GP fitting only # increases the log marginal likelihood of the chosen theta. n_samples, n_features = 25, 2 rng = np.random.RandomState(0) X = rng.randn(n_samples, n_features) * 2 - 1 y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0 kernel = C(1.0, (1e-2, 1e2)) \ * RBF(length_scale=[1e-3] * n_features, length_scale_bounds=[(1e-4, 1e+2)] * n_features) last_lml = -np.inf for n_restarts_optimizer in range(5): gp = GaussianProcessClassifier( kernel=kernel, n_restarts_optimizer=n_restarts_optimizer, random_state=0).fit(X, y) lml = gp.log_marginal_likelihood(gp.kernel_.theta) assert_greater(lml, last_lml - np.finfo(np.float32).eps) last_lml = lml def test_custom_optimizer(): # Test that GPC can use externally defined optimizers. 
# Define a dummy optimizer that simply tests 50 random hyperparameters def optimizer(obj_func, initial_theta, bounds): rng = np.random.RandomState(0) theta_opt, func_min = \ initial_theta, obj_func(initial_theta, eval_gradient=False) for _ in range(50): theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]), np.minimum(1, bounds[:, 1]))) f = obj_func(theta, eval_gradient=False) if f < func_min: theta_opt, func_min = theta, f return theta_opt, func_min for kernel in kernels: if kernel == fixed_kernel: continue gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer) gpc.fit(X, y_mc) # Checks that optimizer improved marginal likelihood assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta), gpc.log_marginal_likelihood(kernel.theta)) def test_multi_class(): # Test GPC for multi-class classification problems. for kernel in kernels: gpc = GaussianProcessClassifier(kernel=kernel) gpc.fit(X, y_mc) y_prob = gpc.predict_proba(X2) assert_almost_equal(y_prob.sum(1), 1) y_pred = gpc.predict(X2) assert_array_equal(np.argmax(y_prob, 1), y_pred) def test_multi_class_n_jobs(): # Test that multi-class GPC produces identical results with n_jobs>1. for kernel in kernels: gpc = GaussianProcessClassifier(kernel=kernel) gpc.fit(X, y_mc) gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2) gpc_2.fit(X, y_mc) y_prob = gpc.predict_proba(X2) y_prob_2 = gpc_2.predict_proba(X2) assert_almost_equal(y_prob, y_prob_2)
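For reference, the multi-class behaviour covered by test_multi_class can be sketched as follows; the data construction is an assumption chosen only to produce three classes, analogous to y_mc above.

# Hedged sketch of multi-class GPC: probabilities sum to one and their
# argmax agrees with predict(). Data are illustrative assumptions.
import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

X = np.atleast_2d(np.linspace(0, 10, 30)).T
y_mc = np.digitize(np.sin(X).ravel(), bins=[-0.35, 0.35])    # classes 0, 1, 2

gpc = GaussianProcessClassifier(kernel=RBF(length_scale=1.0),
                                multi_class="one_vs_rest").fit(X, y_mc)

proba = gpc.predict_proba(X)
print(np.allclose(proba.sum(axis=1), 1.0))                   # True
print(np.array_equal(proba.argmax(axis=1), gpc.predict(X)))  # True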
5994
35.779141
77
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/gaussian_process/tests/test_kernels.py
"""Testing for kernels for Gaussian processes.""" # Author: Jan Hendrik Metzen <[email protected]> # License: BSD 3 clause from sklearn.externals.funcsigs import signature import numpy as np from sklearn.gaussian_process.kernels import _approx_fprime from sklearn.metrics.pairwise \ import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels from sklearn.gaussian_process.kernels \ import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct, ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator, Exponentiation) from sklearn.base import clone from sklearn.utils.testing import (assert_equal, assert_almost_equal, assert_not_equal, assert_array_equal, assert_array_almost_equal) X = np.random.RandomState(0).normal(0, 1, (5, 2)) Y = np.random.RandomState(0).normal(0, 1, (6, 2)) kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0) kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)), ConstantKernel(constant_value=10.0), 2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"), 2.0 * RBF(length_scale=0.5), kernel_white, 2.0 * RBF(length_scale=[0.5, 2.0]), 2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"), 2.0 * Matern(length_scale=0.5, nu=0.5), 2.0 * Matern(length_scale=1.5, nu=1.5), 2.0 * Matern(length_scale=2.5, nu=2.5), 2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5), 3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5), 4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5), RationalQuadratic(length_scale=0.5, alpha=1.5), ExpSineSquared(length_scale=0.5, periodicity=1.5), DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2, RBF(length_scale=[2.0]), Matern(length_scale=[2.0])] for metric in PAIRWISE_KERNEL_FUNCTIONS: if metric in ["additive_chi2", "chi2"]: continue kernels.append(PairwiseKernel(gamma=1.0, metric=metric)) def test_kernel_gradient(): # Compare analytic and numeric gradient of kernels. for kernel in kernels: K, K_gradient = kernel(X, eval_gradient=True) assert_equal(K_gradient.shape[0], X.shape[0]) assert_equal(K_gradient.shape[1], X.shape[0]) assert_equal(K_gradient.shape[2], kernel.theta.shape[0]) def eval_kernel_for_theta(theta): kernel_clone = kernel.clone_with_theta(theta) K = kernel_clone(X, eval_gradient=False) return K K_gradient_approx = \ _approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10) assert_almost_equal(K_gradient, K_gradient_approx, 4) def test_kernel_theta(): # Check that parameter vector theta of kernel is set correctly. for kernel in kernels: if isinstance(kernel, KernelOperator) \ or isinstance(kernel, Exponentiation): # skip non-basic kernels continue theta = kernel.theta _, K_gradient = kernel(X, eval_gradient=True) # Determine kernel parameters that contribute to theta init_sign = signature(kernel.__class__.__init__).parameters.values() args = [p.name for p in init_sign if p.name != 'self'] theta_vars = map(lambda s: s[0:-len("_bounds")], filter(lambda s: s.endswith("_bounds"), args)) assert_equal( set(hyperparameter.name for hyperparameter in kernel.hyperparameters), set(theta_vars)) # Check that values returned in theta are consistent with # hyperparameter values (being their logarithms) for i, hyperparameter in enumerate(kernel.hyperparameters): assert_equal(theta[i], np.log(getattr(kernel, hyperparameter.name))) # Fixed kernel parameters must be excluded from theta and gradient. 
for i, hyperparameter in enumerate(kernel.hyperparameters): # create copy with certain hyperparameter fixed params = kernel.get_params() params[hyperparameter.name + "_bounds"] = "fixed" kernel_class = kernel.__class__ new_kernel = kernel_class(**params) # Check that theta and K_gradient are identical with the fixed # dimension left out _, K_gradient_new = new_kernel(X, eval_gradient=True) assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1) assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1) if i > 0: assert_equal(theta[:i], new_kernel.theta[:i]) assert_array_equal(K_gradient[..., :i], K_gradient_new[..., :i]) if i + 1 < len(kernel.hyperparameters): assert_equal(theta[i + 1:], new_kernel.theta[i:]) assert_array_equal(K_gradient[..., i + 1:], K_gradient_new[..., i:]) # Check that values of theta are modified correctly for i, hyperparameter in enumerate(kernel.hyperparameters): theta[i] = np.log(42) kernel.theta = theta assert_almost_equal(getattr(kernel, hyperparameter.name), 42) setattr(kernel, hyperparameter.name, 43) assert_almost_equal(kernel.theta[i], np.log(43)) def test_auto_vs_cross(): # Auto-correlation and cross-correlation should be consistent. for kernel in kernels: if kernel == kernel_white: continue # Identity is not satisfied on diagonal K_auto = kernel(X) K_cross = kernel(X, X) assert_almost_equal(K_auto, K_cross, 5) def test_kernel_diag(): # Test that diag method of kernel returns consistent results. for kernel in kernels: K_call_diag = np.diag(kernel(X)) K_diag = kernel.diag(X) assert_almost_equal(K_call_diag, K_diag, 5) def test_kernel_operator_commutative(): # Adding kernels and multiplying kernels should be commutative. # Check addition assert_almost_equal((RBF(2.0) + 1.0)(X), (1.0 + RBF(2.0))(X)) # Check multiplication assert_almost_equal((3.0 * RBF(2.0))(X), (RBF(2.0) * 3.0)(X)) def test_kernel_anisotropic(): # Anisotropic kernel should be consistent with isotropic kernels. kernel = 3.0 * RBF([0.5, 2.0]) K = kernel(X) X1 = np.array(X) X1[:, 0] *= 4 K1 = 3.0 * RBF(2.0)(X1) assert_almost_equal(K, K1) X2 = np.array(X) X2[:, 1] /= 4 K2 = 3.0 * RBF(0.5)(X2) assert_almost_equal(K, K2) # Check getting and setting via theta kernel.theta = kernel.theta + np.log(2) assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0])) assert_array_equal(kernel.k2.length_scale, [1.0, 4.0]) def test_kernel_stationary(): # Test stationarity of kernels. for kernel in kernels: if not kernel.is_stationary(): continue K = kernel(X, X + 1) assert_almost_equal(K[0, 0], np.diag(K)) def check_hyperparameters_equal(kernel1, kernel2): # Check that hyperparameters of two kernels are equal for attr in set(dir(kernel1) + dir(kernel2)): if attr.startswith("hyperparameter_"): attr_value1 = getattr(kernel1, attr) attr_value2 = getattr(kernel2, attr) assert_equal(attr_value1, attr_value2) def test_kernel_clone(): # Test that sklearn's clone works correctly on kernels. for kernel in kernels: kernel_cloned = clone(kernel) # XXX: Should this be fixed? # This differs from the sklearn's estimators equality check. assert_equal(kernel, kernel_cloned) assert_not_equal(id(kernel), id(kernel_cloned)) # Check that all constructor parameters are equal. assert_equal(kernel.get_params(), kernel_cloned.get_params()) # Check that all hyperparameters are equal. yield check_hyperparameters_equal, kernel, kernel_cloned def test_kernel_clone_after_set_params(): # This test is to verify that using set_params does not # break clone on kernels. 
# This used to break because in kernels such as the RBF, non-trivial # logic that modified the length scale used to be in the constructor # See https://github.com/scikit-learn/scikit-learn/issues/6961 # for more details. bounds = (1e-5, 1e5) for kernel in kernels: kernel_cloned = clone(kernel) params = kernel.get_params() # RationalQuadratic kernel is isotropic. isotropic_kernels = (ExpSineSquared, RationalQuadratic) if 'length_scale' in params and not isinstance(kernel, isotropic_kernels): length_scale = params['length_scale'] if np.iterable(length_scale): params['length_scale'] = length_scale[0] params['length_scale_bounds'] = bounds else: params['length_scale'] = [length_scale] * 2 params['length_scale_bounds'] = bounds * 2 kernel_cloned.set_params(**params) kernel_cloned_clone = clone(kernel_cloned) assert_equal(kernel_cloned_clone.get_params(), kernel_cloned.get_params()) assert_not_equal(id(kernel_cloned_clone), id(kernel_cloned)) yield (check_hyperparameters_equal, kernel_cloned, kernel_cloned_clone) def test_matern_kernel(): # Test consistency of Matern kernel for special values of nu. K = Matern(nu=1.5, length_scale=1.0)(X) # the diagonal elements of a matern kernel are 1 assert_array_almost_equal(np.diag(K), np.ones(X.shape[0])) # matern kernel for coef0==0.5 is equal to absolute exponential kernel K_absexp = np.exp(-euclidean_distances(X, X, squared=False)) K = Matern(nu=0.5, length_scale=1.0)(X) assert_array_almost_equal(K, K_absexp) # test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5]) # result in nearly identical results as the general case for coef0 in # [0.5 + tiny, 1.5 + tiny, 2.5 + tiny] tiny = 1e-10 for nu in [0.5, 1.5, 2.5]: K1 = Matern(nu=nu, length_scale=1.0)(X) K2 = Matern(nu=nu + tiny, length_scale=1.0)(X) assert_array_almost_equal(K1, K2) def test_kernel_versus_pairwise(): # Check that GP kernels can also be used as pairwise kernels. for kernel in kernels: # Test auto-kernel if kernel != kernel_white: # For WhiteKernel: k(X) != k(X,X). This is assumed by # pairwise_kernels K1 = kernel(X) K2 = pairwise_kernels(X, metric=kernel) assert_array_almost_equal(K1, K2) # Test cross-kernel K1 = kernel(X, Y) K2 = pairwise_kernels(X, Y, metric=kernel) assert_array_almost_equal(K1, K2) def test_set_get_params(): # Check that set_params()/get_params() is consistent with kernel.theta. for kernel in kernels: # Test get_params() index = 0 params = kernel.get_params() for hyperparameter in kernel.hyperparameters: if isinstance("string", type(hyperparameter.bounds)): if hyperparameter.bounds == "fixed": continue size = hyperparameter.n_elements if size > 1: # anisotropic kernels assert_almost_equal(np.exp(kernel.theta[index:index + size]), params[hyperparameter.name]) index += size else: assert_almost_equal(np.exp(kernel.theta[index]), params[hyperparameter.name]) index += 1 # Test set_params() index = 0 value = 10 # arbitrary value for hyperparameter in kernel.hyperparameters: if isinstance("string", type(hyperparameter.bounds)): if hyperparameter.bounds == "fixed": continue size = hyperparameter.n_elements if size > 1: # anisotropic kernels kernel.set_params(**{hyperparameter.name: [value] * size}) assert_almost_equal(np.exp(kernel.theta[index:index + size]), [value] * size) index += size else: kernel.set_params(**{hyperparameter.name: value}) assert_almost_equal(np.exp(kernel.theta[index]), value) index += 1 def test_repr_kernels(): # Smoke-test for repr in kernels. for kernel in kernels: repr(kernel)
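The kernel API exercised above (composition, the log-transformed theta vector and analytic gradients) can be illustrated with a short hedged sketch; the particular kernel combination and values are assumptions.

# Hedged sketch of the kernel API: composition, theta and eval_gradient.
import numpy as np
from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel

X = np.random.RandomState(0).normal(0, 1, (5, 2))

kernel = ConstantKernel(2.0) * RBF(length_scale=[0.5, 2.0]) + WhiteKernel(0.1)
print(kernel.hyperparameters)        # names, bounds and sizes of free parameters
print(np.exp(kernel.theta))          # [2.0, 0.5, 2.0, 0.1] (theta stores the logs)

K, K_gradient = kernel(X, eval_gradient=True)
print(K.shape, K_gradient.shape)     # (5, 5) (5, 5, 4)

# Setting theta round-trips through the log transform
kernel.theta = kernel.theta + np.log(2.0)
print(kernel.get_params()["k1__k1__constant_value"])   # 4.0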
12799
38.751553
77
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/gaussian_process/tests/test_gaussian_process.py
""" Testing for Gaussian Process module (sklearn.gaussian_process) """ # Author: Vincent Dubourg <[email protected]> # License: BSD 3 clause import numpy as np from sklearn.gaussian_process import GaussianProcess from sklearn.gaussian_process import regression_models as regression from sklearn.gaussian_process import correlation_models as correlation from sklearn.datasets import make_regression from sklearn.utils.testing import assert_greater, assert_true, raises f = lambda x: x * np.sin(x) X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T y = f(X).ravel() def test_1d(regr=regression.constant, corr=correlation.squared_exponential, random_start=10, beta0=None): # MLE estimation of a one-dimensional Gaussian Process model. # Check random start optimization. # Test the interpolating property. gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0, theta0=1e-2, thetaL=1e-4, thetaU=1e-1, random_start=random_start, verbose=False).fit(X, y) y_pred, MSE = gp.predict(X, eval_MSE=True) y2_pred, MSE2 = gp.predict(X2, eval_MSE=True) assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.) and np.allclose(MSE2, 0., atol=10)) def test_2d(regr=regression.constant, corr=correlation.squared_exponential, random_start=10, beta0=None): # MLE estimation of a two-dimensional Gaussian Process model accounting for # anisotropy. Check random start optimization. # Test the interpolating property. b, kappa, e = 5., .5, .1 g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2. X = np.array([[-4.61611719, -6.00099547], [4.10469096, 5.32782448], [0.00000000, -0.50000000], [-6.17289014, -4.6984743], [1.3109306, -6.93271427], [-5.03823144, 3.10584743], [-2.87600388, 6.74310541], [5.21301203, 4.26386883]]) y = g(X).ravel() thetaL = [1e-4] * 2 thetaU = [1e-1] * 2 gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0, theta0=[1e-2] * 2, thetaL=thetaL, thetaU=thetaU, random_start=random_start, verbose=False) gp.fit(X, y) y_pred, MSE = gp.predict(X, eval_MSE=True) assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)) eps = np.finfo(gp.theta_.dtype).eps assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential, random_start=10, beta0=None): # MLE estimation of a two-dimensional Gaussian Process model accounting for # anisotropy. Check random start optimization. # Test the GP interpolation for 2D output b, kappa, e = 5., .5, .1 g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2. f = lambda x: np.vstack((g(x), g(x))).T X = np.array([[-4.61611719, -6.00099547], [4.10469096, 5.32782448], [0.00000000, -0.50000000], [-6.17289014, -4.6984743], [1.3109306, -6.93271427], [-5.03823144, 3.10584743], [-2.87600388, 6.74310541], [5.21301203, 4.26386883]]) y = f(X) gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0, theta0=[1e-2] * 2, thetaL=[1e-4] * 2, thetaU=[1e-1] * 2, random_start=random_start, verbose=False) gp.fit(X, y) y_pred, MSE = gp.predict(X, eval_MSE=True) assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)) @raises(ValueError) def test_wrong_number_of_outputs(): gp = GaussianProcess() gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3]) def test_more_builtin_correlation_models(random_start=1): # Repeat test_1d and test_2d for several built-in correlation # models specified as strings. 
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic', 'linear'] for corr in all_corr: test_1d(regr='constant', corr=corr, random_start=random_start) test_2d(regr='constant', corr=corr, random_start=random_start) test_2d_2d(regr='constant', corr=corr, random_start=random_start) def test_ordinary_kriging(): # Repeat test_1d and test_2d with given regression weights (beta0) for # different regression models (Ordinary Kriging). test_1d(regr='linear', beta0=[0., 0.5]) test_1d(regr='quadratic', beta0=[0., 0.5, 0.5]) test_2d(regr='linear', beta0=[0., 0.5, 0.5]) test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5]) test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5]) test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5]) def test_no_normalize(): gp = GaussianProcess(normalize=False).fit(X, y) y_pred = gp.predict(X) assert_true(np.allclose(y_pred, y)) def test_batch_size(): # TypeError when using batch_size on Python 3, see # https://github.com/scikit-learn/scikit-learn/issues/7329 for more # details gp = GaussianProcess() gp.fit(X, y) gp.predict(X, batch_size=1) gp.predict(X, batch_size=1, eval_MSE=True) def test_random_starts(): # Test that an increasing number of random-starts of GP fitting only # increases the reduced likelihood function of the optimal theta. n_samples, n_features = 50, 3 rng = np.random.RandomState(0) X = rng.randn(n_samples, n_features) * 2 - 1 y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) best_likelihood = -np.inf for random_start in range(1, 5): gp = GaussianProcess(regr="constant", corr="squared_exponential", theta0=[1e-0] * n_features, thetaL=[1e-4] * n_features, thetaU=[1e+1] * n_features, random_start=random_start, random_state=0, verbose=False).fit(X, y) rlf = gp.reduced_likelihood_function()[0] assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps) best_likelihood = rlf def test_mse_solving(): # test the MSE estimate to be sane. # non-regression test for ignoring off-diagonals of feature covariance, # testing with nugget that renders covariance useless, only # using the mean function, with low effective rank of data gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4, thetaL=1e-12, thetaU=1e-2, nugget=1e-2, optimizer='Welch', regr="linear", random_state=0) X, y = make_regression(n_informative=3, n_features=60, noise=50, random_state=0, effective_rank=1) gp.fit(X, y) assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
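These tests target the legacy (pre-0.18) GaussianProcess estimator rather than GaussianProcessRegressor. A hedged sketch of that older API, using the same kind of fixture as test_1d, is shown below; it is illustrative only, and the estimator emits a deprecation warning in this version of scikit-learn.

# Hedged sketch of the legacy GaussianProcess API exercised above.
import numpy as np
from sklearn.gaussian_process import GaussianProcess

X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
y = (X * np.sin(X)).ravel()

gp = GaussianProcess(regr='constant', corr='squared_exponential',
                     theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                     random_start=5, random_state=0).fit(X, y)

y_pred, mse = gp.predict(X, eval_MSE=True)
print(np.allclose(y_pred, y), np.allclose(mse, 0.))   # True True, cf. test_1d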
7035
38.751412
85
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/gaussian_process/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/manifold/locally_linear.py
"""Locally Linear Embedding""" # Author: Fabian Pedregosa -- <[email protected]> # Jake Vanderplas -- <[email protected]> # License: BSD 3 clause (C) INRIA 2011 import numpy as np from scipy.linalg import eigh, svd, qr, solve from scipy.sparse import eye, csr_matrix from scipy.sparse.linalg import eigsh from ..base import BaseEstimator, TransformerMixin from ..utils import check_random_state, check_array from ..utils.extmath import stable_cumsum from ..utils.validation import check_is_fitted from ..utils.validation import FLOAT_DTYPES from ..neighbors import NearestNeighbors def barycenter_weights(X, Z, reg=1e-3): """Compute barycenter weights of X from Y along the first axis We estimate the weights to assign to each point in Y[i] to recover the point X[i]. The barycenter weights sum to 1. Parameters ---------- X : array-like, shape (n_samples, n_dim) Z : array-like, shape (n_samples, n_neighbors, n_dim) reg : float, optional amount of regularization to add for the problem to be well-posed in the case of n_neighbors > n_dim Returns ------- B : array-like, shape (n_samples, n_neighbors) Notes ----- See developers note for more information. """ X = check_array(X, dtype=FLOAT_DTYPES) Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True) n_samples, n_neighbors = X.shape[0], Z.shape[1] B = np.empty((n_samples, n_neighbors), dtype=X.dtype) v = np.ones(n_neighbors, dtype=X.dtype) # this might raise a LinalgError if G is singular and has trace # zero for i, A in enumerate(Z.transpose(0, 2, 1)): C = A.T - X[i] # broadcasting G = np.dot(C, C.T) trace = np.trace(G) if trace > 0: R = reg * trace else: R = reg G.flat[::Z.shape[1] + 1] += R w = solve(G, v, sym_pos=True) B[i, :] = w / np.sum(w) return B def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=1): """Computes the barycenter weighted graph of k-Neighbors for points in X Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors} Sample data, shape = (n_samples, n_features), in the form of a numpy array, sparse array, precomputed tree, or NearestNeighbors object. n_neighbors : int Number of neighbors for each sample. reg : float, optional Amount of regularization when solving the least-squares problem. Only relevant if mode='barycenter'. If None, use the default. n_jobs : int, optional (default = 1) The number of parallel jobs to run for neighbors search. If ``-1``, then the number of jobs is set to the number of CPU cores. Returns ------- A : sparse matrix in CSR format, shape = [n_samples, n_samples] A[i, j] is assigned the weight of edge that connects i to j. See also -------- sklearn.neighbors.kneighbors_graph sklearn.neighbors.radius_neighbors_graph """ knn = NearestNeighbors(n_neighbors + 1, n_jobs=n_jobs).fit(X) X = knn._fit_X n_samples = X.shape[0] ind = knn.kneighbors(X, return_distance=False)[:, 1:] data = barycenter_weights(X, X[ind], reg=reg) indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors) return csr_matrix((data.ravel(), ind.ravel(), indptr), shape=(n_samples, n_samples)) def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100, random_state=None): """ Find the null space of a matrix M. Parameters ---------- M : {array, matrix, sparse matrix, LinearOperator} Input covariance matrix: should be symmetric positive semi-definite k : integer Number of eigenvalues/vectors to return k_skip : integer, optional Number of low eigenvalues to skip. 
eigen_solver : string, {'auto', 'arpack', 'dense'} auto : algorithm will attempt to choose the best method for input data arpack : use arnoldi iteration in shift-invert mode. For this method, M may be a dense matrix, sparse matrix, or general linear operator. Warning: ARPACK can be unstable for some problems. It is best to try several random seeds in order to check results. dense : use standard dense matrix operations for the eigenvalue decomposition. For this method, M must be an array or matrix type. This method should be avoided for large problems. tol : float, optional Tolerance for 'arpack' method. Not used if eigen_solver=='dense'. max_iter : maximum number of iterations for 'arpack' method not used if eigen_solver=='dense' random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``solver`` == 'arpack'. """ if eigen_solver == 'auto': if M.shape[0] > 200 and k + k_skip < 10: eigen_solver = 'arpack' else: eigen_solver = 'dense' if eigen_solver == 'arpack': random_state = check_random_state(random_state) # initialize with [-1,1] as in ARPACK v0 = random_state.uniform(-1, 1, M.shape[0]) try: eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0, tol=tol, maxiter=max_iter, v0=v0) except RuntimeError as msg: raise ValueError("Error in determining null-space with ARPACK. " "Error message: '%s'. " "Note that method='arpack' can fail when the " "weight matrix is singular or otherwise " "ill-behaved. method='dense' is recommended. " "See online documentation for more information." % msg) return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:]) elif eigen_solver == 'dense': if hasattr(M, 'toarray'): M = M.toarray() eigen_values, eigen_vectors = eigh( M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True) index = np.argsort(np.abs(eigen_values)) return eigen_vectors[:, index], np.sum(eigen_values) else: raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver) def locally_linear_embedding( X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6, max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12, random_state=None, n_jobs=1): """Perform a Locally Linear Embedding analysis on the data. Read more in the :ref:`User Guide <locally_linear_embedding>`. Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors} Sample data, shape = (n_samples, n_features), in the form of a numpy array, sparse array, precomputed tree, or NearestNeighbors object. n_neighbors : integer number of neighbors to consider for each point. n_components : integer number of coordinates for the manifold. reg : float regularization constant, multiplies the trace of the local covariance matrix of the distances. eigen_solver : string, {'auto', 'arpack', 'dense'} auto : algorithm will attempt to choose the best method for input data arpack : use arnoldi iteration in shift-invert mode. For this method, M may be a dense matrix, sparse matrix, or general linear operator. Warning: ARPACK can be unstable for some problems. It is best to try several random seeds in order to check results. dense : use standard dense matrix operations for the eigenvalue decomposition. For this method, M must be an array or matrix type. This method should be avoided for large problems. 
tol : float, optional Tolerance for 'arpack' method Not used if eigen_solver=='dense'. max_iter : integer maximum number of iterations for the arpack solver. method : {'standard', 'hessian', 'modified', 'ltsa'} standard : use the standard locally linear embedding algorithm. see reference [1]_ hessian : use the Hessian eigenmap method. This method requires n_neighbors > n_components * (1 + (n_components + 1) / 2. see reference [2]_ modified : use the modified locally linear embedding algorithm. see reference [3]_ ltsa : use local tangent space alignment algorithm see reference [4]_ hessian_tol : float, optional Tolerance for Hessian eigenmapping method. Only used if method == 'hessian' modified_tol : float, optional Tolerance for modified LLE method. Only used if method == 'modified' random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``solver`` == 'arpack'. n_jobs : int, optional (default = 1) The number of parallel jobs to run for neighbors search. If ``-1``, then the number of jobs is set to the number of CPU cores. Returns ------- Y : array-like, shape [n_samples, n_components] Embedding vectors. squared_error : float Reconstruction error for the embedding vectors. Equivalent to ``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights. References ---------- .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction by locally linear embedding. Science 290:2323 (2000).` .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally linear embedding techniques for high-dimensional data. Proc Natl Acad Sci U S A. 100:5591 (2003).` .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear Embedding Using Multiple Weights.` http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382 .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear dimensionality reduction via tangent space alignment. Journal of Shanghai Univ. 
8:406 (2004)` """ if eigen_solver not in ('auto', 'arpack', 'dense'): raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver) if method not in ('standard', 'hessian', 'modified', 'ltsa'): raise ValueError("unrecognized method '%s'" % method) nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs) nbrs.fit(X) X = nbrs._fit_X N, d_in = X.shape if n_components > d_in: raise ValueError("output dimension must be less than or equal " "to input dimension") if n_neighbors >= N: raise ValueError("n_neighbors must be less than number of points") if n_neighbors <= 0: raise ValueError("n_neighbors must be positive") M_sparse = (eigen_solver != 'dense') if method == 'standard': W = barycenter_kneighbors_graph( nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs) # we'll compute M = (I-W)'(I-W) # depending on the solver, we'll do this differently if M_sparse: M = eye(*W.shape, format=W.format) - W M = (M.T * M).tocsr() else: M = (W.T * W - W.T - W).toarray() M.flat[::M.shape[0] + 1] += 1 # W = W - I = W - I elif method == 'hessian': dp = n_components * (n_components + 1) // 2 if n_neighbors <= n_components + dp: raise ValueError("for method='hessian', n_neighbors must be " "greater than " "[n_components * (n_components + 3) / 2]") neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1, return_distance=False) neighbors = neighbors[:, 1:] Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64) Yi[:, 0] = 1 M = np.zeros((N, N), dtype=np.float64) use_svd = (n_neighbors > d_in) for i in range(N): Gi = X[neighbors[i]] Gi -= Gi.mean(0) # build Hessian estimator if use_svd: U = svd(Gi, full_matrices=0)[0] else: Ci = np.dot(Gi, Gi.T) U = eigh(Ci)[1][:, ::-1] Yi[:, 1:1 + n_components] = U[:, :n_components] j = 1 + n_components for k in range(n_components): Yi[:, j:j + n_components - k] = (U[:, k:k + 1] * U[:, k:n_components]) j += n_components - k Q, R = qr(Yi) w = Q[:, n_components + 1:] S = w.sum(0) S[np.where(abs(S) < hessian_tol)] = 1 w /= S nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i]) M[nbrs_x, nbrs_y] += np.dot(w, w.T) if M_sparse: M = csr_matrix(M) elif method == 'modified': if n_neighbors < n_components: raise ValueError("modified LLE requires " "n_neighbors >= n_components") neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1, return_distance=False) neighbors = neighbors[:, 1:] # find the eigenvectors and eigenvalues of each local covariance # matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix, # where the columns are eigenvectors V = np.zeros((N, n_neighbors, n_neighbors)) nev = min(d_in, n_neighbors) evals = np.zeros([N, nev]) # choose the most efficient way to find the eigenvectors use_svd = (n_neighbors > d_in) if use_svd: for i in range(N): X_nbrs = X[neighbors[i]] - X[i] V[i], evals[i], _ = svd(X_nbrs, full_matrices=True) evals **= 2 else: for i in range(N): X_nbrs = X[neighbors[i]] - X[i] C_nbrs = np.dot(X_nbrs, X_nbrs.T) evi, vi = eigh(C_nbrs) evals[i] = evi[::-1] V[i] = vi[:, ::-1] # find regularized weights: this is like normal LLE. # because we've already computed the SVD of each covariance matrix, # it's faster to use this rather than np.linalg.solve reg = 1E-3 * evals.sum(1) tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors)) tmp[:, :nev] /= evals + reg[:, None] tmp[:, nev:] /= reg[:, None] w_reg = np.zeros((N, n_neighbors)) for i in range(N): w_reg[i] = np.dot(V[i], tmp[i]) w_reg /= w_reg.sum(1)[:, None] # calculate eta: the median of the ratio of small to large eigenvalues # across the points. 
This is used to determine s_i, below rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1) eta = np.median(rho) # find s_i, the size of the "almost null space" for each point: # this is the size of the largest set of eigenvalues # such that Sum[v; v in set]/Sum[v; v not in set] < eta s_range = np.zeros(N, dtype=int) evals_cumsum = stable_cumsum(evals, 1) eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1 for i in range(N): s_range[i] = np.searchsorted(eta_range[i, ::-1], eta) s_range += n_neighbors - nev # number of zero eigenvalues # Now calculate M. # This is the [N x N] matrix whose null space is the desired embedding M = np.zeros((N, N), dtype=np.float64) for i in range(N): s_i = s_range[i] # select bottom s_i eigenvectors and calculate alpha Vi = V[i, :, n_neighbors - s_i:] alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i) # compute Householder matrix which satisfies # Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s) # using prescription from paper h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors)) norm_h = np.linalg.norm(h) if norm_h < modified_tol: h *= 0 else: h /= norm_h # Householder matrix is # >> Hi = np.identity(s_i) - 2*np.outer(h,h) # Then the weight matrix is # >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None] # We do this much more efficiently: Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h) + (1 - alpha_i) * w_reg[i, :, None]) # Update M as follows: # >> W_hat = np.zeros( (N,s_i) ) # >> W_hat[neighbors[i],:] = Wi # >> W_hat[i] -= 1 # >> M += np.dot(W_hat,W_hat.T) # We can do this much more efficiently: nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i]) M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T) Wi_sum1 = Wi.sum(1) M[i, neighbors[i]] -= Wi_sum1 M[neighbors[i], i] -= Wi_sum1 M[i, i] += s_i if M_sparse: M = csr_matrix(M) elif method == 'ltsa': neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1, return_distance=False) neighbors = neighbors[:, 1:] M = np.zeros((N, N)) use_svd = (n_neighbors > d_in) for i in range(N): Xi = X[neighbors[i]] Xi -= Xi.mean(0) # compute n_components largest eigenvalues of Xi * Xi^T if use_svd: v = svd(Xi, full_matrices=True)[0] else: Ci = np.dot(Xi, Xi.T) v = eigh(Ci)[1][:, ::-1] Gi = np.zeros((n_neighbors, n_components + 1)) Gi[:, 1:] = v[:, :n_components] Gi[:, 0] = 1. / np.sqrt(n_neighbors) GiGiT = np.dot(Gi, Gi.T) nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i]) M[nbrs_x, nbrs_y] -= GiGiT M[neighbors[i], neighbors[i]] += 1 return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver, tol=tol, max_iter=max_iter, random_state=random_state) class LocallyLinearEmbedding(BaseEstimator, TransformerMixin): """Locally Linear Embedding Read more in the :ref:`User Guide <locally_linear_embedding>`. Parameters ---------- n_neighbors : integer number of neighbors to consider for each point. n_components : integer number of coordinates for the manifold reg : float regularization constant, multiplies the trace of the local covariance matrix of the distances. eigen_solver : string, {'auto', 'arpack', 'dense'} auto : algorithm will attempt to choose the best method for input data arpack : use arnoldi iteration in shift-invert mode. For this method, M may be a dense matrix, sparse matrix, or general linear operator. Warning: ARPACK can be unstable for some problems. It is best to try several random seeds in order to check results. dense : use standard dense matrix operations for the eigenvalue decomposition. For this method, M must be an array or matrix type. 
This method should be avoided for large problems. tol : float, optional Tolerance for 'arpack' method Not used if eigen_solver=='dense'. max_iter : integer maximum number of iterations for the arpack solver. Not used if eigen_solver=='dense'. method : string ('standard', 'hessian', 'modified' or 'ltsa') standard : use the standard locally linear embedding algorithm. see reference [1] hessian : use the Hessian eigenmap method. This method requires ``n_neighbors > n_components * (1 + (n_components + 1) / 2`` see reference [2] modified : use the modified locally linear embedding algorithm. see reference [3] ltsa : use local tangent space alignment algorithm see reference [4] hessian_tol : float, optional Tolerance for Hessian eigenmapping method. Only used if ``method == 'hessian'`` modified_tol : float, optional Tolerance for modified LLE method. Only used if ``method == 'modified'`` neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree'] algorithm to use for nearest neighbors search, passed to neighbors.NearestNeighbors instance random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``eigen_solver`` == 'arpack'. n_jobs : int, optional (default = 1) The number of parallel jobs to run. If ``-1``, then the number of jobs is set to the number of CPU cores. Attributes ---------- embedding_vectors_ : array-like, shape [n_components, n_samples] Stores the embedding vectors reconstruction_error_ : float Reconstruction error associated with `embedding_vectors_` nbrs_ : NearestNeighbors object Stores nearest neighbors instance, including BallTree or KDtree if applicable. References ---------- .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction by locally linear embedding. Science 290:2323 (2000).` .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally linear embedding techniques for high-dimensional data. Proc Natl Acad Sci U S A. 100:5591 (2003).` .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear Embedding Using Multiple Weights.` http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382 .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear dimensionality reduction via tangent space alignment. Journal of Shanghai Univ. 
8:406 (2004)` """ def __init__(self, n_neighbors=5, n_components=2, reg=1E-3, eigen_solver='auto', tol=1E-6, max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12, neighbors_algorithm='auto', random_state=None, n_jobs=1): self.n_neighbors = n_neighbors self.n_components = n_components self.reg = reg self.eigen_solver = eigen_solver self.tol = tol self.max_iter = max_iter self.method = method self.hessian_tol = hessian_tol self.modified_tol = modified_tol self.random_state = random_state self.neighbors_algorithm = neighbors_algorithm self.n_jobs = n_jobs def _fit_transform(self, X): self.nbrs_ = NearestNeighbors(self.n_neighbors, algorithm=self.neighbors_algorithm, n_jobs=self.n_jobs) random_state = check_random_state(self.random_state) X = check_array(X, dtype=float) self.nbrs_.fit(X) self.embedding_, self.reconstruction_error_ = \ locally_linear_embedding( self.nbrs_, self.n_neighbors, self.n_components, eigen_solver=self.eigen_solver, tol=self.tol, max_iter=self.max_iter, method=self.method, hessian_tol=self.hessian_tol, modified_tol=self.modified_tol, random_state=random_state, reg=self.reg, n_jobs=self.n_jobs) def fit(self, X, y=None): """Compute the embedding vectors for data X Parameters ---------- X : array-like of shape [n_samples, n_features] training set. y: Ignored. Returns ------- self : returns an instance of self. """ self._fit_transform(X) return self def fit_transform(self, X, y=None): """Compute the embedding vectors for data X and transform X. Parameters ---------- X : array-like of shape [n_samples, n_features] training set. y: Ignored. Returns ------- X_new : array-like, shape (n_samples, n_components) """ self._fit_transform(X) return self.embedding_ def transform(self, X): """ Transform new points into embedding space. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- X_new : array, shape = [n_samples, n_components] Notes ----- Because of scaling performed by this method, it is discouraged to use it together with methods that are not scale-invariant (like SVMs) """ check_is_fitted(self, "nbrs_") X = check_array(X) ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors, return_distance=False) weights = barycenter_weights(X, self.nbrs_._fit_X[ind], reg=self.reg) X_new = np.empty((X.shape[0], self.n_components)) for i in range(X.shape[0]): X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i]) return X_new
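A minimal usage sketch for the estimator defined above follows; the S-curve dataset and the parameter values are assumptions chosen only for illustration.

# Hedged sketch: LocallyLinearEmbedding on a toy manifold.
from sklearn.datasets import make_s_curve
from sklearn.manifold import LocallyLinearEmbedding

X, color = make_s_curve(n_samples=500, random_state=0)

lle = LocallyLinearEmbedding(n_neighbors=12, n_components=2,
                             method='standard', random_state=0)
X_embedded = lle.fit_transform(X)

print(X_embedded.shape)              # (500, 2)
print(lle.reconstruction_error_)     # squared reconstruction error of the embedding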
26540
36.434415
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/manifold/setup.py
import os from os.path import join import numpy from numpy.distutils.misc_util import Configuration from sklearn._build_utils import get_blas_info def configuration(parent_package="", top_path=None): config = Configuration("manifold", parent_package, top_path) libraries = [] if os.name == 'posix': libraries.append('m') config.add_extension("_utils", sources=["_utils.pyx"], include_dirs=[numpy.get_include()], libraries=libraries, extra_compile_args=["-O3"]) cblas_libs, blas_info = get_blas_info() eca = blas_info.pop('extra_compile_args', []) eca.append("-O4") config.add_extension("_barnes_hut_tsne", libraries=cblas_libs, sources=["_barnes_hut_tsne.pyx"], include_dirs=[join('..', 'src', 'cblas'), numpy.get_include(), blas_info.pop('include_dirs', [])], extra_compile_args=eca, **blas_info) config.add_subpackage('tests') return config if __name__ == "__main__": from numpy.distutils.core import setup setup(**configuration().todict())
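As a hedged illustration only: the configuration defined above is normally consumed by the top-level scikit-learn build, but it can also be inspected directly. The snippet below assumes it is run from the package directory with NumPy, the generated Cython/C sources and a usable BLAS available, since configuration() queries them at call time.

# Hypothetical inspection of the build configuration (environment assumed).
cfg = configuration(parent_package="", top_path="").todict()
for ext in cfg.get("ext_modules", []):
    print(ext.name, ext.sources)     # manifold._utils, manifold._barnes_hut_tsne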
1284
32.815789
74
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/manifold/isomap.py
"""Isomap for manifold learning""" # Author: Jake Vanderplas -- <[email protected]> # License: BSD 3 clause (C) 2011 import numpy as np from ..base import BaseEstimator, TransformerMixin from ..neighbors import NearestNeighbors, kneighbors_graph from ..utils import check_array from ..utils.graph import graph_shortest_path from ..decomposition import KernelPCA from ..preprocessing import KernelCenterer class Isomap(BaseEstimator, TransformerMixin): """Isomap Embedding Non-linear dimensionality reduction through Isometric Mapping Read more in the :ref:`User Guide <isomap>`. Parameters ---------- n_neighbors : integer number of neighbors to consider for each point. n_components : integer number of coordinates for the manifold eigen_solver : ['auto'|'arpack'|'dense'] 'auto' : Attempt to choose the most efficient solver for the given problem. 'arpack' : Use Arnoldi decomposition to find the eigenvalues and eigenvectors. 'dense' : Use a direct solver (i.e. LAPACK) for the eigenvalue decomposition. tol : float Convergence tolerance passed to arpack or lobpcg. not used if eigen_solver == 'dense'. max_iter : integer Maximum number of iterations for the arpack solver. not used if eigen_solver == 'dense'. path_method : string ['auto'|'FW'|'D'] Method to use in finding shortest path. 'auto' : attempt to choose the best algorithm automatically. 'FW' : Floyd-Warshall algorithm. 'D' : Dijkstra's algorithm. neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree'] Algorithm to use for nearest neighbors search, passed to neighbors.NearestNeighbors instance. n_jobs : int, optional (default = 1) The number of parallel jobs to run. If ``-1``, then the number of jobs is set to the number of CPU cores. Attributes ---------- embedding_ : array-like, shape (n_samples, n_components) Stores the embedding vectors. kernel_pca_ : object `KernelPCA` object used to implement the embedding. training_data_ : array-like, shape (n_samples, n_features) Stores the training data. nbrs_ : sklearn.neighbors.NearestNeighbors instance Stores nearest neighbors instance, including BallTree or KDtree if applicable. dist_matrix_ : array-like, shape (n_samples, n_samples) Stores the geodesic distance matrix of training data. References ---------- .. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric framework for nonlinear dimensionality reduction. Science 290 (5500) """ def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto', tol=0, max_iter=None, path_method='auto', neighbors_algorithm='auto', n_jobs=1): self.n_neighbors = n_neighbors self.n_components = n_components self.eigen_solver = eigen_solver self.tol = tol self.max_iter = max_iter self.path_method = path_method self.neighbors_algorithm = neighbors_algorithm self.n_jobs = n_jobs def _fit_transform(self, X): X = check_array(X) self.nbrs_ = NearestNeighbors(n_neighbors=self.n_neighbors, algorithm=self.neighbors_algorithm, n_jobs=self.n_jobs) self.nbrs_.fit(X) self.training_data_ = self.nbrs_._fit_X self.kernel_pca_ = KernelPCA(n_components=self.n_components, kernel="precomputed", eigen_solver=self.eigen_solver, tol=self.tol, max_iter=self.max_iter, n_jobs=self.n_jobs) kng = kneighbors_graph(self.nbrs_, self.n_neighbors, mode='distance', n_jobs=self.n_jobs) self.dist_matrix_ = graph_shortest_path(kng, method=self.path_method, directed=False) G = self.dist_matrix_ ** 2 G *= -0.5 self.embedding_ = self.kernel_pca_.fit_transform(G) def reconstruction_error(self): """Compute the reconstruction error for the embedding. 
Returns ------- reconstruction_error : float Notes ------- The cost function of an isomap embedding is ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples`` Where D is the matrix of distances for the input data X, D_fit is the matrix of distances for the output embedding X_fit, and K is the isomap kernel: ``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)`` """ G = -0.5 * self.dist_matrix_ ** 2 G_center = KernelCenterer().fit_transform(G) evals = self.kernel_pca_.lambdas_ return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0] def fit(self, X, y=None): """Compute the embedding vectors for data X Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors} Sample data, shape = (n_samples, n_features), in the form of a numpy array, precomputed tree, or NearestNeighbors object. y: Ignored. Returns ------- self : returns an instance of self. """ self._fit_transform(X) return self def fit_transform(self, X, y=None): """Fit the model from data in X and transform X. Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree} Training vector, where n_samples in the number of samples and n_features is the number of features. y: Ignored. Returns ------- X_new : array-like, shape (n_samples, n_components) """ self._fit_transform(X) return self.embedding_ def transform(self, X): """Transform X. This is implemented by linking the points X into the graph of geodesic distances of the training data. First the `n_neighbors` nearest neighbors of X are found in the training data, and from these the shortest geodesic distances from each point in X to each point in the training data are computed in order to construct the kernel. The embedding of X is the projection of this kernel onto the embedding vectors of the training set. Parameters ---------- X : array-like, shape (n_samples, n_features) Returns ------- X_new : array-like, shape (n_samples, n_components) """ X = check_array(X) distances, indices = self.nbrs_.kneighbors(X, return_distance=True) # Create the graph of shortest distances from X to self.training_data_ # via the nearest neighbors of X. # This can be done as a single array operation, but it potentially # takes a lot of memory. To avoid that, use a loop: G_X = np.zeros((X.shape[0], self.training_data_.shape[0])) for i in range(X.shape[0]): G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0) G_X **= 2 G_X *= -0.5 return self.kernel_pca_.transform(G_X)
7,561
33.063063
79
py
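The isomap.py module above exposes the standard scikit-learn fit/transform API. The following is a minimal, hedged usage sketch: the S-curve dataset, sample count, and the choices n_neighbors=10 and n_components=2 are illustrative assumptions, not values taken from the source.

# Hedged Isomap usage sketch; dataset and hyperparameters are illustrative.
from sklearn.datasets import make_s_curve
from sklearn.manifold import Isomap

X, color = make_s_curve(n_samples=500, random_state=0)   # X has shape (500, 3)

iso = Isomap(n_neighbors=10, n_components=2)
X_2d = iso.fit_transform(X)         # geodesic distances -> KernelPCA embedding
print(X_2d.shape)                   # (500, 2)

# Kernel-space reconstruction error of the learned embedding.
print(iso.reconstruction_error())

# transform() embeds new points through their neighbors in the training graph.
print(iso.transform(X[:10]).shape)  # (10, 2)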
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/manifold/mds.py
""" Multi-dimensional Scaling (MDS) """ # author: Nelle Varoquaux <[email protected]> # License: BSD import numpy as np import warnings from ..base import BaseEstimator from ..metrics import euclidean_distances from ..utils import check_random_state, check_array, check_symmetric from ..externals.joblib import Parallel from ..externals.joblib import delayed from ..isotonic import IsotonicRegression def _smacof_single(dissimilarities, metric=True, n_components=2, init=None, max_iter=300, verbose=0, eps=1e-3, random_state=None): """Computes multidimensional scaling using SMACOF algorithm Parameters ---------- dissimilarities : ndarray, shape (n_samples, n_samples) Pairwise dissimilarities between the points. Must be symmetric. metric : boolean, optional, default: True Compute metric or nonmetric SMACOF algorithm. n_components : int, optional, default: 2 Number of dimensions in which to immerse the dissimilarities. If an ``init`` array is provided, this option is overridden and the shape of ``init`` is used to determine the dimensionality of the embedding space. init : ndarray, shape (n_samples, n_components), optional, default: None Starting configuration of the embedding to initialize the algorithm. By default, the algorithm is initialized with a randomly chosen array. max_iter : int, optional, default: 300 Maximum number of iterations of the SMACOF algorithm for a single run. verbose : int, optional, default: 0 Level of verbosity. eps : float, optional, default: 1e-3 Relative tolerance with respect to stress at which to declare convergence. random_state : int, RandomState instance or None, optional, default: None The generator used to initialize the centers. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : ndarray, shape (n_samples, n_components) Coordinates of the points in a ``n_components``-space. stress : float The final value of the stress (sum of squared distance of the disparities and the distances for all constrained points). n_iter : int The number of iterations corresponding to the best stress. 
""" dissimilarities = check_symmetric(dissimilarities, raise_exception=True) n_samples = dissimilarities.shape[0] random_state = check_random_state(random_state) sim_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel() sim_flat_w = sim_flat[sim_flat != 0] if init is None: # Randomly choose initial configuration X = random_state.rand(n_samples * n_components) X = X.reshape((n_samples, n_components)) else: # overrides the parameter p n_components = init.shape[1] if n_samples != init.shape[0]: raise ValueError("init matrix should be of shape (%d, %d)" % (n_samples, n_components)) X = init old_stress = None ir = IsotonicRegression() for it in range(max_iter): # Compute distance and monotonic regression dis = euclidean_distances(X) if metric: disparities = dissimilarities else: dis_flat = dis.ravel() # dissimilarities with 0 are considered as missing values dis_flat_w = dis_flat[sim_flat != 0] # Compute the disparities using a monotonic regression disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w) disparities = dis_flat.copy() disparities[sim_flat != 0] = disparities_flat disparities = disparities.reshape((n_samples, n_samples)) disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) / (disparities ** 2).sum()) # Compute stress stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2 # Update X using the Guttman transform dis[dis == 0] = 1e-5 ratio = disparities / dis B = - ratio B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1) X = 1. / n_samples * np.dot(B, X) dis = np.sqrt((X ** 2).sum(axis=1)).sum() if verbose >= 2: print('it: %d, stress %s' % (it, stress)) if old_stress is not None: if(old_stress - stress / dis) < eps: if verbose: print('breaking at iteration %d with stress %s' % (it, stress)) break old_stress = stress / dis return X, stress, it + 1 def smacof(dissimilarities, metric=True, n_components=2, init=None, n_init=8, n_jobs=1, max_iter=300, verbose=0, eps=1e-3, random_state=None, return_n_iter=False): """Computes multidimensional scaling using the SMACOF algorithm. The SMACOF (Scaling by MAjorizing a COmplicated Function) algorithm is a multidimensional scaling algorithm which minimizes an objective function (the *stress*) using a majorization technique. Stress majorization, also known as the Guttman Transform, guarantees a monotone convergence of stress, and is more powerful than traditional techniques such as gradient descent. The SMACOF algorithm for metric MDS can summarized by the following steps: 1. Set an initial start configuration, randomly or not. 2. Compute the stress 3. Compute the Guttman Transform 4. Iterate 2 and 3 until convergence. The nonmetric algorithm adds a monotonic regression step before computing the stress. Parameters ---------- dissimilarities : ndarray, shape (n_samples, n_samples) Pairwise dissimilarities between the points. Must be symmetric. metric : boolean, optional, default: True Compute metric or nonmetric SMACOF algorithm. n_components : int, optional, default: 2 Number of dimensions in which to immerse the dissimilarities. If an ``init`` array is provided, this option is overridden and the shape of ``init`` is used to determine the dimensionality of the embedding space. init : ndarray, shape (n_samples, n_components), optional, default: None Starting configuration of the embedding to initialize the algorithm. By default, the algorithm is initialized with a randomly chosen array. n_init : int, optional, default: 8 Number of times the SMACOF algorithm will be run with different initializations. 
The final results will be the best output of the runs, determined by the run with the smallest final stress. If ``init`` is provided, this option is overridden and a single run is performed. n_jobs : int, optional, default: 1 The number of jobs to use for the computation. If multiple initializations are used (``n_init``), each run of the algorithm is computed in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For ``n_jobs`` below -1, (``n_cpus + 1 + n_jobs``) are used. Thus for ``n_jobs = -2``, all CPUs but one are used. max_iter : int, optional, default: 300 Maximum number of iterations of the SMACOF algorithm for a single run. verbose : int, optional, default: 0 Level of verbosity. eps : float, optional, default: 1e-3 Relative tolerance with respect to stress at which to declare convergence. random_state : int, RandomState instance or None, optional, default: None The generator used to initialize the centers. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. return_n_iter : bool, optional, default: False Whether or not to return the number of iterations. Returns ------- X : ndarray, shape (n_samples, n_components) Coordinates of the points in a ``n_components``-space. stress : float The final value of the stress (sum of squared distance of the disparities and the distances for all constrained points). n_iter : int The number of iterations corresponding to the best stress. Returned only if ``return_n_iter`` is set to ``True``. Notes ----- "Modern Multidimensional Scaling - Theory and Applications" Borg, I.; Groenen P. Springer Series in Statistics (1997) "Nonmetric multidimensional scaling: a numerical method" Kruskal, J. Psychometrika, 29 (1964) "Multidimensional scaling by optimizing goodness of fit to a nonmetric hypothesis" Kruskal, J. Psychometrika, 29, (1964) """ dissimilarities = check_array(dissimilarities) random_state = check_random_state(random_state) if hasattr(init, '__array__'): init = np.asarray(init).copy() if not n_init == 1: warnings.warn( 'Explicit initial positions passed: ' 'performing only one init of the MDS instead of %d' % n_init) n_init = 1 best_pos, best_stress = None, None if n_jobs == 1: for it in range(n_init): pos, stress, n_iter_ = _smacof_single( dissimilarities, metric=metric, n_components=n_components, init=init, max_iter=max_iter, verbose=verbose, eps=eps, random_state=random_state) if best_stress is None or stress < best_stress: best_stress = stress best_pos = pos.copy() best_iter = n_iter_ else: seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))( delayed(_smacof_single)( dissimilarities, metric=metric, n_components=n_components, init=init, max_iter=max_iter, verbose=verbose, eps=eps, random_state=seed) for seed in seeds) positions, stress, n_iters = zip(*results) best = np.argmin(stress) best_stress = stress[best] best_pos = positions[best] best_iter = n_iters[best] if return_n_iter: return best_pos, best_stress, best_iter else: return best_pos, best_stress class MDS(BaseEstimator): """Multidimensional scaling Read more in the :ref:`User Guide <multidimensional_scaling>`. Parameters ---------- n_components : int, optional, default: 2 Number of dimensions in which to immerse the dissimilarities. 
metric : boolean, optional, default: True If ``True``, perform metric MDS; otherwise, perform nonmetric MDS. n_init : int, optional, default: 4 Number of times the SMACOF algorithm will be run with different initializations. The final results will be the best output of the runs, determined by the run with the smallest final stress. max_iter : int, optional, default: 300 Maximum number of iterations of the SMACOF algorithm for a single run. verbose : int, optional, default: 0 Level of verbosity. eps : float, optional, default: 1e-3 Relative tolerance with respect to stress at which to declare convergence. n_jobs : int, optional, default: 1 The number of jobs to use for the computation. If multiple initializations are used (``n_init``), each run of the algorithm is computed in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For ``n_jobs`` below -1, (``n_cpus + 1 + n_jobs``) are used. Thus for ``n_jobs = -2``, all CPUs but one are used. random_state : int, RandomState instance or None, optional, default: None The generator used to initialize the centers. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. dissimilarity : 'euclidean' | 'precomputed', optional, default: 'euclidean' Dissimilarity measure to use: - 'euclidean': Pairwise Euclidean distances between points in the dataset. - 'precomputed': Pre-computed dissimilarities are passed directly to ``fit`` and ``fit_transform``. Attributes ---------- embedding_ : array-like, shape (n_components, n_samples) Stores the position of the dataset in the embedding space. stress_ : float The final value of the stress (sum of squared distance of the disparities and the distances for all constrained points). References ---------- "Modern Multidimensional Scaling - Theory and Applications" Borg, I.; Groenen P. Springer Series in Statistics (1997) "Nonmetric multidimensional scaling: a numerical method" Kruskal, J. Psychometrika, 29 (1964) "Multidimensional scaling by optimizing goodness of fit to a nonmetric hypothesis" Kruskal, J. Psychometrika, 29, (1964) """ def __init__(self, n_components=2, metric=True, n_init=4, max_iter=300, verbose=0, eps=1e-3, n_jobs=1, random_state=None, dissimilarity="euclidean"): self.n_components = n_components self.dissimilarity = dissimilarity self.metric = metric self.n_init = n_init self.max_iter = max_iter self.eps = eps self.verbose = verbose self.n_jobs = n_jobs self.random_state = random_state @property def _pairwise(self): return self.kernel == "precomputed" def fit(self, X, y=None, init=None): """ Computes the position of the points in the embedding space Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) Input data. If ``dissimilarity=='precomputed'``, the input should be the dissimilarity matrix. y: Ignored. init : ndarray, shape (n_samples,), optional, default: None Starting configuration of the embedding to initialize the SMACOF algorithm. By default, the algorithm is initialized with a randomly chosen array. """ self.fit_transform(X, init=init) return self def fit_transform(self, X, y=None, init=None): """ Fit the data from X, and returns the embedded coordinates Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) Input data. 
            If ``dissimilarity=='precomputed'``, the input should
            be the dissimilarity matrix.

        y: Ignored.

        init : ndarray, shape (n_samples, n_components), optional, default: None
            Starting configuration of the embedding to initialize the SMACOF
            algorithm. By default, the algorithm is initialized with a randomly
            chosen array.
        """
        X = check_array(X)
        if X.shape[0] == X.shape[1] and self.dissimilarity != "precomputed":
            warnings.warn("The MDS API has changed. ``fit`` now constructs a"
                          " dissimilarity matrix from data. To use a custom "
                          "dissimilarity matrix, set "
                          "``dissimilarity='precomputed'``.")

        if self.dissimilarity == "precomputed":
            self.dissimilarity_matrix_ = X
        elif self.dissimilarity == "euclidean":
            self.dissimilarity_matrix_ = euclidean_distances(X)
        else:
            raise ValueError("Dissimilarity must be 'precomputed' or "
                             "'euclidean'. Got %s instead"
                             % str(self.dissimilarity))

        self.embedding_, self.stress_, self.n_iter_ = smacof(
            self.dissimilarity_matrix_, metric=self.metric,
            n_components=self.n_components, init=init, n_init=self.n_init,
            n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose,
            eps=self.eps, random_state=self.random_state,
            return_n_iter=True)

        return self.embedding_
16,754
37.784722
79
py
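The mds.py module above can be driven either from raw features or from a precomputed dissimilarity matrix. A hedged sketch follows; the random data, seed, and sample count are illustrative assumptions.

# Hedged MDS usage sketch; data and parameters are illustrative.
import numpy as np
from sklearn.manifold import MDS
from sklearn.metrics import euclidean_distances

rng = np.random.RandomState(0)
X = rng.rand(50, 5)

# Metric MDS on the raw features (dissimilarity='euclidean' is the default).
mds = MDS(n_components=2, random_state=0)
X_2d = mds.fit_transform(X)
print(X_2d.shape, mds.stress_)      # (50, 2) and the final SMACOF stress

# The same embedding driven by a precomputed dissimilarity matrix.
D = euclidean_distances(X)
mds_pre = MDS(n_components=2, dissimilarity='precomputed', random_state=0)
print(mds_pre.fit_transform(D).shape)   # (50, 2)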
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/manifold/spectral_embedding_.py
"""Spectral Embedding""" # Author: Gael Varoquaux <[email protected]> # Wei LI <[email protected]> # License: BSD 3 clause import warnings import numpy as np from scipy import sparse from scipy.linalg import eigh from scipy.sparse.linalg import eigsh, lobpcg from scipy.sparse.csgraph import connected_components from ..base import BaseEstimator from ..externals import six from ..utils import check_random_state, check_array, check_symmetric from ..utils.extmath import _deterministic_vector_sign_flip from ..metrics.pairwise import rbf_kernel from ..neighbors import kneighbors_graph def _graph_connected_component(graph, node_id): """Find the largest graph connected components that contains one given node Parameters ---------- graph : array-like, shape: (n_samples, n_samples) adjacency matrix of the graph, non-zero weight means an edge between the nodes node_id : int The index of the query node of the graph Returns ------- connected_components_matrix : array-like, shape: (n_samples,) An array of bool value indicating the indexes of the nodes belonging to the largest connected components of the given query node """ n_node = graph.shape[0] if sparse.issparse(graph): # speed up row-wise access to boolean connection mask graph = graph.tocsr() connected_nodes = np.zeros(n_node, dtype=np.bool) nodes_to_explore = np.zeros(n_node, dtype=np.bool) nodes_to_explore[node_id] = True for _ in range(n_node): last_num_component = connected_nodes.sum() np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes) if last_num_component >= connected_nodes.sum(): break indices = np.where(nodes_to_explore)[0] nodes_to_explore.fill(False) for i in indices: if sparse.issparse(graph): neighbors = graph[i].toarray().ravel() else: neighbors = graph[i] np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore) return connected_nodes def _graph_is_connected(graph): """ Return whether the graph is connected (True) or Not (False) Parameters ---------- graph : array-like or sparse matrix, shape: (n_samples, n_samples) adjacency matrix of the graph, non-zero weight means an edge between the nodes Returns ------- is_connected : bool True means the graph is fully connected and False means not """ if sparse.isspmatrix(graph): # sparse graph, find all the connected components n_connected_components, _ = connected_components(graph) return n_connected_components == 1 else: # dense graph, find all connected components start from node 0 return _graph_connected_component(graph, 0).sum() == graph.shape[0] def _set_diag(laplacian, value, norm_laplacian): """Set the diagonal of the laplacian matrix and convert it to a sparse format well suited for eigenvalue decomposition Parameters ---------- laplacian : array or sparse matrix The graph laplacian value : float The value of the diagonal norm_laplacian : bool Whether the value of the diagonal should be changed or not Returns ------- laplacian : array or sparse matrix An array of matrix in a form that is well suited to fast eigenvalue decomposition, depending on the band width of the matrix. 
""" n_nodes = laplacian.shape[0] # We need all entries in the diagonal to values if not sparse.isspmatrix(laplacian): if norm_laplacian: laplacian.flat[::n_nodes + 1] = value else: laplacian = laplacian.tocoo() if norm_laplacian: diag_idx = (laplacian.row == laplacian.col) laplacian.data[diag_idx] = value # If the matrix has a small number of diagonals (as in the # case of structured matrices coming from images), the # dia format might be best suited for matvec products: n_diags = np.unique(laplacian.row - laplacian.col).size if n_diags <= 7: # 3 or less outer diagonals on each side laplacian = laplacian.todia() else: # csr has the fastest matvec and is thus best suited to # arpack laplacian = laplacian.tocsr() return laplacian def spectral_embedding(adjacency, n_components=8, eigen_solver=None, random_state=None, eigen_tol=0.0, norm_laplacian=True, drop_first=True): """Project the sample on the first eigenvectors of the graph Laplacian. The adjacency matrix is used to compute a normalized graph Laplacian whose spectrum (especially the eigenvectors associated to the smallest eigenvalues) has an interpretation in terms of minimal number of cuts necessary to split the graph into comparably sized components. This embedding can also 'work' even if the ``adjacency`` variable is not strictly the adjacency matrix of a graph but more generally an affinity or similarity matrix between samples (for instance the heat kernel of a euclidean distance matrix or a k-NN matrix). However care must taken to always make the affinity matrix symmetric so that the eigenvector decomposition works as expected. Note : Laplacian Eigenmaps is the actual algorithm implemented here. Read more in the :ref:`User Guide <spectral_embedding>`. Parameters ---------- adjacency : array-like or sparse matrix, shape: (n_samples, n_samples) The adjacency matrix of the graph to embed. n_components : integer, optional, default 8 The dimension of the projection subspace. eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}, default None The eigenvalue decomposition strategy to use. AMG requires pyamg to be installed. It can be faster on very large, sparse problems, but may also lead to instabilities. random_state : int, RandomState instance or None, optional, default: None A pseudo random number generator used for the initialization of the lobpcg eigenvectors decomposition. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``solver`` == 'amg'. eigen_tol : float, optional, default=0.0 Stopping criterion for eigendecomposition of the Laplacian matrix when using arpack eigen_solver. norm_laplacian : bool, optional, default=True If True, then compute normalized Laplacian. drop_first : bool, optional, default=True Whether to drop the first eigenvector. For spectral embedding, this should be True as the first eigenvector should be constant vector for connected graph, but for spectral clustering, this should be kept as False to retain the first eigenvector. Returns ------- embedding : array, shape=(n_samples, n_components) The reduced samples. Notes ----- Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph has one connected component. If there graph has many components, the first few eigenvectors will simply uncover the connected components of the graph. 
References ---------- * https://en.wikipedia.org/wiki/LOBPCG * Toward the Optimal Preconditioned Eigensolver: Locally Optimal Block Preconditioned Conjugate Gradient Method Andrew V. Knyazev http://dx.doi.org/10.1137%2FS1064827500366124 """ adjacency = check_symmetric(adjacency) try: from pyamg import smoothed_aggregation_solver except ImportError: if eigen_solver == "amg": raise ValueError("The eigen_solver was set to 'amg', but pyamg is " "not available.") if eigen_solver is None: eigen_solver = 'arpack' elif eigen_solver not in ('arpack', 'lobpcg', 'amg'): raise ValueError("Unknown value for eigen_solver: '%s'." "Should be 'amg', 'arpack', or 'lobpcg'" % eigen_solver) random_state = check_random_state(random_state) n_nodes = adjacency.shape[0] # Whether to drop the first eigenvector if drop_first: n_components = n_components + 1 if not _graph_is_connected(adjacency): warnings.warn("Graph is not fully connected, spectral embedding" " may not work as expected.") laplacian, dd = sparse.csgraph.laplacian(adjacency, normed=norm_laplacian, return_diag=True) if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)): # lobpcg used with eigen_solver='amg' has bugs for low number of nodes # for details see the source code in scipy: # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen # /lobpcg/lobpcg.py#L237 # or matlab: # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m laplacian = _set_diag(laplacian, 1, norm_laplacian) # Here we'll use shift-invert mode for fast eigenvalues # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html # for a short explanation of what this means) # Because the normalized Laplacian has eigenvalues between 0 and 2, # I - L has eigenvalues between -1 and 1. ARPACK is most efficient # when finding eigenvalues of largest magnitude (keyword which='LM') # and when these eigenvalues are very large compared to the rest. # For very large, very sparse graphs, I - L can have many, many # eigenvalues very near 1.0. This leads to slow convergence. So # instead, we'll use ARPACK's shift-invert mode, asking for the # eigenvalues near 1.0. This effectively spreads-out the spectrum # near 1.0 and leads to much faster convergence: potentially an # orders-of-magnitude speedup over simply using keyword which='LA' # in standard mode. try: # We are computing the opposite of the laplacian inplace so as # to spare a memory allocation of a possibly very large array laplacian *= -1 v0 = random_state.uniform(-1, 1, laplacian.shape[0]) lambdas, diffusion_map = eigsh(laplacian, k=n_components, sigma=1.0, which='LM', tol=eigen_tol, v0=v0) embedding = diffusion_map.T[n_components::-1] * dd except RuntimeError: # When submatrices are exactly singular, an LU decomposition # in arpack fails. We fallback to lobpcg eigen_solver = "lobpcg" # Revert the laplacian to its opposite to have lobpcg work laplacian *= -1 if eigen_solver == 'amg': # Use AMG to get a preconditioner and speed up the eigenvalue # problem. 
if not sparse.issparse(laplacian): warnings.warn("AMG works better for sparse matrices") # lobpcg needs double precision floats laplacian = check_array(laplacian, dtype=np.float64, accept_sparse=True) laplacian = _set_diag(laplacian, 1, norm_laplacian) ml = smoothed_aggregation_solver(check_array(laplacian, 'csr')) M = ml.aspreconditioner() X = random_state.rand(laplacian.shape[0], n_components + 1) X[:, 0] = dd.ravel() lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12, largest=False) embedding = diffusion_map.T * dd if embedding.shape[0] == 1: raise ValueError elif eigen_solver == "lobpcg": # lobpcg needs double precision floats laplacian = check_array(laplacian, dtype=np.float64, accept_sparse=True) if n_nodes < 5 * n_components + 1: # see note above under arpack why lobpcg has problems with small # number of nodes # lobpcg will fallback to eigh, so we short circuit it if sparse.isspmatrix(laplacian): laplacian = laplacian.toarray() lambdas, diffusion_map = eigh(laplacian) embedding = diffusion_map.T[:n_components] * dd else: laplacian = _set_diag(laplacian, 1, norm_laplacian) # We increase the number of eigenvectors requested, as lobpcg # doesn't behave well in low dimension X = random_state.rand(laplacian.shape[0], n_components + 1) X[:, 0] = dd.ravel() lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15, largest=False, maxiter=2000) embedding = diffusion_map.T[:n_components] * dd if embedding.shape[0] == 1: raise ValueError embedding = _deterministic_vector_sign_flip(embedding) if drop_first: return embedding[1:n_components].T else: return embedding[:n_components].T class SpectralEmbedding(BaseEstimator): """Spectral embedding for non-linear dimensionality reduction. Forms an affinity matrix given by the specified function and applies spectral decomposition to the corresponding graph laplacian. The resulting transformation is given by the value of the eigenvectors for each data point. Note : Laplacian Eigenmaps is the actual algorithm implemented here. Read more in the :ref:`User Guide <spectral_embedding>`. Parameters ----------- n_components : integer, default: 2 The dimension of the projected subspace. affinity : string or callable, default : "nearest_neighbors" How to construct the affinity matrix. - 'nearest_neighbors' : construct affinity matrix by knn graph - 'rbf' : construct affinity matrix by rbf kernel - 'precomputed' : interpret X as precomputed affinity matrix - callable : use passed in function as affinity the function takes in data matrix (n_samples, n_features) and return affinity matrix (n_samples, n_samples). gamma : float, optional, default : 1/n_features Kernel coefficient for rbf kernel. random_state : int, RandomState instance or None, optional, default: None A pseudo random number generator used for the initialization of the lobpcg eigenvectors. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``solver`` == 'amg'. eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'} The eigenvalue decomposition strategy to use. AMG requires pyamg to be installed. It can be faster on very large, sparse problems, but may also lead to instabilities. n_neighbors : int, default : max(n_samples/10 , 1) Number of nearest neighbors for nearest_neighbors graph building. n_jobs : int, optional (default = 1) The number of parallel jobs to run. 
If ``-1``, then the number of jobs is set to the number of CPU cores. Attributes ---------- embedding_ : array, shape = (n_samples, n_components) Spectral embedding of the training matrix. affinity_matrix_ : array, shape = (n_samples, n_samples) Affinity_matrix constructed from samples or precomputed. References ---------- - A Tutorial on Spectral Clustering, 2007 Ulrike von Luxburg http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323 - On Spectral Clustering: Analysis and an algorithm, 2001 Andrew Y. Ng, Michael I. Jordan, Yair Weiss http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100 - Normalized cuts and image segmentation, 2000 Jianbo Shi, Jitendra Malik http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324 """ def __init__(self, n_components=2, affinity="nearest_neighbors", gamma=None, random_state=None, eigen_solver=None, n_neighbors=None, n_jobs=1): self.n_components = n_components self.affinity = affinity self.gamma = gamma self.random_state = random_state self.eigen_solver = eigen_solver self.n_neighbors = n_neighbors self.n_jobs = n_jobs @property def _pairwise(self): return self.affinity == "precomputed" def _get_affinity_matrix(self, X, Y=None): """Calculate the affinity matrix from data Parameters ---------- X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. If affinity is "precomputed" X : array-like, shape (n_samples, n_samples), Interpret X as precomputed adjacency graph computed from samples. Y: Ignored. Returns ------- affinity_matrix, shape (n_samples, n_samples) """ if self.affinity == 'precomputed': self.affinity_matrix_ = X return self.affinity_matrix_ if self.affinity == 'nearest_neighbors': if sparse.issparse(X): warnings.warn("Nearest neighbors affinity currently does " "not support sparse input, falling back to " "rbf affinity") self.affinity = "rbf" else: self.n_neighbors_ = (self.n_neighbors if self.n_neighbors is not None else max(int(X.shape[0] / 10), 1)) self.affinity_matrix_ = kneighbors_graph(X, self.n_neighbors_, include_self=True, n_jobs=self.n_jobs) # currently only symmetric affinity_matrix supported self.affinity_matrix_ = 0.5 * (self.affinity_matrix_ + self.affinity_matrix_.T) return self.affinity_matrix_ if self.affinity == 'rbf': self.gamma_ = (self.gamma if self.gamma is not None else 1.0 / X.shape[1]) self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_) return self.affinity_matrix_ self.affinity_matrix_ = self.affinity(X) return self.affinity_matrix_ def fit(self, X, y=None): """Fit the model from data in X. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. If affinity is "precomputed" X : array-like, shape (n_samples, n_samples), Interpret X as precomputed adjacency graph computed from samples. Y: Ignored. Returns ------- self : object Returns the instance itself. """ X = check_array(X, ensure_min_samples=2, estimator=self) random_state = check_random_state(self.random_state) if isinstance(self.affinity, six.string_types): if self.affinity not in set(("nearest_neighbors", "rbf", "precomputed")): raise ValueError(("%s is not a valid affinity. Expected " "'precomputed', 'rbf', 'nearest_neighbors' " "or a callable.") % self.affinity) elif not callable(self.affinity): raise ValueError(("'affinity' is expected to be an affinity " "name or a callable. 
Got: %s") % self.affinity) affinity_matrix = self._get_affinity_matrix(X) self.embedding_ = spectral_embedding(affinity_matrix, n_components=self.n_components, eigen_solver=self.eigen_solver, random_state=random_state) return self def fit_transform(self, X, y=None): """Fit the model from data in X and transform X. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. If affinity is "precomputed" X : array-like, shape (n_samples, n_samples), Interpret X as precomputed adjacency graph computed from samples. Y: Ignored. Returns ------- X_new : array-like, shape (n_samples, n_components) """ self.fit(X) return self.embedding_
21,511
39.665406
79
py
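Both the SpectralEmbedding estimator and the lower-level spectral_embedding function from the module above can be exercised as sketched below; the two-circles data, n_neighbors=10, and the explicit symmetrization (mirroring what the estimator does internally) are illustrative assumptions.

# Hedged spectral embedding usage sketch; data and parameters are illustrative.
from sklearn.datasets import make_circles
from sklearn.manifold import SpectralEmbedding, spectral_embedding
from sklearn.neighbors import kneighbors_graph

X, y = make_circles(n_samples=300, factor=0.5, noise=0.05, random_state=0)

# Estimator API: builds a k-NN affinity graph internally.
se = SpectralEmbedding(n_components=2, affinity='nearest_neighbors',
                       n_neighbors=10, random_state=0)
print(se.fit_transform(X).shape)    # (300, 2)

# Function API: pass a symmetric affinity (adjacency) matrix directly.
graph = kneighbors_graph(X, n_neighbors=10, include_self=True)
affinity = 0.5 * (graph + graph.T)  # symmetrize, as the estimator does
embedding = spectral_embedding(affinity, n_components=2, random_state=0)
print(embedding.shape)              # (300, 2)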
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/manifold/t_sne.py
# Author: Alexander Fabisch -- <[email protected]> # Author: Christopher Moody <[email protected]> # Author: Nick Travers <[email protected]> # License: BSD 3 clause (C) 2014 # This is the exact and Barnes-Hut t-SNE implementation. There are other # modifications of the algorithm: # * Fast Optimization for t-SNE: # http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf from time import time import numpy as np from scipy import linalg import scipy.sparse as sp from scipy.spatial.distance import pdist from scipy.spatial.distance import squareform from scipy.sparse import csr_matrix from ..neighbors import NearestNeighbors from ..base import BaseEstimator from ..utils import check_array from ..utils import check_random_state from ..decomposition import PCA from ..metrics.pairwise import pairwise_distances from . import _utils from . import _barnes_hut_tsne from ..externals.six import string_types from ..utils import deprecated MACHINE_EPSILON = np.finfo(np.double).eps def _joint_probabilities(distances, desired_perplexity, verbose): """Compute joint probabilities p_ij from distances. Parameters ---------- distances : array, shape (n_samples * (n_samples-1) / 2,) Distances of samples are stored as condensed matrices, i.e. we omit the diagonal and duplicate entries and store everything in a one-dimensional array. desired_perplexity : float Desired perplexity of the joint probability distributions. verbose : int Verbosity level. Returns ------- P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. """ # Compute conditional probabilities such that they approximately match # the desired perplexity distances = distances.astype(np.float32, copy=False) conditional_P = _utils._binary_search_perplexity( distances, None, desired_perplexity, verbose) P = conditional_P + conditional_P.T sum_P = np.maximum(np.sum(P), MACHINE_EPSILON) P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON) return P def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose): """Compute joint probabilities p_ij from distances using just nearest neighbors. This method is approximately equal to _joint_probabilities. The latter is O(N), but limiting the joint probability to nearest neighbors improves this substantially to O(uN). Parameters ---------- distances : array, shape (n_samples, k) Distances of samples to its k nearest neighbors. neighbors : array, shape (n_samples, k) Indices of the k nearest-neighbors for each samples. desired_perplexity : float Desired perplexity of the joint probability distributions. verbose : int Verbosity level. Returns ------- P : csr sparse matrix, shape (n_samples, n_samples) Condensed joint probability matrix with only nearest neighbors. 
""" t0 = time() # Compute conditional probabilities such that they approximately match # the desired perplexity n_samples, k = neighbors.shape distances = distances.astype(np.float32, copy=False) neighbors = neighbors.astype(np.int64, copy=False) conditional_P = _utils._binary_search_perplexity( distances, neighbors, desired_perplexity, verbose) assert np.all(np.isfinite(conditional_P)), \ "All probabilities should be finite" # Symmetrize the joint probability distribution using sparse operations P = csr_matrix((conditional_P.ravel(), neighbors.ravel(), range(0, n_samples * k + 1, k)), shape=(n_samples, n_samples)) P = P + P.T # Normalize the joint probability distribution sum_P = np.maximum(P.sum(), MACHINE_EPSILON) P /= sum_P assert np.all(np.abs(P.data) <= 1.0) if verbose >= 2: duration = time() - t0 print("[t-SNE] Computed conditional probabilities in {:.3f}s" .format(duration)) return P def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components, skip_num_points=0): """t-SNE objective function: gradient of the KL divergence of p_ijs and q_ijs and the absolute error. Parameters ---------- params : array, shape (n_params,) Unraveled embedding. P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. degrees_of_freedom : float Degrees of freedom of the Student's-t distribution. n_samples : int Number of samples. n_components : int Dimension of the embedded space. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : array, shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding. """ X_embedded = params.reshape(n_samples, n_components) # Q is a heavy-tailed distribution: Student's t-distribution dist = pdist(X_embedded, "sqeuclidean") dist /= degrees_of_freedom dist += 1. dist **= (degrees_of_freedom + 1.0) / -2.0 Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON) # Optimization trick below: np.dot(x, y) is faster than # np.sum(x * y) because it calls BLAS # Objective: C (Kullback-Leibler divergence of P and Q) kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q)) # Gradient: dC/dY # pdist always returns double precision distances. Thus we need to take grad = np.ndarray((n_samples, n_components), dtype=params.dtype) PQd = squareform((P - Q) * dist) for i in range(skip_num_points, n_samples): grad[i] = np.dot(np.ravel(PQd[i], order='K'), X_embedded[i] - X_embedded) grad = grad.ravel() c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom grad *= c return kl_divergence, grad def _kl_divergence_bh(params, P, degrees_of_freedom, n_samples, n_components, angle=0.5, skip_num_points=0, verbose=False): """t-SNE objective function: KL divergence of p_ijs and q_ijs. Uses Barnes-Hut tree methods to calculate the gradient that runs in O(NlogN) instead of O(N^2) Parameters ---------- params : array, shape (n_params,) Unraveled embedding. P : csr sparse matrix, shape (n_samples, n_sample) Sparse approximate joint probability matrix, computed only for the k nearest-neighbors and symmetrized. degrees_of_freedom : float Degrees of freedom of the Student's-t distribution. n_samples : int Number of samples. n_components : int Dimension of the embedded space. 
angle : float (default: 0.5) This is the trade-off between speed and accuracy for Barnes-Hut T-SNE. 'angle' is the angular size (referred to as theta in [3]) of a distant node as measured from a point. If this size is below 'angle' then it is used as a summary node of all points contained within it. This method is not very sensitive to changes in this parameter in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing computation time and angle greater 0.8 has quickly increasing error. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. verbose : int Verbosity level. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : array, shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding. """ params = params.astype(np.float32, copy=False) X_embedded = params.reshape(n_samples, n_components) val_P = P.data.astype(np.float32, copy=False) neighbors = P.indices.astype(np.int64, copy=False) indptr = P.indptr.astype(np.int64, copy=False) grad = np.zeros(X_embedded.shape, dtype=np.float32) error = _barnes_hut_tsne.gradient(val_P, X_embedded, neighbors, indptr, grad, angle, n_components, verbose, dof=degrees_of_freedom) c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom grad = grad.ravel() grad *= c return error, grad def _gradient_descent(objective, p0, it, n_iter, n_iter_check=1, n_iter_without_progress=300, momentum=0.8, learning_rate=200.0, min_gain=0.01, min_grad_norm=1e-7, verbose=0, args=None, kwargs=None): """Batch gradient descent with momentum and individual gains. Parameters ---------- objective : function or callable Should return a tuple of cost and gradient for a given parameter vector. When expensive to compute, the cost can optionally be None and can be computed every n_iter_check steps using the objective_error function. p0 : array-like, shape (n_params,) Initial parameter vector. it : int Current number of iterations (this function will be called more than once during the optimization). n_iter : int Maximum number of gradient descent iterations. n_iter_check : int Number of iterations before evaluating the global error. If the error is sufficiently low, we abort the optimization. n_iter_without_progress : int, optional (default: 300) Maximum number of iterations without progress before we abort the optimization. momentum : float, within (0.0, 1.0), optional (default: 0.8) The momentum generates a weight for previous gradients that decays exponentially. learning_rate : float, optional (default: 200.0) The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If the learning rate is too high, the data may look like a 'ball' with any point approximately equidistant from its nearest neighbours. If the learning rate is too low, most points may look compressed in a dense cloud with few outliers. min_gain : float, optional (default: 0.01) Minimum individual gain for each parameter. min_grad_norm : float, optional (default: 1e-7) If the gradient norm is below this threshold, the optimization will be aborted. verbose : int, optional (default: 0) Verbosity level. args : sequence Arguments to pass to objective function. kwargs : dict Keyword arguments to pass to objective function. Returns ------- p : array, shape (n_params,) Optimum parameters. error : float Optimum. i : int Last iteration. 
""" if args is None: args = [] if kwargs is None: kwargs = {} p = p0.copy().ravel() update = np.zeros_like(p) gains = np.ones_like(p) error = np.finfo(np.float).max best_error = np.finfo(np.float).max best_iter = i = it tic = time() for i in range(it, n_iter): error, grad = objective(p, *args, **kwargs) grad_norm = linalg.norm(grad) inc = update * grad < 0.0 dec = np.invert(inc) gains[inc] += 0.2 gains[dec] *= 0.8 np.clip(gains, min_gain, np.inf, out=gains) grad *= gains update = momentum * update - learning_rate * grad p += update if (i + 1) % n_iter_check == 0: toc = time() duration = toc - tic tic = toc if verbose >= 2: print("[t-SNE] Iteration %d: error = %.7f," " gradient norm = %.7f" " (%s iterations in %0.3fs)" % (i + 1, error, grad_norm, n_iter_check, duration)) if error < best_error: best_error = error best_iter = i elif i - best_iter > n_iter_without_progress: if verbose >= 2: print("[t-SNE] Iteration %d: did not make any progress " "during the last %d episodes. Finished." % (i + 1, n_iter_without_progress)) break if grad_norm <= min_grad_norm: if verbose >= 2: print("[t-SNE] Iteration %d: gradient norm %f. Finished." % (i + 1, grad_norm)) break return p, error, i def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False): """Expresses to what extent the local structure is retained. The trustworthiness is within [0, 1]. It is defined as .. math:: T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1} \sum_{j \in U^{(k)}_i} (r(i, j) - k) where :math:`r(i, j)` is the rank of the embedded datapoint j according to the pairwise distances between the embedded datapoints, :math:`U^{(k)}_i` is the set of points that are in the k nearest neighbors in the embedded space but not in the original space. * "Neighborhood Preservation in Nonlinear Projection Methods: An Experimental Study" J. Venna, S. Kaski * "Learning a Parametric Embedding by Preserving Local Structure" L.J.P. van der Maaten Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. X_embedded : array, shape (n_samples, n_components) Embedding of the training data in low-dimensional space. n_neighbors : int, optional (default: 5) Number of neighbors k that will be considered. precomputed : bool, optional (default: False) Set this flag if X is a precomputed square distance matrix. Returns ------- trustworthiness : float Trustworthiness of the low-dimensional embedding. """ if precomputed: dist_X = X else: dist_X = pairwise_distances(X, squared=True) dist_X_embedded = pairwise_distances(X_embedded, squared=True) ind_X = np.argsort(dist_X, axis=1) ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1] n_samples = X.shape[0] t = 0.0 ranks = np.zeros(n_neighbors) for i in range(n_samples): for j in range(n_neighbors): ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0] ranks -= n_neighbors t += np.sum(ranks[ranks > 0]) t = 1.0 - t * (2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))) return t class TSNE(BaseEstimator): """t-distributed Stochastic Neighbor Embedding. t-SNE [1] is a tool to visualize high-dimensional data. It converts similarities between data points to joint probabilities and tries to minimize the Kullback-Leibler divergence between the joint probabilities of the low-dimensional embedding and the high-dimensional data. t-SNE has a cost function that is not convex, i.e. 
with different initializations we can get different results. It is highly recommended to use another dimensionality reduction method (e.g. PCA for dense data or TruncatedSVD for sparse data) to reduce the number of dimensions to a reasonable amount (e.g. 50) if the number of features is very high. This will suppress some noise and speed up the computation of pairwise distances between samples. For more tips see Laurens van der Maaten's FAQ [2]. Read more in the :ref:`User Guide <t_sne>`. Parameters ---------- n_components : int, optional (default: 2) Dimension of the embedded space. perplexity : float, optional (default: 30) The perplexity is related to the number of nearest neighbors that is used in other manifold learning algorithms. Larger datasets usually require a larger perplexity. Consider selecting a value between 5 and 50. The choice is not extremely critical since t-SNE is quite insensitive to this parameter. early_exaggeration : float, optional (default: 12.0) Controls how tight natural clusters in the original space are in the embedded space and how much space will be between them. For larger values, the space between natural clusters will be larger in the embedded space. Again, the choice of this parameter is not very critical. If the cost function increases during initial optimization, the early exaggeration factor or the learning rate might be too high. learning_rate : float, optional (default: 200.0) The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If the learning rate is too high, the data may look like a 'ball' with any point approximately equidistant from its nearest neighbours. If the learning rate is too low, most points may look compressed in a dense cloud with few outliers. If the cost function gets stuck in a bad local minimum increasing the learning rate may help. n_iter : int, optional (default: 1000) Maximum number of iterations for the optimization. Should be at least 250. n_iter_without_progress : int, optional (default: 300) Maximum number of iterations without progress before we abort the optimization, used after 250 initial iterations with early exaggeration. Note that progress is only checked every 50 iterations so this value is rounded to the next multiple of 50. .. versionadded:: 0.17 parameter *n_iter_without_progress* to control stopping criteria. min_grad_norm : float, optional (default: 1e-7) If the gradient norm is below this threshold, the optimization will be stopped. metric : string or callable, optional The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by scipy.spatial.distance.pdist for its metric parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is "precomputed", X is assumed to be a distance matrix. Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. The default is "euclidean" which is interpreted as squared euclidean distance. init : string or numpy array, optional (default: "random") Initialization of embedding. Possible options are 'random', 'pca', and a numpy array of shape (n_samples, n_components). PCA initialization cannot be used with precomputed distances and is usually more globally stable than random initialization. verbose : int, optional (default: 0) Verbosity level. 
random_state : int, RandomState instance or None, optional (default: None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Note that different initializations might result in different local minima of the cost function. method : string (default: 'barnes_hut') By default the gradient calculation algorithm uses Barnes-Hut approximation running in O(NlogN) time. method='exact' will run on the slower, but exact, algorithm in O(N^2) time. The exact algorithm should be used when nearest-neighbor errors need to be better than 3%. However, the exact method cannot scale to millions of examples. .. versionadded:: 0.17 Approximate optimization *method* via the Barnes-Hut. angle : float (default: 0.5) Only used if method='barnes_hut' This is the trade-off between speed and accuracy for Barnes-Hut T-SNE. 'angle' is the angular size (referred to as theta in [3]) of a distant node as measured from a point. If this size is below 'angle' then it is used as a summary node of all points contained within it. This method is not very sensitive to changes in this parameter in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing computation time and angle greater 0.8 has quickly increasing error. Attributes ---------- embedding_ : array-like, shape (n_samples, n_components) Stores the embedding vectors. kl_divergence_ : float Kullback-Leibler divergence after optimization. n_iter_ : int Number of iterations run. Examples -------- >>> import numpy as np >>> from sklearn.manifold import TSNE >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]]) >>> X_embedded = TSNE(n_components=2).fit_transform(X) >>> X_embedded.shape (4, 2) References ---------- [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008. [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding http://homepage.tudelft.nl/19j49/t-SNE.html [3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms. Journal of Machine Learning Research 15(Oct):3221-3245, 2014. http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf """ # Control the number of exploration iterations with early_exaggeration on _EXPLORATION_N_ITER = 250 # Control the number of iterations between progress checks _N_ITER_CHECK = 50 def __init__(self, n_components=2, perplexity=30.0, early_exaggeration=12.0, learning_rate=200.0, n_iter=1000, n_iter_without_progress=300, min_grad_norm=1e-7, metric="euclidean", init="random", verbose=0, random_state=None, method='barnes_hut', angle=0.5): self.n_components = n_components self.perplexity = perplexity self.early_exaggeration = early_exaggeration self.learning_rate = learning_rate self.n_iter = n_iter self.n_iter_without_progress = n_iter_without_progress self.min_grad_norm = min_grad_norm self.metric = metric self.init = init self.verbose = verbose self.random_state = random_state self.method = method self.angle = angle def _fit(self, X, skip_num_points=0): """Fit the model using X as training data. Note that sparse arrays can only be handled by method='exact'. It is recommended that you convert your sparse array to dense (e.g. `X.toarray()`) if it fits in memory, or otherwise using a dimensionality reduction technique (e.g. TruncatedSVD). 
Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. Note that this when method='barnes_hut', X cannot be a sparse array and if need be will be converted to a 32 bit float array. Method='exact' allows sparse arrays and 64bit floating point inputs. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. """ if self.method not in ['barnes_hut', 'exact']: raise ValueError("'method' must be 'barnes_hut' or 'exact'") if self.angle < 0.0 or self.angle > 1.0: raise ValueError("'angle' must be between 0.0 - 1.0") if self.metric == "precomputed": if isinstance(self.init, string_types) and self.init == 'pca': raise ValueError("The parameter init=\"pca\" cannot be " "used with metric=\"precomputed\".") if X.shape[0] != X.shape[1]: raise ValueError("X should be a square distance matrix") if np.any(X < 0): raise ValueError("All distances should be positive, the " "precomputed distances given as X is not " "correct") if self.method == 'barnes_hut' and sp.issparse(X): raise TypeError('A sparse matrix was passed, but dense ' 'data is required for method="barnes_hut". Use ' 'X.toarray() to convert to a dense numpy array if ' 'the array is small enough for it to fit in ' 'memory. Otherwise consider dimensionality ' 'reduction techniques (e.g. TruncatedSVD)') else: X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=[np.float32, np.float64]) if self.method == 'barnes_hut' and self.n_components > 3: raise ValueError("'n_components' should be inferior to 4 for the " "barnes_hut algorithm as it relies on " "quad-tree or oct-tree.") random_state = check_random_state(self.random_state) if self.early_exaggeration < 1.0: raise ValueError("early_exaggeration must be at least 1, but is {}" .format(self.early_exaggeration)) if self.n_iter < 250: raise ValueError("n_iter should be at least 250") n_samples = X.shape[0] neighbors_nn = None if self.method == "exact": # Retrieve the distance matrix, either using the precomputed one or # computing it. if self.metric == "precomputed": distances = X else: if self.verbose: print("[t-SNE] Computing pairwise distances...") if self.metric == "euclidean": distances = pairwise_distances(X, metric=self.metric, squared=True) else: distances = pairwise_distances(X, metric=self.metric) if np.any(distances < 0): raise ValueError("All distances should be positive, the " "metric given is not correct") # compute the joint probability distribution for the input space P = _joint_probabilities(distances, self.perplexity, self.verbose) assert np.all(np.isfinite(P)), "All probabilities should be finite" assert np.all(P >= 0), "All probabilities should be non-negative" assert np.all(P <= 1), ("All probabilities should be less " "or then equal to one") else: # Cpmpute the number of nearest neighbors to find. # LvdM uses 3 * perplexity as the number of neighbors. # In the event that we have very small # of points # set the neighbors to n - 1. k = min(n_samples - 1, int(3. 
* self.perplexity + 1)) if self.verbose: print("[t-SNE] Computing {} nearest neighbors...".format(k)) # Find the nearest neighbors for every point knn = NearestNeighbors(algorithm='auto', n_neighbors=k, metric=self.metric) t0 = time() knn.fit(X) duration = time() - t0 if self.verbose: print("[t-SNE] Indexed {} samples in {:.3f}s...".format( n_samples, duration)) t0 = time() distances_nn, neighbors_nn = knn.kneighbors( None, n_neighbors=k) duration = time() - t0 if self.verbose: print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..." .format(n_samples, duration)) # Free the memory used by the ball_tree del knn if self.metric == "euclidean": # knn return the euclidean distance but we need it squared # to be consistent with the 'exact' method. Note that the # the method was derived using the euclidean method as in the # input space. Not sure of the implication of using a different # metric. distances_nn **= 2 # compute the joint probability distribution for the input space P = _joint_probabilities_nn(distances_nn, neighbors_nn, self.perplexity, self.verbose) if isinstance(self.init, np.ndarray): X_embedded = self.init elif self.init == 'pca': pca = PCA(n_components=self.n_components, svd_solver='randomized', random_state=random_state) X_embedded = pca.fit_transform(X).astype(np.float32, copy=False) elif self.init == 'random': # The embedding is initialized with iid samples from Gaussians with # standard deviation 1e-4. X_embedded = 1e-4 * random_state.randn( n_samples, self.n_components).astype(np.float32) else: raise ValueError("'init' must be 'pca', 'random', or " "a numpy array") # Degrees of freedom of the Student's t-distribution. The suggestion # degrees_of_freedom = n_components - 1 comes from # "Learning a Parametric Embedding by Preserving Local Structure" # Laurens van der Maaten, 2009. degrees_of_freedom = max(self.n_components - 1.0, 1) return self._tsne(P, degrees_of_freedom, n_samples, X_embedded=X_embedded, neighbors=neighbors_nn, skip_num_points=skip_num_points) @property @deprecated("Attribute n_iter_final was deprecated in version 0.19 and " "will be removed in 0.21. Use ``n_iter_`` instead") def n_iter_final(self): return self.n_iter_ def _tsne(self, P, degrees_of_freedom, n_samples, X_embedded, neighbors=None, skip_num_points=0): """Runs t-SNE.""" # t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P # and the Student's t-distributions Q. 
The optimization algorithm that # we use is batch gradient descent with two stages: # * initial optimization with early exaggeration and momentum at 0.5 # * final optimization with momentum at 0.8 params = X_embedded.ravel() opt_args = { "it": 0, "n_iter_check": self._N_ITER_CHECK, "min_grad_norm": self.min_grad_norm, "learning_rate": self.learning_rate, "verbose": self.verbose, "kwargs": dict(skip_num_points=skip_num_points), "args": [P, degrees_of_freedom, n_samples, self.n_components], "n_iter_without_progress": self._EXPLORATION_N_ITER, "n_iter": self._EXPLORATION_N_ITER, "momentum": 0.5, } if self.method == 'barnes_hut': obj_func = _kl_divergence_bh opt_args['kwargs']['angle'] = self.angle # Repeat verbose argument for _kl_divergence_bh opt_args['kwargs']['verbose'] = self.verbose else: obj_func = _kl_divergence # Learning schedule (part 1): do 250 iteration with lower momentum but # higher learning rate controlled via the early exageration parameter P *= self.early_exaggeration params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args) if self.verbose: print("[t-SNE] KL divergence after %d iterations with early " "exaggeration: %f" % (it + 1, kl_divergence)) # Learning schedule (part 2): disable early exaggeration and finish # optimization with a higher momentum at 0.8 P /= self.early_exaggeration remaining = self.n_iter - self._EXPLORATION_N_ITER if it < self._EXPLORATION_N_ITER or remaining > 0: opt_args['n_iter'] = self.n_iter opt_args['it'] = it + 1 opt_args['momentum'] = 0.8 opt_args['n_iter_without_progress'] = self.n_iter_without_progress params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args) # Save the final number of iterations self.n_iter_ = it if self.verbose: print("[t-SNE] Error after %d iterations: %f" % (it + 1, kl_divergence)) X_embedded = params.reshape(n_samples, self.n_components) self.kl_divergence_ = kl_divergence return X_embedded def fit_transform(self, X, y=None): """Fit X into an embedded space and return that transformed output. Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. y : Ignored. Returns ------- X_new : array, shape (n_samples, n_components) Embedding of the training data in low-dimensional space. """ embedding = self._fit(X) self.embedding_ = embedding return self.embedding_ def fit(self, X, y=None): """Fit X into an embedded space. Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. If the method is 'exact', X may be a sparse matrix of type 'csr', 'csc' or 'coo'. y : Ignored. """ self.fit_transform(X) return self
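A minimal usage sketch of the TSNE estimator defined above, added here for illustration only (it is not part of the original module). It assumes scikit-learn (providing this module), numpy and sklearn.datasets are importable; the blob sizes, perplexity and iteration counts are arbitrary illustrative choices, not recommendations from the original authors. It exercises the 'barnes_hut' vs 'exact' methods and the fitted attributes documented in the class docstring.

# Editor-added illustration, not original source: exercises the public TSNE
# API documented above under both gradient methods.
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.manifold import TSNE

# Toy data: three well-separated clusters in 10 dimensions (sizes arbitrary).
X, _ = make_blobs(n_samples=200, n_features=10, centers=3, random_state=0)

for method in ("barnes_hut", "exact"):
    tsne = TSNE(n_components=2, perplexity=30.0, learning_rate=200.0,
                n_iter=1000, method=method, random_state=0)
    X_embedded = tsne.fit_transform(X)
    # After fitting, embedding_, kl_divergence_ and n_iter_ are available,
    # as described in the class docstring.
    print(method, X_embedded.shape, tsne.kl_divergence_, tsne.n_iter_)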
35,089
39.011403
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/manifold/__init__.py
""" The :mod:`sklearn.manifold` module implements data embedding techniques. """ from .locally_linear import locally_linear_embedding, LocallyLinearEmbedding from .isomap import Isomap from .mds import MDS, smacof from .spectral_embedding_ import SpectralEmbedding, spectral_embedding from .t_sne import TSNE __all__ = ['locally_linear_embedding', 'LocallyLinearEmbedding', 'Isomap', 'MDS', 'smacof', 'SpectralEmbedding', 'spectral_embedding', "TSNE"]
465
34.846154
78
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/sklearn/manifold/tests/test_t_sne.py
import sys from sklearn.externals.six.moves import cStringIO as StringIO import numpy as np import scipy.sparse as sp from sklearn.neighbors import BallTree from sklearn.neighbors import NearestNeighbors from sklearn.utils.testing import assert_less_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises_regexp from sklearn.utils.testing import assert_in from sklearn.utils.testing import skip_if_32bit from sklearn.utils import check_random_state from sklearn.manifold.t_sne import _joint_probabilities from sklearn.manifold.t_sne import _joint_probabilities_nn from sklearn.manifold.t_sne import _kl_divergence from sklearn.manifold.t_sne import _kl_divergence_bh from sklearn.manifold.t_sne import _gradient_descent from sklearn.manifold.t_sne import trustworthiness from sklearn.manifold.t_sne import TSNE from sklearn.manifold import _barnes_hut_tsne from sklearn.manifold._utils import _binary_search_perplexity from sklearn.datasets import make_blobs from scipy.optimize import check_grad from scipy.spatial.distance import pdist from scipy.spatial.distance import squareform from sklearn.metrics.pairwise import pairwise_distances from sklearn.metrics.pairwise import manhattan_distances from sklearn.metrics.pairwise import cosine_distances x = np.linspace(0, 1, 10) xx, yy = np.meshgrid(x, x) X_2d_grid = np.hstack([ xx.ravel().reshape(-1, 1), yy.ravel().reshape(-1, 1), ]) def test_gradient_descent_stops(): # Test stopping conditions of gradient descent. class ObjectiveSmallGradient: def __init__(self): self.it = -1 def __call__(self, _): self.it += 1 return (10 - self.it) / 10.0, np.array([1e-5]) def flat_function(_): return 0.0, np.ones(1) # Gradient norm old_stdout = sys.stdout sys.stdout = StringIO() try: _, error, it = _gradient_descent( ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100, n_iter_without_progress=100, momentum=0.0, learning_rate=0.0, min_gain=0.0, min_grad_norm=1e-5, verbose=2) finally: out = sys.stdout.getvalue() sys.stdout.close() sys.stdout = old_stdout assert_equal(error, 1.0) assert_equal(it, 0) assert("gradient norm" in out) # Maximum number of iterations without improvement old_stdout = sys.stdout sys.stdout = StringIO() try: _, error, it = _gradient_descent( flat_function, np.zeros(1), 0, n_iter=100, n_iter_without_progress=10, momentum=0.0, learning_rate=0.0, min_gain=0.0, min_grad_norm=0.0, verbose=2) finally: out = sys.stdout.getvalue() sys.stdout.close() sys.stdout = old_stdout assert_equal(error, 0.0) assert_equal(it, 11) assert("did not make any progress" in out) # Maximum number of iterations old_stdout = sys.stdout sys.stdout = StringIO() try: _, error, it = _gradient_descent( ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11, n_iter_without_progress=100, momentum=0.0, learning_rate=0.0, min_gain=0.0, min_grad_norm=0.0, verbose=2) finally: out = sys.stdout.getvalue() sys.stdout.close() sys.stdout = old_stdout assert_equal(error, 0.0) assert_equal(it, 10) assert("Iteration 10" in out) def test_binary_search(): # Test if the binary search finds Gaussians with desired perplexity. 
random_state = check_random_state(0) distances = random_state.randn(50, 2).astype(np.float32) # Distances shouldn't be negative distances = np.abs(distances.dot(distances.T)) np.fill_diagonal(distances, 0.0) desired_perplexity = 25.0 P = _binary_search_perplexity(distances, None, desired_perplexity, verbose=0) P = np.maximum(P, np.finfo(np.double).eps) mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i]))) for i in range(P.shape[0])]) assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3) def test_binary_search_neighbors(): # Binary perplexity search approximation. # Should be approximately equal to the slow method when we use # all points as neighbors. n_samples = 500 desired_perplexity = 25.0 random_state = check_random_state(0) distances = random_state.randn(n_samples, 2).astype(np.float32) # Distances shouldn't be negative distances = np.abs(distances.dot(distances.T)) np.fill_diagonal(distances, 0.0) P1 = _binary_search_perplexity(distances, None, desired_perplexity, verbose=0) # Test that when we use all the neighbors the results are identical k = n_samples neighbors_nn = np.argsort(distances, axis=1)[:, 1:k].astype(np.int64) distances_nn = np.array([distances[k, neighbors_nn[k]] for k in range(n_samples)]) P2 = _binary_search_perplexity(distances_nn, neighbors_nn, desired_perplexity, verbose=0) P_nn = np.array([P1[k, neighbors_nn[k]] for k in range(n_samples)]) assert_array_almost_equal(P_nn, P2, decimal=4) # Test that the highest P_ij are the same when few neighbors are used for k in np.linspace(80, n_samples, 5): k = int(k) topn = k * 10 # check the top 10 *k entries out of k * k entries neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64) distances_nn = np.array([distances[k, neighbors_nn[k]] for k in range(n_samples)]) P2k = _binary_search_perplexity(distances_nn, neighbors_nn, desired_perplexity, verbose=0) idx = np.argsort(P1.ravel())[::-1] P1top = P1.ravel()[idx][:topn] idx = np.argsort(P2k.ravel())[::-1] P2top = P2k.ravel()[idx][:topn] assert_array_almost_equal(P1top, P2top, decimal=2) def test_binary_perplexity_stability(): # Binary perplexity search should be stable. # The binary_search_perplexity had a bug wherein the P array # was uninitialized, leading to sporadically failing tests. k = 10 n_samples = 100 random_state = check_random_state(0) distances = random_state.randn(n_samples, 2).astype(np.float32) # Distances shouldn't be negative distances = np.abs(distances.dot(distances.T)) np.fill_diagonal(distances, 0.0) last_P = None neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64) for _ in range(100): P = _binary_search_perplexity(distances.copy(), neighbors_nn.copy(), 3, verbose=0) P1 = _joint_probabilities_nn(distances, neighbors_nn, 3, verbose=0) # Convert the sparse matrix to a dense one for testing P1 = P1.toarray() if last_P is None: last_P = P last_P1 = P1 else: assert_array_almost_equal(P, last_P, decimal=4) assert_array_almost_equal(P1, last_P1, decimal=4) def test_gradient(): # Test gradient of Kullback-Leibler divergence. 
random_state = check_random_state(0) n_samples = 50 n_features = 2 n_components = 2 alpha = 1.0 distances = random_state.randn(n_samples, n_features).astype(np.float32) distances = np.abs(distances.dot(distances.T)) np.fill_diagonal(distances, 0.0) X_embedded = random_state.randn(n_samples, n_components).astype(np.float32) P = _joint_probabilities(distances, desired_perplexity=25.0, verbose=0) def fun(params): return _kl_divergence(params, P, alpha, n_samples, n_components)[0] def grad(params): return _kl_divergence(params, P, alpha, n_samples, n_components)[1] assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0, decimal=5) def test_trustworthiness(): # Test trustworthiness score. random_state = check_random_state(0) # Affine transformation X = random_state.randn(100, 2) assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0) # Randomly shuffled X = np.arange(100).reshape(-1, 1) X_embedded = X.copy() random_state.shuffle(X_embedded) assert_less(trustworthiness(X, X_embedded), 0.6) # Completely different X = np.arange(5).reshape(-1, 1) X_embedded = np.array([[0], [2], [4], [1], [3]]) assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2) def test_preserve_trustworthiness_approximately(): # Nearest neighbors should be preserved approximately. random_state = check_random_state(0) n_components = 2 methods = ['exact', 'barnes_hut'] X = random_state.randn(50, n_components).astype(np.float32) for init in ('random', 'pca'): for method in methods: tsne = TSNE(n_components=n_components, init=init, random_state=0, method=method) X_embedded = tsne.fit_transform(X) t = trustworthiness(X, X_embedded, n_neighbors=1) assert_greater(t, 0.9) def test_optimization_minimizes_kl_divergence(): """t-SNE should give a lower KL divergence with more iterations.""" random_state = check_random_state(0) X, _ = make_blobs(n_features=3, random_state=random_state) kl_divergences = [] for n_iter in [250, 300, 350]: tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0, n_iter=n_iter, random_state=0) tsne.fit_transform(X) kl_divergences.append(tsne.kl_divergence_) assert_less_equal(kl_divergences[1], kl_divergences[0]) assert_less_equal(kl_divergences[2], kl_divergences[1]) def test_fit_csr_matrix(): # X can be a sparse matrix. random_state = check_random_state(0) X = random_state.randn(100, 2) X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0 X_csr = sp.csr_matrix(X) tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0, random_state=0, method='exact') X_embedded = tsne.fit_transform(X_csr) assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0, decimal=1) def test_preserve_trustworthiness_approximately_with_precomputed_distances(): # Nearest neighbors should be preserved approximately. random_state = check_random_state(0) for i in range(3): X = random_state.randn(100, 2) D = squareform(pdist(X), "sqeuclidean") tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0, early_exaggeration=2.0, metric="precomputed", random_state=i, verbose=0) X_embedded = tsne.fit_transform(D) t = trustworthiness(D, X_embedded, n_neighbors=1, precomputed=True) assert t > .95 def test_early_exaggeration_too_small(): # Early exaggeration factor must be >= 1. tsne = TSNE(early_exaggeration=0.99) assert_raises_regexp(ValueError, "early_exaggeration .*", tsne.fit_transform, np.array([[0.0]])) def test_too_few_iterations(): # Number of gradient descent iterations must be at least 200. 
tsne = TSNE(n_iter=199) assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform, np.array([[0.0]])) def test_non_square_precomputed_distances(): # Precomputed distance matrices must be square matrices. tsne = TSNE(metric="precomputed") assert_raises_regexp(ValueError, ".* square distance matrix", tsne.fit_transform, np.array([[0.0], [1.0]])) def test_non_positive_precomputed_distances(): # Precomputed distance matrices must be positive. bad_dist = np.array([[0., -1.], [1., 0.]]) for method in ['barnes_hut', 'exact']: tsne = TSNE(metric="precomputed", method=method) assert_raises_regexp(ValueError, "All distances .*precomputed.*", tsne.fit_transform, bad_dist) def test_non_positive_computed_distances(): # Computed distance matrices must be positive. def metric(x, y): return -1 tsne = TSNE(metric=metric, method='exact') X = np.array([[0.0, 0.0], [1.0, 1.0]]) assert_raises_regexp(ValueError, "All distances .*metric given.*", tsne.fit_transform, X) def test_init_not_available(): # 'init' must be 'pca', 'random', or numpy array. tsne = TSNE(init="not available") m = "'init' must be 'pca', 'random', or a numpy array" assert_raises_regexp(ValueError, m, tsne.fit_transform, np.array([[0.0], [1.0]])) def test_init_ndarray(): # Initialize TSNE with ndarray and test fit tsne = TSNE(init=np.zeros((100, 2))) X_embedded = tsne.fit_transform(np.ones((100, 5))) assert_array_equal(np.zeros((100, 2)), X_embedded) def test_init_ndarray_precomputed(): # Initialize TSNE with ndarray and metric 'precomputed' # Make sure no FutureWarning is thrown from _fit tsne = TSNE(init=np.zeros((100, 2)), metric="precomputed") tsne.fit(np.zeros((100, 100))) def test_distance_not_available(): # 'metric' must be valid. tsne = TSNE(metric="not available", method='exact') assert_raises_regexp(ValueError, "Unknown metric not available.*", tsne.fit_transform, np.array([[0.0], [1.0]])) tsne = TSNE(metric="not available", method='barnes_hut') assert_raises_regexp(ValueError, "Metric 'not available' not valid.*", tsne.fit_transform, np.array([[0.0], [1.0]])) def test_method_not_available(): # 'nethod' must be 'barnes_hut' or 'exact' tsne = TSNE(method='not available') assert_raises_regexp(ValueError, "'method' must be 'barnes_hut' or ", tsne.fit_transform, np.array([[0.0], [1.0]])) def test_angle_out_of_range_checks(): # check the angle parameter range for angle in [-1, -1e-6, 1 + 1e-6, 2]: tsne = TSNE(angle=angle) assert_raises_regexp(ValueError, "'angle' must be between 0.0 - 1.0", tsne.fit_transform, np.array([[0.0], [1.0]])) def test_pca_initialization_not_compatible_with_precomputed_kernel(): # Precomputed distance matrices must be square matrices. 
tsne = TSNE(metric="precomputed", init="pca") assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be " "used with metric=\"precomputed\".", tsne.fit_transform, np.array([[0.0], [1.0]])) def test_n_components_range(): # barnes_hut method should only be used with n_components <= 3 tsne = TSNE(n_components=4, method="barnes_hut") assert_raises_regexp(ValueError, "'n_components' should be .*", tsne.fit_transform, np.array([[0.0], [1.0]])) def test_early_exaggeration_used(): # check that the ``early_exaggeration`` parameter has an effect random_state = check_random_state(0) n_components = 2 methods = ['exact', 'barnes_hut'] X = random_state.randn(25, n_components).astype(np.float32) for method in methods: tsne = TSNE(n_components=n_components, perplexity=1, learning_rate=100.0, init="pca", random_state=0, method=method, early_exaggeration=1.0) X_embedded1 = tsne.fit_transform(X) tsne = TSNE(n_components=n_components, perplexity=1, learning_rate=100.0, init="pca", random_state=0, method=method, early_exaggeration=10.0) X_embedded2 = tsne.fit_transform(X) assert not np.allclose(X_embedded1, X_embedded2) def test_n_iter_used(): # check that the ``n_iter`` parameter has an effect random_state = check_random_state(0) n_components = 2 methods = ['exact', 'barnes_hut'] X = random_state.randn(25, n_components).astype(np.float32) for method in methods: for n_iter in [251, 500]: tsne = TSNE(n_components=n_components, perplexity=1, learning_rate=0.5, init="random", random_state=0, method=method, early_exaggeration=1.0, n_iter=n_iter) tsne.fit_transform(X) assert tsne.n_iter_ == n_iter - 1 def test_answer_gradient_two_points(): # Test the tree with only a single set of children. # # These tests & answers have been checked against the reference # implementation by LvdM. pos_input = np.array([[1.0, 0.0], [0.0, 1.0]]) pos_output = np.array([[-4.961291e-05, -1.072243e-04], [9.259460e-05, 2.702024e-04]]) neighbors = np.array([[1], [0]]) grad_output = np.array([[-2.37012478e-05, -6.29044398e-05], [2.37012478e-05, 6.29044398e-05]]) _run_answer_test(pos_input, pos_output, neighbors, grad_output) def test_answer_gradient_four_points(): # Four points tests the tree with multiple levels of children. # # These tests & answers have been checked against the reference # implementation by LvdM. pos_input = np.array([[1.0, 0.0], [0.0, 1.0], [5.0, 2.0], [7.3, 2.2]]) pos_output = np.array([[6.080564e-05, -7.120823e-05], [-1.718945e-04, -4.000536e-05], [-2.271720e-04, 8.663310e-05], [-1.032577e-04, -3.582033e-05]]) neighbors = np.array([[1, 2, 3], [0, 2, 3], [1, 0, 3], [1, 2, 0]]) grad_output = np.array([[5.81128448e-05, -7.78033454e-06], [-5.81526851e-05, 7.80976444e-06], [4.24275173e-08, -3.69569698e-08], [-2.58720939e-09, 7.52706374e-09]]) _run_answer_test(pos_input, pos_output, neighbors, grad_output) def test_skip_num_points_gradient(): # Test the kwargs option skip_num_points. # # Skip num points should make it such that the Barnes_hut gradient # is not calculated for indices below skip_num_point. 
# Aside from skip_num_points=2 and the first two gradient rows # being set to zero, these data points are the same as in # test_answer_gradient_four_points() pos_input = np.array([[1.0, 0.0], [0.0, 1.0], [5.0, 2.0], [7.3, 2.2]]) pos_output = np.array([[6.080564e-05, -7.120823e-05], [-1.718945e-04, -4.000536e-05], [-2.271720e-04, 8.663310e-05], [-1.032577e-04, -3.582033e-05]]) neighbors = np.array([[1, 2, 3], [0, 2, 3], [1, 0, 3], [1, 2, 0]]) grad_output = np.array([[0.0, 0.0], [0.0, 0.0], [4.24275173e-08, -3.69569698e-08], [-2.58720939e-09, 7.52706374e-09]]) _run_answer_test(pos_input, pos_output, neighbors, grad_output, False, 0.1, 2) def _run_answer_test(pos_input, pos_output, neighbors, grad_output, verbose=False, perplexity=0.1, skip_num_points=0): distances = pairwise_distances(pos_input).astype(np.float32) args = distances, perplexity, verbose pos_output = pos_output.astype(np.float32) neighbors = neighbors.astype(np.int64) pij_input = _joint_probabilities(*args) pij_input = squareform(pij_input).astype(np.float32) grad_bh = np.zeros(pos_output.shape, dtype=np.float32) from scipy.sparse import csr_matrix P = csr_matrix(pij_input) neighbors = P.indices.astype(np.int64) indptr = P.indptr.astype(np.int64) _barnes_hut_tsne.gradient(P.data, pos_output, neighbors, indptr, grad_bh, 0.5, 2, 1, skip_num_points=0) assert_array_almost_equal(grad_bh, grad_output, decimal=4) def test_verbose(): # Verbose options write to stdout. random_state = check_random_state(0) tsne = TSNE(verbose=2) X = random_state.randn(5, 2) old_stdout = sys.stdout sys.stdout = StringIO() try: tsne.fit_transform(X) finally: out = sys.stdout.getvalue() sys.stdout.close() sys.stdout = old_stdout assert("[t-SNE]" in out) assert("nearest neighbors..." in out) assert("Computed conditional probabilities" in out) assert("Mean sigma" in out) assert("early exaggeration" in out) def test_chebyshev_metric(): # t-SNE should allow metrics that cannot be squared (issue #3526). random_state = check_random_state(0) tsne = TSNE(metric="chebyshev") X = random_state.randn(5, 2) tsne.fit_transform(X) def test_reduction_to_one_component(): # t-SNE should allow reduction to one component (issue #4154). random_state = check_random_state(0) tsne = TSNE(n_components=1) X = random_state.randn(5, 2) X_embedded = tsne.fit(X).embedding_ assert(np.all(np.isfinite(X_embedded))) def test_no_sparse_on_barnes_hut(): # No sparse matrices allowed on Barnes-Hut. random_state = check_random_state(0) X = random_state.randn(100, 2) X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0 X_csr = sp.csr_matrix(X) tsne = TSNE(n_iter=199, method='barnes_hut') assert_raises_regexp(TypeError, "A sparse matrix was.*", tsne.fit_transform, X_csr) def test_64bit(): # Ensure 64bit arrays are handled correctly. random_state = check_random_state(0) methods = ['barnes_hut', 'exact'] for method in methods: for dt in [np.float32, np.float64]: X = random_state.randn(50, 2).astype(dt) tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0, random_state=0, method=method, verbose=0) X_embedded = tsne.fit_transform(X) effective_type = X_embedded.dtype # tsne cython code is only single precision, so the output will # always be single precision, irrespectively of the input dtype assert effective_type == np.float32 def test_barnes_hut_angle(): # When Barnes-Hut's angle=0 this corresponds to the exact method. 
angle = 0.0 perplexity = 10 n_samples = 100 for n_components in [2, 3]: n_features = 5 degrees_of_freedom = float(n_components - 1.0) random_state = check_random_state(0) distances = random_state.randn(n_samples, n_features) distances = distances.astype(np.float32) distances = abs(distances.dot(distances.T)) np.fill_diagonal(distances, 0.0) params = random_state.randn(n_samples, n_components) P = _joint_probabilities(distances, perplexity, verbose=0) kl_exact, grad_exact = _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components) k = n_samples - 1 bt = BallTree(distances) distances_nn, neighbors_nn = bt.query(distances, k=k + 1) neighbors_nn = neighbors_nn[:, 1:] distances_nn = np.array([distances[i, neighbors_nn[i]] for i in range(n_samples)]) assert np.all(distances[0, neighbors_nn[0]] == distances_nn[0]),\ abs(distances[0, neighbors_nn[0]] - distances_nn[0]) P_bh = _joint_probabilities_nn(distances_nn, neighbors_nn, perplexity, verbose=0) kl_bh, grad_bh = _kl_divergence_bh(params, P_bh, degrees_of_freedom, n_samples, n_components, angle=angle, skip_num_points=0, verbose=0) P = squareform(P) P_bh = P_bh.toarray() assert_array_almost_equal(P_bh, P, decimal=5) assert_almost_equal(kl_exact, kl_bh, decimal=3) @skip_if_32bit def test_n_iter_without_progress(): # Use a dummy negative n_iter_without_progress and check output on stdout random_state = check_random_state(0) X = random_state.randn(100, 10) for method in ["barnes_hut", "exact"]: tsne = TSNE(n_iter_without_progress=-1, verbose=2, learning_rate=1e8, random_state=0, method=method, n_iter=351, init="random") tsne._N_ITER_CHECK = 1 tsne._EXPLORATION_N_ITER = 0 old_stdout = sys.stdout sys.stdout = StringIO() try: tsne.fit_transform(X) finally: out = sys.stdout.getvalue() sys.stdout.close() sys.stdout = old_stdout # The output needs to contain the value of n_iter_without_progress assert_in("did not make any progress during the " "last -1 episodes. 
Finished.", out) def test_min_grad_norm(): # Make sure that the parameter min_grad_norm is used correctly random_state = check_random_state(0) X = random_state.randn(100, 2) min_grad_norm = 0.002 tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2, random_state=0, method='exact') old_stdout = sys.stdout sys.stdout = StringIO() try: tsne.fit_transform(X) finally: out = sys.stdout.getvalue() sys.stdout.close() sys.stdout = old_stdout lines_out = out.split('\n') # extract the gradient norm from the verbose output gradient_norm_values = [] for line in lines_out: # When the computation is Finished just an old gradient norm value # is repeated that we do not need to store if 'Finished' in line: break start_grad_norm = line.find('gradient norm') if start_grad_norm >= 0: line = line[start_grad_norm:] line = line.replace('gradient norm = ', '').split(' ')[0] gradient_norm_values.append(float(line)) # Compute how often the gradient norm is smaller than min_grad_norm gradient_norm_values = np.array(gradient_norm_values) n_smaller_gradient_norms = \ len(gradient_norm_values[gradient_norm_values <= min_grad_norm]) # The gradient norm can be smaller than min_grad_norm at most once, # because in the moment it becomes smaller the optimization stops assert_less_equal(n_smaller_gradient_norms, 1) def test_accessible_kl_divergence(): # Ensures that the accessible kl_divergence matches the computed value random_state = check_random_state(0) X = random_state.randn(100, 2) tsne = TSNE(n_iter_without_progress=2, verbose=2, random_state=0, method='exact') old_stdout = sys.stdout sys.stdout = StringIO() try: tsne.fit_transform(X) finally: out = sys.stdout.getvalue() sys.stdout.close() sys.stdout = old_stdout # The output needs to contain the accessible kl_divergence as the error at # the last iteration for line in out.split('\n')[::-1]: if 'Iteration' in line: _, _, error = line.partition('error = ') if error: error, _, _ = error.partition(',') break assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5) def check_uniform_grid(method, seeds=[0, 1, 2], n_iter=1000): """Make sure that TSNE can approximately recover a uniform 2D grid Due to ties in distances between point in X_2d_grid, this test is platform dependent for ``method='barnes_hut'`` due to numerical imprecision. Also, t-SNE is not assured to converge to the right solution because bad initialization can lead to convergence to bad local minimum (the optimization problem is non-convex). To avoid breaking the test too often, we re-run t-SNE from the final point when the convergence is not good enough. """ for seed in seeds: tsne = TSNE(n_components=2, init='random', random_state=seed, perplexity=20, n_iter=n_iter, method=method) Y = tsne.fit_transform(X_2d_grid) try_name = "{}_{}".format(method, seed) try: assert_uniform_grid(Y, try_name) except AssertionError: # If the test fails a first time, re-run with init=Y to see if # this was caused by a bad initialization. Note that this will # also run an early_exaggeration step. try_name += ":rerun" tsne.init = Y Y = tsne.fit_transform(X_2d_grid) assert_uniform_grid(Y, try_name) def assert_uniform_grid(Y, try_name=None): # Ensure that the resulting embedding leads to approximately # uniformly spaced points: the distance to the closest neighbors # should be non-zero and approximately constant. 
nn = NearestNeighbors(n_neighbors=1).fit(Y) dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel() assert dist_to_nn.min() > 0.1 smallest_to_mean = dist_to_nn.min() / np.mean(dist_to_nn) largest_to_mean = dist_to_nn.max() / np.mean(dist_to_nn) assert_greater(smallest_to_mean, .5, msg=try_name) assert_less(largest_to_mean, 2, msg=try_name) def test_uniform_grid(): for method in ['barnes_hut', 'exact']: yield check_uniform_grid, method def test_bh_match_exact(): # check that the ``barnes_hut`` method match the exact one when # ``angle = 0`` and ``perplexity > n_samples / 3`` random_state = check_random_state(0) n_features = 10 X = random_state.randn(30, n_features).astype(np.float32) X_embeddeds = {} n_iter = {} for method in ['exact', 'barnes_hut']: tsne = TSNE(n_components=2, method=method, learning_rate=1.0, init="random", random_state=0, n_iter=251, perplexity=30.0, angle=0) # Kill the early_exaggeration tsne._EXPLORATION_N_ITER = 0 X_embeddeds[method] = tsne.fit_transform(X) n_iter[method] = tsne.n_iter_ assert n_iter['exact'] == n_iter['barnes_hut'] assert_array_almost_equal(X_embeddeds['exact'], X_embeddeds['barnes_hut'], decimal=3) def test_tsne_with_different_distance_metrics(): """Make sure that TSNE works for different distance metrics""" random_state = check_random_state(0) n_components_original = 3 n_components_embedding = 2 X = random_state.randn(50, n_components_original).astype(np.float32) metrics = ['manhattan', 'cosine'] dist_funcs = [manhattan_distances, cosine_distances] for metric, dist_func in zip(metrics, dist_funcs): X_transformed_tsne = TSNE( metric=metric, n_components=n_components_embedding, random_state=0).fit_transform(X) X_transformed_tsne_precomputed = TSNE( metric='precomputed', n_components=n_components_embedding, random_state=0).fit_transform(dist_func(X)) assert_array_equal(X_transformed_tsne, X_transformed_tsne_precomputed)
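The tests above lean heavily on trustworthiness() as a proxy for embedding quality. The sketch below is an editor-added illustration (not part of the test suite) of the same check used standalone; the array shape, random seed and n_neighbors value are arbitrary illustrative choices.

# Editor-added illustration: using trustworthiness() as a standalone sanity
# check, the same metric the tests above assert on.
import numpy as np
from sklearn.manifold import TSNE
from sklearn.manifold.t_sne import trustworthiness

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
X_embedded = TSNE(n_components=2, random_state=0).fit_transform(X)

# trustworthiness lies in [0, 1]; values near 1 mean the nearest neighbors in
# the original space are preserved in the embedding.
print(trustworthiness(X, X_embedded, n_neighbors=5))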
31,389
37.89715
79
py