Please provide a description of the function:def parse_vasprun( self ): self.vasprun_filename = match_filename( 'vasprun.xml' ) if not self.vasprun_filename: raise FileNotFoundError( 'Could not find vasprun.xml or vasprun.xml.gz file' ) try: self.vasprun = Vasprun( self.vasprun_filename, parse_potcar_file=False ) except ET.ParseError: self.vasprun = None except: raise
[ "\n Read in `vasprun.xml` as a pymatgen Vasprun object.\n\n Args:\n None\n\n Returns:\n None\n\n None:\n If the vasprun.xml is not well formed this method will catch the ParseError\n and set self.vasprun = None.\n " ]
Please provide a description of the function:def functional( self ): if self.potcars_are_pbe(): # PBE base functional if 'LHFCALC' in self.vasprun.parameters: alpha = float( self.vasprun.parameters['AEXX'] ) else: alpha = 0.0 if 'HFSCREEN' in self.vasprun.parameters: mu = float( self.vasprun.parameters['HFSCREEN'] ) else: mu = 0 if alpha > 0: if mu > 0: # screened hybrid if ( mu == 0.2 ) and ( alpha == 0.25 ): f = 'HSE06' else: f = "screened hybrid. alpha={}, mu={}".format( alpha, mu ) else: # unscreened hybrid if alpha == 0.25: f = 'PBE0' else: f = "hybrid. alpha={}".format( alpha ) else: # not hybrid. Plain PBE or some variant. pbe_list = { 'PS': 'PBEsol', 'PE': 'PBE', '91': 'PW91', 'RP': 'rPBE', 'AM': 'AM05' } f = pbe_list[ self.vasprun.parameters['GGA'] ] else: f = 'not recognised' return f
[ "\n String description of the calculation functional.\n \n Recognises:\n - PBE\n - PBEsol\n - PBE-based hybrids:\n - PBE0 (alpha=0.25, no screening)\n - HSE06 (alpha=0.25, mu=0.2)\n - generic hybrids (alpha=?, no screening)\n - generic screened hybrids (alpha=?, mu=?)\n \n Returns:\n (Str): String describing the calculation functional.\n\n " ]
Please provide a description of the function:def read_projected_dos( self ): pdos_list = [] for i in range( self.number_of_atoms ): df = self.read_atomic_dos_as_df( i+1 ) pdos_list.append( df ) self.pdos = np.vstack( [ np.array( df ) for df in pdos_list ] ).reshape( self.number_of_atoms, self.number_of_data_points, self.number_of_channels, self.ispin )
[ "\n Read the projected density of states data into " ]
Please provide a description of the function:def pdos_select( self, atoms=None, spin=None, l=None, m=None ): valid_m_values = { 's': [], 'p': [ 'x', 'y', 'z' ], 'd': [ 'xy', 'yz', 'z2-r2', 'xz', 'x2-y2' ], 'f': [ 'y(3x2-y2)', 'xyz', 'yz2', 'z3', 'xz2', 'z(x2-y2)', 'x(x2-3y2)' ] } if not atoms: atom_idx = list(range( self.number_of_atoms )) else: atom_idx = atoms to_return = self.pdos[ atom_idx, :, :, : ] if not spin: spin_idx = list(range( self.ispin )) elif spin == 'up': spin_idx = [0] elif spin == 'down': spin_idx = [1] elif spin == 'both': spin_idx = [0,1] else: raise ValueError( "valid spin values are 'up', 'down', and 'both'. The default is 'both'" ) to_return = to_return[ :, :, :, spin_idx ] if not l: channel_idx = list(range( self.number_of_channels )) elif l == 's': channel_idx = [ 0 ] elif l == 'p': if not m: channel_idx = [ 1, 2, 3 ] else: # TODO this looks like it should be i+1 channel_idx = [ i+1 for i, v in enumerate( valid_m_values['p'] ) if v in m ] elif l == 'd': if not m: channel_idx = [ 4, 5, 6, 7, 8 ] else: # TODO this looks like it should be i+4 channel_idx = [ i+4 for i, v in enumerate( valid_m_values['d'] ) if v in m ] elif l == 'f': if not m: channel_idx = [ 9, 10, 11, 12, 13, 14, 15 ] else: # TODO this looks like it should be i+9 channel_idx = [ i+9 for i, v in enumerate( valid_m_values['f'] ) if v in m ] else: raise ValueError return to_return[ :, :, channel_idx, : ]
[ "\n Returns a subset of the projected density of states array.\n\n Args:\n atoms (int or list(int)): Atom numbers to include in the selection. Atom numbers count from 1. \n Default is to select all atoms.\n spin (str): Select up or down, or both spin channels to include in the selection.\n Accepted options are 'up', 'down', and 'both'. Default is to select both spins.\n l (str): Select one angular momentum to include in the selectrion.\n Accepted options are 's', 'p', 'd', and 'f'. Default is to include all l-values.\n Setting `l` and not setting `m` will return all projections for that angular momentum value.\n m (list(str)): Select one or more m-values. Requires `l` to be set. \n The accepted values depend on the value of `l`:\n `l='s'`: Only one projection. Not set.\n `l='p'`: One or more of [ 'x', 'y', 'z' ]\n `l='d'`: One or more of [ 'xy', 'yz', 'z2-r2', 'xz', 'x2-y2' ]\n `l='f'`: One or more of [ 'y(3x2-y2)', 'xyz', 'yz2', 'z3', 'xz2', 'z(x2-y2)', 'x(x2-3y2)' ]\n\n Returns:\n np.array: A 4-dimensional numpy array containing the selected pdos values. \n The array dimensions are [ atom_no, energy_value, lm-projection, spin ]\n\n " ]
Please provide a description of the function:def delta_E( reactants, products, check_balance=True ): if check_balance: if delta_stoichiometry( reactants, products ) != {}: raise ValueError( "reaction is not balanced: {}".format( delta_stoichiometry( reactants, products) ) ) return sum( [ r.energy for r in products ] ) - sum( [ r.energy for r in reactants ] )
[ "\n Calculate the change in energy for reactants --> products.\n \n Args:\n reactants (list(vasppy.Calculation): A list of vasppy.Calculation objects. The initial state.\n products (list(vasppy.Calculation): A list of vasppy.Calculation objects. The final state.\n check_balance (bool:optional): Check that the reaction stoichiometry is balanced. Default: True.\n\n Returns:\n (float) The change in energy.\n " ]
Please provide a description of the function:def delta_stoichiometry( reactants, products ): totals = Counter() for r in reactants: totals.update( ( r * -1.0 ).stoichiometry ) for p in products: totals.update( p.stoichiometry ) to_return = {} for c in totals: if totals[c] != 0: to_return[c] = totals[c] return to_return
[ "\n Calculate the change in stoichiometry for reactants --> products.\n\n Args:\n reactants (list(vasppy.Calculation): A list of vasppy.Calculation objects. The initial state.\n products (list(vasppy.Calculation): A list of vasppy.Calculation objects. The final state.\n\n Returns:\n (Counter): The change in stoichiometry.\n " ]
Please provide a description of the function:def energy_string_to_float( string ): energy_re = re.compile( "(-?\d+\.\d+)" ) return float( energy_re.match( string ).group(0) )
[ "\n Convert a string of a calculation energy, e.g. '-1.2345 eV' to a float.\n\n Args:\n string (str): The string to convert.\n \n Return\n (float) \n " ]
Please provide a description of the function:def import_calculations_from_file( filename ): calcs = {} with open( filename, 'r' ) as stream: docs = yaml.load_all( stream, Loader=yaml.SafeLoader ) for d in docs: stoichiometry = Counter() for s in d['stoichiometry']: stoichiometry.update( s ) calcs[ d['title'] ] = Calculation( title=d['title'], stoichiometry=stoichiometry, energy=energy_string_to_float( d['energy'] ) ) return calcs
[ "\n Construct a list of Calculation objects by reading a YAML file.\n Each YAML document should include 'title', 'stoichiometry', and 'energy' fields. e.g.::\n\n title: my calculation\n stoichiometry:\n - A: 1\n - B: 2\n energy: -0.1234 eV\n\n Separate calculations should be distinct YAML documents, separated by `---`\n \n Args:\n filename (str): Name of the YAML file to read.\n\n Returns:\n (dict(vasppy.Calculation)): A dictionary of Calculation objects. For each Calculation object, the 'title' field from the YAML input is used as the dictionary key.\n " ]
Please provide a description of the function:def scale_stoichiometry( self, scaling ): return { k:v*scaling for k,v in self.stoichiometry.items() }
[ "\n Scale the Calculation stoichiometry\n Returns the stoichiometry, scaled by the argument scaling.\n\n Args:\n scaling (float): The scaling factor.\n\n Returns:\n (Counter(Str:Int)): The scaled stoichiometry as a Counter of label: stoichiometry pairs\n " ]
Please provide a description of the function:def angle( x, y ): dot = np.dot( x, y ) x_mod = np.linalg.norm( x ) y_mod = np.linalg.norm( y ) cos_angle = dot / ( x_mod * y_mod ) return np.degrees( np.arccos( cos_angle ) )
[ "\n Calculate the angle between two vectors, in degrees.\n\n Args:\n x (np.array): one vector.\n y (np.array): the other vector.\n\n Returns:\n (float): the angle between x and y in degrees.\n " ]
Please provide a description of the function:def dr( self, r1, r2, cutoff=None ): delta_r_cartesian = ( r1 - r2 ).dot( self.matrix ) delta_r_squared = sum( delta_r_cartesian**2 ) if cutoff != None: cutoff_squared = cutoff ** 2 if delta_r_squared > cutoff_squared: return None return( math.sqrt( delta_r_squared ) )
[ "\n Calculate the distance between two fractional coordinates in the cell.\n \n Args:\n r1 (np.array): fractional coordinates for position 1.\n r2 (np.array): fractional coordinates for position 2.\n cutoff (optional:Bool): If set, returns None for distances greater than the cutoff. Default None (unset).\n\n Returns:\n (float): the distance between r1 and r2.\n " ]
Please provide a description of the function:def minimum_image( self, r1, r2 ): delta_r = r2 - r1 delta_r = np.array( [ x - math.copysign( 1.0, x ) if abs(x) > 0.5 else x for x in delta_r ] ) return( delta_r )
[ "\n Find the minimum image vector from point r1 to point r2.\n\n Args:\n r1 (np.array): fractional coordinates of point r1.\n r2 (np.array): fractional coordinates of point r2.\n\n Returns:\n (np.array): the fractional coordinate vector from r1 to the nearest image of r2.\n " ]
Please provide a description of the function:def minimum_image_dr( self, r1, r2, cutoff=None ): delta_r_vector = self.minimum_image( r1, r2 ) return( self.dr( np.zeros( 3 ), delta_r_vector, cutoff ) )
[ "\n Calculate the shortest distance between two points in the cell, \n accounting for periodic boundary conditions.\n\n Args:\n r1 (np.array): fractional coordinates of point r1.\n r2 (np.array): fractional coordinates of point r2.\n cutoff (:obj: `float`, optional): if set, return zero if the minimum distance is greater than `cutoff`. Defaults to None.\n\n Returns:\n (float): The distance between r1 and r2.\n " ]
Please provide a description of the function:def lengths( self ): return( np.array( [ math.sqrt( sum( row**2 ) ) for row in self.matrix ] ) )
[ "\n The cell lengths.\n\n Args:\n None\n\n Returns:\n (np.array(a,b,c)): The cell lengths.\n " ]
Please provide a description of the function:def angles( self ): ( a, b, c ) = [ row for row in self.matrix ] return [ angle( b, c ), angle( a, c ), angle( a, b ) ]
[ "\n The cell angles (in degrees).\n\n Args:\n None\n\n Returns:\n (list(alpha,beta,gamma)): The cell angles.\n " ]
Please provide a description of the function:def inside_cell( self, r ): centre = np.array( [ 0.5, 0.5, 0.5 ] ) new_r = self.nearest_image( centre, r ) return new_r
[ "\n Given a fractional-coordinate, if this lies outside the cell return the equivalent point inside the cell.\n\n Args:\n r (np.array): Fractional coordinates of a point (this may be outside the cell boundaries).\n\n Returns:\n (np.array): Fractional coordinates of an equivalent point, inside the cell boundaries.\n " ]
Please provide a description of the function:def volume( self ): return np.dot( self.matrix[0], np.cross( self.matrix[1], self.matrix[2] ) )
[ "\n The cell volume.\n\n Args:\n None\n\n Returns:\n (float): The cell volume.\n " ]
Please provide a description of the function:def from_file( cls, filename ): with open( filename, 'r' ) as stream: data = yaml.load( stream, Loader=yaml.SafeLoader ) notes = data.get( 'notes' ) v_type = data.get( 'type' ) track = data.get( 'track' ) xargs = {} if track: if type( track ) is str: track = [ track ] xargs['track'] = track vaspmeta = VASPMeta( data['title'], data['description'], data['status'], notes=notes, type=v_type, **xargs ) return vaspmeta
[ "\n Create a VASPMeta object by reading a `vaspmeta.yaml` file\n\n Args:\n filename (Str): filename to read in.\n\n Returns:\n (vasppy.VASPMeta): the VASPMeta object\n " ]
Please provide a description of the function:def reciprocal_lattice_from_outcar( filename ): # from https://github.com/MaterialsDiscovery/PyChemia outcar = open(filename, "r").read() # just keeping the last component recLat = re.findall(r"reciprocal\s*lattice\s*vectors\s*([-.\s\d]*)", outcar)[-1] recLat = recLat.split() recLat = np.array(recLat, dtype=float) # up to now I have, both direct and rec. lattices (3+3=6 columns) recLat.shape = (3, 6) recLat = recLat[:, 3:] return recLat
[ "\n Finds and returns the reciprocal lattice vectors, if more than\n one set present, it just returns the last one.\n Args:\n filename (Str): The name of the outcar file to be read\n\n Returns:\n List(Float): The reciprocal lattice vectors.\n " ]
Please provide a description of the function:def final_energy_from_outcar( filename='OUTCAR' ): with open( filename ) as f: outcar = f.read() energy_re = re.compile( "energy\(sigma->0\) =\s+([-\d\.]+)" ) energy = float( energy_re.findall( outcar )[-1] ) return energy
[ "\n Finds and returns the energy from a VASP OUTCAR file, by searching for the last `energy(sigma->0)` entry.\n\n Args:\n filename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'.\n\n Returns:\n (Float): The last energy read from the OUTCAR file.\n " ]
Please provide a description of the function:def vasp_version_from_outcar( filename='OUTCAR' ): with open( filename ) as f: line = f.readline().strip() return line
[ "\n Returns the first line from a VASP OUTCAR file, to get the VASP source version string.\n\n Args:\n filename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'.\n\n Returns:\n (Str): The first line read from the OUTCAR file.\n " ]
Please provide a description of the function:def potcar_eatom_list_from_outcar( filename='OUTCAR' ): with open( filename ) as f: outcar = f.read() eatom_re = re.compile( "energy of atom\s+\d+\s+EATOM=\s*([-\d\.]+)" ) eatom = [ float( e ) for e in eatom_re.findall( outcar ) ] return eatom
[ "\n Returns a list of EATOM values for the pseudopotentials used.\n\n Args:\n filename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'.\n\n Returns:\n (List(Float)): A list of EATOM values, in the order they appear in the OUTCAR.\n " ]
Please provide a description of the function:def fermi_energy_from_outcar( filename='OUTCAR' ): outcar = open(filename, "r").read() # returns a match object fermi_energy = re.search(r"E-fermi\s*:\s*([-.\d]*)", outcar) # take the first group - group(0) contains entire match fermi_energy = float(fermi_energy.group(1)) return fermi_energy
[ "Finds and returns the fermi energy.\n Args:\n -filename: the name of the outcar file to be read\n\n Returns:\n (Float): The fermi energy as found in the OUTCAR \n " ]
Please provide a description of the function:def build_description(node=None): if node is None: from logging_tree.nodes import tree node = tree() return '\n'.join([ line.rstrip() for line in describe(node) ]) + '\n'
[ "Return a multi-line string describing a `logging_tree.nodes.Node`.\n\n If no `node` argument is provided, then the entire tree of currently\n active `logging` loggers is printed out.\n\n " ]
Please provide a description of the function:def _describe(node, parent): name, logger, children = node is_placeholder = isinstance(logger, logging.PlaceHolder) if is_placeholder: yield '<--[%s]' % name else: parent_is_correct = (parent is None) or (logger.parent is parent) if not logger.propagate: arrow = ' ' elif parent_is_correct: arrow = '<--' else: arrow = ' !-' yield '%s"%s"' % (arrow, name) if not parent_is_correct: if logger.parent is None: yield (' Broken .parent is None, so messages stop here') else: yield (' Broken .parent redirects messages to %r instead' % (logger.parent.name,)) if logger.level == logging.NOTSET: yield ' Level NOTSET so inherits level ' + logging.getLevelName( logger.getEffectiveLevel()) else: yield ' Level ' + logging.getLevelName(logger.level) if not logger.propagate: yield ' Propagate OFF' if logger.disabled: yield ' Disabled' # In case someone has defined a custom logger that lacks a # `filters` or `handlers` attribute, we call getattr() and # provide an empty sequence as a fallback. for f in getattr(logger, 'filters', ()): yield ' Filter %s' % describe_filter(f) for h in getattr(logger, 'handlers', ()): g = describe_handler(h) yield ' Handler %s' % next(g) for line in g: yield ' ' + line if children: if not is_placeholder: parent = logger last_child = children[-1] for child in children: g = _describe(child, parent) yield ' |' yield ' o' + next(g) if child is last_child: prefix = ' ' else: prefix = ' |' for line in g: yield prefix + line
[ "Generate lines describing the given `node` tuple.\n\n This is the recursive back-end that powers ``describe()``. With its\n extra ``parent`` parameter, this routine remembers the nearest\n non-placeholder ancestor so that it can compare it against the\n actual value of the ``.parent`` attribute of each node.\n\n " ]
Please provide a description of the function:def describe_filter(f): if f.__class__ is logging.Filter: # using type() breaks in Python <= 2.6 return 'name=%r' % f.name return repr(f)
[ "Return text describing the logging filter `f`." ]
Please provide a description of the function:def describe_handler(h): t = h.__class__ # using type() breaks in Python <= 2.6 format = handler_formats.get(t) if format is not None: yield format % h.__dict__ else: yield repr(h) level = getattr(h, 'level', logging.NOTSET) if level != logging.NOTSET: yield ' Level ' + logging.getLevelName(level) for f in getattr(h, 'filters', ()): yield ' Filter %s' % describe_filter(f) formatter = getattr(h, 'formatter', None) if formatter is not None: if type(formatter) is logging.Formatter: yield ' Formatter fmt=%r datefmt=%r' % ( getattr(formatter, '_fmt', None), getattr(formatter, 'datefmt', None)) else: yield ' Formatter %r' % (formatter,) if t is logging.handlers.MemoryHandler and h.target is not None: yield ' Flushes output to:' g = describe_handler(h.target) yield ' Handler ' + next(g) for line in g: yield ' ' + line
[ "Yield one or more lines describing the logging handler `h`." ]
Please provide a description of the function:def tree(): root = ('', logging.root, []) nodes = {} items = list(logging.root.manager.loggerDict.items()) # for Python 2 and 3 items.sort() for name, logger in items: nodes[name] = node = (name, logger, []) i = name.rfind('.', 0, len(name) - 1) # same formula used in `logging` if i == -1: parent = root else: parent = nodes[name[:i]] parent[2].append(node) return root
[ "Return a tree of tuples representing the logger layout.\n\n Each tuple looks like ``('logger-name', <Logger>, [...])`` where the\n third element is a list of zero or more child tuples that share the\n same layout.\n\n " ]
Please provide a description of the function:def patched_str(self): def red(words): return u("\033[31m\033[49m%s\033[0m") % words def white(words): return u("\033[37m\033[49m%s\033[0m") % words def blue(words): return u("\033[34m\033[49m%s\033[0m") % words def teal(words): return u("\033[36m\033[49m%s\033[0m") % words def get_uri(code): return "https://www.signalwire.com/docs/errors/{0}".format(code) # If it makes sense to print a human readable error message, try to # do it. The one problem is that someone might catch this error and # try to display the message from it to an end user. if hasattr(sys.stderr, 'isatty') and sys.stderr.isatty(): msg = ( "\n{red_error} {request_was}\n\n{http_line}" "\n\n{sw_returned}\n\n{message}\n".format( red_error=red("HTTP Error"), request_was=white("Your request was:"), http_line=teal("%s %s" % (self.method, self.uri)), sw_returned=white( "Signalwire returned the following information:"), message=blue(str(self.msg)) )) if self.code: msg = "".join([msg, "\n{more_info}\n\n{uri}\n\n".format( more_info=white("More information may be available here:"), uri=blue(get_uri(self.code))), ]) return msg else: return "HTTP {0} error: {1}".format(self.status, self.msg)
[ " Try to pretty-print the exception, if this is going on screen. " ]
Please provide a description of the function:def patched_fax_init(self, twilio): super(TwilioFax, self).__init__(twilio) self.base_url = '' self.account_sid = twilio.account_sid # Versions self._v1 = None
[ "\n Initialize the Fax Domain\n :returns: Domain for Fax\n :rtype: twilio.rest.fax.Fax\n " ]
Please provide a description of the function:def patched_fax_v1_init(self, domain): print(domain.__class__.__name__) super(TwilioV1, self).__init__(domain) self.version = "2010-04-01/Accounts/" + domain.account_sid self._faxes = None
[ "\n Initialize the V1 version of Fax\n :returns: V1 version of Fax\n :rtype: twilio.rest.fax.v1.V1.V1\n " ]
Please provide a description of the function:def find_this(search, filename=MODULE_PATH): if not search: return for line in open(str(filename)).readlines(): if search.lower() in line.lower(): line = line.split("=")[1].strip() if "'" in line or '"' in line or '`' in line: line = line.replace("'", "").replace('"', '').replace('`', '') return line
[ "Take a string and a filename path string and return the found value." ]
Please provide a description of the function:def h(self): if np.size(self._h) > 1: assert np.size(self._h) == self.n_modelparams return self._h else: return self._h * np.ones(self.n_modelparams)
[ "\n Returns the step size to be used in numerical differentiation with \n respect to the model parameters.\n \n The step size is given as a vector with length ``n_modelparams`` so \n that each model parameter can be weighted independently.\n " ]
Please provide a description of the function:def score(self, outcomes, modelparams, expparams, return_L=False): r if len(modelparams.shape) == 1: modelparams = modelparams[:, np.newaxis] # compute likelihood at central point L0 = self.likelihood(outcomes, modelparams, expparams) # allocate space for the score q = np.empty([self.n_modelparams, outcomes.shape[0], modelparams.shape[0], expparams.shape[0]]) h_perturb = np.empty(modelparams.shape) # just loop over the model parameter as there usually won't be so many # of them that vectorizing would be worth the effort. for mp_idx in range(self.n_modelparams): h_perturb[:] = np.zeros(modelparams.shape) h_perturb[:, mp_idx] = self.h[mp_idx] # use the chain rule since taking the numerical derivative of a # logarithm is unstable q[mp_idx, :] = ( self.likelihood(outcomes, modelparams + h_perturb, expparams) - self.likelihood(outcomes, modelparams - h_perturb, expparams) ) / (2 * self.h[mp_idx] * L0) if return_L: return q, L0 else: return q
[ "\n Returns the numerically computed score of the likelihood \n function, defined as:\n \n .. math::\n \n q(d, \\vec{x}; \\vec{e}) = \\vec{\\nabla}_{\\vec{x}} \\log \\Pr(d | \\vec{x}; \\vec{e}).\n \n Calls are represented as a four-index tensor\n ``score[idx_modelparam, idx_outcome, idx_model, idx_experiment]``.\n The left-most index may be suppressed for single-parameter models.\n \n The numerical gradient is computed using the central difference method, \n with step size given by the property `~ScoreMixin.h`.\n \n If return_L is True, both `q` and the likelihood `L` are returned as `q, L`.\n " ]
Please provide a description of the function:def clear_cache(self): self.underlying_model.clear_cache() try: logger.info('DirectView results has {} items. Clearing.'.format( len(self._dv.results) )) self._dv.purge_results('all') if self._purge_client: self._dv.client.purge_everything() except: pass
[ "\n Clears any cache associated with the serial model and the engines\n seen by the direct view.\n " ]
Please provide a description of the function:def likelihood(self, outcomes, modelparams, expparams): # By calling the superclass implementation, we can consolidate # call counting there. super(DirectViewParallelizedModel, self).likelihood(outcomes, modelparams, expparams) # If there's less models than some threshold, just use the serial model. # By default, we'll set that threshold to be the number of engines * 10. if modelparams.shape[0] <= self._serial_threshold: return self.underlying_model.likelihood(outcomes, modelparams, expparams) if self._dv is None: raise RuntimeError( "No direct view provided; this may be because the instance was " "loaded from a pickle or NumPy saved array without providing a " "new direct view." ) # Need to decorate with interactive to overcome namespace issues with # remote engines. @interactive def serial_likelihood(mps, sm, os, eps): return sm.likelihood(os, mps, eps) # TODO: check whether there's a better way to pass the extra parameters # that doesn't use so much memory. # The trick is that serial_likelihood will be pickled, so we need to be # careful about closures. L = self._dv.map_sync( serial_likelihood, np.array_split(modelparams, self.n_engines, axis=0), [self.underlying_model] * self.n_engines, [outcomes] * self.n_engines, [expparams] * self.n_engines ) return np.concatenate(L, axis=1)
[ "\n Returns the likelihood for the underlying (serial) model, distributing\n the model parameter array across the engines controlled by this\n parallelized model. Returns what the serial model would return, see\n :attr:`~Model.likelihood`\n " ]
Please provide a description of the function:def simulate_experiment(self, modelparams, expparams, repeat=1, split_by_modelparams=True): # By calling the superclass implementation, we can consolidate # simulation counting there. super(DirectViewParallelizedModel, self).simulate_experiment(modelparams, expparams, repeat=repeat) if self._dv is None: raise RuntimeError( "No direct view provided; this may be because the instance was " "loaded from a pickle or NumPy saved array without providing a " "new direct view." ) # Need to decorate with interactive to overcome namespace issues with # remote engines. @interactive def serial_simulator(sm, mps, eps, r): return sm.simulate_experiment(mps, eps, repeat=r) if split_by_modelparams: # If there's less models than some threshold, just use the serial model. # By default, we'll set that threshold to be the number of engines * 10. if modelparams.shape[0] <= self._serial_threshold: return self.underlying_model.simulate_experiment(modelparams, expparams, repeat=repeat) # The trick is that serial_likelihood will be pickled, so we need to be # careful about closures. os = self._dv.map_sync( serial_simulator, [self.underlying_model] * self.n_engines, np.array_split(modelparams, self.n_engines, axis=0), [expparams] * self.n_engines, [repeat] * self.n_engines ) return np.concatenate(os, axis=0) else: # If there's less models than some threshold, just use the serial model. # By default, we'll set that threshold to be the number of engines * 10. if expparams.shape[0] <= self._serial_threshold: return self.underlying_model.simulate_experiment(modelparams, expparams, repeat=repeat) # The trick is that serial_likelihood will be pickled, so we need to be # careful about closures. os = self._dv.map_sync( serial_simulator, [self.underlying_model] * self.n_engines, [modelparams] * self.n_engines, np.array_split(expparams, self.n_engines, axis=0), [repeat] * self.n_engines ) return np.concatenate(os, axis=1)
[ "\n Simulates the underlying (serial) model using the parallel \n engines. Returns what the serial model would return, see\n :attr:`~Simulatable.simulate_experiment`\n\n :param bool split_by_modelparams: If ``True``, splits up\n ``modelparams`` into `n_engines` chunks and distributes \n across engines. If ``False``, splits up ``expparams``.\n " ]
Please provide a description of the function:def _maybe_resample(self): ess = self.n_ess if ess <= 10: warnings.warn( "Extremely small n_ess encountered ({}). " "Resampling is likely to fail. Consider adding particles, or " "resampling more often.".format(ess), ApproximationWarning ) if ess < self.n_particles * self.resample_thresh: self.resample() pass
[ "\n Checks the resample threshold and conditionally resamples.\n " ]
Please provide a description of the function:def update(self, outcome, expparams, check_for_resample=True): # First, record the outcome. # TODO: record the experiment as well. self._data_record.append(outcome) self._just_resampled = False # Perform the update. weights, norm = self.hypothetical_update(outcome, expparams, return_normalization=True) # Check for negative weights before applying the update. if not np.all(weights >= 0): warnings.warn("Negative weights occured in particle approximation. Smallest weight observed == {}. Clipping weights.".format(np.min(weights)), ApproximationWarning) np.clip(weights, 0, 1, out=weights) # Next, check if we have caused the weights to go to zero, as can # happen if the likelihood is identically zero for all particles, # or if the previous clip step choked on a NaN. if np.sum(weights) <= self._zero_weight_thresh: if self._zero_weight_policy == 'ignore': pass elif self._zero_weight_policy == 'skip': return elif self._zero_weight_policy == 'warn': warnings.warn("All particle weights are zero. This will very likely fail quite badly.", ApproximationWarning) elif self._zero_weight_policy == 'error': raise RuntimeError("All particle weights are zero.") elif self._zero_weight_policy == 'reset': warnings.warn("All particle weights are zero. Resetting from initial prior.", ApproximationWarning) self.reset() else: raise ValueError("Invalid zero-weight policy {} encountered.".format(self._zero_weight_policy)) # Since hypothetical_update returns an array indexed by # [outcome, experiment, particle], we need to strip off those two # indices first. self.particle_weights[:] = weights[0,0,:] # Record the normalization self._normalization_record.append(norm[0][0]) # Update the particle locations according to the model's timestep. self.particle_locations = self.model.update_timestep( self.particle_locations, expparams )[:, :, 0] # Check if we need to update our min_n_ess attribute. if self.n_ess <= self._min_n_ess: self._min_n_ess = self.n_ess # Resample if needed. if check_for_resample: self._maybe_resample()
[ "\n Given an experiment and an outcome of that experiment, updates the\n posterior distribution to reflect knowledge of that experiment.\n\n After updating, resamples the posterior distribution if necessary.\n\n :param int outcome: Label for the outcome that was observed, as defined\n by the :class:`~qinfer.abstract_model.Model` instance under study.\n :param expparams: Parameters describing the experiment that was\n performed.\n :type expparams: :class:`~numpy.ndarray` of dtype given by the\n :attr:`~qinfer.abstract_model.Model.expparams_dtype` property\n of the underlying model\n :param bool check_for_resample: If :obj:`True`, after performing the\n update, the effective sample size condition will be checked and\n a resampling step may be performed.\n " ]
Please provide a description of the function:def batch_update(self, outcomes, expparams, resample_interval=5): # TODO: write a faster implementation here using vectorized calls to # likelihood. # Check that the number of outcomes and experiments is the same. n_exps = outcomes.shape[0] if expparams.shape[0] != n_exps: raise ValueError("The number of outcomes and experiments must match.") if len(expparams.shape) == 1: expparams = expparams[:, None] # Loop over experiments and update one at a time. for idx_exp, (outcome, experiment) in enumerate(zip(iter(outcomes), iter(expparams))): self.update(outcome, experiment, check_for_resample=False) if (idx_exp + 1) % resample_interval == 0: self._maybe_resample()
[ "\n Updates based on a batch of outcomes and experiments, rather than just\n one.\n\n :param numpy.ndarray outcomes: An array of outcomes of the experiments that\n were performed.\n :param numpy.ndarray expparams: Either a scalar or record single-index\n array of experiments that were performed.\n :param int resample_interval: Controls how often to check whether\n :math:`N_{\\text{ess}}` falls below the resample threshold.\n " ]
Please provide a description of the function:def resample(self): if self.just_resampled: warnings.warn( "Resampling without additional data; this may not perform as " "desired.", ResamplerWarning ) # Record that we have performed a resampling step. self._just_resampled = True self._resample_count += 1 # If we're tracking divergences, make a copy of the weights and # locations. if self._resampling_divergences is not None: old_locs = self.particle_locations.copy() old_weights = self.particle_weights.copy() # Record the previous mean, cov if needed. if self._debug_resampling: old_mean = self.est_mean() old_cov = self.est_covariance_mtx() # Find the new particle locations according to the chosen resampling # algorithm. # We pass the model so that the resampler can check for validity of # newly placed particles. # FIXME This feels fishy. If we update particles elsewwhere new_distribution = self.resampler(self.model, self) self.particle_weights = new_distribution.particle_weights self.particle_locations = new_distribution.particle_locations # Possibly canonicalize, if we've been asked to do so. if self._canonicalize: self.particle_locations[:, :] = self.model.canonicalize(self.particle_locations) # Instruct the model to clear its cache, demoting any errors to # warnings. try: self.model.clear_cache() except Exception as e: warnings.warn("Exception raised when clearing model cache: {}. Ignoring.".format(e)) # Possibly track the new divergence. if self._resampling_divergences is not None: self._resampling_divergences.append( self._kl_divergence(old_locs, old_weights) ) # Report current and previous mean, cov. if self._debug_resampling: new_mean = self.est_mean() new_cov = self.est_covariance_mtx() logger.debug("Resampling changed mean by {}. Norm change in cov: {}.".format( old_mean - new_mean, np.linalg.norm(new_cov - old_cov) ))
[ "\n Forces the updater to perform a resampling step immediately.\n " ]
Please provide a description of the function:def bayes_risk(self, expparams): r # for models whose outcome number changes with experiment, we # take the easy way out and for-loop over experiments n_eps = expparams.size if n_eps > 1 and not self.model.is_n_outcomes_constant: risk = np.empty(n_eps) for idx in range(n_eps): risk[idx] = self.bayes_risk(expparams[idx, np.newaxis]) return risk # outcomes for the first experiment os = self.model.domain(expparams[0,np.newaxis])[0].values # compute the hypothetical weights, likelihoods and normalizations for # every possible outcome and expparam # the likelihood over outcomes should sum to 1, so don't compute for last outcome w_hyp, L, N = self.hypothetical_update( os[:-1], expparams, return_normalization=True, return_likelihood=True ) w_hyp_last_outcome = (1 - L.sum(axis=0)) * self.particle_weights[np.newaxis, :] N = np.concatenate([N[:,:,0], np.sum(w_hyp_last_outcome[np.newaxis,:,:], axis=2)], axis=0) w_hyp_last_outcome = w_hyp_last_outcome / N[-1,:,np.newaxis] w_hyp = np.concatenate([w_hyp, w_hyp_last_outcome[np.newaxis,:,:]], axis=0) # w_hyp.shape == (n_out, n_eps, n_particles) # N.shape == (n_out, n_eps) # compute the hypothetical means and variances given outcomes and exparams # mu_hyp.shape == (n_out, n_eps, n_models) # var_hyp.shape == (n_out, n_eps) mu_hyp = np.dot(w_hyp, self.particle_locations) var_hyp = np.sum( w_hyp * np.sum(self.model.Q * ( self.particle_locations[np.newaxis,np.newaxis,:,:] - mu_hyp[:,:,np.newaxis,:] ) ** 2, axis=3), axis=2 ) # the risk of a given expparam can be calculated as the mean posterior # variance weighted over all possible outcomes return np.sum(N * var_hyp, axis=0)
[ "\n Calculates the Bayes risk for hypothetical experiments, assuming the\n quadratic loss function defined by the current model's scale matrix\n (see :attr:`qinfer.abstract_model.Simulatable.Q`).\n\n :param expparams: The experiments at which to compute the risk.\n :type expparams: :class:`~numpy.ndarray` of dtype given by the current\n model's :attr:`~qinfer.abstract_model.Simulatable.expparams_dtype` property,\n and of shape ``(1,)``\n\n :return np.ndarray: The Bayes risk for the current posterior distribution\n at each hypothetical experiment in ``expparams``, therefore \n has shape ``(expparams.size,)``\n " ]
Please provide a description of the function:def expected_information_gain(self, expparams): r # This is a special case of the KL divergence estimator (see below), # in which the other distribution is guaranteed to share support. # for models whose outcome number changes with experiment, we # take the easy way out and for-loop over experiments n_eps = expparams.size if n_eps > 1 and not self.model.is_n_outcomes_constant: risk = np.empty(n_eps) for idx in range(n_eps): risk[idx] = self.expected_information_gain(expparams[idx, np.newaxis]) return risk # number of outcomes for the first experiment os = self.model.domain(expparams[0,np.newaxis])[0].values # compute the hypothetical weights, likelihoods and normalizations for # every possible outcome and expparam # the likelihood over outcomes should sum to 1, so don't compute for last outcome w_hyp, L, N = self.hypothetical_update( os[:-1], expparams, return_normalization=True, return_likelihood=True ) w_hyp_last_outcome = (1 - L.sum(axis=0)) * self.particle_weights[np.newaxis, :] N = np.concatenate([N[:,:,0], np.sum(w_hyp_last_outcome[np.newaxis,:,:], axis=2)], axis=0) w_hyp_last_outcome = w_hyp_last_outcome / N[-1,:,np.newaxis] w_hyp = np.concatenate([w_hyp, w_hyp_last_outcome[np.newaxis,:,:]], axis=0) # w_hyp.shape == (n_out, n_eps, n_particles) # N.shape == (n_out, n_eps) # compute the Kullback-Liebler divergence for every experiment and possible outcome # KLD.shape == (n_out, n_eps) KLD = np.sum(w_hyp * np.log(w_hyp / self.particle_weights), axis=2) # return the expected KLD (ie expected info gain) for every experiment return np.sum(N * KLD, axis=0)
[ "\n Calculates the expected information gain for each hypothetical experiment.\n\n :param expparams: The experiments at which to compute expected\n information gain.\n :type expparams: :class:`~numpy.ndarray` of dtype given by the current\n model's :attr:`~qinfer.abstract_model.Simulatable.expparams_dtype` property,\n and of shape ``(n,)``\n\n :return float: The expected information gain for each \n hypothetical experiment in ``expparams``.\n " ]
Please provide a description of the function:def posterior_marginal(self, idx_param=0, res=100, smoothing=0, range_min=None, range_max=None): # We need to sort the particles to get cumsum to make sense. # interp1d would do it anyways (using argsort, too), so it's not a waste s = np.argsort(self.particle_locations[:,idx_param]) locs = self.particle_locations[s,idx_param] # relevant axis discretization r_min = np.min(locs) if range_min is None else range_min r_max = np.max(locs) if range_max is None else range_max ps = np.linspace(r_min, r_max, res) # interpolate the cdf of the marginal distribution using cumsum interp = scipy.interpolate.interp1d( np.append(locs, r_max + np.abs(r_max-r_min)), np.append(np.cumsum(self.particle_weights[s]), 1), #kind='cubic', bounds_error=False, fill_value=0, assume_sorted=True ) # get distribution from derivative of cdf, and smooth it pr = np.gradient(interp(ps), ps[1]-ps[0]) if smoothing > 0: gaussian_filter1d(pr, res*smoothing/(np.abs(r_max-r_min)), output=pr) del interp return ps, pr
[ "\n Returns an estimate of the marginal distribution of a given model parameter, based on\n taking the derivative of the interpolated cdf.\n\n :param int idx_param: Index of parameter to be marginalized.\n :param int res1: Resolution of of the axis.\n :param float smoothing: Standard deviation of the Gaussian kernel\n used to smooth; same units as parameter.\n :param float range_min: Minimum range of the output axis.\n :param float range_max: Maximum range of the output axis.\n\n .. seealso::\n\n :meth:`SMCUpdater.plot_posterior_marginal`\n " ]
Please provide a description of the function:def plot_posterior_marginal(self, idx_param=0, res=100, smoothing=0, range_min=None, range_max=None, label_xaxis=True, other_plot_args={}, true_model=None ): res = plt.plot(*self.posterior_marginal( idx_param, res, smoothing, range_min, range_max ), **other_plot_args) if label_xaxis: plt.xlabel('${}$'.format(self.model.modelparam_names[idx_param])) if true_model is not None: true_model = true_model[0, idx_param] if true_model.ndim == 2 else true_model[idx_param] old_ylim = plt.ylim() plt.vlines(true_model, old_ylim[0] - 0.1, old_ylim[1] + 0.1, color='k', linestyles='--') plt.ylim(old_ylim) return res
[ "\n Plots a marginal of the requested parameter.\n\n :param int idx_param: Index of parameter to be marginalized.\n :param int res1: Resolution of of the axis.\n :param float smoothing: Standard deviation of the Gaussian kernel\n used to smooth; same units as parameter.\n :param float range_min: Minimum range of the output axis.\n :param float range_max: Maximum range of the output axis.\n :param bool label_xaxis: Labels the :math:`x`-axis with the model parameter name\n given by this updater's model.\n :param dict other_plot_args: Keyword arguments to be passed to\n matplotlib's ``plot`` function.\n :param np.ndarray true_model: Plots a given model parameter vector\n as the \"true\" model for comparison.\n\n .. seealso::\n\n :meth:`SMCUpdater.posterior_marginal`\n " ]
Please provide a description of the function:def plot_covariance(self, corr=False, param_slice=None, tick_labels=None, tick_params=None): if mpls is None: raise ImportError("Hinton diagrams require mpltools.") if param_slice is None: param_slice = np.s_[:] tick_labels = ( list(range(len(self.model.modelparam_names[param_slice]))), tick_labels if tick_labels is not None else list(map(u"${}$".format, self.model.modelparam_names[param_slice])) ) cov = self.est_covariance_mtx(corr=corr)[param_slice, param_slice] retval = mpls.hinton(cov) plt.xticks(*tick_labels, **(tick_params if tick_params is not None else {})) plt.yticks(*tick_labels, **(tick_params if tick_params is not None else {})) plt.gca().xaxis.tick_top() return retval
[ "\n Plots the covariance matrix of the posterior as a Hinton diagram.\n\n .. note::\n\n This function requires that mpltools is installed.\n\n :param bool corr: If `True`, the covariance matrix is first normalized\n by the outer product of the square root diagonal of the covariance matrix\n such that the correlation matrix is plotted instead.\n :param slice param_slice: Slice of the modelparameters to\n be plotted.\n :param list tick_labels: List of tick labels for each component;\n by default, these are drawn from the model itself.\n " ]
Please provide a description of the function:def posterior_mesh(self, idx_param1=0, idx_param2=1, res1=100, res2=100, smoothing=0.01): # WARNING: fancy indexing is used here, which means that a copy is # made. locs = self.particle_locations[:, [idx_param1, idx_param2]] p1s, p2s = np.meshgrid( np.linspace(np.min(locs[:, 0]), np.max(locs[:, 0]), res1), np.linspace(np.min(locs[:, 1]), np.max(locs[:, 1]), res2) ) plot_locs = np.array([p1s, p2s]).T.reshape((np.prod(p1s.shape), 2)) pr = np.sum( # <- sum over the particles in the SMC approximation. np.prod( # <- product over model parameters to get a multinormal # Evaluate the PDF at the plotting locations, with a normal # located at the particle locations. scipy.stats.norm.pdf( plot_locs[:, np.newaxis, :], scale=smoothing, loc=locs ), axis=-1 ) * self.particle_weights, axis=1 ).reshape(p1s.shape) # Finally, reshape back into the same shape as the mesh. return p1s, p2s, pr
[ "\n Returns a mesh, useful for plotting, of kernel density estimation\n of a 2D projection of the current posterior distribution.\n\n :param int idx_param1: Parameter to be treated as :math:`x` when\n plotting.\n :param int idx_param2: Parameter to be treated as :math:`y` when\n plotting.\n :param int res1: Resolution along the :math:`x` direction.\n :param int res2: Resolution along the :math:`y` direction.\n :param float smoothing: Standard deviation of the Gaussian kernel\n used to smooth the particle approximation to the current posterior.\n\n .. seealso::\n\n :meth:`SMCUpdater.plot_posterior_contour`\n " ]
Please provide a description of the function:def plot_posterior_contour(self, idx_param1=0, idx_param2=1, res1=100, res2=100, smoothing=0.01): return plt.contour(*self.posterior_mesh(idx_param1, idx_param2, res1, res2, smoothing))
[ "\n Plots a contour of the kernel density estimation\n of a 2D projection of the current posterior distribution.\n\n :param int idx_param1: Parameter to be treated as :math:`x` when\n plotting.\n :param int idx_param2: Parameter to be treated as :math:`y` when\n plotting.\n :param int res1: Resolution along the :math:`x` direction.\n :param int res2: Resolution along the :math:`y` direction.\n :param float smoothing: Standard deviation of the Gaussian kernel\n used to smooth the particle approximation to the current posterior.\n\n .. seealso::\n\n :meth:`SMCUpdater.posterior_mesh`\n " ]
Please provide a description of the function:def prior_bayes_information(self, expparams, n_samples=None): if n_samples is None: n_samples = self.particle_locations.shape[0] return self._bim(self.prior.sample(n_samples), expparams)
[ "\n Evaluates the local Bayesian Information Matrix (BIM) for a set of\n samples from the SMC particle set, with uniform weights.\n\n :param expparams: Parameters describing the experiment that was\n performed.\n :type expparams: :class:`~numpy.ndarray` of dtype given by the\n :attr:`~qinfer.abstract_model.Model.expparams_dtype` property\n of the underlying model\n\n :param n_samples int: Number of samples to draw from particle distribution,\n to evaluate BIM over.\n " ]
Please provide a description of the function:def posterior_bayes_information(self, expparams): return self._bim( self.particle_locations, expparams, modelweights=self.particle_weights )
[ "\n Evaluates the local Bayesian Information Matrix (BIM) over all particles\n of the current posterior distribution with corresponding weights.\n\n :param expparams: Parameters describing the experiment that was\n performed.\n :type expparams: :class:`~numpy.ndarray` of dtype given by the\n :attr:`~qinfer.abstract_model.Model.expparams_dtype` property\n of the underlying model\n\n " ]
Please provide a description of the function:def update(self, outcome, expparams,check_for_resample=True): # Before we update, we need to commit the new Bayesian information # matrix corresponding to the measurement we just made. self._current_bim += self.prior_bayes_information(expparams)[:, :, 0] # If we're tracking the information content accessible to adaptive # algorithms, then we must use the current posterior as the prior # for the next step, then add that accordingly. if self._track_adaptive: self._adaptive_bim += self.posterior_bayes_information(expparams)[:, :, 0] # We now can update as normal. SMCUpdater.update(self, outcome, expparams,check_for_resample=check_for_resample)
[ "\n Given an experiment and an outcome of that experiment, updates the\n posterior distribution to reflect knowledge of that experiment.\n\n After updating, resamples the posterior distribution if necessary.\n\n :param int outcome: Label for the outcome that was observed, as defined\n by the :class:`~qinfer.abstract_model.Model` instance under study.\n :param expparams: Parameters describing the experiment that was\n performed.\n :type expparams: :class:`~numpy.ndarray` of dtype given by the\n :attr:`~qinfer.abstract_model.Model.expparams_dtype` property\n of the underlying model\n :param bool check_for_resample: If :obj:`True`, after performing the\n update, the effective sample size condition will be checked and\n a resampling step may be performed.\n " ]
Please provide a description of the function:def design_expparams_field(self, guess, field, cost_scale_k=1.0, disp=False, maxiter=None, maxfun=None, store_guess=False, grad_h=None, cost_mult=False ): r # Define some short names for commonly used properties. up = self._updater m = up.model # Generate a new guess or use a guess provided, depending on the # type of the guess argument. if isinstance(guess, Heuristic): raise NotImplementedError("Not yet implemented.") elif callable(guess): # Generate a new guess by calling the guess function provided. ep = guess( idx_exp=len(up.data_record), mean=up.est_mean(), cov=up.est_covariance_mtx() ) else: # Make a copy of the guess that we can manipulate, but otherwise # use it as-is. ep = np.copy(guess) # Define an objective function that wraps a vector of scalars into # an appropriate record array. if (cost_mult==False): def objective_function(x): ep[field] = x return up.bayes_risk(ep) + cost_scale_k * m.experiment_cost(ep) else: def objective_function(x): ep[field] = x return up.bayes_risk(ep)* m.experiment_cost(ep)**cost_scale_k # Some optimizers require gradients of the objective function. # Here, we create a FiniteDifference object to compute that for # us. d_dx_objective = FiniteDifference(objective_function, ep[field].size) # Allocate a variable to hold the local optimum value found. # This way, if an optimization algorithm doesn't support returning # the value as well as the location, we can find it manually. f_opt = None # Here's the core, where we break out and call the various optimization # routines provided by SciPy. if self._opt_algo == OptimizationAlgorithms.NULL: # This optimization algorithm does nothing locally, but only # exists to leverage the store_guess functionality below. x_opt = guess[0][field] elif self._opt_algo == OptimizationAlgorithms.CG: # Prepare any additional options. opt_options = {} if maxiter is not None: opt_options['maxiter'] = maxiter # Actually call fmin_cg, gathering all outputs we can. x_opt, f_opt, func_calls, grad_calls, warnflag = opt.fmin_cg( objective_function, guess[0][field], disp=disp, full_output=True, **opt_options ) elif self._opt_algo == OptimizationAlgorithms.NCG: # Prepare any additional options. opt_options = {} if maxfun is not None: opt_options['maxfun'] = maxfun if grad_h is not None: opt_options['epsilon'] = grad_h # Actually call fmin_tnc, gathering all outputs we can. # We use fmin_tnc in preference to fmin_ncg, as they implement the # same algorithm, but fmin_tnc seems better behaved with respect # to very flat gradient regions, due to respecting maxfun. # By contrast, fmin_ncg can get stuck in an infinite loop in # versions of SciPy < 0.11. # # Note that in some versions of SciPy, there was a bug in # fmin_ncg and fmin_tnc that can propagate outward if the gradient # is too flat. We catch it here and return the initial guess in that # case, since by hypothesis, it's too flat to make much difference # anyway. 
try: x_opt, f_opt, func_calls, grad_calls, h_calls, warnflag = opt.fmin_tnc( objective_function, guess[0][field], fprime=None, bounds=None, approx_grad=True, disp=disp, full_output=True, **opt_options ) except TypeError: warnings.warn( "Gradient function too flat for NCG.", RuntimeWarning) x_opt = guess[0][field] f_opt = None elif self._opt_algo == OptimizationAlgorithms.NELDER_MEAD: opt_options = {} if maxfun is not None: opt_options['maxfun'] = maxfun if maxiter is not None: opt_options['maxiter'] = maxiter x_opt, f_opt, iters, func_calls, warnflag = opt.fmin( objective_function, guess[0][field], disp=disp, full_output=True, **opt_options ) # Optionally compare the result to previous guesses. if store_guess: # Possibly compute the objective function value at the local optimum # if we don't already know it. if f_opt is None: guess_qual = objective_function(x_opt) # Compare to the known best cost so far. if self.__best_cost is None or (self.__best_cost > f_opt): # No known best yet, or we're better than the previous best, # so record this guess. ep[field] = x_opt self.__best_cost = f_opt self.__best_ep = ep else: ep = self.__best_ep # Guess is bad, return current best guess else: # We aren't using guess recording, so just pack the local optima # into ep for returning. ep[field] = x_opt # In any case, return the optimized guess. return ep
[ "\n Designs a new experiment by varying a single field of a shape ``(1,)``\n record array and minimizing the objective function\n \n .. math::\n O(\\vec{e}) = r(\\vec{e}) + k \\$(\\vec{e}),\n \n where :math:`r` is the Bayes risk as calculated by the updater, and\n where :math:`\\$` is the cost function specified by the model. Here,\n :math:`k` is a parameter specified to relate the units of the risk and\n the cost. See :ref:`expdesign` for more details.\n \n :param guess: Either a record array with a single guess, or\n a callable function that generates guesses.\n :type guess: Instance of :class:`~Heuristic`, `callable`\n or :class:`~numpy.ndarray` of ``dtype``\n :attr:`~qinfer.abstract_model.Simulatable.expparams_dtype`\n :param str field: The name of the ``expparams`` field to be optimized.\n All other fields of ``guess`` will be held constant.\n :param float cost_scale_k: A scale parameter :math:`k` relating the\n Bayes risk to the experiment cost.\n See :ref:`expdesign`.\n :param bool disp: If `True`, the optimization will print additional\n information as it proceeds.\n :param int maxiter: For those optimization algorithms which support\n it (currently, only CG and NELDER_MEAD), limits the number of\n optimization iterations used for each guess.\n :param int maxfun: For those optimization algorithms which support it\n (currently, only NCG and NELDER_MEAD), limits the number of\n objective calls that can be made.\n :param bool store_guess: If ``True``, will compare the outcome of this\n guess to previous guesses and then either store the optimization of\n this experiment, or the previous best-known experiment design.\n :param float grad_h: Step size to use in estimating gradients. Used\n only if ``opt_algo`` is NCG.\n :return: An array representing the best experiment design found so\n far for the current experiment.\n ", "\n Used internally by design_expparams_field.\n If you see this, something probably went wrong.\n ", "\n Used internally by design_expparams_field.\n If you see this, something probably went wrong.\n " ]
Please provide a description of the function:def particle_clusters( particle_locations, particle_weights=None, eps=0.5, min_particles=5, metric='euclidean', weighted=False, w_pow=0.5, quiet=True ): if weighted == True and particle_weights is None: raise ValueError("Weights must be specified for weighted clustering.") # Allocate new arrays to hold the weights and locations. new_weights = np.empty(particle_weights.shape) new_locs = np.empty(particle_locations.shape) # Calculate and possibly reweight the metric. if weighted: M = sklearn.metrics.pairwise.pairwise_distances(particle_locations, metric=metric) M = metrics.weighted_pairwise_distances(M, particle_weights, w_pow=w_pow) # Create and run a SciKit-Learn DBSCAN clusterer. clusterer = sklearn.cluster.DBSCAN( min_samples=min_particles, eps=eps, metric='precomputed' ) cluster_labels = clusterer.fit_predict(M) else: clusterer = sklearn.cluster.DBSCAN( min_samples=min_particles, eps=eps, metric=metric ) cluster_labels = clusterer.fit_predict(particle_locations) # Find out how many clusters were identified. # Cluster counting logic from: # [http://scikit-learn.org/stable/auto_examples/cluster/plot_dbscan.html]. is_noise = -1 in cluster_labels n_clusters = len(set(cluster_labels)) - (1 if is_noise else 0) # If more than 10% of the particles were labeled as NOISE, # warn. n_noise = np.sum(cluster_labels == -1) if n_noise / particle_weights.shape[0] >= 0.1: warnings.warn("More than 10% of the particles were classified as NOISE. Consider increasing the neighborhood size ``eps``.", ResamplerWarning) # Print debugging info. if not quiet: print("[Clustering] DBSCAN identified {} cluster{}. "\ "{} particles identified as NOISE.".format( n_clusters, "s" if n_clusters > 1 else "", n_noise )) # Loop over clusters, calling the secondary resampler for each. # The loop should include -1 if noise was found. for idx_cluster in range(-1 if is_noise else 0, n_clusters): # Grab a boolean array identifying the particles in a particular # cluster. this_cluster = cluster_labels == idx_cluster yield idx_cluster, this_cluster
[ "\n Yields an iterator onto tuples ``(cluster_label, cluster_particles)``,\n where ``cluster_label`` is an `int` identifying the cluster (or ``NOISE``\n for the particles lying outside of all clusters), and where\n ``cluster_particles`` is an array of ``dtype`` `bool` specifying the indices\n of all particles in that cluster. That is, particle ``i`` is in the cluster\n if ``cluster_particles[i] == True``.\n " ]
Please provide a description of the function:def plot_rebit_modelparams(modelparams, rebit_axes=REBIT_AXES, **kwargs): mps = modelparams[:, rebit_axes] * np.sqrt(2) plt.scatter(mps[:, 0], mps[:, 1], **kwargs)
[ "\n Given model parameters representing rebits, plots the\n rebit states as a scatter plot. Additional keyword arguments\n are passed to :ref:`plt.scatter`.\n\n :param np.ndarray modelparams: Model parameters representing\n rebits.\n :param list rebit_axes: List containing indices for the :math:`x`\n and :math:`z` axes.\n " ]
Please provide a description of the function:def plot_decorate_rebits(basis=None, rebit_axes=REBIT_AXES): ax = plt.gca() if basis is not None: labels = list(map(r'$\langle\!\langle {} | \rho \rangle\!\rangle$'.format, # Pick out the x and z by default. [basis.labels[rebit_axes[0]], basis.labels[rebit_axes[1]]] )) plt.xlabel(labels[0]) plt.ylabel(labels[1]) ax.add_artist(plt.Circle([0, 0], 1, color='k', fill=False)) ax.set_xlim(-1.1, 1.1) ax.set_ylim(-1.1, 1.1) ax.set_aspect('equal')
[ "\n Decorates a figure with the boundary of rebit state space\n and basis labels drawn from a :class:`~qinfer.tomography.TomographyBasis`.\n\n :param qinfer.tomography.TomographyBasis basis: Basis to use in\n labeling axes.\n :param list rebit_axes: List containing indices for the :math:`x`\n and :math:`z` axes.\n " ]
Please provide a description of the function:def plot_rebit_prior(prior, rebit_axes=REBIT_AXES, n_samples=2000, true_state=None, true_size=250, force_mean=None, legend=True, mean_color_index=2 ): pallette = plt.rcParams['axes.color_cycle'] plot_rebit_modelparams(prior.sample(n_samples), c=pallette[0], label='Prior', rebit_axes=rebit_axes ) if true_state is not None: plot_rebit_modelparams(true_state, c=pallette[1], label='True', marker='*', s=true_size, rebit_axes=rebit_axes ) if hasattr(prior, '_mean') or force_mean is not None: mean = force_mean if force_mean is not None else prior._mean plot_rebit_modelparams( prior._basis.state_to_modelparams(mean)[None, :], edgecolors=pallette[mean_color_index], s=250, facecolors='none', linewidth=3, label='Mean', rebit_axes=rebit_axes ) plot_decorate_rebits(prior.basis, rebit_axes=rebit_axes ) if legend: plt.legend(loc='lower left', ncol=3, scatterpoints=1)
[ "\n Plots rebit states drawn from a given prior.\n\n :param qinfer.tomography.DensityOperatorDistribution prior: Distribution over\n rebit states to plot.\n :param list rebit_axes: List containing indices for the :math:`x`\n and :math:`z` axes.\n :param int n_samples: Number of samples to draw from the\n prior.\n :param np.ndarray true_state: State to be plotted as a \"true\" state for\n comparison.\n " ]
Please provide a description of the function:def plot_rebit_posterior(updater, prior=None, true_state=None, n_std=3, rebit_axes=REBIT_AXES, true_size=250, legend=True, level=0.95, region_est_method='cov' ): pallette = plt.rcParams['axes.color_cycle'] plot_rebit_modelparams(updater.particle_locations, c=pallette[0], label='Posterior', s=12 * np.sqrt(updater.particle_weights * len(updater.particle_weights)), rebit_axes=rebit_axes, zorder=-10 ) plot_rebit_modelparams(true_state, c=pallette[1], label='True', marker='*', s=true_size, rebit_axes=rebit_axes ) if prior is not None: plot_rebit_modelparams( prior._basis.state_to_modelparams(prior._mean)[None, :], edgecolors=pallette[3], s=250, facecolors='none', linewidth=3, label='Prior Mean', rebit_axes=rebit_axes ) plot_rebit_modelparams( updater.est_mean()[None, :], edgecolors=pallette[2], s=250, facecolors='none', linewidth=3, label='Posterior Mean', rebit_axes=rebit_axes ) if region_est_method == 'cov': # Multiplying by sqrt{2} to rescale to Bloch ball. cov = 2 * updater.est_covariance_mtx() # Use fancy indexing to cut out all but the desired submatrix. cov = cov[rebit_axes, :][:, rebit_axes] plot_cov_ellipse( cov, updater.est_mean()[rebit_axes] * np.sqrt(2), nstd=n_std, edgecolor='k', fill=True, lw=2, facecolor=pallette[0], alpha=0.4, zorder=-9, label='Posterior Cov Ellipse ($Z = {}$)'.format(n_std) ) elif region_est_method == 'hull': # Find the convex hull from the updater, projected # on the rebit axes. faces, vertices = updater.region_est_hull(level, modelparam_slice=rebit_axes) polygon = Polygon(vertices * np.sqrt(2), facecolor=pallette[0], alpha=0.4, zorder=-9, label=r'Credible Region ($\alpha = {}$)'.format(level), edgecolor='k', lw=2, fill=True ) # TODO: consolidate add_patch code with that above. plt.gca().add_patch(polygon) plot_decorate_rebits(updater.model.base_model._basis, rebit_axes=rebit_axes ) if legend: plt.legend(loc='lower left', ncol=4, scatterpoints=1)
[ "\n Plots posterior distributions over rebits, including covariance ellipsoids\n\n :param qinfer.smc.SMCUpdater updater: Posterior distribution over rebits.\n :param qinfer.tomography.DensityOperatorDistribution: Prior distribution\n over rebit states.\n :param np.ndarray true_state: Model parameters for \"true\" state to plot\n as comparison.\n :param float n_std: Number of standard deviations out from the mean\n at which to draw the covariance ellipse. Only used if\n region_est_method is ``'cov'``.\n :param float level: Credibility level to use for computing\n region estimators from convex hulls.\n :param list rebit_axes: List containing indices for the :math:`x`\n and :math:`z` axes.\n :param str region_est_method: Method to use to draw region estimation.\n Must be one of None, ``'cov'`` or ``'hull'``.\n " ]
Please provide a description of the function:def data_to_params(data, expparams_dtype,
        col_outcomes=(0, 'counts'),
        cols_expparams=None
    ):
    BY_IDX, BY_NAME = range(2)

    is_exp_scalar = np.issctype(expparams_dtype)
    is_data_scalar = np.issctype(data.dtype) and not data.dtype.fields

    s_ = (
        (lambda idx: np.s_[..., idx[BY_IDX]])
        if is_data_scalar else
        (lambda idx: np.s_[idx[BY_NAME]])
    )

    outcomes = data[s_(col_outcomes)].astype(int)

    # Allocate the expparams array, then fill it from the remaining columns.
    expparams = np.empty(outcomes.shape, dtype=expparams_dtype)
    if is_exp_scalar:
        expparams[:] = data[s_(cols_expparams)]
    else:
        for expparams_key, column in cols_expparams.items():
            expparams[expparams_key] = data[s_(column)]

    return outcomes, expparams
[ "\n Given data as a NumPy array, separates out each column either as\n the outcomes, or as a field of an expparams array. Columns may be specified\n either as indices into a two-axis scalar array, or as field names for a one-axis\n record array.\n\n Since scalar arrays are homogenous in type, this may result in loss of precision\n due to casting between data types.\n " ]
Please provide a description of the function:def simple_est_prec(data, freq_min=0.0, freq_max=1.0, n_particles=6000, return_all=False): model = BinomialModel(SimplePrecessionModel(freq_min)) prior = UniformDistribution([0, freq_max]) data = load_data_or_txt(data, [ ('counts', 'uint'), ('t', float), ('n_shots', 'uint') ]) outcomes, expparams = data_to_params(data, model.expparams_dtype, cols_expparams={ 'x': (1, 't'), 'n_meas': (2, 'n_shots') } ) return do_update( model, n_particles, prior, outcomes, expparams, return_all )
[ "\n Estimates a simple precession (cos²) from experimental data.\n Note that this model is mainly for testing purposes, as it does not\n consider the phase or amplitude of precession, leaving only the frequency.\n\n :param data: Data to be used in estimating the precession frequency.\n :type data: see :ref:`simple_est_data_arg`\n :param float freq_min: The minimum feasible frequency to consider.\n :param float freq_max: The maximum feasible frequency to consider.\n :param int n_particles: The number of particles to be used in estimating\n the precession frequency.\n :param bool return_all: Controls whether additional return\n values are provided, such as the updater.\n\n :column counts (int): How many counts were observed at the sampled\n time.\n :column t (float): The evolution time at which the samples\n were collected.\n :column n_shots (int): How many samples were collected at the\n given evolution time.\n\n :return mean: Bayesian mean estimator for the precession frequency.\n :return var: Variance of the final posterior over frequency.\n :return extra: See :ref:`simple_est_extra_return`. Only returned\n if ``return_all`` is `True`.\n " ]
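A hedged usage sketch for simple_est_prec (the package-level import is assumed, and every number in the data array below is invented purely to show the expected column layout of counts, t, n_shots):

import numpy as np
from qinfer import simple_est_prec   # assumed to be exported at package level

# Columns: counts, evolution time t, shots per time (values are illustrative).
data = np.array([
    [ 96,  1.0, 100],
    [ 55,  5.0, 100],
    [ 12, 10.0, 100],
])

mean, var = simple_est_prec(data, freq_max=1.0, n_particles=4000)
print(mean, np.sqrt(var))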
Please provide a description of the function:def simple_est_rb(data, interleaved=False, p_min=0.0, p_max=1.0, n_particles=8000, return_all=False): r model = BinomialModel(RandomizedBenchmarkingModel(interleaved=interleaved)) prior = PostselectedDistribution(UniformDistribution([ [p_min, p_max], [0, 1], [0, 1] ] if not interleaved else [ [p_min, p_max], [p_min, p_max], [0, 1], [0, 1] ]), model ) data = load_data_or_txt(data, [ ('counts', 'uint'), ('m', 'uint'), ('n_shots', 'uint') ] + ([ ('reference', 'uint') ] if interleaved else [])) cols_expparams = { 'm': (1, 'm'), 'n_meas': (2, 'n_shots') } if interleaved: cols_expparams['reference'] = (3, 'reference') outcomes, expparams = data_to_params(data, model.expparams_dtype, cols_expparams=cols_expparams ) return do_update( model, n_particles, prior, outcomes, expparams, return_all )
[ "\n Estimates the fidelity of a gateset from a standard or interleaved randomized benchmarking\n experiment.\n \n :param data: Data to be used in estimating the gateset fidelity.\n :type data: see :ref:`simple_est_data_arg`\n :param bool interleaved: If `True`, the data are assumed to come from an\n interleaved randomized benchmarking experiment and must include the\n ``reference`` column.\n :param float p_min: Minimum value of the parameter :math:`p`\n to consider feasible.\n :param float p_max: Maximum value of the parameter :math:`p`\n to consider feasible.\n :param int n_particles: The number of particles to be used in estimating\n the randomized benchmarking model.\n :param bool return_all: Controls whether additional return\n values are provided, such as the updater.\n\n :column counts (int): How many sequences of length :math:`m` were observed to\n survive.\n :column m (int): How many gates were used for sequences in this row of the data.\n :column n_shots (int): How many different sequences of length :math:`m`\n were measured.\n :column reference (bool): `True` if this row represents reference sequences, or\n `False` if the gate of interest is interleaved. Note that this column is omitted\n if ``interleaved`` is `False`.\n\n :return mean: Bayesian mean estimator for the model vector\n :math:`(p, A, B)`, or :math:`(\\tilde{p}, p_{\\text{ref}}, A, B)`\n for the interleaved case.\n :return var: Variance of the final posterior over RB model vectors.\n :return extra: See :ref:`simple_est_extra_return`. Only returned\n if ``return_all`` is `True`.\n " ]
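A hedged usage sketch for the non-interleaved case of simple_est_rb (the package-level import is assumed; the survival counts and sequence lengths below are invented to show the counts, m, n_shots column layout):

import numpy as np
from qinfer import simple_est_rb   # assumed to be exported at package level

# Columns: surviving counts, sequence length m, shots per length (illustrative).
data = np.array([
    [195,   1, 200],
    [186,  10, 200],
    [172,  25, 200],
    [138,  75, 200],
    [118, 150, 200],
])

mean, cov = simple_est_rb(data, n_particles=4000)
print(mean)   # Bayesian mean estimate of (p, A, B)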
Please provide a description of the function:def canonicalize(self, modelparams): modelparams = np.apply_along_axis(self.trunc_neg_eigs, 1, modelparams) # Renormalizes particles if allow_subnormalized=False. if not self._allow_subnormalied: modelparams = self.renormalize(modelparams) return modelparams
[ "\n Truncates negative eigenvalues from each\n state represented by a tensor of model parameter\n vectors, and renormalizes as appropriate.\n\n :param np.ndarray modelparams: Array of shape\n ``(n_states, dim**2)`` containing model parameter\n representations of each of ``n_states`` different\n states.\n :return: The same model parameter tensor with all\n states truncated to be positive operators. If\n :attr:`~TomographyModel.allow_subnormalized` is\n `False`, all states are also renormalized to trace\n one. \n " ]
Please provide a description of the function:def trunc_neg_eigs(self, particle): arr = np.tensordot(particle, self._basis.data.conj(), 1) w, v = np.linalg.eig(arr) if np.all(w >= 0): return particle else: w[w < 0] = 0 new_arr = np.dot(v * w, v.conj().T) new_particle = np.real(np.dot(self._basis.flat(), new_arr.flatten())) assert new_particle[0] > 0 return new_particle
[ "\n Given a state represented as a model parameter vector,\n returns a model parameter vector representing the same\n state with any negative eigenvalues set to zero.\n\n :param np.ndarray particle: Vector of length ``(dim ** 2, )``\n representing a state.\n :return: The same state with any negative eigenvalues\n set to zero.\n " ]
Please provide a description of the function:def renormalize(self, modelparams): # The 0th basis element (identity) should have # a value 1 / sqrt{dim}, since the trace of that basis # element is fixed to be sqrt{dim} by convention. norm = modelparams[:, 0] * np.sqrt(self._dim) assert not np.sum(norm == 0) return modelparams / norm[:, None]
[ "\n Renormalizes one or more states represented as model\n parameter vectors, such that each state has trace 1.\n\n :param np.ndarray modelparams: Array of shape ``(n_states,\n dim ** 2)`` representing one or more states as \n model parameter vectors.\n :return: The same state, normalized to trace one.\n " ]
Please provide a description of the function:def n_members(self): if self.is_finite: return reduce(mul, [domain.n_members for domain in self._domains], 1) else: return np.inf
[ "\n Returns the number of members in the domain if it\n `is_finite`, otherwise, returns `np.inf`.\n\n :type: ``int`` or ``np.inf``\n " ]
Please provide a description of the function:def values(self): separate_values = [domain.values for domain in self._domains] return np.concatenate([ join_struct_arrays(list(map(np.array, value))) for value in product(*separate_values) ])
[ "\n Returns an `np.array` of type `dtype` containing\n some values from the domain.\n For domains where `is_finite` is ``True``, all elements\n of the domain will be yielded exactly once.\n\n :rtype: `np.ndarray`\n " ]
Please provide a description of the function:def from_regular_arrays(self, arrays): return self._mytype(join_struct_arrays([ array.astype(dtype) for dtype, array in zip(self._dtypes, arrays) ]))
[ "\n Merges a list of arrays (of the same shape) of dtypes \n corresponding to the factor domains into a single array \n with the dtype of the ``ProductDomain``.\n\n :param list array: A list with each element of type ``np.ndarray``\n\n :rtype: `np.ndarray`\n " ]
Please provide a description of the function:def in_domain(self, points): return all([ domain.in_domain(array) for domain, array in zip(self._domains, separate_struct_array(points, self._dtypes)) ])
[ "\n Returns ``True`` if all of the given points are in the domain,\n ``False`` otherwise.\n\n :param np.ndarray points: An `np.ndarray` of type `self.dtype`.\n\n :rtype: `bool`\n " ]
Please provide a description of the function:def example_point(self): if not np.isinf(self.min): return np.array([self.min], dtype=self.dtype) if not np.isinf(self.max): return np.array([self.max], dtype=self.dtype) else: return np.array([0], dtype=self.dtype)
[ "\n Returns any single point guaranteed to be in the domain, but\n no other guarantees; useful for testing purposes.\n This is given as a size 1 ``np.array`` of type ``dtype``.\n\n :type: ``np.ndarray``\n " ]
Please provide a description of the function:def in_domain(self, points): if np.all(np.isreal(points)): are_greater = np.all(np.greater_equal(points, self._min)) are_smaller = np.all(np.less_equal(points, self._max)) return are_greater and are_smaller else: return False
[ "\n Returns ``True`` if all of the given points are in the domain,\n ``False`` otherwise.\n\n :param np.ndarray points: An `np.ndarray` of type `self.dtype`.\n\n :rtype: `bool`\n " ]
Please provide a description of the function:def min(self): return int(self._min) if not np.isinf(self._min) else self._min
[ "\n Returns the minimum value of the domain.\n\n :rtype: `int` or `np.inf`\n " ]
Please provide a description of the function:def max(self): return int(self._max) if not np.isinf(self._max) else self._max
[ "\n Returns the maximum value of the domain.\n\n :rtype: `int` or `np.inf`\n " ]
Please provide a description of the function:def is_finite(self): return not np.isinf(self.min) and not np.isinf(self.max)
[ "\n Whether or not the domain contains a finite number of points.\n\n :type: `bool`\n " ]
Please provide a description of the function:def n_members(self): if self.is_finite: return int(self.max - self.min + 1) else: return np.inf
[ "\n Returns the number of members in the domain if it\n `is_finite`, otherwise, returns `np.inf`.\n\n :type: ``int`` or ``np.inf``\n " ]
Please provide a description of the function:def values(self): if self.is_finite: return np.arange(self.min, self.max + 1, dtype = self.dtype) else: return self.example_point
[ "\n Returns an `np.array` of type `self.dtype` containing\n some values from the domain.\n For domains where ``is_finite`` is ``True``, all elements\n of the domain will be yielded exactly once.\n\n :rtype: `np.ndarray`\n " ]
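A minimal sketch of using the integer domain described above (assumes IntegerDomain is re-exported at the package level; if not, import it from qinfer.domains):

from qinfer import IntegerDomain   # assumed import location

d = IntegerDomain(min=0, max=5)
print(d.n_members)            # 6
print(d.values)               # [0 1 2 3 4 5]
print(d.in_domain(d.values))  # True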
Please provide a description of the function:def n_members(self): return int(binom(self.n_meas + self.n_elements -1, self.n_elements - 1))
[ "\n Returns the number of members in the domain if it\n `is_finite`, otherwise, returns `None`.\n\n :type: ``int``\n " ]
Please provide a description of the function:def example_point(self): return np.array([([self.n_meas] + [0] * (self.n_elements-1),)], dtype=self.dtype)
[ "\n Returns any single point guaranteed to be in the domain, but\n no other guarantees; useful for testing purposes.\n This is given as a size 1 ``np.array`` of type ``dtype``.\n\n :type: ``np.ndarray``\n " ]
Please provide a description of the function:def values(self): # This code comes from Jared Goguen at http://stackoverflow.com/a/37712597/1082565 partition_array = np.empty((self.n_members, self.n_elements), dtype=int) masks = np.identity(self.n_elements, dtype=int) for i, c in enumerate(combinations_with_replacement(masks, self.n_meas)): partition_array[i,:] = sum(c) # Convert to dtype before returning return self.from_regular_array(partition_array)
[ "\n Returns an `np.array` of type `self.dtype` containing\n some values from the domain.\n For domains where ``is_finite`` is ``True``, all elements\n of the domain will be yielded exactly once.\n\n :rtype: `np.ndarray`\n " ]
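A small sketch enumerating the multinomial domain above (the module path qinfer.domains is an assumption; the enumeration order follows from the combinations_with_replacement construction shown in the code):

from qinfer.domains import MultinomialDomain   # assumed module path

# All ways of distributing 3 rolls over the faces of a 2-sided "die".
d = MultinomialDomain(n_elements=2, n_meas=3)
print(d.n_members)     # binom(3 + 2 - 1, 2 - 1) = 4
for v in d.values:
    print(v)           # count tuples (3, 0), (2, 1), (1, 2), (0, 3)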
Please provide a description of the function:def to_regular_array(self, A): # this could be a static method, but we choose to be consistent with # from_regular_array return A.view((int, len(A.dtype.names))).reshape(A.shape + (-1,))
[ "\n Converts from an array of type `self.dtype` to an array\n of type `int` with an additional index labeling the\n tuple indices.\n\n :param np.ndarray A: An `np.array` of type `self.dtype`.\n\n :rtype: `np.ndarray`\n " ]
Please provide a description of the function:def from_regular_array(self, A): dims = A.shape[:-1] return A.reshape((np.prod(dims),-1)).view(dtype=self.dtype).squeeze(-1).reshape(dims)
[ "\n Converts from an array of type `int` where the last index\n is assumed to have length `self.n_elements` to an array\n of type `self.dtype` with one fewer index.\n\n :param np.ndarray A: An `np.array` of type `int`.\n\n :rtype: `np.ndarray`\n " ]
Please provide a description of the function:def in_domain(self, points): array_view = self.to_regular_array(points) non_negative = np.all(np.greater_equal(array_view, 0)) correct_sum = np.all(np.sum(array_view, axis=-1) == self.n_meas) return non_negative and correct_sum
[ "\n Returns ``True`` if all of the given points are in the domain,\n ``False`` otherwise.\n\n :param np.ndarray points: An `np.ndarray` of type `self.dtype`.\n\n :rtype: `bool`\n " ]
Please provide a description of the function:def start(self, max): try: self.widget.max = max display(self.widget) except: pass
[ "\n Displays the progress bar for a given maximum value.\n\n :param float max: Maximum value of the progress bar.\n " ]
Please provide a description of the function:def likelihood(self, outcomes, modelparams, expparams): # By calling the superclass implementation, we can consolidate # call counting there. super(QubitStatePauliModel, self).likelihood(outcomes, modelparams, expparams) # Note that expparams['axis'] has shape (n_exp, 3). pr0 = 0.5*(1 + np.sum(modelparams*expparams['axis'],1)) # Note that expparams['vis'] has shape (n_exp, ). pr0 = expparams['vis'] * pr0 + (1 - expparams['vis']) * 0.5 pr0 = pr0[:,np.newaxis] # Now we concatenate over outcomes. return Model.pr0_to_likelihood_array(outcomes, pr0)
[ "\n Calculates the likelihood function at the states specified \n by modelparams and measurement specified by expparams.\n This is given by the Born rule and is the probability of\n outcomes given the state and measurement operator.\n \n Parameters\n ----------\n outcomes = \n measurement outcome\n expparams = \n Bloch vector of measurement axis and visibility\n modelparams = \n quantum state Bloch vector\n " ]
Please provide a description of the function:def likelihood(self, outcomes, modelparams, expparams): # By calling the superclass implementation, we can consolidate # call counting there. super(RebitStatePauliModel, self).likelihood(outcomes, modelparams, expparams) pr0 = np.zeros((modelparams.shape[0], expparams.shape[0])) # Note that expparams['axis'] has shape (n_exp, 3). pr0 = 0.5*(1 + np.sum(modelparams*expparams['axis'],1)) # Use the following hack if you don't want to ensure positive weights pr0[pr0 < 0] = 0 pr0[pr0 > 1] = 1 pr0 = pr0[:,np.newaxis] # Now we concatenate over outcomes. return Model.pr0_to_likelihood_array(outcomes, pr0)
[ "\n Calculates the likelihood function at the states specified \n by modelparams and measurement specified by expparams.\n This is given by the Born rule and is the probability of\n outcomes given the state and measurement operator.\n \n Parameters\n ----------\n outcomes = \n measurement outcome\n expparams = \n Bloch vector of measurement axis\n modelparams = \n quantum state Bloch vector\n " ]
Please provide a description of the function:def likelihood(self, outcomes, modelparams, expparams): # By calling the superclass implementation, we can consolidate # call counting there. super(MultiQubitStatePauliModel, self).likelihood(outcomes, modelparams, expparams) # Note that expparams['axis'] has shape (n_exp, 3). pr0 = 0.5*(1 + modelparams[:,expparams['pauli']]) # Use the following hack if you don't want to ensure positive weights pr0[pr0 < 0] = 0 pr0[pr0 > 1] = 1 # Note that expparams['vis'] has shape (n_exp, ). pr0 = expparams['vis'] * pr0 + (1 - expparams['vis']) * 0.5 # Now we concatenate over outcomes. return Model.pr0_to_likelihood_array(outcomes, pr0)
[ "\n Calculates the likelihood function at the states specified \n by modelparams and measurement specified by expparams.\n This is given by the Born rule and is the probability of\n outcomes given the state and measurement operator.\n " ]
Please provide a description of the function:def simulate_experiment(self, modelparams, expparams, repeat=1): super(PoisonedModel, self).simulate_experiment(modelparams, expparams, repeat) return self.underlying_model.simulate_experiment(modelparams, expparams, repeat)
[ "\n Simulates experimental data according to the original (unpoisoned)\n model. Note that this explicitly causes the simulated data and the\n likelihood function to disagree. This is, strictly speaking, a violation\n of the assumptions made about `~qinfer.abstract_model.Model` subclasses.\n This violation is by intention, and allows for testing the robustness\n of inference algorithms against errors in that assumption.\n " ]
Please provide a description of the function:def domain(self, expparams): return [IntegerDomain(min=0,max=n_o-1) for n_o in self.n_outcomes(expparams)]
[ "\n Returns a list of ``Domain``s, one for each input expparam.\n\n :param numpy.ndarray expparams: Array of experimental parameters. This\n array must be of dtype agreeing with the ``expparams_dtype``\n property, or, in the case where ``n_outcomes_constant`` is ``True``,\n ``None`` should be a valid input.\n\n :rtype: list of ``Domain``\n " ]
Please provide a description of the function:def underlying_likelihood(self, binary_outcomes, modelparams, expparams): original_mps = modelparams[..., self._orig_mps_slice] return self.underlying_model.likelihood(binary_outcomes, original_mps, expparams)
[ "\n Given outcomes hypothesized for the underlying model, returns the likelihood\n with which those outcomes occur.\n " ]
Please provide a description of the function:def n_outcomes(self, expparams): # Standard combinatorial formula equal to the number of # possible tuples whose non-negative integer entries sum to n_meas. n = expparams['n_meas'] k = self.n_sides return scipy.special.binom(n + k - 1, k - 1)
[ "\n Returns an array of dtype ``uint`` describing the number of outcomes\n for each experiment specified by ``expparams``.\n \n :param numpy.ndarray expparams: Array of experimental parameters. This\n array must be of dtype agreeing with the ``expparams_dtype``\n property.\n " ]
Please provide a description of the function:def domain(self, expparams): return [ MultinomialDomain(n_elements=self.n_sides, n_meas=ep['n_meas']) for ep in expparams ]
[ "\n Returns a list of :class:`Domain` objects, one for each input expparam.\n :param numpy.ndarray expparams: Array of experimental parameters. This\n array must be of dtype agreeing with the ``expparams_dtype``\n property.\n :rtype: list of ``Domain``\n " ]
Please provide a description of the function:def est_update_covariance(self, modelparams): if self._diagonal: cov = (self._fixed_scale ** 2 if self._has_fixed_covariance \ else np.mean(modelparams[:, self._srw_idxs] ** 2, axis=0)) cov = np.diag(cov) else: if self._has_fixed_covariance: cov = np.dot(self._fixed_chol, self._fixed_chol.T) else: chol = np.zeros((modelparams.shape[0], self._n_rw, self._n_rw)) chol[(np.s_[:],) + self._srw_tri_idxs] = modelparams[:, self._srw_idxs] cov = np.mean(np.einsum('ijk,ilk->ijl', chol, chol), axis=0) return cov
[ "\n Returns the covariance of the gaussian noise process for one \n unit step. In the case where the covariance is being learned,\n the expected covariance matrix is returned.\n \n :param modelparams: Shape `(n_models, n_modelparams)` shape array\n of model parameters.\n " ]
Please provide a description of the function:def are_expparam_dtypes_consistent(self, expparams): if self.is_n_outcomes_constant: # This implies that all domains are equal, so this must be true return True # otherwise we have to actually check all the dtypes if expparams.size > 0: domains = self.domain(expparams) first_dtype = domains[0].dtype return all(domain.dtype == first_dtype for domain in domains[1:]) else: return True
[ "\n Returns ``True`` iff all of the given expparams \n correspond to outcome domains with the same dtype.\n For efficiency, concrete subclasses should override this method \n if the result is always ``True``.\n\n :param np.ndarray expparams: Array of expparamms \n of type ``expparams_dtype``\n :rtype: ``bool``\n " ]
Please provide a description of the function:def simulate_experiment(self, modelparams, expparams, repeat=1): self._sim_count += modelparams.shape[0] * expparams.shape[0] * repeat assert(self.are_expparam_dtypes_consistent(expparams))
[ "\n Produces data according to the given model parameters and experimental\n parameters, structured as a NumPy array.\n\n :param np.ndarray modelparams: A shape ``(n_models, n_modelparams)``\n array of model parameter vectors describing the hypotheses under\n which data should be simulated.\n :param np.ndarray expparams: A shape ``(n_experiments, )`` array of\n experimental control settings, with ``dtype`` given by \n :attr:`~qinfer.Model.expparams_dtype`, describing the\n experiments whose outcomes should be simulated.\n :param int repeat: How many times the specified experiment should\n be repeated.\n :rtype: np.ndarray\n :return: A three-index tensor ``data[i, j, k]``, where ``i`` is the repetition,\n ``j`` indexes which vector of model parameters was used, and where\n ``k`` indexes which experimental parameters where used. If ``repeat == 1``,\n ``len(modelparams) == 1`` and ``len(expparams) == 1``, then a scalar\n datum is returned instead.\n \n " ]
Please provide a description of the function:def distance(self, a, b): r return np.apply_along_axis( lambda vec: np.linalg.norm(vec, 1), 1, self.Q * (a - b) )
[ "\n Gives the distance between two model parameter vectors :math:`\\vec{a}` and\n :math:`\\vec{b}`. By default, this is the vector 1-norm of the difference\n :math:`\\mathbf{Q} (\\vec{a} - \\vec{b})` rescaled by\n :attr:`~Model.Q`.\n \n :param np.ndarray a: Array of model parameter vectors having shape\n ``(n_models, n_modelparams)``.\n :param np.ndarray b: Array of model parameters to compare to, having\n the same shape as ``a``.\n :return: An array ``d`` of distances ``d[i]`` between ``a[i, :]`` and\n ``b[i, :]``.\n " ]
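A standalone numerical illustration of the rescaled 1-norm distance defined above, with a hypothetical scale vector standing in for the model's Q attribute:

import numpy as np

# Hypothetical per-parameter scale vector; in the library this is Model.Q.
Q = np.array([1.0, 10.0])

a = np.array([[0.5, 0.10], [0.2, 0.30]])
b = np.array([[0.4, 0.12], [0.2, 0.25]])

# Rescaled 1-norm distance between corresponding rows of a and b.
d = np.apply_along_axis(lambda v: np.linalg.norm(v, 1), 1, Q * (a - b))
print(d)   # [0.3, 0.5]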
Please provide a description of the function:def update_timestep(self, modelparams, expparams): r return np.tile(modelparams, (expparams.shape[0],1,1)).transpose((1,2,0))
[ "\n Returns a set of model parameter vectors that is the update of an\n input set of model parameter vectors, such that the new models are\n conditioned on a particular experiment having been performed.\n By default, this is the trivial function\n :math:`\\vec{x}(t_{k+1}) = \\vec{x}(t_k)`.\n \n :param np.ndarray modelparams: Set of model parameter vectors to be\n updated.\n :param np.ndarray expparams: An experiment parameter array describing\n the experiment that was just performed.\n \n :return np.ndarray: Array of shape\n ``(n_models, n_modelparams, n_experiments)`` describing the update\n of each model according to each experiment.\n " ]
Please provide a description of the function:def likelihood(self, outcomes, modelparams, expparams): r # Count the number of times the inner-most loop is called. self._call_count += ( safe_shape(outcomes) * safe_shape(modelparams) * safe_shape(expparams) )
[ "\n Calculates the probability of each given outcome, conditioned on each\n given model parameter vector and each given experimental control setting.\n\n :param np.ndarray modelparams: A shape ``(n_models, n_modelparams)``\n array of model parameter vectors describing the hypotheses for\n which the likelihood function is to be calculated.\n :param np.ndarray expparams: A shape ``(n_experiments, )`` array of\n experimental control settings, with ``dtype`` given by \n :attr:`~qinfer.Simulatable.expparams_dtype`, describing the\n experiments from which the given outcomes were drawn.\n \n :rtype: np.ndarray\n :return: A three-index tensor ``L[i, j, k]``, where ``i`` is the outcome\n being considered, ``j`` indexes which vector of model parameters was used,\n and where ``k`` indexes which experimental parameters where used.\n Each element ``L[i, j, k]`` then corresponds to the likelihood\n :math:`\\Pr(d_i | \\vec{x}_j; e_k)`.\n " ]
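A hedged sketch of how a concrete two-outcome model fills in this likelihood contract, using the FiniteOutcomeModel base class and the pr0_to_likelihood_array helper shown later in this excerpt (the base-class name, its default constructor, and the expparams field name n_flips are assumptions for illustration):

import numpy as np
from qinfer import FiniteOutcomeModel   # assumed base class

class CoinModel(FiniteOutcomeModel):
    # One model parameter: the probability p of obtaining outcome 0.

    @property
    def n_modelparams(self):
        return 1

    @property
    def expparams_dtype(self):
        return [('n_flips', 'uint')]   # hypothetical field, unused below

    @property
    def is_n_outcomes_constant(self):
        return True

    def n_outcomes(self, expparams):
        return 2

    def are_models_valid(self, modelparams):
        return np.all(np.logical_and(modelparams >= 0, modelparams <= 1), axis=1)

    def likelihood(self, outcomes, modelparams, expparams):
        # Let the base class do its call counting, then build Pr(0 | p; e) = p.
        super(CoinModel, self).likelihood(outcomes, modelparams, expparams)
        pr0 = np.tile(modelparams, (1, expparams.shape[0]))
        return CoinModel.pr0_to_likelihood_array(outcomes, pr0)

model = CoinModel()
eps = np.array([(10,)], dtype=model.expparams_dtype)
L = model.likelihood(np.array([0, 1]), np.array([[0.3], [0.7]]), eps)
print(L.shape)   # (2, 2, 1): outcome index, model index, experiment index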
Please provide a description of the function:def domain(self, expparams): # As a convenience to most users, we define domain for them. If a # fancier domain is desired, this method can easily be overridden. if self.is_n_outcomes_constant: return self._domain if expparams is None else [self._domain for ep in expparams] else: return [IntegerDomain(min=0,max=n_o-1) for n_o in self.n_outcomes(expparams)]
[ "\n Returns a list of :class:`Domain` objects, one for each input expparam.\n\n :param numpy.ndarray expparams: Array of experimental parameters. This\n array must be of dtype agreeing with the ``expparams_dtype``\n property, or, in the case where ``n_outcomes_constant`` is ``True``,\n ``None`` should be a valid input.\n\n :rtype: list of ``Domain``\n " ]
Please provide a description of the function:def pr0_to_likelihood_array(outcomes, pr0): pr0 = pr0[np.newaxis, ...] pr1 = 1 - pr0 if len(np.shape(outcomes)) == 0: outcomes = np.array(outcomes)[None] return np.concatenate([ pr0 if outcomes[idx] == 0 else pr1 for idx in range(safe_shape(outcomes)) ])
[ "\n Assuming a two-outcome measurement with probabilities given by the\n array ``pr0``, returns an array of the form expected to be returned by\n ``likelihood`` method.\n \n :param numpy.ndarray outcomes: Array of integers indexing outcomes.\n :param numpy.ndarray pr0: Array of shape ``(n_models, n_experiments)``\n describing the probability of obtaining outcome ``0`` from each\n set of model parameters and experiment parameters.\n " ]
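A small plain-NumPy illustration of what the helper above produces, replicating its logic directly so the shapes are easy to see (the probabilities are arbitrary):

import numpy as np

pr0 = np.array([[0.8], [0.3]])   # shape (n_models, n_experiments) = (2, 1)
outcomes = np.array([0, 1])

pr0_ = pr0[np.newaxis, ...]
pr1_ = 1 - pr0_
L = np.concatenate([pr0_ if o == 0 else pr1_ for o in outcomes])
print(L.shape)      # (2, 2, 1): outcome index, model index, experiment index
print(L[:, :, 0])   # [[0.8, 0.3], [0.2, 0.7]]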
Please provide a description of the function:def fisher_information(self, modelparams, expparams): if self.is_n_outcomes_constant: outcomes = np.arange(self.n_outcomes(expparams)) scores, L = self.score(outcomes, modelparams, expparams, return_L=True) assert len(scores.shape) in (3, 4) if len(scores.shape) == 3: scores = scores[np.newaxis, :, :, :] # Note that E[score] = 0 by regularity assumptions, so we only # need the expectation over the outer product. return np.einsum("ome,iome,jome->ijme", L, scores, scores ) else: # Indexing will be a major pain here, so we need to start # by making an empty array, so that index errors will be raised # when (not if!) we make mistakes. fisher = np.empty(( self.n_modelparams, self.n_modelparams, modelparams.shape[0], expparams.shape[0] )) # Now we loop over experiments, since we cannot vectorize the # expectation value over data. for idx_experiment, experiment in enumerate(expparams): experiment = experiment.reshape((1,)) n_o = self.n_outcomes(experiment) outcomes = np.arange(n_o) scores, L = self.score(outcomes, modelparams, experiment, return_L=True) fisher[:, :, :, idx_experiment] = np.einsum("ome,iome,jome->ijme", L, scores, scores ) return fisher
[ "\n Returns the covariance of the score taken over possible outcomes,\n known as the Fisher information.\n \n The result is represented as the four-index tensor\n ``fisher[idx_modelparam_i, idx_modelparam_j, idx_model, idx_experiment]``,\n which gives the Fisher information matrix for each model vector\n and each experiment vector.\n \n .. note::\n \n The default implementation of this method calls\n :meth:`~DifferentiableModel.score()` for each possible outcome,\n which can be quite slow. If possible, overriding this method can\n give significant speed advantages.\n " ]
Please provide a description of the function:def get_qutip_module(required_version='3.2'): try: import qutip as qt from distutils.version import LooseVersion _qt_version = LooseVersion(qt.version.version) if _qt_version < LooseVersion(required_version): return None except ImportError: return None return qt
[ "\n Attempts to return the qutip module, but\n silently returns ``None`` if it can't be\n imported, or doesn't have version at\n least ``required_version``.\n\n :param str required_version: Valid input to\n ``distutils.version.LooseVersion``.\n :return: The qutip module or ``None``.\n :rtype: ``module`` or ``NoneType``\n " ]
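A short usage sketch for the helper above, falling back gracefully when QuTiP is missing or too old (the module path qinfer.utils is an assumption; adjust the import if the helper lives elsewhere):

from qinfer.utils import get_qutip_module   # assumed module path

qt = get_qutip_module('3.2')
if qt is None:
    print("QuTiP >= 3.2 is not available; skipping QuTiP-backed features.")
else:
    print("Using QuTiP", qt.version.version)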
Please provide a description of the function:def multinomial_pdf(n,p): r # work in log space to avoid overflow log_N_fac = gammaln(np.sum(n, axis=0) + 1)[np.newaxis,...] log_n_fac_sum = np.sum(gammaln(n + 1), axis=0) # since working in log space, we need special # consideration at p=0. deal with p=0, n>0 later. def nlogp(n,p): result = np.zeros(p.shape) mask = p!=0 result[mask] = n[mask] * np.log(p[mask]) return result if p.shape[0] == n.shape[0] - 1: ep = np.empty(n.shape) ep[:p.shape[0],...] = p ep[-1,...] = 1-np.sum(p,axis=0) else: ep = p log_p_sum = np.sum(nlogp(n, ep), axis=0) probs = np.exp(log_N_fac - log_n_fac_sum + log_p_sum) # if n_k>0 but p_k=0, the whole probability must be 0 mask = np.sum(np.logical_and(n!=0, ep==0), axis=0) == 0 probs = mask * probs return probs[0,...]
[ "\n Returns the PDF of the multinomial distribution\n :math:`\\operatorname{Multinomial}(N, n, p)=\n \\frac{N!}{n_1!\\cdots n_k!}p_1^{n_1}\\cdots p_k^{n_k}`\n\n :param np.ndarray n : Array of outcome integers\n of shape ``(sides, ...)`` where sides is the number of\n sides on the dice and summing over this index indicates\n the number of rolls for the given experiment.\n :param np.ndarray p : Array of (assumed) probabilities\n of shape ``(sides, ...)`` or ``(sides-1,...)``\n with the rest of the dimensions the same as ``n``.\n If ``sides-1``, the last probability is chosen so that the\n probabilities of all sides sums to 1. If ``sides``\n is the last index, these probabilities are assumed\n to sum to 1.\n\n Note that the numbers of experiments don't need to be given because\n they are implicit in the sum over the 0 index of ``n``.\n " ]
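A quick numerical check of the helper above against SciPy's multinomial distribution (the import path qinfer.utils is assumed, SciPy >= 0.19 is required for scipy.stats.multinomial, and the counts and probabilities are arbitrary):

import numpy as np
from scipy.stats import multinomial          # requires SciPy >= 0.19
from qinfer.utils import multinomial_pdf     # assumed module path

n = np.array([[3], [2], [1]])        # counts per side for one experiment
p = np.array([[0.5], [0.3], [0.2]])  # probabilities summing to one

print(multinomial_pdf(n, p))                               # ~[0.135]
print(multinomial.pmf([3, 2, 1], n=6, p=[0.5, 0.3, 0.2]))  # ~0.135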