Dataset columns: text_prompt (string, lengths 157 to 13.1k) and code_prompt (string, lengths 7 to 19.8k).
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def selection(self): """A complete |Selection| object of all "supplying" and "routing" elements and required nodes. Selection("complete", nodes=("node_1123", "node_1125", "node_11269", "node_1129", "node_113", "node_outlet"), elements=("land_111", "land_1121", "land_1122", "land_1123", "land_1124", "land_1125", "land_11261", "land_11262", "land_11269", "land_1129", "land_113", "stream_1123", "stream_1125", "stream_11269", "stream_1129", "stream_113")) Besides the possible modifications on the names of the different nodes and elements, the name of the selection can be set differently: Selection("sel", """
return selectiontools.Selection( self.selection_name, self.nodes, self.elements)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def chars2str(chars) -> List[str]: """Inversion function of function |str2chars|. ['zeros', 'ones'] [] """
strings = collections.deque()
for subchars in chars:
    substrings = collections.deque()
    for char in subchars:
        if char:
            substrings.append(char.decode('utf-8'))
        else:
            substrings.append('')
    strings.append(''.join(substrings))
return list(strings)
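A quick standalone illustration of the inversion (the byte characters and the b'' padding entry below are assumptions mirroring the docstring's ['zeros', 'ones'] example):

chars = [[b'z', b'e', b'r', b'o', b's'], [b'o', b'n', b'e', b's', b'']]
# b'' decodes to the empty string, so padded names join back cleanly
print([''.join(c.decode('utf-8') for c in sub) for sub in chars])
# ['zeros', 'ones']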
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_dimension(ncfile, name, length) -> None: """Add a new dimension with the given name and length to the given NetCDF file. Essentially, |create_dimension| just calls the equally named method of the NetCDF library, but adds information to possible error messages: 5 While trying to add dimension `dim1` with length `5` \ """
try:
    ncfile.createDimension(name, length)
except BaseException:
    objecttools.augment_excmessage(
        'While trying to add dimension `%s` with length `%d` '
        'to the NetCDF file `%s`'
        % (name, length, get_filepath(ncfile)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_variable(ncfile, name, datatype, dimensions) -> None: """Add a new variable with the given name, datatype, and dimensions to the given NetCDF file. Essentially, |create_variable| just calls the equally named method of the NetCDF library, but adds information to possible error messages: While trying to add variable `var1` with datatype `f8` and \ dimensions `('dim1',)` to the NetCDF file `test.nc`, the following error \ array([ nan, nan, nan, nan, nan]) """
default = fillvalue if (datatype == 'f8') else None
try:
    ncfile.createVariable(
        name, datatype, dimensions=dimensions, fill_value=default)
    ncfile[name].long_name = name
except BaseException:
    objecttools.augment_excmessage(
        'While trying to add variable `%s` with datatype `%s` '
        'and dimensions `%s` to the NetCDF file `%s`'
        % (name, datatype, dimensions, get_filepath(ncfile)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def query_variable(ncfile, name) -> netcdf4.Variable: """Return the variable with the given name from the given NetCDF file. Essentially, |query_variable| just performs a key access via the used NetCDF library, but adds information to possible error messages: Traceback (most recent call last): OSError: NetCDF file `model.nc` does not contain variable `flux_prec`. True """
try:
    return ncfile[name]
except (IndexError, KeyError):
    raise OSError(
        'NetCDF file `%s` does not contain variable `%s`.'
        % (get_filepath(ncfile), name))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def query_timegrid(ncfile) -> timetools.Timegrid: """Return the |Timegrid| defined by the given NetCDF file. Timegrid('1996-01-01 00:00:00', '2007-01-01 00:00:00', '1d') """
timepoints = ncfile[varmapping['timepoints']]
refdate = timetools.Date.from_cfunits(timepoints.units)
return timetools.Timegrid.from_timepoints(
    timepoints=timepoints[:],
    refdate=refdate,
    unit=timepoints.units.strip().split()[0])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def query_array(ncfile, name) -> numpy.ndarray: """Return the data of the variable with the given name from the given NetCDF file. The following example shows that |query_array| returns |nan| entries to represent missing values even when the respective NetCDF variable defines a different fill value: array([-999., -999., -999., -999., -999.]) array([ nan, nan, nan, nan, nan]) """
variable = query_variable(ncfile, name)
maskedarray = variable[:]
fillvalue_ = getattr(variable, '_FillValue', numpy.nan)
if not numpy.isnan(fillvalue_):
    maskedarray[maskedarray.mask] = numpy.nan
return maskedarray.data
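A minimal standalone sketch of the replacement step, using a plain numpy masked array instead of a real NetCDF variable (the -999.0 fill value is illustrative):

import numpy

masked = numpy.ma.masked_values([-999.0, 1.0, -999.0], -999.0)
masked[masked.mask] = numpy.nan   # overwrite fill values with nan
print(masked.data)                # [nan  1. nan]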
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def log(self, sequence, infoarray) -> None: """Prepare a |NetCDFFile| object suitable for the given |IOSequence| object, when necessary, and pass the given arguments to its |NetCDFFile.log| method."""
if isinstance(sequence, sequencetools.ModelSequence):
    descr = sequence.descr_model
else:
    descr = 'node'
if self._isolate:
    descr = '%s_%s' % (descr, sequence.descr_sequence)
    if ((infoarray is not None) and
            (infoarray.info['type'] != 'unmodified')):
        descr = '%s_%s' % (descr, infoarray.info['type'])
dirpath = sequence.dirpath_ext
try:
    files = self.folders[dirpath]
except KeyError:
    files: Dict[str, 'NetCDFFile'] = collections.OrderedDict()
    self.folders[dirpath] = files
try:
    file_ = files[descr]
except KeyError:
    file_ = NetCDFFile(
        name=descr,
        flatten=self._flatten,
        isolate=self._isolate,
        timeaxis=self._timeaxis,
        dirpath=dirpath)
    files[descr] = file_
file_.log(sequence, infoarray)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read(self) -> None: """Call method |NetCDFFile.read| of all handled |NetCDFFile| objects. """
for folder in self.folders.values():
    for file_ in folder.values():
        file_.read()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write(self) -> None: """Call method |NetCDFFile.write| of all handled |NetCDFFile| objects. """
if self.folders:
    init = hydpy.pub.timegrids.init
    timeunits = init.firstdate.to_cfunits('hours')
    timepoints = init.to_timepoints('hours')
    for folder in self.folders.values():
        for file_ in folder.values():
            file_.write(timeunits, timepoints)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: """A |tuple| of names of all handled |NetCDFFile| objects."""
return tuple(sorted(set(itertools.chain( *(_.keys() for _ in self.folders.values())))))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def log(self, sequence, infoarray) -> None: """Pass the given |IOSequence| to a suitable instance of a |NetCDFVariableBase| subclass. When writing data, the second argument should be an |InfoArray|. When reading data, this argument is ignored. Simply pass |None|. (1) We prepare some devices handling some sequences by applying function |prepare_io_example_1|. We limit our attention to the returned elements, which handle the more diverse sequences: (2) We define some shortcuts for the sequences used in the following examples: (3) We define a function that logs these example sequences to a given |NetCDFFile| object and prints some information about the resulting object structure. Note that sequence `nkor2` is logged twice, the first time with its original time series data, the second time with averaged values: (4) We prepare a |NetCDFFile| object with both options `flatten` and `isolate` being disabled: (5) Logging all test sequences results in two |NetCDFVariableDeep| and one |NetCDFVariableAgg| objects. To keep both NetCDF variables related to |lland_fluxes.NKor| distinguishable, the name `flux_nkor_mean` includes information about the kind of aggregation performed: input_nied NetCDFVariableDeep ('element1', 'element2') flux_nkor NetCDFVariableDeep ('element2',) flux_nkor_mean NetCDFVariableAgg ('element2', 'element3') (6) We confirm that the |NetCDFVariableBase| objects received the required information: 'element2' InfoArray([[ 16., 17.], [ 18., 19.], [ 20., 21.], [ 22., 23.]]) 'element2' InfoArray([ 16.5, 18.5, 20.5, 22.5]) (7) We again prepare a |NetCDFFile| object, but now with both options `flatten` and `isolate` being enabled. Logging test sequences with their original time series data now triggers the initialisation of class |NetCDFVariableFlat|. When passing aggregated data, nothing changes: input_nied NetCDFVariableFlat ('element1', 'element2') flux_nkor NetCDFVariableFlat ('element2_0', 'element2_1') flux_nkor_mean NetCDFVariableAgg ('element2', 'element3') 'element2' InfoArray([[ 16., 17.], [ 18., 19.], [ 20., 21.], [ 22., 23.]]) 'element2' InfoArray([ 16.5, 18.5, 20.5, 22.5]) (8) We technically confirm that the `isolate` argument is passed to the constructor of subclasses of |NetCDFVariableBase| correctly: """
aggregated = ((infoarray is not None) and
              (infoarray.info['type'] != 'unmodified'))
descr = sequence.descr_sequence
if aggregated:
    descr = '_'.join([descr, infoarray.info['type']])
if descr in self.variables:
    var_ = self.variables[descr]
else:
    if aggregated:
        cls = NetCDFVariableAgg
    elif self._flatten:
        cls = NetCDFVariableFlat
    else:
        cls = NetCDFVariableDeep
    var_ = cls(name=descr,
               isolate=self._isolate,
               timeaxis=self._timeaxis)
    self.variables[descr] = var_
var_.log(sequence, infoarray)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filepath(self) -> str: """The NetCDF file path."""
return os.path.join(self._dirpath, self.name + '.nc')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read(self) -> None: """Open an existing NetCDF file temporarily and call method |NetCDFVariableDeep.read| of all handled |NetCDFVariableBase| objects."""
try:
    with netcdf4.Dataset(self.filepath, "r") as ncfile:
        timegrid = query_timegrid(ncfile)
        for variable in self.variables.values():
            variable.read(ncfile, timegrid)
except BaseException:
    objecttools.augment_excmessage(
        f'While trying to read data from NetCDF file `{self.filepath}`')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write(self, timeunit, timepoints) -> None: """Open a new NetCDF file temporarily and call method |NetCDFVariableBase.write| of all handled |NetCDFVariableBase| objects."""
with netcdf4.Dataset(self.filepath, "w") as ncfile:
    ncfile.Conventions = 'CF-1.6'
    self._insert_timepoints(ncfile, timepoints, timeunit)
    for variable in self.variables.values():
        variable.write(ncfile)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_index(self, name_subdevice) -> int: """Item access to the wrapped |dict| object with a specialized error message."""
try:
    return self.dict_[name_subdevice]
except KeyError:
    raise OSError(
        'No data for sequence `%s` and (sub)device `%s` '
        'in NetCDF file `%s` available.'
        % (self.name_sequence, name_subdevice, self.name_ncfile))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def log(self, sequence, infoarray) -> None: """Log the given |IOSequence| object either for reading or writing data. The optional `array` argument allows for passing alternative data in an |InfoArray| object replacing the series of the |IOSequence| object, which is useful for writing modified (e.g. spatially averaged) time series. Logged time series data is available via attribute access: True True False Traceback (most recent call last): AttributeError: The NetCDFVariable object `flux_nkor` does \ neither handle time series data under the (sub)device name `element2` \ nor does it define a member named `element2`. """
descr_device = sequence.descr_device
self.sequences[descr_device] = sequence
self.arrays[descr_device] = infoarray
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sort_timeplaceentries(self, timeentry, placeentry) -> Tuple[Any, Any]: """Return a |tuple| containing the given `timeentry` and `placeentry` sorted in agreement with the currently selected `timeaxis`. ('place', 'time') ('time', 'place') """
if self._timeaxis:
    return placeentry, timeentry
return timeentry, placeentry
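The axis switch can be reproduced in isolation; here the `timeaxis` argument stands in for the `_timeaxis` attribute (a standalone sketch, not part of the class):

def sort_entries(timeaxis, timeentry, placeentry):
    # timeaxis truthy: time is the second axis, so place comes first
    return (placeentry, timeentry) if timeaxis else (timeentry, placeentry)

print(sort_entries(1, 'time', 'place'))   # ('place', 'time')
print(sort_entries(0, 'time', 'place'))   # ('time', 'place')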
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_timeplaceslice(self, placeindex) -> \ Union[Tuple[slice, int], Tuple[int, slice]]: """Return a |tuple| for indexing a complete time series of a certain location available in |NetCDFVariableBase.array|. (2, slice(None, None, None)) (slice(None, None, None), 2) """
return self.sort_timeplaceentries(slice(None), int(placeindex))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: """A |tuple| containing the device names."""
self: NetCDFVariableBase
return tuple(self.sequences.keys())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: """Return a |tuple| of one |int| and some |slice| objects to access all values of a certain device within |NetCDFVariableDeep.array|. (2, slice(None, None, None), slice(0, 3, None)) (4, slice(None, None, None), slice(0, 1, None), slice(0, 2, None)) (slice(None, None, None), 4, slice(0, 1, None), slice(0, 2, None)) """
slices = list(self.get_timeplaceslice(idx))
for length in shape:
    slices.append(slice(0, length))
return tuple(slices)
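The slice assembly can be traced by hand for the first docstring example (device index 2, one extra dimension of length 3, `timeaxis=1` assumed):

slices = [2, slice(None)]        # device index, full time axis
for length in (3,):              # one additional sequence dimension
    slices.append(slice(0, length))
print(tuple(slices))  # (2, slice(None, None, None), slice(0, 3, None))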
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: """Required shape of |NetCDFVariableDeep.array|. For the default configuration, the first axis corresponds to the number of devices, and the second one to the number of timesteps. We show this for the 0-dimensional input sequence |lland_inputs.Nied|: (3, 4) For higher dimensional sequences, each new entry corresponds to the maximum number of fields the respective sequences require. In the next example, we select the 1-dimensional sequence |lland_fluxes.NKor|. The maximum number 3 (last value of the returned |tuple|) is due to the third element defining three hydrological response units: (3, 4, 3) When using the first axis for time (`timeaxis=0`) the order of the first two |tuple| entries turns: (4, 3, 3) """
nmb_place = len(self.sequences)
nmb_time = len(hydpy.pub.timegrids.init)
nmb_others = collections.deque()
for sequence in self.sequences.values():
    nmb_others.append(sequence.shape)
nmb_others_max = tuple(numpy.max(nmb_others, axis=0))
return self.sort_timeplaceentries(nmb_time, nmb_place) + nmb_others_max
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def array(self) -> numpy.ndarray: """The series data of all logged |IOSequence| objects contained in one single |numpy.ndarray|. The documentation on |NetCDFVariableDeep.shape| explains how |NetCDFVariableDeep.array| is structured. The first example confirms that, for the default configuration, the first axis defines the location, while the second one defines time: array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.]]) For higher dimensional sequences, |NetCDFVariableDeep.array| can contain missing values. Such missing values show up for some fields of the second example element, which defines only two hydrological response units instead of three: array([[ 16., 17., nan], [ 18., 19., nan], [ 20., 21., nan], [ 22., 23., nan]]) When using the first axis for time (`timeaxis=0`) the same data can be accessed with slightly different indexing: array([[ 16., 17., nan], [ 18., 19., nan], [ 20., 21., nan], [ 22., 23., nan]]) """
array = numpy.full(self.shape, fillvalue, dtype=float)
for idx, (descr, subarray) in enumerate(self.arrays.items()):
    sequence = self.sequences[descr]
    array[self.get_slices(idx, sequence.shape)] = subarray
return array
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shape(self) -> Tuple[int, int]: """Required shape of |NetCDFVariableAgg.array|. For the default configuration, the first axis corresponds to the number of devices, and the second one to the number of timesteps. We show this for the 1-dimensional input sequence |lland_fluxes.NKor|: (3, 4) When using the first axis as the "timeaxis", the order of |tuple| entries turns: (4, 3) """
return self.sort_timeplaceentries( len(hydpy.pub.timegrids.init), len(self.sequences))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def array(self) -> numpy.ndarray: """The aggregated data of all logged |IOSequence| objects contained in one single |numpy.ndarray| object. The documentation on |NetCDFVariableAgg.shape| explains how |NetCDFVariableAgg.array| is structured. This first example confirms that, under default configuration (`timeaxis=1`), the first axis corresponds to the location, while the second one corresponds to time: array([[ 12. , 13. , 14. , 15. ], [ 16.5, 18.5, 20.5, 22.5], [ 25. , 28. , 31. , 34. ]]) When using the first axis as the "timeaxis", the resulting |NetCDFVariableAgg.array| is the transposed: array([[ 12. , 16.5, 25. ], [ 13. , 18.5, 28. ], [ 14. , 20.5, 31. ], [ 15. , 22.5, 34. ]]) """
array = numpy.full(self.shape, fillvalue, dtype=float)
for idx, subarray in enumerate(self.arrays.values()):
    array[self.get_timeplaceslice(idx)] = subarray
return array
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shape(self) -> Tuple[int, int]: """Required shape of |NetCDFVariableFlat.array|. For 0-dimensional sequences like |lland_inputs.Nied| and for the default configuration (`timeaxis=1`), the first axis corresponds to the number of devices, and the second one to the number of timesteps: (3, 4) For higher dimensional sequences, the first axis corresponds to "subdevices", e.g. hydrological response units within different elements. The 1-dimensional sequence |lland_fluxes.NKor| is logged for three elements with one, two, and three response units respectively, making up a sum of six subdevices: (6, 4) When using the first axis as the "timeaxis", the order of |tuple| entries turns: (4, 6) """
return self.sort_timeplaceentries( len(hydpy.pub.timegrids.init), sum(len(seq) for seq in self.sequences.values()))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def array(self) -> numpy.ndarray: """The series data of all logged |IOSequence| objects contained in one single |numpy.ndarray| object. The documentation on |NetCDFVariableFlat.shape| explains how |NetCDFVariableFlat.array| is structured. The first example confirms that, under default configuration (`timeaxis=1`), the first axis corresponds to the location, while the second one corresponds to time: array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.]]) Due to the flattening of higher dimensional sequences, their individual time series (e.g. of different hydrological response units) are spread over the rows of the array. For the 1-dimensional sequence |lland_fluxes.NKor|, the individual time series of the second element are stored in row two and three: array([[ 16., 18., 20., 22.], [ 17., 19., 21., 23.]]) When using the first axis as the "timeaxis", the individual time series of the second element are stored in column two and three: array([[ 16., 17.], [ 18., 19.], [ 20., 21.], [ 22., 23.]]) """
array = numpy.full(self.shape, fillvalue, dtype=float)
idx0 = 0
idxs: List[Any] = [slice(None)]
for seq, subarray in zip(self.sequences.values(), self.arrays.values()):
    for prod in self._product(seq.shape):
        subsubarray = subarray[tuple(idxs + list(prod))]
        array[self.get_timeplaceslice(idx0)] = subsubarray
        idx0 += 1
return array
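The flattening loop can be mimicked with plain numpy and itertools; here `itertools.product(range(2))` stands in for `self._product` and the series values are illustrative:

import itertools
import numpy

series = numpy.arange(8.0).reshape(4, 2)   # 4 timesteps, 2 response units
flat = numpy.full((2, 4), numpy.nan)
for idx0, prod in enumerate(itertools.product(range(2))):
    # each response unit's time series becomes one row of the flat array
    flat[idx0, :] = series[(slice(None),) + prod]
print(flat)   # row 0: [0. 2. 4. 6.], row 1: [1. 3. 5. 7.]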
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update(self): """Determine the number of substeps. Initialize a llake model and assume a simulation step size of 12 hours: If the maximum internal step size is also set to 12 hours, there is only one internal calculation step per outer simulation step: nmbsubsteps(1) Assigning smaller values to `maxdt` increases `nmbsubsteps`: nmbsubsteps(12) In case the simulationstep is not a whole multiple of `maxdt`, the value of `nmbsubsteps` is rounded up: nmbsubsteps(13) Even for `maxdt` values exceeding the simulationstep, the value of `nmbsubsteps` does not become smaller than one: nmbsubsteps(1) """
maxdt = self.subpars.pars.control.maxdt
seconds = self.simulationstep.seconds
self.value = numpy.ceil(seconds/maxdt)
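The ceiling arithmetic behind the docstring examples can be checked directly (12 hours = 43200 seconds; the sample `maxdt` values below are chosen to reproduce the four documented results):

import numpy

for maxdt in (43200.0, 3600.0, 3400.0, 86400.0):
    print(int(numpy.ceil(43200.0 / maxdt)))   # 1, 12, 13, 1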
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update(self): """Calculate the auxiliary term. vq(toy_1_1_0_0_0=[0.0, 243200.0, 2086400.0], toy_7_1_0_0_0=[0.0, 286400.0, 2216000.0]) """
con = self.subpars.pars.control
der = self.subpars
for (toy, qs) in con.q:
    setattr(self, str(toy), 2.*con.v + der.seconds/der.nmbsubsteps*qs)
self.refresh()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prepare_io_example_1() -> Tuple[devicetools.Nodes, devicetools.Elements]: # noinspection PyUnresolvedReferences """Prepare an IO example configuration. (1) Prepares a short initialisation period of five days: Timegrids(Timegrid('2000-01-01 00:00:00', '2000-01-05 00:00:00', '1d')) (2) Prepares a plain IO testing directory structure: 'inputpath' 'outputpath' 'outputpath' 'nodepath' ['inputpath', 'nodepath', 'outputpath'] (3) Returns three |Element| objects handling either application model |lland_v1| or |lland_v2|, and two |Node| objects handling variables `Q` and `T`: element1 lland_v1 element2 lland_v1 element3 lland_v2 node1 Q node2 T (4) Prepares the time series data of the input sequence |lland_inputs.Nied|, flux sequence |lland_fluxes.NKor|, and state sequence |lland_states.BoWa| for each model instance, and |Sim| for each node instance (all values are different), e.g.: InfoArray([ 0., 1., 2., 3.]) InfoArray([[ 12.], [ 13.], [ 14.], [ 15.]]) InfoArray([[ 48., 49., 50.], [ 51., 52., 53.], [ 54., 55., 56.], [ 57., 58., 59.]]) InfoArray([ 64., 65., 66., 67.]) (5) All sequences carry |numpy.ndarray| objects with (deep) copies of the time series data for testing: InfoArray(True, dtype=bool) InfoArray(False, dtype=bool) """
from hydpy import TestIO
TestIO.clear()
from hydpy.core.filetools import SequenceManager
hydpy.pub.sequencemanager = SequenceManager()
with TestIO():
    hydpy.pub.sequencemanager.inputdirpath = 'inputpath'
    hydpy.pub.sequencemanager.fluxdirpath = 'outputpath'
    hydpy.pub.sequencemanager.statedirpath = 'outputpath'
    hydpy.pub.sequencemanager.nodedirpath = 'nodepath'
hydpy.pub.timegrids = '2000-01-01', '2000-01-05', '1d'
from hydpy import Node, Nodes, Element, Elements, prepare_model
node1 = Node('node1')
node2 = Node('node2', variable='T')
nodes = Nodes(node1, node2)
element1 = Element('element1', outlets=node1)
element2 = Element('element2', outlets=node1)
element3 = Element('element3', outlets=node1)
elements = Elements(element1, element2, element3)
from hydpy.models import lland_v1, lland_v2
element1.model = prepare_model(lland_v1)
element2.model = prepare_model(lland_v1)
element3.model = prepare_model(lland_v2)
from hydpy.models.lland import ACKER
for idx, element in enumerate(elements):
    parameters = element.model.parameters
    parameters.control.nhru(idx+1)
    parameters.control.lnk(ACKER)
    parameters.derived.absfhru(10.0)
with hydpy.pub.options.printprogress(False):
    nodes.prepare_simseries()
    elements.prepare_inputseries()
    elements.prepare_fluxseries()
    elements.prepare_stateseries()

def init_values(seq, value1_):
    value2_ = value1_ + len(seq.series.flatten())
    values_ = numpy.arange(value1_, value2_, dtype=float)
    seq.testarray = values_.reshape(seq.seriesshape)
    seq.series = seq.testarray.copy()
    return value2_

import numpy
value1 = 0
for subname, seqname in zip(['inputs', 'fluxes', 'states'],
                            ['nied', 'nkor', 'bowa']):
    for element in elements:
        subseqs = getattr(element.model.sequences, subname)
        value1 = init_values(getattr(subseqs, seqname), value1)
for node in nodes:
    value1 = init_values(node.sequences.sim, value1)
return nodes, elements
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_postalcodes_around_radius(self, pc, radius):
    postalcodes = self.get(pc)
    if postalcodes is None:
        raise PostalCodeNotFoundException(
            "Could not find postal code you're searching for.")
    else:
        pc = postalcodes[0]
    radius = float(radius)

    # Bounding box calculations updated from pyzipcode
    earth_radius = 6371
    dlat = radius / earth_radius
    dlon = asin(sin(dlat) / cos(radians(pc.latitude)))
    lat_delta = degrees(dlat)
    lon_delta = degrees(dlon)

    if lat_delta < 0:
        lat_range = (pc.latitude + lat_delta, pc.latitude - lat_delta)
    else:
        lat_range = (pc.latitude - lat_delta, pc.latitude + lat_delta)

    # the longitude range uses the longitude delta on both sides
    long_range = (pc.longitude - lon_delta, pc.longitude + lon_delta)

    return format_result(self.conn_manager.query(PC_RANGE_QUERY % (
        long_range[0], long_range[1],
        lat_range[0], lat_range[1]
    )))
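The bounding-box math can be checked in isolation; the latitude and radius below are illustrative, not taken from a real postal-code database:

from math import asin, sin, cos, radians, degrees

earth_radius = 6371          # km
radius = 10.0                # km
dlat = radius / earth_radius
dlon = asin(sin(dlat) / cos(radians(52.5)))
# roughly 0.0899 and 0.1477 degrees; dlon grows with latitude because
# meridians converge towards the poles
print(round(degrees(dlat), 4), round(degrees(dlon), 4))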
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_team_ids(): """Returns a pandas DataFrame with all Team IDs"""
df = get_all_player_ids("all_data")
df = pd.DataFrame({"TEAM_NAME": df.TEAM_NAME.unique(),
                   "TEAM_ID": df.TEAM_ID.unique()})
return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_team_id(team_name): """ Returns the team ID associated with the team name that is passed in. Parameters team_name : str The team name whose ID we want. NOTE: Only pass in the team name (e.g. "Lakers"), not the city, or city and team name, or the team abbreviation. Returns ------- team_id : int The team ID associated with the team name. """
df = get_all_team_ids()
df = df[df.TEAM_NAME == team_name]
if len(df) == 0:
    er = "Invalid team name or there is no team with that name."
    raise ValueError(er)
team_id = df.TEAM_ID.iloc[0]
return team_id
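A local sketch of the lookup logic with a stand-in DataFrame, so it runs without the live get_all_team_ids() call (the two IDs shown are the NBA stats IDs for those franchises):

import pandas as pd

df = pd.DataFrame({"TEAM_NAME": ["Lakers", "Celtics"],
                   "TEAM_ID": [1610612747, 1610612738]})
print(df[df.TEAM_NAME == "Lakers"].TEAM_ID.iloc[0])   # 1610612747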
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_game_logs(self): """Returns team game logs as a pandas DataFrame"""
logs = self.response.json()['resultSets'][0]['rowSet']
headers = self.response.json()['resultSets'][0]['headers']
df = pd.DataFrame(logs, columns=headers)
df.GAME_DATE = pd.to_datetime(df.GAME_DATE)
return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_game_id(self, date): """Returns the Game ID associated with the date that is passed in. Parameters date : str The date associated with the game whose Game ID we want. The date that is passed in can take on a numeric format of MM/DD/YY (like "01/06/16" or "01/06/2016") or the expanded Month Day, Year format (like "Jan 06, 2016" or "January 06, 2016"). Returns ------- game_id : str The desired Game ID. """
df = self.get_game_logs()
game_id = df[df.GAME_DATE == date].Game_ID.values[0]
return game_id
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_params(self, parameters): """Pass in a dictionary to update url parameters for NBA stats API Parameters parameters : dict A dict containing key, value pairs that correspond with NBA stats API parameters. Returns ------- self : TeamLog The TeamLog object containing the updated NBA stats API parameters. """
self.url_paramaters.update(parameters)
self.response = requests.get(self.base_url, params=self.url_paramaters,
                             headers=HEADERS)
# raise error if status code is not 200
self.response.raise_for_status()
return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_shots(self): """Returns the shot chart data as a pandas DataFrame."""
shots = self.response.json()['resultSets'][0]['rowSet']
headers = self.response.json()['resultSets'][0]['headers']
return pd.DataFrame(shots, columns=headers)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unsubscribe(self, subscription, max=None): """ Unsubscribe will remove interest in the given subject. If max is provided, an automatic unsubscribe is processed by the server once max messages have been received. Args: subscription (pynats.Subscription): a Subscription object max (int=None): number of messages """
if max is None:
    self._send('UNSUB %d' % subscription.sid)
    self._subscriptions.pop(subscription.sid)
else:
    subscription.max = max
    self._send('UNSUB %d %s' % (subscription.sid, max))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def request(self, subject, callback, msg=None): """ Publish a message with an implicit inbox listener as the reply. Message is optional. Args: subject (string): a string with the subject callback (function): callback to be called msg (string=None): payload string """
inbox = self._build_inbox()
s = self.subscribe(inbox, callback)
self.unsubscribe(s, 1)
self.publish(subject, msg, inbox)
return s
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def draw_court(ax=None, color='gray', lw=1, outer_lines=False): """Returns an axes with a basketball court drawn onto it. This function draws a court based on the x and y-axis values that the NBA stats API provides for the shot chart data. For example the center of the hoop is located at the (0,0) coordinate. Twenty-two feet to the left of the center of the hoop is represented by the (-220,0) coordinates. So one foot equals +/-10 units on the x and y-axis. Parameters ax : Axes, optional The Axes object to plot the court onto. color : matplotlib color, optional The color of the court lines. lw : float, optional The linewidth of the court lines. outer_lines : boolean, optional If `True` it draws the out of bound lines in same style as the rest of the court. Returns ------- ax : Axes The Axes object with the court on it. """
if ax is None:
    ax = plt.gca()

# Create the various parts of an NBA basketball court

# Create the basketball hoop
hoop = Circle((0, 0), radius=7.5, linewidth=lw, color=color, fill=False)

# Create backboard
backboard = Rectangle((-30, -12.5), 60, 0, linewidth=lw, color=color)

# The paint
# Create the outer box of the paint, width=16ft, height=19ft
outer_box = Rectangle((-80, -47.5), 160, 190, linewidth=lw, color=color,
                      fill=False)
# Create the inner box of the paint, width=12ft, height=19ft
inner_box = Rectangle((-60, -47.5), 120, 190, linewidth=lw, color=color,
                      fill=False)

# Create free throw top arc
top_free_throw = Arc((0, 142.5), 120, 120, theta1=0, theta2=180,
                     linewidth=lw, color=color, fill=False)
# Create free throw bottom arc
bottom_free_throw = Arc((0, 142.5), 120, 120, theta1=180, theta2=0,
                        linewidth=lw, color=color, linestyle='dashed')
# Restricted Zone, it is an arc with 4ft radius from center of the hoop
restricted = Arc((0, 0), 80, 80, theta1=0, theta2=180, linewidth=lw,
                 color=color)

# Three point line
# Create the side 3pt lines, they are 14ft long before they begin to arc
corner_three_a = Rectangle((-220, -47.5), 0, 140, linewidth=lw,
                           color=color)
corner_three_b = Rectangle((220, -47.5), 0, 140, linewidth=lw, color=color)
# 3pt arc - center of arc will be the hoop, arc is 23'9" away from hoop
three_arc = Arc((0, 0), 475, 475, theta1=22, theta2=158, linewidth=lw,
                color=color)

# Center Court
center_outer_arc = Arc((0, 422.5), 120, 120, theta1=180, theta2=0,
                       linewidth=lw, color=color)
center_inner_arc = Arc((0, 422.5), 40, 40, theta1=180, theta2=0,
                       linewidth=lw, color=color)

# List of the court elements to be plotted onto the axes
court_elements = [hoop, backboard, outer_box, inner_box, top_free_throw,
                  bottom_free_throw, restricted, corner_three_a,
                  corner_three_b, three_arc, center_outer_arc,
                  center_inner_arc]

if outer_lines:
    # Draw the half court line, baseline and side out bound lines
    outer_lines = Rectangle((-250, -47.5), 500, 470, linewidth=lw,
                            color=color, fill=False)
    court_elements.append(outer_lines)

# Add the court elements onto the axes
for element in court_elements:
    ax.add_patch(element)

return ax
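Minimal usage sketch, assuming the matplotlib imports used above (plt, Circle, Rectangle, Arc) are in scope:

fig, ax = plt.subplots(figsize=(6, 5.6))
ax.set_xlim(-250, 250)
ax.set_ylim(422.5, -47.5)   # hoop at the top, half-court line at the bottom
draw_court(ax, color="gray", lw=1, outer_lines=True)
plt.show()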
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shot_chart(x, y, kind="scatter", title="", color="b", cmap=None, xlim=(-250, 250), ylim=(422.5, -47.5), court_color="gray", court_lw=1, outer_lines=False, flip_court=False, kde_shade=True, gridsize=None, ax=None, despine=False, **kwargs): """ Returns an Axes object with player shots plotted. Parameters x, y : strings or vector The x and y coordinates of the shots taken. They can be passed in as vectors (such as a pandas Series) or as columns from the pandas DataFrame passed into ``data``. data : DataFrame, optional DataFrame containing shots where ``x`` and ``y`` represent the shot location coordinates. kind : { "scatter", "kde", "hex" }, optional The kind of shot chart to create. title : str, optional The title for the plot. color : matplotlib color, optional Color used to plot the shots cmap : matplotlib Colormap object or name, optional Colormap for the range of data values. If one isn't provided, the colormap is derived from the value passed to ``color``. Used for KDE and Hexbin plots. {x, y}lim : two-tuples, optional The axis limits of the plot. court_color : matplotlib color, optional The color of the court lines. court_lw : float, optional The linewidth of the court lines. outer_lines : boolean, optional If ``True`` the out of bound lines are drawn in as a matplotlib Rectangle. flip_court : boolean, optional If ``True`` orients the hoop towards the bottom of the plot. Default is ``False``, which orients the court where the hoop is towards the top of the plot. kde_shade : boolean, optional Default is ``True``, which shades in the KDE contours. gridsize : int, optional Number of hexagons in the x-direction. The default is calculated using the Freedman-Diaconis method. ax : Axes, optional The Axes object to plot the court onto. despine : boolean, optional If ``True``, removes the spines. kwargs : key, value pairs Keyword arguments for matplotlib Collection properties or seaborn plots. Returns ------- ax : Axes The Axes object with the shot chart plotted on it. """
if ax is None:
    ax = plt.gca()

if cmap is None:
    cmap = sns.light_palette(color, as_cmap=True)

if not flip_court:
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
else:
    ax.set_xlim(xlim[::-1])
    ax.set_ylim(ylim[::-1])

ax.tick_params(labelbottom="off", labelleft="off")
ax.set_title(title, fontsize=18)

draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines)

if kind == "scatter":
    ax.scatter(x, y, c=color, **kwargs)
elif kind == "kde":
    sns.kdeplot(x, y, shade=kde_shade, cmap=cmap, ax=ax, **kwargs)
    ax.set_xlabel('')
    ax.set_ylabel('')
elif kind == "hex":
    if gridsize is None:
        # Get the number of bins for hexbin using the Freedman-Diaconis rule
        # This idea was taken from seaborn, which got the calculation
        # from http://stats.stackexchange.com/questions/798/
        from seaborn.distributions import _freedman_diaconis_bins
        x_bin = _freedman_diaconis_bins(x)
        y_bin = _freedman_diaconis_bins(y)
        gridsize = int(np.mean([x_bin, y_bin]))
    ax.hexbin(x, y, gridsize=gridsize, cmap=cmap, **kwargs)
else:
    raise ValueError("kind must be 'scatter', 'kde', or 'hex'.")

# Set the spines to match the rest of court lines, makes outer_lines
# somewhat unnecessary
for spine in ax.spines:
    ax.spines[spine].set_lw(court_lw)
    ax.spines[spine].set_color(court_color)

if despine:
    ax.spines["top"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_visible(False)

return ax
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shot_chart_jointplot(x, y, data=None, kind="scatter", title="", color="b", cmap=None, xlim=(-250, 250), ylim=(422.5, -47.5), court_color="gray", court_lw=1, outer_lines=False, flip_court=False, size=(12, 11), space=0, despine=False, joint_kws=None, marginal_kws=None, **kwargs): """ Returns a seaborn JointGrid using sns.jointplot Parameters x, y : strings or vector The x and y coordinates of the shots taken. They can be passed in as vectors (such as a pandas Series) or as column names from the pandas DataFrame passed into ``data``. data : DataFrame, optional DataFrame containing shots where ``x`` and ``y`` represent the shot location coordinates. kind : { "scatter", "kde", "hex" }, optional The kind of shot chart to create. title : str, optional The title for the plot. color : matplotlib color, optional Color used to plot the shots cmap : matplotlib Colormap object or name, optional Colormap for the range of data values. If one isn't provided, the colormap is derived from the value passed to ``color``. Used for KDE and Hexbin joint plots. {x, y}lim : two-tuples, optional The axis limits of the plot. The defaults represent the out of bounds lines and half court line. court_color : matplotlib color, optional The color of the court lines. court_lw : float, optional The linewidth of the court lines. outer_lines : boolean, optional If ``True`` the out of bound lines are drawn in as a matplotlib Rectangle. flip_court : boolean, optional If ``True`` orients the hoop towards the bottom of the plot. Default is ``False``, which orients the court where the hoop is towards the top of the plot. gridsize : int, optional Number of hexagons in the x-direction. The default is calculated using the Freedman-Diaconis method. size : tuple, optional The width and height of the plot in inches. space : numeric, optional The space between the joint and marginal plots. {joint, marginal}_kws : dicts Additional keyword arguments for joint and marginal plot components. kwargs : key, value pairs Keyword arguments for matplotlib Collection properties or seaborn plots. Returns ------- grid : JointGrid The JointGrid object with the shot chart plotted on it. """
# If a colormap is not provided, then it is based off of the color
if cmap is None:
    cmap = sns.light_palette(color, as_cmap=True)

if kind not in ["scatter", "kde", "hex"]:
    raise ValueError("kind must be 'scatter', 'kde', or 'hex'.")

grid = sns.jointplot(x=x, y=y, data=data, stat_func=None, kind=kind,
                     space=space, color=color, cmap=cmap,
                     joint_kws=joint_kws, marginal_kws=marginal_kws,
                     **kwargs)

grid.fig.set_size_inches(size)

# A joint plot has 3 Axes, the first one called ax_joint
# is the one we want to draw our court onto and adjust some other settings
ax = grid.ax_joint

if not flip_court:
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
else:
    ax.set_xlim(xlim[::-1])
    ax.set_ylim(ylim[::-1])

draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines)

# Get rid of axis labels and tick marks
ax.set_xlabel('')
ax.set_ylabel('')
ax.tick_params(labelbottom='off', labelleft='off')

# Add a title
ax.set_title(title, y=1.2, fontsize=18)

# Set the spines to match the rest of court lines, makes outer_lines
# somewhat unnecessary
for spine in ax.spines:
    ax.spines[spine].set_lw(court_lw)
    ax.spines[spine].set_color(court_color)
    # set the marginal spines to be the same as the rest of the plot
    grid.ax_marg_x.spines[spine].set_lw(court_lw)
    grid.ax_marg_x.spines[spine].set_color(court_color)
    grid.ax_marg_y.spines[spine].set_lw(court_lw)
    grid.ax_marg_y.spines[spine].set_color(court_color)

if despine:
    ax.spines["top"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_visible(False)

return grid
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def heatmap(x, y, z, title="", cmap=plt.cm.YlOrRd, bins=20, xlim=(-250, 250), ylim=(422.5, -47.5), facecolor='lightgray', facecolor_alpha=0.4, court_color="black", court_lw=0.5, outer_lines=False, flip_court=False, ax=None, **kwargs): """ Returns an AxesImage object that contains a heatmap. TODO: Redo some code and explain parameters """
# Bin the FGA (x, y) and calculate the mean number of times a shot was
# made (z) within each bin
# mean is the calculated FG percentage for each bin
mean, xedges, yedges, binnumber = binned_statistic_2d(x=x, y=y, values=z,
                                                      statistic='mean',
                                                      bins=bins)

if ax is None:
    ax = plt.gca()

if not flip_court:
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
else:
    ax.set_xlim(xlim[::-1])
    ax.set_ylim(ylim[::-1])

ax.tick_params(labelbottom="off", labelleft="off")
ax.set_title(title, fontsize=18)

ax.patch.set_facecolor(facecolor)
ax.patch.set_alpha(facecolor_alpha)

draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines)

heatmap = ax.imshow(mean.T, origin='lower',
                    extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]],
                    interpolation='nearest', cmap=cmap)

return heatmap
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bokeh_draw_court(figure, line_color='gray', line_width=1): """Returns a figure with the basketball court lines drawn onto it. This function draws a court based on the x and y-axis values that the NBA stats API provides for the shot chart data. For example the center of the hoop is located at the (0,0) coordinate. Twenty-two feet to the left of the center of the hoop is represented by the (-220,0) coordinates. So one foot equals +/-10 units on the x and y-axis. Parameters figure : Bokeh figure object The Axes object to plot the court onto. line_color : str, optional The color of the court lines. Can be a Hex value. line_width : float, optional The linewidth of the court lines in pixels. Returns ------- figure : Figure The Figure object with the court on it. """
# hoop
figure.circle(x=0, y=0, radius=7.5, fill_alpha=0,
              line_color=line_color, line_width=line_width)

# backboard
figure.line(x=range(-30, 31), y=-12.5, line_color=line_color)

# The paint
# outer box
figure.rect(x=0, y=47.5, width=160, height=190, fill_alpha=0,
            line_color=line_color, line_width=line_width)
# inner box
# left inner box line
figure.line(x=-60, y=np.arange(-47.5, 143.5), line_color=line_color,
            line_width=line_width)
# right inner box line
figure.line(x=60, y=np.arange(-47.5, 143.5), line_color=line_color,
            line_width=line_width)

# Restricted Zone
figure.arc(x=0, y=0, radius=40, start_angle=pi, end_angle=0,
           line_color=line_color, line_width=line_width)

# top free throw arc
figure.arc(x=0, y=142.5, radius=60, start_angle=pi, end_angle=0,
           line_color=line_color)
# bottom free throw arc
figure.arc(x=0, y=142.5, radius=60, start_angle=0, end_angle=pi,
           line_color=line_color, line_dash="dashed")

# Three point line
# corner three point lines
figure.line(x=-220, y=np.arange(-47.5, 92.5), line_color=line_color,
            line_width=line_width)
figure.line(x=220, y=np.arange(-47.5, 92.5), line_color=line_color,
            line_width=line_width)
# three point arc
figure.arc(x=0, y=0, radius=237.5, start_angle=3.528, end_angle=-0.3863,
           line_color=line_color, line_width=line_width)

# add center court
# outer center arc
figure.arc(x=0, y=422.5, radius=60, start_angle=0, end_angle=pi,
           line_color=line_color, line_width=line_width)
# inner center arc
figure.arc(x=0, y=422.5, radius=20, start_angle=0, end_angle=pi,
           line_color=line_color, line_width=line_width)

# outer lines, consisting of half court lines and out of bounds lines
figure.rect(x=0, y=187.5, width=500, height=470, fill_alpha=0,
            line_color=line_color, line_width=line_width)

return figure
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bokeh_shot_chart(data, x="LOC_X", y="LOC_Y", fill_color="#1f77b4", scatter_size=10, fill_alpha=0.4, line_alpha=0.4, court_line_color='gray', court_line_width=1, hover_tool=False, tooltips=None, **kwargs): # TODO: Settings for hover tooltip """ Returns a figure with both FGA and basketball court lines drawn onto it. This function expects data to be a ColumnDataSource with the x and y values named "LOC_X" and "LOC_Y". Otherwise specify x and y. Parameters data : DataFrame The DataFrame that contains the shot chart data. x, y : str, optional The x and y coordinates of the shots taken. fill_color : str, optional The fill color of the shots. Can be a Hex value. scatter_size : int, optional The size of the dots for the scatter plot. fill_alpha : float, optional Alpha value for the shots. Must be a floating point value between 0 (transparent) to 1 (opaque). line_alpha : float, optional Alpha value for the outer lines of the plotted shots. Must be a floating point value between 0 (transparent) to 1 (opaque). court_line_color : str, optional The color of the court lines. Can be a Hex value. court_line_width : float, optional The linewidth of the court lines in pixels. hover_tool : boolean, optional If ``True``, creates hover tooltip for the plot. tooltips : List of tuples, optional Provides the information for the hover tooltip. Returns ------- fig : Figure The Figure object with the shot chart plotted on it. """
source = ColumnDataSource(data)

fig = figure(width=700, height=658, x_range=[-250, 250],
             y_range=[422.5, -47.5], min_border=0, x_axis_type=None,
             y_axis_type=None, outline_line_color="black", **kwargs)

fig.scatter(x, y, source=source, size=scatter_size, color=fill_color,
            alpha=fill_alpha, line_alpha=line_alpha)

bokeh_draw_court(fig, line_color=court_line_color,
                 line_width=court_line_width)

if hover_tool:
    hover = HoverTool(renderers=[fig.renderers[0]], tooltips=tooltips)
    fig.add_tools(hover)

return fig
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _kmedoids_run(X, n_clusters, distance, max_iter, tol, rng): """ Run a single trial of k-medoids clustering on dataset X, and given number of clusters """
membs = np.empty(shape=X.shape[0], dtype=int)
centers = kmeans._kmeans_init(X, n_clusters, method='', rng=rng)

sse_last = 9999.9
n_iter = 0
for it in range(1, max_iter):
    membs = kmeans._assign_clusters(X, centers)
    centers, sse_arr = _update_centers(X, membs, n_clusters, distance)
    sse_total = np.sum(sse_arr)
    if np.abs(sse_total - sse_last) < tol:
        n_iter = it
        break
    sse_last = sse_total

return(centers, membs, sse_total, sse_arr, n_iter)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _init_mixture_params(X, n_mixtures, init_method): """ Initialize mixture density parameters with equal priors, random means, and identity covariance matrices """
init_priors = np.ones(shape=n_mixtures, dtype=float) / n_mixtures

if init_method == 'kmeans':
    km = _kmeans.KMeans(n_clusters=n_mixtures, n_trials=20)
    km.fit(X)
    init_means = km.centers_
else:
    inx_rand = np.random.choice(X.shape[0], size=n_mixtures)
    init_means = X[inx_rand, :]

if np.any(np.isnan(init_means)):
    raise ValueError("Init means are NaN! ")

n_features = X.shape[1]
init_covars = np.empty(shape=(n_mixtures, n_features, n_features),
                       dtype=float)
for i in range(n_mixtures):
    init_covars[i] = np.eye(n_features)

return(init_priors, init_means, init_covars)
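The resulting parameter shapes for, say, three mixtures over two features (a standalone check, independent of the k-means branch):

import numpy as np

n_mixtures, n_features = 3, 2
priors = np.ones(n_mixtures) / n_mixtures
covars = np.stack([np.eye(n_features)] * n_mixtures)
print(priors.shape, covars.shape)   # (3,) (3, 2, 2)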
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __log_density_single(x, mean, covar): """ This is just a test function to calculate the normal density at x given mean and covariance matrix. Note: this function is not efficient, so _log_multivariate_density is recommended for use. """
n_dim = mean.shape[0]
dx = x - mean
covar_inv = scipy.linalg.inv(covar)
covar_det = scipy.linalg.det(covar)

den = np.dot(np.dot(dx.T, covar_inv), dx) + \
    n_dim*np.log(2*np.pi) + np.log(covar_det)

return(-1/2 * den)
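The quadratic-form computation matches the standard multivariate normal log-density; a quick cross-check against SciPy with illustrative values:

import numpy as np
import scipy.linalg
from scipy.stats import multivariate_normal

x = np.array([1.0, 2.0])
mean = np.array([0.0, 0.0])
covar = np.array([[2.0, 0.3], [0.3, 1.0]])
dx = x - mean
den = (dx @ scipy.linalg.inv(covar) @ dx
       + 2 * np.log(2 * np.pi) + np.log(scipy.linalg.det(covar)))
# both printed values should agree up to floating-point error
print(-0.5 * den, multivariate_normal(mean, covar).logpdf(x))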
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _validate_params(priors, means, covars): """ Validation check for M.L. parameters """
for i, (p, m, cv) in enumerate(zip(priors, means, covars)):
    if np.any(np.isinf(p)) or np.any(np.isnan(p)):
        raise ValueError("Component %d of priors is not valid " % i)
    if np.any(np.isinf(m)) or np.any(np.isnan(m)):
        raise ValueError("Component %d of means is not valid " % i)
    if np.any(np.isinf(cv)) or np.any(np.isnan(cv)):
        raise ValueError("Component %d of covars is not valid " % i)
    if (not np.allclose(cv, cv.T) or
            np.any(scipy.linalg.eigvalsh(cv) <= 0)):
        raise ValueError(
            "Component %d of covars must be positive-definite" % i)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fit(self, X): """ Fit mixture-density parameters with EM algorithm """
params_dict = _fit_gmm_params(X=X, n_mixtures=self.n_clusters,
                              n_init=self.n_trials,
                              init_method=self.init_method,
                              n_iter=self.max_iter, tol=self.tol)
self.priors_ = params_dict['priors']
self.means_ = params_dict['means']
self.covars_ = params_dict['covars']

self.converged = True
self.labels_ = self.predict(X)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _kmeans_init(X, n_clusters, method='balanced', rng=None): """ Initialize k=n_clusters centroids randomly """
n_samples = X.shape[0]
if rng is None:
    cent_idx = np.random.choice(n_samples, replace=False, size=n_clusters)
else:
    # print('Generate random centers using RNG')
    cent_idx = rng.choice(n_samples, replace=False, size=n_clusters)

centers = X[cent_idx, :]
mean_X = np.mean(X, axis=0)

if method == 'balanced':
    centers[n_clusters-1] = n_clusters*mean_X - \
        np.sum(centers[:(n_clusters-1)], axis=0)

return (centers)
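The 'balanced' branch replaces the last centroid so that the centroids' mean equals the data mean; a quick standalone check with random data:

import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(100, 2)
centers = X[rng.choice(100, size=3, replace=False)]
centers[2] = 3 * X.mean(axis=0) - centers[:2].sum(axis=0)
print(np.allclose(centers.mean(axis=0), X.mean(axis=0)))   # True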
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _cal_dist2center(X, center): """ Calculate the SSE to the cluster center """
# sum the standardized Euclidean distances of all members to the center
dmemb2cen = scipy.spatial.distance.cdist(
    X, center.reshape(1, X.shape[1]), metric='seuclidean')
return(np.sum(dmemb2cen))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _kmeans_run(X, n_clusters, max_iter, tol): """ Run a single trial of k-means clustering on dataset X, and given number of clusters """
membs = np.empty(shape=X.shape[0], dtype=int)
centers = _kmeans_init(X, n_clusters)

sse_last = 9999.9
n_iter = 0
for it in range(1, max_iter):
    membs = _assign_clusters(X, centers)
    centers, sse_arr = _update_centers(X, membs, n_clusters)
    sse_total = np.sum(sse_arr)
    if np.abs(sse_total - sse_last) < tol:
        n_iter = it
        break
    sse_last = sse_total

return(centers, membs, sse_total, sse_arr, n_iter)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _kmeans(X, n_clusters, max_iter, n_trials, tol): """ Run multiple trials of k-means clustering, and output the best centers and cluster labels """
n_samples, n_features = X.shape[0], X.shape[1]

centers_best = np.empty(shape=(n_clusters, n_features), dtype=float)
labels_best = np.empty(shape=n_samples, dtype=int)
for i in range(n_trials):
    centers, labels, sse_tot, sse_arr, n_iter = \
        _kmeans_run(X, n_clusters, max_iter, tol)
    if i == 0:
        sse_tot_best = sse_tot
        sse_arr_best = sse_arr
        n_iter_best = n_iter
        centers_best = centers.copy()
        labels_best = labels.copy()
    if sse_tot < sse_tot_best:
        sse_tot_best = sse_tot
        sse_arr_best = sse_arr
        n_iter_best = n_iter
        centers_best = centers.copy()
        labels_best = labels.copy()

return(centers_best, labels_best, sse_arr_best, n_iter_best)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _cut_tree(tree, n_clusters, membs): """ Cut the tree to get the desired number of clusters (n_clusters), where 2 <= n_clusters <= the number of tree leaves """
## starting from root,
## a node is added to the cut_set or
## its children are added to node_set
assert(n_clusters >= 2)
assert(n_clusters <= len(tree.leaves()))

cut_centers = dict()  # np.empty(shape=(n_clusters, ndim), dtype=float)
for i in range(n_clusters-1):
    if i == 0:
        search_set = set(tree.children(0))
        node_set, cut_set = set(), set()
    else:
        search_set = node_set.union(cut_set)
        node_set, cut_set = set(), set()

    if i+2 == n_clusters:
        cut_set = search_set
    else:
        for _ in range(len(search_set)):
            n = search_set.pop()
            if n.data['ilev'] is None or n.data['ilev'] > i+2:
                cut_set.add(n)
            else:
                nid = n.identifier
                if n.data['ilev']-2 == i:
                    node_set = node_set.union(set(tree.children(nid)))

conv_membs = membs.copy()
for node in cut_set:
    nid = node.identifier
    label = node.data['label']
    cut_centers[label] = node.data['center']
    sub_leaves = tree.leaves(nid)
    for leaf in sub_leaves:
        indx = np.where(conv_membs == leaf)[0]
        conv_membs[indx] = nid

return(conv_membs, cut_centers)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_tree_node(tree, label, ilev, X=None, size=None, center=None, sse=None, parent=None): """ Add a node to the tree. If the parent is not known, the node is a root. The nodes of this tree keep the properties of each cluster/subcluster: size --> cluster size as the number of points in the cluster center --> mean of the cluster label --> cluster label sse --> sum-squared-error for that single cluster ilev --> the level at which this node is split into 2 children """
if size is None:
    size = X.shape[0]
if (center is None):
    center = np.mean(X, axis=0)
if (sse is None):
    sse = _kmeans._cal_dist2center(X, center)

center = list(center)
datadict = {
    'size'  : size,
    'center': center,
    'label' : label,
    'sse'   : sse,
    'ilev'  : None
}
if (parent is None):
    tree.create_node(label, label, data=datadict)
else:
    tree.create_node(label, label, parent=parent, data=datadict)
    tree.get_node(parent).data['ilev'] = ilev

return(tree)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _bisect_kmeans(X, n_clusters, n_trials, max_iter, tol): """ Apply Bisecting Kmeans clustering to reach n_clusters number of clusters """
membs = np.empty(shape=X.shape[0], dtype=int) centers = dict() #np.empty(shape=(n_clusters,X.shape[1]), dtype=float) sse_arr = dict() #-1.0*np.ones(shape=n_clusters, dtype=float) ## data structure to store cluster hierarchies tree = treelib.Tree() tree = _add_tree_node(tree, 0, ilev=0, X=X) km = _kmeans.KMeans(n_clusters=2, n_trials=n_trials, max_iter=max_iter, tol=tol) for i in range(1,n_clusters): sel_clust_id,sel_memb_ids = _select_cluster_2_split(membs, tree) X_sub = X[sel_memb_ids,:] km.fit(X_sub) #print("Bisecting Step %d :"%i, sel_clust_id, km.sse_arr_, km.centers_) ## Updating the clusters & properties #sse_arr[[sel_clust_id,i]] = km.sse_arr_ #centers[[sel_clust_id,i]] = km.centers_ tree = _add_tree_node(tree, 2*i-1, i, \ size=np.sum(km.labels_ == 0), center=km.centers_[0], \ sse=km.sse_arr_[0], parent= sel_clust_id) tree = _add_tree_node(tree, 2*i, i, \ size=np.sum(km.labels_ == 1), center=km.centers_[1], \ sse=km.sse_arr_[1], parent= sel_clust_id) pred_labels = km.labels_ pred_labels[np.where(pred_labels == 1)[0]] = 2*i pred_labels[np.where(pred_labels == 0)[0]] = 2*i - 1 #if sel_clust_id == 1: # pred_labels[np.where(pred_labels == 0)[0]] = sel_clust_id # pred_labels[np.where(pred_labels == 1)[0]] = i #else: # pred_labels[np.where(pred_labels == 1)[0]] = i # pred_labels[np.where(pred_labels == 0)[0]] = sel_clust_id membs[sel_memb_ids] = pred_labels for n in tree.leaves(): label = n.data['label'] centers[label] = n.data['center'] sse_arr[label] = n.data['sse'] return(centers, membs, sse_arr, tree)
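A hedged end-to-end sketch tying the bisecting helpers together (it assumes `_bisect_kmeans` and `_cut_tree` as defined above; the data is synthetic):

import numpy as np

X = np.random.rand(200, 3)
# Grow the full hierarchy down to 5 clusters.
centers, membs, sse_arr, tree = _bisect_kmeans(X, n_clusters=5, n_trials=5,
                                               max_iter=100, tol=1e-4)
# The stored tree can be cut back to fewer clusters without refitting.
membs_3, centers_3 = _cut_tree(tree, n_clusters=3, membs=membs)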
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def comparison_table(self, caption=None, label="tab:model_comp", hlines=True, aic=True, bic=True, dic=True, sort="bic", descending=True): # pragma: no cover """ Return a LaTeX-ready table of model comparisons. Parameters caption : str, optional The table caption to insert. label : str, optional The table label to insert. hlines : bool, optional Whether to insert hlines in the table or not. aic : bool, optional Whether to include a column for AICc or not. bic : bool, optional Whether to include a column for BIC or not. dic : bool, optional Whether to include a column for DIC or not. sort : str, optional How to sort the models. Should be one of "bic", "aic" or "dic". descending : bool, optional The sort order. Returns ------- str A LaTeX table to be copied into your document. """
if sort == "bic": assert bic, "You cannot sort by BIC if you turn it off" if sort == "aic": assert aic, "You cannot sort by AIC if you turn it off" if sort == "dic": assert dic, "You cannot sort by DIC if you turn it off" if caption is None: caption = "" if label is None: label = "" base_string = get_latex_table_frame(caption, label) end_text = " \\\\ \n" num_cols = 1 + (1 if aic else 0) + (1 if bic else 0) column_text = "c" * (num_cols + 1) center_text = "" hline_text = "\\hline\n" if hlines: center_text += hline_text center_text += "\tModel" + (" & AIC" if aic else "") + (" & BIC " if bic else "") \ + (" & DIC " if dic else "") + end_text if hlines: center_text += "\t" + hline_text if aic: aics = self.aic() else: aics = np.zeros(len(self.parent.chains)) if bic: bics = self.bic() else: bics = np.zeros(len(self.parent.chains)) if dic: dics = self.dic() else: dics = np.zeros(len(self.parent.chains)) if sort == "bic": to_sort = bics elif sort == "aic": to_sort = aics elif sort == "dic": to_sort = dics else: raise ValueError("sort %s not recognised, must be dic, aic or dic" % sort) good = [i for i, t in enumerate(to_sort) if t is not None] names = [self.parent.chains[g].name for g in good] aics = [aics[g] for g in good] bics = [bics[g] for g in good] to_sort = bics if sort == "bic" else aics indexes = np.argsort(to_sort) if descending: indexes = indexes[::-1] for i in indexes: line = "\t" + names[i] if aic: line += " & %5.1f " % aics[i] if bic: line += " & %5.1f " % bics[i] if dic: line += " & %5.1f " % dics[i] line += end_text center_text += line if hlines: center_text += "\t" + hline_text return base_string % (column_text, center_text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot_walks(self, parameters=None, truth=None, extents=None, display=False, filename=None, chains=None, convolve=None, figsize=None, plot_weights=True, plot_posterior=True, log_weight=None): # pragma: no cover """ Plots the chain walk; the parameter values as a function of step index. This plot is more for a sanity or consistency check than for use with final results. Plotting this before plotting with :func:`plot` allows you to quickly see if the chains are well behaved, or if certain parameters are suspect or require a greater burn-in period. The desired outcome is to see an unchanging distribution along the x-axis of the plot. If there are obvious tails or features in the parameters, you probably want to investigate. Parameters parameters : list[str]|int, optional Specify a subset of parameters to plot. If not set, all parameters are plotted. If an integer is given, only the first so many parameters are plotted. truth : list[float]|dict[str], optional A list of truth values corresponding to parameters, or a dictionary of truth values keyed by the parameter. extents : list[tuple]|dict[str], optional A list of two-tuples for plot extents per parameter, or a dictionary of extents keyed by the parameter. display : bool, optional If set, shows the plot using ``plt.show()`` filename : str, optional If set, saves the figure to the filename chains : int|str, list[str|int], optional Used to specify which chain to show if more than one chain is loaded in. Can be an integer, specifying the chain index, or a str, specifying the chain name. convolve : int, optional If set, overplots a smoothed version of the steps using ``convolve`` as the width of the smoothing filter. figsize : tuple, optional If set, sets the created figure size. plot_weights : bool, optional If true, plots the weights if they are available plot_posterior : bool, optional If true, plots the log posterior if it is available log_weight : bool, optional Whether to display weights in log space or not. If None, the value is inferred by the mean weights of the plotted chains. Returns ------- figure the matplotlib figure created """
chains, parameters, truth, extents, _ = self._sanitise(chains, parameters, truth, extents)

n = len(parameters)
extra = 0
if plot_weights:
    plot_weights = plot_weights and np.any([np.any(c.weights != 1.0) for c in chains])

plot_posterior = plot_posterior and np.any([c.posterior is not None for c in chains])

if plot_weights:
    extra += 1
if plot_posterior:
    extra += 1

if figsize is None:
    figsize = (8, 0.75 + (n + extra))

fig, axes = plt.subplots(figsize=figsize, nrows=n + extra, squeeze=False, sharex=True)

for i, axes_row in enumerate(axes):
    ax = axes_row[0]
    if i >= extra:
        # Parameter rows come after the optional posterior/weight rows.
        p = parameters[i - extra]
        for chain in chains:
            if p in chain.parameters:
                chain_row = chain.get_data(p)
                self._plot_walk(ax, p, chain_row, extents=extents.get(p),
                                convolve=convolve, color=chain.config["color"])
        if truth.get(p) is not None:
            self._plot_walk_truth(ax, truth.get(p))
    else:
        if i == 0 and plot_posterior:
            for chain in chains:
                if chain.posterior is not None:
                    self._plot_walk(ax, r"$\log(P)$", chain.posterior - chain.posterior.max(),
                                    convolve=convolve, color=chain.config["color"])
        else:
            if log_weight is None:
                log_weight = np.any([chain.weights.mean() < 0.1 for chain in chains])
            if log_weight:
                for chain in chains:
                    self._plot_walk(ax, r"$\log_{10}(w)$", np.log10(chain.weights),
                                    convolve=convolve, color=chain.config["color"])
            else:
                for chain in chains:
                    self._plot_walk(ax, "$w$", chain.weights,
                                    convolve=convolve, color=chain.config["color"])

if filename is not None:
    if isinstance(filename, str):
        filename = [filename]
    for f in filename:
        self._save_fig(fig, f, 300)
if display:
    plt.show()

return fig
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gelman_rubin(self, chain=None, threshold=0.05): r""" Runs the Gelman Rubin diagnostic on the supplied chains. Parameters chain : int|str, optional Which chain to run the diagnostic on. By default, this is `None`, which will run the diagnostic on all chains. You can also supply an integer (the chain index) or a string, for the chain name (if you set one). threshold : float, optional The maximum deviation permitted from 1 for the final value :math:`\hat{R}` Returns ------- float whether or not the chains pass the test Notes ----- I follow PyMC in calculating the Gelman-Rubin statistic, where, having :math:`m` chains of length :math:`n`, we compute .. math:: B = \frac{n}{m-1} \sum_{j=1}^{m} \left(\bar{\theta}_{.j} - \bar{\theta}_{..}\right)^2 W = \frac{1}{m} \sum_{j=1}^{m} \left[ \frac{1}{n-1} \sum_{i=1}^{n} \left( \theta_{ij} - \bar{\theta_{.j}}\right)^2 \right] where :math:`\theta` represents each model parameter. We then compute :math:`\hat{V} = \frac{n-1}{n}W + \frac{m+1}{mn}B`, and have our convergence ratio :math:`\hat{R} = \sqrt{\frac{\hat{V}}{W}}`. We check that for all parameters, this ratio deviates from unity by less than the supplied threshold. """
if chain is None: return np.all([self.gelman_rubin(k, threshold=threshold) for k in range(len(self.parent.chains))]) index = self.parent._get_chain(chain) assert len(index) == 1, "Please specify only one chain, have %d chains" % len(index) chain = self.parent.chains[index[0]] num_walkers = chain.walkers parameters = chain.parameters name = chain.name data = chain.chain chains = np.split(data, num_walkers) assert num_walkers > 1, "Cannot run Gelman-Rubin statistic with only one walker" m = 1.0 * len(chains) n = 1.0 * chains[0].shape[0] all_mean = np.mean(data, axis=0) chain_means = np.array([np.mean(c, axis=0) for c in chains]) chain_var = np.array([np.var(c, axis=0, ddof=1) for c in chains]) b = n / (m - 1) * ((chain_means - all_mean)**2).sum(axis=0) w = (1 / m) * chain_var.sum(axis=0) var = (n - 1) * w / n + b / n v = var + b / (n * m) R = np.sqrt(v / w) passed = np.abs(R - 1) < threshold print("Gelman-Rubin Statistic values for chain %s" % name) for p, v, pas in zip(parameters, R, passed): param = "Param %d" % p if isinstance(p, int) else p print("%s: %7.5f (%s)" % (param, v, "Passed" if pas else "Failed")) return np.all(passed)
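For reference, the same R-hat computation as a self-contained function on synthetic walkers (independent of the class above; the numbers are illustrative):

import numpy as np

def rhat(chains):
    # chains: list of equal-length 1-D arrays, one per walker
    m = len(chains)
    n = chains[0].size
    chain_means = np.array([c.mean() for c in chains])
    b = n / (m - 1) * ((chain_means - chain_means.mean()) ** 2).sum()
    w = np.array([c.var(ddof=1) for c in chains]).mean()
    v = (n - 1) * w / n + b / n + b / (n * m)
    return np.sqrt(v / w)

walkers = [np.random.normal(size=1000) for _ in range(4)]
print(rhat(walkers))  # close to 1 for well-mixed chains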
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def geweke(self, chain=None, first=0.1, last=0.5, threshold=0.05): """ Runs the Geweke diagnostic on the supplied chains. Parameters chain : int|str, optional Which chain to run the diagnostic on. By default, this is `None`, which will run the diagnostic on all chains. You can also supply an integer (the chain index) or a string, for the chain name (if you set one). first : float, optional The amount of the start of the chain to use last : float, optional The end amount of the chain to use threshold : float, optional The p-value to use when testing for normality. Returns ------- float whether or not the chains pass the test """
if chain is None:
    return np.all([self.geweke(k, threshold=threshold) for k in range(len(self.parent.chains))])

index = self.parent._get_chain(chain)
assert len(index) == 1, "Please specify only one chain, have %d chains" % len(index)
chain = self.parent.chains[index[0]]

num_walkers = chain.walkers
assert num_walkers is not None and num_walkers > 0, \
    "You need to specify the number of walkers to use the Geweke diagnostic."
name = chain.name
data = chain.chain
chains = np.split(data, num_walkers)
n = 1.0 * chains[0].shape[0]
n_start = int(np.floor(first * n))
n_end = int(np.floor((1 - last) * n))
mean_start = np.array([np.mean(c[:n_start, i])
                       for c in chains for i in range(c.shape[1])])
var_start = np.array([self._spec(c[:n_start, i]) / c[:n_start, i].size
                      for c in chains for i in range(c.shape[1])])
mean_end = np.array([np.mean(c[n_end:, i])
                     for c in chains for i in range(c.shape[1])])
var_end = np.array([self._spec(c[n_end:, i]) / c[n_end:, i].size
                    for c in chains for i in range(c.shape[1])])
zs = (mean_start - mean_end) / (np.sqrt(var_start + var_end))
_, pvalue = normaltest(zs)
print("Geweke Statistic for chain %s has p-value %e" % (name, pvalue))

return pvalue > threshold
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_latex_table(self, parameters=None, transpose=False, caption=None, label="tab:model_params", hlines=True, blank_fill="--"): # pragma: no cover """ Generates a LaTeX table from parameter summaries. Parameters parameters : list[str], optional A list of what parameters to include in the table. By default, includes all parameters transpose : bool, optional Defaults to False, which gives each column as a parameter, each chain (framework) as a row. You can swap it so that you have a parameter each row and a framework each column by setting this to True caption : str, optional If you want to generate a caption for the table through Python, use this. Defaults to an empty string label : str, optional If you want to generate a label for the table through Python, use this. Defaults to an empty string hlines : bool, optional Inserts ``\\hline`` before and after the header, and at the end of table. blank_fill : str, optional If a framework does not have a particular parameter, will fill that cell of the table with this string. Returns ------- str the LaTeX table. """
if parameters is None: parameters = self.parent._all_parameters for p in parameters: assert isinstance(p, str), \ "Generating a LaTeX table requires all parameters have labels" num_parameters = len(parameters) num_chains = len(self.parent.chains) fit_values = self.get_summary(squeeze=False) if label is None: label = "" if caption is None: caption = "" end_text = " \\\\ \n" if transpose: column_text = "c" * (num_chains + 1) else: column_text = "c" * (num_parameters + 1) center_text = "" hline_text = "\\hline\n" if hlines: center_text += hline_text + "\t\t" if transpose: center_text += " & ".join(["Parameter"] + [c.name for c in self.parent.chains]) + end_text if hlines: center_text += "\t\t" + hline_text for p in parameters: arr = ["\t\t" + p] for chain_res in fit_values: if p in chain_res: arr.append(self.get_parameter_text(*chain_res[p], wrap=True)) else: arr.append(blank_fill) center_text += " & ".join(arr) + end_text else: center_text += " & ".join(["Model"] + parameters) + end_text if hlines: center_text += "\t\t" + hline_text for name, chain_res in zip([c.name for c in self.parent.chains], fit_values): arr = ["\t\t" + name] for p in parameters: if p in chain_res: arr.append(self.get_parameter_text(*chain_res[p], wrap=True)) else: arr.append(blank_fill) center_text += " & ".join(arr) + end_text if hlines: center_text += "\t\t" + hline_text final_text = get_latex_table_frame(caption, label) % (column_text, center_text) return final_text
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_summary(self, squeeze=True, parameters=None, chains=None): """ Gets a summary of the marginalised parameter distributions. Parameters squeeze : bool, optional Squeeze the summaries. If you only have one chain, squeeze will not return a length one list, just the single summary. If this is false, you will get a length one list. parameters : list[str], optional A list of parameters which to generate summaries for. chains : list[int|str], optional A list of the chains to get a summary of. Returns ------- list of dictionaries One entry per chain, parameter bounds stored in dictionary with parameter as key """
results = [] if chains is None: chains = self.parent.chains else: if isinstance(chains, (int, str)): chains = [chains] chains = [self.parent.chains[i] for c in chains for i in self.parent._get_chain(c)] for chain in chains: res = {} params_to_find = parameters if parameters is not None else chain.parameters for p in params_to_find: if p not in chain.parameters: continue summary = self.get_parameter_summary(chain, p) res[p] = summary results.append(res) if squeeze and len(results) == 1: return results[0] return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_max_posteriors(self, parameters=None, squeeze=True, chains=None): """ Gets the maximum posterior point in parameter space from the passed parameters. Requires the chains to have set `posterior` values. Parameters parameters : str|list[str] The parameters to find squeeze : bool, optional Squeeze the summaries. If you only have one chain, squeeze will not return a length one list, just the single summary. If this is false, you will get a length one list. chains : list[int|str], optional A list of the chains to get a summary of. Returns ------- list of dictionaries One entry per chain; each dictionary maps parameter name to its value at the maximum posterior point, or None if the chain stores no posterior. """
results = [] if chains is None: chains = self.parent.chains else: if isinstance(chains, (int, str)): chains = [chains] chains = [self.parent.chains[i] for c in chains for i in self.parent._get_chain(c)] if isinstance(parameters, str): parameters = [parameters] for chain in chains: if chain.posterior_max_index is None: results.append(None) continue res = {} params_to_find = parameters if parameters is not None else chain.parameters for p in params_to_find: if p in chain.parameters: res[p] = chain.posterior_max_params[p] results.append(res) if squeeze and len(results) == 1: return results[0] return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_correlations(self, chain=0, parameters=None): """ Takes a chain and returns the correlation between chain parameters. Parameters chain : int|str, optional The chain index or name. Defaults to first chain. parameters : list[str], optional The list of parameters to compute correlations. Defaults to all parameters for the given chain. Returns ------- tuple The first index giving a list of parameter names, the second index being the 2D correlation matrix. """
parameters, cov = self.get_covariance(chain=chain, parameters=parameters) diag = np.sqrt(np.diag(cov)) divisor = diag[None, :] * diag[:, None] correlations = cov / divisor return parameters, correlations
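The covariance-to-correlation step is just a normalisation by the outer product of standard deviations; a small standalone check:

import numpy as np

cov = np.array([[4.0, 1.0],
                [1.0, 9.0]])
diag = np.sqrt(np.diag(cov))                  # standard deviations [2.0, 3.0]
corr = cov / (diag[None, :] * diag[:, None])
print(corr[0, 1])  # 1.0 / (2.0 * 3.0) = 0.1667; diagonal entries are 1.0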
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_covariance(self, chain=0, parameters=None): """ Takes a chain and returns the covariance between chain parameters. Parameters chain : int|str, optional The chain index or name. Defaults to first chain. parameters : list[str], optional The list of parameters to compute correlations. Defaults to all parameters for the given chain. Returns ------- tuple The first index giving a list of parameter names, the second index being the 2D covariance matrix. """
index = self.parent._get_chain(chain) assert len(index) == 1, "Please specify only one chain, have %d chains" % len(index) chain = self.parent.chains[index[0]] if parameters is None: parameters = chain.parameters data = chain.get_data(parameters) cov = np.atleast_2d(np.cov(data, aweights=chain.weights, rowvar=False)) return parameters, cov
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_correlation_table(self, chain=0, parameters=None, caption="Parameter Correlations", label="tab:parameter_correlations"): """ Gets a LaTeX table of parameter correlations. Parameters chain : int|str, optional The chain index or name. Defaults to first chain. parameters : list[str], optional The list of parameters to compute correlations. Defaults to all parameters for the given chain. caption : str, optional The LaTeX table caption. label : str, optional The LaTeX table label. Returns ------- str The LaTeX table ready to go! """
parameters, cor = self.get_correlations(chain=chain, parameters=parameters) return self._get_2d_latex_table(parameters, cor, caption, label)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_covariance_table(self, chain=0, parameters=None, caption="Parameter Covariance", label="tab:parameter_covariance"): """ Gets a LaTeX table of parameter covariance. Parameters chain : int|str, optional The chain index or name. Defaults to first chain. parameters : list[str], optional The list of parameters to compute correlations. Defaults to all parameters for the given chain. caption : str, optional The LaTeX table caption. label : str, optional The LaTeX table label. Returns ------- str The LaTeX table ready to go! """
parameters, cov = self.get_covariance(chain=chain, parameters=parameters) return self._get_2d_latex_table(parameters, cov, caption, label)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_parameter_text(self, lower, maximum, upper, wrap=False): """ Generates LaTeX appropriate text from marginalised parameter bounds. Parameters lower : float The lower bound on the parameter maximum : float The value of the parameter with maximum probability upper : float The upper bound on the parameter wrap : bool Wrap output text in dollar signs for LaTeX Returns ------- str The formatted text given the parameter bounds """
if lower is None or upper is None: return "" upper_error = upper - maximum lower_error = maximum - lower if upper_error != 0 and lower_error != 0: resolution = min(np.floor(np.log10(np.abs(upper_error))), np.floor(np.log10(np.abs(lower_error)))) elif upper_error == 0 and lower_error != 0: resolution = np.floor(np.log10(np.abs(lower_error))) elif upper_error != 0 and lower_error == 0: resolution = np.floor(np.log10(np.abs(upper_error))) else: resolution = np.floor(np.log10(np.abs(maximum))) factor = 0 fmt = "%0.1f" r = 1 if np.abs(resolution) > 2: factor = -resolution if resolution == 2: fmt = "%0.0f" factor = -1 r = 0 if resolution == 1: fmt = "%0.0f" if resolution == -1: fmt = "%0.2f" r = 2 elif resolution == -2: fmt = "%0.3f" r = 3 upper_error *= 10 ** factor lower_error *= 10 ** factor maximum *= 10 ** factor upper_error = round(upper_error, r) lower_error = round(lower_error, r) maximum = round(maximum, r) if maximum == -0.0: maximum = 0.0 if resolution == 2: upper_error *= 10 ** -factor lower_error *= 10 ** -factor maximum *= 10 ** -factor factor = 0 fmt = "%0.0f" upper_error_text = fmt % upper_error lower_error_text = fmt % lower_error if upper_error_text == lower_error_text: text = r"%s\pm %s" % (fmt, "%s") % (maximum, lower_error_text) else: text = r"%s^{+%s}_{-%s}" % (fmt, "%s", "%s") % \ (maximum, upper_error_text, lower_error_text) if factor != 0: text = r"\left( %s \right) \times 10^{%d}" % (text, -factor) if wrap: text = "$%s$" % text return text
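Two hypothetical calls illustrating the formatting behaviour (values chosen by hand; `analysis` stands in for an object exposing the method above):

# Symmetric errors collapse to a plus-minus form:
# analysis.get_parameter_text(0.4, 0.5, 0.6)             -> 0.50\pm 0.10
# Asymmetric errors keep separate superscript/subscript:
# analysis.get_parameter_text(0.4, 0.5, 0.7, wrap=True)  -> $0.50^{+0.20}_{-0.10}$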
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_chain(self, chain=-1): """ Removes a chain from ChainConsumer. Calling this will require any configurations set to be redone! Parameters chain : int|str, list[str|int] The chain(s) to remove. You can pass in either the chain index, or the chain name, to remove it. By default removes the last chain added. Returns ------- ChainConsumer Itself, to allow chaining calls. """
if isinstance(chain, str) or isinstance(chain, int): chain = [chain] chain = sorted([i for c in chain for i in self._get_chain(c)])[::-1] assert len(chain) == len(list(set(chain))), "Error, you are trying to remove a chain more than once." for index in chain: del self.chains[index] seen = set() self._all_parameters = [p for c in self.chains for p in c.parameters if not (p in seen or seen.add(p))] # Need to reconfigure self._init_params() return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def configure_truth(self, **kwargs): # pragma: no cover """ Configure the arguments passed to the ``axvline`` and ``axhline`` methods when plotting truth values. If you do not call this explicitly, the :func:`plot` method will invoke this method automatically. Recommended to set the parameters ``linestyle``, ``color`` and/or ``alpha`` if you want some basic control. Default is to use an opaque black dashed line. Parameters kwargs : dict The keyword arguments to unwrap when calling ``axvline`` and ``axhline``. Returns ------- ChainConsumer Itself, to allow chaining calls. """
if kwargs.get("ls") is None and kwargs.get("linestyle") is None: kwargs["ls"] = "--" kwargs["dashes"] = (3, 3) if kwargs.get("color") is None: kwargs["color"] = "#000000" self.config_truth = kwargs self._configured_truth = True return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def divide_chain(self, chain=0): """ Returns a ChainConsumer instance containing all the walks of a given chain as individual chains themselves. This method might be useful if, for example, your chain was made using MCMC with 4 walkers. To check that the sampling of all 4 walkers agrees, you could call this to get a ChainConsumer instance with one chain for each of the four walks. If you then plot, hopefully all four contours will agree. Parameters chain : int|str, optional The index or name of the chain you want divided Returns ------- ChainConsumer A new ChainConsumer instance with the same settings as the parent instance, containing ``num_walker`` chains. """
indexes = self._get_chain(chain) con = ChainConsumer() for index in indexes: chain = self.chains[index] assert chain.walkers is not None, "The chain you have selected was not added with any walkers!" num_walkers = chain.walkers data = np.split(chain.chain, num_walkers) ws = np.split(chain.weights, num_walkers) for j, (c, w) in enumerate(zip(data, ws)): con.add_chain(c, weights=w, name="Chain %d" % j, parameters=chain.parameters) return con
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def threshold(args): """Calculate motif score threshold for a given FPR."""
if args.fpr < 0 or args.fpr > 1: print("Please specify a FPR between 0 and 1") sys.exit(1) motifs = read_motifs(args.pwmfile) s = Scanner() s.set_motifs(args.pwmfile) s.set_threshold(args.fpr, filename=args.inputfile) print("Motif\tScore\tCutoff") for motif in motifs: min_score = motif.pwm_min_score() max_score = motif.pwm_max_score() opt_score = s.threshold[motif.id] if opt_score is None: opt_score = motif.pwm_max_score() threshold = (opt_score - min_score) / (max_score - min_score) print("{0}\t{1}\t{2}".format( motif.id, opt_score, threshold))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def values_to_labels(fg_vals, bg_vals): """ Convert two arrays of values to an array of labels and an array of scores. Parameters fg_vals : array_like The list of values for the positive set. bg_vals : array_like The list of values for the negative set. Returns ------- y_true : array Labels. y_score : array Values. """
y_true = np.hstack((np.ones(len(fg_vals)), np.zeros(len(bg_vals)))) y_score = np.hstack((fg_vals, bg_vals)) return y_true, y_score
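The label/score arrays produced this way plug directly into scikit-learn metrics; a short check (assuming scikit-learn is available):

import numpy as np
from sklearn.metrics import roc_auc_score

fg_vals = np.array([0.9, 0.8, 0.7])
bg_vals = np.array([0.4, 0.6, 0.2])
y_true, y_score = values_to_labels(fg_vals, bg_vals)
print(roc_auc_score(y_true, y_score))  # 1.0: every fg value exceeds every bg value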
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def max_enrichment(fg_vals, bg_vals, minbg=2): """ Computes the maximum enrichment. Parameters fg_vals : array_like The list of values for the positive set. bg_vals : array_like The list of values for the negative set. minbg : int, optional Minimum number of matches in background. The default is 2. Returns ------- enrichment : float Maximum enrichment. """
scores = np.hstack((fg_vals, bg_vals)) idx = np.argsort(scores) x = np.hstack((np.ones(len(fg_vals)), np.zeros(len(bg_vals)))) xsort = x[idx] l_fg = len(fg_vals) l_bg = len(bg_vals) m = 0 s = 0 for i in range(len(scores), 0, -1): bgcount = float(len(xsort[i:][xsort[i:] == 0])) if bgcount >= minbg: enr = (len(xsort[i:][xsort[i:] == 1]) / l_fg) / (bgcount / l_bg) if enr > m: m = enr s = scores[idx[i]] return m
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def roc_auc_xlim(x_bla, y_bla, xlim=0.1): """ Computes the ROC Area Under Curve up to a given FPR value. Parameters x_bla : array_like list of values for the positive set y_bla : array_like list of values for the negative set xlim : float, optional FPR value up to which the area is computed Returns ------- score : float ROC AUC score """
x = x_bla[:]
y = y_bla[:]

x.sort()
y.sort()
u = {}
for i in x + y:
    u[i] = 1
vals = sorted(u.keys())

len_x = float(len(x))
len_y = float(len(y))

new_x = []
new_y = []

x_p = 0
y_p = 0
# Walk the unique score values from high to low, tracking how many
# positives (x) and negatives (y) score at or above each value.
for val in vals[::-1]:
    while len(x) > 0 and x[-1] >= val:
        x.pop()
        x_p += 1
    while len(y) > 0 and y[-1] >= val:
        y.pop()
        y_p += 1
    new_y.append((len_x - x_p) / len_x)
    new_x.append((len_y - y_p) / len_y)

new_x = 1 - np.array(new_x)
new_y = 1 - np.array(new_y)

x = new_x
y = new_y

if len(x) != len(y):
    raise ValueError("x and y arrays must be of equal length")

if not xlim:
    xlim = 1.0

auc = 0.0
bla = zip(stats.rankdata(x), range(len(x)))
bla = sorted(bla, key=lambda x: x[1])

prev_x = x[bla[0][1]]
prev_y = y[bla[0][1]]
index = 1

# Trapezoidal integration of the ROC curve up to the FPR cutoff.
while index < len(bla) and x[bla[index][1]] <= xlim:
    _, i = bla[index]
    auc += y[i] * (x[i] - prev_x) - ((x[i] - prev_x) * (y[i] - prev_y) / 2.0)
    prev_x = x[i]
    prev_y = y[i]
    index += 1

if index < len(bla):
    (rank, i) = bla[index]
    auc += prev_y * (xlim - prev_x) + ((y[i] - prev_y) / (x[i] - prev_x) * (xlim - prev_x) * (xlim - prev_x) / 2)

return auc
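As an independent sanity check, a partial AUC up to an FPR cutoff can also be computed from scikit-learn's ROC curve with a trapezoidal sum (a sketch under synthetic data, not an exact equivalent of the implementation above):

import numpy as np
from sklearn.metrics import roc_curve

fg = np.random.normal(1, 1, 500)
bg = np.random.normal(0, 1, 500)
y_true = np.hstack((np.ones(len(fg)), np.zeros(len(bg))))
y_score = np.hstack((fg, bg))
fpr, tpr, _ = roc_curve(y_true, y_score)
mask = fpr <= 0.1
pauc = np.trapz(tpr[mask], fpr[mask])  # unnormalised area up to FPR = 0.1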
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def max_fmeasure(fg_vals, bg_vals): """ Computes the maximum F-measure. Parameters fg_vals : array_like The list of values for the positive set. bg_vals : array_like The list of values for the negative set. Returns ------- f : float Maximum f-measure. """
x, y = roc_values(fg_vals, bg_vals) x, y = x[1:], y[1:] # don't include origin p = y / (y + x) filt = np.logical_and((p * y) > 0, (p + y) > 0) p = p[filt] y = y[filt] f = (2 * p * y) / (p + y) if len(f) > 0: #return np.nanmax(f), np.nanmax(y[f == np.nanmax(f)]) return np.nanmax(f) else: return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ks_pvalue(fg_pos, bg_pos=None): """ Computes the Kolmogorov-Smirnov p-value of position distribution. Parameters fg_pos : array_like The list of values for the positive set. bg_pos : array_like, optional The list of values for the negative set. Returns ------- p : float KS p-value. """
if len(fg_pos) == 0: return 1.0 a = np.array(fg_pos, dtype="float") / max(fg_pos) p = kstest(a, "uniform")[1] return p
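A quick standalone illustration of the underlying test (synthetic positions piled up in the first quarter of a region deviate strongly from uniform):

import numpy as np
from scipy.stats import kstest

region = 200.0
pos = np.random.uniform(0, 50, size=100)  # motif hits clustered near the start
print(kstest(pos / region, "uniform")[1])  # very small p-value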
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ks_significance(fg_pos, bg_pos=None): """ Computes the -log10 of Kolmogorov-Smirnov p-value of position distribution. Parameters fg_pos : array_like The list of values for the positive set. bg_pos : array_like, optional The list of values for the negative set. Returns ------- p : float -log10(KS p-value). """
p = ks_pvalue(fg_pos, bg_pos)

if p > 0:
    return -np.log10(p)
else:
    return np.inf
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setup_data(): """Load and shape data for training with Keras + Pescador. Returns ------- input_shape : tuple, len=3 Shape of each sample; adapts to channel configuration of Keras. X_train, y_train : np.ndarrays Images and labels for training. X_test, y_test : np.ndarrays Images and labels for test. """
# The data, shuffled and split between train and test sets (x_train, y_train), (x_test, y_test) = mnist.load_data() if K.image_data_format() == 'channels_first': x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols) x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols) input_shape = (1, img_rows, img_cols) else: x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1) x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1) input_shape = (img_rows, img_cols, 1) x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) return input_shape, (x_train, y_train), (x_test, y_test)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_model(input_shape): """Create a compiled Keras model. Parameters input_shape : tuple, len=3 Shape of each image sample. Returns ------- model : keras.Model Constructed model. """
model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape)) model.add(Conv2D(64, kernel_size=(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation='softmax')) model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) return model
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sampler(X, y): '''A basic generator for sampling data. Parameters X : np.ndarray, len=n_samples, ndim=4 Image data. y : np.ndarray, len=n_samples, ndim=2 One-hot encoded class vectors. Yields ------ data : dict Single image sample, like {X: np.ndarray, y: np.ndarray} '''
X = np.atleast_2d(X)
# y's are binary vectors, and should be of shape (10,) after this.
y = np.atleast_1d(y)

n = X.shape[0]

while True:
    i = np.random.randint(0, n)
    yield {'X': X[i], 'y': y[i]}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def additive_noise(stream, key='X', scale=1e-1): '''Add noise to a data stream. Parameters stream : iterable A stream that yields data objects. key : string, default='X' Name of the field to add noise. scale : float, default=0.1 Scale factor for gaussian noise. Yields ------ data : dict Updated data objects in the stream. '''
for data in stream:
    noise_shape = data[key].shape
    noise = scale * np.random.randn(*noise_shape)

    data[key] = data[key] + noise
    yield data
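A hedged sketch wiring these generators together with Pescador and the Keras model above (it assumes `setup_data`, `build_model`, `sampler` and `additive_noise` as defined in this example; `pescador.Streamer` and `pescador.buffer_stream` are the intended entry points, though exact API names may vary between Pescador versions):

import pescador

input_shape, (x_train, y_train), _ = setup_data()
model = build_model(input_shape)

base = pescador.Streamer(sampler, x_train, y_train)         # sample single images
noisy = pescador.Streamer(additive_noise, base, 'X', 1e-1)  # perturb on the fly
batches = pescador.buffer_stream(noisy, 16)                 # collate into batches

for batch in batches:
    model.train_on_batch(batch['X'], batch['y'])
    break  # illustrative: train on a single batch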
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_denovo_params(user_params=None): """Return default GimmeMotifs parameters. Defaults will be replaced with parameters defined in user_params. Parameters user_params : dict, optional User-defined parameters. Returns ------- params : dict """
config = MotifConfig()

if user_params is None:
    user_params = {}
params = config.get_default_params()
params.update(user_params)

if params.get("torque"):
    logger.debug("Using torque")
else:
    logger.debug("Using multiprocessing")

params["background"] = [x.strip() for x in params["background"].split(",")]

logger.debug("Parameters:")
for param, value in params.items():
    logger.debug("  %s: %s", param, value)

# Maximum time?
if params["max_time"]:
    try:
        params["max_time"] = float(params["max_time"])
    except Exception:
        logger.debug("Could not parse max_time value, setting to no limit")
        params["max_time"] = -1

    if params["max_time"] > 0:
        logger.debug("Time limit for motif prediction: %0.2f hours", params["max_time"])
        params["max_time"] = 3600 * params["max_time"]
        logger.debug("Max_time in seconds %0.0f", params["max_time"])
    else:
        logger.debug("No time limit for motif prediction")

return params
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rankagg(df, method="stuart"): """Return aggregated ranks. Implementation is ported from the RobustRankAggreg R package References: Kolde et al., 2012, DOI: 10.1093/bioinformatics/btr709 Stuart et al., 2003, DOI: 10.1126/science.1087447 Parameters df : pandas.DataFrame DataFrame with values to be ranked and aggregated Returns ------- pandas.DataFrame with aggregated ranks """
rmat = pd.DataFrame(index=df.iloc[:, 0])

step = 1 / rmat.shape[0]
for col in df.columns:
    rmat[col] = pd.DataFrame({col: np.arange(step, 1 + step, step)},
                             index=df[col]).loc[rmat.index]

rmat = rmat.apply(sorted, 1, result_type="expand")
p = rmat.apply(qStuart, 1)
# "holm" spells out the statsmodels method previously abbreviated as "h".
result = pd.DataFrame(
    {"p.adjust": multipletests(p, method="holm")[1]},
    index=rmat.index).sort_values('p.adjust')
return result["p.adjust"]
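A hypothetical input layout: each column is one ranked list of the same identifiers, best hit first. The call is left commented because `qStuart` must be available in scope:

import pandas as pd

df = pd.DataFrame({
    "method1": ["geneA", "geneB", "geneC"],
    "method2": ["geneB", "geneA", "geneC"],
})
# agg = rankagg(df)  # pandas Series of adjusted p-values, indexed by gene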
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def data_gen(n_ops=100): """Yield data, while optionally burning compute cycles. Parameters n_ops : int, default=100 Number of operations to run between yielding data. Yields ------ data : dict An object which looks like it might come from some machine learning problem, with X as features, and y as targets. """
while True: X = np.random.uniform(size=(64, 64)) yield dict(X=costly_function(X, n_ops), y=np.random.randint(10, size=(1,)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mp_calc_stats(motifs, fg_fa, bg_fa, bg_name=None): """Parallel calculation of motif statistics."""
try:
    stats = calc_stats(motifs, fg_fa, bg_fa, ncpus=1)
except Exception as e:
    # Log the failure and return empty statistics instead of leaving
    # unreachable code after a bare raise.
    sys.stderr.write("ERROR: {}\n".format(str(e)))
    stats = {}
if not bg_name:
    bg_name = "default"
return bg_name, stats
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _run_tool(job_name, t, fastafile, params): """Parallel motif prediction."""
try: result = t.run(fastafile, params, mytmpdir()) except Exception as e: result = ([], "", "{} failed to run: {}".format(job_name, e)) return job_name, result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def predict_motifs(infile, bgfile, outfile, params=None, stats_fg=None, stats_bg=None): """ Predict motifs; the input is a FASTA file."""
# Parse parameters required_params = ["tools", "available_tools", "analysis", "genome", "use_strand", "max_time"] if params is None: params = parse_denovo_params() else: for p in required_params: if p not in params: params = parse_denovo_params() break # Define all tools tools = dict( [ (x.strip(), x in [y.strip() for y in params["tools"].split(",")]) for x in params["available_tools"].split(",") ] ) # Predict the motifs analysis = params["analysis"] logger.info("starting motif prediction (%s)", analysis) logger.info("tools: %s", ", ".join([x for x in tools.keys() if tools[x]])) result = pp_predict_motifs( infile, outfile, analysis, params.get("genome", None), params["use_strand"], bgfile, tools, None, #logger=logger, max_time=params["max_time"], stats_fg=stats_fg, stats_bg=stats_bg ) motifs = result.motifs logger.info("predicted %s motifs", len(motifs)) logger.debug("written to %s", outfile) if len(motifs) == 0: logger.info("no motifs found") result.motifs = [] return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_motifs(self, args): """Add motifs to the result object."""
self.lock.acquire()
# Callback function for motif programs
if args is None or len(args) != 2 or len(args[1]) != 3:
    try:
        job = args[0]
        logger.warn("job %s failed", job)
        self.finished.append(job)
    except Exception:
        logger.warn("job failed")
    self.lock.release()
    return

job, (motifs, stdout, stderr) = args
logger.info("%s finished, found %s motifs", job, len(motifs))

for motif in motifs:
    if self.do_counter:
        self.counter += 1
        motif.id = "gimme_{}_".format(self.counter) + motif.id
    f = open(self.outfile, "a")
    f.write("%s\n" % motif.to_pfm())
    f.close()
    self.motifs.append(motif)

if self.do_stats and len(motifs) > 0:
    logger.debug("Starting stats job of %s motifs", len(motifs))
    for bg_name, bg_fa in self.background.items():
        # Use a separate name so the motif job name is not shadowed.
        stat_job = self.job_server.apply_async(
            mp_calc_stats, (motifs, self.fg_fa, bg_fa, bg_name), callback=self.add_stats)
        self.stat_jobs.append(stat_job)

logger.debug("stdout %s: %s", job, stdout)
logger.debug("stderr %s: %s", job, stderr)

self.finished.append(job)
self.lock.release()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wait_for_stats(self): """Make sure all jobs are finished."""
logging.debug("waiting for statistics to finish") for job in self.stat_jobs: job.get() sleep(2)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_stats(self, args): """Callback to add motif statistics."""
bg_name, stats = args logger.debug("Stats: %s %s", bg_name, stats) for motif_id in stats.keys(): if motif_id not in self.stats: self.stats[motif_id] = {} self.stats[motif_id][bg_name] = stats[motif_id]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prepare_denovo_input_narrowpeak(inputfile, params, outdir): """Prepare a narrowPeak file for de novo motif prediction. All regions are resized to the same width, split into a prediction and a validation set, and converted to FASTA. Parameters inputfile : str narrowPeak file with input regions. params : dict Dictionary with parameters. outdir : str Output directory to save files. """
bedfile = os.path.join(outdir, "input.from.narrowpeak.bed") p = re.compile(r'^(#|track|browser)') width = int(params["width"]) logger.info("preparing input (narrowPeak to BED, width %s)", width) warn_no_summit = True with open(bedfile, "w") as f_out: with open(inputfile) as f_in: for line in f_in: if p.search(line): continue vals = line.strip().split("\t") start, end = int(vals[1]), int(vals[2]) summit = int(vals[9]) if summit == -1: if warn_no_summit: logger.warn("No summit present in narrowPeak file, using the peak center.") warn_no_summit = False summit = (end - start) // 2 start = start + summit - (width // 2) end = start + width f_out.write("{}\t{}\t{}\t{}\n".format( vals[0], start, end, vals[6] )) prepare_denovo_input_bed(bedfile, params, outdir)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prepare_denovo_input_bed(inputfile, params, outdir): """Prepare a BED file for de novo motif prediction. All regions are resized to the same width, split into a prediction and a validation set, and converted to FASTA. Parameters inputfile : str BED file with input regions. params : dict Dictionary with parameters. outdir : str Output directory to save files. """
logger.info("preparing input (BED)") # Create BED file with regions of equal size width = int(params["width"]) bedfile = os.path.join(outdir, "input.bed") write_equalwidth_bedfile(inputfile, width, bedfile) abs_max = int(params["abs_max"]) fraction = float(params["fraction"]) pred_bedfile = os.path.join(outdir, "prediction.bed") val_bedfile = os.path.join(outdir, "validation.bed") # Split input into prediction and validation set logger.debug( "Splitting %s into prediction set (%s) and validation set (%s)", bedfile, pred_bedfile, val_bedfile) divide_file(bedfile, pred_bedfile, val_bedfile, fraction, abs_max) config = MotifConfig() genome = Genome(params["genome"]) for infile in [pred_bedfile, val_bedfile]: genome.track2fasta( infile, infile.replace(".bed", ".fa"), ) # Create file for location plots lwidth = int(params["lwidth"]) extend = (lwidth - width) // 2 genome.track2fasta( val_bedfile, os.path.join(outdir, "localization.fa"), extend_up=extend, extend_down=extend, stranded=params["use_strand"], )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_background(bg_type, fafile, outfile, genome="hg18", width=200, nr_times=10, custom_background=None): """Create background of a specific type. Parameters bg_type : str Name of background type. fafile : str Name of input FASTA file. outfile : str Name of output FASTA file. genome : str, optional Genome name. width : int, optional Size of regions. nr_times : int, optional Generate this many times more background sequences than there are input sequences. Returns ------- nr_seqs : int Number of sequences created. """
width = int(width)
config = MotifConfig()
fg = Fasta(fafile)

if bg_type in ["genomic", "gc"]:
    if not genome:
        logger.error("Need a genome to create background")
        sys.exit(1)

if bg_type == "random":
    f = MarkovFasta(fg, k=1, n=nr_times * len(fg))
    logger.debug("Random background: %s", outfile)
elif bg_type == "genomic":
    logger.debug("Creating genomic background")
    f = RandomGenomicFasta(genome, width, nr_times * len(fg))
elif bg_type == "gc":
    logger.debug("Creating GC matched background")
    f = MatchedGcFasta(fafile, genome, nr_times * len(fg))
    logger.debug("GC matched background: %s", outfile)
elif bg_type == "promoter":
    fname = Genome(genome).filename
    gene_file = fname.replace(".fa", ".annotation.bed.gz")
    if not os.path.exists(gene_file):
        gene_file = os.path.join(config.get_gene_dir(), "%s.bed" % genome)
    if not os.path.exists(gene_file):
        print("Could not find a gene file for genome {}".format(genome))
        print("Did you use the --annotation flag for genomepy?")
        print("Alternatively make sure there is a file called {}.bed in {}".format(
            genome, config.get_gene_dir()))
        raise ValueError()

    logger.info(
        "Creating random promoter background (%s, using genes in %s)",
        genome, gene_file)
    f = PromoterFasta(gene_file, genome, width, nr_times * len(fg))
    logger.debug("Random promoter background: %s", outfile)
elif bg_type == "custom":
    bg_file = custom_background
    if not bg_file:
        raise IOError("Background file not specified!")

    if not os.path.exists(bg_file):
        raise IOError("Custom background file %s does not exist!" % bg_file)
    else:
        logger.info("Copying custom background file %s to %s.", bg_file, outfile)
        f = Fasta(bg_file)
        median_length = np.median([len(seq) for seq in f.seqs])
        if median_length < (width * 0.95) or median_length > (width * 1.05):
            logger.warn(
                "The custom background file %s contains sequences with a "
                "median length of %s, while GimmeMotifs predicts motifs in sequences "
                "of length %s. This will influence the statistics! It is recommended "
                "to use background sequences of the same length.",
                bg_file, median_length, width)

f.writefasta(outfile)
return len(f)
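A hedged call example (the file and genome names are hypothetical and must exist locally, so the call is left commented):

# n = create_background("gc", "input.fa", "background.gc.fa",
#                       genome="hg38", width=200, nr_times=10)
# print("wrote {} background sequences".format(n))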
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_backgrounds(outdir, background=None, genome="hg38", width=200, custom_background=None): """Create different backgrounds for motif prediction and validation. Parameters outdir : str Directory to save results. background : list, optional Background types to create, default is 'random'. genome : str, optional Genome name (for genomic and gc backgrounds). width : int, optional Size of background regions Returns ------- bg_info : dict Keys: background name, values: file name. """
if background is None:
    background = ["random"]

nr_sequences = {}

# Create background for motif prediction
if "gc" in background:
    pred_bg = "gc"
else:
    pred_bg = background[0]

create_background(
    pred_bg,
    os.path.join(outdir, "prediction.fa"),
    os.path.join(outdir, "prediction.bg.fa"),
    genome=genome,
    width=width,
    custom_background=custom_background)

# Get background fasta files for statistics
bg_info = {}
for bg in background:
    fname = os.path.join(outdir, "bg.{}.fa".format(bg))
    nr_sequences[bg] = create_background(
        bg,
        os.path.join(outdir, "validation.fa"),
        fname,
        genome=genome,
        width=width,
        custom_background=custom_background)
    bg_info[bg] = fname
return bg_info
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter_significant_motifs(fname, result, bg, metrics=None): """Filter significant motifs based on several statistics. Parameters fname : str Filename of output file where significant motifs will be saved. result : PredictionResult instance Contains motifs and associated statistics. bg : str Name of background type to use. metrics : sequence Metric with associated minimum values. The default is (("max_enrichment", 3), ("roc_auc", 0.55), ("enr_at_fpr", 0.55)) Returns ------- motifs : list List of Motif instances. """
sig_motifs = [] with open(fname, "w") as f: for motif in result.motifs: stats = result.stats.get( "%s_%s" % (motif.id, motif.to_consensus()), {}).get(bg, {} ) if _is_significant(stats, metrics): f.write("%s\n" % motif.to_pfm()) sig_motifs.append(motif) logger.info("%s motifs are significant", len(sig_motifs)) logger.debug("written to %s", fname) return sig_motifs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def best_motif_in_cluster(single_pwm, clus_pwm, clusters, fg_fa, background, stats=None, metrics=("roc_auc", "recall_at_fdr")): """Return the best motif per cluster for a clustering result. The motif can be either the average motif or one of the clustered motifs. Parameters single_pwm : str Filename of motifs. clus_pwm : str Filename of motifs. clusters : Motif clustering result. fg_fa : str Filename of FASTA file. background : dict Dictionary for background file names. stats : dict, optional If statistics are not supplied they will be computed. metrics : sequence, optional Metrics to use for motif evaluation. Defaults are "roc_auc" and "recall_at_fdr". Returns ------- motifs : list List of Motif instances. """
# combine original and clustered motifs motifs = read_motifs(single_pwm) + read_motifs(clus_pwm) motifs = dict([(str(m), m) for m in motifs]) # get the statistics for those motifs that were not yet checked clustered_motifs = [] for clus,singles in clusters: for motif in set([clus] + singles): if str(motif) not in stats: clustered_motifs.append(motifs[str(motif)]) new_stats = {} for bg, bg_fa in background.items(): for m,s in calc_stats(clustered_motifs, fg_fa, bg_fa).items(): if m not in new_stats: new_stats[m] = {} new_stats[m][bg] = s stats.update(new_stats) rank = rank_motifs(stats, metrics) # rank the motifs best_motifs = [] for clus, singles in clusters: if len(singles) > 1: eval_motifs = singles if clus not in motifs: eval_motifs.append(clus) eval_motifs = [motifs[str(e)] for e in eval_motifs] best_motif = sorted(eval_motifs, key=lambda x: rank[str(x)])[-1] best_motifs.append(best_motif) else: best_motifs.append(clus) for bg in background: stats[str(best_motifs[-1])][bg]["num_cluster"] = len(singles) best_motifs = sorted(best_motifs, key=lambda x: rank[str(x)], reverse=True) return best_motifs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rename_motifs(motifs, stats=None): """Rename motifs to GimmeMotifs_1..GimmeMotifs_N. If stats object is passed, stats will be copied."""
final_motifs = [] for i, motif in enumerate(motifs): old = str(motif) motif.id = "GimmeMotifs_{}".format(i + 1) final_motifs.append(motif) if stats: stats[str(motif)] = stats[old].copy() if stats: return final_motifs, stats else: return final_motifs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_db(cls, dbname): """Register method to keep list of dbs."""
def decorator(subclass): """Register as decorator function.""" cls._dbs[dbname] = subclass subclass.name = dbname return subclass return decorator
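A minimal self-contained sketch of how such a registry decorator is typically used (the class and database name here are hypothetical, not from the source):

class MotifDb(object):
    """Base class keeping a registry of database implementations."""
    _dbs = {}
    name = None

    @classmethod
    def register_db(cls, dbname):
        """Register method to keep list of dbs."""
        def decorator(subclass):
            """Register as decorator function."""
            cls._dbs[dbname] = subclass
            subclass.name = dbname
            return subclass
        return decorator

@MotifDb.register_db("jaspar")  # hypothetical database name
class JasparDb(MotifDb):
    pass

print(MotifDb._dbs["jaspar"].name)  # -> 'jaspar'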