text_prompt: string, lengths 100 to 17.7k
code_prompt: string, lengths 7 to 9.86k
<SYSTEM_TASK:>
Appends the given text line with prefixed spaces in accordance with
<END_TASK>
<USER_TASK:>
Description:
def add(self, indent, line):
    """Appends the given text line with prefixed spaces in accordance
    with the given number of indentation levels.
    """
    if isinstance(line, str):
        list.append(self, indent*4*' ' + line)
    else:
        for subline in line:
            list.append(self, indent*4*' ' + subline)
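A minimal usage sketch derived from the method body above (assuming `Lines` subclasses `list` and can be instantiated without arguments, as the `list.append` calls and the `Lines(*sourcefiles)` call further below suggest):

>>> lines = Lines()
>>> lines.add(1, 'x = 1.0')
>>> lines.add(2, ['y = 2.0', 'z = 3.0'])
>>> print(lines[0])
    x = 1.0
>>> print(lines[2])
        z = 3.0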
<SYSTEM_TASK:>
Name of the compiled module.
<END_TASK>
<USER_TASK:>
Description:
def pyname(self):
    """Name of the compiled module."""
    if self.pymodule.endswith('__init__'):
        return self.pymodule.split('.')[-2]
    else:
        return self.pymodule.split('.')[-1]
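For orientation, the two branches reduce to plain string splitting; a quick sketch with hypothetical module names:

>>> 'hydpy.models.hland.__init__'.split('.')[-2]
'hland'
>>> 'hydpy.models.hland.hland_model'.split('.')[-1]
'hland_model'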
<SYSTEM_TASK:>
All source files of the actual model's Python classes and their
<END_TASK>
<USER_TASK:>
Description:
def pysourcefiles(self):
    """All source files of the actual model's Python classes and their
    respective base classes."""
    sourcefiles = set()
    for (name, child) in vars(self).items():
        try:
            parents = inspect.getmro(child)
        except AttributeError:
            continue
        for parent in parents:
            try:
                sourcefile = inspect.getfile(parent)
            except TypeError:
                break
            sourcefiles.add(sourcefile)
    return Lines(*sourcefiles)
<SYSTEM_TASK:>
True if at least one of the |Cythonizer.pysourcefiles| is
<END_TASK>
<USER_TASK:>
Description:
def outdated(self):
    """True if at least one of the |Cythonizer.pysourcefiles| is newer
    than the compiled file under |Cythonizer.pyxfilepath|, otherwise
    False.
    """
    if hydpy.pub.options.forcecompiling:
        return True
    if os.path.split(hydpy.__path__[0])[-2].endswith('-packages'):
        return False
    if not os.path.exists(self.dllfilepath):
        return True
    cydate = os.stat(self.dllfilepath).st_mtime
    for pysourcefile in self.pysourcefiles:
        pydate = os.stat(pysourcefile).st_mtime
        if pydate > cydate:
            return True
    return False
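The time stamp comparison above can be summarised in a standalone sketch (a hypothetical helper, not part of HydPy):

import os

def is_outdated(dllfilepath, pysourcefiles):
    """Compiled file missing or older than any of its source files?"""
    if not os.path.exists(dllfilepath):
        return True
    cydate = os.stat(dllfilepath).st_mtime
    return any(os.stat(path).st_mtime > cydate for path in pysourcefiles)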
<SYSTEM_TASK:>
Translate cython code to C code and compile it.
<END_TASK>
<USER_TASK:>
Description:
def compile_(self):
    """Translate cython code to C code and compile it."""
    from Cython import Build
    argv = copy.deepcopy(sys.argv)
    sys.argv = [sys.argv[0], 'build_ext', '--build-lib='+self.buildpath]
    exc_modules = [
        distutils.extension.Extension(
            'hydpy.cythons.autogen.'+self.cyname,
            [self.pyxfilepath],
            extra_compile_args=['-O2'])]
    distutils.core.setup(ext_modules=Build.cythonize(exc_modules),
                         include_dirs=[numpy.get_include()])
    sys.argv = argv
<SYSTEM_TASK:>
Try to find the resulting dll file and to move it into the
<END_TASK>
<USER_TASK:>
Description:
def move_dll(self):
    """Try to find the resulting dll file and to move it into the
    `cythons` package.

    Things to be aware of:
      * The file extension is either `pyd` (Windows) or `so` (Linux).
      * The folder containing the dll file is system dependent, but is
        always a subfolder of the `cythons` package.
      * Under Linux, the filename might contain system information, e.g.
        ...cpython-36m-x86_64-linux-gnu.so.
    """
    dirinfos = os.walk(self.buildpath)
    next(dirinfos)
    system_dependent_filename = None
    for dirinfo in dirinfos:
        for filename in dirinfo[2]:
            if (filename.startswith(self.cyname) and
                    filename.endswith(dllextension)):
                system_dependent_filename = filename
                break
        if system_dependent_filename:
            try:
                shutil.move(os.path.join(dirinfo[0],
                                         system_dependent_filename),
                            os.path.join(self.cydirpath,
                                         self.cyname+dllextension))
                break
            except BaseException:
                prefix = ('After trying to cythonize module %s, when '
                          'trying to move the final cython module %s '
                          'from directory %s to directory %s'
                          % (self.pyname, system_dependent_filename,
                             self.buildpath, self.cydirpath))
                suffix = ('A likely error cause is that the cython module '
                          '%s does already exist in this directory and is '
                          'currently blocked by another Python process. '
                          'Maybe it helps to close all Python processes '
                          'and restart the cythonization afterwards.'
                          % (self.cyname+dllextension))
                objecttools.augment_excmessage(prefix, suffix)
    else:
        raise IOError('After trying to cythonize module %s, the resulting '
                      'file %s could neither be found in directory %s nor '
                      'its subdirectories. The distutils report should tell '
                      'whether the file has been stored somewhere else, '
                      'is named somehow else, or could not be built at '
                      'all.'
                      % (self.pyname, self.cyname+dllextension,
                         self.buildpath))
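The else branch of the outer loop above is a for-else clause: it runs only when the loop finishes without a break. A minimal standalone sketch of that search-or-raise pattern (hypothetical names, not HydPy's API):

def find_built_file(filenames, prefix, suffix):
    """Return the first matching filename or raise an IOError."""
    for filename in filenames:
        if filename.startswith(prefix) and filename.endswith(suffix):
            break
    else:
        raise IOError('no file matching %s*%s found' % (prefix, suffix))
    return filename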
<SYSTEM_TASK:>
Special declaration lines for the given |IOSequence| object.
<END_TASK>
<USER_TASK:>
Description:
def iosequence(seq):
    """Special declaration lines for the given |IOSequence| object."""
    lines = Lines()
    lines.add(1, 'cdef public bint _%s_diskflag' % seq.name)
    lines.add(1, 'cdef public str _%s_path' % seq.name)
    lines.add(1, 'cdef FILE *_%s_file' % seq.name)
    lines.add(1, 'cdef public bint _%s_ramflag' % seq.name)
    ctype = 'double' + NDIM2STR[seq.NDIM+1]
    lines.add(1, 'cdef public %s _%s_array' % (ctype, seq.name))
    return lines
<SYSTEM_TASK:>
Set_pointer functions for link sequences.
<END_TASK>
<USER_TASK:>
Description:
def set_pointer(self, subseqs):
    """Set_pointer functions for link sequences."""
    lines = Lines()
    for seq in subseqs:
        if seq.NDIM == 0:
            lines.extend(self.set_pointer0d(subseqs))
            break
    for seq in subseqs:
        if seq.NDIM == 1:
            lines.extend(self.alloc(subseqs))
            lines.extend(self.dealloc(subseqs))
            lines.extend(self.set_pointer1d(subseqs))
            break
    return lines
<SYSTEM_TASK:>
Allocate memory for 1-dimensional link sequences.
<END_TASK>
<USER_TASK:>
Description:
def alloc(subseqs):
    """Allocate memory for 1-dimensional link sequences."""
    print(' . setlength')
    lines = Lines()
    lines.add(1, 'cpdef inline alloc(self, name, int length):')
    for seq in subseqs:
        lines.add(2, 'if name == "%s":' % seq.name)
        lines.add(3, 'self._%s_length_0 = length' % seq.name)
        lines.add(3, 'self.%s = <double**> '
                     'PyMem_Malloc(length * sizeof(double*))' % seq.name)
    return lines
<SYSTEM_TASK:>
Deallocate memory for 1-dimensional link sequences.
<END_TASK>
<USER_TASK:>
Description:
def dealloc(subseqs):
    """Deallocate memory for 1-dimensional link sequences."""
    print(' . dealloc')
    lines = Lines()
    lines.add(1, 'cpdef inline dealloc(self):')
    for seq in subseqs:
        lines.add(2, 'PyMem_Free(self.%s)' % seq.name)
    return lines
<SYSTEM_TASK:>
Numeric parameter declaration lines.
<END_TASK>
<USER_TASK:>
Description:
def numericalparameters(self):
    """Numeric parameter declaration lines."""
    lines = Lines()
    if self.model.NUMERICAL:
        lines.add(0, '@cython.final')
        lines.add(0, 'cdef class NumConsts(object):')
        for name in ('nmb_methods', 'nmb_stages'):
            lines.add(1, 'cdef public %s %s' % (TYPE2STR[int], name))
        for name in ('dt_increase', 'dt_decrease'):
            lines.add(1, 'cdef public %s %s' % (TYPE2STR[float], name))
        lines.add(1, 'cdef public configutils.Config pub')
        lines.add(1, 'cdef public double[:, :, :] a_coefs')
        lines.add(0, 'cdef class NumVars(object):')
        for name in ('nmb_calls', 'idx_method', 'idx_stage'):
            lines.add(1, 'cdef public %s %s' % (TYPE2STR[int], name))
        for name in ('t0', 't1', 'dt', 'dt_est',
                     'error', 'last_error', 'extrapolated_error'):
            lines.add(1, 'cdef public %s %s' % (TYPE2STR[float], name))
        lines.add(1, 'cdef public %s f0_ready' % TYPE2STR[bool])
    return lines
<SYSTEM_TASK:>
Attribute declarations of the model class.
<END_TASK>
<USER_TASK:>
Description:
def modeldeclarations(self):
    """Attribute declarations of the model class."""
    lines = Lines()
    lines.add(0, '@cython.final')
    lines.add(0, 'cdef class Model(object):')
    lines.add(1, 'cdef public int idx_sim')
    lines.add(1, 'cdef public Parameters parameters')
    lines.add(1, 'cdef public Sequences sequences')
    if hasattr(self.model, 'numconsts'):
        lines.add(1, 'cdef public NumConsts numconsts')
    if hasattr(self.model, 'numvars'):
        lines.add(1, 'cdef public NumVars numvars')
    return lines
<SYSTEM_TASK:>
Standard functions of the model class.
<END_TASK>
<USER_TASK:>
Description:
def modelstandardfunctions(self):
    """Standard functions of the model class."""
    lines = Lines()
    lines.extend(self.doit)
    lines.extend(self.iofunctions)
    lines.extend(self.new2old)
    lines.extend(self.run)
    lines.extend(self.update_inlets)
    lines.extend(self.update_outlets)
    lines.extend(self.update_receivers)
    lines.extend(self.update_senders)
    return lines
<SYSTEM_TASK:>
Numerical functions of the model class.
<END_TASK>
<USER_TASK:>
Description:
def modelnumericfunctions(self):
    """Numerical functions of the model class."""
    lines = Lines()
    lines.extend(self.solve)
    lines.extend(self.calculate_single_terms)
    lines.extend(self.calculate_full_terms)
    lines.extend(self.get_point_states)
    lines.extend(self.set_point_states)
    lines.extend(self.set_result_states)
    lines.extend(self.get_sum_fluxes)
    lines.extend(self.set_point_fluxes)
    lines.extend(self.set_result_fluxes)
    lines.extend(self.integrate_fluxes)
    lines.extend(self.reset_sum_fluxes)
    lines.extend(self.addup_fluxes)
    lines.extend(self.calculate_error)
    lines.extend(self.extrapolate_error)
    return lines
<SYSTEM_TASK:>
Lines of model method with the same name.
<END_TASK>
<USER_TASK:>
Description:
def calculate_single_terms(self):
    """Lines of model method with the same name."""
    lines = self._call_methods('calculate_single_terms',
                               self.model.PART_ODE_METHODS)
    if lines:
        lines.insert(1, (' self.numvars.nmb_calls ='
                         'self.numvars.nmb_calls+1'))
    return lines
<SYSTEM_TASK:>
User functions of the model class.
<END_TASK>
<USER_TASK:>
Description:
def listofmodeluserfunctions(self):
    """User functions of the model class."""
    lines = []
    for (name, member) in vars(self.model.__class__).items():
        if (inspect.isfunction(member) and
                (name not in ('run', 'new2old')) and
                ('fastaccess' in inspect.getsource(member))):
            lines.append((name, member))
    run = vars(self.model.__class__).get('run')
    if run is not None:
        lines.append(('run', run))
    for (name, member) in vars(self.model).items():
        if (inspect.ismethod(member) and
                ('fastaccess' in inspect.getsource(member))):
            lines.append((name, member))
    return lines
<SYSTEM_TASK:> r"""Remove line breaks within equations. <END_TASK> <USER_TASK:> Description: def remove_linebreaks_within_equations(code): r"""Remove line breaks within equations. This is not a exhaustive test, but shows how the method works: >>> code = 'asdf = \\\n(a\n+b)' >>> from hydpy.cythons.modelutils import FuncConverter >>> FuncConverter.remove_linebreaks_within_equations(code) 'asdf = (a+b)' """
    code = code.replace('\\\n', '')
    chars = []
    counter = 0
    for char in code:
        if char in ('(', '[', '{'):
            counter += 1
        elif char in (')', ']', '}'):
            counter -= 1
        if not (counter and (char == '\n')):
            chars.append(char)
    return ''.join(chars)
<SYSTEM_TASK:>
Remove mathematical expressions that require Python's global
<END_TASK>
<USER_TASK:>
Description:
def remove_imath_operators(lines):
    """Remove mathematical expressions that require Python's global
    interpreter locking mechanism.

    This is not an exhaustive test, but shows how the method works:

    >>> lines = [' x += 1*1']
    >>> from hydpy.cythons.modelutils import FuncConverter
    >>> FuncConverter.remove_imath_operators(lines)
    >>> lines
    [' x = x + (1*1)']
    """
    for idx, line in enumerate(lines):
        for operator in ('+=', '-=', '**=', '*=', '//=', '/=', '%='):
            sublines = line.split(operator)
            if len(sublines) > 1:
                indent = line.count(' ') - line.lstrip().count(' ')
                sublines = [sl.strip() for sl in sublines]
                line = ('%s%s = %s %s (%s)'
                        % (indent*' ', sublines[0], sublines[0],
                           operator[:-1], sublines[1]))
                lines[idx] = line
<SYSTEM_TASK:>
Cython code lines.
<END_TASK>
<USER_TASK:>
Description:
def pyxlines(self):
    """Cython code lines.

    Assumptions:
      * Function shall be a method
      * Method shall be inlined
      * Method returns nothing
      * Method arguments are of type `int` (except self)
      * Local variables are generally of type `int` but of type
        `double` when their name starts with `d_`
    """
    lines = [' '+line for line in self.cleanlines]
    lines[0] = lines[0].replace('def ', 'cpdef inline void ')
    lines[0] = lines[0].replace('):', ') %s:' % _nogil)
    for name in self.untypedarguments:
        lines[0] = lines[0].replace(', %s ' % name, ', int %s ' % name)
        lines[0] = lines[0].replace(', %s)' % name, ', int %s)' % name)
    for name in self.untypedinternalvarnames:
        if name.startswith('d_'):
            lines.insert(1, ' cdef double ' + name)
        else:
            lines.insert(1, ' cdef int ' + name)
    return Lines(*lines)
<SYSTEM_TASK:>
Return the smoothing parameter corresponding to the given meta
<END_TASK>
<USER_TASK:>
Description:
def calc_smoothpar_logistic2(metapar):
    """Return the smoothing parameter corresponding to the given meta
    parameter when using |smooth_logistic2|.

    Calculate the smoothing parameter value corresponding to the meta
    parameter value 2.5:

    >>> from hydpy.auxs.smoothtools import calc_smoothpar_logistic2
    >>> smoothpar = calc_smoothpar_logistic2(2.5)

    Using this smoothing parameter value, the output of function
    |smooth_logistic2| differs by 1 % from the related `true`
    discontinuous step function for the input values -2.5 and 2.5
    (which are located at a distance of 2.5 from the position of the
    discontinuity):

    >>> from hydpy.cythons import smoothutils
    >>> from hydpy import round_
    >>> round_(smoothutils.smooth_logistic2(-2.5, smoothpar))
    0.01
    >>> round_(smoothutils.smooth_logistic2(2.5, smoothpar))
    2.51

    For zero or negative meta parameter values, a zero smoothing
    parameter value is returned:

    >>> round_(calc_smoothpar_logistic2(0.0))
    0.0
    >>> round_(calc_smoothpar_logistic2(-1.0))
    0.0
    """
    if metapar <= 0.:
        return 0.
    return optimize.newton(_error_smoothpar_logistic2,
                           .3 * metapar**.84,
                           _smooth_logistic2_derivative,
                           args=(metapar,))
<SYSTEM_TASK:>
Return a |Date| object representing the reference date of the
<END_TASK>
<USER_TASK:>
Description:
def from_cfunits(cls, units) -> 'Date':
    """Return a |Date| object representing the reference date of the
    given `units` string agreeing with the NetCDF-CF conventions.

    The following example string is taken from the `Time Coordinate`_
    chapter of the NetCDF-CF conventions documentation (modified).
    Note that the first entry (the unit) is ignored:

    >>> from hydpy import Date
    >>> Date.from_cfunits('seconds since 1992-10-8 15:15:42 -6:00')
    Date('1992-10-08 22:15:42')
    >>> Date.from_cfunits(' day since 1992-10-8 15:15:00')
    Date('1992-10-08 15:15:00')
    >>> Date.from_cfunits('seconds since 1992-10-8 -6:00')
    Date('1992-10-08 07:00:00')
    >>> Date.from_cfunits('m since 1992-10-8')
    Date('1992-10-08 00:00:00')

    Without modification, when "0" is included as the decimal fraction
    of a second, the example string from `Time Coordinate`_ can also be
    passed. However, fractions different from "0" result in an error:

    >>> Date.from_cfunits('seconds since 1992-10-8 15:15:42.')
    Date('1992-10-08 15:15:42')
    >>> Date.from_cfunits('seconds since 1992-10-8 15:15:42.00')
    Date('1992-10-08 15:15:42')
    >>> Date.from_cfunits('seconds since 1992-10-8 15:15:42. -6:00')
    Date('1992-10-08 22:15:42')
    >>> Date.from_cfunits('seconds since 1992-10-8 15:15:42.0 -6:00')
    Date('1992-10-08 22:15:42')
    >>> Date.from_cfunits('seconds since 1992-10-8 15:15:42.005 -6:00')
    Traceback (most recent call last):
    ...
    ValueError: While trying to parse the date of the NetCDF-CF "units" \
string `seconds since 1992-10-8 15:15:42.005 -6:00`, the following error \
occurred: No other decimal fraction of a second than "0" allowed.
    """
    try:
        string = units[units.find('since')+6:]
        idx = string.find('.')
        if idx != -1:
            jdx = None
            for jdx, char in enumerate(string[idx+1:]):
                if not char.isnumeric():
                    break
                if char != '0':
                    raise ValueError(
                        'No other decimal fraction of a second '
                        'than "0" allowed.')
            else:
                if jdx is None:
                    jdx = idx+1
                else:
                    jdx += 1
            string = f'{string[:idx]}{string[idx+jdx+1:]}'
        return cls(string)
    except BaseException:
        objecttools.augment_excmessage(
            f'While trying to parse the date of the NetCDF-CF "units" '
            f'string `{units}`')
<SYSTEM_TASK:>
Return a `units` string agreeing with the NetCDF-CF conventions.
<END_TASK>
<USER_TASK:>
Description:
def to_cfunits(self, unit='hours', utcoffset=None):
    """Return a `units` string agreeing with the NetCDF-CF conventions.

    By default, |Date.to_cfunits| takes `hours` as time unit, and the
    actual value of |Options.utcoffset| as time zone information:

    >>> from hydpy import Date
    >>> date = Date('1992-10-08 15:15:42')
    >>> date.to_cfunits()
    'hours since 1992-10-08 15:15:42 +01:00'

    Other time units are allowed (no checks are performed, so select
    something useful):

    >>> date.to_cfunits(unit='minutes')
    'minutes since 1992-10-08 15:15:42 +01:00'

    For changing the time zone, pass the corresponding offset in minutes:

    >>> date.to_cfunits(unit='sec', utcoffset=-60)
    'sec since 1992-10-08 13:15:42 -01:00'
    """
    if utcoffset is None:
        utcoffset = hydpy.pub.options.utcoffset
    string = self.to_string('iso2', utcoffset)
    string = ' '.join((string[:-6], string[-6:]))
    return f'{unit} since {string}'
<SYSTEM_TASK:>
The actual hydrological year according to the selected
<END_TASK>
<USER_TASK:>
Description:
def wateryear(self):
    """The actual hydrological year according to the selected
    reference month.

    The reference month |Date.refmonth| defaults to November:

    >>> october = Date('1996.10.01')
    >>> november = Date('1996.11.01')
    >>> october.wateryear
    1996
    >>> november.wateryear
    1997

    Note that changing |Date.refmonth| affects all |Date| objects:

    >>> october.refmonth = 10
    >>> october.wateryear
    1997
    >>> november.wateryear
    1997
    >>> october.refmonth = 'November'
    >>> october.wateryear
    1996
    >>> november.wateryear
    1997
    """
    if self.month < self._firstmonth_wateryear:
        return self.year
    return self.year + 1
<SYSTEM_TASK:>
Return a |Period| instance based on a given number of seconds.
<END_TASK>
<USER_TASK:>
Description:
def fromseconds(cls, seconds):
    """Return a |Period| instance based on a given number of seconds."""
    try:
        seconds = int(seconds)
    except TypeError:
        seconds = int(seconds.flatten()[0])
    return cls(datetime.timedelta(0, int(seconds)))
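For orientation, the conversion wraps a plain standard-library call; a quick sketch outside HydPy (the `except TypeError` branch above additionally covers array-like inputs by taking their first element):

>>> import datetime
>>> print(datetime.timedelta(0, int(5400)))
1:30:00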
<SYSTEM_TASK:>
Guess the unit of the period as the largest one, which results in
<END_TASK>
<USER_TASK:>
Description:
def _guessunit(self):
    """Guess the unit of the period as the largest one, which results in
    an integer duration.
    """
    if not self.days % 1:
        return 'd'
    elif not self.hours % 1:
        return 'h'
    elif not self.minutes % 1:
        return 'm'
    elif not self.seconds % 1:
        return 's'
    else:
        raise ValueError(
            'The stepsize is not a multiple of one '
            'second, which is not allowed.')
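The checks work on fractional durations; a plain-Python illustration of the same idea for a 5400-second period (90 minutes), independent of the |Period| class:

>>> seconds = 5400
>>> seconds / 86400 % 1  # days -> fractional, so not 'd'
0.0625
>>> seconds / 3600 % 1  # hours -> fractional, so not 'h'
0.5
>>> seconds / 60 % 1  # minutes -> integral, so the guessed unit is 'm'
0.0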
<SYSTEM_TASK:>
Returns a |Timegrid| instance based on two date and one period
<END_TASK>
<USER_TASK:>
Description:
def from_array(cls, array):
    """Returns a |Timegrid| instance based on two date and one period
    information stored in the first 13 rows of a |numpy.ndarray| object.
    """
    try:
        return cls(Date.from_array(array[:6]),
                   Date.from_array(array[6:12]),
                   Period.fromseconds(array[12]))
    except IndexError:
        raise IndexError(
            f'To define a Timegrid instance via an array, 13 '
            f'numbers are required. However, the given array '
            f'consist of {len(array)} entries/rows only.')
<SYSTEM_TASK:>
Returns a 1-dimensional |numpy| |numpy.ndarray| with thirteen
<END_TASK>
<USER_TASK:>
Description:
def to_array(self):
    """Returns a 1-dimensional |numpy| |numpy.ndarray| with thirteen
    entries first defining the start date, secondly defining the end
    date and thirdly the step size in seconds.
    """
    values = numpy.empty(13, dtype=float)
    values[:6] = self.firstdate.to_array()
    values[6:12] = self.lastdate.to_array()
    values[12] = self.stepsize.seconds
    return values
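A usage sketch; the expected values are taken from the `array2series` example further below, which prefixes exactly this array, and |Timegrid.from_array| reverses the conversion:

>>> from hydpy import Timegrid, round_
>>> timegrid = Timegrid('2000-11-01 00:00', '2000-11-01 04:00', '1h')
>>> round_(timegrid.to_array())
2000.0, 11.0, 1.0, 0.0, 0.0, 0.0, 2000.0, 11.0, 1.0, 4.0, 0.0, 0.0, 3600.0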
<SYSTEM_TASK:>
Return a |Timegrid| object representing the given starting
<END_TASK>
<USER_TASK:>
Description:
def from_timepoints(cls, timepoints, refdate, unit='hours'):
    """Return a |Timegrid| object representing the given starting
    `timepoints` in relation to the given `refdate`.

    The following examples are identical to the ones of
    |Timegrid.to_timepoints| but reversed.

    At least two given time points must be increasing and equidistant.
    By default, they are assumed in hours since the given reference date:

    >>> from hydpy import Timegrid
    >>> Timegrid.from_timepoints(
    ...     [0.0, 6.0, 12.0, 18.0], '01.01.2000')
    Timegrid('01.01.2000 00:00:00',
             '02.01.2000 00:00:00',
             '6h')
    >>> Timegrid.from_timepoints(
    ...     [24.0, 30.0, 36.0, 42.0], '1999-12-31')
    Timegrid('2000-01-01 00:00:00',
             '2000-01-02 00:00:00',
             '6h')

    Other time units (`days` or `min`) must be passed explicitly (only
    the first character counts):

    >>> Timegrid.from_timepoints(
    ...     [0.0, 0.25, 0.5, 0.75], '01.01.2000', unit='d')
    Timegrid('01.01.2000 00:00:00',
             '02.01.2000 00:00:00',
             '6h')
    >>> Timegrid.from_timepoints(
    ...     [1.0, 1.25, 1.5, 1.75], '1999-12-31', unit='day')
    Timegrid('2000-01-01 00:00:00',
             '2000-01-02 00:00:00',
             '6h')
    """
    refdate = Date(refdate)
    unit = Period.from_cfunits(unit)
    delta = timepoints[1]-timepoints[0]
    firstdate = refdate+timepoints[0]*unit
    lastdate = refdate+(timepoints[-1]+delta)*unit
    stepsize = (lastdate-firstdate)/len(timepoints)
    return cls(firstdate, lastdate, stepsize)
<SYSTEM_TASK:>
Return an |numpy.ndarray| representing the starting time points
<END_TASK>
<USER_TASK:>
Description:
def to_timepoints(self, unit='hours', offset=None):
    """Return an |numpy.ndarray| representing the starting time points
    of the |Timegrid| object.

    The following examples are identical to the ones of
    |Timegrid.from_timepoints| but reversed.

    By default, the time points are given in hours:

    >>> from hydpy import Timegrid
    >>> timegrid = Timegrid('2000-01-01', '2000-01-02', '6h')
    >>> timegrid.to_timepoints()
    array([ 0., 6., 12., 18.])

    Other time units (`days` or `min`) can be defined (only the first
    character counts):

    >>> timegrid.to_timepoints(unit='d')
    array([ 0. , 0.25, 0.5 , 0.75])

    Additionally, one can pass an `offset` that must be of type |int|
    or a valid |Period| initialization argument:

    >>> timegrid.to_timepoints(offset=24)
    array([ 24., 30., 36., 42.])
    >>> timegrid.to_timepoints(offset='1d')
    array([ 24., 30., 36., 42.])
    >>> timegrid.to_timepoints(unit='day', offset='1d')
    array([ 1. , 1.25, 1.5 , 1.75])
    """
    unit = Period.from_cfunits(unit)
    if offset is None:
        offset = 0.
    else:
        try:
            offset = Period(offset)/unit
        except TypeError:
            offset = offset
    step = self.stepsize/unit
    nmb = len(self)
    variable = numpy.linspace(offset, offset+step*(nmb-1), nmb)
    return variable
<SYSTEM_TASK:>
Prefix the information of the actual Timegrid object to the given
<END_TASK>
<USER_TASK:>
Description:
def array2series(self, array):
    """Prefix the information of the actual Timegrid object to the given
    array and return it.

    The Timegrid information is stored in the first thirteen values of
    the first axis of the returned series.

    Initialize a Timegrid object and apply its `array2series` method on
    a simple list containing numbers:

    >>> from hydpy import Timegrid
    >>> timegrid = Timegrid('2000-11-01 00:00', '2000-11-01 04:00', '1h')
    >>> series = timegrid.array2series([1, 2, 3.5, '5.0'])

    The first six entries contain the first date of the timegrid (year,
    month, day, hour, minute, second):

    >>> from hydpy import round_
    >>> round_(series[:6])
    2000.0, 11.0, 1.0, 0.0, 0.0, 0.0

    The six subsequent entries contain the last date:

    >>> round_(series[6:12])
    2000.0, 11.0, 1.0, 4.0, 0.0, 0.0

    The thirteenth value is the step size in seconds:

    >>> round_(series[12])
    3600.0

    The last four values are the ones of the given vector:

    >>> round_(series[-4:])
    1.0, 2.0, 3.5, 5.0

    The given array can have an arbitrary number of dimensions:

    >>> import numpy
    >>> array = numpy.eye(4)
    >>> series = timegrid.array2series(array)

    Now the timegrid information is stored in the first column:

    >>> round_(series[:13, 0])
    2000.0, 11.0, 1.0, 0.0, 0.0, 0.0, 2000.0, 11.0, 1.0, 4.0, 0.0, 0.0, \
3600.0

    All other columns of the first thirteen rows contain nan values,
    e.g.:

    >>> round_(series[12, :])
    3600.0, nan, nan, nan

    The original values are stored in the last four rows, e.g.:

    >>> round_(series[13, :])
    1.0, 0.0, 0.0, 0.0

    Inappropriate array objects result in error messages like:

    >>> timegrid.array2series([[1, 2], [3]])
    Traceback (most recent call last):
    ...
    ValueError: While trying to prefix timegrid information to the given \
array, the following error occurred: setting an array element with a sequence.

    If the given array does not fit to the defined timegrid, a special
    error message is returned:

    >>> timegrid.array2series([[1, 2], [3, 4]])
    Traceback (most recent call last):
    ...
    ValueError: When converting an array to a sequence, the lengths of \
the timegrid and the given array must be equal, but the length of the \
timegrid object is `4` and the length of the array object is `2`.
    """
    try:
        array = numpy.array(array, dtype=float)
    except BaseException:
        objecttools.augment_excmessage(
            'While trying to prefix timegrid information to the '
            'given array')
    if len(array) != len(self):
        raise ValueError(
            f'When converting an array to a sequence, the lengths of the '
            f'timegrid and the given array must be equal, but the length '
            f'of the timegrid object is `{len(self)}` and the length of '
            f'the array object is `{len(array)}`.')
    shape = list(array.shape)
    shape[0] += 13
    series = numpy.full(shape, numpy.nan)
    slices = [slice(0, 13)]
    subshape = [13]
    for dummy in range(1, series.ndim):
        slices.append(slice(0, 1))
        subshape.append(1)
    series[tuple(slices)] = self.to_array().reshape(subshape)
    series[13:] = array
    return series
<SYSTEM_TASK:>
Raise a |ValueError| if the dates or the step size of the time
<END_TASK>
<USER_TASK:>
Description:
def verify(self):
    """Raise a |ValueError| if the dates or the step size of the time
    frame are inconsistent.
    """
    if self.firstdate >= self.lastdate:
        raise ValueError(
            f'Unplausible timegrid. The first given date '
            f'{self.firstdate}, the second given date is {self.lastdate}.')
    if (self.lastdate-self.firstdate) % self.stepsize:
        raise ValueError(
            f'Unplausible timegrid. The period span between the given '
            f'dates {self.firstdate} and {self.lastdate} is not '
            f'a multiple of the given step size {self.stepsize}.')
<SYSTEM_TASK:>
Return a |repr| string with a prefixed assignment.
<END_TASK>
<USER_TASK:>
Description:
def assignrepr(self, prefix, style=None, utcoffset=None):
    """Return a |repr| string with a prefixed assignment.

    Without option arguments given, printing the returned string looks
    like:

    >>> from hydpy import Timegrid
    >>> timegrid = Timegrid('1996-11-01 00:00:00',
    ...                     '1997-11-01 00:00:00',
    ...                     '1d')
    >>> print(timegrid.assignrepr(prefix='timegrid = '))
    timegrid = Timegrid('1996-11-01 00:00:00',
                        '1997-11-01 00:00:00',
                        '1d')

    The optional arguments are passed to method |Date.to_repr| without
    any modifications:

    >>> print(timegrid.assignrepr(
    ...     prefix='', style='iso1', utcoffset=120))
    Timegrid('1996-11-01T01:00:00+02:00',
             '1997-11-01T01:00:00+02:00',
             '1d')
    """
    skip = len(prefix) + 9
    blanks = ' ' * skip
    return (f"{prefix}Timegrid('"
            f"{self.firstdate.to_string(style, utcoffset)}',\n"
            f"{blanks}'{self.lastdate.to_string(style, utcoffset)}',\n"
            f"{blanks}'{str(self.stepsize)}')")
<SYSTEM_TASK:>
Raise a |ValueError| if the different time grids are
<END_TASK>
<USER_TASK:>
Description:
def verify(self):
    """Raise a |ValueError| if the different time grids are
    inconsistent."""
    self.init.verify()
    self.sim.verify()
    if self.init.firstdate > self.sim.firstdate:
        raise ValueError(
            f'The first date of the initialisation period '
            f'({self.init.firstdate}) must not be later '
            f'than the first date of the simulation period '
            f'({self.sim.firstdate}).')
    elif self.init.lastdate < self.sim.lastdate:
        raise ValueError(
            f'The last date of the initialisation period '
            f'({self.init.lastdate}) must not be earlier '
            f'than the last date of the simulation period '
            f'({self.sim.lastdate}).')
    elif self.init.stepsize != self.sim.stepsize:
        raise ValueError(
            f'The initialization stepsize ({self.init.stepsize}) '
            f'must be identical with the simulation stepsize '
            f'({self.sim.stepsize}).')
    else:
        try:
            self.init[self.sim.firstdate]
        except ValueError:
            raise ValueError(
                'The simulation time grid is not properly '
                'aligned on the initialization time grid.')
<SYSTEM_TASK:>
Amount of time passed in seconds since the beginning of the year.
<END_TASK>
<USER_TASK:>
Description:
def seconds_passed(self):
    """Amount of time passed in seconds since the beginning of the year.

    In the first example, the year is only one minute and thirty
    seconds old:

    >>> from hydpy.core.timetools import TOY
    >>> TOY('1_1_0_1_30').seconds_passed
    90

    The second example shows that the 29th February is generally
    included:

    >>> TOY('3').seconds_passed
    5184000
    """
    return int((Date(self).datetime -
                self._STARTDATE.datetime).total_seconds())
<SYSTEM_TASK:>
Remaining part of the year in seconds.
<END_TASK>
<USER_TASK:>
Description:
def seconds_left(self):
    """Remaining part of the year in seconds.

    In the first example, only one minute and thirty seconds of the
    year remain:

    >>> from hydpy.core.timetools import TOY
    >>> TOY('12_31_23_58_30').seconds_left
    90

    The second example shows that the 29th February is generally
    included:

    >>> TOY('2').seconds_left
    28944000
    """
    return int((self._ENDDATE.datetime -
                Date(self).datetime).total_seconds())
<SYSTEM_TASK:>
Return a |Timegrid| object defining the central time points
<END_TASK>
<USER_TASK:>
Description:
def centred_timegrid(cls, simulationstep):
    """Return a |Timegrid| object defining the central time points of
    the year 2000 for the given simulation step.

    >>> from hydpy.core.timetools import TOY
    >>> TOY.centred_timegrid('1d')
    Timegrid('2000-01-01 12:00:00',
             '2001-01-01 12:00:00',
             '1d')
    """
    simulationstep = Period(simulationstep)
    return Timegrid(
        cls._STARTDATE+simulationstep/2,
        cls._ENDDATE+simulationstep/2,
        simulationstep)
<SYSTEM_TASK:>
The preferred way for HydPy objects to respond to |dir|.
<END_TASK>
<USER_TASK:>
Description:
def dir_(self):
    """The preferred way for HydPy objects to respond to |dir|.

    Note the dependence on `pub.options.dirverbose`. If this option is
    set `True`, all attributes and methods of the given instance and
    its class (including those inherited from the parent classes) are
    returned:

    >>> from hydpy import pub
    >>> pub.options.dirverbose = True
    >>> from hydpy.core.objecttools import dir_
    >>> class Test(object):
    ...     only_public_attribute = None
    >>> print(len(dir_(Test())) > 1) # Long list, try it yourself...
    True

    If the option is set to `False`, only the `public` attributes and
    methods (which do not begin with `_`) are returned:

    >>> pub.options.dirverbose = False
    >>> print(dir_(Test())) # Short list with one single entry...
    ['only_public_attribute']

    If none of those exists, |dir_| returns a list with a single string
    containing a single empty space (which seems to work better for
    most IDEs than returning an empty list):

    >>> del Test.only_public_attribute
    >>> print(dir_(Test()))
    [' ']
    """
    names = set()
    for thing in list(inspect.getmro(type(self))) + [self]:
        for key in vars(thing).keys():
            if hydpy.pub.options.dirverbose or not key.startswith('_'):
                names.add(key)
    if names:
        names = list(names)
    else:
        names = [' ']
    return names
<SYSTEM_TASK:>
Return the class name of the given instance object or class.
<END_TASK>
<USER_TASK:>
Description:
def classname(self):
    """Return the class name of the given instance object or class.

    >>> from hydpy.core.objecttools import classname
    >>> from hydpy import pub
    >>> print(classname(float))
    float
    >>> print(classname(pub.options))
    Options
    """
    if inspect.isclass(self):
        string = str(self)
    else:
        string = str(type(self))
    try:
        string = string.split("'")[1]
    except IndexError:
        pass
    return string.split('.')[-1]
<SYSTEM_TASK:>
Name of the class of the given instance in lower case letters.
<END_TASK>
<USER_TASK:>
Description:
def name(self):
    """Name of the class of the given instance in lower case letters.

    This function is thought to be implemented as a property. Otherwise
    it would violate the principle not to access or manipulate private
    attributes ("_name"):

    >>> from hydpy.core.objecttools import name
    >>> class Test(object):
    ...     name = property(name)
    >>> test1 = Test()
    >>> test1.name
    'test'
    >>> test1._name
    'test'

    The private attribute is added for performance reasons only. Note
    that it is a class attribute:

    >>> test2 = Test()
    >>> test2._name
    'test'
    """
    cls = type(self)
    try:
        return cls.__dict__['_name']
    except KeyError:
        setattr(cls, '_name', instancename(self))
        return cls.__dict__['_name']
<SYSTEM_TASK:>
Raises a |ValueError| if the given name is not a valid Python
<END_TASK>
<USER_TASK:>
Description:
def valid_variable_identifier(string):
    """Raises a |ValueError| if the given name is not a valid Python
    identifier.

    For example, the string `test_1` (with underscore) is valid...

    >>> from hydpy.core.objecttools import valid_variable_identifier
    >>> valid_variable_identifier('test_1')

    ...but the string `test 1` (with white space) is not:

    >>> valid_variable_identifier('test 1')
    Traceback (most recent call last):
    ...
    ValueError: The given name string `test 1` does not define a valid \
variable identifier. Valid identifiers do not contain characters like \
`-` or empty spaces, do not start with numbers, cannot be mistaken with \
Python built-ins like `for`...)

    Also, names of Python built-ins are not allowed:

    >>> valid_variable_identifier('print')   # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: The given name string `print` does not define...
    """
    string = str(string)
    try:
        exec('%s = None' % string)
        if string in dir(builtins):
            raise SyntaxError()
    except SyntaxError:
        raise ValueError(
            'The given name string `%s` does not define a valid variable '
            'identifier. Valid identifiers do not contain characters like '
            '`-` or empty spaces, do not start with numbers, cannot be '
            'mistaken with Python built-ins like `for`...)' % string)
<SYSTEM_TASK:>
Augment an exception message with additional information while keeping
<END_TASK>
<USER_TASK:>
Description:
def augment_excmessage(prefix=None, suffix=None) -> NoReturn:
    """Augment an exception message with additional information while
    keeping the original traceback.

    You can prefix and/or suffix text. If you prefix something (which
    happens much more often in the HydPy framework), the sub-clause
    ', the following error occurred:' is automatically included:

    >>> from hydpy.core import objecttools
    >>> import textwrap
    >>> try:
    ...     1 + '1'
    ... except BaseException:
    ...     prefix = 'While showing how prefixing works'
    ...     suffix = '(This is a final remark.)'
    ...     objecttools.augment_excmessage(prefix, suffix)
    Traceback (most recent call last):
    ...
    TypeError: While showing how prefixing works, the following error \
occurred: unsupported operand type(s) for +: 'int' and 'str' \
(This is a final remark.)

    Some exceptions derived by site-packages do not support exception
    chaining due to requiring multiple initialisation arguments. In such
    cases, |augment_excmessage| generates an exception with the same
    name on the fly and raises it afterwards, which is pointed out by
    the exception name mentioning to the "objecttools" module:

    >>> class WrongError(BaseException):
    ...     def __init__(self, arg1, arg2):
    ...         pass
    >>> try:
    ...     raise WrongError('info 1', 'info 2')
    ... except BaseException:
    ...     objecttools.augment_excmessage(
    ...         'While showing how prefixing works')
    Traceback (most recent call last):
    ...
    hydpy.core.objecttools.hydpy.core.objecttools.WrongError: While showing \
how prefixing works, the following error occurred: ('info 1', 'info 2')
    """
    exc_old = sys.exc_info()[1]
    message = str(exc_old)
    if prefix is not None:
        message = f'{prefix}, the following error occurred: {message}'
    if suffix is not None:
        message = f'{message} {suffix}'
    try:
        exc_new = type(exc_old)(message)
    except BaseException:
        exc_name = str(type(exc_old)).split("'")[1]
        exc_type = type(exc_name, (BaseException,), {})
        exc_type.__module = exc_old.__module__
        raise exc_type(message) from exc_old
    raise exc_new from exc_old
<SYSTEM_TASK:>
Wrap a function with |augment_excmessage|.
<END_TASK>
<USER_TASK:>
Description:
def excmessage_decorator(description) -> Callable:
    """Wrap a function with |augment_excmessage|.

    Function |excmessage_decorator| is a means to apply function
    |augment_excmessage| more efficiently. Suppose you would apply
    function |augment_excmessage| in a function that adds and returns
    two numbers:

    >>> from hydpy.core import objecttools
    >>> def add(x, y):
    ...     try:
    ...         return x + y
    ...     except BaseException:
    ...         objecttools.augment_excmessage(
    ...             'While trying to add `x` and `y`')

    This works as expected...

    >>> add(1, 2)
    3
    >>> add(1, [])
    Traceback (most recent call last):
    ...
    TypeError: While trying to add `x` and `y`, the following error \
occurred: unsupported operand type(s) for +: 'int' and 'list'

    ...but can be achieved with much less code using |excmessage_decorator|:

    >>> @objecttools.excmessage_decorator(
    ...     'add `x` and `y`')
    ... def add(x, y):
    ...     return x+y

    >>> add(1, 2)
    3
    >>> add(1, [])
    Traceback (most recent call last):
    ...
    TypeError: While trying to add `x` and `y`, the following error \
occurred: unsupported operand type(s) for +: 'int' and 'list'

    Additionally, exception messages related to wrong function calls
    are now also augmented:

    >>> add(1)
    Traceback (most recent call last):
    ...
    TypeError: While trying to add `x` and `y`, the following error \
occurred: add() missing 1 required positional argument: 'y'

    |excmessage_decorator| evaluates the given string like an f-string,
    allowing to mention the argument values of the called function and
    to make use of all string modification functions provided by module
    |objecttools|:

    >>> @objecttools.excmessage_decorator(
    ...     'add `x` ({repr_(x, 2)}) and `y` ({repr_(y, 2)})')
    ... def add(x, y):
    ...     return x+y

    >>> add(1.1111, 'wrong')
    Traceback (most recent call last):
    ...
    TypeError: While trying to add `x` (1.11) and `y` (wrong), the following \
error occurred: unsupported operand type(s) for +: 'float' and 'str'
    >>> add(1)
    Traceback (most recent call last):
    ...
    TypeError: While trying to add `x` (1) and `y` (?), the following error \
occurred: add() missing 1 required positional argument: 'y'
    >>> add(y=1)
    Traceback (most recent call last):
    ...
    TypeError: While trying to add `x` (?) and `y` (1), the following error \
occurred: add() missing 1 required positional argument: 'x'

    Applying |excmessage_decorator| to methods also works fine:

    >>> class Adder:
    ...     def __init__(self):
    ...         self.value = 0
    ...     @objecttools.excmessage_decorator(
    ...         'add an instance of class `{classname(self)}` with value '
    ...         '`{repr_(other, 2)}` of type `{classname(other)}`')
    ...     def __iadd__(self, other):
    ...         self.value += other
    ...         return self

    >>> adder = Adder()
    >>> adder += 1
    >>> adder.value
    1
    >>> adder += 'wrong'
    Traceback (most recent call last):
    ...
    TypeError: While trying to add an instance of class `Adder` with value \
`wrong` of type `str`, the following error occurred: unsupported operand \
type(s) for +=: 'int' and 'str'

    It is made sure that no information of the decorated function is lost:

    >>> add.__name__
    'add'
    """
    @wrapt.decorator
    def wrapper(wrapped, instance, args, kwargs):
        """Apply |augment_excmessage| when the wrapped function fails."""
        # pylint: disable=unused-argument
        try:
            return wrapped(*args, **kwargs)
        except BaseException:
            info = kwargs.copy()
            info['self'] = instance
            argnames = inspect.getfullargspec(wrapped).args
            if argnames[0] == 'self':
                argnames = argnames[1:]
            for argname, arg in zip(argnames, args):
                info[argname] = arg
            for argname in argnames:
                if argname not in info:
                    info[argname] = '?'
            message = eval(
                f"f'While trying to {description}'", globals(), info)
            augment_excmessage(message)
    return wrapper
<SYSTEM_TASK:>
Print the given values in multiple lines with a certain maximum width.
<END_TASK>
<USER_TASK:>
Description:
def print_values(values, width=70):
    """Print the given values in multiple lines with a certain maximum
    width.

    By default, each line contains at most 70 characters:

    >>> from hydpy import print_values
    >>> print_values(range(21))
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
    20

    You can change this default behaviour by passing an alternative
    number of characters:

    >>> print_values(range(21), width=30)
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
    10, 11, 12, 13, 14, 15, 16,
    17, 18, 19, 20
    """
    for line in textwrap.wrap(repr_values(values), width=width):
        print(line)
<SYSTEM_TASK:>
Return a prefixed, wrapped and properly aligned string representation
<END_TASK>
<USER_TASK:>
Description:
def assignrepr_values(values, prefix, width=None, _fakeend=0):
    """Return a prefixed, wrapped and properly aligned string
    representation of the given values using function |repr|.

    >>> from hydpy.core.objecttools import assignrepr_values
    >>> print(assignrepr_values(range(1, 13), 'test(', 20) + ')')
    test(1, 2, 3, 4, 5,
         6, 7, 8, 9, 10,
         11, 12)

    If no width is given, no wrapping is performed:

    >>> print(assignrepr_values(range(1, 13), 'test(') + ')')
    test(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)

    To circumvent defining too long string representations, make use of
    the ellipsis option:

    >>> from hydpy import pub
    >>> with pub.options.ellipsis(1):
    ...     print(assignrepr_values(range(1, 13), 'test(', 20) + ')')
    test(1, ...,12)

    >>> with pub.options.ellipsis(5):
    ...     print(assignrepr_values(range(1, 13), 'test(', 20) + ')')
    test(1, 2, 3, 4, 5,
         ...,8, 9, 10,
         11, 12)

    >>> with pub.options.ellipsis(6):
    ...     print(assignrepr_values(range(1, 13), 'test(', 20) + ')')
    test(1, 2, 3, 4, 5,
         6, 7, 8, 9, 10,
         11, 12)
    """
    ellipsis_ = hydpy.pub.options.ellipsis
    if (ellipsis_ > 0) and (len(values) > 2*ellipsis_):
        string = (repr_values(values[:ellipsis_]) +
                  ', ...,' +
                  repr_values(values[-ellipsis_:]))
    else:
        string = repr_values(values)
    blanks = ' '*len(prefix)
    if width is None:
        wrapped = [string]
        _fakeend = 0
    else:
        width -= len(prefix)
        wrapped = textwrap.wrap(string+'_'*_fakeend, width)
    if not wrapped:
        wrapped = ['']
    lines = []
    for (idx, line) in enumerate(wrapped):
        if idx == 0:
            lines.append('%s%s' % (prefix, line))
        else:
            lines.append('%s%s' % (blanks, line))
    string = '\n'.join(lines)
    return string[:len(string)-_fakeend]
<SYSTEM_TASK:>
Return a prefixed and properly aligned string representation
<END_TASK>
<USER_TASK:>
Description:
def assignrepr_values2(values, prefix):
    """Return a prefixed and properly aligned string representation
    of the given 2-dimensional value matrix using function |repr|.

    >>> from hydpy.core.objecttools import assignrepr_values2
    >>> import numpy
    >>> print(assignrepr_values2(numpy.eye(3), 'test(') + ')')
    test(1.0, 0.0, 0.0,
         0.0, 1.0, 0.0,
         0.0, 0.0, 1.0)

    Function |assignrepr_values2| also works on empty iterables:

    >>> print(assignrepr_values2([[]], 'test(') + ')')
    test()
    """
    lines = []
    blanks = ' '*len(prefix)
    for (idx, subvalues) in enumerate(values):
        if idx == 0:
            lines.append('%s%s,' % (prefix, repr_values(subvalues)))
        else:
            lines.append('%s%s,' % (blanks, repr_values(subvalues)))
    lines[-1] = lines[-1][:-1]
    return '\n'.join(lines)
<SYSTEM_TASK:>
Return a prefixed, wrapped and properly aligned bracketed string
<END_TASK>
<USER_TASK:>
Description:
def _assignrepr_bracketed2(assignrepr_bracketed1, values, prefix, width=None):
    """Return a prefixed, wrapped and properly aligned bracketed string
    representation of the given 2-dimensional value matrix using
    function |repr|."""
    brackets = getattr(assignrepr_bracketed1, '_brackets')
    prefix += brackets[0]
    lines = []
    blanks = ' '*len(prefix)
    for (idx, subvalues) in enumerate(values):
        if idx == 0:
            lines.append(assignrepr_bracketed1(subvalues, prefix, width))
        else:
            lines.append(assignrepr_bracketed1(subvalues, blanks, width))
        lines[-1] += ','
    if (len(values) > 1) or (brackets != '()'):
        lines[-1] = lines[-1][:-1]
    lines[-1] += brackets[1]
    return '\n'.join(lines)
<SYSTEM_TASK:>
Prints values with a maximum number of digits in doctests.
<END_TASK>
<USER_TASK:>
Description:
def round_(values, decimals=None, width=0, lfill=None, rfill=None, **kwargs):
    """Prints values with a maximum number of digits in doctests.

    See the documentation on function |repr| for more details. And note
    that the optional keyword arguments are passed to the print function.

    Usually one would apply function |round_| on a single number or a
    vector of numbers:

    >>> from hydpy import round_
    >>> round_(1./3., decimals=6)
    0.333333
    >>> round_((1./2., 1./3., 1./4.), decimals=4)
    0.5, 0.3333, 0.25

    Additionally, one can supply a `width` and a `rfill` argument:

    >>> round_(1.0, width=6, rfill='0')
    1.0000

    Alternatively, one can use the `lfill` argument, which might, for
    example, be useful for aligning different strings:

    >>> round_('test', width=6, lfill='_')
    __test

    Using both the `lfill` and the `rfill` argument raises an error:

    >>> round_(1.0, lfill='_', rfill='0')
    Traceback (most recent call last):
    ...
    ValueError: For function `round_` values are passed for both \
arguments `lfill` and `rfill`. This is not allowed.
    """
    if decimals is None:
        decimals = hydpy.pub.options.reprdigits
    with hydpy.pub.options.reprdigits(decimals):
        if isinstance(values, abctools.IterableNonStringABC):
            string = repr_values(values)
        else:
            string = repr_(values)
        if (lfill is not None) and (rfill is not None):
            raise ValueError(
                'For function `round_` values are passed for both arguments '
                '`lfill` and `rfill`. This is not allowed.')
        if (lfill is not None) or (rfill is not None):
            width = max(width, len(string))
            if lfill is not None:
                string = string.rjust(width, lfill)
            else:
                string = string.ljust(width, rfill)
        print(string, **kwargs)
<SYSTEM_TASK:>
Return a generator that extracts certain objects from `values`.
<END_TASK>
<USER_TASK:>
Description:
def extract(values, types, skip=False):
    """Return a generator that extracts certain objects from `values`.

    This function is thought for supporting the definition of functions
    with arguments that can be objects of certain types or iterables
    containing these objects.

    The following examples show that function |extract| basically
    implements a type specific flattening mechanism:

    >>> from hydpy.core.objecttools import extract
    >>> tuple(extract('str1', (str, int)))
    ('str1',)
    >>> tuple(extract(['str1', 'str2'], (str, int)))
    ('str1', 'str2')
    >>> tuple(extract((['str1', 'str2'], [1,]), (str, int)))
    ('str1', 'str2', 1)

    If an object is neither iterable nor of the required type, the
    following exception is raised:

    >>> tuple(extract((['str1', 'str2'], [None, 1]), (str, int)))
    Traceback (most recent call last):
    ...
    TypeError: The given value `None` is neither iterable nor \
an instance of the following classes: str and int.

    Optionally, |None| values can be skipped:

    >>> tuple(extract(None, (str, int), True))
    ()
    >>> tuple(extract((['str1', 'str2'], [None, 1]), (str, int), True))
    ('str1', 'str2', 1)
    """
    if isinstance(values, types):
        yield values
    elif skip and (values is None):
        return
    else:
        try:
            for value in values:
                for subvalue in extract(value, types, skip):
                    yield subvalue
        except TypeError as exc:
            if exc.args[0].startswith('The given value'):
                raise exc
            else:
                raise TypeError(
                    f'The given value `{repr(values)}` is neither iterable '
                    f'nor an instance of the following classes: '
                    f'{enumeration(types, converter=instancename)}.')
<SYSTEM_TASK:>
Return an enumeration string based on the given values.
<END_TASK>
<USER_TASK:>
Description:
def enumeration(values, converter=str, default=''):
    """Return an enumeration string based on the given values.

    The following four examples show the standard output of function
    |enumeration|:

    >>> from hydpy.core.objecttools import enumeration
    >>> enumeration(('text', 3, []))
    'text, 3, and []'
    >>> enumeration(('text', 3))
    'text and 3'
    >>> enumeration(('text',))
    'text'
    >>> enumeration(())
    ''

    All given objects are converted to strings by function |str|, as
    shown by the first two examples. This behaviour can be changed by
    another function expecting a single argument and returning a string:

    >>> from hydpy.core.objecttools import classname
    >>> enumeration(('text', 3, []), converter=classname)
    'str, int, and list'

    Furthermore, you can define a default string that is returned in
    case an empty iterable is given:

    >>> enumeration((), default='nothing')
    'nothing'
    """
    values = tuple(converter(value) for value in values)
    if not values:
        return default
    if len(values) == 1:
        return values[0]
    if len(values) == 2:
        return ' and '.join(values)
    return ', and '.join((', '.join(values[:-1]), values[-1]))
<SYSTEM_TASK:>
Trim negative value whenever there is no internal lake within
<END_TASK>
<USER_TASK:>
Description:
def trim(self, lower=None, upper=None):
    """Trim negative value whenever there is no internal lake within
    the respective subbasin.

    >>> from hydpy.models.hland import *
    >>> parameterstep('1d')
    >>> nmbzones(2)
    >>> zonetype(FIELD, ILAKE)
    >>> states.lz(-1.0)
    >>> states.lz
    lz(-1.0)
    >>> zonetype(FIELD, FOREST)
    >>> states.lz(-1.0)
    >>> states.lz
    lz(0.0)
    >>> states.lz(1.0)
    >>> states.lz
    lz(1.0)
    """
    if upper is None:
        control = self.subseqs.seqs.model.parameters.control
        if not any(control.zonetype.values == ILAKE):
            lower = 0.
    sequencetools.StateSequence.trim(self, lower, upper)
<SYSTEM_TASK:>
Nested dictionary containing the values of all condition
<END_TASK>
<USER_TASK:>
Description:
def conditions(self) -> Dict[str, Dict[str, Union[float, numpy.ndarray]]]:
    """Nested dictionary containing the values of all condition
    sequences.

    See the documentation on property |HydPy.conditions| for further
    information.
    """
    conditions = {}
    for subname in NAMES_CONDITIONSEQUENCES:
        subseqs = getattr(self, subname, ())
        subconditions = {seq.name: copy.deepcopy(seq.values)
                         for seq in subseqs}
        if subconditions:
            conditions[subname] = subconditions
    return conditions
<SYSTEM_TASK:>
Absolute path of the directory of the internal data file.
<END_TASK>
<USER_TASK:>
Description:
def dirpath_int(self):
    """Absolute path of the directory of the internal data file.

    Normally, each sequence queries its current "internal" directory
    path from the |SequenceManager| object stored in module |pub|:

    >>> from hydpy import pub, repr_, TestIO
    >>> from hydpy.core.filetools import SequenceManager
    >>> pub.sequencemanager = SequenceManager()

    We overwrite |FileManager.basepath| and prepare a folder in the
    `iotesting` directory to simplify the following examples:

    >>> basepath = SequenceManager.basepath
    >>> SequenceManager.basepath = 'test'
    >>> TestIO.clear()
    >>> import os
    >>> with TestIO():
    ...     os.makedirs('test/temp')

    Generally, |SequenceManager.tempdirpath| is queried:

    >>> from hydpy.core import sequencetools as st
    >>> seq = st.InputSequence(None)
    >>> with TestIO():
    ...     repr_(seq.dirpath_int)
    'test/temp'

    Alternatively, you can specify |IOSequence.dirpath_int| for each
    sequence object individually:

    >>> seq.dirpath_int = 'path'
    >>> os.path.split(seq.dirpath_int)
    ('', 'path')
    >>> del seq.dirpath_int
    >>> with TestIO():
    ...     os.path.split(seq.dirpath_int)
    ('test', 'temp')

    If neither an individual definition nor |SequenceManager| is
    available, the following error is raised:

    >>> del pub.sequencemanager
    >>> seq.dirpath_int
    Traceback (most recent call last):
    ...
    RuntimeError: For sequence `inputsequence` the directory of \
the internal data file cannot be determined. Either set it manually \
or prepare `pub.sequencemanager` correctly.

    Remove the `basepath` mock:

    >>> SequenceManager.basepath = basepath
    """
    try:
        return hydpy.pub.sequencemanager.tempdirpath
    except RuntimeError:
        raise RuntimeError(
            f'For sequence {objecttools.devicephrase(self)} '
            f'the directory of the internal data file cannot '
            f'be determined. Either set it manually or prepare '
            f'`pub.sequencemanager` correctly.')
<SYSTEM_TASK:>
Shape of the array of temporary values required for the numerical
<END_TASK>
<USER_TASK:>
Description:
def numericshape(self):
    """Shape of the array of temporary values required for the numerical
    solver actually being selected."""
    try:
        numericshape = [self.subseqs.seqs.model.numconsts.nmb_stages]
    except AttributeError:
        objecttools.augment_excmessage(
            'The `numericshape` of a sequence like `%s` depends on the '
            'configuration of the actual integration algorithm. '
            'While trying to query the required configuration data '
            '`nmb_stages` of the model associated with element `%s`'
            % (self.name, objecttools.devicename(self)))
    # noinspection PyUnboundLocalVariable
    numericshape.extend(self.shape)
    return tuple(numericshape)
<SYSTEM_TASK:>
Internal time series data within an |numpy.ndarray|.
<END_TASK>
<USER_TASK:>
Description:
def series(self) -> InfoArray:
    """Internal time series data within an |numpy.ndarray|."""
    if self.diskflag:
        array = self._load_int()
    elif self.ramflag:
        array = self.__get_array()
    else:
        raise AttributeError(
            f'Sequence {objecttools.devicephrase(self)} is not requested '
            f'to make any internal data available to the user.')
    return InfoArray(array, info={'type': 'unmodified'})
<SYSTEM_TASK:>
Read the internal data from an external data file.
<END_TASK>
<USER_TASK:>
Description:
def load_ext(self):
    """Read the internal data from an external data file."""
    try:
        sequencemanager = hydpy.pub.sequencemanager
    except AttributeError:
        raise RuntimeError(
            'The time series of sequence %s cannot be loaded. Firstly, '
            'you have to prepare `pub.sequencemanager` correctly.'
            % objecttools.devicephrase(self))
    sequencemanager.load_file(self)
<SYSTEM_TASK:>
Adjust a short time series to a longer timegrid.
<END_TASK>
<USER_TASK:>
Description:
def adjust_short_series(self, timegrid, values):
    """Adjust a short time series to a longer timegrid.

    Normally, time series data to be read from external data files
    should span (at least) the whole initialization time period of a
    HydPy project. However, for some variables which are only used for
    comparison (e.g. observed runoff used for calibration), incomplete
    time series might also be helpful. This method is thought for
    adjusting such incomplete series to the public initialization time
    grid stored in module |pub|. It is automatically called in method
    |IOSequence.adjust_series| when necessary provided that the option
    |Options.checkseries| is disabled.

    Assume the initialization time period of a HydPy project spans five
    days:

    >>> from hydpy import pub
    >>> pub.timegrids = '2000.01.10', '2000.01.15', '1d'

    Prepare a node series object for observational data:

    >>> from hydpy.core.sequencetools import Obs
    >>> obs = Obs(None)

    Prepare a test function that expects the timegrid of the data and
    the data itself, which returns the adjusted array by means of
    calling method |IOSequence.adjust_short_series|:

    >>> import numpy
    >>> def test(timegrid):
    ...     values = numpy.ones(len(timegrid))
    ...     return obs.adjust_short_series(timegrid, values)

    The following calls to the test function show the arrays returned
    for different kinds of misalignments:

    >>> from hydpy import Timegrid
    >>> test(Timegrid('2000.01.05', '2000.01.20', '1d'))
    array([ 1., 1., 1., 1., 1.])
    >>> test(Timegrid('2000.01.12', '2000.01.15', '1d'))
    array([ nan, nan, 1., 1., 1.])
    >>> test(Timegrid('2000.01.12', '2000.01.17', '1d'))
    array([ nan, nan, 1., 1., 1.])
    >>> test(Timegrid('2000.01.10', '2000.01.13', '1d'))
    array([ 1., 1., 1., nan, nan])
    >>> test(Timegrid('2000.01.08', '2000.01.13', '1d'))
    array([ 1., 1., 1., nan, nan])
    >>> test(Timegrid('2000.01.12', '2000.01.13', '1d'))
    array([ nan, nan, 1., nan, nan])
    >>> test(Timegrid('2000.01.05', '2000.01.10', '1d'))
    array([ nan, nan, nan, nan, nan])
    >>> test(Timegrid('2000.01.05', '2000.01.08', '1d'))
    array([ nan, nan, nan, nan, nan])
    >>> test(Timegrid('2000.01.15', '2000.01.18', '1d'))
    array([ nan, nan, nan, nan, nan])
    >>> test(Timegrid('2000.01.16', '2000.01.18', '1d'))
    array([ nan, nan, nan, nan, nan])

    Through enabling option |Options.usedefaultvalues| the missing
    values are initialised with zero instead of nan:

    >>> with pub.options.usedefaultvalues(True):
    ...     test(Timegrid('2000.01.12', '2000.01.17', '1d'))
    array([ 0., 0., 1., 1., 1.])
    """
    idxs = [timegrid[hydpy.pub.timegrids.init.firstdate],
            timegrid[hydpy.pub.timegrids.init.lastdate]]
    valcopy = values
    values = numpy.full(self.seriesshape, self.initinfo[0])
    len_ = len(valcopy)
    jdxs = []
    for idx in idxs:
        if idx < 0:
            jdxs.append(0)
        elif idx <= len_:
            jdxs.append(idx)
        else:
            jdxs.append(len_)
    valcopy = valcopy[jdxs[0]:jdxs[1]]
    zdx1 = max(-idxs[0], 0)
    zdx2 = zdx1+jdxs[1]-jdxs[0]
    values[zdx1:zdx2] = valcopy
    return values
<SYSTEM_TASK:>
Raise a |RuntimeError| if the |IOSequence.series| contains at
<END_TASK>
<USER_TASK:>
Description:
def check_completeness(self):
    """Raise a |RuntimeError| if the |IOSequence.series| contains at
    least one |numpy.nan| value, if option |Options.checkseries| is
    enabled.

    >>> from hydpy import pub
    >>> pub.timegrids = '2000-01-01', '2000-01-11', '1d'
    >>> from hydpy.core.sequencetools import IOSequence
    >>> class Seq(IOSequence):
    ...     NDIM = 0
    >>> seq = Seq(None)
    >>> seq.activate_ram()

    >>> seq.check_completeness()
    Traceback (most recent call last):
    ...
    RuntimeError: The series array of sequence `seq` contains 10 nan values.

    >>> seq.series = 1.0
    >>> seq.check_completeness()

    >>> seq.series[3] = numpy.nan
    >>> seq.check_completeness()
    Traceback (most recent call last):
    ...
    RuntimeError: The series array of sequence `seq` contains 1 nan value.

    >>> with pub.options.checkseries(False):
    ...     seq.check_completeness()
    """
    if hydpy.pub.options.checkseries:
        isnan = numpy.isnan(self.series)
        if numpy.any(isnan):
            nmb = numpy.sum(isnan)
            valuestring = 'value' if nmb == 1 else 'values'
            raise RuntimeError(
                f'The series array of sequence '
                f'{objecttools.devicephrase(self)} contains '
                f'{nmb} nan {valuestring}.')
<SYSTEM_TASK:>
Write the internal data into an external data file.
<END_TASK>
<USER_TASK:>
Description:
def save_ext(self):
    """Write the internal data into an external data file."""
try: sequencemanager = hydpy.pub.sequencemanager except AttributeError: raise RuntimeError( 'The time series of sequence %s cannot be saved. Firstly, ' 'you have to prepare `pub.sequencemanager` correctly.' % objecttools.devicephrase(self)) sequencemanager.save_file(self)
<SYSTEM_TASK:> Load internal data from file and return it. <END_TASK> <USER_TASK:> Description: def _load_int(self): """Load internal data from file and return it."""
values = numpy.fromfile(self.filepath_int) if self.NDIM > 0: values = values.reshape(self.seriesshape) return values
<SYSTEM_TASK:> Average the actual time series of the |Variable| object for all <END_TASK> <USER_TASK:> Description: def average_series(self, *args, **kwargs) -> InfoArray: """Average the actual time series of the |Variable| object for all time points. Method |IOSequence.average_series| works similarly as method |Variable.average_values| of class |Variable|, from which we borrow some examples. However, firstly, we have to prepare a |Timegrids| object to define the |IOSequence.series| length: >>> from hydpy import pub >>> pub.timegrids = '2000-01-01', '2000-01-04', '1d' As shown for method |Variable.average_values|, for 0-dimensional |IOSequence| objects the result of |IOSequence.average_series| equals |IOSequence.series| itself: >>> from hydpy.core.sequencetools import IOSequence >>> class SoilMoisture(IOSequence): ... NDIM = 0 >>> sm = SoilMoisture(None) >>> sm.activate_ram() >>> import numpy >>> sm.series = numpy.array([190.0, 200.0, 210.0]) >>> sm.average_series() InfoArray([ 190., 200., 210.]) For |IOSequence| objects with an increased dimensionality, a weighting parameter is required, again: >>> SoilMoisture.NDIM = 1 >>> sm.shape = 3 >>> sm.activate_ram() >>> sm.series = ( ... [190.0, 390.0, 490.0], ... [200.0, 400.0, 500.0], ... [210.0, 410.0, 510.0]) >>> from hydpy.core.parametertools import Parameter >>> class Area(Parameter): ... NDIM = 1 ... shape = (3,) ... value = numpy.array([1.0, 1.0, 2.0]) >>> area = Area(None) >>> SoilMoisture.refweights = property(lambda self: area) >>> sm.average_series() InfoArray([ 390., 400., 410.]) The documentation on method |Variable.average_values| provides many examples on how to use different masks in different ways. Here we restrict ourselves to the first example, where a new mask enforces that |IOSequence.average_series| takes only the first two columns of the `series` into account: >>> from hydpy.core.masktools import DefaultMask >>> class Soil(DefaultMask): ... @classmethod ... def new(cls, variable, **kwargs): ... return cls.array2mask([True, True, False]) >>> SoilMoisture.mask = Soil() >>> sm.average_series() InfoArray([ 290., 300., 310.]) """
try: if not self.NDIM: array = self.series else: mask = self.get_submask(*args, **kwargs) if numpy.any(mask): weights = self.refweights[mask] weights /= numpy.sum(weights) series = self.series[:, mask] axes = tuple(range(1, self.NDIM+1)) array = numpy.sum(weights*series, axis=axes) else: return numpy.nan return InfoArray(array, info={'type': 'mean'}) except BaseException: objecttools.augment_excmessage( 'While trying to calculate the mean value of ' 'the internal time series of sequence %s' % objecttools.devicephrase(self))
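The masking and weighting step can be reproduced without any HydPy classes. Here is a small plain-numpy sketch (array shapes and values are assumptions chosen to match the example above) of averaging a (time, place) series with normalised weights over a boolean mask:

import numpy

series = numpy.array([[190.0, 390.0, 490.0],
                      [200.0, 400.0, 500.0],
                      [210.0, 410.0, 510.0]])    # shape (time, place)
weights = numpy.array([1.0, 1.0, 2.0])           # e.g. zone areas
mask = numpy.array([True, True, False])          # ignore the last column

w = weights[mask] / numpy.sum(weights[mask])     # normalise the selected weights
mean = numpy.sum(w * series[:, mask], axis=1)    # weighted mean per time step
print(mean)                                      # -> 290., 300., 310.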
<SYSTEM_TASK:> Aggregates time series data based on the actual <END_TASK> <USER_TASK:> Description: def aggregate_series(self, *args, **kwargs) -> InfoArray: """Aggregates time series data based on the actual |FluxSequence.aggregation_ext| attribute of |IOSequence| subclasses. We prepare some nodes and elements with the help of function |prepare_io_example_1| and select a 1-dimensional flux sequence of type |lland_fluxes.NKor| as an example: >>> from hydpy.core.examples import prepare_io_example_1 >>> nodes, elements = prepare_io_example_1() >>> seq = elements.element3.model.sequences.fluxes.nkor If |FluxSequence.aggregation_ext| is `none`, the original time series values are returned: >>> seq.aggregation_ext 'none' >>> seq.aggregate_series() InfoArray([[ 24., 25., 26.], [ 27., 28., 29.], [ 30., 31., 32.], [ 33., 34., 35.]]) If |FluxSequence.aggregation_ext| is `mean`, method |IOSequence.average_series| is called: >>> seq.aggregation_ext = 'mean' >>> seq.aggregate_series() InfoArray([ 25., 28., 31., 34.]) Passing an unknown aggregation mode results in the following error: >>> seq.aggregation_ext = 'nonexistent' >>> seq.aggregate_series() Traceback (most recent call last): ... RuntimeError: Unknown aggregation mode `nonexistent` for \ sequence `nkor` of element `element3`. The following technical test confirms that all potential positional and keyword arguments are passed properly: >>> seq.aggregation_ext = 'mean' >>> from unittest import mock >>> seq.average_series = mock.MagicMock() >>> _ = seq.aggregate_series(1, x=2) >>> seq.average_series.assert_called_with(1, x=2) """
mode = self.aggregation_ext if mode == 'none': return self.series elif mode == 'mean': return self.average_series(*args, **kwargs) else: raise RuntimeError( 'Unknown aggregation mode `%s` for sequence %s.' % (mode, objecttools.devicephrase(self)))
<SYSTEM_TASK:> Open all files with an activated disk flag. <END_TASK> <USER_TASK:> Description: def open_files(self, idx): """Open all files with an activated disk flag."""
for name in self: if getattr(self, '_%s_diskflag' % name): path = getattr(self, '_%s_path' % name) file_ = open(path, 'rb+') ndim = getattr(self, '_%s_ndim' % name) position = 8*idx for idim in range(ndim): length = getattr(self, '_%s_length_%d' % (name, idim)) position *= length file_.seek(position) setattr(self, '_%s_file' % name, file_)
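The seek position computed above assumes a flat binary file of float64 values: eight bytes per entry and one block of entries per time step. A small sketch (assumed shapes) of the same offset arithmetic on its own:

def byte_offset(idx, shape):
    # Offset of time step `idx` in a flat float64 file, where every time
    # step stores one eight-byte value per entry of `shape`.
    position = 8 * idx
    for length in shape:
        position *= length
    return position

print(byte_offset(3, ()))      # 24  -> 0-dimensional sequence
print(byte_offset(3, (5,)))    # 120 -> five values per time step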
<SYSTEM_TASK:> Close all files with an activated disk flag. <END_TASK> <USER_TASK:> Description: def close_files(self): """Close all files with an activated disk flag."""
for name in self: if getattr(self, '_%s_diskflag' % name): file_ = getattr(self, '_%s_file' % name) file_.close()
<SYSTEM_TASK:> Load the internal data of all sequences. Load from file if the <END_TASK> <USER_TASK:> Description: def load_data(self, idx): """Load the internal data of all sequences. Load from file if the corresponding disk flag is activated, otherwise load from RAM."""
for name in self: ndim = getattr(self, '_%s_ndim' % name) diskflag = getattr(self, '_%s_diskflag' % name) ramflag = getattr(self, '_%s_ramflag' % name) if diskflag: file_ = getattr(self, '_%s_file' % name) length_tot = 1 shape = [] for jdx in range(ndim): length = getattr(self, '_%s_length_%s' % (name, jdx)) length_tot *= length shape.append(length) raw = file_.read(length_tot*8) values = struct.unpack(length_tot*'d', raw) if ndim: values = numpy.array(values).reshape(shape) else: values = values[0] elif ramflag: array = getattr(self, '_%s_array' % name) values = array[idx] if diskflag or ramflag: if ndim == 0: setattr(self, name, values) else: getattr(self, name)[:] = values
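Reading one time step from such a file boils down to unpacking a fixed number of doubles and reshaping them. The following self-contained sketch (temporary file, assumed per-step shape) mirrors the read path above:

import struct
import tempfile

import numpy

shape = (2, 3)                            # assumed per-step shape
values = numpy.arange(6, dtype=float)

with tempfile.TemporaryFile() as file_:
    file_.write(struct.pack(values.size * 'd', *values.tolist()))   # write one time step
    file_.seek(0)
    raw = file_.read(values.size * 8)                               # 8 bytes per double
    step = numpy.array(struct.unpack(values.size * 'd', raw)).reshape(shape)

print(step)    # the (2, 3) block written above, restored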
<SYSTEM_TASK:> Save the internal data of all sequences with an activated flag. <END_TASK> <USER_TASK:> Description: def save_data(self, idx): """Save the internal data of all sequences with an activated flag. Write to file if the corresponding disk flag is activated; store in working memory if the corresponding ram flag is activated."""
for name in self: actual = getattr(self, name) diskflag = getattr(self, '_%s_diskflag' % name) ramflag = getattr(self, '_%s_ramflag' % name) if diskflag: file_ = getattr(self, '_%s_file' % name) ndim = getattr(self, '_%s_ndim' % name) length_tot = 1 for jdx in range(ndim): length = getattr(self, '_%s_length_%s' % (name, jdx)) length_tot *= length if ndim: raw = struct.pack(length_tot*'d', *actual.flatten()) else: raw = struct.pack('d', actual) file_.write(raw) elif ramflag: array = getattr(self, '_%s_array' % name) array[idx] = actual
<SYSTEM_TASK:> Update |QFactor| based on |FT| and the current simulation step size. <END_TASK> <USER_TASK:> Description: def update(self): """Update |QFactor| based on |FT| and the current simulation step size. >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> simulationstep('1d') >>> ft(10.0) >>> derived.qfactor.update() >>> derived.qfactor qfactor(0.115741) """
con = self.subpars.pars.control self(con.ft*1000./self.simulationstep.seconds)
<SYSTEM_TASK:> A tuple of the numbers of all "routing" basins. <END_TASK> <USER_TASK:> Description: def _router_numbers(self): """A tuple of the numbers of all "routing" basins."""
return tuple(up for up in self._up2down.keys() if up in self._up2down.values())
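In other words, a basin counts as "routing" as soon as any other basin drains into it, which follows directly from the upstream-to-downstream mapping. A tiny sketch with a hypothetical `up2down` dictionary:

# Hypothetical upstream -> downstream mapping (None marks the outlet):
up2down = {'1121': '1123', '1122': '1123', '1123': '1125',
           '1124': '1125', '1125': None}

routers = tuple(up for up in up2down.keys() if up in up2down.values())
print(routers)    # ('1123', '1125')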
<SYSTEM_TASK:> A |Elements| collection of all "supplying" basins. <END_TASK> <USER_TASK:> Description: def supplier_elements(self): """A |Elements| collection of all "supplying" basins. (All river basins are assumed to supply something to the downstream basin.) >>> from hydpy import RiverBasinNumbers2Selection >>> rbns2s = RiverBasinNumbers2Selection( ... (111, 113, 1129, 11269, 1125, 11261, ... 11262, 1123, 1124, 1122, 1121)) The following elements are properly connected to the required outlet nodes already: >>> for element in rbns2s.supplier_elements: ... print(repr(element)) Element("land_111", outlets="node_113") Element("land_1121", outlets="node_1123") Element("land_1122", outlets="node_1123") Element("land_1123", outlets="node_1125") Element("land_1124", outlets="node_1125") Element("land_1125", outlets="node_1129") Element("land_11261", outlets="node_11269") Element("land_11262", outlets="node_11269") Element("land_11269", outlets="node_1129") Element("land_1129", outlets="node_113") Element("land_113", outlets="node_outlet") It is both possible to change the prefix names of the elements and nodes, as long as it results in a valid variable name (e.g. does not start with a number): >>> rbns2s.supplier_prefix = 'a_' >>> rbns2s.node_prefix = 'b_' >>> rbns2s.supplier_elements Elements("a_111", "a_1121", "a_1122", "a_1123", "a_1124", "a_1125", "a_11261", "a_11262", "a_11269", "a_1129", "a_113") """
elements = devicetools.Elements() for supplier in self._supplier_numbers: element = self._get_suppliername(supplier) try: outlet = self._get_nodename(self._up2down[supplier]) except TypeError: outlet = self.last_node elements += devicetools.Element(element, outlets=outlet) return elements
<SYSTEM_TASK:> A |Elements| collection of all "routing" basins. <END_TASK> <USER_TASK:> Description: def router_elements(self): """A |Elements| collection of all "routing" basins. (Only river basins with a upstream basin are assumed to route something to the downstream basin.) >>> from hydpy import RiverBasinNumbers2Selection >>> rbns2s = RiverBasinNumbers2Selection( ... (111, 113, 1129, 11269, 1125, 11261, ... 11262, 1123, 1124, 1122, 1121)) The following elements are properly connected to the required inlet and outlet nodes already: >>> for element in rbns2s.router_elements: ... print(repr(element)) Element("stream_1123", inlets="node_1123", outlets="node_1125") Element("stream_1125", inlets="node_1125", outlets="node_1129") Element("stream_11269", inlets="node_11269", outlets="node_1129") Element("stream_1129", inlets="node_1129", outlets="node_113") Element("stream_113", inlets="node_113", outlets="node_outlet") It is both possible to change the prefix names of the elements and nodes, as long as it results in a valid variable name (e.g. does not start with a number): >>> rbns2s.router_prefix = 'c_' >>> rbns2s.node_prefix = 'd_' >>> rbns2s.router_elements Elements("c_1123", "c_1125", "c_11269", "c_1129", "c_113") """
elements = devicetools.Elements() for router in self._router_numbers: element = self._get_routername(router) inlet = self._get_nodename(router) try: outlet = self._get_nodename(self._up2down[router]) except TypeError: outlet = self.last_node elements += devicetools.Element( element, inlets=inlet, outlets=outlet) return elements
<SYSTEM_TASK:> A |Nodes| collection of all required nodes. <END_TASK> <USER_TASK:> Description: def nodes(self): """A |Nodes| collection of all required nodes. >>> from hydpy import RiverBasinNumbers2Selection >>> rbns2s = RiverBasinNumbers2Selection( ... (111, 113, 1129, 11269, 1125, 11261, ... 11262, 1123, 1124, 1122, 1121)) Note that the required outlet node is added: >>> rbns2s.nodes Nodes("node_1123", "node_1125", "node_11269", "node_1129", "node_113", "node_outlet") It is both possible to change the prefix names of the nodes and the name of the outlet node separately: >>> rbns2s.node_prefix = 'b_' >>> rbns2s.last_node = 'l_node' >>> rbns2s.nodes Nodes("b_1123", "b_1125", "b_11269", "b_1129", "b_113", "l_node") """
return ( devicetools.Nodes( self.node_prefix+routers for routers in self._router_numbers) + devicetools.Node(self.last_node))
<SYSTEM_TASK:> A complete |Selection| object of all "supplying" and "routing" <END_TASK> <USER_TASK:> Description: def selection(self): """A complete |Selection| object of all "supplying" and "routing" elements and required nodes. >>> from hydpy import RiverBasinNumbers2Selection >>> rbns2s = RiverBasinNumbers2Selection( ... (111, 113, 1129, 11269, 1125, 11261, ... 11262, 1123, 1124, 1122, 1121)) >>> rbns2s.selection Selection("complete", nodes=("node_1123", "node_1125", "node_11269", "node_1129", "node_113", "node_outlet"), elements=("land_111", "land_1121", "land_1122", "land_1123", "land_1124", "land_1125", "land_11261", "land_11262", "land_11269", "land_1129", "land_113", "stream_1123", "stream_1125", "stream_11269", "stream_1129", "stream_113")) Besides the possible modifications on the names of the different nodes and elements, the name of the selection can be set differently: >>> rbns2s.selection_name = 'sel' >>> from hydpy import pub >>> with pub.options.ellipsis(1): ... print(repr(rbns2s.selection)) Selection("sel", nodes=("node_1123", ...,"node_outlet"), elements=("land_111", ...,"stream_113")) """
return selectiontools.Selection( self.selection_name, self.nodes, self.elements)
<SYSTEM_TASK:> Add a new dimension with the given name and length to the given <END_TASK> <USER_TASK:> Description: def create_dimension(ncfile, name, length) -> None: """Add a new dimension with the given name and length to the given NetCDF file. Essentially, |create_dimension| just calls the equally named method of the NetCDF library, but adds information to possible error messages: >>> from hydpy import TestIO >>> from hydpy.core.netcdftools import netcdf4 >>> with TestIO(): ... ncfile = netcdf4.Dataset('test.nc', 'w') >>> from hydpy.core.netcdftools import create_dimension >>> create_dimension(ncfile, 'dim1', 5) >>> dim = ncfile.dimensions['dim1'] >>> dim.size if hasattr(dim, 'size') else dim 5 >>> try: ... create_dimension(ncfile, 'dim1', 5) ... except BaseException as exc: ... print(exc) # doctest: +ELLIPSIS While trying to add dimension `dim1` with length `5` \ to the NetCDF file `test.nc`, the following error occurred: ... >>> ncfile.close() """
try: ncfile.createDimension(name, length) except BaseException: objecttools.augment_excmessage( 'While trying to add dimension `%s` with length `%d` ' 'to the NetCDF file `%s`' % (name, length, get_filepath(ncfile)))
<SYSTEM_TASK:> Add a new variable with the given name, datatype, and dimensions <END_TASK> <USER_TASK:> Description: def create_variable(ncfile, name, datatype, dimensions) -> None: """Add a new variable with the given name, datatype, and dimensions to the given NetCDF file. Essentially, |create_variable| just calls the equally named method of the NetCDF library, but adds information to possible error messages: >>> from hydpy import TestIO >>> from hydpy.core.netcdftools import netcdf4 >>> with TestIO(): ... ncfile = netcdf4.Dataset('test.nc', 'w') >>> from hydpy.core.netcdftools import create_variable >>> try: ... create_variable(ncfile, 'var1', 'f8', ('dim1',)) ... except BaseException as exc: ... print(str(exc).strip('"')) # doctest: +ELLIPSIS While trying to add variable `var1` with datatype `f8` and \ dimensions `('dim1',)` to the NetCDF file `test.nc`, the following error \ occurred: ... >>> from hydpy.core.netcdftools import create_dimension >>> create_dimension(ncfile, 'dim1', 5) >>> create_variable(ncfile, 'var1', 'f8', ('dim1',)) >>> import numpy >>> numpy.array(ncfile['var1'][:]) array([ nan, nan, nan, nan, nan]) >>> ncfile.close() """
default = fillvalue if (datatype == 'f8') else None try: ncfile.createVariable( name, datatype, dimensions=dimensions, fill_value=default) ncfile[name].long_name = name except BaseException: objecttools.augment_excmessage( 'While trying to add variable `%s` with datatype `%s` ' 'and dimensions `%s` to the NetCDF file `%s`' % (name, datatype, dimensions, get_filepath(ncfile)))
<SYSTEM_TASK:> Return the variable with the given name from the given NetCDF file. <END_TASK> <USER_TASK:> Description: def query_variable(ncfile, name) -> netcdf4.Variable: """Return the variable with the given name from the given NetCDF file. Essentially, |query_variable| just performs a key access via the underlying NetCDF library, but adds information to possible error messages: >>> from hydpy.core.netcdftools import query_variable >>> from hydpy import TestIO >>> from hydpy.core.netcdftools import netcdf4 >>> with TestIO(): ... file_ = netcdf4.Dataset('model.nc', 'w') >>> query_variable(file_, 'flux_prec') Traceback (most recent call last): ... OSError: NetCDF file `model.nc` does not contain variable `flux_prec`. >>> from hydpy.core.netcdftools import create_variable >>> create_variable(file_, 'flux_prec', 'f8', ()) >>> isinstance(query_variable(file_, 'flux_prec'), netcdf4.Variable) True >>> file_.close() """
try: return ncfile[name] except (IndexError, KeyError): raise OSError( 'NetCDF file `%s` does not contain variable `%s`.' % (get_filepath(ncfile), name))
<SYSTEM_TASK:> Return the |Timegrid| defined by the given NetCDF file. <END_TASK> <USER_TASK:> Description: def query_timegrid(ncfile) -> timetools.Timegrid: """Return the |Timegrid| defined by the given NetCDF file. >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import TestIO >>> from hydpy.core.netcdftools import netcdf4 >>> from hydpy.core.netcdftools import query_timegrid >>> filepath = 'LahnH/series/input/hland_v1_input_t.nc' >>> with TestIO(): ... with netcdf4.Dataset(filepath) as ncfile: ... query_timegrid(ncfile) Timegrid('1996-01-01 00:00:00', '2007-01-01 00:00:00', '1d') """
timepoints = ncfile[varmapping['timepoints']] refdate = timetools.Date.from_cfunits(timepoints.units) return timetools.Timegrid.from_timepoints( timepoints=timepoints[:], refdate=refdate, unit=timepoints.units.strip().split()[0])
<SYSTEM_TASK:> Return the data of the variable with the given name from the given <END_TASK> <USER_TASK:> Description: def query_array(ncfile, name) -> numpy.ndarray: """Return the data of the variable with the given name from the given NetCDF file. The following example shows that |query_array| returns |nan| entries to represent missing values even when the respective NetCDF variable defines a different fill value: >>> from hydpy import TestIO >>> from hydpy.core.netcdftools import netcdf4 >>> from hydpy.core import netcdftools >>> netcdftools.fillvalue = -999.0 >>> with TestIO(): ... with netcdf4.Dataset('test.nc', 'w') as ncfile: ... netcdftools.create_dimension(ncfile, 'dim1', 5) ... netcdftools.create_variable(ncfile, 'var1', 'f8', ('dim1',)) ... ncfile = netcdf4.Dataset('test.nc', 'r') >>> netcdftools.query_variable(ncfile, 'var1')[:].data array([-999., -999., -999., -999., -999.]) >>> netcdftools.query_array(ncfile, 'var1') array([ nan, nan, nan, nan, nan]) >>> import numpy >>> netcdftools.fillvalue = numpy.nan """
variable = query_variable(ncfile, name) maskedarray = variable[:] fillvalue_ = getattr(variable, '_FillValue', numpy.nan) if not numpy.isnan(fillvalue_): maskedarray[maskedarray.mask] = numpy.nan return maskedarray.data
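The fill-value handling above is ordinary masked-array bookkeeping. A minimal sketch with a plain numpy masked array (no NetCDF file involved) showing how masked entries end up as nan in the returned data:

import numpy

data = numpy.ma.masked_values([1.0, -999.0, 3.0, -999.0], -999.0)
fillvalue_ = -999.0
if not numpy.isnan(fillvalue_):
    data[data.mask] = numpy.nan    # overwrite masked entries with nan
print(data.data)                   # -> 1., nan, 3., nan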
<SYSTEM_TASK:> Prepare a |NetCDFFile| object suitable for the given |IOSequence| <END_TASK> <USER_TASK:> Description: def log(self, sequence, infoarray) -> None: """Prepare a |NetCDFFile| object suitable for the given |IOSequence| object, when necessary, and pass the given arguments to its |NetCDFFile.log| method."""
if isinstance(sequence, sequencetools.ModelSequence): descr = sequence.descr_model else: descr = 'node' if self._isolate: descr = '%s_%s' % (descr, sequence.descr_sequence) if ((infoarray is not None) and (infoarray.info['type'] != 'unmodified')): descr = '%s_%s' % (descr, infoarray.info['type']) dirpath = sequence.dirpath_ext try: files = self.folders[dirpath] except KeyError: files: Dict[str, 'NetCDFFile'] = collections.OrderedDict() self.folders[dirpath] = files try: file_ = files[descr] except KeyError: file_ = NetCDFFile( name=descr, flatten=self._flatten, isolate=self._isolate, timeaxis=self._timeaxis, dirpath=dirpath) files[descr] = file_ file_.log(sequence, infoarray)
<SYSTEM_TASK:> Pass the given |IoSequence| to a suitable instance of <END_TASK> <USER_TASK:> Description: def log(self, sequence, infoarray) -> None: """Pass the given |IoSequence| to a suitable instance of a |NetCDFVariableBase| subclass. When writing data, the second argument should be an |InfoArray|. When reading data, this argument is ignored. Simply pass |None|. (1) We prepare some devices handling some sequences by applying function |prepare_io_example_1|. We limit our attention to the returned elements, which handle the more diverse sequences: >>> from hydpy.core.examples import prepare_io_example_1 >>> nodes, (element1, element2, element3) = prepare_io_example_1() (2) We define some shortcuts for the sequences used in the following examples: >>> nied1 = element1.model.sequences.inputs.nied >>> nied2 = element2.model.sequences.inputs.nied >>> nkor2 = element2.model.sequences.fluxes.nkor >>> nkor3 = element3.model.sequences.fluxes.nkor (3) We define a function that logs these example sequences to a given |NetCDFFile| object and prints some information about the resulting object structure. Note that sequence `nkor2` is logged twice, the first time with its original time series data, the second time with averaged values: >>> from hydpy import classname >>> def test(ncfile): ... ncfile.log(nied1, nied1.series) ... ncfile.log(nied2, nied2.series) ... ncfile.log(nkor2, nkor2.series) ... ncfile.log(nkor2, nkor2.average_series()) ... ncfile.log(nkor3, nkor3.average_series()) ... for name, variable in ncfile.variables.items(): ... print(name, classname(variable), variable.subdevicenames) (4) We prepare a |NetCDFFile| object with both options `flatten` and `isolate` being disabled: >>> from hydpy.core.netcdftools import NetCDFFile >>> ncfile = NetCDFFile( ... 'model', flatten=False, isolate=False, timeaxis=1, dirpath='') (5) We log all test sequences results in two |NetCDFVariableDeep| and one |NetCDFVariableAgg| objects. To keep both NetCDF variables related to |lland_fluxes.NKor| distinguishable, the name `flux_nkor_mean` includes information about the kind of aggregation performed: >>> test(ncfile) input_nied NetCDFVariableDeep ('element1', 'element2') flux_nkor NetCDFVariableDeep ('element2',) flux_nkor_mean NetCDFVariableAgg ('element2', 'element3') (6) We confirm that the |NetCDFVariableBase| objects received the required information: >>> ncfile.flux_nkor.element2.sequence.descr_device 'element2' >>> ncfile.flux_nkor.element2.array InfoArray([[ 16., 17.], [ 18., 19.], [ 20., 21.], [ 22., 23.]]) >>> ncfile.flux_nkor_mean.element2.sequence.descr_device 'element2' >>> ncfile.flux_nkor_mean.element2.array InfoArray([ 16.5, 18.5, 20.5, 22.5]) (7) We again prepare a |NetCDFFile| object, but now with both options `flatten` and `isolate` being enabled. To log test sequences with their original time series data does now trigger the initialisation of class |NetCDFVariableFlat|. When passing aggregated data, nothing changes: >>> ncfile = NetCDFFile( ... 
'model', flatten=True, isolate=True, timeaxis=1, dirpath='') >>> test(ncfile) input_nied NetCDFVariableFlat ('element1', 'element2') flux_nkor NetCDFVariableFlat ('element2_0', 'element2_1') flux_nkor_mean NetCDFVariableAgg ('element2', 'element3') >>> ncfile.flux_nkor.element2.sequence.descr_device 'element2' >>> ncfile.flux_nkor.element2.array InfoArray([[ 16., 17.], [ 18., 19.], [ 20., 21.], [ 22., 23.]]) >>> ncfile.flux_nkor_mean.element2.sequence.descr_device 'element2' >>> ncfile.flux_nkor_mean.element2.array InfoArray([ 16.5, 18.5, 20.5, 22.5]) (8) We technically confirm that the `isolate` argument is passed to the constructor of subclasses of |NetCDFVariableBase| correctly: >>> from unittest.mock import patch >>> with patch('hydpy.core.netcdftools.NetCDFVariableFlat') as mock: ... ncfile = NetCDFFile( ... 'model', flatten=True, isolate=False, timeaxis=0, ... dirpath='') ... ncfile.log(nied1, nied1.series) ... mock.assert_called_once_with( ... name='input_nied', timeaxis=0, isolate=False) """
aggregated = ((infoarray is not None) and (infoarray.info['type'] != 'unmodified')) descr = sequence.descr_sequence if aggregated: descr = '_'.join([descr, infoarray.info['type']]) if descr in self.variables: var_ = self.variables[descr] else: if aggregated: cls = NetCDFVariableAgg elif self._flatten: cls = NetCDFVariableFlat else: cls = NetCDFVariableDeep var_ = cls(name=descr, isolate=self._isolate, timeaxis=self._timeaxis) self.variables[descr] = var_ var_.log(sequence, infoarray)
<SYSTEM_TASK:> Item access to the wrapped |dict| object with a specialized <END_TASK> <USER_TASK:> Description: def get_index(self, name_subdevice) -> int: """Item access to the wrapped |dict| object with a specialized error message."""
try: return self.dict_[name_subdevice] except KeyError: raise OSError( 'No data for sequence `%s` and (sub)device `%s` ' 'in NetCDF file `%s` available.' % (self.name_sequence, name_subdevice, self.name_ncfile))
<SYSTEM_TASK:> Log the given |IOSequence| object either for reading or writing <END_TASK> <USER_TASK:> Description: def log(self, sequence, infoarray) -> None: """Log the given |IOSequence| object either for reading or writing data. The optional `array` argument allows for passing alternative data in an |InfoArray| object replacing the series of the |IOSequence| object, which is useful for writing modified (e.g. spatially averaged) time series. Logged time series data is available via attribute access: >>> from hydpy.core.netcdftools import NetCDFVariableBase >>> from hydpy import make_abc_testable >>> NCVar = make_abc_testable(NetCDFVariableBase) >>> ncvar = NCVar('flux_nkor', isolate=True, timeaxis=1) >>> from hydpy.core.examples import prepare_io_example_1 >>> nodes, elements = prepare_io_example_1() >>> nkor = elements.element1.model.sequences.fluxes.nkor >>> ncvar.log(nkor, nkor.series) >>> 'element1' in dir(ncvar) True >>> ncvar.element1.sequence is nkor True >>> 'element2' in dir(ncvar) False >>> ncvar.element2 Traceback (most recent call last): ... AttributeError: The NetCDFVariable object `flux_nkor` does \ neither handle time series data under the (sub)device name `element2` \ nor does it define a member named `element2`. """
descr_device = sequence.descr_device self.sequences[descr_device] = sequence self.arrays[descr_device] = infoarray
<SYSTEM_TASK:> Return a |tuple| containing the given `timeentry` and `placeentry` <END_TASK> <USER_TASK:> Description: def sort_timeplaceentries(self, timeentry, placeentry) -> Tuple[Any, Any]: """Return a |tuple| containing the given `timeentry` and `placeentry` sorted in agreement with the currently selected `timeaxis`. >>> from hydpy.core.netcdftools import NetCDFVariableBase >>> from hydpy import make_abc_testable >>> NCVar = make_abc_testable(NetCDFVariableBase) >>> ncvar = NCVar('flux_nkor', isolate=True, timeaxis=1) >>> ncvar.sort_timeplaceentries('time', 'place') ('place', 'time') >>> ncvar = NetCDFVariableDeep('test', isolate=False, timeaxis=0) >>> ncvar.sort_timeplaceentries('time', 'place') ('time', 'place') """
if self._timeaxis: return placeentry, timeentry return timeentry, placeentry
<SYSTEM_TASK:> A |tuple| containing the device names. <END_TASK> <USER_TASK:> Description: def subdevicenames(self) -> Tuple[str, ...]: """A |tuple| containing the device names."""
self: NetCDFVariableBase return tuple(self.sequences.keys())
<SYSTEM_TASK:> Required shape of |NetCDFVariableDeep.array|. <END_TASK> <USER_TASK:> Description: def shape(self) -> Tuple[int, ...]: """Required shape of |NetCDFVariableDeep.array|. For the default configuration, the first axis corresponds to the number of devices, and the second one to the number of timesteps. We show this for the 0-dimensional input sequence |lland_inputs.Nied|: >>> from hydpy.core.examples import prepare_io_example_1 >>> nodes, elements = prepare_io_example_1() >>> from hydpy.core.netcdftools import NetCDFVariableDeep >>> ncvar = NetCDFVariableDeep('input_nied', isolate=False, timeaxis=1) >>> for element in elements: ... ncvar.log(element.model.sequences.inputs.nied, None) >>> ncvar.shape (3, 4) For higher dimensional sequences, each new entry corresponds to the maximum number of fields the respective sequences require. In the next example, we select the 1-dimensional sequence |lland_fluxes.NKor|. The maximum number 3 (last value of the returned |tuple|) is due to the third element defining three hydrological response units: >>> ncvar = NetCDFVariableDeep('flux_nkor', isolate=False, timeaxis=1) >>> for element in elements: ... ncvar.log(element.model.sequences.fluxes.nkor, None) >>> ncvar.shape (3, 4, 3) When using the first axis for time (`timeaxis=0`) the order of the first two |tuple| entries turns: >>> ncvar = NetCDFVariableDeep('flux_nkor', isolate=False, timeaxis=0) >>> for element in elements: ... ncvar.log(element.model.sequences.fluxes.nkor, None) >>> ncvar.shape (4, 3, 3) """
nmb_place = len(self.sequences) nmb_time = len(hydpy.pub.timegrids.init) nmb_others = collections.deque() for sequence in self.sequences.values(): nmb_others.append(sequence.shape) nmb_others_max = tuple(numpy.max(nmb_others, axis=0)) return self.sort_timeplaceentries(nmb_time, nmb_place) + nmb_others_max
<SYSTEM_TASK:> The series data of all logged |IOSequence| objects contained <END_TASK> <USER_TASK:> Description: def array(self) -> numpy.ndarray: """The series data of all logged |IOSequence| objects contained in one single |numpy.ndarray|. The documentation on |NetCDFVariableDeep.shape| explains how |NetCDFVariableDeep.array| is structured. The first example confirms that, for the default configuration, the first axis defines the location, while the second one defines time: >>> from hydpy.core.examples import prepare_io_example_1 >>> nodes, elements = prepare_io_example_1() >>> from hydpy.core.netcdftools import NetCDFVariableDeep >>> ncvar = NetCDFVariableDeep('input_nied', isolate=False, timeaxis=1) >>> for element in elements: ... nied1 = element.model.sequences.inputs.nied ... ncvar.log(nied1, nied1.series) >>> ncvar.array array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.]]) For higher dimensional sequences, |NetCDFVariableDeep.array| can contain missing values. Such missing values show up for some fields of the second example element, which defines only two hydrological response units instead of three: >>> ncvar = NetCDFVariableDeep('flux_nkor', isolate=False, timeaxis=1) >>> for element in elements: ... nkor1 = element.model.sequences.fluxes.nkor ... ncvar.log(nkor1, nkor1.series) >>> ncvar.array[1] array([[ 16., 17., nan], [ 18., 19., nan], [ 20., 21., nan], [ 22., 23., nan]]) When using the first axis for time (`timeaxis=0`) the same data can be accessed with slightly different indexing: >>> ncvar = NetCDFVariableDeep('flux_nkor', isolate=False, timeaxis=0) >>> for element in elements: ... nkor1 = element.model.sequences.fluxes.nkor ... ncvar.log(nkor1, nkor1.series) >>> ncvar.array[:, 1] array([[ 16., 17., nan], [ 18., 19., nan], [ 20., 21., nan], [ 22., 23., nan]]) """
array = numpy.full(self.shape, fillvalue, dtype=float) for idx, (descr, subarray) in enumerate(self.arrays.items()): sequence = self.sequences[descr] array[self.get_slices(idx, sequence.shape)] = subarray return array
<SYSTEM_TASK:> Required shape of |NetCDFVariableAgg.array|. <END_TASK> <USER_TASK:> Description: def shape(self) -> Tuple[int, int]: """Required shape of |NetCDFVariableAgg.array|. For the default configuration, the first axis corresponds to the number of devices, and the second one to the number of timesteps. We show this for the 1-dimensional input sequence |lland_fluxes.NKor|: >>> from hydpy.core.examples import prepare_io_example_1 >>> nodes, elements = prepare_io_example_1() >>> from hydpy.core.netcdftools import NetCDFVariableAgg >>> ncvar = NetCDFVariableAgg('flux_nkor', isolate=False, timeaxis=1) >>> for element in elements: ... ncvar.log(element.model.sequences.fluxes.nkor, None) >>> ncvar.shape (3, 4) When using the first axis as the "timeaxis", the order of |tuple| entries turns: >>> ncvar = NetCDFVariableAgg('flux_nkor', isolate=False, timeaxis=0) >>> for element in elements: ... ncvar.log(element.model.sequences.fluxes.nkor, None) >>> ncvar.shape (4, 3) """
return self.sort_timeplaceentries( len(hydpy.pub.timegrids.init), len(self.sequences))
<SYSTEM_TASK:> The aggregated data of all logged |IOSequence| objects contained <END_TASK> <USER_TASK:> Description: def array(self) -> numpy.ndarray: """The aggregated data of all logged |IOSequence| objects contained in one single |numpy.ndarray| object. The documentation on |NetCDFVariableAgg.shape| explains how |NetCDFVariableAgg.array| is structured. This first example confirms that, under default configuration (`timeaxis=1`), the first axis corresponds to the location, while the second one corresponds to time: >>> from hydpy.core.examples import prepare_io_example_1 >>> nodes, elements = prepare_io_example_1() >>> from hydpy.core.netcdftools import NetCDFVariableAgg >>> ncvar = NetCDFVariableAgg('flux_nkor', isolate=False, timeaxis=1) >>> for element in elements: ... nkor1 = element.model.sequences.fluxes.nkor ... ncvar.log(nkor1, nkor1.average_series()) >>> ncvar.array array([[ 12. , 13. , 14. , 15. ], [ 16.5, 18.5, 20.5, 22.5], [ 25. , 28. , 31. , 34. ]]) When using the first axis as the "timeaxis", the resulting |NetCDFVariableAgg.array| is the transposed: >>> ncvar = NetCDFVariableAgg('flux_nkor', isolate=False, timeaxis=0) >>> for element in elements: ... nkor1 = element.model.sequences.fluxes.nkor ... ncvar.log(nkor1, nkor1.average_series()) >>> ncvar.array array([[ 12. , 16.5, 25. ], [ 13. , 18.5, 28. ], [ 14. , 20.5, 31. ], [ 15. , 22.5, 34. ]]) """
array = numpy.full(self.shape, fillvalue, dtype=float) for idx, subarray in enumerate(self.arrays.values()): array[self.get_timeplaceslice(idx)] = subarray return array
<SYSTEM_TASK:> Required shape of |NetCDFVariableFlat.array|. <END_TASK> <USER_TASK:> Description: def shape(self) -> Tuple[int, int]: """Required shape of |NetCDFVariableFlat.array|. For 0-dimensional sequences like |lland_inputs.Nied| and for the default configuration (`timeaxis=1`), the first axis corresponds to the number of devices, and the second one to the number of timesteps: >>> from hydpy.core.examples import prepare_io_example_1 >>> nodes, elements = prepare_io_example_1() >>> from hydpy.core.netcdftools import NetCDFVariableFlat >>> ncvar = NetCDFVariableFlat('input_nied', isolate=False, timeaxis=1) >>> for element in elements: ... ncvar.log(element.model.sequences.inputs.nied, None) >>> ncvar.shape (3, 4) For higher dimensional sequences, the first axis corresponds to "subdevices", e.g. hydrological response units within different elements. The 1-dimensional sequence |lland_fluxes.NKor| is logged for three elements with one, two, and three response units respectively, making up a sum of six subdevices: >>> ncvar = NetCDFVariableFlat('flux_nkor', isolate=False, timeaxis=1) >>> for element in elements: ... ncvar.log(element.model.sequences.fluxes.nkor, None) >>> ncvar.shape (6, 4) When using the first axis as the "timeaxis", the order of |tuple| entries turns: >>> ncvar = NetCDFVariableFlat('flux_nkor', isolate=False, timeaxis=0) >>> for element in elements: ... ncvar.log(element.model.sequences.fluxes.nkor, None) >>> ncvar.shape (4, 6) """
return self.sort_timeplaceentries( len(hydpy.pub.timegrids.init), sum(len(seq) for seq in self.sequences.values()))
<SYSTEM_TASK:> The series data of all logged |IOSequence| objects contained in <END_TASK> <USER_TASK:> Description: def array(self) -> numpy.ndarray: """The series data of all logged |IOSequence| objects contained in one single |numpy.ndarray| object. The documentation on |NetCDFVariableAgg.shape| explains how |NetCDFVariableAgg.array| is structured. The first example confirms that, under default configuration (`timeaxis=1`), the first axis corresponds to the location, while the second one corresponds to time: >>> from hydpy.core.examples import prepare_io_example_1 >>> nodes, elements = prepare_io_example_1() >>> from hydpy.core.netcdftools import NetCDFVariableFlat >>> ncvar = NetCDFVariableFlat('input_nied', isolate=False, timeaxis=1) >>> for element in elements: ... nied1 = element.model.sequences.inputs.nied ... ncvar.log(nied1, nied1.series) >>> ncvar.array array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.]]) Due to the flattening of higher dimensional sequences, their individual time series (e.g. of different hydrological response units) are spread over the rows of the array. For the 1-dimensional sequence |lland_fluxes.NKor|, the individual time series of the second element are stored in row two and three: >>> ncvar = NetCDFVariableFlat('flux_nkor', isolate=False, timeaxis=1) >>> for element in elements: ... nkor1 = element.model.sequences.fluxes.nkor ... ncvar.log(nkor1, nkor1.series) >>> ncvar.array[1:3] array([[ 16., 18., 20., 22.], [ 17., 19., 21., 23.]]) When using the first axis as the "timeaxis", the individual time series of the second element are stored in column two and three: >>> ncvar = NetCDFVariableFlat('flux_nkor', isolate=False, timeaxis=0) >>> for element in elements: ... nkor1 = element.model.sequences.fluxes.nkor ... ncvar.log(nkor1, nkor1.series) >>> ncvar.array[:, 1:3] array([[ 16., 17.], [ 18., 19.], [ 20., 21.], [ 22., 23.]]) """
array = numpy.full(self.shape, fillvalue, dtype=float) idx0 = 0 idxs: List[Any] = [slice(None)] for seq, subarray in zip(self.sequences.values(), self.arrays.values()): for prod in self._product(seq.shape): subsubarray = subarray[tuple(idxs + list(prod))] array[self.get_timeplaceslice(idx0)] = subsubarray idx0 += 1 return array
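How the flattening spreads the sub-series of a single device over several rows can be shown with plain numpy. The sketch below assumes one 1-dimensional sequence with three units and four time steps and the default `timeaxis=1` layout (place first, time second):

import numpy

series = numpy.arange(12.0).reshape(4, 3)    # (time, unit) data of one device
flat = numpy.full((3, 4), numpy.nan)         # (subdevice, time) target array

for unit in range(series.shape[1]):
    flat[unit, :] = series[:, unit]          # one row per hydrological unit

print(flat)    # rows 0-2 hold the time series of units 0-2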
<SYSTEM_TASK:> Determine the number of substeps. <END_TASK> <USER_TASK:> Description: def update(self): """Determine the number of substeps. Initialize a llake model and assume a simulation step size of 12 hours: >>> from hydpy.models.llake import * >>> parameterstep('1d') >>> simulationstep('12h') If the maximum internal step size is also set to 12 hours, there is only one internal calculation step per outer simulation step: >>> maxdt('12h') >>> derived.nmbsubsteps.update() >>> derived.nmbsubsteps nmbsubsteps(1) Assigning smaller values to `maxdt` increases `nmbsubsteps`: >>> maxdt('1h') >>> derived.nmbsubsteps.update() >>> derived.nmbsubsteps nmbsubsteps(12) In case the simulation step is not a whole multiple of `maxdt`, the value of `nmbsubsteps` is rounded up: >>> maxdt('59m') >>> derived.nmbsubsteps.update() >>> derived.nmbsubsteps nmbsubsteps(13) Even for `maxdt` values exceeding the simulation step, the value of `nmbsubsteps` does not become smaller than one: >>> maxdt('2d') >>> derived.nmbsubsteps.update() >>> derived.nmbsubsteps nmbsubsteps(1) """
maxdt = self.subpars.pars.control.maxdt seconds = self.simulationstep.seconds self.value = numpy.ceil(seconds/maxdt)
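The update is a single ceiling division of the simulation step length by `maxdt`. The arithmetic in seconds, reproducing the values from the examples above:

import math

seconds = 12 * 60 * 60                  # simulation step of 12 hours
for maxdt in (12 * 3600, 3600, 59 * 60, 2 * 24 * 3600):
    print(maxdt, '->', math.ceil(seconds / maxdt))
# 43200 -> 1, 3600 -> 12, 3540 -> 13, 172800 -> 1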
<SYSTEM_TASK:> Prepare an IO example configuration. <END_TASK> <USER_TASK:> Description: def prepare_io_example_1() -> Tuple[devicetools.Nodes, devicetools.Elements]: # noinspection PyUnresolvedReferences """Prepare an IO example configuration. >>> from hydpy.core.examples import prepare_io_example_1 >>> nodes, elements = prepare_io_example_1() (1) Prepares a short initialisation period of five days: >>> from hydpy import pub >>> pub.timegrids Timegrids(Timegrid('2000-01-01 00:00:00', '2000-01-05 00:00:00', '1d')) (2) Prepares a plain IO testing directory structure: >>> pub.sequencemanager.inputdirpath 'inputpath' >>> pub.sequencemanager.fluxdirpath 'outputpath' >>> pub.sequencemanager.statedirpath 'outputpath' >>> pub.sequencemanager.nodedirpath 'nodepath' >>> import os >>> from hydpy import TestIO >>> with TestIO(): ... print(sorted(filename for filename in os.listdir('.') ... if not filename.startswith('_'))) ['inputpath', 'nodepath', 'outputpath'] (3) Returns three |Element| objects handling either application model |lland_v1| or |lland_v2|, and two |Node| objects handling variables `Q` and `T`: >>> for element in elements: ... print(element.name, element.model) element1 lland_v1 element2 lland_v1 element3 lland_v2 >>> for node in nodes: ... print(node.name, node.variable) node1 Q node2 T (4) Prepares the time series data of the input sequence |lland_inputs.Nied|, flux sequence |lland_fluxes.NKor|, and state sequence |lland_states.BoWa| for each model instance, and |Sim| for each node instance (all values are different), e.g.: >>> nied1 = elements.element1.model.sequences.inputs.nied >>> nied1.series InfoArray([ 0., 1., 2., 3.]) >>> nkor1 = elements.element1.model.sequences.fluxes.nkor >>> nkor1.series InfoArray([[ 12.], [ 13.], [ 14.], [ 15.]]) >>> bowa3 = elements.element3.model.sequences.states.bowa >>> bowa3.series InfoArray([[ 48., 49., 50.], [ 51., 52., 53.], [ 54., 55., 56.], [ 57., 58., 59.]]) >>> sim2 = nodes.node2.sequences.sim >>> sim2.series InfoArray([ 64., 65., 66., 67.]) (5) All sequences carry |numpy.ndarray| objects with (deep) copies of the time series data for testing: >>> import numpy >>> (numpy.all(nied1.series == nied1.testarray) and ... numpy.all(nkor1.series == nkor1.testarray) and ... numpy.all(bowa3.series == bowa3.testarray) and ... numpy.all(sim2.series == sim2.testarray)) InfoArray(True, dtype=bool) >>> bowa3.series[1, 2] = -999.0 >>> numpy.all(bowa3.series == bowa3.testarray) InfoArray(False, dtype=bool) """
from hydpy import TestIO TestIO.clear() from hydpy.core.filetools import SequenceManager hydpy.pub.sequencemanager = SequenceManager() with TestIO(): hydpy.pub.sequencemanager.inputdirpath = 'inputpath' hydpy.pub.sequencemanager.fluxdirpath = 'outputpath' hydpy.pub.sequencemanager.statedirpath = 'outputpath' hydpy.pub.sequencemanager.nodedirpath = 'nodepath' hydpy.pub.timegrids = '2000-01-01', '2000-01-05', '1d' from hydpy import Node, Nodes, Element, Elements, prepare_model node1 = Node('node1') node2 = Node('node2', variable='T') nodes = Nodes(node1, node2) element1 = Element('element1', outlets=node1) element2 = Element('element2', outlets=node1) element3 = Element('element3', outlets=node1) elements = Elements(element1, element2, element3) from hydpy.models import lland_v1, lland_v2 element1.model = prepare_model(lland_v1) element2.model = prepare_model(lland_v1) element3.model = prepare_model(lland_v2) from hydpy.models.lland import ACKER for idx, element in enumerate(elements): parameters = element.model.parameters parameters.control.nhru(idx+1) parameters.control.lnk(ACKER) parameters.derived.absfhru(10.0) with hydpy.pub.options.printprogress(False): nodes.prepare_simseries() elements.prepare_inputseries() elements.prepare_fluxseries() elements.prepare_stateseries() def init_values(seq, value1_): value2_ = value1_ + len(seq.series.flatten()) values_ = numpy.arange(value1_, value2_, dtype=float) seq.testarray = values_.reshape(seq.seriesshape) seq.series = seq.testarray.copy() return value2_ import numpy value1 = 0 for subname, seqname in zip(['inputs', 'fluxes', 'states'], ['nied', 'nkor', 'bowa']): for element in elements: subseqs = getattr(element.model.sequences, subname) value1 = init_values(getattr(subseqs, seqname), value1) for node in nodes: value1 = init_values(node.sequences.sim, value1) return nodes, elements
<SYSTEM_TASK:> Returns the team ID associated with the team name that is passed in. <END_TASK> <USER_TASK:> Description: def get_team_id(team_name): """ Returns the team ID associated with the team name that is passed in. Parameters ---------- team_name : str The team name whose ID we want. NOTE: Only pass in the team name (e.g. "Lakers"), not the city, or city and team name, or the team abbreviation. Returns ------- team_id : int The team ID associated with the team name. """
df = get_all_team_ids() df = df[df.TEAM_NAME == team_name] if len(df) == 0: er = "Invalid team name or there is no team with that name." raise ValueError(er) team_id = df.TEAM_ID.iloc[0] return team_id
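The lookup itself is a plain pandas filter. Below is a sketch against a hypothetical stand-in for the frame returned by `get_all_team_ids` (the column names match the ones used above; the IDs are only illustrative):

import pandas as pd

# Hypothetical stand-in for get_all_team_ids(); the real frame comes from
# the NBA stats API.
df = pd.DataFrame({'TEAM_NAME': ['Lakers', 'Celtics'],
                   'TEAM_ID': [1610612747, 1610612738]})

match = df[df.TEAM_NAME == 'Lakers']
if len(match) == 0:
    raise ValueError("Invalid team name or there is no team with that name.")
print(match.TEAM_ID.iloc[0])    # 1610612747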
<SYSTEM_TASK:> Returns the Game ID associated with the date that is passed in. <END_TASK> <USER_TASK:> Description: def get_game_id(self, date): """Returns the Game ID associated with the date that is passed in. Parameters ---------- date : str The date associated with the game whose Game ID we want. The date that is passed in can take on a numeric format of MM/DD/YY (like "01/06/16" or "01/06/2016") or the expanded Month Day, Year format (like "Jan 06, 2016" or "January 06, 2016"). Returns ------- game_id : str The desired Game ID. """
df = self.get_game_logs() game_id = df[df.GAME_DATE == date].Game_ID.values[0] return game_id
<SYSTEM_TASK:> Pass in a dictionary to update url parameters for NBA stats API <END_TASK> <USER_TASK:> Description: def update_params(self, parameters): """Pass in a dictionary to update url parameters for NBA stats API Parameters ---------- parameters : dict A dict containing key, value pairs that correspond with NBA stats API parameters. Returns ------- self : TeamLog The TeamLog object containing the updated NBA stats API parameters. """
self.url_paramaters.update(parameters) self.response = requests.get(self.base_url, params=self.url_paramaters, headers=HEADERS) # raise error if status code is not 200 self.response.raise_for_status() return self
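Detached from the class, the pattern is a dict merge followed by rebuilding the GET request. A sketch with a placeholder endpoint and parameters (not the actual NBA stats URL), preparing the request without sending it so the resulting query string can be inspected:

import requests

base_url = 'https://example.com/stats/teamgamelog'              # placeholder endpoint
params = {'Season': '2015-16', 'SeasonType': 'Regular Season'}
params.update({'SeasonType': 'Playoffs'})                       # override one parameter

prepared = requests.Request('GET', base_url, params=params).prepare()
print(prepared.url)
# https://example.com/stats/teamgamelog?Season=2015-16&SeasonType=Playoffs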
<SYSTEM_TASK:> Unsubscribe will remove interest in the given subject. If max is <END_TASK> <USER_TASK:> Description: def unsubscribe(self, subscription, max=None): """ Unsubscribe will remove interest in the given subject. If max is provided, the server processes an automatic Unsubscribe after max messages have been received. Args: subscription (pynats.Subscription): a Subscription object max (int=None): number of messages """
if max is None: self._send('UNSUB %d' % subscription.sid) self._subscriptions.pop(subscription.sid) else: subscription.max = max self._send('UNSUB %d %s' % (subscription.sid, max))
<SYSTEM_TASK:> Publish a message with an implicit inbox listener as the reply. <END_TASK> <USER_TASK:> Description: def request(self, subject, callback, msg=None): """ Publish a message with an implicit inbox listener as the reply. Message is optional. Args: subject (string): a string with the subject callback (function): callback to be called msg (string=None): payload string """
inbox = self._build_inbox() s = self.subscribe(inbox, callback) self.unsubscribe(s, 1) self.publish(subject, msg, inbox) return s
<SYSTEM_TASK:> Returns an axes with a basketball court drawn onto it. <END_TASK> <USER_TASK:> Description: def draw_court(ax=None, color='gray', lw=1, outer_lines=False): """Returns an axes with a basketball court drawn onto it. This function draws a court based on the x and y-axis values that the NBA stats API provides for the shot chart data. For example, the center of the hoop is located at the (0,0) coordinate. Twenty-two feet to the left of the center of the hoop is represented by the (-220,0) coordinates. So one foot equals +/-10 units on the x and y-axis. Parameters ---------- ax : Axes, optional The Axes object to plot the court onto. color : matplotlib color, optional The color of the court lines. lw : float, optional The linewidth of the court lines. outer_lines : boolean, optional If `True`, it draws the out-of-bounds lines in the same style as the rest of the court. Returns ------- ax : Axes The Axes object with the court on it. """
if ax is None: ax = plt.gca() # Create the various parts of an NBA basketball court # Create the basketball hoop hoop = Circle((0, 0), radius=7.5, linewidth=lw, color=color, fill=False) # Create backboard backboard = Rectangle((-30, -12.5), 60, 0, linewidth=lw, color=color) # The paint # Create the outer box 0f the paint, width=16ft, height=19ft outer_box = Rectangle((-80, -47.5), 160, 190, linewidth=lw, color=color, fill=False) # Create the inner box of the paint, widt=12ft, height=19ft inner_box = Rectangle((-60, -47.5), 120, 190, linewidth=lw, color=color, fill=False) # Create free throw top arc top_free_throw = Arc((0, 142.5), 120, 120, theta1=0, theta2=180, linewidth=lw, color=color, fill=False) # Create free throw bottom arc bottom_free_throw = Arc((0, 142.5), 120, 120, theta1=180, theta2=0, linewidth=lw, color=color, linestyle='dashed') # Restricted Zone, it is an arc with 4ft radius from center of the hoop restricted = Arc((0, 0), 80, 80, theta1=0, theta2=180, linewidth=lw, color=color) # Three point line # Create the right side 3pt lines, it's 14ft long before it arcs corner_three_a = Rectangle((-220, -47.5), 0, 140, linewidth=lw, color=color) # Create the right side 3pt lines, it's 14ft long before it arcs corner_three_b = Rectangle((220, -47.5), 0, 140, linewidth=lw, color=color) # 3pt arc - center of arc will be the hoop, arc is 23'9" away from hoop three_arc = Arc((0, 0), 475, 475, theta1=22, theta2=158, linewidth=lw, color=color) # Center Court center_outer_arc = Arc((0, 422.5), 120, 120, theta1=180, theta2=0, linewidth=lw, color=color) center_inner_arc = Arc((0, 422.5), 40, 40, theta1=180, theta2=0, linewidth=lw, color=color) # List of the court elements to be plotted onto the axes court_elements = [hoop, backboard, outer_box, inner_box, top_free_throw, bottom_free_throw, restricted, corner_three_a, corner_three_b, three_arc, center_outer_arc, center_inner_arc] if outer_lines: # Draw the half court line, baseline and side out bound lines outer_lines = Rectangle((-250, -47.5), 500, 470, linewidth=lw, color=color, fill=False) court_elements.append(outer_lines) # Add the court elements onto the axes for element in court_elements: ax.add_patch(element) return ax
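A short usage sketch, assuming the `draw_court` function defined above is available in the session: draw the court on a fresh matplotlib figure and save it to disk.

import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(12, 11))
draw_court(ax, color='black', lw=2, outer_lines=True)   # include out-of-bounds lines
ax.set_xlim(-250, 250)
ax.set_ylim(422.5, -47.5)                               # hoop towards the top
fig.savefig('court.png', dpi=150)
plt.close(fig)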
<SYSTEM_TASK:> Returns an Axes object with player shots plotted. <END_TASK> <USER_TASK:> Description: def shot_chart(x, y, kind="scatter", title="", color="b", cmap=None, xlim=(-250, 250), ylim=(422.5, -47.5), court_color="gray", court_lw=1, outer_lines=False, flip_court=False, kde_shade=True, gridsize=None, ax=None, despine=False, **kwargs): """ Returns an Axes object with player shots plotted. Parameters ---------- x, y : strings or vector The x and y coordinates of the shots taken. They can be passed in as vectors (such as a pandas Series) or as columns from the pandas DataFrame passed into ``data``. data : DataFrame, optional DataFrame containing shots where ``x`` and ``y`` represent the shot location coordinates. kind : { "scatter", "kde", "hex" }, optional The kind of shot chart to create. title : str, optional The title for the plot. color : matplotlib color, optional Color used to plot the shots cmap : matplotlib Colormap object or name, optional Colormap for the range of data values. If one isn't provided, the colormap is derived from the valuue passed to ``color``. Used for KDE and Hexbin plots. {x, y}lim : two-tuples, optional The axis limits of the plot. court_color : matplotlib color, optional The color of the court lines. court_lw : float, optional The linewidth the of the court lines. outer_lines : boolean, optional If ``True`` the out of bound lines are drawn in as a matplotlib Rectangle. flip_court : boolean, optional If ``True`` orients the hoop towards the bottom of the plot. Default is ``False``, which orients the court where the hoop is towards the top of the plot. kde_shade : boolean, optional Default is ``True``, which shades in the KDE contours. gridsize : int, optional Number of hexagons in the x-direction. The default is calculated using the Freedman-Diaconis method. ax : Axes, optional The Axes object to plot the court onto. despine : boolean, optional If ``True``, removes the spines. kwargs : key, value pairs Keyword arguments for matplotlib Collection properties or seaborn plots. Returns ------- ax : Axes The Axes object with the shot chart plotted on it. """
if ax is None: ax = plt.gca() if cmap is None: cmap = sns.light_palette(color, as_cmap=True) if not flip_court: ax.set_xlim(xlim) ax.set_ylim(ylim) else: ax.set_xlim(xlim[::-1]) ax.set_ylim(ylim[::-1]) ax.tick_params(labelbottom="off", labelleft="off") ax.set_title(title, fontsize=18) draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines) if kind == "scatter": ax.scatter(x, y, c=color, **kwargs) elif kind == "kde": sns.kdeplot(x, y, shade=kde_shade, cmap=cmap, ax=ax, **kwargs) ax.set_xlabel('') ax.set_ylabel('') elif kind == "hex": if gridsize is None: # Get the number of bins for hexbin using Freedman-Diaconis rule # This is idea was taken from seaborn, which got the calculation # from http://stats.stackexchange.com/questions/798/ from seaborn.distributions import _freedman_diaconis_bins x_bin = _freedman_diaconis_bins(x) y_bin = _freedman_diaconis_bins(y) gridsize = int(np.mean([x_bin, y_bin])) ax.hexbin(x, y, gridsize=gridsize, cmap=cmap, **kwargs) else: raise ValueError("kind must be 'scatter', 'kde', or 'hex'.") # Set the spines to match the rest of court lines, makes outer_lines # somewhate unnecessary for spine in ax.spines: ax.spines[spine].set_lw(court_lw) ax.spines[spine].set_color(court_color) if despine: ax.spines["top"].set_visible(False) ax.spines["bottom"].set_visible(False) ax.spines["right"].set_visible(False) ax.spines["left"].set_visible(False) return ax
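A usage sketch with synthetic shot coordinates (uniformly random points, not real NBA data), assuming the `shot_chart` function defined above is available:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.RandomState(0)
x = rng.uniform(-220, 220, size=200)    # synthetic shot x-coordinates
y = rng.uniform(-40, 300, size=200)     # synthetic shot y-coordinates

ax = shot_chart(x, y, kind='scatter', title='Synthetic shots', color='b', alpha=0.5)
plt.savefig('shot_chart.png', dpi=150)
plt.close()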
<SYSTEM_TASK:> Returns a seaborn JointGrid using sns.jointplot <END_TASK> <USER_TASK:> Description: def shot_chart_jointplot(x, y, data=None, kind="scatter", title="", color="b", cmap=None, xlim=(-250, 250), ylim=(422.5, -47.5), court_color="gray", court_lw=1, outer_lines=False, flip_court=False, size=(12, 11), space=0, despine=False, joint_kws=None, marginal_kws=None, **kwargs): """ Returns a seaborn JointGrid using sns.jointplot Parameters ---------- x, y : strings or vector The x and y coordinates of the shots taken. They can be passed in as vectors (such as a pandas Series) or as column names from the pandas DataFrame passed into ``data``. data : DataFrame, optional DataFrame containing shots where ``x`` and ``y`` represent the shot location coordinates. kind : { "scatter", "kde", "hex" }, optional The kind of shot chart to create. title : str, optional The title for the plot. color : matplotlib color, optional Color used to plot the shots cmap : matplotlib Colormap object or name, optional Colormap for the range of data values. If one isn't provided, the colormap is derived from the valuue passed to ``color``. Used for KDE and Hexbin joint plots. {x, y}lim : two-tuples, optional The axis limits of the plot. The defaults represent the out of bounds lines and half court line. court_color : matplotlib color, optional The color of the court lines. court_lw : float, optional The linewidth the of the court lines. outer_lines : boolean, optional If ``True`` the out of bound lines are drawn in as a matplotlib Rectangle. flip_court : boolean, optional If ``True`` orients the hoop towards the bottom of the plot. Default is ``False``, which orients the court where the hoop is towards the top of the plot. gridsize : int, optional Number of hexagons in the x-direction. The default is calculated using the Freedman-Diaconis method. size : tuple, optional The width and height of the plot in inches. space : numeric, optional The space between the joint and marginal plots. {joint, marginal}_kws : dicts Additional kewyord arguments for joint and marginal plot components. kwargs : key, value pairs Keyword arguments for matplotlib Collection properties or seaborn plots. Returns ------- grid : JointGrid The JointGrid object with the shot chart plotted on it. """
# If a colormap is not provided, then it is based off of the color if cmap is None: cmap = sns.light_palette(color, as_cmap=True) if kind not in ["scatter", "kde", "hex"]: raise ValueError("kind must be 'scatter', 'kde', or 'hex'.") grid = sns.jointplot(x=x, y=y, data=data, stat_func=None, kind=kind, space=0, color=color, cmap=cmap, joint_kws=joint_kws, marginal_kws=marginal_kws, **kwargs) grid.fig.set_size_inches(size) # A joint plot has 3 Axes, the first one called ax_joint # is the one we want to draw our court onto and adjust some other settings ax = grid.ax_joint if not flip_court: ax.set_xlim(xlim) ax.set_ylim(ylim) else: ax.set_xlim(xlim[::-1]) ax.set_ylim(ylim[::-1]) draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines) # Get rid of axis labels and tick marks ax.set_xlabel('') ax.set_ylabel('') ax.tick_params(labelbottom='off', labelleft='off') # Add a title ax.set_title(title, y=1.2, fontsize=18) # Set the spines to match the rest of court lines, makes outer_lines # somewhate unnecessary for spine in ax.spines: ax.spines[spine].set_lw(court_lw) ax.spines[spine].set_color(court_color) # set the margin joint spines to be same as the rest of the plot grid.ax_marg_x.spines[spine].set_lw(court_lw) grid.ax_marg_x.spines[spine].set_color(court_color) grid.ax_marg_y.spines[spine].set_lw(court_lw) grid.ax_marg_y.spines[spine].set_color(court_color) if despine: ax.spines["top"].set_visible(False) ax.spines["bottom"].set_visible(False) ax.spines["right"].set_visible(False) ax.spines["left"].set_visible(False) return grid
<SYSTEM_TASK:> Returns an AxesImage object that contains a heatmap. <END_TASK> <USER_TASK:> Description:
def heatmap(x, y, z, title="", cmap=plt.cm.YlOrRd, bins=20,
            xlim=(-250, 250), ylim=(422.5, -47.5),
            facecolor='lightgray', facecolor_alpha=0.4,
            court_color="black", court_lw=0.5, outer_lines=False,
            flip_court=False, ax=None, **kwargs):
    """
    Returns an AxesImage object that contains a heatmap.

    The (x, y) shot locations are binned into a 2D grid and the mean of ``z``
    is computed within each bin. When ``z`` is a 0/1 "shot made" flag, each
    cell of the heatmap therefore shows the field goal percentage for that
    region of the court. The title, colormap, axis limit, court, and Axes
    parameters behave as in ``shot_chart_jointplot``; ``facecolor`` and
    ``facecolor_alpha`` set the background color of the Axes patch and its
    transparency, ``bins`` is passed to
    ``scipy.stats.binned_statistic_2d``, and ``kwargs`` are passed on to
    ``ax.imshow``.
    """
    # Bin the FGA (x, y) and calculate the mean of z (e.g. whether the shot
    # was made) within each bin; the mean is the FG percentage for each bin.
    mean, xedges, yedges, binnumber = binned_statistic_2d(x=x, y=y, values=z,
                                                          statistic='mean',
                                                          bins=bins)

    if ax is None:
        ax = plt.gca()

    if not flip_court:
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
    else:
        ax.set_xlim(xlim[::-1])
        ax.set_ylim(ylim[::-1])

    ax.tick_params(labelbottom=False, labelleft=False)
    ax.set_title(title, fontsize=18)

    ax.patch.set_facecolor(facecolor)
    ax.patch.set_alpha(facecolor_alpha)

    draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines)

    heatmap = ax.imshow(mean.T, origin='lower',
                        extent=[xedges[0], xedges[-1],
                                yedges[0], yedges[-1]],
                        interpolation='nearest', cmap=cmap, **kwargs)

    return heatmap
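A minimal usage sketch for ``heatmap``, assuming synthetic shot coordinates and a 0/1 made-shot flag (all values below are placeholders):

import numpy as np
import matplotlib.pyplot as plt

# Synthetic shots in the NBA stats coordinate system; z is 1 for made,
# 0 for missed, so the binned mean in each cell is FG%.
rng = np.random.default_rng(0)
x = rng.uniform(-250, 250, size=500)
y = rng.uniform(-47.5, 422.5, size=500)
z = rng.integers(0, 2, size=500)

fig, ax = plt.subplots(figsize=(12, 11))
img = heatmap(x, y, z, title="FG% by court region", bins=25, ax=ax)
fig.colorbar(img, ax=ax, label="FG%")
plt.show()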
<SYSTEM_TASK:> Returns a figure with the basketball court lines drawn onto it <END_TASK> <USER_TASK:> Description:
def bokeh_draw_court(figure, line_color='gray', line_width=1):
    """Returns a figure with the basketball court lines drawn onto it

    This function draws a court based on the x and y-axis values that the NBA
    stats API provides for the shot chart data. For example, the center of the
    hoop is located at the (0, 0) coordinate. Twenty-two feet to the left of
    the center of the hoop is represented by the (-220, 0) coordinate. So one
    foot equals +/-10 units on the x and y-axis.

    Parameters
    ----------
    figure : Bokeh figure object
        The Bokeh figure to plot the court onto.
    line_color : str, optional
        The color of the court lines. Can be a hex value.
    line_width : float, optional
        The linewidth of the court lines in pixels.

    Returns
    -------
    figure : Figure
        The Figure object with the court on it.
    """
    # hoop
    figure.circle(x=0, y=0, radius=7.5, fill_alpha=0,
                  line_color=line_color, line_width=line_width)

    # backboard
    figure.line(x=range(-30, 31), y=-12.5, line_color=line_color)

    # The paint
    # outer box
    figure.rect(x=0, y=47.5, width=160, height=190, fill_alpha=0,
                line_color=line_color, line_width=line_width)
    # inner box
    # left inner box line
    figure.line(x=-60, y=np.arange(-47.5, 143.5), line_color=line_color,
                line_width=line_width)
    # right inner box line
    figure.line(x=60, y=np.arange(-47.5, 143.5), line_color=line_color,
                line_width=line_width)

    # Restricted Zone
    figure.arc(x=0, y=0, radius=40, start_angle=pi, end_angle=0,
               line_color=line_color, line_width=line_width)

    # top free throw arc
    figure.arc(x=0, y=142.5, radius=60, start_angle=pi, end_angle=0,
               line_color=line_color)
    # bottom free throw arc
    figure.arc(x=0, y=142.5, radius=60, start_angle=0, end_angle=pi,
               line_color=line_color, line_dash="dashed")

    # Three point line
    # corner three point lines
    figure.line(x=-220, y=np.arange(-47.5, 92.5), line_color=line_color,
                line_width=line_width)
    figure.line(x=220, y=np.arange(-47.5, 92.5), line_color=line_color,
                line_width=line_width)
    # three point arc
    figure.arc(x=0, y=0, radius=237.5, start_angle=3.528, end_angle=-0.3863,
               line_color=line_color, line_width=line_width)

    # add center court
    # outer center arc
    figure.arc(x=0, y=422.5, radius=60, start_angle=0, end_angle=pi,
               line_color=line_color, line_width=line_width)
    # inner center arc
    figure.arc(x=0, y=422.5, radius=20, start_angle=0, end_angle=pi,
               line_color=line_color, line_width=line_width)

    # outer lines, consisting of the half court line and out of bounds lines
    figure.rect(x=0, y=187.5, width=500, height=470, fill_alpha=0,
                line_color=line_color, line_width=line_width)

    return figure
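A minimal usage sketch for ``bokeh_draw_court``, assuming a recent Bokeh release (the ``width``/``height`` keyword names are an assumption and may differ in older Bokeh versions):

from bokeh.plotting import figure, show

# Ranges match the NBA stats coordinate system; y_range is reversed so the
# hoop sits at the top of the plot, as in the matplotlib charts above.
court_fig = figure(width=600, height=600,
                   x_range=(-250, 250), y_range=(422.5, -47.5))
court_fig = bokeh_draw_court(court_fig, line_color="gray", line_width=1)
show(court_fig)  # renders the empty court; add scatter glyphs for shots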