text_prompt
stringlengths
100
17.7k
code_prompt
stringlengths
7
9.86k
<SYSTEM_TASK:> Print a simple progress bar while processing the given iterable. <END_TASK> <USER_TASK:> Description: def progressbar(iterable, length=23): """Print a simple progress bar while processing the given iterable. Function |progressbar| does print the progress bar when option `printprogress` is activted: >>> from hydpy import pub >>> pub.options.printprogress = True You can pass an iterable object. Say you want to calculate the the sum of all integer values from 1 to 100 and print the progress of the calculation. Using function |range| (which returns a list in Python 2 and an iterator in Python3, but both are fine), one just has to interpose function |progressbar|: >>> from hydpy.core.printtools import progressbar >>> x_sum = 0 >>> for x in progressbar(range(1, 101)): ... x_sum += x |---------------------| *********************** >>> x_sum 5050 To prevent possible interim print commands from dismembering the status bar, they are delayed until the status bar is complete. For intermediate print outs of each fiftieth calculation, the result looks as follows: >>> x_sum = 0 >>> for x in progressbar(range(1, 101)): ... x_sum += x ... if not x % 50: ... print(x, x_sum) |---------------------| *********************** 50 1275 100 5050 The number of characters of the progress bar can be changed: >>> for i in progressbar(range(100), length=50): ... continue |------------------------------------------------| ************************************************** But its maximum number of characters is restricted by the length of the given iterable: >>> for i in progressbar(range(10), length=50): ... continue |--------| ********** The smallest possible progress bar has two characters: >>> for i in progressbar(range(2)): ... continue || ** For iterables of length one or zero, no progress bar is plottet: >>> for i in progressbar(range(1)): ... continue The same is True when the `printprogress` option is inactivated: >>> pub.options.printprogress = False >>> for i in progressbar(range(100)): ... continue """
if hydpy.pub.options.printprogress and (len(iterable) > 1): temp_name = os.path.join(tempfile.gettempdir(), 'HydPy_progressbar_stdout') temp_stdout = open(temp_name, 'w') real_stdout = sys.stdout try: sys.stdout = temp_stdout nmbstars = min(len(iterable), length) nmbcounts = len(iterable)/nmbstars indentation = ' '*max(_printprogress_indentation, 0) with PrintStyle(color=36, font=1, file=real_stdout): print(' %s|%s|\n%s ' % (indentation, '-'*(nmbstars-2), indentation), end='', file=real_stdout) counts = 1. for next_ in iterable: counts += 1. if counts >= nmbcounts: print(end='*', file=real_stdout) counts -= nmbcounts yield next_ finally: try: temp_stdout.close() except BaseException: pass sys.stdout = real_stdout print() with open(temp_name, 'r') as temp_stdout: sys.stdout.write(temp_stdout.read()) sys.stdout.flush() else: for next_ in iterable: yield next_
<SYSTEM_TASK:> Get the types of all current exchange items supposed to change <END_TASK> <USER_TASK:> Description: def GET_parameteritemtypes(self) -> None: """Get the types of all current exchange items supposed to change the values of |Parameter| objects."""
for item in state.parameteritems: self._outputs[item.name] = self._get_itemtype(item)
<SYSTEM_TASK:> Get the types of all current exchange items supposed to change <END_TASK> <USER_TASK:> Description: def GET_conditionitemtypes(self) -> None: """Get the types of all current exchange items supposed to change the values of |StateSequence| or |LogSequence| objects."""
for item in state.conditionitems: self._outputs[item.name] = self._get_itemtype(item)
<SYSTEM_TASK:> Get the types of all current exchange items supposed to return <END_TASK> <USER_TASK:> Description: def GET_getitemtypes(self) -> None: """Get the types of all current exchange items supposed to return the values of |Parameter| or |Sequence| objects or the time series of |IOSequence| objects."""
for item in state.getitems: type_ = self._get_itemtype(item) for name, _ in item.yield_name2value(): self._outputs[name] = type_
<SYSTEM_TASK:> Get the values of all |Variable| objects observed by the <END_TASK> <USER_TASK:> Description: def GET_getitemvalues(self) -> None: """Get the values of all |Variable| objects observed by the current |GetItem| objects. For |GetItem| objects observing time series, |HydPyServer.GET_getitemvalues| returns only the values within the current simulation period. """
for item in state.getitems: for name, value in item.yield_name2value(state.idx1, state.idx2): self._outputs[name] = value
<SYSTEM_TASK:> Assign the |StateSequence| or |LogSequence| object values available <END_TASK> <USER_TASK:> Description: def GET_load_conditionvalues(self) -> None: """Assign the |StateSequence| or |LogSequence| object values available for the current simulation start point to the current |HydPy| instance. When the simulation start point is identical with the initialisation time point and you did not save conditions for it beforehand, the "original" initial conditions are used (normally those of the conditions files of the respective *HydPy* project). """
try: state.hp.conditions = state.conditions[self._id][state.idx1] except KeyError: if state.idx1: self._statuscode = 500 raise RuntimeError( f'Conditions for ID `{self._id}` and time point ' f'`{hydpy.pub.timegrids.sim.firstdate}` are required, ' f'but have not been calculated so far.') else: state.hp.conditions = state.init_conditions
<SYSTEM_TASK:> Save the |StateSequence| and |LogSequence| object values of the <END_TASK> <USER_TASK:> Description: def GET_save_conditionvalues(self) -> None: """Save the |StateSequence| and |LogSequence| object values of the current |HydPy| instance for the current simulation endpoint."""
state.conditions[self._id] = state.conditions.get(self._id, {}) state.conditions[self._id][state.idx2] = state.hp.conditions
<SYSTEM_TASK:> Save the values of those |ChangeItem| objects which are <END_TASK> <USER_TASK:> Description: def GET_save_parameteritemvalues(self) -> None: """Save the values of those |ChangeItem| objects which are handling |Parameter| objects."""
for item in state.parameteritems: state.parameteritemvalues[self._id][item.name] = item.value.copy()
<SYSTEM_TASK:> Save the current simulation period. <END_TASK> <USER_TASK:> Description: def GET_save_timegrid(self) -> None: """Save the current simulation period."""
state.timegrids[self._id] = copy.deepcopy(hydpy.pub.timegrids.sim)
<SYSTEM_TASK:> Get the previously saved simulation period. <END_TASK> <USER_TASK:> Description: def GET_savedtimegrid(self) -> None: """Get the previously saved simulation period."""
try: self._write_timegrid(state.timegrids[self._id]) except KeyError: self._write_timegrid(hydpy.pub.timegrids.init)
<SYSTEM_TASK:> Return a function usable as a comparison method for class |Variable|. <END_TASK> <USER_TASK:> Description: def _compare_variables_function_generator( method_string, aggregation_func): """Return a function usable as a comparison method for class |Variable|. Pass the specific method (e.g. `__eq__`) and the corresponding operator (e.g. `==`) as strings. Also pass either |numpy.all| or |numpy.any| for aggregating multiple boolean values. """
def comparison_function(self, other): """Wrapper for comparison functions for class |Variable|.""" if self is other: return method_string in ('__eq__', '__le__', '__ge__') method = getattr(self.value, method_string) try: if hasattr(type(other), '__hydpy__get_value__'): other = other.__hydpy__get_value__() result = method(other) if result is NotImplemented: return result return aggregation_func(result) except BaseException: objecttools.augment_excmessage( f'While trying to compare variable ' f'{objecttools.elementphrase(self)} with object ' f'`{other}` of type `{objecttools.classname(other)}`') return comparison_function
<SYSTEM_TASK:> Raises a |RuntimeError| if at least one of the required values <END_TASK> <USER_TASK:> Description: def verify(self) -> None: """Raises a |RuntimeError| if at least one of the required values of a |Variable| object is |None| or |numpy.nan|. The descriptor `mask` defines, which values are considered to be necessary. Example on a 0-dimensional |Variable|: >>> from hydpy.core.variabletools import Variable >>> class Var(Variable): ... NDIM = 0 ... TYPE = float ... __hydpy__connect_variable2subgroup__ = None ... initinfo = 0.0, False >>> var = Var(None) >>> import numpy >>> var.shape = () >>> var.value = 1.0 >>> var.verify() >>> var.value = numpy.nan >>> var.verify() Traceback (most recent call last): ... RuntimeError: For variable `var`, 1 required value has not been set yet. Example on a 2-dimensional |Variable|: >>> Var.NDIM = 2 >>> var = Var(None) >>> var.shape = (2, 3) >>> var.value = numpy.ones((2,3)) >>> var.value[:, 1] = numpy.nan >>> var.verify() Traceback (most recent call last): ... RuntimeError: For variable `var`, 2 required values \ have not been set yet. >>> Var.mask = var.mask >>> Var.mask[0, 1] = False >>> var.verify() Traceback (most recent call last): ... RuntimeError: For variable `var`, 1 required value has not been set yet. >>> Var.mask[1, 1] = False >>> var.verify() """
nmbnan: int = numpy.sum(numpy.isnan( numpy.array(self.value)[self.mask])) if nmbnan: if nmbnan == 1: text = 'value has' else: text = 'values have' raise RuntimeError( f'For variable {objecttools.devicephrase(self)}, ' f'{nmbnan} required {text} not been set yet.')
<SYSTEM_TASK:> Average the actual values of the |Variable| object. <END_TASK> <USER_TASK:> Description: def average_values(self, *args, **kwargs) -> float: """Average the actual values of the |Variable| object. For 0-dimensional |Variable| objects, the result of method |Variable.average_values| equals |Variable.value|. The following example shows this for the sloppily defined class `SoilMoisture`: >>> from hydpy.core.variabletools import Variable >>> class SoilMoisture(Variable): ... NDIM = 0 ... TYPE = float ... refweigths = None ... availablemasks = None ... __hydpy__connect_variable2subgroup__ = None ... initinfo = None >>> sm = SoilMoisture(None) >>> sm.value = 200.0 >>> sm.average_values() 200.0 When the dimensionality of this class is increased to one, applying method |Variable.average_values| results in the following error: >>> SoilMoisture.NDIM = 1 >>> import numpy >>> SoilMoisture.shape = (3,) >>> SoilMoisture.value = numpy.array([200.0, 400.0, 500.0]) >>> sm.average_values() Traceback (most recent call last): ... AttributeError: While trying to calculate the mean value \ of variable `soilmoisture`, the following error occurred: Variable \ `soilmoisture` does not define any weighting coefficients. So model developers have to define another (in this case 1-dimensional) |Variable| subclass (usually a |Parameter| subclass), and make the relevant object available via property |Variable.refweights|: >>> class Area(Variable): ... NDIM = 1 ... shape = (3,) ... value = numpy.array([1.0, 1.0, 2.0]) ... __hydpy__connect_variable2subgroup__ = None ... initinfo = None >>> area = Area(None) >>> SoilMoisture.refweights = property(lambda self: area) >>> sm.average_values() 400.0 In the examples above, all single entries of `values` are relevant, which is the default case. However, subclasses of |Variable| can define an alternative mask, allowing to make some entries irrelevant. Assume for example, that our `SoilMoisture` object contains three single values, each one associated with a specific hydrological response unit (hru). To indicate that soil moisture is undefined for the third unit, (maybe because it is a water area), we set the third entry of the verification mask to |False|: >>> from hydpy.core.masktools import DefaultMask >>> class Soil(DefaultMask): ... @classmethod ... def new(cls, variable, **kwargs): ... return cls.array2mask([True, True, False]) >>> SoilMoisture.mask = Soil() >>> sm.average_values() 300.0 Alternatively, method |Variable.average_values| accepts additional masking information as positional or keyword arguments. Therefore, the corresponding model must implement some alternative masks, which are provided by property |Variable.availablemasks|. We mock this property with a new |Masks| object, handling one mask for flat soils (only the first hru), one mask for deep soils (only the second hru), and one mask for water areas (only the third hru): >>> class FlatSoil(DefaultMask): ... @classmethod ... def new(cls, variable, **kwargs): ... return cls.array2mask([True, False, False]) >>> class DeepSoil(DefaultMask): ... @classmethod ... def new(cls, variable, **kwargs): ... return cls.array2mask([False, True, False]) >>> class Water(DefaultMask): ... @classmethod ... def new(cls, variable, **kwargs): ... return cls.array2mask([False, False, True]) >>> from hydpy.core import masktools >>> class Masks(masktools.Masks): ... CLASSES = (FlatSoil, ... DeepSoil, ... Water) >>> SoilMoisture.availablemasks = Masks(None) One can pass either the mask classes themselves or their names: >>> sm.average_values(sm.availablemasks.flatsoil) 200.0 >>> sm.average_values('deepsoil') 400.0 Both variants can be combined: >>> sm.average_values(sm.availablemasks.deepsoil, 'flatsoil') 300.0 The following error happens if the general mask of the variable does not contain the given masks: >>> sm.average_values('flatsoil', 'water') Traceback (most recent call last): ... ValueError: While trying to calculate the mean value of variable \ `soilmoisture`, the following error occurred: Based on the arguments \ `('flatsoil', 'water')` and `{}` the mask `CustomMask([ True, False, True])` \ has been determined, which is not a submask of `Soil([ True, True, False])`. Applying masks with custom options is also supported. One can change the behaviour of the following mask via the argument `complete`: >>> class AllOrNothing(DefaultMask): ... @classmethod ... def new(cls, variable, complete): ... if complete: ... bools = [True, True, True] ... else: ... bools = [False, False, False] ... return cls.array2mask(bools) >>> class Masks(Masks): ... CLASSES = (FlatSoil, ... DeepSoil, ... Water, ... AllOrNothing) >>> SoilMoisture.availablemasks = Masks(None) Again, one can apply the mask class directly (but note that one has to pass the relevant variable as the first argument.): >>> sm.average_values( # doctest: +ELLIPSIS ... sm.availablemasks.allornothing(sm, complete=True)) Traceback (most recent call last): ... ValueError: While trying to... Alternatively, one can pass the mask name as a keyword and pack the mask's options into a |dict| object: >>> sm.average_values(allornothing={'complete': False}) nan You can combine all variants explained above: >>> sm.average_values( ... 'deepsoil', flatsoil={}, allornothing={'complete': False}) 300.0 """
try: if not self.NDIM: return self.value mask = self.get_submask(*args, **kwargs) if numpy.any(mask): weights = self.refweights[mask] return numpy.sum(weights*self[mask])/numpy.sum(weights) return numpy.nan except BaseException: objecttools.augment_excmessage( f'While trying to calculate the mean value of variable ' f'{objecttools.devicephrase(self)}')
<SYSTEM_TASK:> Get a sub-mask of the mask handled by the actual |Variable| object <END_TASK> <USER_TASK:> Description: def get_submask(self, *args, **kwargs) -> masktools.CustomMask: """Get a sub-mask of the mask handled by the actual |Variable| object based on the given arguments. See the documentation on method |Variable.average_values| for further information. """
if args or kwargs: masks = self.availablemasks mask = masktools.CustomMask(numpy.full(self.shape, False)) for arg in args: mask = mask + self._prepare_mask(arg, masks) for key, value in kwargs.items(): mask = mask + self._prepare_mask(key, masks, **value) if mask not in self.mask: raise ValueError( f'Based on the arguments `{args}` and `{kwargs}` ' f'the mask `{repr(mask)}` has been determined, ' f'which is not a submask of `{repr(self.mask)}`.') else: mask = self.mask return mask
<SYSTEM_TASK:> A list with comments for making string representations <END_TASK> <USER_TASK:> Description: def commentrepr(self) -> List[str]: """A list with comments for making string representations more informative. With option |Options.reprcomments| being disabled, |Variable.commentrepr| is empty. """
if hydpy.pub.options.reprcomments: return [f'# {line}' for line in textwrap.wrap(objecttools.description(self), 72)] return []
<SYSTEM_TASK:> Return the header of a regular or auxiliary parameter control file. <END_TASK> <USER_TASK:> Description: def get_controlfileheader( model: Union[str, 'modeltools.Model'], parameterstep: timetools.PeriodConstrArg = None, simulationstep: timetools.PeriodConstrArg = None) -> str: """Return the header of a regular or auxiliary parameter control file. The header contains the default coding information, the import command for the given model and the actual parameter and simulation step sizes. The first example shows that, if you pass the model argument as a string, you have to take care that this string makes sense: >>> from hydpy.core.parametertools import get_controlfileheader, Parameter >>> from hydpy import Period, prepare_model, pub, Timegrids, Timegrid >>> print(get_controlfileheader(model='no model class', ... parameterstep='-1h', ... simulationstep=Period('1h'))) # -*- coding: utf-8 -*- <BLANKLINE> from hydpy.models.no model class import * <BLANKLINE> simulationstep('1h') parameterstep('-1h') <BLANKLINE> <BLANKLINE> The second example shows the saver option to pass the proper model object. It also shows that function |get_controlfileheader| tries to gain the parameter and simulation step sizes from the global |Timegrids| object contained in the module |pub| when necessary: >>> model = prepare_model('lland_v1') >>> _ = Parameter.parameterstep('1d') >>> pub.timegrids = '2000.01.01', '2001.01.01', '1h' >>> print(get_controlfileheader(model=model)) # -*- coding: utf-8 -*- <BLANKLINE> from hydpy.models.lland_v1 import * <BLANKLINE> simulationstep('1h') parameterstep('1d') <BLANKLINE> <BLANKLINE> """
with Parameter.parameterstep(parameterstep): if simulationstep is None: simulationstep = Parameter.simulationstep else: simulationstep = timetools.Period(simulationstep) return (f"# -*- coding: utf-8 -*-\n\n" f"from hydpy.models.{model} import *\n\n" f"simulationstep('{simulationstep}')\n" f"parameterstep('{Parameter.parameterstep}')\n\n")
<SYSTEM_TASK:> Call method |Parameter.update| of all "secondary" parameters. <END_TASK> <USER_TASK:> Description: def update(self) -> None: """Call method |Parameter.update| of all "secondary" parameters. Directly after initialisation, neither the primary (`control`) parameters nor the secondary (`derived`) parameters of application model |hstream_v1| are ready for usage: >>> from hydpy.models.hstream_v1 import * >>> parameterstep('1d') >>> simulationstep('1d') >>> derived nmbsegments(?) c1(?) c3(?) c2(?) Trying to update the values of the secondary parameters while the primary ones are still not defined, raises errors like the following: >>> model.parameters.update() Traceback (most recent call last): ... AttributeError: While trying to update parameter ``nmbsegments` \ of element `?``, the following error occurred: For variable `lag`, \ no value has been defined so far. With proper values both for parameter |hstream_control.Lag| and |hstream_control.Damp|, updating the derived parameters succeeds: >>> lag(0.0) >>> damp(0.0) >>> model.parameters.update() >>> derived nmbsegments(0) c1(0.0) c3(0.0) c2(1.0) """
for subpars in self.secondary_subpars: for par in subpars: try: par.update() except BaseException: objecttools.augment_excmessage( f'While trying to update parameter ' f'`{objecttools.elementphrase(par)}`')
<SYSTEM_TASK:> Write the control parameters to file. <END_TASK> <USER_TASK:> Description: def save_controls(self, filepath: Optional[str] = None, parameterstep: timetools.PeriodConstrArg = None, simulationstep: timetools.PeriodConstrArg = None, auxfiler: 'auxfiletools.Auxfiler' = None): """Write the control parameters to file. Usually, a control file consists of a header (see the documentation on the method |get_controlfileheader|) and the string representations of the individual |Parameter| objects handled by the `control` |SubParameters| object. The main functionality of method |Parameters.save_controls| is demonstrated in the documentation on the method |HydPy.save_controls| of class |HydPy|, which one would apply to write the parameter information of complete *HydPy* projects. However, to call |Parameters.save_controls| on individual |Parameters| objects offers the advantage to choose an arbitrary file path, as shown in the following example: >>> from hydpy.models.hstream_v1 import * >>> parameterstep('1d') >>> simulationstep('1h') >>> lag(1.0) >>> damp(0.5) >>> from hydpy import Open >>> with Open(): ... model.parameters.save_controls('otherdir/otherfile.py') ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ otherdir/otherfile.py ------------------------------------- # -*- coding: utf-8 -*- <BLANKLINE> from hydpy.models.hstream_v1 import * <BLANKLINE> simulationstep('1h') parameterstep('1d') <BLANKLINE> lag(1.0) damp(0.5) <BLANKLINE> ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Without a given file path and a proper project configuration, method |Parameters.save_controls| raises the following error: >>> model.parameters.save_controls() Traceback (most recent call last): ... RuntimeError: To save the control parameters of a model to a file, \ its filename must be known. This can be done, by passing a filename to \ function `save_controls` directly. But in complete HydPy applications, \ it is usally assumed to be consistent with the name of the element \ handling the model. """
if self.control: variable2auxfile = getattr(auxfiler, str(self.model), None) lines = [get_controlfileheader( self.model, parameterstep, simulationstep)] with Parameter.parameterstep(parameterstep): for par in self.control: if variable2auxfile: auxfilename = variable2auxfile.get_filename(par) if auxfilename: lines.append( f"{par.name}(auxfile='{auxfilename}')\n") continue lines.append(repr(par) + '\n') text = ''.join(lines) if filepath: with open(filepath, mode='w', encoding='utf-8') as controlfile: controlfile.write(text) else: filename = objecttools.devicename(self) if filename == '?': raise RuntimeError( 'To save the control parameters of a model to a file, ' 'its filename must be known. This can be done, by ' 'passing a filename to function `save_controls` ' 'directly. But in complete HydPy applications, it is ' 'usally assumed to be consistent with the name of the ' 'element handling the model.') hydpy.pub.controlmanager.save_file(filename, text)
<SYSTEM_TASK:> Try to return the parameter values from the auxiliary control file <END_TASK> <USER_TASK:> Description: def _get_values_from_auxiliaryfile(self, auxfile): """Try to return the parameter values from the auxiliary control file with the given name. Things are a little complicated here. To understand this method, you should first take a look at the |parameterstep| function. """
try: frame = inspect.currentframe().f_back.f_back while frame: namespace = frame.f_locals try: subnamespace = {'model': namespace['model'], 'focus': self} break except KeyError: frame = frame.f_back else: raise RuntimeError( 'Cannot determine the corresponding model. Use the ' '`auxfile` keyword in usual parameter control files only.') filetools.ControlManager.read2dict(auxfile, subnamespace) try: subself = subnamespace[self.name] except KeyError: raise RuntimeError( f'The selected file does not define value(s) for ' f'parameter {self.name}') return subself.values except BaseException: objecttools.augment_excmessage( f'While trying to extract information for parameter ' f'`{self.name}` from file `{auxfile}`')
<SYSTEM_TASK:> The actual initial value of the given parameter. <END_TASK> <USER_TASK:> Description: def initinfo(self) -> Tuple[Union[float, int, bool], bool]: """The actual initial value of the given parameter. Some |Parameter| subclasses define another value for class attribute `INIT` than |None| to provide a default value. Let's define a parameter test class and prepare a function for initialising it and connecting the resulting instance to a |SubParameters| object: >>> from hydpy.core.parametertools import Parameter, SubParameters >>> class Test(Parameter): ... NDIM = 0 ... TYPE = float ... TIME = None ... INIT = 2.0 >>> class SubGroup(SubParameters): ... CLASSES = (Test,) >>> def prepare(): ... subpars = SubGroup(None) ... test = Test(subpars) ... test.__hydpy__connect_variable2subgroup__() ... return test By default, making use of the `INIT` attribute is disabled: >>> test = prepare() >>> test test(?) Enable it through setting |Options.usedefaultvalues| to |True|: >>> from hydpy import pub >>> pub.options.usedefaultvalues = True >>> test = prepare() >>> test test(2.0) When no `INIT` attribute is defined, enabling |Options.usedefaultvalues| has no effect, of course: >>> del Test.INIT >>> test = prepare() >>> test test(?) For time-dependent parameter values, the `INIT` attribute is assumed to be related to a |Parameterstep| of one day: >>> test.parameterstep = '2d' >>> test.simulationstep = '12h' >>> Test.INIT = 2.0 >>> Test.TIME = True >>> test = prepare() >>> test test(4.0) >>> test.value 1.0 """
init = self.INIT if (init is not None) and hydpy.pub.options.usedefaultvalues: with Parameter.parameterstep('1d'): return self.apply_timefactor(init), True return variabletools.TYPE2MISSINGVALUE[self.TYPE], False
<SYSTEM_TASK:> Factor to adjust a new value of a time-dependent parameter. <END_TASK> <USER_TASK:> Description: def get_timefactor(cls) -> float: """Factor to adjust a new value of a time-dependent parameter. For a time-dependent parameter, its effective value depends on the simulation step size. Method |Parameter.get_timefactor| returns the fraction between the current simulation step size and the current parameter step size. .. testsetup:: >>> from hydpy import pub >>> del pub.timegrids >>> from hydpy.core.parametertools import Parameter >>> Parameter.simulationstep.delete() Period() Method |Parameter.get_timefactor| raises the following error when time information is not available: >>> from hydpy.core.parametertools import Parameter >>> Parameter.get_timefactor() Traceback (most recent call last): ... RuntimeError: To calculate the conversion factor for adapting the \ values of the time-dependent parameters, you need to define both a \ parameter and a simulation time step size first. One can define both time step sizes directly: >>> _ = Parameter.parameterstep('1d') >>> _ = Parameter.simulationstep('6h') >>> Parameter.get_timefactor() 0.25 As usual, the "global" simulation step size of the |Timegrids| object of module |pub| is prefered: >>> from hydpy import pub >>> pub.timegrids = '2000-01-01', '2001-01-01', '12h' >>> Parameter.get_timefactor() 0.5 """
try: parfactor = hydpy.pub.timegrids.parfactor except RuntimeError: if not (cls.parameterstep and cls.simulationstep): raise RuntimeError( f'To calculate the conversion factor for adapting ' f'the values of the time-dependent parameters, ' f'you need to define both a parameter and a simulation ' f'time step size first.') else: date1 = timetools.Date('2000.01.01') date2 = date1 + cls.simulationstep parfactor = timetools.Timegrids(timetools.Timegrid( date1, date2, cls.simulationstep)).parfactor return parfactor(cls.parameterstep)
<SYSTEM_TASK:> The inverse version of method |Parameter.apply_timefactor|. <END_TASK> <USER_TASK:> Description: def revert_timefactor(cls, values): """The inverse version of method |Parameter.apply_timefactor|. See the explanations on method Parameter.apply_timefactor| to understand the following examples: .. testsetup:: >>> from hydpy import pub >>> del pub.timegrids >>> from hydpy.core.parametertools import Parameter >>> class Par(Parameter): ... TIME = None >>> Par.parameterstep = '1d' >>> Par.simulationstep = '6h' >>> Par.revert_timefactor(4.0) 4.0 >>> Par.TIME = True >>> Par.revert_timefactor(4.0) 16.0 >>> Par.TIME = False >>> Par.revert_timefactor(4.0) 1.0 """
if cls.TIME is True: return values / cls.get_timefactor() if cls.TIME is False: return values * cls.get_timefactor() return values
<SYSTEM_TASK:> Try to find a compressed parameter value representation and <END_TASK> <USER_TASK:> Description: def compress_repr(self) -> Optional[str]: """Try to find a compressed parameter value representation and return it. |Parameter.compress_repr| raises a |NotImplementedError| when failing to find a compressed representation. .. testsetup:: >>> from hydpy import pub >>> del pub.timegrids For the following examples, we define a 1-dimensional sequence handling time-dependent floating point values: >>> from hydpy.core.parametertools import Parameter >>> class Test(Parameter): ... NDIM = 1 ... TYPE = float ... TIME = True >>> test = Test(None) Before and directly after defining the parameter shape, `nan` is returned: >>> test.compress_repr() '?' >>> test test(?) >>> test.shape = 4 >>> test test(?) Due to the time-dependence of the values of our test class, we need to specify a parameter and a simulation time step: >>> test.parameterstep = '1d' >>> test.simulationstep = '8h' Compression succeeds when all required values are identical: >>> test(3.0, 3.0, 3.0, 3.0) >>> test.values array([ 1., 1., 1., 1.]) >>> test.compress_repr() '3.0' >>> test test(3.0) Method |Parameter.compress_repr| returns |None| in case the required values are not identical: >>> test(1.0, 2.0, 3.0, 3.0) >>> test.compress_repr() >>> test test(1.0, 2.0, 3.0, 3.0) If some values are not required, indicate this by the `mask` descriptor: >>> import numpy >>> test(3.0, 3.0, 3.0, numpy.nan) >>> test test(3.0, 3.0, 3.0, nan) >>> Test.mask = numpy.array([True, True, True, False]) >>> test test(3.0) For a shape of zero, the string representing includes an empty list: >>> test.shape = 0 >>> test.compress_repr() '[]' >>> test test([]) Method |Parameter.compress_repr| works similarly for different |Parameter| subclasses. The following examples focus on a 2-dimensional parameter handling integer values: >>> from hydpy.core.parametertools import Parameter >>> class Test(Parameter): ... NDIM = 2 ... TYPE = int ... TIME = None >>> test = Test(None) >>> test.compress_repr() '?' >>> test test(?) >>> test.shape = (2, 3) >>> test test(?) >>> test([[3, 3, 3], ... [3, 3, 3]]) >>> test test(3) >>> test([[3, 3, -999999], ... [3, 3, 3]]) >>> test test([[3, 3, -999999], [3, 3, 3]]) >>> Test.mask = numpy.array([ ... [True, True, False], ... [True, True, True]]) >>> test test(3) >>> test.shape = (0, 0) >>> test test([[]]) """
if not hasattr(self, 'value'): return '?' if not self: return f"{self.NDIM * '['}{self.NDIM * ']'}" unique = numpy.unique(self[self.mask]) if sum(numpy.isnan(unique)) == len(unique.flatten()): unique = numpy.array([numpy.nan]) else: unique = self.revert_timefactor(unique) if len(unique) == 1: return objecttools.repr_(unique[0]) return None
<SYSTEM_TASK:> Update the actual simulation values based on the toy-value pairs. <END_TASK> <USER_TASK:> Description: def refresh(self) -> None: """Update the actual simulation values based on the toy-value pairs. Usually, one does not need to call refresh explicitly. The "magic" methods __call__, __setattr__, and __delattr__ invoke it automatically, when required. Instantiate a 1-dimensional |SeasonalParameter| object: >>> from hydpy.core.parametertools import SeasonalParameter >>> class Par(SeasonalParameter): ... NDIM = 1 ... TYPE = float ... TIME = None >>> par = Par(None) >>> par.simulationstep = '1d' >>> par.shape = (None,) When a |SeasonalParameter| object does not contain any toy-value pairs yet, the method |SeasonalParameter.refresh| sets all actual simulation values to zero: >>> par.values = 1. >>> par.refresh() >>> par.values[0] 0.0 When there is only one toy-value pair, its values are relevant for all actual simulation values: >>> par.toy_1 = 2. # calls refresh automatically >>> par.values[0] 2.0 Method |SeasonalParameter.refresh| performs a linear interpolation for the central time points of each simulation time step. Hence, in the following example, the original values of the toy-value pairs do not show up: >>> par.toy_12_31 = 4. >>> from hydpy import round_ >>> round_(par.values[0]) 2.00274 >>> round_(par.values[-2]) 3.99726 >>> par.values[-1] 3.0 If one wants to preserve the original values in this example, one would have to set the corresponding toy instances in the middle of some simulation step intervals: >>> del par.toy_1 >>> del par.toy_12_31 >>> par.toy_1_1_12 = 2 >>> par.toy_12_31_12 = 4. >>> par.values[0] 2.0 >>> round_(par.values[1]) 2.005479 >>> round_(par.values[-2]) 3.994521 >>> par.values[-1] 4.0 """
if not self: self.values[:] = 0. elif len(self) == 1: values = list(self._toy2values.values())[0] self.values[:] = self.apply_timefactor(values) else: for idx, date in enumerate( timetools.TOY.centred_timegrid(self.simulationstep)): values = self.interp(date) self.values[idx] = self.apply_timefactor(values)
<SYSTEM_TASK:> Perform a linear value interpolation for the given `date` and <END_TASK> <USER_TASK:> Description: def interp(self, date: timetools.Date) -> float: """Perform a linear value interpolation for the given `date` and return the result. Instantiate a 1-dimensional |SeasonalParameter| object: >>> from hydpy.core.parametertools import SeasonalParameter >>> class Par(SeasonalParameter): ... NDIM = 1 ... TYPE = float ... TIME = None >>> par = Par(None) >>> par.simulationstep = '1d' >>> par.shape = (None,) Define three toy-value pairs: >>> par(_1=2.0, _2=5.0, _12_31=4.0) Passing a |Date| object matching a |TOY| object exactly returns the corresponding |float| value: >>> from hydpy import Date >>> par.interp(Date('2000.01.01')) 2.0 >>> par.interp(Date('2000.02.01')) 5.0 >>> par.interp(Date('2000.12.31')) 4.0 For all intermediate points, |SeasonalParameter.interp| performs a linear interpolation: >>> from hydpy import round_ >>> round_(par.interp(Date('2000.01.02'))) 2.096774 >>> round_(par.interp(Date('2000.01.31'))) 4.903226 >>> round_(par.interp(Date('2000.02.02'))) 4.997006 >>> round_(par.interp(Date('2000.12.30'))) 4.002994 Linear interpolation is also allowed between the first and the last pair when they do not capture the endpoints of the year: >>> par(_1_2=2.0, _12_30=4.0) >>> round_(par.interp(Date('2000.12.29'))) 3.99449 >>> par.interp(Date('2000.12.30')) 4.0 >>> round_(par.interp(Date('2000.12.31'))) 3.333333 >>> round_(par.interp(Date('2000.01.01'))) 2.666667 >>> par.interp(Date('2000.01.02')) 2.0 >>> round_(par.interp(Date('2000.01.03'))) 2.00551 The following example briefly shows interpolation performed for a 2-dimensional parameter: >>> Par.NDIM = 2 >>> par = Par(None) >>> par.shape = (None, 2) >>> par(_1_1=[1., 2.], _1_3=[-3, 0.]) >>> result = par.interp(Date('2000.01.02')) >>> round_(result[0]) -1.0 >>> round_(result[1]) 1.0 """
xnew = timetools.TOY(date) xys = list(self) for idx, (x_1, y_1) in enumerate(xys): if x_1 > xnew: x_0, y_0 = xys[idx-1] break else: x_0, y_0 = xys[-1] x_1, y_1 = xys[0] return y_0+(y_1-y_0)/(x_1-x_0)*(xnew-x_0)
<SYSTEM_TASK:> A user-defined value to be used instead of the value of class <END_TASK> <USER_TASK:> Description: def alternative_initvalue(self) -> Union[bool, int, float]: """A user-defined value to be used instead of the value of class constant `INIT`. See the main documentation on class |SolverParameter| for more information. """
if self._alternative_initvalue is None: raise AttributeError( f'No alternative initial value for solver parameter ' f'{objecttools.elementphrase(self)} has been defined so far.') else: return self._alternative_initvalue
<SYSTEM_TASK:> Support for custom company premises model <END_TASK> <USER_TASK:> Description: def get_premises_model(): """ Support for custom company premises model with developer friendly validation. """
try: app_label, model_name = PREMISES_MODEL.split('.') except ValueError: raise ImproperlyConfigured("OPENINGHOURS_PREMISES_MODEL must be of the" " form 'app_label.model_name'") premises_model = get_model(app_label=app_label, model_name=model_name) if premises_model is None: raise ImproperlyConfigured("OPENINGHOURS_PREMISES_MODEL refers to" " model '%s' that has not been installed" % PREMISES_MODEL) return premises_model
<SYSTEM_TASK:> Allows to access global request and read a timestamp from query. <END_TASK> <USER_TASK:> Description: def get_now(): """ Allows to access global request and read a timestamp from query. """
if not get_current_request: return datetime.datetime.now() request = get_current_request() if request: openinghours_now = request.GET.get('openinghours-now') if openinghours_now: return datetime.datetime.strptime(openinghours_now, '%Y%m%d%H%M%S') return datetime.datetime.now()
<SYSTEM_TASK:> Returns QuerySet of ClosingRules that are currently valid <END_TASK> <USER_TASK:> Description: def get_closing_rule_for_now(location): """ Returns QuerySet of ClosingRules that are currently valid """
now = get_now() if location: return ClosingRules.objects.filter(company=location, start__lte=now, end__gte=now) return Company.objects.first().closingrules_set.filter(start__lte=now, end__gte=now)
<SYSTEM_TASK:> Is the company currently open? Pass "now" to test with a specific <END_TASK> <USER_TASK:> Description: def is_open(location, now=None): """ Is the company currently open? Pass "now" to test with a specific timestamp. Can be used stand-alone or as a helper. """
if now is None: now = get_now() if has_closing_rule_for_now(location): return False now_time = datetime.time(now.hour, now.minute, now.second) if location: ohs = OpeningHours.objects.filter(company=location) else: ohs = Company.objects.first().openinghours_set.all() for oh in ohs: is_open = False # start and end is on the same day if (oh.weekday == now.isoweekday() and oh.from_hour <= now_time and now_time <= oh.to_hour): is_open = oh # start and end are not on the same day and we test on the start day if (oh.weekday == now.isoweekday() and oh.from_hour <= now_time and ((oh.to_hour < oh.from_hour) and (now_time < datetime.time(23, 59, 59)))): is_open = oh # start and end are not on the same day and we test on the end day if (oh.weekday == (now.isoweekday() - 1) % 7 and oh.from_hour >= now_time and oh.to_hour >= now_time and oh.to_hour < oh.from_hour): is_open = oh # print " 'Special' case after midnight", oh if is_open is not False: return oh return False
<SYSTEM_TASK:> A |numpy| |numpy.ndarray| with equal weights for all segment <END_TASK> <USER_TASK:> Description: def refweights(self): """A |numpy| |numpy.ndarray| with equal weights for all segment junctions.. >>> from hydpy.models.hstream import * >>> parameterstep('1d') >>> states.qjoints.shape = 5 >>> states.qjoints.refweights array([ 0.2, 0.2, 0.2, 0.2, 0.2]) """
# pylint: disable=unsubscriptable-object # due to a pylint bug (see https://github.com/PyCQA/pylint/issues/870) return numpy.full(self.shape, 1./self.shape[0], dtype=float)
<SYSTEM_TASK:> Add a directory and optionally its path. <END_TASK> <USER_TASK:> Description: def add(self, directory, path=None) -> None: """Add a directory and optionally its path."""
objecttools.valid_variable_identifier(directory) if path is None: path = directory setattr(self, directory, path)
<SYSTEM_TASK:> Absolute path pointing to the available working directories. <END_TASK> <USER_TASK:> Description: def basepath(self) -> str: """Absolute path pointing to the available working directories. >>> from hydpy.core.filetools import FileManager >>> filemanager = FileManager() >>> filemanager.BASEDIR = 'basename' >>> filemanager.projectdir = 'projectname' >>> from hydpy import repr_, TestIO >>> with TestIO(): ... repr_(filemanager.basepath) # doctest: +ELLIPSIS '...hydpy/tests/iotesting/projectname/basename' """
return os.path.abspath( os.path.join(self.projectdir, self.BASEDIR))
<SYSTEM_TASK:> Names and paths of the available working directories. <END_TASK> <USER_TASK:> Description: def availabledirs(self) -> Folder2Path: """Names and paths of the available working directories. Available working directories are those beeing stored in the base directory of the respective |FileManager| subclass. Folders with names starting with an underscore are ignored (use this for directories handling additional data files, if you like). Zipped directories, which can be unpacked on the fly, do also count as available directories: >>> from hydpy.core.filetools import FileManager >>> filemanager = FileManager() >>> filemanager.BASEDIR = 'basename' >>> filemanager.projectdir = 'projectname' >>> import os >>> from hydpy import repr_, TestIO >>> TestIO.clear() >>> with TestIO(): ... os.makedirs('projectname/basename/folder1') ... os.makedirs('projectname/basename/folder2') ... open('projectname/basename/folder3.zip', 'w').close() ... os.makedirs('projectname/basename/_folder4') ... open('projectname/basename/folder5.tar', 'w').close() ... filemanager.availabledirs # doctest: +ELLIPSIS Folder2Path(folder1=.../projectname/basename/folder1, folder2=.../projectname/basename/folder2, folder3=.../projectname/basename/folder3.zip) """
directories = Folder2Path() for directory in os.listdir(self.basepath): if not directory.startswith('_'): path = os.path.join(self.basepath, directory) if os.path.isdir(path): directories.add(directory, path) elif directory.endswith('.zip'): directories.add(directory[:-4], path) return directories
<SYSTEM_TASK:> Name of the current working directory containing the relevant files. <END_TASK> <USER_TASK:> Description: def currentdir(self) -> str: """Name of the current working directory containing the relevant files. To show most of the functionality of |property| |FileManager.currentdir| (unpacking zip files on the fly is explained in the documentation on function (|FileManager.zip_currentdir|), we first prepare a |FileManager| object corresponding to the |FileManager.basepath| `projectname/basename`: >>> from hydpy.core.filetools import FileManager >>> filemanager = FileManager() >>> filemanager.BASEDIR = 'basename' >>> filemanager.projectdir = 'projectname' >>> import os >>> from hydpy import repr_, TestIO >>> TestIO.clear() >>> with TestIO(): ... os.makedirs('projectname/basename') ... repr_(filemanager.basepath) # doctest: +ELLIPSIS '...hydpy/tests/iotesting/projectname/basename' At first, the base directory is empty and asking for the current working directory results in the following error: >>> with TestIO(): ... filemanager.currentdir # doctest: +ELLIPSIS Traceback (most recent call last): ... RuntimeError: The current working directory of the FileManager object \ has not been defined manually and cannot be determined automatically: \ `.../projectname/basename` does not contain any available directories. If only one directory exists, it is considered as the current working directory automatically: >>> with TestIO(): ... os.mkdir('projectname/basename/dir1') ... filemanager.currentdir 'dir1' |property| |FileManager.currentdir| memorises the name of the current working directory, even if another directory is later added to the base path: >>> with TestIO(): ... os.mkdir('projectname/basename/dir2') ... filemanager.currentdir 'dir1' Set the value of |FileManager.currentdir| to |None| to let it forget the memorised directory. After that, asking for the current working directory now results in another error, as it is not clear which directory to select: >>> with TestIO(): ... filemanager.currentdir = None ... filemanager.currentdir # doctest: +ELLIPSIS Traceback (most recent call last): ... RuntimeError: The current working directory of the FileManager object \ has not been defined manually and cannot be determined automatically: \ `....../projectname/basename` does contain multiple available directories \ (dir1 and dir2). Setting |FileManager.currentdir| manually solves the problem: >>> with TestIO(): ... filemanager.currentdir = 'dir1' ... filemanager.currentdir 'dir1' Remove the current working directory `dir1` with the `del` statement: >>> with TestIO(): ... del filemanager.currentdir ... os.path.exists('projectname/basename/dir1') False |FileManager| subclasses can define a default directory name. When many directories exist and none is selected manually, the default directory is selected automatically. The following example shows an error message due to multiple directories without any having the default name: >>> with TestIO(): ... os.mkdir('projectname/basename/dir1') ... filemanager.DEFAULTDIR = 'dir3' ... del filemanager.currentdir ... filemanager.currentdir # doctest: +ELLIPSIS Traceback (most recent call last): ... RuntimeError: The current working directory of the FileManager object \ has not been defined manually and cannot be determined automatically: The \ default directory (dir3) is not among the available directories (dir1 and dir2). We can fix this by adding the required default directory manually: >>> with TestIO(): ... os.mkdir('projectname/basename/dir3') ... filemanager.currentdir 'dir3' Setting the |FileManager.currentdir| to `dir4` not only overwrites the default name, but also creates the required folder: >>> with TestIO(): ... filemanager.currentdir = 'dir4' ... filemanager.currentdir 'dir4' >>> with TestIO(): ... sorted(os.listdir('projectname/basename')) ['dir1', 'dir2', 'dir3', 'dir4'] Failed attempts in removing directories result in error messages like the following one: >>> import shutil >>> from unittest.mock import patch >>> with patch.object(shutil, 'rmtree', side_effect=AttributeError): ... with TestIO(): ... del filemanager.currentdir # doctest: +ELLIPSIS Traceback (most recent call last): ... AttributeError: While trying to delete the current working directory \ `.../projectname/basename/dir4` of the FileManager object, the following \ error occurred: ... Then, the current working directory still exists and is remembered by |FileManager.currentdir|: >>> with TestIO(): ... filemanager.currentdir 'dir4' >>> with TestIO(): ... sorted(os.listdir('projectname/basename')) ['dir1', 'dir2', 'dir3', 'dir4'] """
if self._currentdir is None: directories = self.availabledirs.folders if len(directories) == 1: self.currentdir = directories[0] elif self.DEFAULTDIR in directories: self.currentdir = self.DEFAULTDIR else: prefix = (f'The current working directory of the ' f'{objecttools.classname(self)} object ' f'has not been defined manually and cannot ' f'be determined automatically:') if not directories: raise RuntimeError( f'{prefix} `{objecttools.repr_(self.basepath)}` ' f'does not contain any available directories.') if self.DEFAULTDIR is None: raise RuntimeError( f'{prefix} `{objecttools.repr_(self.basepath)}` ' f'does contain multiple available directories ' f'({objecttools.enumeration(directories)}).') raise RuntimeError( f'{prefix} The default directory ({self.DEFAULTDIR}) ' f'is not among the available directories ' f'({objecttools.enumeration(directories)}).') return self._currentdir
<SYSTEM_TASK:> Absolute path of the current working directory. <END_TASK> <USER_TASK:> Description: def currentpath(self) -> str: """Absolute path of the current working directory. >>> from hydpy.core.filetools import FileManager >>> filemanager = FileManager() >>> filemanager.BASEDIR = 'basename' >>> filemanager.projectdir = 'projectname' >>> from hydpy import repr_, TestIO >>> with TestIO(): ... filemanager.currentdir = 'testdir' ... repr_(filemanager.currentpath) # doctest: +ELLIPSIS '...hydpy/tests/iotesting/projectname/basename/testdir' """
return os.path.join(self.basepath, self.currentdir)
<SYSTEM_TASK:> Names of the files contained in the the current working directory. <END_TASK> <USER_TASK:> Description: def filenames(self) -> List[str]: """Names of the files contained in the the current working directory. Files names starting with underscores are ignored: >>> from hydpy.core.filetools import FileManager >>> filemanager = FileManager() >>> filemanager.BASEDIR = 'basename' >>> filemanager.projectdir = 'projectname' >>> from hydpy import TestIO >>> with TestIO(): ... filemanager.currentdir = 'testdir' ... open('projectname/basename/testdir/file1.txt', 'w').close() ... open('projectname/basename/testdir/file2.npy', 'w').close() ... open('projectname/basename/testdir/_file1.nc', 'w').close() ... filemanager.filenames ['file1.txt', 'file2.npy'] """
return sorted( fn for fn in os.listdir(self.currentpath) if not fn.startswith('_'))
<SYSTEM_TASK:> Absolute path names of the files contained in the current <END_TASK> <USER_TASK:> Description: def filepaths(self) -> List[str]: """Absolute path names of the files contained in the current working directory. Files names starting with underscores are ignored: >>> from hydpy.core.filetools import FileManager >>> filemanager = FileManager() >>> filemanager.BASEDIR = 'basename' >>> filemanager.projectdir = 'projectname' >>> from hydpy import repr_, TestIO >>> with TestIO(): ... filemanager.currentdir = 'testdir' ... open('projectname/basename/testdir/file1.txt', 'w').close() ... open('projectname/basename/testdir/file2.npy', 'w').close() ... open('projectname/basename/testdir/_file1.nc', 'w').close() ... for filepath in filemanager.filepaths: ... repr_(filepath) # doctest: +ELLIPSIS '...hydpy/tests/iotesting/projectname/basename/testdir/file1.txt' '...hydpy/tests/iotesting/projectname/basename/testdir/file2.npy' """
path = self.currentpath return [os.path.join(path, name) for name in self.filenames]
<SYSTEM_TASK:> Pack the current working directory in a `zip` file. <END_TASK> <USER_TASK:> Description: def zip_currentdir(self) -> None: """Pack the current working directory in a `zip` file. |FileManager| subclasses allow for manual packing and automatic unpacking of working directories. The only supported format is `zip`. To avoid possible inconsistencies, origin directories and zip files are removed after packing or unpacking, respectively. As an example scenario, we prepare a |FileManager| object with the current working directory `folder` containing the files `test1.txt` and `text2.txt`: >>> from hydpy.core.filetools import FileManager >>> filemanager = FileManager() >>> filemanager.BASEDIR = 'basename' >>> filemanager.projectdir = 'projectname' >>> import os >>> from hydpy import repr_, TestIO >>> TestIO.clear() >>> basepath = 'projectname/basename' >>> with TestIO(): ... os.makedirs(basepath) ... filemanager.currentdir = 'folder' ... open(f'{basepath}/folder/file1.txt', 'w').close() ... open(f'{basepath}/folder/file2.txt', 'w').close() ... filemanager.filenames ['file1.txt', 'file2.txt'] The directories existing under the base path are identical with the ones returned by property |FileManager.availabledirs|: >>> with TestIO(): ... sorted(os.listdir(basepath)) ... filemanager.availabledirs # doctest: +ELLIPSIS ['folder'] Folder2Path(folder=.../projectname/basename/folder) After packing the current working directory manually, it is still counted as a available directory: >>> with TestIO(): ... filemanager.zip_currentdir() ... sorted(os.listdir(basepath)) ... filemanager.availabledirs # doctest: +ELLIPSIS ['folder.zip'] Folder2Path(folder=.../projectname/basename/folder.zip) Instead of the complete directory, only the contained files are packed: >>> from zipfile import ZipFile >>> with TestIO(): ... with ZipFile('projectname/basename/folder.zip', 'r') as zp: ... sorted(zp.namelist()) ['file1.txt', 'file2.txt'] The zip file is unpacked again, as soon as `folder` becomes the current working directory: >>> with TestIO(): ... filemanager.currentdir = 'folder' ... sorted(os.listdir(basepath)) ... filemanager.availabledirs ... filemanager.filenames # doctest: +ELLIPSIS ['folder'] Folder2Path(folder=.../projectname/basename/folder) ['file1.txt', 'file2.txt'] """
with zipfile.ZipFile(f'{self.currentpath}.zip', 'w') as zipfile_: for filepath, filename in zip(self.filepaths, self.filenames): zipfile_.write(filename=filepath, arcname=filename) del self.currentdir
<SYSTEM_TASK:> Read all network files of the current working directory, structure <END_TASK> <USER_TASK:> Description: def load_files(self) -> selectiontools.Selections: """Read all network files of the current working directory, structure their contents in a |selectiontools.Selections| object, and return it. """
devicetools.Node.clear_all() devicetools.Element.clear_all() selections = selectiontools.Selections() for (filename, path) in zip(self.filenames, self.filepaths): # Ensure both `Node` and `Element`start with a `fresh` memory. devicetools.Node.extract_new() devicetools.Element.extract_new() try: info = runpy.run_path(path) except BaseException: objecttools.augment_excmessage( f'While trying to load the network file `{path}`') try: node: devicetools.Node = info['Node'] element: devicetools.Element = info['Element'] selections += selectiontools.Selection( filename.split('.')[0], node.extract_new(), element.extract_new()) except KeyError as exc: raise RuntimeError( f'The class {exc.args[0]} cannot be loaded from the ' f'network file `{path}`.') selections += selectiontools.Selection( 'complete', info['Node'].query_all(), info['Element'].query_all()) return selections
<SYSTEM_TASK:> Save the |Selection| objects contained in the given |Selections| <END_TASK> <USER_TASK:> Description: def save_files(self, selections) -> None: """Save the |Selection| objects contained in the given |Selections| instance to separate network files."""
try: currentpath = self.currentpath selections = selectiontools.Selections(selections) for selection in selections: if selection.name == 'complete': continue path = os.path.join(currentpath, selection.name+'.py') selection.save_networkfile(filepath=path) except BaseException: objecttools.augment_excmessage( 'While trying to save selections `%s` into network files' % selections)
<SYSTEM_TASK:> Save the given text under the given control filename and the <END_TASK> <USER_TASK:> Description: def save_file(self, filename, text): """Save the given text under the given control filename and the current path."""
if not filename.endswith('.py'): filename += '.py' path = os.path.join(self.currentpath, filename) with open(path, 'w', encoding="utf-8") as file_: file_.write(text)
<SYSTEM_TASK:> Read and return the content of the given file. <END_TASK> <USER_TASK:> Description: def load_file(self, filename): """Read and return the content of the given file. If the current directory is not defined explicitly, the directory name is constructed with the actual simulation start date. If such an directory does not exist, it is created immediately. """
_defaultdir = self.DEFAULTDIR try: if not filename.endswith('.py'): filename += '.py' try: self.DEFAULTDIR = ( 'init_' + hydpy.pub.timegrids.sim.firstdate.to_string('os')) except KeyError: pass filepath = os.path.join(self.currentpath, filename) with open(filepath) as file_: return file_.read() except BaseException: objecttools.augment_excmessage( 'While trying to read the conditions file `%s`' % filename) finally: self.DEFAULTDIR = _defaultdir
<SYSTEM_TASK:> Save the given text under the given condition filename and the <END_TASK> <USER_TASK:> Description: def save_file(self, filename, text): """Save the given text under the given condition filename and the current path. If the current directory is not defined explicitly, the directory name is constructed with the actual simulation end date. If such an directory does not exist, it is created immediately. """
_defaultdir = self.DEFAULTDIR try: if not filename.endswith('.py'): filename += '.py' try: self.DEFAULTDIR = ( 'init_' + hydpy.pub.timegrids.sim.lastdate.to_string('os')) except AttributeError: pass path = os.path.join(self.currentpath, filename) with open(path, 'w', encoding="utf-8") as file_: file_.write(text) except BaseException: objecttools.augment_excmessage( 'While trying to write the conditions file `%s`' % filename) finally: self.DEFAULTDIR = _defaultdir
<SYSTEM_TASK:> Load data from an "external" data file an pass it to <END_TASK> <USER_TASK:> Description: def load_file(self, sequence): """Load data from an "external" data file an pass it to the given |IOSequence|."""
try: if sequence.filetype_ext == 'npy': sequence.series = sequence.adjust_series( *self._load_npy(sequence)) elif sequence.filetype_ext == 'asc': sequence.series = sequence.adjust_series( *self._load_asc(sequence)) elif sequence.filetype_ext == 'nc': self._load_nc(sequence) except BaseException: objecttools.augment_excmessage( 'While trying to load the external data of sequence %s' % objecttools.devicephrase(sequence))
<SYSTEM_TASK:> Write the date stored in |IOSequence.series| of the given <END_TASK> <USER_TASK:> Description: def save_file(self, sequence, array=None): """Write the date stored in |IOSequence.series| of the given |IOSequence| into an "external" data file. """
if array is None: array = sequence.aggregate_series() try: if sequence.filetype_ext == 'nc': self._save_nc(sequence, array) else: filepath = sequence.filepath_ext if ((array is not None) and (array.info['type'] != 'unmodified')): filepath = (f'{filepath[:-4]}_{array.info["type"]}' f'{filepath[-4:]}') if not sequence.overwrite_ext and os.path.exists(filepath): raise OSError( f'Sequence {objecttools.devicephrase(sequence)} ' f'is not allowed to overwrite the existing file ' f'`{sequence.filepath_ext}`.') if sequence.filetype_ext == 'npy': self._save_npy(array, filepath) elif sequence.filetype_ext == 'asc': self._save_asc(array, filepath) except BaseException: objecttools.augment_excmessage( 'While trying to save the external data of sequence %s' % objecttools.devicephrase(sequence))
<SYSTEM_TASK:> Prepare a new |NetCDFInterface| object for reading data. <END_TASK> <USER_TASK:> Description: def open_netcdf_reader(self, flatten=False, isolate=False, timeaxis=1): """Prepare a new |NetCDFInterface| object for reading data."""
self._netcdf_reader = netcdftools.NetCDFInterface( flatten=bool(flatten), isolate=bool(isolate), timeaxis=int(timeaxis))
<SYSTEM_TASK:> Prepare a new |NetCDFInterface| object for writing data. <END_TASK> <USER_TASK:> Description: def open_netcdf_writer(self, flatten=False, isolate=False, timeaxis=1): """Prepare a new |NetCDFInterface| object for writing data."""
self._netcdf_writer = netcdftools.NetCDFInterface( flatten=bool(flatten), isolate=bool(isolate), timeaxis=int(timeaxis))
<SYSTEM_TASK:> Adjust the given precipitation values. <END_TASK> <USER_TASK:> Description: def calc_nkor_v1(self): """Adjust the given precipitation values. Required control parameters: |NHRU| |KG| Required input sequence: |Nied| Calculated flux sequence: |NKor| Basic equation: :math:`NKor = KG \\cdot Nied` Example: >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(3) >>> kg(0.8, 1.0, 1.2) >>> inputs.nied = 10.0 >>> model.calc_nkor_v1() >>> fluxes.nkor nkor(8.0, 10.0, 12.0) """
con = self.parameters.control.fastaccess inp = self.sequences.inputs.fastaccess flu = self.sequences.fluxes.fastaccess for k in range(con.nhru): flu.nkor[k] = con.kg[k] * inp.nied
<SYSTEM_TASK:> Adjust the given air temperature values. <END_TASK> <USER_TASK:> Description: def calc_tkor_v1(self): """Adjust the given air temperature values. Required control parameters: |NHRU| |KT| Required input sequence: |TemL| Calculated flux sequence: |TKor| Basic equation: :math:`TKor = KT + TemL` Example: >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(3) >>> kt(-2.0, 0.0, 2.0) >>> inputs.teml(1.) >>> model.calc_tkor_v1() >>> fluxes.tkor tkor(-1.0, 1.0, 3.0) """
con = self.parameters.control.fastaccess inp = self.sequences.inputs.fastaccess flu = self.sequences.fluxes.fastaccess for k in range(con.nhru): flu.tkor[k] = con.kt[k] + inp.teml
<SYSTEM_TASK:> Calculate reference evapotranspiration after Turc-Wendling. <END_TASK> <USER_TASK:> Description: def calc_et0_v1(self): """Calculate reference evapotranspiration after Turc-Wendling. Required control parameters: |NHRU| |KE| |KF| |HNN| Required input sequence: |Glob| Required flux sequence: |TKor| Calculated flux sequence: |ET0| Basic equation: :math:`ET0 = KE \\cdot \\frac{(8.64 \\cdot Glob+93 \\cdot KF) \\cdot (TKor+22)} {165 \\cdot (TKor+123) \\cdot (1 + 0.00019 \\cdot min(HNN, 600))}` Example: >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> simulationstep('12h') >>> nhru(3) >>> ke(1.1) >>> kf(0.6) >>> hnn(200.0, 600.0, 1000.0) >>> inputs.glob = 200.0 >>> fluxes.tkor = 15.0 >>> model.calc_et0_v1() >>> fluxes.et0 et0(3.07171, 2.86215, 2.86215) """
con = self.parameters.control.fastaccess inp = self.sequences.inputs.fastaccess flu = self.sequences.fluxes.fastaccess for k in range(con.nhru): flu.et0[k] = (con.ke[k]*(((8.64*inp.glob+93.*con.kf[k]) * (flu.tkor[k]+22.)) / (165.*(flu.tkor[k]+123.) * (1.+0.00019*min(con.hnn[k], 600.)))))
<SYSTEM_TASK:> Correct the given reference evapotranspiration and update the <END_TASK> <USER_TASK:> Description: def calc_et0_wet0_v1(self): """Correct the given reference evapotranspiration and update the corresponding log sequence. Required control parameters: |NHRU| |KE| |WfET0| Required input sequence: |PET| Calculated flux sequence: |ET0| Updated log sequence: |WET0| Basic equations: :math:`ET0_{new} = WfET0 \\cdot KE \\cdot PET + (1-WfET0) \\cdot ET0_{alt}` Example: Prepare four hydrological response units with different value combinations of parameters |KE| and |WfET0|: >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> simulationstep('12h') >>> nhru(4) >>> ke(0.8, 1.2, 0.8, 1.2) >>> wfet0(2.0, 2.0, 0.2, 0.2) Note that the actual value of time dependend parameter |WfET0| is reduced due the difference between the given parameter and simulation time steps: >>> from hydpy import round_ >>> round_(wfet0.values) 1.0, 1.0, 0.1, 0.1 For the first two hydrological response units, the given |PET| value is modified by -0.4 mm and +0.4 mm, respectively. For the other two response units, which weight the "new" evaporation value with 10 %, |ET0| does deviate from the old value of |WET0| by -0.04 mm and +0.04 mm only: >>> inputs.pet = 2.0 >>> logs.wet0 = 2.0 >>> model.calc_et0_wet0_v1() >>> fluxes.et0 et0(1.6, 2.4, 1.96, 2.04) >>> logs.wet0 wet0([[1.6, 2.4, 1.96, 2.04]]) """
con = self.parameters.control.fastaccess inp = self.sequences.inputs.fastaccess flu = self.sequences.fluxes.fastaccess log = self.sequences.logs.fastaccess for k in range(con.nhru): flu.et0[k] = (con.wfet0[k]*con.ke[k]*inp.pet + (1.-con.wfet0[k])*log.wet0[0, k]) log.wet0[0, k] = flu.et0[k]
<SYSTEM_TASK:> Calculate land use and month specific values of potential <END_TASK> <USER_TASK:> Description: def calc_evpo_v1(self): """Calculate land use and month specific values of potential evapotranspiration. Required control parameters: |NHRU| |Lnk| |FLn| Required derived parameter: |MOY| Required flux sequence: |ET0| Calculated flux sequence: |EvPo| Additional requirements: |Model.idx_sim| Basic equation: :math:`EvPo = FLn \\cdot ET0` Example: For clarity, this is more of a kind of an integration example. Parameter |FLn| both depends on time (the actual month) and space (the actual land use). Firstly, let us define a initialization time period spanning the transition from June to July: >>> from hydpy import pub >>> pub.timegrids = '30.06.2000', '02.07.2000', '1d' Secondly, assume that the considered subbasin is differenciated in two HRUs, one of primarily consisting of arable land and the other one of deciduous forests: >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(2) >>> lnk(ACKER, LAUBW) Thirdly, set the |FLn| values, one for the relevant months and land use classes: >>> fln.acker_jun = 1.299 >>> fln.acker_jul = 1.304 >>> fln.laubw_jun = 1.350 >>> fln.laubw_jul = 1.365 Fourthly, the index array connecting the simulation time steps defined above and the month indexes (0...11) can be retrieved from the |pub| module. This can be done manually more conveniently via its update method: >>> derived.moy.update() >>> derived.moy moy(5, 6) Finally, the actual method (with its simple equation) is applied as usual: >>> fluxes.et0 = 2.0 >>> model.idx_sim = 0 >>> model.calc_evpo_v1() >>> fluxes.evpo evpo(2.598, 2.7) >>> model.idx_sim = 1 >>> model.calc_evpo_v1() >>> fluxes.evpo evpo(2.608, 2.73) Reset module |pub| to not interfere the following examples: >>> del pub.timegrids """
con = self.parameters.control.fastaccess der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess for k in range(con.nhru): flu.evpo[k] = con.fln[con.lnk[k]-1, der.moy[self.idx_sim]] * flu.et0[k]
<SYSTEM_TASK:> Calculate stand precipitation and update the interception storage <END_TASK> <USER_TASK:> Description: def calc_nbes_inzp_v1(self): """Calculate stand precipitation and update the interception storage accordingly. Required control parameters: |NHRU| |Lnk| Required derived parameter: |KInz| Required flux sequence: |NKor| Calculated flux sequence: |NBes| Updated state sequence: |Inzp| Additional requirements: |Model.idx_sim| Basic equation: :math:`NBes = \\Bigl \\lbrace { {PKor \\ | \\ Inzp = KInz} \\atop {0 \\ | \\ Inzp < KInz} }` Examples: Initialize five HRUs with different land usages: >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(5) >>> lnk(SIED_D, FEUCHT, GLETS, FLUSS, SEE) Define |KInz| values for July the selected land usages directly: >>> derived.kinz.sied_d_jul = 2.0 >>> derived.kinz.feucht_jul = 1.0 >>> derived.kinz.glets_jul = 0.0 >>> derived.kinz.fluss_jul = 1.0 >>> derived.kinz.see_jul = 1.0 Now we prepare a |MOY| object, that assumes that the first, second, and third simulation time steps are in June, July, and August respectively (we make use of the value defined above for July, but setting the values of parameter |MOY| this way allows for a more rigorous testing of proper indexing): >>> derived.moy.shape = 3 >>> derived.moy = 5, 6, 7 >>> model.idx_sim = 1 The dense settlement (|SIED_D|), the wetland area (|FEUCHT|), and both water areas (|FLUSS| and |SEE|) start with a initial interception storage of 1/2 mm, the glacier (|GLETS|) and water areas (|FLUSS| and |SEE|) start with 0 mm. In the first example, actual precipition is 1 mm: >>> states.inzp = 0.5, 0.5, 0.0, 1.0, 1.0 >>> fluxes.nkor = 1.0 >>> model.calc_nbes_inzp_v1() >>> states.inzp inzp(1.5, 1.0, 0.0, 0.0, 0.0) >>> fluxes.nbes nbes(0.0, 0.5, 1.0, 0.0, 0.0) Only for the settled area, interception capacity is not exceeded, meaning no stand precipitation occurs. Note that it is common in define zero interception capacities for glacier areas, but not mandatory. Also note that the |KInz|, |Inzp| and |NKor| values given for both water areas are ignored completely, and |Inzp| and |NBes| are simply set to zero. If there is no precipitation, there is of course also no stand precipitation and interception storage remains unchanged: >>> states.inzp = 0.5, 0.5, 0.0, 0.0, 0.0 >>> fluxes.nkor = 0. >>> model.calc_nbes_inzp_v1() >>> states.inzp inzp(0.5, 0.5, 0.0, 0.0, 0.0) >>> fluxes.nbes nbes(0.0, 0.0, 0.0, 0.0, 0.0) Interception capacities change discontinuously between consecutive months. This can result in little stand precipitation events in periods without precipitation: >>> states.inzp = 1.0, 0.0, 0.0, 0.0, 0.0 >>> derived.kinz.sied_d_jul = 0.6 >>> fluxes.nkor = 0.0 >>> model.calc_nbes_inzp_v1() >>> states.inzp inzp(0.6, 0.0, 0.0, 0.0, 0.0) >>> fluxes.nbes nbes(0.4, 0.0, 0.0, 0.0, 0.0) """
con = self.parameters.control.fastaccess der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess for k in range(con.nhru): if con.lnk[k] in (WASSER, FLUSS, SEE): flu.nbes[k] = 0. sta.inzp[k] = 0. else: flu.nbes[k] = \ max(flu.nkor[k]+sta.inzp[k] - der.kinz[con.lnk[k]-1, der.moy[self.idx_sim]], 0.) sta.inzp[k] += flu.nkor[k]-flu.nbes[k]
<SYSTEM_TASK:> Calculate the frozen part of stand precipitation. <END_TASK> <USER_TASK:> Description: def calc_sbes_v1(self): """Calculate the frozen part of stand precipitation. Required control parameters: |NHRU| |TGr| |TSp| Required flux sequences: |TKor| |NBes| Calculated flux sequence: |SBes| Examples: In the first example, the threshold temperature of seven hydrological response units is 0 °C and the corresponding temperature interval of mixed precipitation 2 °C: >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(7) >>> tgr(0.0) >>> tsp(2.0) The value of |NBes| is zero above 1 °C and equal to the value of |NBes| below -1 °C. Between these temperature values, |NBes| decreases linearly: >>> fluxes.nbes = 4.0 >>> fluxes.tkor = -10.0, -1.0, -0.5, 0.0, 0.5, 1.0, 10.0 >>> model.calc_sbes_v1() >>> fluxes.sbes sbes(4.0, 4.0, 3.0, 2.0, 1.0, 0.0, 0.0) Note the special case of a zero temperature interval. With the actual temperature being equal to the threshold temperature, the the value of `sbes` is zero: >>> tsp(0.) >>> model.calc_sbes_v1() >>> fluxes.sbes sbes(4.0, 4.0, 4.0, 0.0, 0.0, 0.0, 0.0) """
con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess for k in range(con.nhru): if flu.nbes[k] <= 0.: flu.sbes[k] = 0. elif flu.tkor[k] >= (con.tgr[k]+con.tsp[k]/2.): flu.sbes[k] = 0. elif flu.tkor[k] <= (con.tgr[k]-con.tsp[k]/2.): flu.sbes[k] = flu.nbes[k] else: flu.sbes[k] = ((((con.tgr[k]+con.tsp[k]/2.)-flu.tkor[k]) / con.tsp[k])*flu.nbes[k])
<SYSTEM_TASK:> Calculate the potential snowmelt. <END_TASK> <USER_TASK:> Description: def calc_wgtf_v1(self): """Calculate the potential snowmelt. Required control parameters: |NHRU| |Lnk| |GTF| |TRefT| |TRefN| |RSchmelz| |CPWasser| Required flux sequence: |TKor| Calculated fluxes sequence: |WGTF| Basic equation: :math:`WGTF = max(GTF \\cdot (TKor - TRefT), 0) + max(\\frac{CPWasser}{RSchmelz} \\cdot (TKor - TRefN), 0)` Examples: Initialize seven HRUs with identical degree-day factors and temperature thresholds, but different combinations of land use and air temperature: >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> simulationstep('12h') >>> nhru(7) >>> lnk(ACKER, LAUBW, FLUSS, SEE, ACKER, ACKER, ACKER) >>> gtf(5.0) >>> treft(0.0) >>> trefn(1.0) >>> fluxes.tkor = 2.0, 2.0, 2.0, 2.0, -1.0, 0.0, 1.0 Compared to most other LARSIM parameters, the specific heat capacity and melt heat capacity of water can be seen as fixed properties: >>> cpwasser(4.1868) >>> rschmelz(334.0) Note that the values of the degree-day factor are only half as much as the given value, due to the simulation step size being only half as long as the parameter step size: >>> gtf gtf(5.0) >>> gtf.values array([ 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5]) After performing the calculation, one can see that the potential melting rate is identical for the first two HRUs (|ACKER| and |LAUBW|). The land use class results in no difference, except for water areas (third and forth HRU, |FLUSS| and |SEE|), where no potential melt needs to be calculated. The last three HRUs (again |ACKER|) show the usual behaviour of the degree day method, when the actual temperature is below (fourth HRU), equal to (fifth HRU) or above (sixths zone) the threshold temperature. Additionally, the first two zones show the influence of the additional energy intake due to "warm" precipitation. Obviously, this additional term is quite negligible for common parameterizations, even if lower values for the separate threshold temperature |TRefT| would be taken into account: >>> model.calc_wgtf_v1() >>> fluxes.wgtf wgtf(5.012535, 5.012535, 0.0, 0.0, 0.0, 0.0, 2.5) """
con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess for k in range(con.nhru): if con.lnk[k] in (WASSER, FLUSS, SEE): flu.wgtf[k] = 0. else: flu.wgtf[k] = ( max(con.gtf[k]*(flu.tkor[k]-con.treft[k]), 0) + max(con.cpwasser/con.rschmelz*(flu.tkor[k]-con.trefn[k]), 0.))
<SYSTEM_TASK:> Calculate the actual amount of water melting within the snow cover. <END_TASK> <USER_TASK:> Description: def calc_schm_wats_v1(self): """Calculate the actual amount of water melting within the snow cover. Required control parameters: |NHRU| |Lnk| Required flux sequences: |SBes| |WGTF| Calculated flux sequence: |Schm| Updated state sequence: |WATS| Basic equations: :math:`\\frac{dWATS}{dt} = SBes - Schm` :math:`Schm = \\Bigl \\lbrace { {WGTF \\ | \\ WATS > 0} \\atop {0 \\ | \\ WATS = 0} }` Examples: Initialize two water (|FLUSS| and |SEE|) and four arable land (|ACKER|) HRUs. Assume the same values for the initial amount of frozen water (|WATS|) and the frozen part of stand precipitation (|SBes|), but different values for potential snowmelt (|WGTF|): >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(6) >>> lnk(FLUSS, SEE, ACKER, ACKER, ACKER, ACKER) >>> states.wats = 2.0 >>> fluxes.sbes = 1.0 >>> fluxes.wgtf = 1.0, 1.0, 0.0, 1.0, 3.0, 5.0 >>> model.calc_schm_wats_v1() >>> states.wats wats(0.0, 0.0, 3.0, 2.0, 0.0, 0.0) >>> fluxes.schm schm(0.0, 0.0, 0.0, 1.0, 3.0, 3.0) For the water areas, both the frozen amount of water and actual melt are set to zero. For all other land use classes, actual melt is either limited by potential melt or the available frozen water, which is the sum of initial frozen water and the frozen part of stand precipitation. """
con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess for k in range(con.nhru): if con.lnk[k] in (WASSER, FLUSS, SEE): sta.wats[k] = 0. flu.schm[k] = 0. else: sta.wats[k] += flu.sbes[k] flu.schm[k] = min(flu.wgtf[k], sta.wats[k]) sta.wats[k] -= flu.schm[k]
<SYSTEM_TASK:> Calculate the amount of base flow released from the soil. <END_TASK> <USER_TASK:> Description: def calc_qbb_v1(self): """Calculate the amount of base flow released from the soil. Required control parameters: |NHRU| |Lnk| |Beta| |FBeta| Required derived parameter: |WB| |WZ| Required state sequence: |BoWa| Calculated flux sequence: |QBB| Basic equations: :math:`Beta_{eff} = \\Bigl \\lbrace { {Beta \\ | \\ BoWa \\leq WZ} \\atop {Beta \\cdot (1+(FBeta-1)\\cdot\\frac{BoWa-WZ}{NFk-WZ}) \\|\\ BoWa > WZ} }` :math:`QBB = \\Bigl \\lbrace { {0 \\ | \\ BoWa \\leq WB} \\atop {Beta_{eff} \\cdot (BoWa - WB) \\|\\ BoWa > WB} }` Examples: For water and sealed areas, no base flow is calculated (see the first three HRUs of type |VERS|, |FLUSS|, and |SEE|). No principal distinction is made between the remaining land use classes (arable land |ACKER| has been selected for the last five HRUs arbitrarily): >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> simulationstep('12h') >>> nhru(8) >>> lnk(FLUSS, SEE, VERS, ACKER, ACKER, ACKER, ACKER, ACKER) >>> beta(0.04) >>> fbeta(2.0) >>> nfk(100.0, 100.0, 100.0, 0.0, 100.0, 100.0, 100.0, 200.0) >>> derived.wb(10.0) >>> derived.wz(70.0) Note the time dependence of parameter |Beta|: >>> beta beta(0.04) >>> beta.values array([ 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02]) In the first example, the actual soil water content |BoWa| is set to low values. For values below the threshold |WB|, not percolation occurs. Above |WB| (but below |WZ|), |QBB| increases linearly by an amount defined by parameter |Beta|: >>> states.bowa = 20.0, 20.0, 20.0, 0.0, 0.0, 10.0, 20.0, 20.0 >>> model.calc_qbb_v1() >>> fluxes.qbb qbb(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2, 0.2) Note that for the last two HRUs the same amount of base flow generation is determined, in spite of the fact that both exhibit different relative soil moistures. It is common to modify this "pure absolute dependency" to a "mixed absolute/relative dependency" through defining the values of parameter |WB| indirectly via parameter |RelWB|. In the second example, the actual soil water content |BoWa| is set to high values. For values below threshold |WZ|, the discussion above remains valid. For values above |WZ|, percolation shows a nonlinear behaviour when factor |FBeta| is set to values larger than one: >>> nfk(0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 200.0) >>> states.bowa = 0.0, 0.0, 0.0, 60.0, 70.0, 80.0, 100.0, 200.0 >>> model.calc_qbb_v1() >>> fluxes.qbb qbb(0.0, 0.0, 0.0, 1.0, 1.2, 1.866667, 3.6, 7.6) """
con = self.parameters.control.fastaccess der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess for k in range(con.nhru): if ((con.lnk[k] in (VERS, WASSER, FLUSS, SEE)) or (sta.bowa[k] <= der.wb[k]) or (con.nfk[k] <= 0.)): flu.qbb[k] = 0. elif sta.bowa[k] <= der.wz[k]: flu.qbb[k] = con.beta[k]*(sta.bowa[k]-der.wb[k]) else: flu.qbb[k] = (con.beta[k]*(sta.bowa[k]-der.wb[k]) * (1.+(con.fbeta[k]-1.)*((sta.bowa[k]-der.wz[k]) / (con.nfk[k]-der.wz[k]))))
<SYSTEM_TASK:> Calculate direct runoff released from the soil. <END_TASK> <USER_TASK:> Description: def calc_qdb_v1(self): """Calculate direct runoff released from the soil. Required control parameters: |NHRU| |Lnk| |NFk| |BSf| Required state sequence: |BoWa| Required flux sequence: |WaDa| Calculated flux sequence: |QDB| Basic equations: :math:`QDB = \\Bigl \\lbrace { {max(Exz, 0) \\ | \\ SfA \\leq 0} \\atop {max(Exz + NFk \\cdot SfA^{BSf+1}, 0) \\ | \\ SfA > 0} }` :math:`SFA = (1 - \\frac{BoWa}{NFk})^\\frac{1}{BSf+1} - \\frac{WaDa}{(BSf+1) \\cdot NFk}` :math:`Exz = (BoWa + WaDa) - NFk` Examples: For water areas (|FLUSS| and |SEE|), sealed areas (|VERS|), and areas without any soil storage capacity, all water is completely routed as direct runoff |QDB| (see the first four HRUs). No principal distinction is made between the remaining land use classes (arable land |ACKER| has been selected for the last five HRUs arbitrarily): >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> simulationstep('12h') >>> nhru(9) >>> lnk(FLUSS, SEE, VERS, ACKER, ACKER, ACKER, ACKER, ACKER, ACKER) >>> bsf(0.4) >>> nfk(100.0, 100.0, 100.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0) >>> fluxes.wada = 10.0 >>> states.bowa = ( ... 100.0, 100.0, 100.0, 0.0, -0.1, 0.0, 50.0, 100.0, 100.1) >>> model.calc_qdb_v1() >>> fluxes.qdb qdb(10.0, 10.0, 10.0, 10.0, 0.142039, 0.144959, 1.993649, 10.0, 10.1) With the common |BSf| value of 0.4, the discharge coefficient increases more or less exponentially with soil moisture. For soil moisture values slightly below zero or above usable field capacity, plausible amounts of generated direct runoff are ensured. """
con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess aid = self.sequences.aides.fastaccess for k in range(con.nhru): if con.lnk[k] == WASSER: flu.qdb[k] = 0. elif ((con.lnk[k] in (VERS, FLUSS, SEE)) or (con.nfk[k] <= 0.)): flu.qdb[k] = flu.wada[k] else: if sta.bowa[k] < con.nfk[k]: aid.sfa[k] = ( (1.-sta.bowa[k]/con.nfk[k])**(1./(con.bsf[k]+1.)) - (flu.wada[k]/((con.bsf[k]+1.)*con.nfk[k]))) else: aid.sfa[k] = 0. aid.exz[k] = sta.bowa[k]+flu.wada[k]-con.nfk[k] flu.qdb[k] = aid.exz[k] if aid.sfa[k] > 0.: flu.qdb[k] += aid.sfa[k]**(con.bsf[k]+1.)*con.nfk[k] flu.qdb[k] = max(flu.qdb[k], 0.)
<SYSTEM_TASK:> Update soil moisture and correct fluxes if necessary. <END_TASK> <USER_TASK:> Description: def calc_bowa_v1(self): """Update soil moisture and correct fluxes if necessary. Required control parameters: |NHRU| |Lnk| Required flux sequence: |WaDa| Updated state sequence: |BoWa| Required (and eventually corrected) flux sequences: |EvB| |QBB| |QIB1| |QIB2| |QDB| Basic equations: :math:`\\frac{dBoWa}{dt} = WaDa - EvB - QBB - QIB1 - QIB2 - QDB` :math:`BoWa \\geq 0` Examples: For water areas (|FLUSS| and |SEE|) and sealed areas (|VERS|), soil moisture |BoWa| is simply set to zero and no flux correction are performed (see the first three HRUs). No principal distinction is made between the remaining land use classes (arable land |ACKER| has been selected for the last four HRUs arbitrarily): >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(7) >>> lnk(FLUSS, SEE, VERS, ACKER, ACKER, ACKER, ACKER) >>> states.bowa = 2.0 >>> fluxes.wada = 1.0 >>> fluxes.evb = 1.0, 1.0, 1.0, 0.0, 0.1, 0.2, 0.3 >>> fluxes.qbb = 1.0, 1.0, 1.0, 0.0, 0.2, 0.4, 0.6 >>> fluxes.qib1 = 1.0, 1.0, 1.0, 0.0, 0.3, 0.6, 0.9 >>> fluxes.qib2 = 1.0, 1.0, 1.0, 0.0, 0.4, 0.8, 1.2 >>> fluxes.qdb = 1.0, 1.0, 1.0, 0.0, 0.5, 1.0, 1.5 >>> model.calc_bowa_v1() >>> states.bowa bowa(0.0, 0.0, 0.0, 3.0, 1.5, 0.0, 0.0) >>> fluxes.evb evb(1.0, 1.0, 1.0, 0.0, 0.1, 0.2, 0.2) >>> fluxes.qbb qbb(1.0, 1.0, 1.0, 0.0, 0.2, 0.4, 0.4) >>> fluxes.qib1 qib1(1.0, 1.0, 1.0, 0.0, 0.3, 0.6, 0.6) >>> fluxes.qib2 qib2(1.0, 1.0, 1.0, 0.0, 0.4, 0.8, 0.8) >>> fluxes.qdb qdb(1.0, 1.0, 1.0, 0.0, 0.5, 1.0, 1.0) For the seventh HRU, the original total loss terms would result in a negative soil moisture value. Hence it is reduced to the total loss term of the sixt HRU, which results exactly in a complete emptying of the soil storage. """
con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess aid = self.sequences.aides.fastaccess for k in range(con.nhru): if con.lnk[k] in (VERS, WASSER, FLUSS, SEE): sta.bowa[k] = 0. else: aid.bvl[k] = ( flu.evb[k]+flu.qbb[k]+flu.qib1[k]+flu.qib2[k]+flu.qdb[k]) aid.mvl[k] = sta.bowa[k]+flu.wada[k] if aid.bvl[k] > aid.mvl[k]: aid.rvl[k] = aid.mvl[k]/aid.bvl[k] flu.evb[k] *= aid.rvl[k] flu.qbb[k] *= aid.rvl[k] flu.qib1[k] *= aid.rvl[k] flu.qib2[k] *= aid.rvl[k] flu.qdb[k] *= aid.rvl[k] sta.bowa[k] = 0. else: sta.bowa[k] = aid.mvl[k]-aid.bvl[k]
<SYSTEM_TASK:> Aggregate the amount of base flow released by all "soil type" HRUs <END_TASK> <USER_TASK:> Description: def calc_qbgz_v1(self): """Aggregate the amount of base flow released by all "soil type" HRUs and the "net precipitation" above water areas of type |SEE|. Water areas of type |SEE| are assumed to be directly connected with groundwater, but not with the stream network. This is modelled by adding their (positive or negative) "net input" (|NKor|-|EvI|) to the "percolation output" of the soil containing HRUs. Required control parameters: |Lnk| |NHRU| |FHRU| Required flux sequences: |QBB| |NKor| |EvI| Calculated state sequence: |QBGZ| Basic equation: :math:`QBGZ = \\Sigma(FHRU \\cdot QBB) + \\Sigma(FHRU \\cdot (NKor_{SEE}-EvI_{SEE}))` Examples: The first example shows that |QBGZ| is the area weighted sum of |QBB| from "soil type" HRUs like arable land (|ACKER|) and of |NKor|-|EvI| from water areas of type |SEE|. All other water areas (|WASSER| and |FLUSS|) and also sealed surfaces (|VERS|) have no impact on |QBGZ|: >>> from hydpy.models.lland import * >>> parameterstep() >>> nhru(6) >>> lnk(ACKER, ACKER, VERS, WASSER, FLUSS, SEE) >>> fhru(0.1, 0.2, 0.1, 0.1, 0.1, 0.4) >>> fluxes.qbb = 2., 4.0, 300.0, 300.0, 300.0, 300.0 >>> fluxes.nkor = 200.0, 200.0, 200.0, 200.0, 200.0, 20.0 >>> fluxes.evi = 100.0, 100.0, 100.0, 100.0, 100.0, 10.0 >>> model.calc_qbgz_v1() >>> states.qbgz qbgz(5.0) The second example shows that large evaporation values above a HRU of type |SEE| can result in negative values of |QBGZ|: >>> fluxes.evi[5] = 30 >>> model.calc_qbgz_v1() >>> states.qbgz qbgz(-3.0) """
con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess sta.qbgz = 0. for k in range(con.nhru): if con.lnk[k] == SEE: sta.qbgz += con.fhru[k]*(flu.nkor[k]-flu.evi[k]) elif con.lnk[k] not in (WASSER, FLUSS, VERS): sta.qbgz += con.fhru[k]*flu.qbb[k]
<SYSTEM_TASK:> Aggregate the amount of the first interflow component released <END_TASK> <USER_TASK:> Description: def calc_qigz1_v1(self): """Aggregate the amount of the first interflow component released by all HRUs. Required control parameters: |NHRU| |FHRU| Required flux sequence: |QIB1| Calculated state sequence: |QIGZ1| Basic equation: :math:`QIGZ1 = \\Sigma(FHRU \\cdot QIB1)` Example: >>> from hydpy.models.lland import * >>> parameterstep() >>> nhru(2) >>> fhru(0.75, 0.25) >>> fluxes.qib1 = 1.0, 5.0 >>> model.calc_qigz1_v1() >>> states.qigz1 qigz1(2.0) """
con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess sta.qigz1 = 0. for k in range(con.nhru): sta.qigz1 += con.fhru[k]*flu.qib1[k]
<SYSTEM_TASK:> Aggregate the amount of the second interflow component released <END_TASK> <USER_TASK:> Description: def calc_qigz2_v1(self): """Aggregate the amount of the second interflow component released by all HRUs. Required control parameters: |NHRU| |FHRU| Required flux sequence: |QIB2| Calculated state sequence: |QIGZ2| Basic equation: :math:`QIGZ2 = \\Sigma(FHRU \\cdot QIB2)` Example: >>> from hydpy.models.lland import * >>> parameterstep() >>> nhru(2) >>> fhru(0.75, 0.25) >>> fluxes.qib2 = 1.0, 5.0 >>> model.calc_qigz2_v1() >>> states.qigz2 qigz2(2.0) """
con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess sta.qigz2 = 0. for k in range(con.nhru): sta.qigz2 += con.fhru[k]*flu.qib2[k]
<SYSTEM_TASK:> Aggregate the amount of total direct flow released by all HRUs. <END_TASK> <USER_TASK:> Description: def calc_qdgz_v1(self): """Aggregate the amount of total direct flow released by all HRUs. Required control parameters: |Lnk| |NHRU| |FHRU| Required flux sequence: |QDB| |NKor| |EvI| Calculated flux sequence: |QDGZ| Basic equation: :math:`QDGZ = \\Sigma(FHRU \\cdot QDB) + \\Sigma(FHRU \\cdot (NKor_{FLUSS}-EvI_{FLUSS}))` Examples: The first example shows that |QDGZ| is the area weighted sum of |QDB| from "land type" HRUs like arable land (|ACKER|) and sealed surfaces (|VERS|) as well as of |NKor|-|EvI| from water areas of type |FLUSS|. Water areas of type |WASSER| and |SEE| have no impact on |QDGZ|: >>> from hydpy.models.lland import * >>> parameterstep() >>> nhru(5) >>> lnk(ACKER, VERS, WASSER, SEE, FLUSS) >>> fhru(0.1, 0.2, 0.1, 0.2, 0.4) >>> fluxes.qdb = 2., 4.0, 300.0, 300.0, 300.0 >>> fluxes.nkor = 200.0, 200.0, 200.0, 200.0, 20.0 >>> fluxes.evi = 100.0, 100.0, 100.0, 100.0, 10.0 >>> model.calc_qdgz_v1() >>> fluxes.qdgz qdgz(5.0) The second example shows that large evaporation values above a HRU of type |FLUSS| can result in negative values of |QDGZ|: >>> fluxes.evi[4] = 30 >>> model.calc_qdgz_v1() >>> fluxes.qdgz qdgz(-3.0) """
con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess flu.qdgz = 0. for k in range(con.nhru): if con.lnk[k] == FLUSS: flu.qdgz += con.fhru[k]*(flu.nkor[k]-flu.evi[k]) elif con.lnk[k] not in (WASSER, SEE): flu.qdgz += con.fhru[k]*flu.qdb[k]
<SYSTEM_TASK:> Seperate total direct flow into a small and a fast component. <END_TASK> <USER_TASK:> Description: def calc_qdgz1_qdgz2_v1(self): """Seperate total direct flow into a small and a fast component. Required control parameters: |A1| |A2| Required flux sequence: |QDGZ| Calculated state sequences: |QDGZ1| |QDGZ2| Basic equation: :math:`QDGZ2 = \\frac{(QDGZ-A2)^2}{QDGZ+A1-A2}` :math:`QDGZ1 = QDGZ - QDGZ1` Examples: The formula for calculating the amount of the fast component of direct flow is borrowed from the famous curve number approach. Parameter |A2| would be the initial loss and parameter |A1| the maximum storage, but one should not take this analogy too serious. Instead, with the value of parameter |A1| set to zero, parameter |A2| just defines the maximum amount of "slow" direct runoff per time step: >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> simulationstep('12h') >>> a1(0.0) Let us set the value of |A2| to 4 mm/d, which is 2 mm/12h with respect to the selected simulation step size: >>> a2(4.0) >>> a2 a2(4.0) >>> a2.value 2.0 Define a test function and let it calculate |QDGZ1| and |QDGZ1| for values of |QDGZ| ranging from -10 to 100 mm/12h: >>> from hydpy import UnitTest >>> test = UnitTest(model, ... model.calc_qdgz1_qdgz2_v1, ... last_example=6, ... parseqs=(fluxes.qdgz, ... states.qdgz1, ... states.qdgz2)) >>> test.nexts.qdgz = -10.0, 0.0, 1.0, 2.0, 3.0, 100.0 >>> test() | ex. | qdgz | qdgz1 | qdgz2 | ------------------------------- | 1 | -10.0 | -10.0 | 0.0 | | 2 | 0.0 | 0.0 | 0.0 | | 3 | 1.0 | 1.0 | 0.0 | | 4 | 2.0 | 2.0 | 0.0 | | 5 | 3.0 | 2.0 | 1.0 | | 6 | 100.0 | 2.0 | 98.0 | Setting |A2| to zero and |A1| to 4 mm/d (or 2 mm/12h) results in a smoother transition: >>> a2(0.0) >>> a1(4.0) >>> test() | ex. | qdgz | qdgz1 | qdgz2 | -------------------------------------- | 1 | -10.0 | -10.0 | 0.0 | | 2 | 0.0 | 0.0 | 0.0 | | 3 | 1.0 | 0.666667 | 0.333333 | | 4 | 2.0 | 1.0 | 1.0 | | 5 | 3.0 | 1.2 | 1.8 | | 6 | 100.0 | 1.960784 | 98.039216 | Alternatively, one can mix these two configurations by setting the values of both parameters to 2 mm/h: >>> a2(2.0) >>> a1(2.0) >>> test() | ex. | qdgz | qdgz1 | qdgz2 | ------------------------------------- | 1 | -10.0 | -10.0 | 0.0 | | 2 | 0.0 | 0.0 | 0.0 | | 3 | 1.0 | 1.0 | 0.0 | | 4 | 2.0 | 1.5 | 0.5 | | 5 | 3.0 | 1.666667 | 1.333333 | | 6 | 100.0 | 1.99 | 98.01 | Note the similarity of the results for very high values of total direct flow |QDGZ| in all three examples, which converge to the sum of the values of parameter |A1| and |A2|, representing the maximum value of `slow` direct flow generation per simulation step """
con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess if flu.qdgz > con.a2: sta.qdgz2 = (flu.qdgz-con.a2)**2/(flu.qdgz+con.a1-con.a2) sta.qdgz1 = flu.qdgz-sta.qdgz2 else: sta.qdgz2 = 0. sta.qdgz1 = flu.qdgz
<SYSTEM_TASK:> Perform the runoff concentration calculation for base flow. <END_TASK> <USER_TASK:> Description: def calc_qbga_v1(self): """Perform the runoff concentration calculation for base flow. The working equation is the analytical solution of the linear storage equation under the assumption of constant change in inflow during the simulation time step. Required derived parameter: |KB| Required flux sequence: |QBGZ| Calculated state sequence: |QBGA| Basic equation: :math:`QBGA_{neu} = QBGA_{alt} + (QBGZ_{alt}-QBGA_{alt}) \\cdot (1-exp(-KB^{-1})) + (QBGZ_{neu}-QBGZ_{alt}) \\cdot (1-KB\\cdot(1-exp(-KB^{-1})))` Examples: A normal test case: >>> from hydpy.models.lland import * >>> parameterstep() >>> derived.kb(0.1) >>> states.qbgz.old = 2.0 >>> states.qbgz.new = 4.0 >>> states.qbga.old = 3.0 >>> model.calc_qbga_v1() >>> states.qbga qbga(3.800054) First extreme test case (zero division is circumvented): >>> derived.kb(0.0) >>> model.calc_qbga_v1() >>> states.qbga qbga(4.0) Second extreme test case (numerical overflow is circumvented): >>> derived.kb(1e500) >>> model.calc_qbga_v1() >>> states.qbga qbga(5.0) """
der = self.parameters.derived.fastaccess old = self.sequences.states.fastaccess_old new = self.sequences.states.fastaccess_new if der.kb <= 0.: new.qbga = new.qbgz elif der.kb > 1e200: new.qbga = old.qbga+new.qbgz-old.qbgz else: d_temp = (1.-modelutils.exp(-1./der.kb)) new.qbga = (old.qbga + (old.qbgz-old.qbga)*d_temp + (new.qbgz-old.qbgz)*(1.-der.kb*d_temp))
<SYSTEM_TASK:> Perform the runoff concentration calculation for the first <END_TASK> <USER_TASK:> Description: def calc_qiga1_v1(self): """Perform the runoff concentration calculation for the first interflow component. The working equation is the analytical solution of the linear storage equation under the assumption of constant change in inflow during the simulation time step. Required derived parameter: |KI1| Required state sequence: |QIGZ1| Calculated state sequence: |QIGA1| Basic equation: :math:`QIGA1_{neu} = QIGA1_{alt} + (QIGZ1_{alt}-QIGA1_{alt}) \\cdot (1-exp(-KI1^{-1})) + (QIGZ1_{neu}-QIGZ1_{alt}) \\cdot (1-KI1\\cdot(1-exp(-KI1^{-1})))` Examples: A normal test case: >>> from hydpy.models.lland import * >>> parameterstep() >>> derived.ki1(0.1) >>> states.qigz1.old = 2.0 >>> states.qigz1.new = 4.0 >>> states.qiga1.old = 3.0 >>> model.calc_qiga1_v1() >>> states.qiga1 qiga1(3.800054) First extreme test case (zero division is circumvented): >>> derived.ki1(0.0) >>> model.calc_qiga1_v1() >>> states.qiga1 qiga1(4.0) Second extreme test case (numerical overflow is circumvented): >>> derived.ki1(1e500) >>> model.calc_qiga1_v1() >>> states.qiga1 qiga1(5.0) """
der = self.parameters.derived.fastaccess old = self.sequences.states.fastaccess_old new = self.sequences.states.fastaccess_new if der.ki1 <= 0.: new.qiga1 = new.qigz1 elif der.ki1 > 1e200: new.qiga1 = old.qiga1+new.qigz1-old.qigz1 else: d_temp = (1.-modelutils.exp(-1./der.ki1)) new.qiga1 = (old.qiga1 + (old.qigz1-old.qiga1)*d_temp + (new.qigz1-old.qigz1)*(1.-der.ki1*d_temp))
<SYSTEM_TASK:> Perform the runoff concentration calculation for the second <END_TASK> <USER_TASK:> Description: def calc_qiga2_v1(self): """Perform the runoff concentration calculation for the second interflow component. The working equation is the analytical solution of the linear storage equation under the assumption of constant change in inflow during the simulation time step. Required derived parameter: |KI2| Required state sequence: |QIGZ2| Calculated state sequence: |QIGA2| Basic equation: :math:`QIGA2_{neu} = QIGA2_{alt} + (QIGZ2_{alt}-QIGA2_{alt}) \\cdot (1-exp(-KI2^{-1})) + (QIGZ2_{neu}-QIGZ2_{alt}) \\cdot (1-KI2\\cdot(1-exp(-KI2^{-1})))` Examples: A normal test case: >>> from hydpy.models.lland import * >>> parameterstep() >>> derived.ki2(0.1) >>> states.qigz2.old = 2.0 >>> states.qigz2.new = 4.0 >>> states.qiga2.old = 3.0 >>> model.calc_qiga2_v1() >>> states.qiga2 qiga2(3.800054) First extreme test case (zero division is circumvented): >>> derived.ki2(0.0) >>> model.calc_qiga2_v1() >>> states.qiga2 qiga2(4.0) Second extreme test case (numerical overflow is circumvented): >>> derived.ki2(1e500) >>> model.calc_qiga2_v1() >>> states.qiga2 qiga2(5.0) """
der = self.parameters.derived.fastaccess old = self.sequences.states.fastaccess_old new = self.sequences.states.fastaccess_new if der.ki2 <= 0.: new.qiga2 = new.qigz2 elif der.ki2 > 1e200: new.qiga2 = old.qiga2+new.qigz2-old.qigz2 else: d_temp = (1.-modelutils.exp(-1./der.ki2)) new.qiga2 = (old.qiga2 + (old.qigz2-old.qiga2)*d_temp + (new.qigz2-old.qigz2)*(1.-der.ki2*d_temp))
<SYSTEM_TASK:> Perform the runoff concentration calculation for "slow" direct runoff. <END_TASK> <USER_TASK:> Description: def calc_qdga1_v1(self): """Perform the runoff concentration calculation for "slow" direct runoff. The working equation is the analytical solution of the linear storage equation under the assumption of constant change in inflow during the simulation time step. Required derived parameter: |KD1| Required state sequence: |QDGZ1| Calculated state sequence: |QDGA1| Basic equation: :math:`QDGA1_{neu} = QDGA1_{alt} + (QDGZ1_{alt}-QDGA1_{alt}) \\cdot (1-exp(-KD1^{-1})) + (QDGZ1_{neu}-QDGZ1_{alt}) \\cdot (1-KD1\\cdot(1-exp(-KD1^{-1})))` Examples: A normal test case: >>> from hydpy.models.lland import * >>> parameterstep() >>> derived.kd1(0.1) >>> states.qdgz1.old = 2.0 >>> states.qdgz1.new = 4.0 >>> states.qdga1.old = 3.0 >>> model.calc_qdga1_v1() >>> states.qdga1 qdga1(3.800054) First extreme test case (zero division is circumvented): >>> derived.kd1(0.0) >>> model.calc_qdga1_v1() >>> states.qdga1 qdga1(4.0) Second extreme test case (numerical overflow is circumvented): >>> derived.kd1(1e500) >>> model.calc_qdga1_v1() >>> states.qdga1 qdga1(5.0) """
der = self.parameters.derived.fastaccess old = self.sequences.states.fastaccess_old new = self.sequences.states.fastaccess_new if der.kd1 <= 0.: new.qdga1 = new.qdgz1 elif der.kd1 > 1e200: new.qdga1 = old.qdga1+new.qdgz1-old.qdgz1 else: d_temp = (1.-modelutils.exp(-1./der.kd1)) new.qdga1 = (old.qdga1 + (old.qdgz1-old.qdga1)*d_temp + (new.qdgz1-old.qdgz1)*(1.-der.kd1*d_temp))
<SYSTEM_TASK:> Perform the runoff concentration calculation for "fast" direct runoff. <END_TASK> <USER_TASK:> Description: def calc_qdga2_v1(self): """Perform the runoff concentration calculation for "fast" direct runoff. The working equation is the analytical solution of the linear storage equation under the assumption of constant change in inflow during the simulation time step. Required derived parameter: |KD2| Required state sequence: |QDGZ2| Calculated state sequence: |QDGA2| Basic equation: :math:`QDGA2_{neu} = QDGA2_{alt} + (QDGZ2_{alt}-QDGA2_{alt}) \\cdot (1-exp(-KD2^{-1})) + (QDGZ2_{neu}-QDGZ2_{alt}) \\cdot (1-KD2\\cdot(1-exp(-KD2^{-1})))` Examples: A normal test case: >>> from hydpy.models.lland import * >>> parameterstep() >>> derived.kd2(0.1) >>> states.qdgz2.old = 2.0 >>> states.qdgz2.new = 4.0 >>> states.qdga2.old = 3.0 >>> model.calc_qdga2_v1() >>> states.qdga2 qdga2(3.800054) First extreme test case (zero division is circumvented): >>> derived.kd2(0.0) >>> model.calc_qdga2_v1() >>> states.qdga2 qdga2(4.0) Second extreme test case (numerical overflow is circumvented): >>> derived.kd2(1e500) >>> model.calc_qdga2_v1() >>> states.qdga2 qdga2(5.0) """
der = self.parameters.derived.fastaccess old = self.sequences.states.fastaccess_old new = self.sequences.states.fastaccess_new if der.kd2 <= 0.: new.qdga2 = new.qdgz2 elif der.kd2 > 1e200: new.qdga2 = old.qdga2+new.qdgz2-old.qdgz2 else: d_temp = (1.-modelutils.exp(-1./der.kd2)) new.qdga2 = (old.qdga2 + (old.qdgz2-old.qdga2)*d_temp + (new.qdgz2-old.qdgz2)*(1.-der.kd2*d_temp))
<SYSTEM_TASK:> Calculate the final runoff. <END_TASK> <USER_TASK:> Description: def calc_q_v1(self): """Calculate the final runoff. Note that, in case there are water areas, their |NKor| values are added and their |EvPo| values are subtracted from the "potential" runoff value, if possible. This hold true for |WASSER| only and is due to compatibility with the original LARSIM implementation. Using land type |WASSER| can result in problematic modifications of simulated runoff series. It seems advisable to use land type |FLUSS| and/or land type |SEE| instead. Required control parameters: |NHRU| |FHRU| |Lnk| |NegQ| Required flux sequence: |NKor| Updated flux sequence: |EvI| Required state sequences: |QBGA| |QIGA1| |QIGA2| |QDGA1| |QDGA2| Calculated flux sequence: |lland_fluxes.Q| Basic equations: :math:`Q = QBGA + QIGA1 + QIGA2 + QDGA1 + QDGA2 + NKor_{WASSER} - EvI_{WASSER}` :math:`Q \\geq 0` Examples: When there are no water areas in the respective subbasin (we choose arable land |ACKER| arbitrarily), the different runoff components are simply summed up: >>> from hydpy.models.lland import * >>> parameterstep() >>> nhru(3) >>> lnk(ACKER, ACKER, ACKER) >>> fhru(0.5, 0.2, 0.3) >>> negq(False) >>> states.qbga = 0.1 >>> states.qiga1 = 0.3 >>> states.qiga2 = 0.5 >>> states.qdga1 = 0.7 >>> states.qdga2 = 0.9 >>> fluxes.nkor = 10.0 >>> fluxes.evi = 4.0, 5.0, 3.0 >>> model.calc_q_v1() >>> fluxes.q q(2.5) >>> fluxes.evi evi(4.0, 5.0, 3.0) The defined values of interception evaporation do not show any impact on the result of the given example, the predefined values for sequence |EvI| remain unchanged. But when the first HRU is assumed to be a water area (|WASSER|), its adjusted precipitaton |NKor| value and its interception evaporation |EvI| value are added to and subtracted from |lland_fluxes.Q| respectively: >>> control.lnk(WASSER, VERS, NADELW) >>> model.calc_q_v1() >>> fluxes.q q(5.5) >>> fluxes.evi evi(4.0, 5.0, 3.0) Note that only 5 mm are added (instead of the |NKor| value 10 mm) and that only 2 mm are substracted (instead of the |EvI| value 4 mm, as the first HRU`s area only accounts for 50 % of the subbasin area. Setting also the land use class of the second HRU to land type |WASSER| and resetting |NKor| to zero would result in overdrying. To avoid this, both actual water evaporation values stored in sequence |EvI| are reduced by the same factor: >>> control.lnk(WASSER, WASSER, NADELW) >>> fluxes.nkor = 0.0 >>> model.calc_q_v1() >>> fluxes.q q(0.0) >>> fluxes.evi evi(3.333333, 4.166667, 3.0) The handling from water areas of type |FLUSS| and |SEE| differs from those of type |WASSER|, as these do receive their net input before the runoff concentration routines are applied. This should be more realistic in most cases (especially for type |SEE| representing lakes not direct connected to the stream network). But it could sometimes result in negative outflow values. This is avoided by simply setting |lland_fluxes.Q| to zero and adding the truncated negative outflow value to the |EvI| value of all HRUs of type |FLUSS| and |SEE|: >>> control.lnk(FLUSS, SEE, NADELW) >>> states.qbga = -1.0 >>> states.qdga2 = -1.5 >>> fluxes.evi = 4.0, 5.0, 3.0 >>> model.calc_q_v1() >>> fluxes.q q(0.0) >>> fluxes.evi evi(2.571429, 3.571429, 3.0) This adjustment of |EvI| is only correct regarding the total water balance. Neither spatial nor temporal consistency of the resulting |EvI| values are assured. In the most extreme case, even negative |EvI| values might occur. This seems acceptable, as long as the adjustment of |EvI| is rarely triggered. When in doubt about this, check sequences |EvPo| and |EvI| of HRUs of types |FLUSS| and |SEE| for possible discrepancies. Also note that there might occur unnecessary corrections of |lland_fluxes.Q| in case landtype |WASSER| is combined with either landtype |SEE| or |FLUSS|. Eventually you might want to avoid correcting |lland_fluxes.Q|. This can be achieved by setting parameter |NegQ| to `True`: >>> negq(True) >>> fluxes.evi = 4.0, 5.0, 3.0 >>> model.calc_q_v1() >>> fluxes.q q(-1.0) >>> fluxes.evi evi(4.0, 5.0, 3.0) """
con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess aid = self.sequences.aides.fastaccess flu.q = sta.qbga+sta.qiga1+sta.qiga2+sta.qdga1+sta.qdga2 if (not con.negq) and (flu.q < 0.): d_area = 0. for k in range(con.nhru): if con.lnk[k] in (FLUSS, SEE): d_area += con.fhru[k] if d_area > 0.: for k in range(con.nhru): if con.lnk[k] in (FLUSS, SEE): flu.evi[k] += flu.q/d_area flu.q = 0. aid.epw = 0. for k in range(con.nhru): if con.lnk[k] == WASSER: flu.q += con.fhru[k]*flu.nkor[k] aid.epw += con.fhru[k]*flu.evi[k] if (flu.q > aid.epw) or con.negq: flu.q -= aid.epw elif aid.epw > 0.: for k in range(con.nhru): if con.lnk[k] == WASSER: flu.evi[k] *= flu.q/aid.epw flu.q = 0.
<SYSTEM_TASK:> Performs the actual interpolation or extrapolation. <END_TASK> <USER_TASK:> Description: def calc_outputs_v1(self): """Performs the actual interpolation or extrapolation. Required control parameters: |XPoints| |YPoints| Required derived parameter: |NmbPoints| |NmbBranches| Required flux sequence: |Input| Calculated flux sequence: |Outputs| Examples: As a simple example, assume a weir directing all discharge into `branch1` until the capacity limit of 2 m³/s is reached. The discharge exceeding this threshold is directed into `branch2`: >>> from hydpy.models.hbranch import * >>> parameterstep() >>> xpoints(0., 2., 4.) >>> ypoints(branch1=[0., 2., 2.], ... branch2=[0., 0., 2.]) >>> model.parameters.update() Low discharge example (linear interpolation between the first two supporting point pairs): >>> fluxes.input = 1. >>> model.calc_outputs_v1() >>> fluxes.outputs outputs(branch1=1.0, branch2=0.0) Medium discharge example (linear interpolation between the second two supporting point pairs): >>> fluxes.input = 3. >>> model.calc_outputs_v1() >>> print(fluxes.outputs) outputs(branch1=2.0, branch2=1.0) High discharge example (linear extrapolation beyond the second two supporting point pairs): >>> fluxes.input = 5. >>> model.calc_outputs_v1() >>> fluxes.outputs outputs(branch1=2.0, branch2=3.0) Non-monotonous relationships and balance violations are allowed, e.g.: >>> xpoints(0., 2., 4., 6.) >>> ypoints(branch1=[0., 2., 0., 0.], ... branch2=[0., 0., 2., 4.]) >>> model.parameters.update() >>> fluxes.input = 7. >>> model.calc_outputs_v1() >>> fluxes.outputs outputs(branch1=0.0, branch2=5.0) """
con = self.parameters.control.fastaccess der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess # Search for the index of the two relevant x points... for pdx in range(1, der.nmbpoints): if con.xpoints[pdx] > flu.input: break # ...and use it for linear interpolation (or extrapolation). for bdx in range(der.nmbbranches): flu.outputs[bdx] = ( (flu.input-con.xpoints[pdx-1]) * (con.ypoints[bdx, pdx]-con.ypoints[bdx, pdx-1]) / (con.xpoints[pdx]-con.xpoints[pdx-1]) + con.ypoints[bdx, pdx-1])
<SYSTEM_TASK:> Connect the |LinkSequence| instances handled by the actual model <END_TASK> <USER_TASK:> Description: def connect(self): """Connect the |LinkSequence| instances handled by the actual model to the |NodeSequence| instances handled by one inlet node and multiple oulet nodes. The HydPy-H-Branch model passes multiple output values to different outlet nodes. This requires additional information regarding the `direction` of each output value. Therefore, node names are used as keywords. Assume the discharge values of both nodes `inflow1` and `inflow2` shall be branched to nodes `outflow1` and `outflow2` via element `branch`: >>> from hydpy import * >>> branch = Element('branch', ... inlets=['inflow1', 'inflow2'], ... outlets=['outflow1', 'outflow2']) Then parameter |YPoints| relates different supporting points via its keyword arguments to the respective nodes: >>> from hydpy.models.hbranch import * >>> parameterstep() >>> xpoints(0.0, 3.0) >>> ypoints(outflow1=[0.0, 1.0], outflow2=[0.0, 2.0]) >>> parameters.update() After connecting the model with its element the total discharge value of nodes `inflow1` and `inflow2` can be properly divided: >>> branch.model = model >>> branch.inlets.inflow1.sequences.sim = 1.0 >>> branch.inlets.inflow2.sequences.sim = 5.0 >>> model.doit(0) >>> print(branch.outlets.outflow1.sequences.sim) sim(2.0) >>> print(branch.outlets.outflow2.sequences.sim) sim(4.0) In case of missing (or misspelled) outlet nodes, the following error is raised: >>> branch.outlets.mutable = True >>> del branch.outlets.outflow1 >>> parameters.update() >>> model.connect() Traceback (most recent call last): ... RuntimeError: Model `hbranch` of element `branch` tried to connect \ to an outlet node named `outflow1`, which is not an available outlet node \ of element `branch`. """
nodes = self.element.inlets total = self.sequences.inlets.total if total.shape != (len(nodes),): total.shape = len(nodes) for idx, node in enumerate(nodes): double = node.get_double('inlets') total.set_pointer(double, idx) for (idx, name) in enumerate(self.nodenames): try: outlet = getattr(self.element.outlets, name) double = outlet.get_double('outlets') except AttributeError: raise RuntimeError( f'Model {objecttools.elementphrase(self)} tried ' f'to connect to an outlet node named `{name}`, ' f'which is not an available outlet node of element ' f'`{self.element.name}`.') self.sequences.outlets.branched.set_pointer(double, idx)
<SYSTEM_TASK:> Determine the number of response functions. <END_TASK> <USER_TASK:> Description: def update(self): """Determine the number of response functions. >>> from hydpy.models.arma import * >>> parameterstep('1d') >>> responses(((1., 2.), (1.,)), th_3=((1.,), (1., 2., 3.))) >>> derived.nmb.update() >>> derived.nmb nmb(2) Note that updating parameter `nmb` sets the shape of the flux sequences |QPIn|, |QPOut|, |QMA|, and |QAR| automatically. >>> fluxes.qpin qpin(nan, nan) >>> fluxes.qpout qpout(nan, nan) >>> fluxes.qma qma(nan, nan) >>> fluxes.qar qar(nan, nan) """
pars = self.subpars.pars responses = pars.control.responses fluxes = pars.model.sequences.fluxes self(len(responses)) fluxes.qpin.shape = self.value fluxes.qpout.shape = self.value fluxes.qma.shape = self.value fluxes.qar.shape = self.value
<SYSTEM_TASK:> Determine all AR coefficients. <END_TASK> <USER_TASK:> Description: def update(self): """Determine all AR coefficients. >>> from hydpy.models.arma import * >>> parameterstep('1d') >>> responses(((1., 2.), (1.,)), th_3=((1.,), (1., 2., 3.))) >>> derived.ar_coefs.update() >>> derived.ar_coefs ar_coefs([[1.0, 2.0], [1.0, nan]]) Note that updating parameter `ar_coefs` sets the shape of the log sequence |LogOut| automatically. >>> logs.logout logout([[nan, nan], [nan, nan]]) """
pars = self.subpars.pars coefs = pars.control.responses.ar_coefs self.shape = coefs.shape self(coefs) pars.model.sequences.logs.logout.shape = self.shape
<SYSTEM_TASK:> Determine all MA coefficients. <END_TASK> <USER_TASK:> Description: def update(self): """Determine all MA coefficients. >>> from hydpy.models.arma import * >>> parameterstep('1d') >>> responses(((1., 2.), (1.,)), th_3=((1.,), (1., 2., 3.))) >>> derived.ma_coefs.update() >>> derived.ma_coefs ma_coefs([[1.0, nan, nan], [1.0, 2.0, 3.0]]) Note that updating parameter `ar_coefs` sets the shape of the log sequence |LogIn| automatically. >>> logs.login login([[nan, nan, nan], [nan, nan, nan]]) """
pars = self.subpars.pars coefs = pars.control.responses.ma_coefs self.shape = coefs.shape self(coefs) pars.model.sequences.logs.login.shape = self.shape
<SYSTEM_TASK:> Try to convert the given argument to a |list| of |Selection| <END_TASK> <USER_TASK:> Description: def __getiterable(value): # ToDo: refactor """Try to convert the given argument to a |list| of |Selection| objects and return it. """
if isinstance(value, Selection): return [value] try: for selection in value: if not isinstance(selection, Selection): raise TypeError return list(value) except TypeError: raise TypeError( f'Binary operations on Selections objects are defined for ' f'other Selections objects, single Selection objects, or ' f'iterables containing `Selection` objects, but the type of ' f'the given argument is `{objecttools.classname(value)}`.')
<SYSTEM_TASK:> Return the network upstream of the given starting point, including <END_TASK> <USER_TASK:> Description: def search_upstream(self, device: devicetools.Device, name: str = 'upstream') -> 'Selection': """Return the network upstream of the given starting point, including the starting point itself. >>> from hydpy.core.examples import prepare_full_example_2 >>> hp, pub, _ = prepare_full_example_2() You can pass both |Node| and |Element| objects and, optionally, the name of the newly created |Selection| object: >>> test = pub.selections.complete.copy('test') >>> test.search_upstream(hp.nodes.lahn_2) Selection("upstream", nodes=("dill", "lahn_1", "lahn_2"), elements=("land_dill", "land_lahn_1", "land_lahn_2", "stream_dill_lahn_2", "stream_lahn_1_lahn_2")) >>> test.search_upstream( ... hp.elements.stream_lahn_1_lahn_2, 'UPSTREAM') Selection("UPSTREAM", nodes="lahn_1", elements=("land_lahn_1", "stream_lahn_1_lahn_2")) Wrong device specifications result in errors like the following: >>> test.search_upstream(1) Traceback (most recent call last): ... TypeError: While trying to determine the upstream network of \ selection `test`, the following error occurred: Either a `Node` or \ an `Element` object is required as the "outlet device", but the given \ `device` value is of type `int`. >>> pub.selections.headwaters.search_upstream(hp.nodes.lahn_3) Traceback (most recent call last): ... KeyError: "While trying to determine the upstream network of \ selection `headwaters`, the following error occurred: 'No node named \ `lahn_3` available.'" Method |Selection.select_upstream| restricts the current selection to the one determined with the method |Selection.search_upstream|: >>> test.select_upstream(hp.nodes.lahn_2) Selection("test", nodes=("dill", "lahn_1", "lahn_2"), elements=("land_dill", "land_lahn_1", "land_lahn_2", "stream_dill_lahn_2", "stream_lahn_1_lahn_2")) On the contrary, the method |Selection.deselect_upstream| restricts the current selection to all devices not determined by method |Selection.search_upstream|: >>> complete = pub.selections.complete.deselect_upstream( ... hp.nodes.lahn_2) >>> complete Selection("complete", nodes="lahn_3", elements=("land_lahn_3", "stream_lahn_2_lahn_3")) If necessary, include the "outlet device" manually afterwards: >>> complete.nodes += hp.nodes.lahn_2 >>> complete Selection("complete", nodes=("lahn_2", "lahn_3"), elements=("land_lahn_3", "stream_lahn_2_lahn_3")) """
try: selection = Selection(name) if isinstance(device, devicetools.Node): node = self.nodes[device.name] return self.__get_nextnode(node, selection) if isinstance(device, devicetools.Element): element = self.elements[device.name] return self.__get_nextelement(element, selection) raise TypeError( f'Either a `Node` or an `Element` object is required ' f'as the "outlet device", but the given `device` value ' f'is of type `{objecttools.classname(device)}`.') except BaseException: objecttools.augment_excmessage( f'While trying to determine the upstream network of ' f'selection `{self.name}`')
<SYSTEM_TASK:> Restrict the current selection to the network upstream of the given <END_TASK> <USER_TASK:> Description: def select_upstream(self, device: devicetools.Device) -> 'Selection': """Restrict the current selection to the network upstream of the given starting point, including the starting point itself. See the documentation on method |Selection.search_upstream| for additional information. """
upstream = self.search_upstream(device) self.nodes = upstream.nodes self.elements = upstream.elements return self
<SYSTEM_TASK:> Return a |Selection| object containing only the elements <END_TASK> <USER_TASK:> Description: def search_modeltypes(self, *models: ModelTypesArg, name: str = 'modeltypes') -> 'Selection': """Return a |Selection| object containing only the elements currently handling models of the given types. >>> from hydpy.core.examples import prepare_full_example_2 >>> hp, pub, _ = prepare_full_example_2() You can pass both |Model| objects and names and, as a keyword argument, the name of the newly created |Selection| object: >>> test = pub.selections.complete.copy('test') >>> from hydpy import prepare_model >>> hland_v1 = prepare_model('hland_v1') >>> test.search_modeltypes(hland_v1) Selection("modeltypes", nodes=(), elements=("land_dill", "land_lahn_1", "land_lahn_2", "land_lahn_3")) >>> test.search_modeltypes( ... hland_v1, 'hstream_v1', 'lland_v1', name='MODELTYPES') Selection("MODELTYPES", nodes=(), elements=("land_dill", "land_lahn_1", "land_lahn_2", "land_lahn_3", "stream_dill_lahn_2", "stream_lahn_1_lahn_2", "stream_lahn_2_lahn_3")) Wrong model specifications result in errors like the following: >>> test.search_modeltypes('wrong') Traceback (most recent call last): ... ModuleNotFoundError: While trying to determine the elements of \ selection `test` handling the model defined by the argument(s) `wrong` \ of type(s) `str`, the following error occurred: \ No module named 'hydpy.models.wrong' Method |Selection.select_modeltypes| restricts the current selection to the one determined with the method the |Selection.search_modeltypes|: >>> test.select_modeltypes(hland_v1) Selection("test", nodes=(), elements=("land_dill", "land_lahn_1", "land_lahn_2", "land_lahn_3")) On the contrary, the method |Selection.deselect_upstream| restricts the current selection to all devices not determined by method the |Selection.search_upstream|: >>> pub.selections.complete.deselect_modeltypes(hland_v1) Selection("complete", nodes=(), elements=("stream_dill_lahn_2", "stream_lahn_1_lahn_2", "stream_lahn_2_lahn_3")) """
try: typelist = [] for model in models: if not isinstance(model, modeltools.Model): model = importtools.prepare_model(model) typelist.append(type(model)) typetuple = tuple(typelist) selection = Selection(name) for element in self.elements: if isinstance(element.model, typetuple): selection.elements += element return selection except BaseException: values = objecttools.enumeration(models) classes = objecttools.enumeration( objecttools.classname(model) for model in models) objecttools.augment_excmessage( f'While trying to determine the elements of selection ' f'`{self.name}` handling the model defined by the ' f'argument(s) `{values}` of type(s) `{classes}`')
<SYSTEM_TASK:> Return a new selection containing all nodes of the current <END_TASK> <USER_TASK:> Description: def search_nodenames(self, *substrings: str, name: str = 'nodenames') -> \ 'Selection': """Return a new selection containing all nodes of the current selection with a name containing at least one of the given substrings. >>> from hydpy.core.examples import prepare_full_example_2 >>> hp, pub, _ = prepare_full_example_2() Pass the (sub)strings as positional arguments and, optionally, the name of the newly created |Selection| object as a keyword argument: >>> test = pub.selections.complete.copy('test') >>> from hydpy import prepare_model >>> test.search_nodenames('dill', 'lahn_1') Selection("nodenames", nodes=("dill", "lahn_1"), elements=()) Wrong string specifications result in errors like the following: >>> test.search_nodenames(['dill', 'lahn_1']) Traceback (most recent call last): ... TypeError: While trying to determine the nodes of selection \ `test` with names containing at least one of the given substrings \ `['dill', 'lahn_1']`, the following error occurred: 'in <string>' \ requires string as left operand, not list Method |Selection.select_nodenames| restricts the current selection to the one determined with the the method |Selection.search_nodenames|: >>> test.select_nodenames('dill', 'lahn_1') Selection("test", nodes=("dill", "lahn_1"), elements=("land_dill", "land_lahn_1", "land_lahn_2", "land_lahn_3", "stream_dill_lahn_2", "stream_lahn_1_lahn_2", "stream_lahn_2_lahn_3")) On the contrary, the method |Selection.deselect_nodenames| restricts the current selection to all devices not determined by the method |Selection.search_nodenames|: >>> pub.selections.complete.deselect_nodenames('dill', 'lahn_1') Selection("complete", nodes=("lahn_2", "lahn_3"), elements=("land_dill", "land_lahn_1", "land_lahn_2", "land_lahn_3", "stream_dill_lahn_2", "stream_lahn_1_lahn_2", "stream_lahn_2_lahn_3")) """
try: selection = Selection(name) for node in self.nodes: for substring in substrings: if substring in node.name: selection.nodes += node break return selection except BaseException: values = objecttools.enumeration(substrings) objecttools.augment_excmessage( f'While trying to determine the nodes of selection ' f'`{self.name}` with names containing at least one ' f'of the given substrings `{values}`')
<SYSTEM_TASK:> Return a new selection containing all elements of the current <END_TASK> <USER_TASK:> Description: def search_elementnames(self, *substrings: str, name: str = 'elementnames') -> 'Selection': """Return a new selection containing all elements of the current selection with a name containing at least one of the given substrings. >>> from hydpy.core.examples import prepare_full_example_2 >>> hp, pub, _ = prepare_full_example_2() Pass the (sub)strings as positional arguments and, optionally, the name of the newly created |Selection| object as a keyword argument: >>> test = pub.selections.complete.copy('test') >>> from hydpy import prepare_model >>> test.search_elementnames('dill', 'lahn_1') Selection("elementnames", nodes=(), elements=("land_dill", "land_lahn_1", "stream_dill_lahn_2", "stream_lahn_1_lahn_2")) Wrong string specifications result in errors like the following: >>> test.search_elementnames(['dill', 'lahn_1']) Traceback (most recent call last): ... TypeError: While trying to determine the elements of selection \ `test` with names containing at least one of the given substrings \ `['dill', 'lahn_1']`, the following error occurred: 'in <string>' \ requires string as left operand, not list Method |Selection.select_elementnames| restricts the current selection to the one determined with the method |Selection.search_elementnames|: >>> test.select_elementnames('dill', 'lahn_1') Selection("test", nodes=("dill", "lahn_1", "lahn_2", "lahn_3"), elements=("land_dill", "land_lahn_1", "stream_dill_lahn_2", "stream_lahn_1_lahn_2")) On the contrary, the method |Selection.deselect_elementnames| restricts the current selection to all devices not determined by the method |Selection.search_elementnames|: >>> pub.selections.complete.deselect_elementnames('dill', 'lahn_1') Selection("complete", nodes=("dill", "lahn_1", "lahn_2", "lahn_3"), elements=("land_lahn_2", "land_lahn_3", "stream_lahn_2_lahn_3")) """
try: selection = Selection(name) for element in self.elements: for substring in substrings: if substring in element.name: selection.elements += element break return selection except BaseException: values = objecttools.enumeration(substrings) objecttools.augment_excmessage( f'While trying to determine the elements of selection ' f'`{self.name}` with names containing at least one ' f'of the given substrings `{values}`')
<SYSTEM_TASK:> Return a new |Selection| object with the given name and copies <END_TASK> <USER_TASK:> Description: def copy(self, name: str) -> 'Selection': """Return a new |Selection| object with the given name and copies of the handles |Nodes| and |Elements| objects based on method |Devices.copy|."""
return type(self)(name, copy.copy(self.nodes), copy.copy(self.elements))
<SYSTEM_TASK:> Save the selection as a network file. <END_TASK> <USER_TASK:> Description: def save_networkfile(self, filepath: Union[str, None] = None, write_nodes: bool = True) -> None: """Save the selection as a network file. >>> from hydpy.core.examples import prepare_full_example_2 >>> _, pub, TestIO = prepare_full_example_2() In most cases, one should conveniently write network files via method |NetworkManager.save_files| of class |NetworkManager|. However, using the method |Selection.save_networkfile| allows for additional configuration via the arguments `filepath` and `write_nodes`: >>> with TestIO(): ... pub.selections.headwaters.save_networkfile() ... with open('headwaters.py') as networkfile: ... print(networkfile.read()) # -*- coding: utf-8 -*- <BLANKLINE> from hydpy import Node, Element <BLANKLINE> <BLANKLINE> Node("dill", variable="Q", keywords="gauge") <BLANKLINE> Node("lahn_1", variable="Q", keywords="gauge") <BLANKLINE> <BLANKLINE> Element("land_dill", outlets="dill", keywords="catchment") <BLANKLINE> Element("land_lahn_1", outlets="lahn_1", keywords="catchment") <BLANKLINE> >>> with TestIO(): ... pub.selections.headwaters.save_networkfile('test.py', False) ... with open('test.py') as networkfile: ... print(networkfile.read()) # -*- coding: utf-8 -*- <BLANKLINE> from hydpy import Node, Element <BLANKLINE> <BLANKLINE> Element("land_dill", outlets="dill", keywords="catchment") <BLANKLINE> Element("land_lahn_1", outlets="lahn_1", keywords="catchment") <BLANKLINE> """
if filepath is None: filepath = self.name + '.py' with open(filepath, 'w', encoding="utf-8") as file_: file_.write('# -*- coding: utf-8 -*-\n') file_.write('\nfrom hydpy import Node, Element\n\n') if write_nodes: for node in self.nodes: file_.write('\n' + repr(node) + '\n') file_.write('\n') for element in self.elements: file_.write('\n' + repr(element) + '\n')
<SYSTEM_TASK:> Calculate the input discharge portions of the different response <END_TASK> <USER_TASK:> Description: def calc_qpin_v1(self): """Calculate the input discharge portions of the different response functions. Required derived parameters: |Nmb| |MaxQ| |DiffQ| Required flux sequence: |QIn| Calculated flux sequences: |QPIn| Examples: Initialize an arma model with three different response functions: >>> from hydpy.models.arma import * >>> parameterstep() >>> derived.nmb = 3 >>> derived.maxq.shape = 3 >>> derived.diffq.shape = 2 >>> fluxes.qpin.shape = 3 Define the maximum discharge value of the respective response functions and their successive differences: >>> derived.maxq(0.0, 2.0, 6.0) >>> derived.diffq(2., 4.) The first six examples are performed for inflow values ranging from 0 to 12 m³/s: >>> from hydpy import UnitTest >>> test = UnitTest( ... model, model.calc_qpin_v1, ... last_example=6, ... parseqs=(fluxes.qin, fluxes.qpin)) >>> test.nexts.qin = 0., 1., 2., 4., 6., 12. >>> test() | ex. | qin | qpin | ------------------------------- | 1 | 0.0 | 0.0 0.0 0.0 | | 2 | 1.0 | 1.0 0.0 0.0 | | 3 | 2.0 | 2.0 0.0 0.0 | | 4 | 4.0 | 2.0 2.0 0.0 | | 5 | 6.0 | 2.0 4.0 0.0 | | 6 | 12.0 | 2.0 4.0 6.0 | The following two additional examples are just supposed to demonstrate method |calc_qpin_v1| also functions properly if there is only one response function, wherefore total discharge does not need to be divided: >>> derived.nmb = 1 >>> derived.maxq.shape = 1 >>> derived.diffq.shape = 0 >>> fluxes.qpin.shape = 1 >>> derived.maxq(0.) >>> test = UnitTest( ... model, model.calc_qpin_v1, ... first_example=7, last_example=8, ... parseqs=(fluxes.qin, ... fluxes.qpin)) >>> test.nexts.qin = 0., 12. >>> test() | ex. | qin | qpin | --------------------- | 7 | 0.0 | 0.0 | | 8 | 12.0 | 12.0 | """
der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess for idx in range(der.nmb-1): if flu.qin < der.maxq[idx]: flu.qpin[idx] = 0. elif flu.qin < der.maxq[idx+1]: flu.qpin[idx] = flu.qin-der.maxq[idx] else: flu.qpin[idx] = der.diffq[idx] flu.qpin[der.nmb-1] = max(flu.qin-der.maxq[der.nmb-1], 0.)
<SYSTEM_TASK:> Refresh the input log sequence for the different MA processes. <END_TASK> <USER_TASK:> Description: def calc_login_v1(self): """Refresh the input log sequence for the different MA processes. Required derived parameters: |Nmb| |MA_Order| Required flux sequence: |QPIn| Updated log sequence: |LogIn| Example: Assume there are three response functions, involving one, two and three MA coefficients respectively: >>> from hydpy.models.arma import * >>> parameterstep() >>> derived.nmb(3) >>> derived.ma_order.shape = 3 >>> derived.ma_order = 1, 2, 3 >>> fluxes.qpin.shape = 3 >>> logs.login.shape = (3, 3) The "memory values" of the different MA processes are defined as follows (one row for each process): >>> logs.login = ((1.0, nan, nan), ... (2.0, 3.0, nan), ... (4.0, 5.0, 6.0)) These are the new inflow discharge portions to be included into the memories of the different processes: >>> fluxes.qpin = 7.0, 8.0, 9.0 Through applying method |calc_login_v1| all values already existing are shifted to the right ("into the past"). Values, which are no longer required due to the limited order or the different MA processes, are discarded. The new values are inserted in the first column: >>> model.calc_login_v1() >>> logs.login login([[7.0, nan, nan], [8.0, 2.0, nan], [9.0, 4.0, 5.0]]) """
der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess log = self.sequences.logs.fastaccess for idx in range(der.nmb): for jdx in range(der.ma_order[idx]-2, -1, -1): log.login[idx, jdx+1] = log.login[idx, jdx] for idx in range(der.nmb): log.login[idx, 0] = flu.qpin[idx]
<SYSTEM_TASK:> Calculate the discharge responses of the different MA processes. <END_TASK> <USER_TASK:> Description: def calc_qma_v1(self): """Calculate the discharge responses of the different MA processes. Required derived parameters: |Nmb| |MA_Order| |MA_Coefs| Required log sequence: |LogIn| Calculated flux sequence: |QMA| Examples: Assume there are three response functions, involving one, two and three MA coefficients respectively: >>> from hydpy.models.arma import * >>> parameterstep() >>> derived.nmb(3) >>> derived.ma_order.shape = 3 >>> derived.ma_order = 1, 2, 3 >>> derived.ma_coefs.shape = (3, 3) >>> logs.login.shape = (3, 3) >>> fluxes.qma.shape = 3 The coefficients of the different MA processes are stored in separate rows of the 2-dimensional parameter `ma_coefs`: >>> derived.ma_coefs = ((1.0, nan, nan), ... (0.8, 0.2, nan), ... (0.5, 0.3, 0.2)) The "memory values" of the different MA processes are defined as follows (one row for each process). The current values are stored in first column, the values of the last time step in the second column, and so on: >>> logs.login = ((1.0, nan, nan), ... (2.0, 3.0, nan), ... (4.0, 5.0, 6.0)) Applying method |calc_qma_v1| is equivalent to calculating the inner product of the different rows of both matrices: >>> model.calc_qma_v1() >>> fluxes.qma qma(1.0, 2.2, 4.7) """
der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess log = self.sequences.logs.fastaccess for idx in range(der.nmb): flu.qma[idx] = 0. for jdx in range(der.ma_order[idx]): flu.qma[idx] += der.ma_coefs[idx, jdx] * log.login[idx, jdx]
<SYSTEM_TASK:> Calculate the discharge responses of the different AR processes. <END_TASK> <USER_TASK:> Description: def calc_qar_v1(self): """Calculate the discharge responses of the different AR processes. Required derived parameters: |Nmb| |AR_Order| |AR_Coefs| Required log sequence: |LogOut| Calculated flux sequence: |QAR| Examples: Assume there are four response functions, involving zero, one, two, and three AR coefficients respectively: >>> from hydpy.models.arma import * >>> parameterstep() >>> derived.nmb(4) >>> derived.ar_order.shape = 4 >>> derived.ar_order = 0, 1, 2, 3 >>> derived.ar_coefs.shape = (4, 3) >>> logs.logout.shape = (4, 3) >>> fluxes.qar.shape = 4 The coefficients of the different AR processes are stored in separate rows of the 2-dimensional parameter `ma_coefs`. Note the special case of the first AR process of zero order (first row), which involves no autoregressive memory at all: >>> derived.ar_coefs = ((nan, nan, nan), ... (1.0, nan, nan), ... (0.8, 0.2, nan), ... (0.5, 0.3, 0.2)) The "memory values" of the different AR processes are defined as follows (one row for each process). The values of the last time step are stored in first column, the values of the last time step in the second column, and so on: >>> logs.logout = ((nan, nan, nan), ... (1.0, nan, nan), ... (2.0, 3.0, nan), ... (4.0, 5.0, 6.0)) Applying method |calc_qar_v1| is equivalent to calculating the inner product of the different rows of both matrices: >>> model.calc_qar_v1() >>> fluxes.qar qar(0.0, 1.0, 2.2, 4.7) """
der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess log = self.sequences.logs.fastaccess for idx in range(der.nmb): flu.qar[idx] = 0. for jdx in range(der.ar_order[idx]): flu.qar[idx] += der.ar_coefs[idx, jdx] * log.logout[idx, jdx]
<SYSTEM_TASK:> Calculate the ARMA results for the different response functions. <END_TASK> <USER_TASK:> Description: def calc_qpout_v1(self): """Calculate the ARMA results for the different response functions. Required derived parameter: |Nmb| Required flux sequences: |QMA| |QAR| Calculated flux sequence: |QPOut| Examples: Initialize an arma model with three different response functions: >>> from hydpy.models.arma import * >>> parameterstep() >>> derived.nmb(3) >>> fluxes.qma.shape = 3 >>> fluxes.qar.shape = 3 >>> fluxes.qpout.shape = 3 Define the output values of the MA and of the AR processes associated with the three response functions and apply method |calc_qpout_v1|: >>> fluxes.qar = 4.0, 5.0, 6.0 >>> fluxes.qma = 1.0, 2.0, 3.0 >>> model.calc_qpout_v1() >>> fluxes.qpout qpout(5.0, 7.0, 9.0) """
der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess for idx in range(der.nmb): flu.qpout[idx] = flu.qma[idx]+flu.qar[idx]
<SYSTEM_TASK:> Refresh the log sequence for the different AR processes. <END_TASK> <USER_TASK:> Description: def calc_logout_v1(self): """Refresh the log sequence for the different AR processes. Required derived parameters: |Nmb| |AR_Order| Required flux sequence: |QPOut| Updated log sequence: |LogOut| Example: Assume there are four response functions, involving zero, one, two and three AR coefficients respectively: >>> from hydpy.models.arma import * >>> parameterstep() >>> derived.nmb(4) >>> derived.ar_order.shape = 4 >>> derived.ar_order = 0, 1, 2, 3 >>> fluxes.qpout.shape = 4 >>> logs.logout.shape = (4, 3) The "memory values" of the different AR processes are defined as follows (one row for each process). Note the special case of the first AR process of zero order (first row), which is why there are no autoregressive memory values required: >>> logs.logout = ((nan, nan, nan), ... (0.0, nan, nan), ... (1.0, 2.0, nan), ... (3.0, 4.0, 5.0)) These are the new outflow discharge portions to be included into the memories of the different processes: >>> fluxes.qpout = 6.0, 7.0, 8.0, 9.0 Through applying method |calc_logout_v1| all values already existing are shifted to the right ("into the past"). Values, which are no longer required due to the limited order or the different AR processes, are discarded. The new values are inserted in the first column: >>> model.calc_logout_v1() >>> logs.logout logout([[nan, nan, nan], [7.0, nan, nan], [8.0, 1.0, nan], [9.0, 3.0, 4.0]]) """
der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess log = self.sequences.logs.fastaccess for idx in range(der.nmb): for jdx in range(der.ar_order[idx]-2, -1, -1): log.logout[idx, jdx+1] = log.logout[idx, jdx] for idx in range(der.nmb): if der.ar_order[idx] > 0: log.logout[idx, 0] = flu.qpout[idx]
<SYSTEM_TASK:> Sum up the results of the different response functions. <END_TASK> <USER_TASK:> Description: def calc_qout_v1(self): """Sum up the results of the different response functions. Required derived parameter: |Nmb| Required flux sequences: |QPOut| Calculated flux sequence: |QOut| Examples: Initialize an arma model with three different response functions: >>> from hydpy.models.arma import * >>> parameterstep() >>> derived.nmb(3) >>> fluxes.qpout.shape = 3 Define the output values of the three response functions and apply method |calc_qout_v1|: >>> fluxes.qpout = 1.0, 2.0, 3.0 >>> model.calc_qout_v1() >>> fluxes.qout qout(6.0) """
der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess flu.qout = 0. for idx in range(der.nmb): flu.qout += flu.qpout[idx]
<SYSTEM_TASK:> Determine the number of branches <END_TASK> <USER_TASK:> Description: def update(self): """Determine the number of branches"""
con = self.subpars.pars.control self(con.ypoints.shape[0])
<SYSTEM_TASK:> Update value based on the actual |calc_qg_v1| method. <END_TASK> <USER_TASK:> Description: def update(self): """Update value based on the actual |calc_qg_v1| method. Required derived parameter: |H| Note that the value of parameter |lstream_derived.QM| is directly related to the value of parameter |HM| and indirectly related to all parameters values relevant for method |calc_qg_v1|. Hence the complete paramter (and sequence) requirements might differ for various application models. For examples, see the documentation on method ToDo. """
mod = self.subpars.pars.model con = mod.parameters.control flu = mod.sequences.fluxes flu.h = con.hm mod.calc_qg() self(flu.qg)
<SYSTEM_TASK:> Determines in how many segments the whole reach needs to be <END_TASK> <USER_TASK:> Description: def update(self): """Determines in how many segments the whole reach needs to be divided to approximate the desired lag time via integer rounding. Adjusts the shape of sequence |QJoints| additionally. Required control parameters: |Lag| Calculated derived parameters: |NmbSegments| Prepared state sequence: |QJoints| Examples: Define a lag time of 1.4 days and a simulation step size of 12 hours: >>> from hydpy.models.hstream import * >>> parameterstep('1d') >>> simulationstep('12h') >>> lag(1.4) Then the actual lag value for the simulation step size is 2.8 >>> lag lag(1.4) >>> lag.value 2.8 Through rounding the number of segments is determined: >>> derived.nmbsegments.update() >>> derived.nmbsegments nmbsegments(3) The number of joints is always the number of segments plus one: >>> states.qjoints.shape (4,) """
pars = self.subpars.pars self(int(round(pars.control.lag))) pars.model.sequences.states.qjoints.shape = self+1
<SYSTEM_TASK:> View the supplied data in an interactive, graphical table widget. <END_TASK> <USER_TASK:> Description: def view(data, enc=None, start_pos=None, delimiter=None, hdr_rows=None, idx_cols=None, sheet_index=0, transpose=False, wait=None, recycle=None, detach=None, metavar=None, title=None): """View the supplied data in an interactive, graphical table widget. data: When a valid path or IO object, read it as a tabular text file. When a valid URI, a Blaze object is constructed and visualized. Any other supported datatype is visualized directly and incrementally *without copying*. enc: File encoding (such as "utf-8", normally autodetected). delimiter: Text file delimiter (normally autodetected). hdr_rows: For files or lists of lists, specify the number of header rows. For files only, a default of one header line is assumed. idx_cols: For files or lists of lists, specify the number of index columns. By default, no index is assumed. sheet_index: For multi-table files (such as xls[x]), specify the sheet index to read, starting from 0. Defaults to the first. start_pos: A tuple of the form (y, x) specifying the initial cursor position. Negative offsets count from the end of the dataset. transpose: Transpose the resulting view. metavar: name of the variable being shown for display purposes (inferred automatically when possible). title: title of the data window. wait: Wait for the user to close the view before returning. By default, try to match the behavior of ``matplotlib.is_interactive()``. If matplotlib is not loaded, wait only if ``detach`` is also False. The default value can also be set through ``gtabview.WAIT``. recycle: Recycle the previous window instead of creating a new one. The default is True, and can also be set through ``gtabview.RECYCLE``. detach: Create a fully detached GUI thread for interactive use (note: this is *not* necessary if matplotlib is loaded). The default is False, and can also be set through ``gtabview.DETACH``. """
global WAIT, RECYCLE, DETACH, VIEW model = read_model(data, enc=enc, delimiter=delimiter, hdr_rows=hdr_rows, idx_cols=idx_cols, sheet_index=sheet_index, transpose=transpose) if model is None: warnings.warn("cannot visualize the supplied data type: {}".format(type(data)), category=RuntimeWarning) return None # setup defaults if wait is None: wait = WAIT if recycle is None: recycle = RECYCLE if detach is None: detach = DETACH if wait is None: if 'matplotlib' not in sys.modules: wait = not bool(detach) else: import matplotlib.pyplot as plt wait = not plt.isinteractive() # try to fetch the variable name in the upper stack if metavar is None: if isinstance(data, basestring): metavar = data else: metavar = _varname_in_stack(data, 1) # create a view controller if VIEW is None: if not detach: VIEW = ViewController() else: VIEW = DetachedViewController() VIEW.setDaemon(True) VIEW.start() if VIEW.is_detached(): atexit.register(VIEW.exit) else: VIEW = None return None # actually show the data view_kwargs = {'hdr_rows': hdr_rows, 'idx_cols': idx_cols, 'start_pos': start_pos, 'metavar': metavar, 'title': title} VIEW.view(model, view_kwargs, wait=wait, recycle=recycle) return VIEW
<SYSTEM_TASK:> Get and clear the current |Node| and |Element| registries. <END_TASK> <USER_TASK:> Description: def gather_registries() -> Tuple[Dict, Mapping, Mapping]: """Get and clear the current |Node| and |Element| registries. Function |gather_registries| is thought to be used by class |Tester| only. """
id2devices = copy.copy(_id2devices) registry = copy.copy(_registry) selection = copy.copy(_selection) dict_ = globals() dict_['_id2devices'] = {} dict_['_registry'] = {Node: {}, Element: {}} dict_['_selection'] = {Node: {}, Element: {}} return id2devices, registry, selection
<SYSTEM_TASK:> Reset the current |Node| and |Element| registries. <END_TASK> <USER_TASK:> Description: def reset_registries(dicts: Tuple[Dict, Mapping, Mapping]): """Reset the current |Node| and |Element| registries. Function |reset_registries| is thought to be used by class |Tester| only. """
dict_ = globals() dict_['_id2devices'] = dicts[0] dict_['_registry'] = dicts[1] dict_['_selection'] = dicts[2]
<SYSTEM_TASK:> Return a list of all keywords starting with the given string. <END_TASK> <USER_TASK:> Description: def startswith(self, name: str) -> List[str]: """Return a list of all keywords starting with the given string. >>> from hydpy.core.devicetools import Keywords >>> keywords = Keywords('first_keyword', 'second_keyword', ... 'keyword_3', 'keyword_4', ... 'keyboard') >>> keywords.startswith('keyword') ['keyword_3', 'keyword_4'] """
return sorted(keyword for keyword in self if keyword.startswith(name))
<SYSTEM_TASK:> Return a list of all keywords ending with the given string. <END_TASK> <USER_TASK:> Description: def endswith(self, name: str) -> List[str]: """Return a list of all keywords ending with the given string. >>> from hydpy.core.devicetools import Keywords >>> keywords = Keywords('first_keyword', 'second_keyword', ... 'keyword_3', 'keyword_4', ... 'keyboard') >>> keywords.endswith('keyword') ['first_keyword', 'second_keyword'] """
return sorted(keyword for keyword in self if keyword.endswith(name))
<SYSTEM_TASK:> Return a list of all keywords containing the given string. <END_TASK> <USER_TASK:> Description: def contains(self, name: str) -> List[str]: """Return a list of all keywords containing the given string. >>> from hydpy.core.devicetools import Keywords >>> keywords = Keywords('first_keyword', 'second_keyword', ... 'keyword_3', 'keyword_4', ... 'keyboard') >>> keywords.contains('keyword') ['first_keyword', 'keyword_3', 'keyword_4', 'second_keyword'] """
return sorted(keyword for keyword in self if name in keyword)