Dataset columns:
text_prompt — string (lengths 100 to 17.7k)
code_prompt — string (lengths 7 to 9.86k)
<SYSTEM_TASK:> Corresponds to IDD Field `month` <END_TASK> <USER_TASK:>
Description:
def month(self, value=None):
    """Corresponds to IDD Field `month`

    Args:
        value (int): value for IDD Field `month`
            value >= 1
            value <= 12
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} needs to be of type int for field `month`'.format(value))
        if value < 1:
            raise ValueError('value needs to be greater than or equal to 1 for field `month`')
        if value > 12:
            raise ValueError('value needs to be less than or equal to 12 for field `month`')
    self._month = value
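The field setters below all repeat this validate-and-store pattern. As a rough, self-contained sketch of how such a setter is typically wired up as a property (the minimal class and wiring here are illustrative assumptions, not taken from the source):

class WeatherData(object):
    """Minimal sketch: one EPW-style field with range validation."""

    def __init__(self):
        self._month = None

    @property
    def month(self):
        return self._month

    @month.setter
    def month(self, value=None):
        if value is not None:
            value = int(value)  # raises ValueError on non-integer input
            if not 1 <= value <= 12:
                raise ValueError('value needs to be between 1 and 12 for field `month`')
        self._month = value

wd = WeatherData()
wd.month = 7      # stored after validation
wd.month = None   # treated as a missing value, stored unchecked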
<SYSTEM_TASK:> Corresponds to IDD Field `day` <END_TASK> <USER_TASK:>
Description:
def day(self, value=None):
    """Corresponds to IDD Field `day`

    Args:
        value (int): value for IDD Field `day`
            value >= 1
            value <= 31
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} needs to be of type int for field `day`'.format(value))
        if value < 1:
            raise ValueError('value needs to be greater than or equal to 1 for field `day`')
        if value > 31:
            raise ValueError('value needs to be less than or equal to 31 for field `day`')
    self._day = value
<SYSTEM_TASK:> Corresponds to IDD Field `hour` <END_TASK> <USER_TASK:>
Description:
def hour(self, value=None):
    """Corresponds to IDD Field `hour`

    Args:
        value (int): value for IDD Field `hour`
            value >= 1
            value <= 24
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} needs to be of type int for field `hour`'.format(value))
        if value < 1:
            raise ValueError('value needs to be greater than or equal to 1 for field `hour`')
        if value > 24:
            raise ValueError('value needs to be less than or equal to 24 for field `hour`')
    self._hour = value
<SYSTEM_TASK:> Corresponds to IDD Field `minute` <END_TASK> <USER_TASK:>
Description:
def minute(self, value=None):
    """Corresponds to IDD Field `minute`

    Args:
        value (int): value for IDD Field `minute`
            value >= 0
            value <= 60
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} needs to be of type int for field `minute`'.format(value))
        if value < 0:
            raise ValueError('value needs to be greater than or equal to 0 for field `minute`')
        if value > 60:
            raise ValueError('value needs to be less than or equal to 60 for field `minute`')
    self._minute = value
<SYSTEM_TASK:> Corresponds to IDD Field `dry_bulb_temperature` <END_TASK> <USER_TASK:>
Description:
def dry_bulb_temperature(self, value=99.9):
    """Corresponds to IDD Field `dry_bulb_temperature`

    Args:
        value (float): value for IDD Field `dry_bulb_temperature`
            Unit: C
            value > -70.0
            value < 70.0
            Missing value: 99.9
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `dry_bulb_temperature`'.format(value))
        if value <= -70.0:
            raise ValueError('value needs to be greater than -70.0 for field `dry_bulb_temperature`')
        if value >= 70.0:
            raise ValueError('value needs to be less than 70.0 for field `dry_bulb_temperature`')
    self._dry_bulb_temperature = value
<SYSTEM_TASK:> Corresponds to IDD Field `dew_point_temperature` <END_TASK> <USER_TASK:>
Description:
def dew_point_temperature(self, value=99.9):
    """Corresponds to IDD Field `dew_point_temperature`

    Args:
        value (float): value for IDD Field `dew_point_temperature`
            Unit: C
            value > -70.0
            value < 70.0
            Missing value: 99.9
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `dew_point_temperature`'.format(value))
        if value <= -70.0:
            raise ValueError('value needs to be greater than -70.0 for field `dew_point_temperature`')
        if value >= 70.0:
            raise ValueError('value needs to be less than 70.0 for field `dew_point_temperature`')
    self._dew_point_temperature = value
<SYSTEM_TASK:> Corresponds to IDD Field `relative_humidity` <END_TASK> <USER_TASK:>
Description:
def relative_humidity(self, value=999):
    """Corresponds to IDD Field `relative_humidity`

    Args:
        value (int): value for IDD Field `relative_humidity`
            value >= 0
            value <= 110
            Missing value: 999
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} needs to be of type int for field `relative_humidity`'.format(value))
        if value < 0:
            raise ValueError('value needs to be greater than or equal to 0 for field `relative_humidity`')
        if value > 110:
            raise ValueError('value needs to be less than or equal to 110 for field `relative_humidity`')
    self._relative_humidity = value
<SYSTEM_TASK:> Corresponds to IDD Field `atmospheric_station_pressure` <END_TASK> <USER_TASK:>
Description:
def atmospheric_station_pressure(self, value=999999):
    """Corresponds to IDD Field `atmospheric_station_pressure`

    Args:
        value (int): value for IDD Field `atmospheric_station_pressure`
            Unit: Pa
            value > 31000
            value < 120000
            Missing value: 999999
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} needs to be of type int for field `atmospheric_station_pressure`'.format(value))
        if value <= 31000:
            raise ValueError('value needs to be greater than 31000 for field `atmospheric_station_pressure`')
        if value >= 120000:
            raise ValueError('value needs to be less than 120000 for field `atmospheric_station_pressure`')
    self._atmospheric_station_pressure = value
<SYSTEM_TASK:> Corresponds to IDD Field `extraterrestrial_horizontal_radiation` <END_TASK> <USER_TASK:>
Description:
def extraterrestrial_horizontal_radiation(self, value=9999.0):
    """Corresponds to IDD Field `extraterrestrial_horizontal_radiation`

    Args:
        value (float): value for IDD Field `extraterrestrial_horizontal_radiation`
            Unit: Wh/m2
            value >= 0.0
            Missing value: 9999.0
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `extraterrestrial_horizontal_radiation`'.format(value))
        if value < 0.0:
            raise ValueError('value needs to be greater than or equal to 0.0 for field `extraterrestrial_horizontal_radiation`')
    self._extraterrestrial_horizontal_radiation = value
<SYSTEM_TASK:> Corresponds to IDD Field `extraterrestrial_direct_normal_radiation` <END_TASK> <USER_TASK:>
Description:
def extraterrestrial_direct_normal_radiation(self, value=9999.0):
    """Corresponds to IDD Field `extraterrestrial_direct_normal_radiation`

    Args:
        value (float): value for IDD Field `extraterrestrial_direct_normal_radiation`
            Unit: Wh/m2
            value >= 0.0
            Missing value: 9999.0
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `extraterrestrial_direct_normal_radiation`'.format(value))
        if value < 0.0:
            raise ValueError('value needs to be greater than or equal to 0.0 for field `extraterrestrial_direct_normal_radiation`')
    self._extraterrestrial_direct_normal_radiation = value
<SYSTEM_TASK:> Corresponds to IDD Field `horizontal_infrared_radiation_intensity` <END_TASK> <USER_TASK:>
Description:
def horizontal_infrared_radiation_intensity(self, value=9999.0):
    """Corresponds to IDD Field `horizontal_infrared_radiation_intensity`

    Args:
        value (float): value for IDD Field `horizontal_infrared_radiation_intensity`
            Unit: Wh/m2
            value >= 0.0
            Missing value: 9999.0
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `horizontal_infrared_radiation_intensity`'.format(value))
        if value < 0.0:
            raise ValueError('value needs to be greater than or equal to 0.0 for field `horizontal_infrared_radiation_intensity`')
    self._horizontal_infrared_radiation_intensity = value
<SYSTEM_TASK:> Corresponds to IDD Field `global_horizontal_radiation` <END_TASK> <USER_TASK:>
Description:
def global_horizontal_radiation(self, value=9999.0):
    """Corresponds to IDD Field `global_horizontal_radiation`

    Args:
        value (float): value for IDD Field `global_horizontal_radiation`
            Unit: Wh/m2
            value >= 0.0
            Missing value: 9999.0
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `global_horizontal_radiation`'.format(value))
        if value < 0.0:
            raise ValueError('value needs to be greater than or equal to 0.0 for field `global_horizontal_radiation`')
    self._global_horizontal_radiation = value
<SYSTEM_TASK:> Corresponds to IDD Field `direct_normal_radiation` <END_TASK> <USER_TASK:>
Description:
def direct_normal_radiation(self, value=9999.0):
    """Corresponds to IDD Field `direct_normal_radiation`

    Args:
        value (float): value for IDD Field `direct_normal_radiation`
            Unit: Wh/m2
            value >= 0.0
            Missing value: 9999.0
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `direct_normal_radiation`'.format(value))
        if value < 0.0:
            raise ValueError('value needs to be greater than or equal to 0.0 for field `direct_normal_radiation`')
    self._direct_normal_radiation = value
<SYSTEM_TASK:> Corresponds to IDD Field `diffuse_horizontal_radiation` <END_TASK> <USER_TASK:>
Description:
def diffuse_horizontal_radiation(self, value=9999.0):
    """Corresponds to IDD Field `diffuse_horizontal_radiation`

    Args:
        value (float): value for IDD Field `diffuse_horizontal_radiation`
            Unit: Wh/m2
            value >= 0.0
            Missing value: 9999.0
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `diffuse_horizontal_radiation`'.format(value))
        if value < 0.0:
            raise ValueError('value needs to be greater than or equal to 0.0 for field `diffuse_horizontal_radiation`')
    self._diffuse_horizontal_radiation = value
<SYSTEM_TASK:> Corresponds to IDD Field `global_horizontal_illuminance` <END_TASK> <USER_TASK:>
Description:
def global_horizontal_illuminance(self, value=999999.0):
    """Corresponds to IDD Field `global_horizontal_illuminance`
    will be missing if >= 999900

    Args:
        value (float): value for IDD Field `global_horizontal_illuminance`
            Unit: lux
            value >= 0.0
            Missing value: 999999.0
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `global_horizontal_illuminance`'.format(value))
        if value < 0.0:
            raise ValueError('value needs to be greater than or equal to 0.0 for field `global_horizontal_illuminance`')
    self._global_horizontal_illuminance = value
<SYSTEM_TASK:> Corresponds to IDD Field `direct_normal_illuminance` <END_TASK> <USER_TASK:>
Description:
def direct_normal_illuminance(self, value=999999.0):
    """Corresponds to IDD Field `direct_normal_illuminance`
    will be missing if >= 999900

    Args:
        value (float): value for IDD Field `direct_normal_illuminance`
            Unit: lux
            value >= 0.0
            Missing value: 999999.0
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `direct_normal_illuminance`'.format(value))
        if value < 0.0:
            raise ValueError('value needs to be greater than or equal to 0.0 for field `direct_normal_illuminance`')
    self._direct_normal_illuminance = value
<SYSTEM_TASK:> Corresponds to IDD Field `diffuse_horizontal_illuminance` <END_TASK> <USER_TASK:>
Description:
def diffuse_horizontal_illuminance(self, value=999999.0):
    """Corresponds to IDD Field `diffuse_horizontal_illuminance`
    will be missing if >= 999900

    Args:
        value (float): value for IDD Field `diffuse_horizontal_illuminance`
            Unit: lux
            value >= 0.0
            Missing value: 999999.0
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `diffuse_horizontal_illuminance`'.format(value))
        if value < 0.0:
            raise ValueError('value needs to be greater than or equal to 0.0 for field `diffuse_horizontal_illuminance`')
    self._diffuse_horizontal_illuminance = value
<SYSTEM_TASK:> Corresponds to IDD Field `zenith_luminance` <END_TASK> <USER_TASK:>
Description:
def zenith_luminance(self, value=9999.0):
    """Corresponds to IDD Field `zenith_luminance`
    will be missing if >= 9999

    Args:
        value (float): value for IDD Field `zenith_luminance`
            Unit: Cd/m2
            value >= 0.0
            Missing value: 9999.0
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `zenith_luminance`'.format(value))
        if value < 0.0:
            raise ValueError('value needs to be greater than or equal to 0.0 for field `zenith_luminance`')
    self._zenith_luminance = value
<SYSTEM_TASK:> Corresponds to IDD Field `wind_direction` <END_TASK> <USER_TASK:>
Description:
def wind_direction(self, value=999.0):
    """Corresponds to IDD Field `wind_direction`

    Args:
        value (float): value for IDD Field `wind_direction`
            Unit: degrees
            value >= 0.0
            value <= 360.0
            Missing value: 999.0
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `wind_direction`'.format(value))
        if value < 0.0:
            raise ValueError('value needs to be greater than or equal to 0.0 for field `wind_direction`')
        if value > 360.0:
            raise ValueError('value needs to be less than or equal to 360.0 for field `wind_direction`')
    self._wind_direction = value
<SYSTEM_TASK:> Corresponds to IDD Field `wind_speed` <END_TASK> <USER_TASK:>
Description:
def wind_speed(self, value=999.0):
    """Corresponds to IDD Field `wind_speed`

    Args:
        value (float): value for IDD Field `wind_speed`
            Unit: m/s
            value >= 0.0
            value <= 40.0
            Missing value: 999.0
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `wind_speed`'.format(value))
        if value < 0.0:
            raise ValueError('value needs to be greater than or equal to 0.0 for field `wind_speed`')
        if value > 40.0:
            raise ValueError('value needs to be less than or equal to 40.0 for field `wind_speed`')
    self._wind_speed = value
<SYSTEM_TASK:> Corresponds to IDD Field `present_weather_codes` <END_TASK> <USER_TASK:>
Description:
def present_weather_codes(self, value=None):
    """Corresponds to IDD Field `present_weather_codes`

    Args:
        value (int): value for IDD Field `present_weather_codes`
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} needs to be of type int for field `present_weather_codes`'.format(value))
    self._present_weather_codes = value
<SYSTEM_TASK:> Corresponds to IDD Field `precipitable_water` <END_TASK> <USER_TASK:>
Description:
def precipitable_water(self, value=999.0):
    """Corresponds to IDD Field `precipitable_water`

    Args:
        value (float): value for IDD Field `precipitable_water`
            Unit: mm
            Missing value: 999.0
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `precipitable_water`'.format(value))
    self._precipitable_water = value
<SYSTEM_TASK:> Corresponds to IDD Field `aerosol_optical_depth` <END_TASK> <USER_TASK:>
Description:
def aerosol_optical_depth(self, value=0.999):
    """Corresponds to IDD Field `aerosol_optical_depth`

    Args:
        value (float): value for IDD Field `aerosol_optical_depth`
            Unit: thousandths
            Missing value: 0.999
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `aerosol_optical_depth`'.format(value))
    self._aerosol_optical_depth = value
<SYSTEM_TASK:> Corresponds to IDD Field `snow_depth` <END_TASK> <USER_TASK:>
Description:
def snow_depth(self, value=999.0):
    """Corresponds to IDD Field `snow_depth`

    Args:
        value (float): value for IDD Field `snow_depth`
            Unit: cm
            Missing value: 999.0
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `snow_depth`'.format(value))
    self._snow_depth = value
<SYSTEM_TASK:> Corresponds to IDD Field `days_since_last_snowfall` <END_TASK> <USER_TASK:>
Description:
def days_since_last_snowfall(self, value=99):
    """Corresponds to IDD Field `days_since_last_snowfall`

    Args:
        value (int): value for IDD Field `days_since_last_snowfall`
            Missing value: 99
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError('value {} needs to be of type int for field `days_since_last_snowfall`'.format(value))
    self._days_since_last_snowfall = value
<SYSTEM_TASK:> Corresponds to IDD Field `albedo` <END_TASK> <USER_TASK:>
Description:
def albedo(self, value=999.0):
    """Corresponds to IDD Field `albedo`

    Args:
        value (float): value for IDD Field `albedo`
            Missing value: 999.0
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `albedo`'.format(value))
    self._albedo = value
<SYSTEM_TASK:> Corresponds to IDD Field `liquid_precipitation_depth` <END_TASK> <USER_TASK:>
Description:
def liquid_precipitation_depth(self, value=999.0):
    """Corresponds to IDD Field `liquid_precipitation_depth`

    Args:
        value (float): value for IDD Field `liquid_precipitation_depth`
            Unit: mm
            Missing value: 999.0
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `liquid_precipitation_depth`'.format(value))
    self._liquid_precipitation_depth = value
<SYSTEM_TASK:> Corresponds to IDD Field `liquid_precipitation_quantity` <END_TASK> <USER_TASK:>
Description:
def liquid_precipitation_quantity(self, value=99.0):
    """Corresponds to IDD Field `liquid_precipitation_quantity`

    Args:
        value (float): value for IDD Field `liquid_precipitation_quantity`
            Unit: hr
            Missing value: 99.0
            if `value` is None it will not be checked against the specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} needs to be of type float for field `liquid_precipitation_quantity`'.format(value))
    self._liquid_precipitation_quantity = value
<SYSTEM_TASK:> Appends weather data. <END_TASK> <USER_TASK:>
Description:
def add_weatherdata(self, data):
    """Appends weather data.

    Args:
        data (WeatherData): weather data object
    """
    if not isinstance(data, WeatherData):
        raise ValueError('Weather data needs to be of type WeatherData')
    self._data["WEATHER DATA"].append(data)
<SYSTEM_TASK:> Creates an object depending on `internal_name` <END_TASK> <USER_TASK:>
Description:
def _create_datadict(cls, internal_name):
    """Creates an object depending on `internal_name`

    Args:
        internal_name (str): IDD name

    Raises:
        ValueError: if `internal_name` cannot be matched to a data dictionary object
    """
    if internal_name == "LOCATION":
        return Location()
    if internal_name == "DESIGN CONDITIONS":
        return DesignConditions()
    if internal_name == "TYPICAL/EXTREME PERIODS":
        return TypicalOrExtremePeriods()
    if internal_name == "GROUND TEMPERATURES":
        return GroundTemperatures()
    if internal_name == "HOLIDAYS/DAYLIGHT SAVINGS":
        return HolidaysOrDaylightSavings()
    if internal_name == "COMMENTS 1":
        return Comments1()
    if internal_name == "COMMENTS 2":
        return Comments2()
    if internal_name == "DATA PERIODS":
        return DataPeriods()
    raise ValueError(
        "No DataDictionary known for {}".format(internal_name))
<SYSTEM_TASK:> Read EPW weather data from path. <END_TASK> <USER_TASK:>
Description:
def read(self, path):
    """Read EPW weather data from path.

    Args:
        path (str): path to read weather data from
    """
    with open(path, "r") as f:
        for line in f:
            line = line.strip()
            match_obj_name = re.search(r"^([A-Z][A-Z/ \d]+),", line)
            if match_obj_name is not None:
                internal_name = match_obj_name.group(1)
                if internal_name in self._data:
                    self._data[internal_name] = self._create_datadict(
                        internal_name)
                    data_line = line[len(internal_name) + 1:]
                    vals = data_line.strip().split(',')
                    self._data[internal_name].read(vals)
            else:
                wd = WeatherData()
                wd.read(line.strip().split(','))
                self.add_weatherdata(wd)
<SYSTEM_TASK:> Display a series of plots controlled by sliders. The function relies on Python string format functionality to index through a series of plots. <END_TASK> <USER_TASK:>
Description:
def display_plots(filebase, directory=None, width=700, height=500, **kwargs):
    """Display a series of plots controlled by sliders. The function relies on
    Python string format functionality to index through a series of plots."""
    def show_figure(filebase, directory, **kwargs):
        """Helper function to load in the relevant plot for display."""
        filename = filebase.format(**kwargs)
        if directory is not None:
            filename = directory + '/' + filename
        display(HTML("<img src='{filename}'>".format(filename=filename)))

    interact(show_figure, filebase=fixed(filebase),
             directory=fixed(directory), **kwargs)
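A hedged usage sketch (the filenames and slider range are hypothetical; `display_plots` assumes `interact`, `fixed`, `display`, and `HTML` from IPython/ipywidgets are imported in the module):

# In a Jupyter notebook, assuming figures fig000.svg ... fig100.svg exist:
display_plots('fig{sample:0>3}.svg', directory='./diagrams', sample=(0, 100))
# `sample` becomes a slider; each slider position formats the filename
# and displays the corresponding image inline.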
<SYSTEM_TASK:> Returns the answers to the lab classes. <END_TASK> <USER_TASK:>
Description:
def answer(part, module='mlai2014.json'):
    """Returns the answers to the lab classes."""
    with open(os.path.join(data_directory, module), 'r') as f:
        marks = json.load(f)
    return marks['Lab ' + str(part + 1)]
<SYSTEM_TASK:> Gives a latex representation of the assessment. <END_TASK> <USER_TASK:>
Description:
def latex(self):
    """Gives a latex representation of the assessment."""
    output = self.latex_preamble
    output += self._repr_latex_()
    output += self.latex_post
    return output
<SYSTEM_TASK:> Returns an empty pandas DataFrame object containing rows and columns for marking. This can then be passed to a Google doc that is distributed to markers for editing with the mark for each section. <END_TASK> <USER_TASK:>
Description:
def marksheet(self):
    """Returns an empty pandas DataFrame object containing rows and columns
    for marking. This can then be passed to a Google doc that is distributed
    to markers for editing with the mark for each section."""
    columns = ['Number', 'Question', 'Correct (a fraction)', 'Max Mark', 'Comments']
    mark_sheet = pd.DataFrame()
    for qu_number, question in enumerate(self.answers):
        part_no = 0
        for number, part in enumerate(question):
            if number > 0:
                if part[2] > 0:
                    part_no += 1
                    index = str(qu_number + 1) + '_' + str(part_no)
                    frame = pd.DataFrame(columns=columns, index=[index])
                    frame.loc[index]['Number'] = index
                    frame.loc[index]['Question'] = part[0]
                    frame.loc[index]['Max Mark'] = part[2]
                    mark_sheet = mark_sheet.append(frame)
    return mark_sheet.sort_values(by='Number')
<SYSTEM_TASK:> Compute the total mark for the assessment. <END_TASK> <USER_TASK:>
Description:
def total_marks(self):
    """Compute the total mark for the assessment."""
    total = 0
    for answer in self.answers:
        for number, part in enumerate(answer):
            if number > 0:
                if part[2] > 0:
                    total += part[2]
    return total
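A worked toy example under the structure the loop assumes (each question is a list whose elements after the first are parts, with the maximum mark at index 2; the data below is invented for illustration):

answers = [
    ['Question 1', ('part a', 'answer text', 2), ('part b', 'answer text', 3)],
    ['Question 2', ('part a', 'answer text', 5)],
]
total = 0
for answer in answers:
    for number, part in enumerate(answer):
        if number > 0 and part[2] > 0:  # skip the preamble, count marked parts
            total += part[2]
print(total)  # -> 10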
<SYSTEM_TASK:> Return a class category that shows the encoding <END_TASK> <USER_TASK:>
Description:
def discrete(cats, name='discrete'):
    """Return a class category that shows the encoding"""
    import json
    ks = list(cats)
    for key in ks:
        if isinstance(key, bytes):
            cats[key.decode('utf-8')] = cats.pop(key)
    return 'discrete(' + json.dumps([cats, name]) + ')'
<SYSTEM_TASK:> Remove a data set from the cache <END_TASK> <USER_TASK:>
Description:
def clear_cache(dataset_name=None):
    """Remove a data set from the cache"""
    dr = data_resources[dataset_name]
    if 'dirs' in dr:
        for dirs, files in zip(dr['dirs'], dr['files']):
            for dir, file in zip(dirs, files):
                path = os.path.join(data_path, dataset_name, dir, file)
                if os.path.exists(path):
                    logging.info("clear_cache: removing " + path)
                    os.unlink(path)
            for dir in dirs:
                path = os.path.join(data_path, dataset_name, dir)
                if os.path.exists(path):
                    logging.info("clear_cache: remove directory " + path)
                    os.rmdir(path)
    else:
        for file_list in dr['files']:
            for file in file_list:
                path = os.path.join(data_path, dataset_name, file)
                if os.path.exists(path):
                    logging.info("clear_cache: remove " + path)
                    os.unlink(path)
<SYSTEM_TASK:> Take a pods data set and write it as an ARFF file <END_TASK> <USER_TASK:>
Description:
def to_arff(dataset, **kwargs):
    """Take a pods data set and write it as an ARFF file"""
    pods_data = dataset(**kwargs)
    vals = list(kwargs.values())
    for i, v in enumerate(vals):
        if isinstance(v, list):
            vals[i] = '|'.join(v)
        else:
            vals[i] = str(v)
    args = '_'.join(vals)
    n = dataset.__name__
    if len(args) > 0:
        n += '_' + args
    n = n.replace(' ', '-')
    ks = pods_data.keys()
    d = None
    if 'Y' in ks and 'X' in ks:
        d = pd.DataFrame(pods_data['X'])
        if 'Xtest' in ks:
            d = d.append(pd.DataFrame(pods_data['Xtest']), ignore_index=True)
        if 'covariates' in ks:
            d.columns = pods_data['covariates']
        dy = pd.DataFrame(pods_data['Y'])
        if 'Ytest' in ks:
            dy = dy.append(pd.DataFrame(pods_data['Ytest']), ignore_index=True)
        if 'response' in ks:
            dy.columns = pods_data['response']
        for c in dy.columns:
            if c not in d.columns:
                d[c] = dy[c]
            else:
                d['y' + str(c)] = dy[c]
    elif 'Y' in ks:
        d = pd.DataFrame(pods_data['Y'])
        if 'Ytest' in ks:
            d = d.append(pd.DataFrame(pods_data['Ytest']), ignore_index=True)
    elif 'data' in ks:
        d = pd.DataFrame(pods_data['data'])
    if d is not None:
        df2arff(d, n, pods_data)
<SYSTEM_TASK:> Data set of three GPS traces of the same movement on Mt Epomeo in Ischia. Requires gpxpy to run. <END_TASK> <USER_TASK:>
Description:
def epomeo_gpx(data_set='epomeo_gpx', sample_every=4):
    """Data set of three GPS traces of the same movement on Mt Epomeo in
    Ischia. Requires gpxpy to run."""
    import gpxpy
    import gpxpy.gpx
    if not data_available(data_set):
        download_data(data_set)
    files = ['endomondo_1', 'endomondo_2', 'garmin_watch_via_endomondo',
             'viewranger_phone', 'viewranger_tablet']
    X = []
    for file in files:
        gpx_file = open(os.path.join(data_path, 'epomeo_gpx', file + '.gpx'), 'r')
        gpx = gpxpy.parse(gpx_file)
        segment = gpx.tracks[0].segments[0]
        points = [point for track in gpx.tracks
                  for segment in track.segments
                  for point in segment.points]
        data = [[(point.time - datetime.datetime(2013, 8, 21)).total_seconds(),
                 point.latitude, point.longitude, point.elevation]
                for point in points]
        X.append(np.asarray(data)[::sample_every, :])
        gpx_file.close()
    if pandas_available:
        X = pd.DataFrame(X[0], columns=['seconds', 'latitude', 'longitude', 'elevation'])
        X.set_index(keys='seconds', inplace=True)
    return data_details_return(
        {'X': X,
         'info': 'Data is an array containing time in seconds, latitude, longitude and elevation in that order.'},
        data_set)
<SYSTEM_TASK:> Ohio State University's Run1 motion capture data set. <END_TASK> <USER_TASK:>
Description:
def osu_run1(data_set='osu_run1', sample_every=4):
    """Ohio State University's Run1 motion capture data set."""
    path = os.path.join(data_path, data_set)
    if not data_available(data_set):
        import zipfile
        download_data(data_set)
        zip = zipfile.ZipFile(os.path.join(data_path, data_set, 'run1TXT.ZIP'), 'r')
        for name in zip.namelist():
            zip.extract(name, path)
    from . import mocap
    Y, connect = mocap.load_text_data('Aug210106', path)
    Y = Y[0:-1:sample_every, :]
    return data_details_return({'Y': Y, 'connect': connect}, data_set)
<SYSTEM_TASK:> Simple classification data in one dimension for illustrating models. <END_TASK> <USER_TASK:>
Description:
def toy_linear_1d_classification(seed=default_seed):
    """Simple classification data in one dimension for illustrating models."""
    def sample_class(f):
        p = 1. / (1. + np.exp(-f))
        c = np.random.binomial(1, p)
        c = np.where(c, 1, -1)
        return c

    np.random.seed(seed=seed)
    x1 = np.random.normal(-3, 5, 20)
    x2 = np.random.normal(3, 5, 20)
    X = (np.r_[x1, x2])[:, None]
    return {'X': X,
            'Y': sample_class(2. * X),
            'F': 2. * X,
            'covariates': ['X'],
            'response': [discrete({'positive': 1, 'negative': -1})],
            'seed': seed}
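A usage sketch, assuming the module-level helpers (`np`, `discrete`, `default_seed`) are in scope; the shape comments follow from the code above:

data = toy_linear_1d_classification(seed=1)
print(data['X'].shape)                         # (40, 1): twenty samples from each Gaussian
print(set(data['Y'].flatten()) <= {-1, 1})     # True: labels are sampled as +/-1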
<SYSTEM_TASK:> Airline delay data used in Gaussian Processes for Big Data by Hensman, Fusi and Lawrence <END_TASK> <USER_TASK:>
Description:
def airline_delay(data_set='airline_delay', num_train=700000, num_test=100000, seed=default_seed):
    """Airline delay data used in Gaussian Processes for Big Data by Hensman, Fusi and Lawrence"""
    if not data_available(data_set):
        download_data(data_set)
    dir_path = os.path.join(data_path, data_set)
    filename = os.path.join(dir_path, 'filtered_data.pickle')

    # 1. Load the dataset
    import pandas as pd
    data = pd.read_pickle(filename)

    # WARNING: removing year
    data.pop('Year')

    # Get data matrices
    Yall = data.pop('ArrDelay').values[:, None]
    Xall = data.values

    # Subset the data (memory!!)
    all_data = num_train + num_test
    Xall = Xall[:all_data]
    Yall = Yall[:all_data]

    # Get testing points
    np.random.seed(seed=seed)
    N_shuffled = permute(Yall.shape[0])
    train, test = N_shuffled[num_test:], N_shuffled[:num_test]
    X, Y = Xall[train], Yall[train]
    Xtest, Ytest = Xall[test], Yall[test]
    covariates = ['month', 'day of month', 'day of week', 'departure time',
                  'arrival time', 'air time', 'distance to travel',
                  'age of aircraft / years']
    response = ['delay']
    return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest,
                                'seed': seed,
                                'info': "Airline delay data used for demonstrating Gaussian processes for big data.",
                                'covariates': covariates,
                                'response': response}, data_set)
<SYSTEM_TASK:> All olympics sprint winning times for multiple output prediction. <END_TASK> <USER_TASK:>
Description:
def olympic_sprints(data_set='rogers_girolami_data'):
    """All olympics sprint winning times for multiple output prediction."""
    X = np.zeros((0, 2))
    Y = np.zeros((0, 1))
    cats = {}
    for i, dataset in enumerate([olympic_100m_men,
                                 olympic_100m_women,
                                 olympic_200m_men,
                                 olympic_200m_women,
                                 olympic_400m_men,
                                 olympic_400m_women]):
        data = dataset()
        year = data['X']
        time = data['Y']
        X = np.vstack((X, np.hstack((year, np.ones_like(year) * i))))
        Y = np.vstack((Y, time))
        cats[dataset.__name__] = i
    data['X'] = X
    data['Y'] = Y
    data['info'] = "Olympic sprint event winning times for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning."
    return data_details_return({
        'X': X,
        'Y': Y,
        'covariates': [decimalyear('year', '%Y'), discrete(cats, 'event')],
        'response': ['time'],
        'info': "Olympic sprint event winning times for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
        'output_info': {
            0: '100m Men',
            1: '100m Women',
            2: '200m Men',
            3: '200m Women',
            4: '400m Men',
            5: '400m Women'}
    }, data_set)
<SYSTEM_TASK:> Data set of movie ratings collected by the University of Minnesota and 'cleaned up' for use. <END_TASK> <USER_TASK:>
Description:
def movielens100k(data_set='movielens100k'):
    """Data set of movie ratings collected by the University of Minnesota and 'cleaned up' for use."""
    if not data_available(data_set):
        import zipfile
        download_data(data_set)
        dir_path = os.path.join(data_path, data_set)
        zip = zipfile.ZipFile(os.path.join(dir_path, 'ml-100k.zip'), 'r')
        for name in zip.namelist():
            zip.extract(name, dir_path)
    import pandas as pd
    encoding = 'latin-1'
    movie_path = os.path.join(data_path, 'movielens100k', 'ml-100k')
    items = pd.read_csv(os.path.join(movie_path, 'u.item'), index_col='index',
                        header=None, sep='|',
                        names=['index', 'title', 'date', 'empty', 'imdb_url',
                               'unknown', 'Action', 'Adventure', 'Animation',
                               "Children's", 'Comedy', 'Crime', 'Documentary',
                               'Drama', 'Fantasy', 'Film-Noir', 'Horror',
                               'Musical', 'Mystery', 'Romance', 'Sci-Fi',
                               'Thriller', 'War', 'Western'],
                        encoding=encoding)
    users = pd.read_csv(os.path.join(movie_path, 'u.user'), index_col='index',
                        header=None, sep='|',
                        names=['index', 'age', 'sex', 'job', 'id'],
                        encoding=encoding)
    parts = ['u1.base', 'u1.test', 'u2.base', 'u2.test', 'u3.base', 'u3.test',
             'u4.base', 'u4.test', 'u5.base', 'u5.test', 'ua.base', 'ua.test',
             'ub.base', 'ub.test']
    ratings = []
    for part in parts:
        rate_part = pd.read_csv(os.path.join(movie_path, part),
                                index_col='index', header=None, sep='\t',
                                names=['user', 'item', 'rating', 'index'],
                                encoding=encoding)
        rate_part['split'] = part
        ratings.append(rate_part)
    Y = pd.concat(ratings)
    return data_details_return({'Y': Y, 'film_info': items, 'user_info': users,
                                'info': 'The Movielens 100k data'}, data_set)
<SYSTEM_TASK:> Twenty-two observations of the dwarf planet Ceres as observed by Giuseppe Piazzi and published in the September edition of Monatlicher Correspondenz in 1801. These were the measurements used by Gauss to fit a model of the planet's orbit, through which the planet was recovered three months later. <END_TASK> <USER_TASK:>
Description:
def ceres(data_set='ceres'):
    """Twenty-two observations of the dwarf planet Ceres as observed by
    Giuseppe Piazzi and published in the September edition of Monatlicher
    Correspondenz in 1801. These were the measurements used by Gauss to fit a
    model of the planet's orbit, through which the planet was recovered three
    months later."""
    if not data_available(data_set):
        download_data(data_set)
    import pandas as pd
    data = pd.read_csv(os.path.join(data_path, data_set, 'ceresData.txt'),
                       index_col='Tag', header=None, sep='\t',
                       names=['Tag', 'Mittlere Sonnenzeit',
                              'Gerade Aufstig in Zeit',
                              'Gerade Aufstiegung in Graden',
                              'Nordlich Abweich', 'Geocentrische Laenger',
                              'Geocentrische Breite',
                              'Ort der Sonne + 20" Aberration',
                              'Logar. d. Distanz'],
                       parse_dates=True, dayfirst=False)
    return data_details_return({'data': data}, data_set)
<SYSTEM_TASK:> Logout current session <END_TASK> <USER_TASK:>
Description:
def logout(self, api_version="v2.0"):
    """
    Logout current session

    **Parameters:**:

      - **api_version**: API version to use (default v2.0)

    **Returns:** requests.Response object extended with cgx_status and cgx_content properties.
    """
    cur_ctlr = self._parent_class.controller
    url = str(cur_ctlr) + "/{}/api/logout".format(api_version)
    api_logger.debug("URL = %s", url)
    return self._parent_class.rest_call(url, "get")
<SYSTEM_TASK:> Function to use static AUTH_TOKEN as auth for the constructor instead of full login process. <END_TASK> <USER_TASK:>
Description:
def use_token(self, token=None):
    """
    Function to use static AUTH_TOKEN as auth for the constructor instead of full login process.

    **Parameters:**:

      - **token**: Static AUTH_TOKEN

    **Returns:** Bool on success or failure. In addition the function will
    mutate the `cloudgenix.API` constructor items as needed.
    """
    api_logger.info('use_token function:')

    # check token is a string.
    if not isinstance(token, (text_type, binary_type)):
        api_logger.debug('"token" was not a text-style string: {}'.format(text_type(token)))
        return False

    # Start setup of constructor.
    session = self._parent_class.expose_session()

    # clear cookies
    session.cookies.clear()

    # Static Token uses X-Auth-Token header instead of cookies.
    self._parent_class.add_headers({
        'X-Auth-Token': token
    })

    # Step 2: Get operator profile for tenant ID and other info.
    if self.interactive_update_profile_vars():
        # pull tenant detail
        if self._parent_class.tenant_id:
            # add tenant values to API() object
            if self.interactive_tenant_update_vars():
                # Step 3: Check for ESP/MSP. If so, ask which tenant this session should be for.
                if self._parent_class.is_esp:
                    # ESP/MSP!
                    choose_status, chosen_client_id = self.interactive_client_choice()
                    if choose_status:
                        # attempt to login as client
                        clogin_resp = self._parent_class.post.login_clients(chosen_client_id, {})
                        if clogin_resp.cgx_status:
                            # login successful, update profile and tenant info
                            c_profile = self.interactive_update_profile_vars()
                            t_profile = self.interactive_tenant_update_vars()
                            if c_profile and t_profile:
                                # successful full client login.
                                self._parent_class._password = None
                                return True
                            else:
                                if t_profile:
                                    print("ESP Client Tenant detail retrieval failed.")
                                # clear password out of memory
                                self._parent_class.email = None
                                self._parent_class._password = None
                                return False
                        else:
                            print("ESP Client Login failed.")
                            # clear password out of memory
                            self._parent_class.email = None
                            self._parent_class._password = None
                            return False
                    else:
                        print("ESP Client Choice failed.")
                        # clear password out of memory
                        self._parent_class.email = None
                        self._parent_class._password = None
                        return False

                # successful!
                # clear password out of memory
                self._parent_class._password = None
                return True
            else:
                print("Tenant detail retrieval failed.")
                # clear password out of memory
                self._parent_class.email = None
                self._parent_class._password = None
                return False
        else:
            # Profile detail retrieval failed
            self._parent_class.email = None
            self._parent_class._password = None
            return False

    api_logger.info("EMAIL = %s", self._parent_class.email)
    api_logger.info("USER_ID = %s", self._parent_class._user_id)
    api_logger.info("USER ROLES = %s", json.dumps(self._parent_class.roles))
    api_logger.info("TENANT_ID = %s", self._parent_class.tenant_id)
    api_logger.info("TENANT_NAME = %s", self._parent_class.tenant_name)
    api_logger.info("TOKEN_SESSION = %s", self._parent_class.token_session)

    return True
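A minimal usage sketch (the token string is a placeholder; `API` and its `interactive` namespace are from the CloudGenix SDK this method belongs to):

import cloudgenix

sdk = cloudgenix.API()
# Authenticate with a static token instead of the email/password login flow.
if sdk.interactive.use_token("STATIC_AUTH_TOKEN_HERE"):  # placeholder token
    print(sdk.tenant_name)
else:
    print("Static token authentication failed.")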
<SYSTEM_TASK:> Function to update the `cloudgenix.API` object with tenant login info. Run after login or client login. <END_TASK> <USER_TASK:>
Description:
def interactive_tenant_update_vars(self):
    """
    Function to update the `cloudgenix.API` object with tenant login info. Run after login or client login.

    **Returns:** Boolean on success/failure.
    """
    api_logger.info('interactive_tenant_update_vars function:')
    tenant_resp = self._parent_class.get.tenants(self._parent_class.tenant_id)
    status = tenant_resp.cgx_status
    tenant_dict = tenant_resp.cgx_content

    if status:
        api_logger.debug("new tenant_dict: %s", tenant_dict)

        # Get Tenant info.
        self._parent_class.tenant_name = tenant_dict.get('name', self._parent_class.tenant_id)
        # is ESP/MSP?
        self._parent_class.is_esp = tenant_dict.get('is_esp')
        # grab tenant address for location.
        address_lookup = tenant_dict.get('address', None)
        if address_lookup:
            tenant_address = address_lookup.get('street', "") + ", "
            tenant_address += (str(address_lookup.get('street2', "")) + ", ")
            tenant_address += (str(address_lookup.get('city', "")) + ", ")
            tenant_address += (str(address_lookup.get('state', "")) + ", ")
            tenant_address += (str(address_lookup.get('post_code', "")) + ", ")
            tenant_address += (str(address_lookup.get('country', "")) + ", ")
        else:
            tenant_address = "Unknown"
        self._parent_class.address = tenant_address
        return True
    else:
        # update failed
        return False
<SYSTEM_TASK:> Function to update the `cloudgenix.API` object with profile info. Run after login or client login. <END_TASK> <USER_TASK:>
Description:
def interactive_update_profile_vars(self):
    """
    Function to update the `cloudgenix.API` object with profile info. Run after login or client login.

    **Returns:** Boolean on success/failure.
    """
    profile = self._parent_class.get.profile()

    if profile.cgx_status:
        # if successful, save tenant id and email info to cli state.
        self._parent_class.tenant_id = profile.cgx_content.get('tenant_id')
        self._parent_class.email = profile.cgx_content.get('email')
        self._parent_class._user_id = profile.cgx_content.get('id')
        self._parent_class.roles = profile.cgx_content.get('roles', [])
        self._parent_class.token_session = profile.cgx_content.get('token_session')
        return True
    else:
        print("Profile retrieval failed.")
        # clear password out of memory
        self._parent_class._password = None
        return False
<SYSTEM_TASK:> Function to display a quick menu for user input <END_TASK> <USER_TASK:>
Description:
def quick_menu(self, banner, list_line_format, choice_list):
    """
    Function to display a quick menu for user input

    **Parameters:**

      - **banner:** Text to display before menu
      - **list_line_format:** Print'ing string with format spots for index + tuple values
      - **choice_list:** List of tuple values that you want returned if selected (and printed)

    **Returns:** Tuple that was selected.
    """
    # Setup menu
    invalid = True
    menu_int = -1

    # loop until valid
    while invalid:
        print(banner)
        for item_index, item_value in enumerate(choice_list):
            print(list_line_format.format(item_index + 1, *item_value))
        menu_choice = compat_input("\nChoose a Number or (Q)uit: ")
        if str(menu_choice).lower() in ['q']:
            # exit
            print("Exiting..")
            # best effort logout
            self._parent_class.get.logout()
            sys.exit(0)
        # verify number entered
        try:
            menu_int = int(menu_choice)
            sanity = True
        except ValueError:
            # not a number
            print("ERROR: ", menu_choice)
            sanity = False
        # validate number chosen
        if sanity and 1 <= menu_int <= len(choice_list):
            invalid = False
        else:
            print("Invalid input, needs to be between 1 and {0}.\n".format(len(choice_list)))

    # return the choice_list tuple that matches the entry.
    return choice_list[int(menu_int) - 1]
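For illustration, a call from inside the class might look like the following (the banner, format string, and tuples are invented; each tuple is printed via the format string, and the selected tuple is returned):

tenants = [("Tenant A", "0001"), ("Tenant B", "0002")]
chosen = self.quick_menu("Available tenants:", "{0}) {1} (id {2})", tenants)
# Prints a numbered list, loops until a valid number or 'q' is entered,
# then returns the matching tuple, e.g. ("Tenant A", "0001").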
<SYSTEM_TASK:> Login to the CloudGenix API, and see if SAML SSO has occurred. <END_TASK> <USER_TASK:>
Description:
def check_sso_login(self, operator_email, request_id):
    """
    Login to the CloudGenix API, and see if SAML SSO has occurred.
    This function is used to check and see if SAML SSO has succeeded while waiting.

    **Parameters:**

      - **operator_email:** String with the username to log in with
      - **request_id:** String containing the SAML 2.0 Request ID from previous login attempt.

    **Returns:** Tuple (Boolean success, Token on success, JSON response on error.)
    """
    data = {
        "email": operator_email,
        "requestId": request_id
    }

    # If debug is set..
    api_logger.info('check_sso_login function:')

    response = self._parent_class.post.login(data=data)

    # If valid response, but no token.
    if not response.cgx_content.get('x_auth_token'):
        # no valid login yet.
        return response

    # update with token and region
    auth_region = self._parent_class.parse_region(response)
    self._parent_class.update_region_to_controller(auth_region)
    self._parent_class.reparse_login_cookie_after_region_update(response)

    return response
<SYSTEM_TASK:> Function to display a quick confirmation for user input <END_TASK> <USER_TASK:>
Description:
def quick_confirm(prompt, default_value):
    """
    Function to display a quick confirmation for user input

    **Parameters:**

      - **prompt:** Text to display before confirm
      - **default_value:** Default value for no entry

    **Returns:** 'y', 'n', or Default value.
    """
    valid = False
    value = default_value.lower()
    while not valid:
        input_val = compat_input(prompt + "[{0}]: ".format(default_value))

        if input_val == "":
            value = default_value.lower()
            valid = True
        else:
            try:
                if input_val.lower() in ['y', 'n']:
                    value = input_val.lower()
                    valid = True
                else:
                    print("ERROR: enter 'Y' or 'N'.")
                    valid = False
            except ValueError:
                print("ERROR: enter 'Y' or 'N'.")
                valid = False
    return value
<SYSTEM_TASK:> Function to display a quick question for integer user input <END_TASK> <USER_TASK:>
Description:
def quick_int_input(prompt, default_value, min_val=1, max_val=30):
    """
    Function to display a quick question for integer user input

    **Parameters:**

      - **prompt:** Text / question to display
      - **default_value:** Default value for no entry
      - **min_val:** Lowest allowed integer
      - **max_val:** Highest allowed integer

    **Returns:** integer or default_value.
    """
    valid = False
    num_val = default_value
    while not valid:
        input_val = compat_input(prompt + "[{0}]: ".format(default_value))

        if input_val == "":
            num_val = default_value
            valid = True
        else:
            try:
                num_val = int(input_val)
                if min_val <= num_val <= max_val:
                    valid = True
                else:
                    print("ERROR: must be between {0} and {1}.".format(min_val, max_val))
                    valid = False
            except ValueError:
                print("ERROR: must be a number.")
                valid = False
    return num_val
<SYSTEM_TASK:> Function to display a quick question for text input. <END_TASK> <USER_TASK:>
Description:
def quick_str_input(prompt, default_value):
    """
    Function to display a quick question for text input.

    **Parameters:**

      - **prompt:** Text / question to display
      - **default_value:** Default value for no entry

    **Returns:** text_type() or default_value.
    """
    valid = False
    str_val = default_value
    while not valid:
        input_val = compat_input(prompt + "[{0}]: ".format(default_value))

        if input_val == "":
            str_val = default_value
            valid = True
        else:
            try:
                str_val = text_type(input_val)
                valid = True
            except ValueError:
                print("ERROR: must be text.")
                valid = False
    return str_val
<SYSTEM_TASK:> implementation of the tran53 hash function <END_TASK> <USER_TASK:>
Description:
def tran_hash(self, a, b, c, n):
    """implementation of the tran53 hash function"""
    return (((TRAN[(a + n) & 255] ^ TRAN[b] * (n + n + 1)) +
             TRAN[c ^ TRAN[n]]) & 255)
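`TRAN` is a fixed 256-entry substitution table defined elsewhere in the module (as is `POPC`, used further below). A self-contained toy illustration with a placeholder table — the real Nilsimsa table is a specific fixed permutation, not the identity used here:

TRAN = list(range(256))  # placeholder only; the real table is a fixed 256-byte permutation

def tran_hash(a, b, c, n):
    # Same arithmetic as above: mix three byte values and a round index
    # into a single byte (0..255).
    return (((TRAN[(a + n) & 255] ^ TRAN[b] * (n + n + 1)) + TRAN[c ^ TRAN[n]]) & 255)

print(tran_hash(ord('f'), ord('o'), ord('x'), 0))  # some value in 0..255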
<SYSTEM_TASK:> computes the hash of all of the trigrams in the chunk using a window <END_TASK> <USER_TASK:>
Description:
def process(self, chunk):
    """
    computes the hash of all of the trigrams in the chunk using a window
    of length 5
    """
    self._digest = None
    if isinstance(chunk, text_type):
        chunk = chunk.encode('utf-8')
    # chunk is a byte string
    for char in chunk:
        self.num_char += 1
        if PY3:
            # In Python 3, iterating over bytes yields integers
            c = char
        else:
            c = ord(char)
        if len(self.window) > 1:
            # seen at least three characters
            self.acc[self.tran_hash(c, self.window[0], self.window[1], 0)] += 1
        if len(self.window) > 2:
            # seen at least four characters
            self.acc[self.tran_hash(c, self.window[0], self.window[2], 1)] += 1
            self.acc[self.tran_hash(c, self.window[1], self.window[2], 2)] += 1
        if len(self.window) > 3:
            # have a full window
            self.acc[self.tran_hash(c, self.window[0], self.window[3], 3)] += 1
            self.acc[self.tran_hash(c, self.window[1], self.window[3], 4)] += 1
            self.acc[self.tran_hash(c, self.window[2], self.window[3], 5)] += 1
            # duplicate hashes, used to maintain 8 trigrams per character
            self.acc[self.tran_hash(self.window[3], self.window[0], c, 6)] += 1
            self.acc[self.tran_hash(self.window[3], self.window[2], c, 7)] += 1
        # add current character to the window, remove the previous character
        if len(self.window) < 4:
            self.window = [c] + self.window
        else:
            self.window = [c] + self.window[:3]
<SYSTEM_TASK:> read in a file and compute digest <END_TASK> <USER_TASK:>
Description:
def from_file(self, fname):
    """read in a file and compute digest"""
    with open(fname, "rb") as f:
        data = f.read()
    self.update(data)
<SYSTEM_TASK:> Returns the difference between the nilsimsa digest of the current object and a given digest <END_TASK> <USER_TASK:>
Description:
def compare(self, digest_2, is_hex=False):
    """
    Returns the difference between the nilsimsa digest of the current
    object and a given digest
    """
    # convert hex string to list of ints
    if is_hex:
        digest_2 = convert_hex_to_ints(digest_2)
    bit_diff = 0
    for i in range(len(self.digest)):
        # computes the bit diff between the i'th position of the digests
        bit_diff += POPC[self.digest[i] ^ digest_2[i]]
    return 128 - bit_diff
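`POPC` is a popcount table (number of set bits for each byte value). A self-contained sketch of the scoring scale, using 32-byte digests as the code above assumes (the digests are fabricated for illustration):

POPC = [bin(i).count('1') for i in range(256)]

d1 = [0x12] * 32
d2 = [0x12] * 31 + [0xFF]  # differs only in the last byte
bit_diff = sum(POPC[a ^ b] for a, b in zip(d1, d2))
print(128 - bit_diff)      # 128 for identical digests; lower as they diverge (here 122)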
<SYSTEM_TASK:> Forgot password API <END_TASK> <USER_TASK:>
Description:
def tenant_forgot_password_login(self, data, tenant_id=None, api_version="v2.0"):
    """
    Forgot password API

    **Parameters:**:

      - **data**: Dictionary containing data to POST as JSON
      - **tenant_id**: Tenant ID
      - **api_version**: API version to use (default v2.0)

    **Returns:** requests.Response object extended with cgx_status and cgx_content properties.
    """
    if tenant_id is None and self._parent_class.tenant_id:
        # Pull tenant_id from parent namespace cache.
        tenant_id = self._parent_class.tenant_id
    elif not tenant_id:
        # No value for tenant_id.
        raise TypeError("tenant_id is required but not set or cached.")
    cur_ctlr = self._parent_class.controller
    url = str(cur_ctlr) + "/{}/api/tenants/{}/login/password/forgot".format(api_version, tenant_id)
    api_logger.debug("URL = %s", url)
    return self._parent_class.rest_call(url, "post", data=data, sensitive=True)
<SYSTEM_TASK:> Verify the validity of the given file. Never trust the end-user. <END_TASK> <USER_TASK:>
Description:
def is_valid_file(parser, arg):
    """Verify the validity of the given file. Never trust the end-user."""
    if not os.path.exists(arg):
        parser.error("File %s not found" % arg)
    else:
        return arg
<SYSTEM_TASK:> Detect the language used in the given file. <END_TASK> <USER_TASK:>
Description:
def detect_lang(path):
    """Detect the language used in the given file."""
    blob = FileBlob(path, os.getcwd())
    if blob.is_text:
        print('Programming language of the file detected: {0}'.format(blob.language.name))
        return blob.language.name
    else:
        # images, binary and what-have-you won't be pasted
        print('File not a text file. Exiting...')
        sys.exit()
<SYSTEM_TASK:> Take a screenshot of device and log it in the report with a timestamp; scale controls screenshot size and quality controls screenshot quality. <END_TASK> <USER_TASK:>
Description:
def screenshot(self, scale=None, quality=None):
    """
    Take a screenshot of device and log it in the report with a timestamp;
    scale controls screenshot size and quality controls screenshot quality.

    Defaults: scale=1.0, quality=100.
    """
    output_dir = BuiltIn().get_variable_value('${OUTPUTDIR}')
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d%H%M%S')
    screenshot_path = '%s%s%s.png' % (output_dir, os.sep, st)
    self.device.screenshot(screenshot_path, scale, quality)
    logger.info('\n<a href="%s">%s</a><br><img src="%s">'
                % (screenshot_path, st, screenshot_path), html=True)
<SYSTEM_TASK:> This keyword can use object method from original python uiautomator <END_TASK> <USER_TASK:>
Description:
def call(self, obj, method, *args, **selectors):
    """
    This keyword can use object method from original python uiautomator

    See more details from https://github.com/xiaocong/uiautomator

    Example:
    | ${accessibility_text} | Get Object | text=Accessibility | # Get the UI object |
    | Call | ${accessibility_text} | click | # Call the method of the UI object 'click' |
    """
    func = getattr(obj, method)
    return func(**selectors)
<SYSTEM_TASK:> Merge two precomputed similarity lists, truncating the result to `clip` most similar items. <END_TASK> <USER_TASK:>
Description:
def merge_sims(oldsims, newsims, clip=None):
    """Merge two precomputed similarity lists, truncating the result to `clip` most similar items."""
    if oldsims is None:
        result = newsims or []
    elif newsims is None:
        result = oldsims
    else:
        result = sorted(oldsims + newsims, key=lambda item: -item[1])
    if clip is not None:
        result = result[:clip]
    return result
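A quick usage sketch of `merge_sims` as defined above (the document ids and scores are invented):

old = [('docA', 0.9), ('docB', 0.4)]
new = [('docC', 0.7)]
print(merge_sims(old, new, clip=2))  # -> [('docA', 0.9), ('docC', 0.7)]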
<SYSTEM_TASK:> Delete all files created by this index, invalidating `self`. Use with care. <END_TASK> <USER_TASK:>
Description:
def terminate(self):
    """Delete all files created by this index, invalidating `self`. Use with care."""
    try:
        self.id2sims.terminate()
    except:
        pass
    import glob
    for fname in glob.glob(self.fname + '*'):
        try:
            os.remove(fname)
            logger.info("deleted %s" % fname)
        except Exception as e:
            logger.warning("failed to delete %s: %s" % (fname, e))
    for val in list(self.__dict__.keys()):
        try:
            delattr(self, val)
        except:
            pass
<SYSTEM_TASK:> Merge documents from the other index. Update precomputed similarities <END_TASK> <USER_TASK:>
Description:
def merge(self, other):
    """Merge documents from the other index. Update precomputed similarities
    in the process."""
    other.qindex.normalize, other.qindex.num_best = False, self.topsims

    # update precomputed "most similar" for old documents (in case some of
    # the new docs make it to the top-N for some of the old documents)
    logger.info("updating old precomputed values")
    pos, lenself = 0, len(self.qindex)
    for chunk in self.qindex.iter_chunks():
        for sims in other.qindex[chunk]:
            if pos in self.pos2id:
                # ignore masked entries (deleted, overwritten documents)
                docid = self.pos2id[pos]
                sims = self.sims2scores(sims)
                self.id2sims[docid] = merge_sims(self.id2sims[docid], sims, self.topsims)
            pos += 1
            if pos % 10000 == 0:
                logger.info("PROGRESS: updated doc #%i/%i" % (pos, lenself))
    self.id2sims.sync()

    logger.info("merging fresh index into optimized one")
    pos, docids = 0, []
    for chunk in other.qindex.iter_chunks():
        for vec in chunk:
            if pos in other.pos2id:
                # don't copy deleted documents
                self.qindex.add_documents([vec])
                docids.append(other.pos2id[pos])
            pos += 1
    self.qindex.save()
    self.update_ids(docids)

    logger.info("precomputing most similar for the fresh index")
    pos, lenother = 0, len(other.qindex)
    norm, self.qindex.normalize = self.qindex.normalize, False
    topsims, self.qindex.num_best = self.qindex.num_best, self.topsims
    for chunk in other.qindex.iter_chunks():
        for sims in self.qindex[chunk]:
            if pos in other.pos2id:
                # ignore masked entries (deleted, overwritten documents)
                docid = other.pos2id[pos]
                self.id2sims[docid] = self.sims2scores(sims)
            pos += 1
            if pos % 10000 == 0:
                logger.info("PROGRESS: precomputed doc #%i/%i" % (pos, lenother))
    self.qindex.normalize, self.qindex.num_best = norm, topsims
    self.id2sims.sync()
<SYSTEM_TASK:> Commit all changes, clear all caches. <END_TASK> <USER_TASK:>
Description:
def flush(self, save_index=False, save_model=False, clear_buffer=False):
    """Commit all changes, clear all caches."""
    if save_index:
        if self.fresh_index is not None:
            self.fresh_index.save(self.location('index_fresh'))
        if self.opt_index is not None:
            self.opt_index.save(self.location('index_opt'))
    if save_model:
        if self.model is not None:
            self.model.save(self.location('model'))
    self.payload.commit()
    if clear_buffer:
        if hasattr(self, 'fresh_docs'):
            try:
                self.fresh_docs.terminate()  # erase all buffered documents + file on disk
            except:
                pass
        # buffer defaults to a random location in temp
        self.fresh_docs = SqliteDict(journal_mode=JOURNAL_MODE)
    self.fresh_docs.sync()
<SYSTEM_TASK:> Explicitly close open file handles, databases etc. <END_TASK> <USER_TASK:>
Description:
def close(self):
    """Explicitly close open file handles, databases etc."""
    try:
        self.payload.close()
    except:
        pass
    try:
        self.model.close()
    except:
        pass
    try:
        self.fresh_index.close()
    except:
        pass
    try:
        self.opt_index.close()
    except:
        pass
    try:
        self.fresh_docs.terminate()
    except:
        pass
<SYSTEM_TASK:> Create an indexing model. Will overwrite the model if it already exists. <END_TASK> <USER_TASK:> Description: def train(self, corpus=None, method='auto', clear_buffer=True, params=None): """ Create an indexing model. Will overwrite the model if it already exists. All indexes become invalid, because documents in them use a now-obsolete representation. The model is trained on documents previously entered via `buffer`, or directly on `corpus`, if specified. """
if corpus is not None:
    # use the supplied corpus only (erase existing buffer, if any)
    self.flush(clear_buffer=True)
    self.buffer(corpus)

if not self.fresh_docs:
    msg = "train called but no training corpus specified for %s" % self
    logger.error(msg)
    raise ValueError(msg)

if method == 'auto':
    numdocs = len(self.fresh_docs)
    if numdocs < 1000:
        logger.warning("too few training documents; using simple log-entropy model instead of latent semantic indexing")
        method = 'logentropy'
    else:
        method = 'lsi'
if params is None:
    params = {}
self.model = SimModel(self.fresh_docs, method=method, params=params)
self.flush(save_model=True, clear_buffer=clear_buffer)
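For orientation, a hypothetical end-to-end call: simserver documents are plain dicts with an 'id' and a 'tokens' key (plus an optional 'payload'); the corpus contents and path below are invented for illustration.

    from simserver import SessionServer  # session wrapper around SimServer

    server = SessionServer('/tmp/my_server')  # path is illustrative
    corpus = [{'id': 'doc_%i' % num, 'tokens': text.lower().split()}
              for num, text in enumerate(
                  ['Human machine interface', 'Graph minors survey', 'Graph minors IV'])]
    server.train(corpus, method='auto')  # <1000 docs => falls back to 'logentropy'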
<SYSTEM_TASK:> Permanently index all documents previously added via `buffer`, or <END_TASK> <USER_TASK:> Description: def index(self, corpus=None, clear_buffer=True): """ Permanently index all documents previously added via `buffer`, or directly index documents from `corpus`, if specified. The indexing model must already exist (see `train`) before this function is called. """
if not self.model:
    msg = 'must initialize model for %s before indexing documents' % self.basename
    logger.error(msg)
    raise AttributeError(msg)

if corpus is not None:
    # use the supplied corpus only (erase existing buffer, if any)
    self.flush(clear_buffer=True)
    self.buffer(corpus)

if not self.fresh_docs:
    msg = "index called but no indexing corpus specified for %s" % self
    logger.error(msg)
    raise ValueError(msg)

if not self.fresh_index:
    logger.info("starting a new fresh index for %s" % self)
    self.fresh_index = SimIndex(self.location('index_fresh'), self.model.num_features)
self.fresh_index.index_documents(self.fresh_docs, self.model)
if self.opt_index is not None:
    self.opt_index.delete(self.fresh_docs.keys())

logger.info("storing document payloads")
for docid in self.fresh_docs:
    payload = self.fresh_docs[docid].get('payload', None)
    if payload is None:
        # HACK: exit on first doc without a payload (=assume all docs have payload, or none does)
        break
    self.payload[docid] = payload
self.flush(save_index=True, clear_buffer=clear_buffer)
<SYSTEM_TASK:> Drop all indexed documents. If `keep_model` is False, also drop the model. <END_TASK> <USER_TASK:> Description: def drop_index(self, keep_model=True): """Drop all indexed documents. If `keep_model` is False, also drop the model."""
modelstr = "" if keep_model else "and model " logger.info("deleting similarity index " + modelstr + "from %s" % self.basename) # delete indexes for index in [self.fresh_index, self.opt_index]: if index is not None: index.terminate() self.fresh_index, self.opt_index = None, None # delete payload if self.payload is not None: self.payload.close() fname = self.location('payload') try: if os.path.exists(fname): os.remove(fname) logger.info("deleted %s" % fname) except Exception, e: logger.warning("failed to delete %s" % fname) self.payload = SqliteDict(self.location('payload'), autocommit=True, journal_mode=JOURNAL_MODE) # optionally, delete the model as well if not keep_model and self.model is not None: self.model.close() fname = self.location('model') try: if os.path.exists(fname): os.remove(fname) logger.info("deleted %s" % fname) except Exception, e: logger.warning("failed to delete %s" % fname) self.model = None self.flush(save_index=True, save_model=True, clear_buffer=True)
<SYSTEM_TASK:> Delete specified documents from the index. <END_TASK> <USER_TASK:> Description: def delete(self, docids): """Delete specified documents from the index."""
logger.info("asked to drop %i documents" % len(docids)) for index in [self.opt_index, self.fresh_index]: if index is not None: index.delete(docids) self.flush(save_index=True)
<SYSTEM_TASK:> Find `max_results` most similar articles in the index, each having similarity <END_TASK> <USER_TASK:> Description: def find_similar(self, doc, min_score=0.0, max_results=100): """ Find `max_results` most similar articles in the index, each having similarity score of at least `min_score`. The resulting list may be shorter than `max_results`, in case there are not enough matching documents. `doc` is either a string (=document id, previously indexed) or a dict containing a 'tokens' key. These tokens are processed to produce a vector, which is then used as a query against the index. The similar documents are returned in decreasing similarity order, as `(doc_id, similarity_score, doc_payload)` 3-tuples. The payload returned is identical to what was supplied for this document during indexing. """
logger.debug("received query call with %r" % doc) if self.is_locked(): msg = "cannot query while the server is being updated" logger.error(msg) raise RuntimeError(msg) sims_opt, sims_fresh = None, None for index in [self.fresh_index, self.opt_index]: if index is not None: index.topsims = max_results if isinstance(doc, basestring): # query by direct document id docid = doc if self.opt_index is not None and docid in self.opt_index: sims_opt = self.opt_index.sims_by_id(docid) if self.fresh_index is not None: vec = self.opt_index.vec_by_id(docid) sims_fresh = self.fresh_index.sims_by_vec(vec, normalize=False) elif self.fresh_index is not None and docid in self.fresh_index: sims_fresh = self.fresh_index.sims_by_id(docid) if self.opt_index is not None: vec = self.fresh_index.vec_by_id(docid) sims_opt = self.opt_index.sims_by_vec(vec, normalize=False) else: raise ValueError("document %r not in index" % docid) else: if 'topics' in doc: # user supplied vector directly => use that vec = gensim.matutils.any2sparse(doc['topics']) else: # query by an arbitrary text (=tokens) inside doc['tokens'] vec = self.model.doc2vec(doc) # convert document (text) to vector if self.opt_index is not None: sims_opt = self.opt_index.sims_by_vec(vec) if self.fresh_index is not None: sims_fresh = self.fresh_index.sims_by_vec(vec) merged = merge_sims(sims_opt, sims_fresh) logger.debug("got %s raw similars, pruning with max_results=%s, min_score=%s" % (len(merged), max_results, min_score)) result = [] for docid, score in merged: if score < min_score or 0 < max_results <= len(result): break result.append((docid, float(score), self.payload.get(docid, None))) return result
<SYSTEM_TASK:> Make sure a session is open. <END_TASK> <USER_TASK:> Description: def check_session(self): """ Make sure a session is open. If it's not and autosession is turned on, create a new session automatically. If it's not and autosession is off, raise an exception. """
if self.session is None:
    if self.autosession:
        self.open_session()
    else:
        msg = "must open a session before modifying %s" % self
        raise RuntimeError(msg)
<SYSTEM_TASK:> Open a new session to modify this server. <END_TASK> <USER_TASK:> Description: def open_session(self): """ Open a new session to modify this server. You can either call this function directly, or turn on autosession which will open/commit sessions for you transparently. """
if self.session is not None:
    msg = "session already open; commit it or rollback before opening another one in %s" % self
    logger.error(msg)
    raise RuntimeError(msg)

logger.info("opening a new session")
logger.info("removing %s" % self.loc_session)
try:
    shutil.rmtree(self.loc_session)
except:
    logger.info("failed to delete %s" % self.loc_session)
logger.info("cloning server from %s to %s" % (self.loc_stable, self.loc_session))
shutil.copytree(self.loc_stable, self.loc_session)
self.session = SimServer(self.loc_session, use_locks=self.use_locks)
self.lock_update.acquire()
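A sketch of the explicit session protocol this enables, assuming autosession is turned off at construction; `rollback` is the counterpart to `commit` mentioned in the error message above.

    server = SessionServer('/tmp/my_server', autosession=False)
    server.open_session()   # clone stable copy -> session copy
    try:
        server.buffer(corpus)
        server.index()      # modifies only the session clone
        server.commit()     # atomically promote the clone to stable
    except Exception:
        server.rollback()   # discard the clone, keep the old stable state
        raise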
<SYSTEM_TASK:> Buffer documents in the current session <END_TASK> <USER_TASK:> Description: def buffer(self, *args, **kwargs): """Buffer documents in the current session."""
self.check_session()
result = self.session.buffer(*args, **kwargs)
return result
<SYSTEM_TASK:> Index documents in the current session <END_TASK> <USER_TASK:> Description: def index(self, *args, **kwargs): """Index documents in the current session."""
self.check_session()
result = self.session.index(*args, **kwargs)
if self.autosession:
    self.commit()
return result
<SYSTEM_TASK:> Drop all indexed documents from the session. Optionally, drop model too. <END_TASK> <USER_TASK:> Description: def drop_index(self, keep_model=True): """Drop all indexed documents from the session. Optionally, drop model too."""
self.check_session()
result = self.session.drop_index(keep_model)
if self.autosession:
    self.commit()
return result
<SYSTEM_TASK:> Optimize index for faster by-document-id queries. <END_TASK> <USER_TASK:> Description: def optimize(self): """Optimize index for faster by-document-id queries."""
self.check_session()
result = self.session.optimize()
if self.autosession:
    self.commit()
return result
<SYSTEM_TASK:> Commit changes made by the latest session. <END_TASK> <USER_TASK:> Description: def commit(self): """Commit changes made by the latest session."""
if self.session is not None:
    logger.info("committing transaction in %s" % self)
    tmp = self.stable
    self.stable, self.session = self.session, None
    self.istable = 1 - self.istable
    self.write_istable()
    tmp.close()  # don't wait for gc, release resources manually
    self.lock_update.release()
else:
    logger.warning("commit called but there's no open session in %s" % self)
<SYSTEM_TASK:> Delete all files created by this server, invalidating `self`. Use with care. <END_TASK> <USER_TASK:> Description: def terminate(self): """Delete all files created by this server, invalidating `self`. Use with care."""
logger.info("deleting entire server %s" % self) self.close() try: shutil.rmtree(self.basedir) logger.info("deleted server under %s" % self.basedir) # delete everything from self, so that using this object fails results # in an error as quickly as possible for val in self.__dict__.keys(): try: delattr(self, val) except: pass except Exception, e: logger.warning("failed to delete SessionServer: %s" % (e))
<SYSTEM_TASK:> Find similar articles. <END_TASK> <USER_TASK:> Description: def find_similar(self, *args, **kwargs): """ Find similar articles. With autosession off, use the index state *before* current session started, so that changes made in the session will not be visible here. With autosession on, close the current session first (so that session changes *are* committed and visible). """
if self.session is not None and self.autosession:
    # with autosession on, commit the pending transaction first
    self.commit()
return self.stable.find_similar(*args, **kwargs)
<SYSTEM_TASK:> Repeatedly call fold and merge on data and then finalize. <END_TASK> <USER_TASK:> Description: def reduce(reducer, data, chunk_size=DEFAULT_CHUNK_SIZE): """Repeatedly call fold and merge on data and then finalize. Arguments: data: Input for the fold function. reducer: The IReducer to use. chunk_size: How many items should be passed to fold at a time? Returns: Return value of finalize. """
if not chunk_size:
    return finalize(reducer, fold(reducer, data))

# Splitting the work up into chunks allows us to, e.g. reduce a large file
# without loading everything into memory, while still being significantly
# faster than repeatedly calling the fold function for every element.
chunks = generate_chunks(data, chunk_size)
intermediate = fold(reducer, next(chunks))
for chunk in chunks:
    intermediate = merge(reducer, intermediate, fold(reducer, chunk))

return finalize(reducer, intermediate)
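To make the fold/merge/finalize contract concrete, a self-contained toy with plain functions standing in for the IReducer dispatch (the real efilter versions are multimethods that take the reducer as their first argument). It computes a mean chunk by chunk:

    import itertools

    def generate_chunks(data, chunk_size):
        # yield successive lists of up to chunk_size items
        iterator = iter(data)
        while True:
            chunk = list(itertools.islice(iterator, chunk_size))
            if not chunk:
                return
            yield chunk

    fold = lambda chunk: (sum(chunk), len(chunk))    # chunk -> (total, count)
    merge = lambda x, y: (x[0] + y[0], x[1] + y[1])  # combine intermediates
    finalize = lambda x: x[0] / float(x[1])          # (total, count) -> mean

    chunks = generate_chunks(range(10), 3)
    intermediate = fold(next(chunks))
    for chunk in chunks:
        intermediate = merge(intermediate, fold(chunk))
    print(finalize(intermediate))  # 4.5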
<SYSTEM_TASK:> Adds tasks to the existing set of tasks of the Stage <END_TASK> <USER_TASK:> Description: def add_tasks(self, value): """ Adds tasks to the existing set of tasks of the Stage :argument: set of tasks """
tasks = self._validate_entities(value)
self._tasks.update(tasks)
self._task_count = len(self._tasks)
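A hypothetical caller, based on the radical.entk Stage/Task API this method belongs to; the executable value is invented, and whether a set is the preferred argument type depends on what `_validate_entities` accepts.

    from radical.entk import Stage, Task

    t = Task()
    t.executable = '/bin/date'  # illustrative

    s = Stage()
    s.add_tasks({t})            # a set of tasks, as the docstring suggests
    print(s.tasks)              # the validated, accumulated task set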
<SYSTEM_TASK:> Convert current Stage into a dictionary <END_TASK> <USER_TASK:> Description: def to_dict(self): """ Convert current Stage into a dictionary :return: python dictionary """
stage_desc_as_dict = {
    'uid': self._uid,
    'name': self._name,
    'state': self._state,
    'state_history': self._state_history,
    'parent_pipeline': self._p_pipeline
}

return stage_desc_as_dict
<SYSTEM_TASK:> Create a Stage from a dictionary. The change is made in place. <END_TASK> <USER_TASK:> Description: def from_dict(self, d): """ Create a Stage from a dictionary. The change is made in place. :argument: python dictionary :return: None """
if 'uid' in d:
    if d['uid']:
        self._uid = d['uid']

if 'name' in d:
    if d['name']:
        self._name = d['name']

if 'state' in d:
    if isinstance(d['state'], str) or isinstance(d['state'], unicode):
        if d['state'] in states._stage_state_values.keys():
            self._state = d['state']
        else:
            raise ValueError(obj=self._uid,
                             attribute='state',
                             expected_value=states._stage_state_values.keys(),
                             actual_value=d['state'])
    else:
        raise TypeError(entity='state', expected_type=str,
                        actual_type=type(d['state']))
else:
    self._state = states.INITIAL

if 'state_history' in d:
    if isinstance(d['state_history'], list):
        self._state_history = d['state_history']
    else:
        raise TypeError(entity='state_history', expected_type=list,
                        actual_type=type(d['state_history']))

if 'parent_pipeline' in d:
    if isinstance(d['parent_pipeline'], dict):
        self._p_pipeline = d['parent_pipeline']
    else:
        raise TypeError(entity='parent_pipeline', expected_type=dict,
                        actual_type=type(d['parent_pipeline']))
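A hypothetical round trip through `to_dict`/`from_dict`; the field values are invented, and fields absent from the dict keep their defaults.

    s1 = Stage()
    s1.name = 'preprocess'
    desc = s1.to_dict()   # plain python dict, safe to serialize

    s2 = Stage()
    s2.from_dict(desc)    # mutates s2 in place; returns None
    assert s2.name == 'preprocess'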
<SYSTEM_TASK:> Generates the text of an RPM spec file. <END_TASK> <USER_TASK:> Description: def _make_spec_file(self): """Generates the text of an RPM spec file. Returns: A list of strings containing the lines of text. """
# Note that bdist_rpm can be an old style class.
if issubclass(BdistRPMCommand, object):
    spec_file = super(BdistRPMCommand, self)._make_spec_file()
else:
    spec_file = bdist_rpm._make_spec_file(self)

if sys.version_info[0] < 3:
    python_package = "python"
else:
    python_package = "python3"

description = []
summary = ""
in_description = False
python_spec_file = []
for line in spec_file:
    if line.startswith("Summary: "):
        summary = line

    elif line.startswith("BuildRequires: "):
        line = "BuildRequires: {0:s}-setuptools".format(python_package)

    elif line.startswith("Requires: "):
        if python_package == "python3":
            line = line.replace("python", "python3")

    elif line.startswith("%description"):
        in_description = True

    elif line.startswith("%files"):
        line = "%files -f INSTALLED_FILES -n {0:s}-%{{name}}".format(
            python_package)

    elif line.startswith("%prep"):
        in_description = False

        python_spec_file.append(
            "%package -n {0:s}-%{{name}}".format(python_package))
        python_spec_file.append("{0:s}".format(summary))
        python_spec_file.append("")
        python_spec_file.append(
            "%description -n {0:s}-%{{name}}".format(python_package))
        python_spec_file.extend(description)

    elif in_description:
        # Ignore leading white lines in the description.
        if not description and not line:
            continue

        description.append(line)

    python_spec_file.append(line)

return python_spec_file
<SYSTEM_TASK:> Call IStructured.resolve across all scopes and return first hit. <END_TASK> <USER_TASK:> Description: def resolve(self, name): """Call IStructured.resolve across all scopes and return first hit."""
for scope in reversed(self.scopes):
    try:
        return structured.resolve(scope, name)
    except (KeyError, AttributeError):
        continue

raise AttributeError(name)
<SYSTEM_TASK:> Reflect 'name' starting with local scope all the way up to global. <END_TASK> <USER_TASK:> Description: def reflect(self, name): """Reflect 'name' starting with local scope all the way up to global. This method will attempt both static and runtime reflection. This is the recommended way of using reflection. Returns: Type of 'name', or protocol.AnyType. Caveat: The type of 'name' does not necessarily have to be an instance of Python's type - it depends on what the host application returns through the reflection API. For example, Rekall uses objects generated at runtime to simulate a native (C/C++) type system. """
# Return whatever the most local scope defines this as, or bubble all
# the way to the top.
result = None
for scope in reversed(self.scopes):
    try:
        if isinstance(scope, type):
            result = structured.reflect_static_member(scope, name)
        else:
            result = structured.reflect_runtime_member(scope, name)

        if result is not None:
            return result
    except (NotImplementedError, KeyError, AttributeError):
        continue

return protocol.AnyType
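A self-contained miniature of the innermost-scope-wins lookup order that `reversed(self.scopes)` implements, with plain dicts standing in for IStructured scopes (names invented):

    class MiniScopeStack(object):
        def __init__(self, *scopes):
            self.scopes = list(scopes)  # ordered [global, ..., local]

        def resolve(self, name):
            for scope in reversed(self.scopes):  # innermost scope wins
                if name in scope:
                    return scope[name]
            raise AttributeError(name)

    stack = MiniScopeStack({'x': 'global', 'z': 0}, {'x': 'local', 'y': 1})
    assert stack.resolve('x') == 'local'  # shadowed by the innermost scope
    assert stack.resolve('z') == 0        # bubbles up to the global scope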
<SYSTEM_TASK:> Reflect 'name' using ONLY runtime reflection. <END_TASK> <USER_TASK:> Description: def reflect_runtime_member(self, name): """Reflect 'name' using ONLY runtime reflection. You most likely want to use ScopeStack.reflect instead. Returns: Type of 'name', or protocol.AnyType. """
for scope in reversed(self.scopes):
    try:
        return structured.reflect_runtime_member(scope, name)
    except (NotImplementedError, KeyError, AttributeError):
        continue

return protocol.AnyType
<SYSTEM_TASK:> Reflect 'name' using ONLY static reflection. <END_TASK> <USER_TASK:> Description: def reflect_static_member(cls, name): """Reflect 'name' using ONLY static reflection. You most likely want to use ScopeStack.reflect instead. Returns: Type of 'name', or protocol.AnyType. """
for scope in reversed(cls.scopes):
    try:
        return structured.reflect_static_member(scope, name)
    except (NotImplementedError, KeyError, AttributeError):
        continue

return protocol.AnyType
<SYSTEM_TASK:> Called only on a valid form, this method will place the chosen <END_TASK> <USER_TASK:> Description: def categorize_metrics(self): """Called only on a valid form, this method will place the chosen metrics in the given category."""
category = self.cleaned_data['category_name']
metrics = self.cleaned_data['metrics']
self.r.reset_category(category, metrics)
<SYSTEM_TASK:> Match grammar function 'f' against next token and set 'self.matched'. <END_TASK> <USER_TASK:> Description: def match(self, f, *args): """Match grammar function 'f' against next token and set 'self.matched'. Arguments: f: A grammar function - see efilter.parsers.common.grammar. Must return TokenMatch or None. args: Passed to 'f', if any. Returns: Instance of efilter.parsers.common.grammar.TokenMatch or None. Comment: If a match is returned, it will also be stored in self.matched. """
try:
    match = f(self.tokenizer, *args)
except StopIteration:
    # The grammar function might have tried to access more tokens than
    # are available. That's not really an error, it just means it didn't
    # match.
    return

if match is None:
    return

if not isinstance(match, grammar.TokenMatch):
    raise TypeError("Invalid grammar function %r returned %r."
                    % (f, match))

self.matched = match
return match
<SYSTEM_TASK:> Like 'match', but throw a parse error if 'f' matches. <END_TASK> <USER_TASK:> Description: def reject(self, f, *args): """Like 'match', but throw a parse error if 'f' matches. This is useful when a parser wants to be strict about specific things being prohibited. For example, DottySQL bans the use of SQL keywords as variable names. """
match = self.match(f, *args)
if match:
    token = self.peek(0)
    raise errors.EfilterParseError(
        query=self.tokenizer.source, token=token,
        message="Was not expecting a %s here." % token.name)
<SYSTEM_TASK:> Like 'accept' but throws a parse error if 'f' doesn't match. <END_TASK> <USER_TASK:> Description: def expect(self, f, *args): """Like 'accept' but throws a parse error if 'f' doesn't match."""
match = self.accept(f, *args)
if match:
    return match

# 'func_name' is the Python 2 spelling; fall back to '__name__' so the
# error message stays informative on Python 3 as well.
func_name = (getattr(f, "func_name", None)
             or getattr(f, "__name__", "<unnamed grammar function>"))

start, end = self.current_position()
raise errors.EfilterParseError(
    query=self.tokenizer.source, start=start, end=end,
    message="Was expecting %s here." % (func_name))
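A self-contained miniature of the match/accept/expect pattern over a plain token list; the real class delegates to grammar functions and a tokenizer, but the control flow is the same:

    class MiniParser(object):
        def __init__(self, tokens):
            self.tokens = list(tokens)
            self.pos = 0

        def match(self, predicate):
            # peek: report a hit without consuming anything
            if self.pos < len(self.tokens) and predicate(self.tokens[self.pos]):
                return self.tokens[self.pos]
            return None

        def accept(self, predicate):
            token = self.match(predicate)
            if token is not None:
                self.pos += 1  # consume on success only
            return token

        def expect(self, predicate):
            token = self.accept(predicate)
            if token is None:
                raise SyntaxError("unexpected token at %d" % self.pos)
            return token

    p = MiniParser(['select', 'name'])
    p.expect(lambda t: t == 'select')               # consumes 'select'
    assert p.accept(lambda t: t == 'from') is None  # no match, nothing consumed
    print(p.expect(lambda t: t == 'name'))          # 'name'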
<SYSTEM_TASK:> Returns the value of the var named in the expression. <END_TASK> <USER_TASK:> Description: def solve_var(expr, vars): """Returns the value of the var named in the expression."""
try:
    return Result(structured.resolve(vars, expr.value), ())
except (KeyError, AttributeError) as e:
    # Raise a better exception for accessing a non-existent member.
    raise errors.EfilterKeyError(root=expr, key=expr.value, message=e,
                                 query=expr.source)
except (TypeError, ValueError) as e:
    # Raise a better exception for what is probably a null pointer error.
    if vars.locals is None:
        raise errors.EfilterNoneError(
            root=expr, query=expr.source,
            message="Trying to access member %r of a null." % expr.value)
    else:
        raise errors.EfilterTypeError(
            root=expr, query=expr.source,
            message="%r (vars: %r)" % (e, vars))
except NotImplementedError as e:
    raise errors.EfilterError(
        root=expr, query=expr.source,
        message="Trying to access member %r of an instance of %r."
                % (expr.value, type(vars)))
<SYSTEM_TASK:> Build a repeated value from subexpressions. <END_TASK> <USER_TASK:> Description: def solve_repeat(expr, vars): """Build a repeated value from subexpressions."""
try:
    result = repeated.meld(*[solve(x, vars).value for x in expr.children])
    return Result(result, ())
except TypeError:
    raise errors.EfilterTypeError(
        root=expr, query=expr.source,
        message="All values in a repeated value must be of the same type.")
<SYSTEM_TASK:> Evaluate conditions and return the one that matches. <END_TASK> <USER_TASK:> Description: def solve_ifelse(expr, vars): """Evaluate conditions and return the one that matches."""
for condition, result in expr.conditions():
    if boolean.asbool(solve(condition, vars).value):
        return solve(result, vars)

return solve(expr.default(), vars)
<SYSTEM_TASK:> Solves the map-form by recursively calling its RHS with new vars. <END_TASK> <USER_TASK:> Description: def solve_map(expr, vars): """Solves the map-form by recursively calling its RHS with new vars. Map-forms are binary expressions. The LHS should evaluate to an IAssociative that can be used as new vars with which to solve a new query, of which the RHS is the root. In most cases, the LHS will be a Var (var). Typically, map-forms result from the dotty "dot" (.) operator. For example, the query "User.name" will translate to a map-form with the var "User" on LHS and a var to "name" on the RHS. With top-level vars being something like {"User": {"name": "Bob"}}, the Var on the LHS will evaluate to {"name": "Bob"}, which subdict will then be used on the RHS as new vars, and that whole form will evaluate to "Bob". """
lhs_values, _ = __solve_for_repeated(expr.lhs, vars)

def lazy_map():
    try:
        for lhs_value in repeated.getvalues(lhs_values):
            yield solve(expr.rhs,
                        __nest_scope(expr.lhs, vars, lhs_value)).value
    except errors.EfilterNoneError as error:
        error.root = expr
        raise

return Result(repeated.lazy(lazy_map), ())
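A toy version of the nesting the docstring describes, with plain dicts in place of `__nest_scope` and the repeated-value machinery:

    def solve_toy_map(lhs_name, rhs_name, vars):
        # evaluate the LHS in the outer scope...
        new_scope = vars[lhs_name]   # e.g. {'name': 'Bob'}
        # ...then solve the RHS with that value as the new scope
        return new_scope[rhs_name]

    print(solve_toy_map('User', 'name', {'User': {'name': 'Bob'}}))  # Bob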