text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def in_file(self, fn: str) -> Iterator[InsertionPoint]:
    """
    Returns an iterator over all of the insertion points in a given file.
    """
    logger.debug("finding insertion points in file: %s", fn)
    # An unknown file simply yields nothing.
    for point in self.__file_insertions.get(fn, []):
        yield point
def at_line(self, line: FileLine) -> Iterator[InsertionPoint]:
    """
    Returns an iterator over all of the insertion points located at a
    given line.
    """
    logger.debug("finding insertion points at line: %s", str(line))
    filename = line.filename  # type: str
    num = line.num  # type: int
    # Filter the file's insertion points down to those on this line.
    for point in self.in_file(filename):
        if point.location.line == num:
            logger.debug("found insertion point at line [%s]: %s",
                         str(line), point)
            yield point
def loadPng(varNumVol, tplPngSize, strPathPng):
    """Load PNG files.

    Parameters
    ----------
    varNumVol : float
        Number of volumes, i.e. number of time points in all runs.
    tplPngSize : tuple
        Shape of the stimulus image (i.e. png).
    strPathPng : str
        Path to the folder containing the png files.

    Returns
    -------
    aryPngData : 2d numpy array, shape [png_x, png_y, n_vols]
        Stack of stimulus data.
    """
    print('------Load PNGs')

    # Load png files into a stack of shape [x-pixel, y-pixel, PngNumber].
    # The files are expected to be named '<index>.png' (zero-based).
    # PNGs may carry three values per pixel (RGB), but the stimuli are
    # black-and-white, so the data are binarised below anyway.
    aryPngData = np.zeros((tplPngSize[0], tplPngSize[1], varNumVol))
    for idxVol in range(varNumVol):
        strPthTmp = strPathPng + str(idxVol) + '.png'
        aryPngData[:, :, idxVol] = np.array(Image.open(strPthTmp))

    # Convert RGB values (0 to 255) to integer ones and zeros:
    aryPngData = (aryPngData > 0).astype(int)

    return aryPngData
def loadPrsOrd(vecRunLngth, strPathPresOrd, vecVslStim):
    """Load presentation order of motion directions.

    Parameters
    ----------
    vecRunLngth : list
        Number of volumes in every run.
    strPathPresOrd : str
        Path to the pickled files containing the order of presented motion
        directions.
    vecVslStim : list
        Key of (stimulus) condition presented in every run.

    Returns
    -------
    aryPresOrdAprt : 1d numpy array, shape [n_vols]
        Presentation order of aperture position.
    aryPresOrdMtn : 1d numpy array, shape [n_vols]
        Presentation order of motion direction.
    """
    print('------Load presentation order of motion directions')

    aryPresOrd = np.empty((0, 2))
    for idxRun in range(len(vecRunLngth)):
        # Reconstruct the file name from the stimulus-condition key.
        # ---> consider: some runs were shorter than others (replace next row)
        strPthTmp = (strPathPresOrd + str(vecVslStim[idxRun]) + '.pickle')
        # filename1 = (strPathPresOrd + str(idx01+1) + '.pickle')

        with open(strPthTmp, 'rb') as objHndl:
            dicRun = pickle.load(objHndl)

        # Truncate to the actual run length (runs may be shorter than the
        # stored condition array).
        aryCondTmp = dicRun["Conditions"][:vecRunLngth[idxRun], :]

        aryPresOrd = np.concatenate((aryPresOrd, aryCondTmp), axis=0)

    # Column 0 holds the aperture position, column 1 the motion direction.
    aryPresOrdAprt = aryPresOrd[:, 0].astype(int)
    aryPresOrdMtn = aryPresOrd[:, 1].astype(int)

    return aryPresOrdAprt, aryPresOrdMtn
def crtPwBoxCarFn(varNumVol, aryPngData, aryPresOrd, vecMtDrctn):
    """Create pixel-wise boxcar functions.

    Parameters
    ----------
    varNumVol : int
        Number of volumes (time points).
    aryPngData : 3d numpy array, shape [png_x, png_y, n_vols]
        Binary stimulus stack.
    aryPresOrd : 1d numpy array, shape [n_vols]
        Motion direction presented in each volume.
    vecMtDrctn : sequence
        Motion-direction codes to build boxcars for.

    Returns
    -------
    aryBoxCar : 4d numpy array, shape [png_x, png_y, n_directions, n_vols]
        One pixel-wise boxcar per motion direction: the stimulus data for
        volumes in which that direction was shown, zero elsewhere.
    """
    print('------Create pixel-wise boxcar functions')

    tplSpatial = aryPngData.shape[0:2]
    aryBoxCar = np.empty(tplSpatial + (len(vecMtDrctn), varNumVol),
                         dtype='int64')

    for idxDrctn, varDrctn in enumerate(vecMtDrctn):
        # Volumes during which this motion direction was presented:
        lgcVols = (aryPresOrd == varDrctn)
        # Keep stimulus data only for those volumes; zero everywhere else.
        aryTmp = np.zeros(aryPngData.shape, dtype='int64')
        aryTmp[:, :, lgcVols] = aryPngData[:, :, lgcVols]
        aryBoxCar[:, :, idxDrctn, :] = aryTmp

    return aryBoxCar
def cnvlGauss2D(idxPrc, aryBoxCar, aryMdlParamsChnk, tplPngSize, varNumVol,
                queOut):
    """Spatially convolve boxcar functions with 2D Gaussian pRF models.

    Parameters
    ----------
    idxPrc : int
        Index of this worker process (used to reorder results).
    aryBoxCar : 4d numpy array, shape [x, y, n_directions, n_vols]
        Pixel-wise boxcar functions.
    aryMdlParamsChnk : 2d numpy array
        Chunk of model-parameter combinations; columns 1-3 hold x-position,
        y-position and size (SD) of the Gaussian.
    tplPngSize : tuple
        Spatial size of the stimulus images.
    varNumVol : int
        Number of volumes.
    queOut : multiprocessing.Queue
        Queue the result list [idxPrc, aryOut] is put on.
    """
    # Number of parameter combinations in this chunk and of directions:
    varChnkSze = aryMdlParamsChnk.shape[0]
    varNumMtnDrtn = aryBoxCar.shape[2]

    # Output array with pRF model time courses:
    aryOut = np.zeros([varChnkSze, varNumMtnDrtn, varNumVol])

    for idxMtn in range(varNumMtnDrtn):
        for idxMdl in range(varChnkSze):
            # Spatial parameters of the current model:
            varPosX = aryMdlParamsChnk[idxMdl, 1]
            varPosY = aryMdlParamsChnk[idxMdl, 2]
            varSd = aryMdlParamsChnk[idxMdl, 3]

            # Create the 2D Gaussian pRF model:
            aryGauss = crtGauss2D(tplPngSize[0], tplPngSize[1], varPosX,
                                  varPosY, varSd)

            # Weight pixel time courses by the Gaussian and integrate over
            # space (the 'area under the Gaussian surface'). This is an
            # unscaled version of the pRF time course model (not yet scaled
            # for pRF size).
            aryPrfTc = np.sum(np.multiply(aryBoxCar[:, :, idxMtn, :],
                                          aryGauss[:, :, None]),
                              axis=(0, 1))

            aryOut[idxMdl, idxMtn, :] = aryPrfTc

    # idxPrc lets the caller restore the correct order after the
    # parallelised computation.
    queOut.put([idxPrc, aryOut])
def rsmplInHighRes(aryBoxCarConv, tplPngSize, tplVslSpcHighSze, varNumMtDrctn,
                   varNumVol):
    """Resample pixel-time courses in high-res visual space.

    Parameters
    ----------
    aryBoxCarConv : 4d numpy array, shape [png_x, png_y, n_dirs, n_vols]
        Convolved pixel-time courses at stimulus resolution.
    tplPngSize : tuple
        Spatial size of the stimulus images (assumed square; the same
        coordinate range is used for both axes, as in the original).
    tplVslSpcHighSze : tuple
        Size of the high-resolution visual space.
    varNumMtDrctn : int
        Number of motion directions.
    varNumVol : int
        Number of volumes.

    Returns
    -------
    aryBoxCarConvHigh : 4d numpy array
        Pixel-time courses resampled (nearest-neighbour) onto the
        high-resolution grid.
    """
    # Array for super-sampled pixel-time courses:
    aryBoxCarConvHigh = np.zeros((tplVslSpcHighSze[0], tplVslSpcHighSze[1],
                                  varNumMtDrctn, varNumVol))

    # Coordinates of the original pixels in the flattened value vector
    # (row/column pairs). These are loop-invariant, so they are computed
    # once instead of per volume.
    vecRange = np.arange(0, tplPngSize[0])
    crd2, crd1 = np.meshgrid(vecRange, vecRange)
    aryOrigPixCoo = np.column_stack((crd1.flatten(), crd2.flatten()))

    # Sampling interval for the super-sampled grid. Complex numbers are
    # numpy's convention for inclusive intervals in np.mgrid().
    # FIX: use the builtin complex(); the np.complex alias was deprecated
    # in NumPy 1.20 and removed in NumPy 1.24.
    varStpSzeX = complex(tplVslSpcHighSze[0])
    varStpSzeY = complex(tplVslSpcHighSze[1])

    # Coordinates at which the pixel data are re-sampled:
    aryPixGridX, aryPixGridY = np.mgrid[0:tplPngSize[0]:varStpSzeX,
                                        0:tplPngSize[1]:varStpSzeY]

    for idxMtn in range(varNumMtDrctn):
        for idxVol in range(varNumVol):
            # Original pixel values for this direction/volume:
            vecOrigPixVal = aryBoxCarConv[:, :, idxMtn, idxVol].flatten()

            # The actual resampling:
            aryResampled = griddata(aryOrigPixCoo, vecOrigPixVal,
                                    (aryPixGridX, aryPixGridY),
                                    method='nearest')

            aryBoxCarConvHigh[:, :, idxMtn, idxVol] = aryResampled

    return aryBoxCarConvHigh
def main():
    """pyprf_sim entry point."""
    # NOTE: input arguments could also be read via sys.argv[1:], but since
    # 'argparse' is used below that would be redundant.
    strWelcome = 'pyprf_sim ' + __version__
    strDec = '=' * len(strWelcome)
    print(strDec + '\n' + strWelcome + '\n' + strDec)

    objNspc = get_arg_parse()

    # Without the required config arguments, print usage info instead of
    # running the analysis.
    if any(item is None for item in [objNspc.strCsvPrf, objNspc.strStmApr]):
        print('Please provide necessary file paths, e.g.:')
        print(' pyprf_sim -strCsvPrf /path/to/my_config_file.csv')
        print(' -strStmApr /path/to/my_stim_apertures.npy')
        return

    # Signal non-test mode to lower functions (needed for pytest):
    lgcTest = False

    # Invoke the pRF analysis:
    pyprf_sim(objNspc.strCsvPrf, objNspc.strStmApr, lgcTest=lgcTest,
              lgcNoise=objNspc.lgcNoise, lgcRtnNrl=objNspc.lgcRtnNrl,
              lstRat=objNspc.supsur)
def login_required(wrapped):
    """
    Requires that the user is logged in and authorized to execute requests
    Except if the method is in authorized_methods of the auth_collection
    Then he can execute the requests even not being authorized
    """
    @wraps(wrapped)
    def wrapper(*args, **kwargs):
        request = args[1]
        # Collection name is the lowercased final segment of the dotted
        # settings path.
        auth_collection = settings.AUTH_COLLECTION[
            settings.AUTH_COLLECTION.rfind('.') + 1:
        ].lower()
        auth_document = request.environ.get(auth_collection)

        # Guard clause: reject unauthenticated / unauthorized requests.
        if not (auth_document and auth_document.is_authorized(request)):
            return Response(response=serialize(UnauthorizedError()),
                            status=401)

        setattr(request, auth_collection, auth_document)
        return wrapped(*args, **kwargs)

    # Record this decorator on the wrapper, extending any list the wrapped
    # function already carried (the existing list object is reused, as
    # before).
    wrapper.decorators = getattr(wrapped, 'decorators', [])
    wrapper.decorators.append('login_required')
    return wrapper
def serializable(wrapped):
    """
    If a keyword argument 'serialize' with a True value is passed to the
    wrapped function, the return of the wrapped function will be serialized.
    Nothing happens if the argument is not passed or the value is not True.
    """
    @wraps(wrapped)
    def wrapper(*args, **kwargs):
        # Pop the flag so the wrapped function never sees it.
        do_serialize = kwargs.pop('serialize', False)
        result = wrapped(*args, **kwargs)
        if do_serialize:
            return serialize(result)
        return result

    # Record this decorator, extending any list the wrapped function
    # already carried (the existing list object is reused, as before).
    wrapper.decorators = getattr(wrapped, 'decorators', [])
    wrapper.decorators.append('serializable')
    return wrapper
def deserialize(to_deserialize, *args, **kwargs):
    """ Deserializes a string into a PyMongo BSON """
    if isinstance(to_deserialize, string_types):
        # A 24-digit lowercase hex string is taken to be an ObjectId.
        if re.match('^[0-9a-f]{24}$', to_deserialize):
            return ObjectId(to_deserialize)
        try:
            return bson_loads(to_deserialize, *args, **kwargs)
        except Exception:
            # FIX: narrowed from a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt. The string is not valid BSON
            # text, so round-trip it through the serializer instead.
            return bson_loads(bson_dumps(to_deserialize), *args, **kwargs)
    else:
        # Non-string input: round-trip through the serializer.
        return bson_loads(bson_dumps(to_deserialize), *args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _doAtomicFileCreation(filePath): """Tries to atomically create the requested file."""
try: _os.close(_os.open(filePath, _os.O_CREAT | _os.O_EXCL)) return True except OSError as e: if e.errno == _errno.EEXIST: return False else: raise e
def findNextFile(folder='.', prefix=None, suffix=None, fnameGen=None, base=0,
                 maxattempts=10):
    """Finds the next available file-name in a sequence.

    Creates a zero-size file at the next free name in the sequence and
    returns its path; existing files are never altered, and concurrent
    calls return distinct files. With prefix/suffix, gaps in the sequence
    are ignored (after "a.0.txt" and "a.3.txt" comes "a.4.txt"); with
    fnameGen, the first generated name that does not exist is used
    ("a.1.txt" in the same situation).

    Args:
        folder      - folder in which the file is created (default: '.')
        prefix      - prefix of the file to be generated (default: '')
        suffix      - suffix of the file to be generated (default: '')
        fnameGen    - function mapping a number to a filename (default: None)
        base        - first index to count (default: 0)
        maxattempts - attempts before failing with OSError (default: 10)

    Returns:
        Path of a newly created file following the requested pattern.

    Raises:
        RuntimeError - incorrect combination of arguments.
        OSError      - unable to create a file.
    """
    # Expand environment variables first, then '~', before delegating to
    # the recursive worker (attempt counter starts at 0).
    expFolder = _os.path.expandvars(folder)
    expFolder = _os.path.expanduser(expFolder)
    return _findNextFile(expFolder, prefix, suffix, fnameGen, base,
                         maxattempts, 0)
def _errstr(value):
    """Returns the value str, truncated to MAX_ERROR_STR_LEN characters,
    with '...' appended when truncation happened."""
    # We won't make the caller convert value to a string each time.
    value = str(value)
    if len(value) <= MAX_ERROR_STR_LEN:
        return value
    return value[:MAX_ERROR_STR_LEN] + '...'
def _raiseValidationException(standardExcMsg, customExcMsg=None):
    """Raise ValidationException with standardExcMsg, unless customExcMsg
    is specified."""
    message = standardExcMsg if customExcMsg is None else customExcMsg
    raise ValidationException(str(message))
def _validateGenericParameters(blank, strip, allowlistRegexes, blocklistRegexes):
    """Returns None if the blank, strip, allowlistRegexes, and
    blocklistRegexes parameters are valid for PySimpleValidate's validation
    functions. Raises a PySimpleValidateException if any of the arguments
    are invalid."""
    # Check blank parameter.
    if not isinstance(blank, bool):
        raise PySimpleValidateException('blank argument must be a bool')

    # Check strip parameter.
    if not isinstance(strip, (bool, str, type(None))):
        raise PySimpleValidateException('strip argument must be a bool, None, or str')

    # Check allowlistRegexes parameter (including each regex in it).
    if allowlistRegexes is None:
        allowlistRegexes = []  # allowlistRegexes defaults to a blank list.

    try:
        len(allowlistRegexes)  # Make sure allowlistRegexes is a sequence.
    except TypeError:
        # Narrowed from a bare 'except:'; len() raises TypeError here.
        raise PySimpleValidateException('allowlistRegexes must be a sequence of regex_strs')
    for response in allowlistRegexes:
        # FIX: check the item itself, not its first character. The old
        # 'response[0]' test raised IndexError for an empty-string regex
        # and wrongly accepted tuples whose first element was a str.
        if not isinstance(response, str):
            raise PySimpleValidateException('allowlistRegexes must be a sequence of regex_strs')

    # Check blocklistRegexes parameter (including each regex in it).
    # NOTE: blocklistRegexes is NOT the same format as allowlistRegexes; it
    # can include an "invalid input reason" string to display if the input
    # matches the blocklist regex.
    if blocklistRegexes is None:
        blocklistRegexes = []  # blocklistRegexes defaults to a blank list.

    try:
        len(blocklistRegexes)  # Make sure it is a sequence of (regex_str, str) or strs.
    except TypeError:
        raise PySimpleValidateException('blocklistRegexes must be a sequence of (regex_str, str) tuples or regex_strs')
    for response in blocklistRegexes:
        if isinstance(response, str):
            continue
        if len(response) != 2:
            raise PySimpleValidateException('blocklistRegexes must be a sequence of (regex_str, str) tuples or regex_strs')
        if not isinstance(response[0], str) or not isinstance(response[1], str):
            raise PySimpleValidateException('blocklistRegexes must be a sequence of (regex_str, str) tuples or regex_strs')
def validateNum(value, blank=False, strip=None, allowlistRegexes=None,
                blocklistRegexes=None, _numType='num', min=None, max=None,
                lessThan=None, greaterThan=None, excMsg=None):
    """Raises ValidationException if value is not a float or int.

    Returns the value converted to a number, so it can be used inline in an
    expression: print(2 + validateNum(your_number)). Leading/trailing
    whitespace is ignored, as with int()/float().

    * value (str): The value being validated as an int or float.
    * blank (bool): If True, a blank string will be accepted.
    * strip (bool, str, None): If None, whitespace is stripped from value.
      If a str, the characters in it are stripped. If False, nothing is
      stripped.
    * allowlistRegexes (Sequence, None): Regex strs that explicitly pass
      validation, even if they aren't numbers.
    * blocklistRegexes (Sequence, None): Regex strs or (regex_str,
      response_str) tuples that, if matched, explicitly fail validation.
    * _numType (str): One of 'num', 'int', or 'float'; 'num' means int or
      float.
    * min / max (int, float): Inclusive bounds for the value.
    * lessThan / greaterThan (int, float): Exclusive bounds for the value.
    * excMsg (str): Custom message for the raised ValidationException.

    Specifying min together with lessThan (or max with greaterThan) raises
    PySimpleValidateException.
    """
    # Validate parameters.
    # FIX: forward the caller's allowlistRegexes (it was hard-coded to
    # None, so an invalid allowlist argument was never reported).
    _validateGenericParameters(blank=blank, strip=strip,
                               allowlistRegexes=allowlistRegexes,
                               blocklistRegexes=blocklistRegexes)
    _validateParamsFor_validateNum(min=min, max=max, lessThan=lessThan,
                                   greaterThan=greaterThan)

    returnNow, value = _prevalidationCheck(value, blank, strip,
                                           allowlistRegexes,
                                           blocklistRegexes, excMsg)
    if returnNow:
        # If we can convert value to an int/float, then do so. For example,
        # if an allowlist regex allows '42', then we should return 42/42.0.
        if (_numType == 'num' and '.' in value) or (_numType == 'float'):
            try:
                return float(value)
            except ValueError:
                return value  # Return the value as is.
        if (_numType == 'num' and '.' not in value) or (_numType == 'int'):
            try:
                return int(value)
            except ValueError:
                return value  # Return the value as is.

    # Validate the value's type (and convert value back to a number type).
    # The bare 'except:' clauses below are narrowed to (TypeError,
    # ValueError), which is what int()/float() raise for bad input.
    if _numType == 'num' and '.' in value:
        # Expecting a "num" (float or int) and the user entered a float.
        try:
            value = float(value)
        except (TypeError, ValueError):
            _raiseValidationException(_('%r is not a number.') % (_errstr(value)), excMsg)
    elif _numType == 'num' and '.' not in value:
        # Expecting a "num" (float or int) and the user entered an int.
        try:
            value = int(value)
        except (TypeError, ValueError):
            _raiseValidationException(_('%r is not a number.') % (_errstr(value)), excMsg)
    elif _numType == 'float':
        try:
            value = float(value)
        except (TypeError, ValueError):
            _raiseValidationException(_('%r is not a float.') % (_errstr(value)), excMsg)
    elif _numType == 'int':
        try:
            if float(value) % 1 != 0:
                # The number is a float that doesn't end with ".0".
                _raiseValidationException(_('%r is not an integer.') % (_errstr(value)), excMsg)
            value = int(float(value))
        except (TypeError, ValueError):
            _raiseValidationException(_('%r is not an integer.') % (_errstr(value)), excMsg)

    # Validate against min argument.
    if min is not None and value < min:
        _raiseValidationException(_('Number must be at minimum %s.') % (min), excMsg)

    # Validate against max argument.
    if max is not None and value > max:
        _raiseValidationException(_('Number must be at maximum %s.') % (max), excMsg)

    # Validate against lessThan argument.
    if lessThan is not None and value >= lessThan:
        _raiseValidationException(_('Number must be less than %s.') % (lessThan), excMsg)

    # Validate against greaterThan argument.
    if greaterThan is not None and value <= greaterThan:
        _raiseValidationException(_('Number must be greater than %s.') % (greaterThan), excMsg)

    return value
def validateInt(value, blank=False, strip=None, allowlistRegexes=None,
                blocklistRegexes=None, min=None, max=None, lessThan=None,
                greaterThan=None, excMsg=None):
    """Raises ValidationException if value is not an int.

    Returns the value as an int, so it can be used inline in an expression:
    print(2 + validateInt(your_number)). Leading/trailing whitespace is
    ignored, as with int().

    * value (str): The value being validated as an int.
    * blank (bool): If True, a blank string will be accepted.
    * strip (bool, str, None): If None, whitespace is stripped from value.
      If a str, the characters in it are stripped. If False, nothing is
      stripped.
    * allowlistRegexes (Sequence, None): Regex strs that explicitly pass
      validation, even if they aren't numbers.
    * blocklistRegexes (Sequence, None): Regex strs or (regex_str,
      response_str) tuples that, if matched, explicitly fail validation.
    * min / max (int, float): Inclusive bounds for the value.
    * lessThan / greaterThan (int, float): Exclusive bounds for the value.
    * excMsg (str): Custom message for the raised ValidationException.

    Specifying min together with lessThan (or max with greaterThan) raises
    PySimpleValidateException.
    """
    # FIX: forward allowlistRegexes and excMsg. Previously allowlistRegexes
    # was hard-coded to None (the caller's allowlist was silently ignored)
    # and excMsg was dropped (custom messages never appeared).
    return validateNum(value=value, blank=blank, strip=strip,
                       allowlistRegexes=allowlistRegexes,
                       blocklistRegexes=blocklistRegexes, _numType='int',
                       min=min, max=max, lessThan=lessThan,
                       greaterThan=greaterThan, excMsg=excMsg)
def validateChoice(value, choices, blank=False, strip=None,
                   allowlistRegexes=None, blocklistRegexes=None,
                   numbered=False, lettered=False, caseSensitive=False,
                   excMsg=None):
    """Raises ValidationException if value is not one of the values in
    choices. Returns the selected choice.

    Returns the value in choices that was selected, so it can be used
    inline in an expression: validateChoice('CAT', ['cat', 'dog']) returns
    'cat', not 'CAT'. If lettered is True, lower or uppercase letters will
    be accepted regardless of caseSensitive; caseSensitive only matters for
    matching the text of the strings in choices.

    * value (str): The value being validated.
    * choices (Sequence): The valid choices.
    * blank (bool): If True, a blank string will be accepted.
    * strip (bool, str, None): If None, whitespace is stripped from value.
      If a str, the characters in it are stripped. If False, nothing is
      stripped.
    * allowlistRegexes (Sequence, None): Regex strs that explicitly pass
      validation.
    * blocklistRegexes (Sequence, None): Regex strs or (regex_str,
      response_str) tuples that, if matched, explicitly fail validation.
    * numbered (bool): If True, also accept the choice's number, i.e. '1'.
    * lettered (bool): If True, also accept the choice's letter, i.e. 'A'
      or 'a'.
    * caseSensitive (bool): If True, the exact case must be entered.
    * excMsg (str): Custom message for the raised ValidationException.
    """
    # Validate parameters.
    # FIX: forward the caller's allowlistRegexes (it was hard-coded to
    # None, so an invalid allowlist argument was never reported).
    _validateParamsFor_validateChoice(choices=choices, blank=blank,
                                      strip=strip,
                                      allowlistRegexes=allowlistRegexes,
                                      blocklistRegexes=blocklistRegexes,
                                      numbered=numbered, lettered=lettered,
                                      caseSensitive=caseSensitive)

    if '' in choices:
        # blank needs to be set to True here, otherwise '' won't be
        # accepted as a choice.
        blank = True

    returnNow, value = _prevalidationCheck(value, blank, strip,
                                           allowlistRegexes,
                                           blocklistRegexes, excMsg)
    if returnNow:
        return value

    # Validate against choices.
    if value in choices:
        return value
    if numbered and value.isdigit() and 0 < int(value) <= len(choices):
        # value must be 1 to len(choices); numbered options begin at 1,
        # not 0, hence the -1 when indexing.
        return choices[int(value) - 1]
    if lettered and len(value) == 1 and value.isalpha() and \
            0 < ord(value.upper()) - 64 <= len(choices):
        # Lettered options are always case-insensitive ('A' is ord 65).
        return choices[ord(value.upper()) - 65]
    if not caseSensitive and value.upper() in [choice.upper() for choice in choices]:
        # Return the original item in choices that value has a
        # case-insensitive match with.
        return choices[[choice.upper() for choice in choices].index(value.upper())]

    _raiseValidationException(_('%r is not a valid choice.') % (_errstr(value)), excMsg)
def validateTime(value, blank=False, strip=None, allowlistRegexes=None,
                 blocklistRegexes=None, formats=('%H:%M:%S', '%H:%M', '%X'),
                 excMsg=None):
    """Raises ValidationException if value is not a time formatted in one
    of the formats. Returns a datetime.time object of value.

    * value (str): The value being validated as a time.
    * blank (bool): If True, a blank string will be accepted.
    * strip (bool, str, None): If None, whitespace is stripped from value.
      If a str, the characters in it are stripped. If False, nothing is
      stripped.
    * allowlistRegexes (Sequence, None): Regex strs that explicitly pass
      validation.
    * blocklistRegexes (Sequence, None): Regex strs or (regex_str,
      response_str) tuples that, if matched, explicitly fail validation.
    * formats: Tuple of strptime-style format strings for a valid time.
    * excMsg (str): Custom message for the raised ValidationException.
    """
    # Reuse the logic in _validateToDateTimeFormat() for this function.
    try:
        dt = _validateToDateTimeFormat(value, formats, blank=blank,
                                       strip=strip,
                                       allowlistRegexes=allowlistRegexes,
                                       blocklistRegexes=blocklistRegexes)
    except ValidationException:
        _raiseValidationException(_('%r is not a valid time.') % (_errstr(value)), excMsg)
    else:
        # Discard the date portion; only the time of day is returned.
        return datetime.time(dt.hour, dt.minute, dt.second, dt.microsecond)
def validateDate(value, blank=False, strip=None, allowlistRegexes=None,
                 blocklistRegexes=None,
                 formats=('%Y/%m/%d', '%y/%m/%d', '%m/%d/%Y', '%m/%d/%y', '%x'),
                 excMsg=None):
    """Raises ValidationException if value is not a date formatted in one
    of the formats. Returns a datetime.date object of value.

    * value (str): The value being validated as a date.
    * blank (bool): If True, a blank string for value will be accepted.
    * strip (bool, str, None): If None, whitespace is stripped from value.
      If a str, the characters in it are stripped. If False, nothing is
      stripped.
    * allowlistRegexes (Sequence, None): Regex strs that explicitly pass
      validation.
    * blocklistRegexes (Sequence, None): Regex strs or (regex_str,
      response_str) tuples that, if matched, explicitly fail validation.
    * formats: Tuple of strptime-style format strings for a valid date.
    * excMsg (str): Custom message for the raised ValidationException.
    """
    # Reuse the logic in _validateToDateTimeFormat() for this function.
    try:
        dt = _validateToDateTimeFormat(value, formats, blank=blank,
                                       strip=strip,
                                       allowlistRegexes=allowlistRegexes,
                                       blocklistRegexes=blocklistRegexes)
    except ValidationException:
        _raiseValidationException(_('%r is not a valid date.') % (_errstr(value)), excMsg)
    else:
        # Discard the time portion; only the calendar date is returned.
        return datetime.date(dt.year, dt.month, dt.day)
def validateDatetime(value, blank=False, strip=None, allowlistRegexes=None,
                     blocklistRegexes=None,
                     formats=('%Y/%m/%d %H:%M:%S', '%y/%m/%d %H:%M:%S',
                              '%m/%d/%Y %H:%M:%S', '%m/%d/%y %H:%M:%S',
                              '%x %H:%M:%S', '%Y/%m/%d %H:%M',
                              '%y/%m/%d %H:%M', '%m/%d/%Y %H:%M',
                              '%m/%d/%y %H:%M', '%x %H:%M',
                              '%Y/%m/%d %H:%M:%S', '%y/%m/%d %H:%M:%S',
                              '%m/%d/%Y %H:%M:%S', '%m/%d/%y %H:%M:%S',
                              '%x %H:%M:%S'),
                     excMsg=None):
    """Raises ValidationException if value is not a datetime formatted in
    one of the formats. Returns a datetime.datetime object of value.

    * value (str): The value being validated as a datetime.
    * blank (bool): If True, a blank string will be accepted.
    * strip (bool, str, None): If None, whitespace is stripped from value.
      If a str, the characters in it are stripped. If False, nothing is
      stripped.
    * allowlistRegexes (Sequence, None): Regex strs that explicitly pass
      validation.
    * blocklistRegexes (Sequence, None): Regex strs or (regex_str,
      response_str) tuples that, if matched, explicitly fail validation.
    * formats: Tuple of strptime-style format strings for a valid datetime.
      NOTE(review): the default tuple repeats the '%H:%M:%S' variants;
      duplicates are harmless no-ops for strptime, and they are kept here
      to preserve the published default value.
    * excMsg (str): Custom message for the raised ValidationException.
    """
    # Reuse the logic in _validateToDateTimeFormat() for this function.
    try:
        return _validateToDateTimeFormat(value, formats, blank=blank,
                                         strip=strip,
                                         allowlistRegexes=allowlistRegexes,
                                         blocklistRegexes=blocklistRegexes)
    except ValidationException:
        _raiseValidationException(_('%r is not a valid date and time.') % (_errstr(value)), excMsg)
def validateIP(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None):
    """Raises ValidationException if value is not an IPv4 or IPv6 address.
    Returns the value argument.

    * value (str): The value being validated as an IP address.
    * blank (bool): If True, a blank string is accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, those characters are stripped. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): Regex strings that explicitly pass validation.
    * blocklistRegexes (Sequence, None): Regex strings or (regex_str, response_str) tuples that explicitly fail validation.
    * excMsg (str): A custom message to use in the raised ValidationException.
    """
    # Validate parameters.
    _validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)

    returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
    if returnNow:
        return value

    # Reuse the logic in validateRegex().
    try:
        # Check if value is an IPv4 address.
        try:
            if validateRegex(value=value, regex=IPV4_REGEX, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes):
                return value
        except ValidationException:
            # BUGFIX: this was a bare `except:` that swallowed every exception,
            # including PySimpleValidateException programmer errors. Only an
            # IPv4 validation failure should fall through to the IPv6 check.
            pass

        # Check if value is an IPv6 address.
        if validateRegex(value=value, regex=IPV6_REGEX, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes):
            return value
    except ValidationException:
        _raiseValidationException(_('%r is not a valid IP address.') % (_errstr(value)), excMsg)
def validateRegex(value, regex, flags=0, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None):
    """Raises ValidationException if value does not match the regular
    expression regex. Returns the matched portion of value.

    Unlike the allowlistRegexes keyword of the input functions, this lets you
    pass regex flags such as re.IGNORECASE or re.VERBOSE, or a compiled regex
    object directly. To validate that a string IS a regex, use
    validateRegexStr().

    * value (str): The value being validated.
    * regex (str, regex): The regular expression to match value against.
    * flags (int): Same as the flags argument of re.compile(). Ignored when
      regex is already a compiled pattern.
    * blank (bool): If True, a blank string is accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, those characters are stripped. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): Regex strings that explicitly pass validation.
    * blocklistRegexes (Sequence, None): Regex strings or (regex_str, response_str) tuples that explicitly fail validation.
    * excMsg (str): A custom message to use in the raised ValidationException.
    """
    # Validate parameters.
    _validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)

    returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
    if returnNow:
        return value

    # Accept either a pattern string or a pre-compiled regex object.
    if isinstance(regex, str):
        # TODO - check flags to see they're valid regex flags.
        matchObj = re.compile(regex, flags).search(value)
    elif isinstance(regex, REGEX_TYPE):
        matchObj = regex.search(value)
    else:
        raise PySimpleValidateException('regex must be a str or regex object')

    if matchObj is None:
        _raiseValidationException(_('%r does not match the specified pattern.') % (_errstr(value)), excMsg)
    return matchObj.group()
def validateRegexStr(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None):
    """Raises ValidationException if value cannot be compiled as a regular
    expression. Returns the compiled regex object for value.

    To check whether a string MATCHES a regex, use validateRegex() instead.

    * value (str): The value being validated as a regular expression string.
    * blank (bool): If True, a blank string is accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, those characters are stripped. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): Regex strings that explicitly pass validation.
    * blocklistRegexes (Sequence, None): Regex strings or (regex_str, response_str) tuples that explicitly fail validation.
    * excMsg (str): A custom message to use in the raised ValidationException.
    """
    # TODO - it'd be nice to check regexes in other languages, i.e. JS and Perl.
    _validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)

    returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
    if returnNow:
        return value

    try:
        compiledRegex = re.compile(value)
    except Exception as err:
        # re.compile can raise re.error among others; report whatever it said.
        _raiseValidationException(_('%r is not a valid regular expression: %s') % (_errstr(value), err), excMsg)
    else:
        return compiledRegex
def validateURL(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None):
    """Raises ValidationException if value is not a URL. Returns the value
    argument. The "http" or "https" protocol part of the URL is optional, and
    'localhost' is also accepted.

    * value (str): The value being validated as a URL.
    * blank (bool): If True, a blank string is accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, those characters are stripped. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): Regex strings that explicitly pass validation.
    * blocklistRegexes (Sequence, None): Regex strings or (regex_str, response_str) tuples that explicitly fail validation.
    * excMsg (str): A custom message to use in the raised ValidationException.
    """
    # Reuse the logic in validateRegex()
    try:
        result = validateRegex(value=value, regex=URL_REGEX, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)
        if result is not None:
            return result
    except ValidationException:
        # 'localhost' is also an acceptable URL:
        if value == 'localhost':
            return value

        # BUGFIX: format with _errstr(value) (which truncates/escapes like every
        # other validator here) instead of the raw value.
        _raiseValidationException(_('%r is not a valid URL.') % (_errstr(value)), excMsg)
def validateEmail(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None):
    """Raises ValidationException if value is not an email address. Returns
    the value argument.

    * value (str): The value being validated as an email address.
    * blank (bool): If True, a blank string is accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, those characters are stripped. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): Regex strings that explicitly pass validation.
    * blocklistRegexes (Sequence, None): Regex strings or (regex_str, response_str) tuples that explicitly fail validation.
    * excMsg (str): A custom message to use in the raised ValidationException.
    """
    # Reuse the logic in validateRegex()
    try:
        result = validateRegex(value=value, regex=EMAIL_REGEX, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)
        if result is not None:
            return result
    except ValidationException:
        # BUGFIX: format with _errstr(value) for consistency with the other
        # validators (raw value was used before).
        _raiseValidationException(_('%r is not a valid email address.') % (_errstr(value)), excMsg)
def validateYesNo(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, yesVal='yes', noVal='no', caseSensitive=False, excMsg=None):
    """Raises ValidationException if value is not a yes or no response.
    Returns the yesVal or noVal argument (not value). value may match either
    the full yesVal/noVal or just its first letter, in any case unless
    caseSensitive is True.

    * value (str): The value being validated as a yes/no response.
    * blank (bool): If True, a blank string is accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, those characters are stripped. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): Regex strings that explicitly pass validation.
    * blocklistRegexes (Sequence, None): Regex strings or (regex_str, response_str) tuples that explicitly fail validation.
    * yesVal (str): The canonical "yes" answer. Defaults to 'yes'.
    * noVal (str): The canonical "no" answer. Defaults to 'no'.
    * caseSensitive (bool): If True, value must match the case of yesVal/noVal. Defaults to False.
    * excMsg (str): A custom message to use in the raised ValidationException.
    """
    # Validate parameters.
    _validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)

    returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
    if returnNow:
        return value

    # yesVal and noVal must be non-empty, distinct, and start with distinct
    # letters (because a single-letter response is accepted).
    yesVal = str(yesVal)
    noVal = str(noVal)
    if len(yesVal) == 0:
        raise PySimpleValidateException('yesVal argument must be a non-empty string.')
    if len(noVal) == 0:
        raise PySimpleValidateException('noVal argument must be a non-empty string.')
    if (yesVal == noVal) or (not caseSensitive and yesVal.upper() == noVal.upper()):
        raise PySimpleValidateException('yesVal and noVal arguments must be different.')
    if (yesVal[0] == noVal[0]) or (not caseSensitive and yesVal[0].upper() == noVal[0].upper()):
        raise PySimpleValidateException('first character of yesVal and noVal arguments must be different')

    # BUGFIX: a second, redundant _prevalidationCheck() call that appeared here
    # has been removed; value was already checked and stripped above.

    if caseSensitive:
        if value in (yesVal, yesVal[0]):
            return yesVal
        elif value in (noVal, noVal[0]):
            return noVal
    else:
        if value.upper() in (yesVal.upper(), yesVal[0].upper()):
            return yesVal
        elif value.upper() in (noVal.upper(), noVal[0].upper()):
            return noVal

    _raiseValidationException(_('%r is not a valid %s/%s response.') % (_errstr(value), yesVal, noVal), excMsg)
def validateBool(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, trueVal='True', falseVal='False', caseSensitive=False, excMsg=None):
    """Raises ValidationException if value is not a true or false response.
    Returns the bool True or False (not the trueVal/falseVal strings).

    * value (str): The value being validated as a true/false response.
    * blank (bool): If True, a blank string is accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, those characters are stripped. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): Regex strings that explicitly pass validation.
    * blocklistRegexes (Sequence, None): Regex strings or (regex_str, response_str) tuples that explicitly fail validation.
    * trueVal (str): The canonical "true" answer. Defaults to 'True'.
    * falseVal (str): The canonical "false" answer. Defaults to 'False'.
    * caseSensitive (bool): If True, value must match the case of trueVal/falseVal. Defaults to False.
    * excMsg (str): A custom message to use in the raised ValidationException.
    """
    # Validate parameters. TODO - can probably improve this to remove the duplication.
    _validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)

    returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
    if returnNow:
        return value

    # Check trueVal/falseVal here so the exception messages name the right
    # parameters (validateYesNo() would otherwise report yesVal/noVal).
    trueVal = str(trueVal)
    falseVal = str(falseVal)
    if len(trueVal) == 0:
        raise PySimpleValidateException('trueVal argument must be a non-empty string.')
    if len(falseVal) == 0:
        raise PySimpleValidateException('falseVal argument must be a non-empty string.')
    # BUGFIX: these messages previously said "trueVal and noVal"; the
    # parameters of this function are trueVal and falseVal.
    if (trueVal == falseVal) or (not caseSensitive and trueVal.upper() == falseVal.upper()):
        raise PySimpleValidateException('trueVal and falseVal arguments must be different.')
    if (trueVal[0] == falseVal[0]) or (not caseSensitive and trueVal[0].upper() == falseVal[0].upper()):
        raise PySimpleValidateException('first character of trueVal and falseVal arguments must be different')

    result = validateYesNo(value, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes, yesVal=trueVal, noVal=falseVal, caseSensitive=caseSensitive, excMsg=None)

    # Return a bool value instead of a string.
    if result == trueVal:
        return True
    elif result == falseVal:
        return False
    else:
        assert False, 'inner validateYesNo() call returned something that was not yesVal or noVal. This should never happen.'
def validateState(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None, returnStateName=False):
    """Raises ValidationException if value is not a USA state name or
    abbreviation. Returns the uppercase two-letter abbreviation, or the
    titlecased full state name when returnStateName is True.

    * value (str): The value being validated as a USA state.
    * blank (bool): If True, a blank string is accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, those characters are stripped. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): Regex strings that explicitly pass validation.
    * blocklistRegexes (Sequence, None): Regex strings or (regex_str, response_str) tuples that explicitly fail validation.
    * excMsg (str): A custom message to use in the raised ValidationException.
    * returnStateName (bool): If True, return the full state name, e.g. 'California'; otherwise the abbreviation, e.g. 'CA'. Defaults to False.
    """
    # TODO - note that this is USA-centric. I should work on trying to make this more international.

    # Validate parameters.
    _validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)

    returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
    if returnNow:
        return value

    abbreviation = value.upper()
    titlecased = value.title()

    if abbreviation in USA_STATES_UPPER.keys():
        # value is a state abbreviation.
        return USA_STATES[abbreviation] if returnStateName else abbreviation
    if titlecased in USA_STATES.values():
        # value is a full state name.
        return titlecased if returnStateName else USA_STATES_REVERSED[titlecased]

    _raiseValidationException(_('%r is not a state.') % (_errstr(value)), excMsg)
def validateMonth(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, monthNames=ENGLISH_MONTHS, excMsg=None):
    """Raises ValidationException if value is not a month name, month
    abbreviation, or (for the default English month set) a number 1 to 12.
    Returns the titlecased full month name, e.g. 'January'.

    * value (str): The value being validated as a month.
    * blank (bool): If True, a blank string is accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, those characters are stripped. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): Regex strings that explicitly pass validation.
    * blocklistRegexes (Sequence, None): Regex strings or (regex_str, response_str) tuples that explicitly fail validation.
    * monthNames (dict): Maps uppercase 3-letter abbreviations to names. Defaults to ENGLISH_MONTHS.
    * excMsg (str): A custom message to use in the raised ValidationException.
    """
    # Validate parameters.
    _validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)

    returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
    if returnNow:
        return value

    # Numeric input (1-12) is only honored for actual months, not when
    # validateDayOfWeek() reuses this function with day names.
    if monthNames == ENGLISH_MONTHS:
        try:
            monthNum = int(value)
        except (ValueError, TypeError):
            pass  # Not a number; fall through to name matching.
        else:
            if 1 <= monthNum <= 12:
                return ENGLISH_MONTH_NAMES[monthNum - 1]

    # Both month names and month abbreviations are at least 3 characters.
    if len(value) >= 3:
        abbreviation = value[:3].upper()
        if abbreviation in monthNames.keys():
            return monthNames[abbreviation]  # value is a month abbreviation.
        if value.upper() in monthNames.values():
            return value.title()  # value is a full month name.

    _raiseValidationException(_('%r is not a month.') % (_errstr(value)), excMsg)
def validateDayOfWeek(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, dayNames=ENGLISH_DAYS_OF_WEEK, excMsg=None):
    """Raises ValidationException if value is not a day of the week, such as
    'Mon' or 'Friday'. Returns the titlecased full day name, e.g. 'Sunday'.

    * value (str): The value being validated as a day of the week.
    * blank (bool): If True, a blank string is accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, those characters are stripped. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): Regex strings that explicitly pass validation.
    * blocklistRegexes (Sequence, None): Regex strings or (regex_str, response_str) tuples that explicitly fail validation.
    * dayNames (dict): Maps uppercase 3-letter abbreviations to full day names. Defaults to ENGLISH_DAYS_OF_WEEK.
    * excMsg (str): A custom message to use in the raised ValidationException.
    """
    # TODO - reuse validateChoice for this function

    # Reuses validateMonth()'s name/abbreviation matching with day names.
    try:
        # BUGFIX: the dayNames parameter was previously ignored and
        # ENGLISH_DAYS_OF_WEEK was always passed, making custom day names
        # impossible.
        return validateMonth(value, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes, monthNames=dayNames)
    except ValidationException:
        # Replace the month-oriented exception message with a day-of-week one.
        # (Narrowed from a bare except so programmer errors still propagate.)
        _raiseValidationException(_('%r is not a day of the week') % (_errstr(value)), excMsg)
def validateDayOfMonth(value, year, month, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None):
    """Raises ValidationException if value is not a day of the month, from 1
    to 28, 29, 30, or 31 depending on the month and year. Returns value as an
    int.

    * value (str): The value being validated as a numbered day in the given year and month.
    * year (int): The given year.
    * month (int): The given month. 1 is January, 2 is February, and so on.
    * blank (bool): If True, a blank string is accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, those characters are stripped. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): Regex strings that explicitly pass validation.
    * blocklistRegexes (Sequence, None): Regex strings or (regex_str, response_str) tuples that explicitly fail validation.
    * excMsg (str): A custom message to use in the raised ValidationException.
    """
    # monthrange() handles leap years; [1] is the number of days in the month.
    try:
        daysInMonth = calendar.monthrange(year, month)[1]
    except Exception:
        raise PySimpleValidateException('invalid arguments for year and/or month')

    try:
        return validateInt(value, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes, min=1, max=daysInMonth)
    except ValidationException:
        # BUGFIX: narrowed from a bare except. Previously a
        # PySimpleValidateException from validateInt() (a programmer error)
        # was silently converted into a user-facing ValidationException.
        _raiseValidationException(_('%r is not a day in the month of %s %s') % (_errstr(value), ENGLISH_MONTH_NAMES[month - 1], year), excMsg)
def get_level(level_string):
    """Return the logging module's integer constant for a level name.

    The lookup is case-insensitive ('debug', 'DEBUG', and 'Debug' all map to
    logging.DEBUG). An unrecognized name terminates the program via sys.exit
    with an explanatory message.
    """
    name_to_level = {'debug': logging.DEBUG,
                     'info': logging.INFO,
                     'warning': logging.WARNING,
                     'error': logging.ERROR,
                     'critical': logging.CRITICAL}
    key = level_string.lower()
    if key not in name_to_level:
        # Report the name exactly as the caller supplied it.
        sys.exit('{0} is not a recognized logging level'.format(level_string))
    return name_to_level[key]
def config_logging(no_log_file, log_to, log_level, silent, verbosity):
    """Configure the parent 'openaccess_epub' logger for console-script use.

    no_log_file   Boolean. Disables file logging entirely; log_to and
                  log_level are then irrelevant.
    log_to        File path for the FileHandler, or None.
    log_level     File logging level name: 'debug', 'info', 'warning',
                  'error', or 'critical'.
    silent        Boolean. Suppresses the console StreamHandler.
    verbosity     Console logging level name (same choices as log_level).

    The console handler uses a message-only Formatter; the file handler uses
    the standard Formatter.
    """
    file_level = get_level(log_level)
    console_level = get_level(verbosity)

    # Configure 'openaccess_epub' as the parent log; handlers do the level
    # filtering, so the logger itself stays wide open at DEBUG.
    log = logging.getLogger('openaccess_epub')
    log.setLevel(logging.DEBUG)

    standard_fmt = logging.Formatter(STANDARD_FORMAT)
    message_only_fmt = logging.Formatter(MESSAGE_ONLY_FORMAT)

    # A FileHandler is added only if allowed AND a destination was given.
    if not no_log_file and log_to is not None:
        file_handler = logging.FileHandler(filename=log_to)
        file_handler.setLevel(file_level)
        file_handler.setFormatter(standard_fmt)
        log.addHandler(file_handler)

    # Console StreamHandler at the verbosity level, unless silenced.
    if not silent:
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setLevel(console_level)
        console_handler.setFormatter(message_only_fmt)
        log.addHandler(console_handler)
def replace_filehandler(logname, new_file, level=None, frmt=None):
    """Swap a Logger's FileHandler (if any) for a new one writing to new_file.

    Parameters:
      logname   Name of the log to reconfigure, e.g. 'openaccess_epub'.
      new_file  File location for the new FileHandler.
      level     Optional level name string ('INFO', etc.) for the new handler.
                If omitted, the old handler's level is inherited; if there is
                no old handler, logging.DEBUG is used.
      frmt      Optional Formatter format string ('%(message)s', etc.). If
                omitted, the old handler's Formatter is inherited; if there is
                no old handler, STANDARD_FORMAT is used.

    Passing level and frmt explicitly is best practice, since the defaults
    only apply when no previous FileHandler exists.
    """
    log = logging.getLogger(logname)

    # Resolve level/frmt; remember whether each was given explicitly so an
    # existing handler's settings can be inherited otherwise.
    level_explicit = level is not None
    resolved_level = get_level(level) if level_explicit else logging.DEBUG

    frmt_explicit = frmt is not None
    resolved_frmt = logging.Formatter(frmt if frmt_explicit else STANDARD_FORMAT)

    # Locate an existing FileHandler (exact type match, deliberately not
    # isinstance, so FileHandler subclasses are left alone) and inherit its
    # settings where ours were not explicit.
    old_filehandler = None
    for handler in log.handlers:
        if type(handler) == logging.FileHandler:
            old_filehandler = handler
            if not level_explicit:
                resolved_level = handler.level
            if not frmt_explicit:
                resolved_frmt = handler.formatter
            break

    # Add the replacement before removing the old handler.
    new_filehandler = logging.FileHandler(new_file)
    new_filehandler.setLevel(resolved_level)
    new_filehandler.setFormatter(resolved_frmt)
    log.addHandler(new_filehandler)

    if old_filehandler is not None:
        old_filehandler.close()
        log.removeHandler(old_filehandler)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rmp_pixel_deg_xys(vecX, vecY, vecPrfSd, tplPngSize, varExtXmin, varExtXmax, varExtYmin, varExtYmax): """Remap x, y, sigma parameters from pixel to degree. Parameters vecX : 1D numpy array Array with possible x parametrs in pixels vecY : 1D numpy array Array with possible y parametrs in pixels vecPrfSd : 1D numpy array Array with possible sd parametrs in pixels tplPngSize : tuple, 2 Pixel dimensions of the visual space in pixel (width, height). varExtXmin : float Extent of visual space from centre in negative x-direction (width) varExtXmax : float Extent of visual space from centre in positive x-direction (width) varExtYmin : int Extent of visual space from centre in negative y-direction (height) varExtYmax : float Extent of visual space from centre in positive y-direction (height) Returns ------- vecX : 1D numpy array Array with possible x parametrs in degree vecY : 1D numpy array Array with possible y parametrs in degree vecPrfSd : 1D numpy array Array with possible sd parametrs in degree """
# Remap modelled x-positions of the pRFs: vecXdgr = rmp_rng(vecX, varExtXmin, varExtXmax, varOldThrMin=0.0, varOldAbsMax=(tplPngSize[0] - 1)) # Remap modelled y-positions of the pRFs: vecYdgr = rmp_rng(vecY, varExtYmin, varExtYmax, varOldThrMin=0.0, varOldAbsMax=(tplPngSize[1] - 1)) # We calculate the scaling factor from pixels to degrees of visual angle to # separately for the x- and the y-directions (the two should be the same). varPix2DgrX = np.divide((varExtXmax - varExtXmin), tplPngSize[0]) varPix2DgrY = np.divide((varExtYmax - varExtYmin), tplPngSize[1]) # Check whether varDgr2PixX and varDgr2PixY are similar: strErrMsg = 'ERROR. The ratio of X and Y dimensions in ' + \ 'stimulus space (in pixels) do not agree' assert 0.5 > np.absolute((varPix2DgrX - varPix2DgrY)), strErrMsg # Convert prf sizes from degrees of visual angles to pixel vecPrfSdDgr = np.multiply(vecPrfSd, varPix2DgrX) # Return new values. return vecXdgr, vecYdgr, vecPrfSdDgr
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def crt_mdl_prms(tplPngSize, varNum1, varExtXmin, varExtXmax, varNum2, varExtYmin, varExtYmax, varNumPrfSizes, varPrfStdMin, varPrfStdMax, kwUnt='pix', kwCrd='crt'): """Create an array with all possible model parameter combinations Parameters tplPngSize : tuple, 2 Pixel dimensions of the visual space (width, height). varNum1 : int, positive Number of x-positions to model varExtXmin : float Extent of visual space from centre in negative x-direction (width) varExtXmax : float Extent of visual space from centre in positive x-direction (width) varNum2 : float, positive Number of y-positions to model. varExtYmin : int Extent of visual space from centre in negative y-direction (height) varExtYmax : float Extent of visual space from centre in positive y-direction (height) varNumPrfSizes : int, positive Number of pRF sizes to model. varPrfStdMin : float, positive Minimum pRF model size (standard deviation of 2D Gaussian) varPrfStdMax : float, positive Maximum pRF model size (standard deviation of 2D Gaussian) kwUnt: str Keyword to set the unit for model parameter combinations; model parameters can be in pixels ["pix"] or degrees of visual angles ["deg"] kwCrd: str Keyword to set the coordinate system for model parameter combinations; parameters can be in cartesian ["crt"] or polar ["pol"] coordinates Returns ------- aryMdlParams : 2d numpy array, shape [n_x_pos*n_y_pos*n_sd, 3] Model parameters (x, y, sigma) for all models. """
# Number of pRF models to be created (i.e. number of possible # combinations of x-position, y-position, and standard deviation): varNumMdls = varNum1 * varNum2 * varNumPrfSizes # Array for the x-position, y-position, and standard deviations for # which pRF model time courses are going to be created, where the # columns correspond to: (1) the x-position, (2) the y-position, and # (3) the standard deviation. The parameters are in units of the # upsampled visual space. aryMdlParams = np.zeros((varNumMdls, 3), dtype=np.float32) # Counter for parameter array: varCntMdlPrms = 0 if kwCrd == 'crt': # Vector with the moddeled x-positions of the pRFs: vecX = np.linspace(varExtXmin, varExtXmax, varNum1, endpoint=True) # Vector with the moddeled y-positions of the pRFs: vecY = np.linspace(varExtYmin, varExtYmax, varNum2, endpoint=True) # Vector with standard deviations pRF models (in degree of vis angle): vecPrfSd = np.linspace(varPrfStdMin, varPrfStdMax, varNumPrfSizes, endpoint=True) if kwUnt == 'deg': # since parameters are already in degrees of visual angle, # we do nothing pass elif kwUnt == 'pix': # convert parameters to pixels vecX, vecY, vecPrfSd = rmp_deg_pixel_xys(vecX, vecY, vecPrfSd, tplPngSize, varExtXmin, varExtXmax, varExtYmin, varExtYmax) else: print('Unknown keyword provided for possible model parameter ' + 'combinations: should be either pix or deg') # Put all combinations of x-position, y-position, and standard # deviations into the array: # Loop through x-positions: for idxX in range(0, varNum1): # Loop through y-positions: for idxY in range(0, varNum2): # Loop through standard deviations (of Gaussian pRF models): for idxSd in range(0, varNumPrfSizes): # Place index and parameters in array: aryMdlParams[varCntMdlPrms, 0] = vecX[idxX] aryMdlParams[varCntMdlPrms, 1] = vecY[idxY] aryMdlParams[varCntMdlPrms, 2] = vecPrfSd[idxSd] # Increment parameter index: varCntMdlPrms += 1 elif kwCrd == 'pol': # Vector with the radial position: vecRad = np.linspace(0.0, 
varExtXmax, varNum1, endpoint=True) # Vector with the angular position: vecTht = np.linspace(0.0, 2*np.pi, varNum2, endpoint=False) # Get all possible combinations on the grid, using matrix indexing ij # of output aryRad, aryTht = np.meshgrid(vecRad, vecTht, indexing='ij') # Flatten arrays to be able to combine them with meshgrid vecRad = aryRad.flatten() vecTht = aryTht.flatten() # Convert from polar to cartesian vecX, vecY = map_pol_to_crt(vecTht, vecRad) # Vector with standard deviations pRF models (in degree of vis angle): vecPrfSd = np.linspace(varPrfStdMin, varPrfStdMax, varNumPrfSizes, endpoint=True) if kwUnt == 'deg': # since parameters are already in degrees of visual angle, # we do nothing pass elif kwUnt == 'pix': # convert parameters to pixels vecX, vecY, vecPrfSd = rmp_deg_pixel_xys(vecX, vecY, vecPrfSd, tplPngSize, varExtXmin, varExtXmax, varExtYmin, varExtYmax) # Put all combinations of x-position, y-position, and standard # deviations into the array: # Loop through x-positions: for idxXY in range(0, varNum1*varNum2): # Loop through standard deviations (of Gaussian pRF models): for idxSd in range(0, varNumPrfSizes): # Place index and parameters in array: aryMdlParams[varCntMdlPrms, 0] = vecX[idxXY] aryMdlParams[varCntMdlPrms, 1] = vecY[idxXY] aryMdlParams[varCntMdlPrms, 2] = vecPrfSd[idxSd] # Increment parameter index: varCntMdlPrms += 1 else: print('Unknown keyword provided for coordinate system for model ' + 'parameter combinations: should be either crt or pol') return aryMdlParams
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def crt_mdl_rsp(arySptExpInf, tplPngSize, aryMdlParams, varPar, strCrd='crt', lgcPrint=True): """Create responses of 2D Gauss models to spatial conditions. Parameters arySptExpInf : 3d numpy array, shape [n_x_pix, n_y_pix, n_conditions] All spatial conditions stacked along second axis. tplPngSize : tuple, 2 Pixel dimensions of the visual space (width, height). aryMdlParams : 2d numpy array, shape [n_x_pos*n_y_pos*n_sd, 3] Model parameters (x, y, sigma) for all models. varPar : int, positive Number of cores to parallelize over. strCrd, string, either 'crt' or 'pol' Whether model parameters are provided in cartesian or polar coordinates lgcPrint : boolean Whether print statements should be executed. Returns ------- aryMdlCndRsp : 2d numpy array, shape [n_x_pos*n_y_pos*n_sd, n_cond] Responses of 2D Gauss models to spatial conditions. """
if varPar == 1: # if the number of cores requested by the user is equal to 1, # we save the overhead of multiprocessing by calling aryMdlCndRsp # directly aryMdlCndRsp = cnvl_2D_gauss(0, aryMdlParams, arySptExpInf, tplPngSize, None, strCrd=strCrd) else: # The long array with all the combinations of model parameters is put # into separate chunks for parallelisation, using a list of arrays. lstMdlParams = np.array_split(aryMdlParams, varPar) # Create a queue to put the results in: queOut = mp.Queue() # Empty list for results from parallel processes (for pRF model # responses): lstMdlTc = [None] * varPar # Empty list for processes: lstPrcs = [None] * varPar if lgcPrint: print('---------Running parallel processes') # Create processes: for idxPrc in range(0, varPar): lstPrcs[idxPrc] = mp.Process(target=cnvl_2D_gauss, args=(idxPrc, lstMdlParams[idxPrc], arySptExpInf, tplPngSize, queOut ), kwargs={'strCrd': strCrd}, ) # Daemon (kills processes when exiting): lstPrcs[idxPrc].Daemon = True # Start processes: for idxPrc in range(0, varPar): lstPrcs[idxPrc].start() # Collect results from queue: for idxPrc in range(0, varPar): lstMdlTc[idxPrc] = queOut.get(True) # Join processes: for idxPrc in range(0, varPar): lstPrcs[idxPrc].join() if lgcPrint: print('---------Collecting results from parallel processes') # Put output arrays from parallel process into one big array lstMdlTc = sorted(lstMdlTc) aryMdlCndRsp = np.empty((0, arySptExpInf.shape[-1])) for idx in range(0, varPar): aryMdlCndRsp = np.concatenate((aryMdlCndRsp, lstMdlTc[idx][1]), axis=0) # Clean up: del(lstMdlParams) del(lstMdlTc) return aryMdlCndRsp.astype('float16')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def crt_nrl_tc(aryMdlRsp, aryCnd, aryOns, aryDrt, varTr, varNumVol, varTmpOvsmpl, lgcPrint=True): """Create temporally upsampled neural time courses. Parameters aryMdlRsp : 2d numpy array, shape [n_x_pos * n_y_pos * n_sd, n_cond] Responses of 2D Gauss models to spatial conditions. aryCnd : np.array 1D array with condition identifiers (every condition has its own int) aryOns : np.array, same len as aryCnd 1D array with condition onset times in seconds. aryDrt : np.array, same len as aryCnd 1D array with condition durations of different conditions in seconds. varTr : float, positive Time to repeat (TR) of the (fMRI) experiment varNumVol : float, positive Number of data point (volumes) in the (fMRI) data varTmpOvsmpl : float, positive Factor by which the time courses should be temporally upsampled. lgcPrint: boolean, default True Should print messages be sent to user? Returns ------- aryNrlTc : 2d numpy array, shape [n_x_pos * n_y_pos * n_sd, varNumVol*varTmpOvsmpl] Neural time course models in temporally upsampled space Notes --------- [1] This function first creates boxcar functions based on the conditions as they are specified in the temporal experiment information, provided by the user in the csv file. Second, it then replaces the 1s in the boxcar function by predicted condition values that were previously calculated based on the overlap between the assumed 2D Gaussian for the current model and the presented stimulus aperture for that condition. Since the 2D Gaussian is normalized, the overlap value will be between 0 and 1. """
# adjust the input, if necessary, such that input is 2D tplInpShp = aryMdlRsp.shape aryMdlRsp = aryMdlRsp.reshape((-1, aryMdlRsp.shape[-1])) # the first spatial condition might code the baseline (blank periods) with # all zeros. If this is the case, remove the first spatial condition, since # for temporal conditions this is removed automatically below and we need # temporal and sptial conditions to maych if np.all(aryMdlRsp[:, 0] == 0): if lgcPrint: print('------------Removed first spatial condition (all zeros)') aryMdlRsp = aryMdlRsp[:, 1:] # create boxcar functions in temporally upsampled space aryBxCarTmp = create_boxcar(aryCnd, aryOns, aryDrt, varTr, varNumVol, aryExclCnd=np.array([0.]), varTmpOvsmpl=varTmpOvsmpl).T # Make sure that aryMdlRsp and aryBxCarTmp have the same number of # conditions assert aryMdlRsp.shape[-1] == aryBxCarTmp.shape[0] # pre-allocate pixelwise boxcar array aryNrlTc = np.zeros((aryMdlRsp.shape[0], aryBxCarTmp.shape[-1]), dtype='float16') # loop through boxcar functions of conditions for ind, vecCndOcc in enumerate(aryBxCarTmp): # get response predicted by models for this specific spatial condition rspValPrdByMdl = aryMdlRsp[:, ind] # insert predicted response value several times using broad-casting aryNrlTc[..., vecCndOcc.astype('bool')] = rspValPrdByMdl[:, None] # determine output shape tplOutShp = tplInpShp[:-1] + (int(varNumVol*varTmpOvsmpl), ) return aryNrlTc.reshape(tplOutShp).astype('float16')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def crt_prf_tc(aryNrlTc, varNumVol, varTr, varTmpOvsmpl, switchHrfSet, tplPngSize, varPar, dctPrm=None, lgcPrint=True): """Convolve every neural time course with HRF function. Parameters aryNrlTc : 4d numpy array, shape [n_x_pos, n_y_pos, n_sd, n_vol] Temporally upsampled neural time course models. varNumVol : float, positive Number of volumes of the (fMRI) data. varTr : float, positive Time to repeat (TR) of the (fMRI) experiment. varTmpOvsmpl : int, positive Factor by which the data hs been temporally upsampled. switchHrfSet : int, (1, 2, 3) Switch to determine which hrf basis functions are used tplPngSize : tuple Pixel dimensions of the visual space (width, height). varPar : int, positive Number of cores for multi-processing. dctPrm : dictionary, default None Dictionary with customized hrf parameters. If this is None, default hrf parameters will be used. lgcPrint: boolean, default True Should print messages be sent to user? Returns ------- aryNrlTcConv : 5d numpy array, shape [n_x_pos, n_y_pos, n_sd, n_hrf_bases, varNumVol] Neural time courses convolved with HRF basis functions """
# Create hrf time course function: if switchHrfSet == 3: lstHrf = [spmt, dspmt, ddspmt] elif switchHrfSet == 2: lstHrf = [spmt, dspmt] elif switchHrfSet == 1: lstHrf = [spmt] # If necessary, adjust the input such that input is 2D, with last dim time tplInpShp = aryNrlTc.shape aryNrlTc = np.reshape(aryNrlTc, (-1, aryNrlTc.shape[-1])) if varPar == 1: # if the number of cores requested by the user is equal to 1, # we save the overhead of multiprocessing by calling aryMdlCndRsp # directly aryNrlTcConv = cnvl_tc(0, aryNrlTc, lstHrf, varTr, varNumVol, varTmpOvsmpl, None, dctPrm=dctPrm) else: # Put input data into chunks: lstNrlTc = np.array_split(aryNrlTc, varPar) # Create a queue to put the results in: queOut = mp.Queue() # Empty list for processes: lstPrcs = [None] * varPar # Empty list for results of parallel processes: lstConv = [None] * varPar if lgcPrint: print('------------Running parallel processes') # Create processes: for idxPrc in range(0, varPar): lstPrcs[idxPrc] = mp.Process(target=cnvl_tc, args=(idxPrc, lstNrlTc[idxPrc], lstHrf, varTr, varNumVol, varTmpOvsmpl, queOut), kwargs={'dctPrm': dctPrm}, ) # Daemon (kills processes when exiting): lstPrcs[idxPrc].Daemon = True # Start processes: for idxPrc in range(0, varPar): lstPrcs[idxPrc].start() # Collect results from queue: for idxPrc in range(0, varPar): lstConv[idxPrc] = queOut.get(True) # Join processes: for idxPrc in range(0, varPar): lstPrcs[idxPrc].join() if lgcPrint: print('------------Collecting results from parallel processes') # Put output into correct order: lstConv = sorted(lstConv) # Concatenate convolved pixel time courses (into the same order aryNrlTcConv = np.zeros((0, switchHrfSet, varNumVol), dtype=np.float32) for idxRes in range(0, varPar): aryNrlTcConv = np.concatenate((aryNrlTcConv, lstConv[idxRes][1]), axis=0) # clean up del(aryNrlTc) del(lstConv) # Reshape results: tplOutShp = tplInpShp[:-1] + (len(lstHrf), ) + (varNumVol, ) # Return: return np.reshape(aryNrlTcConv, 
tplOutShp).astype(np.float32)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def crt_prf_ftr_tc(aryMdlRsp, aryTmpExpInf, varNumVol, varTr, varTmpOvsmpl, switchHrfSet, tplPngSize, varPar, dctPrm=None, lgcPrint=True): """Create all spatial x feature prf time courses. Parameters aryMdlRsp : 2d numpy array, shape [n_x_pos * n_y_pos * n_sd, n_cond] Responses of 2D Gauss models to spatial conditions aryTmpExpInf: 2d numpy array, shape [unknown, 4] Temporal information about conditions varNumVol : float, positive Number of volumes of the (fMRI) data. varTr : float, positive Time to repeat (TR) of the (fMRI) experiment. varTmpOvsmpl : int, positive Factor by which the data hs been temporally upsampled. switchHrfSet : int, (1, 2, 3) Switch to determine which hrf basis functions are used tplPngSize : tuple Pixel dimensions of the visual space (width, height). varPar : int, positive Description of input 1. dctPrm : dictionary, default None Dictionary with customized hrf parameters. If this is None, default hrf parameters will be used. lgcPrint: boolean, default True Should print messages be sent to user? Returns ------- aryNrlTcConv : 3d numpy array, shape [nr of models, nr of unique feautures, varNumVol] Prf time course models """
# Identify number of unique features vecFeat = np.unique(aryTmpExpInf[:, 3]) vecFeat = vecFeat[np.nonzero(vecFeat)[0]] # Preallocate the output array aryPrfTc = np.zeros((aryMdlRsp.shape[0], 0, varNumVol), dtype=np.float32) # Loop over unique features for indFtr, ftr in enumerate(vecFeat): if lgcPrint: print('---------Create prf time course model for feature ' + str(ftr)) # Derive sptial conditions, onsets and durations for this specific # feature aryTmpCnd = aryTmpExpInf[aryTmpExpInf[:, 3] == ftr, 0] aryTmpOns = aryTmpExpInf[aryTmpExpInf[:, 3] == ftr, 1] aryTmpDrt = aryTmpExpInf[aryTmpExpInf[:, 3] == ftr, 2] # Create temporally upsampled neural time courses. aryNrlTcTmp = crt_nrl_tc(aryMdlRsp, aryTmpCnd, aryTmpOns, aryTmpDrt, varTr, varNumVol, varTmpOvsmpl, lgcPrint=lgcPrint) # Convolve with hrf to create model pRF time courses. aryPrfTcTmp = crt_prf_tc(aryNrlTcTmp, varNumVol, varTr, varTmpOvsmpl, switchHrfSet, tplPngSize, varPar, dctPrm=dctPrm, lgcPrint=lgcPrint) # Add temporal time course to time course that will be returned aryPrfTc = np.concatenate((aryPrfTc, aryPrfTcTmp), axis=1) return aryPrfTc
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fnd_unq_rws(A, return_index=False, return_inverse=False): """Find unique rows in 2D array. Parameters A : 2d numpy array Array for which unique rows should be identified. return_index : bool Bool to decide whether I is returned. return_inverse : bool Bool to decide whether J is returned. Returns ------- B : 1d numpy array, Unique rows I: 1d numpy array, only returned if return_index is True B = A[I,:] J: 2d numpy array, only returned if return_inverse is True A = B[J,:] """
A = np.require(A, requirements='C') assert A.ndim == 2, "array must be 2-dim'l" B = np.unique(A.view([('', A.dtype)]*A.shape[1]), return_index=return_index, return_inverse=return_inverse) if return_index or return_inverse: return (B[0].view(A.dtype).reshape((-1, A.shape[1]), order='C'),) \ + B[1:] else: return B.view(A.dtype).reshape((-1, A.shape[1]), order='C')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_publisher(self): """ This method defines how the Article tries to determine the publisher of the article. This method relies on the success of the get_DOI method to fetch the appropriate full DOI for the article. It then takes the DOI prefix which corresponds to the publisher and then uses that to attempt to load the correct publisher-specific code. This may fail; if the DOI is not mapped to a code file, if the DOI is mapped but the code file could not be located, or if the mapped code file is malformed then this method will issue/log an informative error message and return None. This method will not try to infer the publisher based on any metadata other than the DOI of the article. Returns ------- publisher : Publisher instance or None """
#For a detailed explanation of the DOI system, visit: #http://www.doi.org/hb.html #The basic syntax of a DOI is this <prefix>/<suffix> #The <prefix> specifies a unique DOI registrant, in our case, this #should correspond to the publisher. We use this information to register #the correct Publisher class with this article doi_prefix = self.doi.split('/')[0] #The import_by_doi method should raise ImportError if a problem occurred try: publisher_mod = openaccess_epub.publisher.import_by_doi(doi_prefix) except ImportError as e: log.exception(e) return None #Each publisher module should define an attribute "pub_class" pointing #to the publisher-specific class extending #openaccess_epub.publisher.Publisher return publisher_mod.pub_class(self)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_DOI(self): """ This method defines how the Article tries to detect the DOI. It attempts to determine the article DOI string by DTD-appropriate inspection of the article metadata. This method should be made as flexible as necessary to properly collect the DOI for any XML publishing specification. Returns ------- doi : str or None The full (publisher/article) DOI string for the article, or None on failure. """
if self.dtd_name == 'JPTS': doi = self.root.xpath("./front/article-meta/article-id[@pub-id-type='doi']") if doi: return doi[0].text log.warning('Unable to locate DOI string for this article') return None else: log.warning('Unable to locate DOI string for this article') return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def np_lst_sq(vecMdl, aryFuncChnk): """Least squares fitting in numpy without cross-validation. Notes ----- This is just a wrapper function for np.linalg.lstsq to keep piping consistent. """
aryTmpBts, vecTmpRes = np.linalg.lstsq(vecMdl, aryFuncChnk, rcond=-1)[:2] return aryTmpBts, vecTmpRes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def np_lst_sq_xval(vecMdl, aryFuncChnk, aryIdxTrn, aryIdxTst): """Least squares fitting in numpy with cross-validation. """
varNumXval = aryIdxTrn.shape[-1] varNumVoxChnk = aryFuncChnk.shape[-1] # pre-allocate ary to collect cross-validation # error for every xval fold aryResXval = np.empty((varNumVoxChnk, varNumXval), dtype=np.float32) # loop over cross-validation folds for idxXval in range(varNumXval): # Get pRF time course models for trn and tst: vecMdlTrn = vecMdl[aryIdxTrn[:, idxXval], :] vecMdlTst = vecMdl[aryIdxTst[:, idxXval], :] # Get functional data for trn and tst: aryFuncChnkTrn = aryFuncChnk[ aryIdxTrn[:, idxXval], :] aryFuncChnkTst = aryFuncChnk[ aryIdxTst[:, idxXval], :] # Numpy linalg.lstsq is used to calculate the # parameter estimates of the current model: vecTmpPe = np.linalg.lstsq(vecMdlTrn, aryFuncChnkTrn, rcond=-1)[0] # calculate model prediction time course aryMdlPrdTc = np.dot(vecMdlTst, vecTmpPe) # calculate residual sum of squares between # test data and model prediction time course aryResXval[:, idxXval] = np.sum( (np.subtract(aryFuncChnkTst, aryMdlPrdTc))**2, axis=0) return aryResXval
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def _raw_state_data(self) -> list: """Return a list of states."""
data = await self._request('get', 'states') return [ location for location in data if location['name'] != 'United States' ]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def initialize(self, maxsize, history=None): '''size specifies the maximum amount of history to keep''' super().__init__() self.maxsize = int(maxsize) self.history = deque(maxlen=self.maxsize) # Preserves order history # If `items` are specified, then initialize with them if history is not None: for key, value in history: self.insert(key, value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def insert(self, key, value): '''Adds a new key-value pair. Returns any discarded values.''' # Add to history and catch expectorate if len(self.history) == self.maxsize: expectorate = self.history[0] else: expectorate = None self.history.append((key, value)) # Add to the appropriate list of values if key in self: super().__getitem__(key).append(value) else: super().__setitem__(key, [value]) # Clean up old values if expectorate is not None: old_key, old_value = expectorate super().__getitem__(old_key).pop(0) if len(super().__getitem__(old_key)) == 0: super().__delitem__(old_key) return (old_key, old_value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def up_to(self, key): '''Gets the recently inserted values up to a key''' for okey, ovalue in reversed(self.history): if okey == key: break else: yield ovalue
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resource(self, uri, methods=frozenset({'GET'}), **kwargs): """ Decorates a function to be registered as a resource route. :param uri: path of the URL :param methods: list or tuple of methods allowed :param host: :param strict_slashes: :param stream: :param version: :param name: user defined route name for url_for :param filters: List of callable that will filter request and response data :param validators: List of callable added to the filter list. :return: A decorated function """
def decorator(f): if kwargs.get('stream'): f.is_stream = kwargs['stream'] self.add_resource(f, uri=uri, methods=methods, **kwargs) return decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_resource(self, handler, uri, methods=frozenset({'GET'}), **kwargs): """ Register a resource route. :param handler: function or class instance :param uri: path of the URL :param methods: list or tuple of methods allowed :param host: :param strict_slashes: :param version: :param name: user defined route name for url_for :param filters: List of callable that will filter request and response data :param validators: List of callable added to the filter list. :return: function or class instance """
sanic_args = ('host', 'strict_slashes', 'version', 'name') view_kwargs = dict((k, v) for k, v in kwargs.items() if k in sanic_args) filters = kwargs.get('filters', self.default_filters) validators = kwargs.get('validators', []) filter_list = list(filters) + list(validators) filter_options = { 'filter_list': filter_list, 'handler': handler, 'uri': uri, 'methods': methods } filter_options.update(kwargs) handler = self.init_filters(filter_list, filter_options)(handler) return self.add_route(handler=handler, uri=uri, methods=methods, **view_kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_commits(repo_dir, old_commit, new_commit, hide_merges=True): """Find all commits between two commit SHAs."""
repo = Repo(repo_dir) commits = repo.iter_commits(rev="{0}..{1}".format(old_commit, new_commit)) if hide_merges: return [x for x in commits if not x.summary.startswith("Merge ")] else: return list(commits)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_commit_url(repo_url): """Determine URL to view commits for repo."""
if "github.com" in repo_url: return repo_url[:-4] if repo_url.endswith(".git") else repo_url if "git.openstack.org" in repo_url: uri = '/'.join(repo_url.split('/')[-2:]) return "https://github.com/{0}".format(uri) # If it didn't match these conditions, just return it. return repo_url
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_projects(osa_repo_dir, commit): """Get all projects from multiple YAML files."""
# Check out the correct commit SHA from the repository repo = Repo(osa_repo_dir) checkout(repo, commit) yaml_files = glob.glob( '{0}/playbooks/defaults/repo_packages/*.yml'.format(osa_repo_dir) ) yaml_parsed = [] for yaml_file in yaml_files: with open(yaml_file, 'r') as f: yaml_parsed.append(yaml.load(f)) merged_dicts = {k: v for d in yaml_parsed for k, v in d.items()} return normalize_yaml(merged_dicts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def checkout(repo, ref): """Checkout a repoself."""
# Delete local branch if it exists, remote branch will be tracked # automatically. This prevents stale local branches from causing problems. # It also avoids problems with appending origin/ to refs as that doesn't # work with tags, SHAs, and upstreams not called origin. if ref in repo.branches: # eg delete master but leave origin/master log.warn("Removing local branch {b} for repo {r}".format(b=ref, r=repo)) # Can't delete currently checked out branch, so make sure head is # detached before deleting. repo.head.reset(index=True, working_tree=True) repo.git.checkout(repo.head.commit.hexsha) repo.delete_head(ref, '--force') log.info("Checkout out repo {repo} to ref {ref}".format(repo=repo, ref=ref)) repo.head.reset(index=True, working_tree=True) repo.git.checkout(ref) repo.head.reset(index=True, working_tree=True) sha = repo.head.commit.hexsha log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
def get_roles(osa_repo_dir, commit, role_requirements):
    """Read OSA role information at a particular commit.

    Checks out ``commit`` and parses the role requirements YAML file,
    returning normalized ``(name, src, version)`` tuples.
    """
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
                                                       f=role_requirements))
    filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
    with open(filename, 'r') as f:
        # safe_load: plain yaml.load without an explicit Loader is
        # unsafe (arbitrary object construction) and deprecated.
        roles_yaml = yaml.safe_load(f)
    return normalize_yaml(roles_yaml)
def normalize_yaml(yaml):
    """Normalize the YAML from project and role lookups.

    Role YAML (a list of dicts) yields ``(name, src, version)`` tuples
    with ``'HEAD'`` as the default version. Project YAML (a flat dict of
    ``<project>_git_repo`` / ``<project>_git_install_branch`` keys)
    yields ``(project, repo_url, commit_sha)`` tuples.
    """
    if isinstance(yaml, list):
        # Role requirements: one dict per role.
        return [
            (role['name'], role['src'], role.get('version', 'HEAD'))
            for role in yaml
        ]
    # Project variables: derive project names by stripping the
    # '_git_repo' suffix (9 characters).
    projects = [key[:-9] for key in yaml.keys() if key.endswith('git_repo')]
    return [
        (
            project,
            yaml['{0}_git_repo'.format(project)],
            yaml['{0}_git_install_branch'.format(project)],
        )
        for project in projects
    ]
def post_gist(report_data, old_sha, new_sha):
    """Post the report to a GitHub Gist and return the gist's URL."""
    gist_filename = "osa-diff-{0}-{1}.rst".format(old_sha, new_sha)
    payload = {
        "description": ("Changes in OpenStack-Ansible between "
                        "{0} and {1}".format(old_sha, new_sha)),
        "public": True,
        "files": {
            gist_filename: {"content": report_data},
        },
    }
    # Anonymous POST to the public gists endpoint.
    response = requests.post("https://api.github.com/gists",
                             data=json.dumps(payload))
    return response.json()['html_url']
def prepare_storage_dir(storage_directory):
    """Prepare the storage directory.

    Expands ``~`` and creates the directory (including any missing
    parent directories) if it does not already exist.

    :param storage_directory: path of the directory to prepare
    :returns: the expanded directory path
    """
    storage_directory = os.path.expanduser(storage_directory)
    # makedirs with exist_ok handles nested paths (os.mkdir cannot) and
    # avoids the check-then-act race of exists()/mkdir().
    os.makedirs(storage_directory, exist_ok=True)
    return storage_directory
def render_template(template_file, template_vars):
    """Render a jinja template with the given variables."""
    # Templates live in a 'templates' directory next to this module.
    template_dir = "{0}/templates".format(
        os.path.dirname(os.path.abspath(__file__))
    )
    environment = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir),
        trim_blocks=True
    )
    template = environment.get_template(template_file)
    return template.render(template_vars)
def repo_pull(repo_dir, repo_url, fetch=False):
    """Reset the repository to a clean master and optionally fetch updates."""
    # Make sure the repository is reset to the master branch.
    repo = Repo(repo_dir)
    repo.git.clean("-df")
    repo.git.reset("--hard")
    repo.git.checkout("master")
    repo.head.reset(index=True, working_tree=True)
    # Build the refspec so that GitHub repositories also fetch the refs
    # for pull requests.
    refspec_list = [
        "+refs/heads/*:refs/remotes/origin/*",
        "+refs/heads/*:refs/heads/*",
        "+refs/tags/*:refs/tags/*",
    ]
    if "github.com" in repo_url:
        refspec_list += [
            "+refs/pull/*:refs/remotes/origin/pr/*",
            "+refs/heads/*:refs/remotes/origin/*",
        ]
    # Only get the latest updates if requested.
    if fetch:
        repo.git.fetch(["-u", "-v", "-f", repo_url, refspec_list])
    return repo
def update_repo(repo_dir, repo_url, fetch=False):
    """Clone the repo if it doesn't exist yet, then reset/refresh it."""
    if not os.path.exists(repo_dir):
        log.info("Cloning repo {}".format(repo_url))
        repo_clone(repo_dir, repo_url)
    # Make sure the repo is properly prepared and has all required refs.
    log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
    return repo_pull(repo_dir, repo_url, fetch)
def validate_commits(repo_dir, commits):
    """Check that every commit in *commits* exists in the repository.

    :raises: exceptions.InvalidCommitException for the first unknown
        commit.
    :returns: True when all commits resolve.
    """
    log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
    repo = Repo(repo_dir)
    for commit in commits:
        try:
            repo.commit(commit)
        except Exception:
            msg = ("Commit {commit} could not be found in repo {repo}. "
                   "You may need to pass --update to fetch the latest "
                   "updates to the git repositories stored on "
                   "your local computer.".format(repo=repo_dir,
                                                 commit=commit))
            raise exceptions.InvalidCommitException(msg)
    return True
def validate_commit_range(repo_dir, old_commit, new_commit):
    """Check if commit range is valid. Flip it if needed.

    :returns: True when ``old..new`` contains commits, the string
        ``'flip'`` when only the reversed range does.
    :raises: exceptions.InvalidCommitRangeException when neither
        ordering yields any commits.
    """
    def _commits_between(first, second):
        # get_commits raises for unknown refs; treat that the same as an
        # empty range so the caller can try the reversed order.
        try:
            return get_commits(repo_dir, first, second)
        except Exception:
            return []

    if len(_commits_between(old_commit, new_commit)) > 0:
        return True
    # The user might have gotten their commits out of order; try the
    # reversed range.
    if len(_commits_between(new_commit, old_commit)) > 0:
        return 'flip'
    # There really are no commits between the two provided commits.
    # (Original message lacked the space after the first sentence.)
    msg = ("The commit range {0}..{1} is invalid for {2}. "
           "You may need to use the --update option to fetch the "
           "latest updates to the git repositories stored on your "
           "local computer.".format(old_commit, new_commit, repo_dir))
    raise exceptions.InvalidCommitRangeException(msg)
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
    """Get release notes between the two revisions."""
    repo = Repo(osa_repo_dir)
    # Sorted list of tags. LooseVersion alone prints major tags after
    # their rc/b pre-releases, so fix the ordering afterwards.
    tags = repo.git.tag().split('\n')
    tags = sorted(tags, key=LooseVersion)
    tags = _fix_tags_list(tags)
    # Closest tag to the old SHA: the tag cut on or before it.
    checkout(repo, osa_old_commit)
    old_tag = repo.git.describe()
    # Between two releases 'git describe' yields '<tag>-<count>-<sha>'
    # (e.g. 14.0.2-3-g6931e26); reno only understands the bare tag, so
    # strip the commit number and sha bits.
    if '-' in old_tag:
        old_tag = old_tag[0:old_tag.index('-')]
    # Nearest tag for the new commit, normalized the same way.
    checkout(repo, osa_new_commit)
    new_tag = repo.git.describe()
    if '-' in new_tag:
        nearest_new_tag = new_tag[0:new_tag.index('-')]
    else:
        nearest_new_tag = new_tag
    # Keep only the versions between old_sha and new_sha; the latest
    # release is handled separately right below.
    tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
    release_notes = ""
    # Check out the new commit and let reno report everything created or
    # updated between the latest release and this commit.
    repo.git.checkout(osa_new_commit, '-f')
    reno_report_command = ['reno', 'report',
                           '--earliest-version', nearest_new_tag]
    reno_report_p = subprocess.Popen(reno_report_command,
                                     cwd=osa_repo_dir,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    reno_output = reno_report_p.communicate()[0].decode('UTF-8')
    release_notes += reno_output
    # Walk the packaged releases latest-first, one version at a time.
    for version in reversed(tags):
        repo.git.checkout(version, '-f')
        reno_report_command = ['reno', 'report',
                               '--branch', version,
                               '--earliest-version', version]
        reno_report_p = subprocess.Popen(reno_report_command,
                                         cwd=osa_repo_dir,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        reno_output = reno_report_p.communicate()[0].decode('UTF-8')
        # Only keep output that actually mentions the version; works
        # around https://bugs.launchpad.net/reno/+bug/1670173.
        if version in reno_output:
            release_notes += reno_output
    # Drop the per-release "Release Notes" title.
    release_notes = release_notes.replace(
        "=============\nRelease Notes\n=============", ""
    )
    # Rewrite '=' underlines as '~' and '-' underlines as '#' to comply
    # with osa-differ's formatting.
    release_notes = re.sub('===+', _equal_to_tilde, release_notes)
    release_notes = re.sub('---+', _dash_to_num, release_notes)
    return release_notes
def run_osa_differ():
    """Command-line entry point: build and publish the diff report."""
    # Get our arguments from the command line.
    args = parse_arguments()
    # Raise the log level if requested.
    if args.debug:
        log.setLevel(logging.DEBUG)
    elif args.verbose:
        log.setLevel(logging.INFO)
    # Create the storage directory if it doesn't exist already.
    try:
        storage_directory = prepare_storage_dir(args.directory)
    except OSError:
        print("ERROR: Couldn't create the storage directory {0}. "
              "Please create it manually.".format(args.directory))
        sys.exit(1)
    # Assemble some variables for the OSA repository.
    osa_old_commit = args.old_commit[0]
    osa_new_commit = args.new_commit[0]
    osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)
    # Generate the OpenStack-Ansible report header.
    report_rst = make_osa_report(osa_repo_dir,
                                 osa_old_commit,
                                 osa_new_commit,
                                 args)
    # Reno release notes for the packaged releases between the commits.
    if args.release_notes:
        report_rst += ("\nRelease Notes\n"
                       "-------------")
        report_rst += get_release_notes(osa_repo_dir,
                                        osa_old_commit,
                                        osa_new_commit)
    # Get the list of OpenStack roles from the newer and older commits.
    role_yaml = get_roles(osa_repo_dir, osa_old_commit,
                          args.role_requirements)
    role_yaml_latest = get_roles(osa_repo_dir, osa_new_commit,
                                 args.role_requirements)
    if not args.skip_roles:
        # Generate the role report.
        report_rst += ("\nOpenStack-Ansible Roles\n"
                       "-----------------------")
        report_rst += make_report(storage_directory,
                                  role_yaml,
                                  role_yaml_latest,
                                  args.update,
                                  args.version_mappings)
    if not args.skip_projects:
        # Project lists from the newer and older commits.
        project_yaml = get_projects(osa_repo_dir, osa_old_commit)
        project_yaml_latest = get_projects(osa_repo_dir, osa_new_commit)
        # Generate the project report.
        report_rst += ("\nOpenStack Projects\n"
                       "------------------")
        report_rst += make_report(storage_directory,
                                  project_yaml,
                                  project_yaml_latest,
                                  args.update)
    # Publish the report according to the user's request.
    output = publish_report(report_rst, args, osa_old_commit,
                            osa_new_commit)
    print(output)
def append_new_text(destination, text, join_str=None):
    """Append *text* underneath *destination*, honoring lxml text rules.

    If *destination* has children, the text belongs on the last child's
    ``tail``; otherwise it belongs on ``destination.text``. Existing
    text is joined with *join_str* (default a single space).
    """
    if join_str is None:
        join_str = ' '
    if len(destination) > 0:
        # Children present: trailing text lives on the last child's tail.
        last_child = destination[-1]
        if last_child.tail is None:
            last_child.tail = text
        else:
            last_child.tail = join_str.join([last_child.tail, text])
    elif destination.text is None:
        destination.text = text
    else:
        destination.text = join_str.join([destination.text, text])
def append_all_below(destination, source, join_str=None):
    """Append everything underneath *source* to *destination*, in order.

    Handles lxml's text/tail model: the source's leading text goes onto
    ``destination.text`` (no children) or the last child's ``tail``
    (children present), joined with *join_str* (default a space); then
    deep copies of all child elements are appended.
    """
    if join_str is None:
        join_str = ' '
    if source.text is not None:
        if len(destination) == 0:
            # No children: text belongs on destination.text.
            if destination.text is None:
                destination.text = source.text
            else:
                destination.text = join_str.join([destination.text,
                                                  source.text])
        else:
            # Children present: text belongs on the last child's tail.
            last_child = destination[-1]
            if last_child.tail is None:
                last_child.tail = source.text
            else:
                last_child.tail = join_str.join([last_child.tail,
                                                 source.text])
    for child in source:
        destination.append(deepcopy(child))
def remove_all_attributes(element, exclude=None):
    """Remove all attributes of *element*.

    :param element: element whose attributes are removed
    :param exclude: optional list of attribute names to keep
    """
    if exclude is None:
        exclude = []
    # Snapshot the keys first: popping from attrib while iterating its
    # live keys() view raises RuntimeError on dict-backed attrib
    # implementations (e.g. xml.etree).
    for key in list(element.attrib):
        if key not in exclude:
            element.attrib.pop(key)
def rename_attributes(element, attrs):
    """Rename the attributes of *element*.

    *attrs* maps original attribute names to their new names. All
    attributes are treated as optional: missing names are skipped.
    """
    for old_name, new_name in attrs.items():
        if old_name in element.attrib:
            element.attrib[new_name] = element.attrib.pop(old_name)
def replace(old, new):
    """Replace the element *old* with *new* in place."""
    old.getparent().replace(old, new)
def insert_before(old, new):
    """Insert *new* immediately before *old* among its siblings."""
    parent = old.getparent()
    position = parent.index(old)
    parent.insert(position, new)
def comment(node):
    """Convert *node* into a comment in place; return the comment node.

    The comment's text is the XML serialization of the original node.
    """
    parent = node.parentNode
    replacement = node.ownerDocument.createComment(node.toxml())
    parent.replaceChild(replacement, node)
    return replacement
def uncomment(comment):
    """Convert the comment node back into a regular node, in place.

    Returns the new node. Parsing may fail, primarily due to special
    characters within the comment that the XML parser cannot handle; in
    that case an error is logged, the document is left unchanged, and
    None is returned.
    """
    parent = comment.parentNode
    # html.unescape replaces HTMLParser().unescape, which was removed
    # from the standard library in Python 3.9.
    data = html.unescape(comment.data)
    try:
        node = minidom.parseString(data).firstChild
    except xml.parsers.expat.ExpatError:  # Could not parse!
        log.error('Could not uncomment node due to parsing error!')
        return None
    parent.replaceChild(node, comment)
    return node
def serialize(element, strip=False):
    """Serialize an element (and its descendants) to plain text.

    :param strip: when True, strip leading/trailing whitespace.
    """
    raw = etree.tostring(element, method='text', encoding='utf-8')
    if strip:
        raw = raw.strip()
    return str(raw, encoding='utf-8')
def main():
    """pyprf_opt_brute entry point."""
    # NOTE: sys.argv[1:] would be another way of accessing the input
    # arguments, but it is redundant since 'argparse' is used.
    strWelcome = 'pyprf_opt_brute ' + __version__
    strDec = '=' * len(strWelcome)
    print(strDec + '\n' + strWelcome + '\n' + strDec)
    objNspc = get_arg_parse()
    # Print info if any required argument is missing.
    if any(item is None for item in [objNspc.config, objNspc.strPthPrior,
                                     objNspc.varNumOpt1,
                                     objNspc.varNumOpt2]):
        print('Please provide the necessary input arguments, i.e.:')
        print('-strCsvCnfg -strPthPrior -varNumOpt1 and -varNumOpt2')
        return
    # Signal non-test mode to lower functions (needed for pytest):
    lgcTest = False
    if objNspc.supsur is None:
        # pRF fitting without suppressive surround.
        print('***Mode: Fit pRF models, no suppressive surround***')
        pyprf_opt_brute(objNspc.config, objNspc, lgcTest=lgcTest,
                        strPathHrf=objNspc.strPathHrf, varRat=None)
    else:
        # pRF fitting with suppressive surround.
        print('***Mode: Fit pRF models, suppressive surround***')
        # Load config parameters from csv file into dictionary, then
        # into a namespace. Doing this per run gives a fresh start in
        # case variables are redefined during the pRF analysis.
        dicCnfg = load_config(objNspc.config, lgcTest=lgcTest,
                              lgcPrint=False)
        cfg = cls_set_config(dicCnfg)
        # Models must be re-created from scratch for every center/
        # surround size ratio, so model creation has to be enabled.
        errorMsg = 'lgcCrteMdl needs to be set to True for -supsur.'
        assert cfg.lgcCrteMdl, errorMsg
        # Finding the negative surround for the hrf derivative function
        # would not make sense.
        errorMsg = 'switchHrfSet needs to be set to 1 for -supsur.'
        assert cfg.switchHrfSet == 1, errorMsg
        # Get list with size ratios; all must be larger than 1.0.
        lstRat = objNspc.supsur
        errorMsg = 'All provided ratios need to be larger than 1.0'
        assert np.all(np.greater(np.array(lstRat), 1.0)), errorMsg
        # Prepend None so fitting without surround is performed as well.
        lstRat.insert(0, None)
        # Loop over ratios and find the best pRF for each.
        for varRat in lstRat:
            print('---Ratio surround to center: ' + str(varRat))
            pyprf_opt_brute(objNspc.config, objNspc, lgcTest=lgcTest,
                            strPathHrf=objNspc.strPathHrf, varRat=varRat)
        # Name suffices of output images.
        lstNiiNames = ['_x_pos', '_y_pos', '_SD', '_R2', '_polar_angle',
                       '_eccentricity', '_Betas']
        # Replace the first entry (None) with 1 so it can be saved to
        # nii.
        lstRat[0] = 1.0
        # Append 'hrf' to the output path if a custom hrf was used.
        if objNspc.strPathHrf is not None:
            cfg.strPathOut = cfg.strPathOut + '_hrf'
        # Compare results for the different ratios, export nii files
        # based on the comparison, and delete intermediate results.
        cmp_res_R2(lstRat, lstNiiNames, cfg.strPathOut, cfg.strPathMdl,
                   lgcDel=True, lgcSveMdlTc=False, strNmeExt='_brute')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _homogenize_data_filter(dfilter): """ Make data filter definition consistent. Create a tuple where first element is the row filter and the second element is the column filter """
if isinstance(dfilter, tuple) and (len(dfilter) == 1): dfilter = (dfilter[0], None) if (dfilter is None) or (dfilter == (None, None)) or (dfilter == (None,)): dfilter = (None, None) elif isinstance(dfilter, dict): dfilter = (dfilter, None) elif isinstance(dfilter, (list, str)) or ( isinstance(dfilter, int) and (not isinstance(dfilter, bool)) ): dfilter = (None, dfilter if isinstance(dfilter, list) else [dfilter]) elif isinstance(dfilter[0], dict) or ( (dfilter[0] is None) and (not isinstance(dfilter[1], dict)) ): pass else: dfilter = (dfilter[1], dfilter[0]) return dfilter
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _tofloat(obj): """Convert to float if object is a float string."""
if "inf" in obj.lower().strip(): return obj try: return int(obj) except ValueError: try: return float(obj) except ValueError: return obj
def _validate_frow(self, frow):
    """Validate frow argument: a non-negative integer (bool excluded)."""
    valid = (isinstance(frow, int)
             and not isinstance(frow, bool)
             and frow >= 0)
    pexdoc.exh.addai("frow", not valid)
    return frow
def _validate_rfilter(self, rfilter, letter="d"):
    """Validate that all columns in the row filter are in the header.

    :param rfilter: row filter dictionary (mutated in place: scalar
        string values are normalized to single-item lists)
    :param letter: 'd' when validating a dfilter argument, anything
        else for an rfilter argument (selects the exception's argument
        name)
    """
    # The two original branches were identical except for the argument
    # name; compute the name and condition once.
    argname = "dfilter" if letter == "d" else "rfilter"
    # Without a header row, filter keys must be column indices (ints).
    pexdoc.exh.addai(
        argname,
        (not self._has_header)
        and any([not isinstance(item, int) for item in rfilter.keys()]),
    )
    for key in rfilter:
        self._in_header(key)
        rfilter[key] = (
            [rfilter[key]] if isinstance(rfilter[key], str) else rfilter[key]
        )
def dsort(self, order):
    r"""Sort rows.

    :param order: Sort order
    :type order: :ref:`CsvColFilter`

    :raises:
     * RuntimeError (Argument \`order\` is not valid)
     * RuntimeError (Invalid column specification)
     * ValueError (Column *[column_identifier]* not found)
    """
    # Make order conform to a list of dictionaries; bare items sort
    # ascending ("A") by default.
    order = order if isinstance(order, list) else [order]
    norder = [item if isinstance(item, dict) else {item: "A"}
              for item in order]
    # Verify that all columns exist in the file.
    self._in_header([list(item.keys())[0] for item in norder])
    # Resolve (column index, descending?) pairs.
    clist = []
    for nitem in norder:
        for key, value in nitem.items():
            cindex = (key if isinstance(key, int)
                      else self._header_upper.index(key.upper()))
            clist.append((cindex, value.upper() == "D"))
    # list.sort is guaranteed stable, so sorting repeatedly from the
    # most minor key to the most major key produces a multi-key sort.
    for cindex, descending in reversed(clist):
        self._data.sort(key=operator.itemgetter(cindex),
                        reverse=descending)
def header(self, filtered=False):
    r"""Return the data header.

    For raw (input) data this is the CSV header (or column numbers when
    the file was loaded without one). With ``filtered=True`` the active
    column filter is returned instead, falling back to the raw header
    when no column filter is set.

    :param filtered: use filtered data (True) or raw data (False)
    :rtype: list of strings or integers
    """
    if filtered and self._cfilter is not None:
        return self._cfilter
    return self._header
def replace(self, rdata, filtered=False):
    r"""Replace data.

    :param rdata: Replacement data
    :type rdata: list of lists
    :param filtered: Filtering type
    :type filtered: :ref:`CsvFiltered`

    :raises:
     * RuntimeError (Argument \`filtered\` is not valid)
     * RuntimeError (Argument \`rdata\` is not valid)
     * ValueError (Number of columns mismatch between input and
       replacement data)
     * ValueError (Number of rows mismatch between input and
       replacement data)
    """
    # pylint: disable=R0914
    rdata_ex = pexdoc.exh.addai("rdata")
    rows_ex = pexdoc.exh.addex(
        ValueError, "Number of rows mismatch between input and replacement data"
    )
    cols_ex = pexdoc.exh.addex(
        ValueError, "Number of columns mismatch between input and replacement data"
    )
    # All replacement rows must have the same number of columns.
    rdata_ex(any([len(item) != len(rdata[0]) for item in rdata]))
    # Use all columns if no column specification has been given.
    # (Computed once; the original recomputed this identical expression
    # twice.)
    cfilter = (
        self._cfilter if filtered in [True, "B", "b", "C", "c"] else self._header
    )
    # Column-name verification has to happen before getting data.
    odata = self._apply_filter(filtered)
    col_index = [
        self._header_upper.index(col_id.upper())
        if isinstance(col_id, str)
        else col_id
        for col_id in cfilter
    ]
    rows_ex(len(odata) != len(rdata))
    cols_ex(len(odata[0]) != len(rdata[0]))
    df_tuples = self._format_rfilter(self._rfilter)
    rnum = 0
    for row in self._data:
        # Only touch rows that pass the active row filter (or every row
        # when unfiltered).
        if (not filtered) or (
            filtered
            and all([row[col_num] in col_value
                     for col_num, col_value in df_tuples])
        ):
            for col_num, new_data in zip(col_index, rdata[rnum]):
                row[col_num] = new_data
            rnum = rnum + 1
def list(self, request):
    """Return the documents found on the collection.

    Builds an aggregation pipeline from the optional 'match', 'sort'
    and 'project' request arguments, in that order.
    """
    pipeline = [{'$match': request.args.pop('match', {})}]
    for stage in ('sort', 'project'):
        value = request.args.pop(stage, {})
        if value:
            pipeline.append({'$' + stage: value})
    return Response(serialize(self.collection.aggregate(pipeline)))
def create(self, request):
    """Create a new document based on the request payload.

    Returns 201 on success, 400 when the insert result carries all of
    the error keys.
    """
    document = self.collection(request.json)
    document.created_at = datetime.utcnow()
    document.updated_at = document.created_at
    created = document.insert()
    error_keys = ('error_code', 'error_type', 'error_message')
    is_error = all(key in created for key in error_keys)
    return Response(
        response=serialize(created),
        status=400 if is_error else 201,
    )
def retrieve(self, request, _id):
    """Return the document containing the given _id, or 404."""
    _id = deserialize(_id)
    retrieved = self.collection.find_one({'_id': _id})
    if retrieved:
        return Response(serialize(retrieved))
    # A missing document is "not found" (404), as this endpoint's
    # contract and the delete endpoint already state; 400 would wrongly
    # signal a malformed request.
    return Response(
        response=serialize(
            DocumentNotFoundError(self.collection.__name__, _id)
        ),
        status=404
    )
def update(self, request, _id):
    """Update the document with the given _id using the request payload.

    Returns 200 on success, 400 when the update result carries all the
    error keys or the _id was not found.
    """
    _id = deserialize(_id)
    to_update = self.collection.find_one({'_id': _id})
    if not to_update:
        return Response(
            response=serialize(
                DocumentNotFoundError(self.collection.__name__, _id)
            ),
            status=400
        )
    # Merge the stored document with the request payload.
    document = self.collection(dict(to_update, **request.json))
    document.updated_at = datetime.utcnow()
    updated = document.update()
    error_keys = ('error_code', 'error_type', 'error_message')
    has_error = all(key in updated for key in error_keys)
    return Response(
        response=serialize(updated),
        status=400 if has_error else 200,
    )
def delete(self, request, _id):
    """Delete the document with the given _id if it exists.

    Returns 200 on success, 400 when the delete result carries all the
    error keys, 404 when no such document exists.
    """
    _id = deserialize(_id)
    to_delete = self.collection.get({'_id': _id})
    if not to_delete:
        return Response(
            response=serialize(
                DocumentNotFoundError(self.collection.__name__, _id)
            ),
            status=404
        )
    deleted = to_delete.delete()
    error_keys = ('error_code', 'error_type', 'error_message')
    has_error = all(key in deleted for key in error_keys)
    return Response(
        response=serialize(deleted),
        status=400 if has_error else 200,
    )
def _ensure_reactor_running():
    """Start the twisted reactor in a daemon thread if not yet running.

    Parts of the ``signal`` API can only be called from the main
    thread, so ``signal.signal`` and ``signal.set_wakeup_fd`` are
    temporarily monkey-patched to record their invocations while the
    reactor starts in a background thread; once the reactor is up, the
    recorded invocations are replayed from this (main) thread.
    """
    if reactor.running:
        return
    # Recorded (function, args, kwargs) invocations to replay later.
    signal_registrations = []

    def signal_capture(*args, **kwargs):
        signal_registrations.append((orig_signal, args, kwargs))

    def set_wakeup_fd_capture(*args, **kwargs):
        signal_registrations.append((orig_set_wakeup_fd, args, kwargs))

    # Do the monkey patching.
    orig_signal = signal.signal
    signal.signal = signal_capture
    orig_set_wakeup_fd = signal.set_wakeup_fd
    signal.set_wakeup_fd = set_wakeup_fd_capture
    # Start the reactor in a daemon thread.
    reactor_thread = threading.Thread(target=reactor.run, name="reactor")
    reactor_thread.daemon = True
    reactor_thread.start()
    while not reactor.running:
        time.sleep(0.01)
    # Give the reactor a moment to register the signals; apparently the
    # 'running' flag is set before that happens.
    time.sleep(0.01)
    # Undo the monkey patching.
    signal.signal = orig_signal
    signal.set_wakeup_fd = orig_set_wakeup_fd
    # Playback the recorded calls.
    for func, args, kwargs in signal_registrations:
        func(*args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_json(value): """Convert the given value to a JSON object."""
if hasattr(value, 'replace'): value = value.replace('\n', ' ') try: return json.loads(value) except json.JSONDecodeError: # Escape double quotes. if hasattr(value, 'replace'): value = value.replace('"', '\\"') # try putting the value into a string return json.loads('"{}"'.format(value))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_list(key, *values): """Convert the given list of parameters to a JSON object. JSON object is of the form: {key: [values]}, where values represent the given list of parameters. """
return json.dumps({key: [_get_json(value) for value in values]})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def progress(progress): """Convert given progress to a JSON object. Check that progress can be represented as float between 0 and 1 and return it in JSON of the form: {"proc.progress": progress} """
if isinstance(progress, int) or isinstance(progress, float): progress = float(progress) else: try: progress = float(json.loads(progress)) except (TypeError, ValueError): return warning("Progress must be a float.") if not 0 <= progress <= 1: return warning("Progress must be a float between 0 and 1.") return json.dumps({'proc.progress': progress})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def export_file(file_path): """Prepend the given parameter with ``export``"""
if not os.path.isfile(file_path): return error("Referenced file does not exist: '{}'.".format(file_path)) return "export {}".format(file_path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def loadNiiData(lstNiiFls, strPathNiiMask=None, strPathNiiFunc=None): """load nii data. Parameters lstNiiFls : list, list of str with nii file names strPathNiiMask : str, path to nii file with mask (optional) strPathNiiFunc : str, parent path to nii files (optional) Returns ------- aryFunc : np.array Nii data """
print('---------Loading nii data') # check whether a mask is available if strPathNiiMask is not None: aryMask = nb.load(strPathNiiMask).get_data().astype('bool') # check a parent path is available that needs to be preprended to nii files if strPathNiiFunc is not None: lstNiiFls = [os.path.join(strPathNiiFunc, i) for i in lstNiiFls] aryFunc = [] for idx, path in enumerate(lstNiiFls): print('------------Loading run: ' + str(idx+1)) # Load 4D nii data: niiFunc = nb.load(path).get_data() # append to list if strPathNiiMask is not None: aryFunc.append(niiFunc[aryMask, :]) else: aryFunc.append(niiFunc) # concatenate arrys in list along time dimension aryFunc = np.concatenate(aryFunc, axis=-1) # set to type float32 aryFunc = aryFunc.astype('float32') return aryFunc
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calcMse(predTst, yTest, axis=0): """calculate mean squared error. Assumes that axis=0 is time Parameters predTst : np.array, predicted response for yTest yTest : np.array, actually observed response for yTest Returns ------- aryFunc : np.array MSE """
return np.mean((yTest - predTst) ** 2, axis=axis)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def detect(checksum_revisions, radius=defaults.RADIUS): """ Detects reverts that occur in a sequence of revisions. Note that, `revision` data meta will simply be returned in the case of a revert. This function serves as a convenience wrapper around calls to :class:`mwreverts.Detector`'s :func:`~mwreverts.Detector.process` method. :Parameters: checksum_revisions : `iterable` ( (checksum, revision) ) an iterable over tuples of checksum and revision meta data radius : int a positive integer indicating the maximum revision distance that a revert can span. :Return: an iterator over :class:`mwreverts.Revert` :Example: [Revert(reverting={'rev_id': 3}, reverteds=[{'rev_id': 2}], reverted_to={'rev_id': 1})] """
revert_detector = Detector(radius) for checksum, revision in checksum_revisions: revert = revert_detector.process(checksum, revision) if revert is not None: yield revert
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process(self, article): """ Ingests an article and processes it for metadata and elements to provide proper references in the EPUB spine. This method may only be called once unless the Package was instantiated in collection mode using ``Package(collection=True)``. It places entries in an internal spine list for the Main Content Document, the Bibliographic Content Document (if there are ref elements in Back), and the Tables Content Document (if there are table elements). It then employs the publisher specific methods for extracting article metadata using the article's publisher attribute (an instance of a Publisher class). Parameters article : openaccess_epub.article.Article instance An article to be included in the EPUB, to be processed for metadata and appropriate content document references. """
# Guard: outside collection mode a Package holds exactly one article.
if self.article is not None and not self.collection:
    log.warning('Could not process additional article. Package only \
handles one article unless collection mode is set.')
    return False
# All metadata extraction dispatches through the publisher object, so
# an article without one cannot be packaged.
if article.publisher is None:
    log.error('''Package cannot be generated for an Article \
without a publisher!''')
    return
self.article = article
# Keep the registrant-specific DOI suffix (the part after the first
# '/'); it is used below to build idref strings unique to this article.
self.article_doi = self.article.doi.split('/')[1]
self.all_dois.append(self.article.doi)
#Analyze the article to add entries to the spine
# Dots are swapped for dashes when forming idref strings.
dash_doi = self.article_doi.replace('.', '-')
#Entry for the main content document (always present)
main_idref = 'main-{0}-xhtml'.format(dash_doi)
self.spine_list.append(spine_item(main_idref, True))
#Entry for the biblio content document; only added when the article's
#back matter actually contains <ref> elements
biblio_idref = 'biblio-{0}-xhtml'.format(dash_doi)
if self.article.root.xpath('./back/ref-list/ref'):
    self.spine_list.append(spine_item(biblio_idref, True))
#Entry for the tables content document; second spine_item argument is
#False here — NOTE(review): presumably marks the item as non-linear in
#the spine, confirm against spine_item's definition.
tables_idref = 'tables-{0}-xhtml'.format(dash_doi)
if self.article.publisher.has_out_of_flow_tables():
    self.spine_list.append(spine_item(tables_idref, False))
self.acquire_metadata()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def acquire_metadata(self): """ Handles the acquisition of metadata for both collection mode and single mode, uses the metadata methods belonging to the article's publisher attribute. """
# Shorthand to avoid repeating the attribute chain below.
publisher = self.article.publisher
if not self.collection:
    # Single mode: identifier, title and dates come from this one
    # article. (Collection mode gathers these elsewhere.)
    self.pub_id = publisher.package_identifier()
    self.title = publisher.package_title()
    for date in publisher.package_date():
        self.dates.add(date)
# Metadata gathered in both modes.
for lang in publisher.package_language():  # languages
    self.languages.add(lang)
for contributor in publisher.package_contributors():  # contributors
    self.contributors.add(contributor)
self.publishers.add(publisher.package_publisher())  # publisher names
desc = publisher.package_description()
if desc is not None:
    self.descriptions.add(desc)
for subj in publisher.package_subject():  # subjects
    self.subjects.add(subj)
# Rights: record the statement and associate this DOI with it.
art_rights = publisher.package_rights()
self.rights.add(art_rights)
self.rights_associations.setdefault(art_rights, []).append(self.article.doi)