<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cmd(command, ignore_stderr=False, raise_on_return=False, timeout=None, encoding="utf-8"): """ Run a shell command and have it automatically decoded and printed :param command: Command to run as str :param ignore_stderr: To not print stderr :param raise_on_return: Run CompletedProcess.check_returncode() :param timeout: timeout to pass to communicate if python 3 :param encoding: How the output should be decoded """
result = run(command, timeout=timeout, shell=True)
if raise_on_return:
    result.check_returncode()
print(result.stdout.decode(encoding))
if not ignore_stderr and result.stderr:
    print(result.stderr.decode(encoding))
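The solution assumes a `run` helper already in scope. A minimal stand-in consistent with how the result is used above (it must capture stdout/stderr as bytes) — an assumption, not the reference implementation:

import subprocess

def run(command, timeout=None, shell=False):
    # Mirror subprocess.run but always capture output so that
    # result.stdout.decode(...) in cmd() works as expected.
    return subprocess.run(command, timeout=timeout, shell=shell,
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)

cmd("echo hello")  # prints: hello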
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pushd(directory): """Change working directories in style and stay organized! :param directory: Where do you want to go and remember? :return: saved directory stack """
directory = os.path.expanduser(directory)
_saved_paths.insert(0, os.path.abspath(os.getcwd()))
os.chdir(directory)
return [directory] + _saved_paths
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def popd(): """Go back to where you once were. :return: saved directory stack """
try:
    directory = _saved_paths.pop(0)
except IndexError:
    return [os.getcwd()]
os.chdir(directory)
return [directory] + _saved_paths
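A usage sketch for the pushd/popd pair, assuming the module-level stack `_saved_paths = []` that both solutions rely on:

import os
_saved_paths = []  # module-level stack assumed by pushd/popd

pushd("/tmp")       # change into /tmp, remembering where we were
print(os.getcwd())  # /tmp (or the platform's resolved temp path)
popd()              # return to the saved directory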
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find(name=None, ext=None, directory=".", match_case=False, disable_glob=False, depth=None): """ Designed for the interactive interpreter by making default order of find_files faster. :param name: Part of the file name :param ext: Extensions of the file you are looking for :param directory: Top location to recursively search for matching files :param match_case: If name has to be a direct match or not :param disable_glob: Do not look for globable names or use glob magic check :param depth: How many directories down to search :return: list of all files in the specified directory """
return find_files_list(directory=directory, ext=ext, name=name,
                       match_case=match_case, disable_glob=disable_glob,
                       depth=depth)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def head(file_path, lines=10, encoding="utf-8", printed=True, errors='strict'): """ Read the first N lines of a file, defaults to 10 :param file_path: Path to file to read :param lines: Number of lines to read in :param encoding: defaults to utf-8 to decode as, will fail on binary :param printed: Automatically print the lines instead of returning it :param errors: Decoding errors: 'strict', 'ignore' or 'replace' :return: if printed is false, the lines are returned as a list """
data = []
with open(file_path, "rb") as f:
    for _ in range(lines):
        try:
            if python_version >= (2, 7):
                data.append(next(f).decode(encoding, errors=errors))
            else:
                data.append(next(f).decode(encoding))
        except StopIteration:
            break
if printed:
    print("".join(data))
else:
    return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tail(file_path, lines=10, encoding="utf-8", printed=True, errors='strict'): """ A really silly way to get the last N lines, defaults to 10. :param file_path: Path to file to read :param lines: Number of lines to read in :param encoding: defaults to utf-8 to decode as, will fail on binary :param printed: Automatically print the lines instead of returning it :param errors: Decoding errors: 'strict', 'ignore' or 'replace' :return: if printed is false, the lines are returned as a list """
data = deque()
with open(file_path, "rb") as f:
    for line in f:
        if python_version >= (2, 7):
            data.append(line.decode(encoding, errors=errors))
        else:
            data.append(line.decode(encoding))
        if len(data) > lines:
            data.popleft()
if printed:
    print("".join(data))
else:
    return data
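For tail, a deque with a maxlen bound would drop old lines automatically. A minimal alternative sketch (a hypothetical helper, not part of the task):

from collections import deque

def tail_simple(file_path, lines=10, encoding="utf-8"):
    # deque(maxlen=lines) keeps only the last `lines` items of the iterable
    with open(file_path, "rb") as f:
        return [line.decode(encoding) for line in deque(f, maxlen=lines)]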
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cp(src, dst, overwrite=False): """ Copy files to a new location. :param src: list (or string) of paths of files to copy :param dst: file or folder to copy item(s) to :param overwrite: IF the file already exists, should I overwrite it? """
if not isinstance(src, list):
    src = [src]
dst = os.path.expanduser(dst)
dst_folder = os.path.isdir(dst)
if len(src) > 1 and not dst_folder:
    raise OSError("Cannot copy multiple items to the same file")
for item in src:
    source = os.path.expanduser(item)
    destination = (dst if not dst_folder
                   else os.path.join(dst, os.path.basename(source)))
    if not overwrite and os.path.exists(destination):
        _logger.warning("Not replacing {0} with {1}, overwrite not enabled"
                        "".format(destination, source))
        continue
    shutil.copy(source, destination)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cut(string, characters=2, trailing="normal"): """ Split a string into a list of N characters each. .. code:: python reusables.cut("abcdefghi") # ['ab', 'cd', 'ef', 'gh', 'i'] trailing gives you the following options: * normal: leaves remaining characters in their own last position * remove: return the list without the remainder characters * combine: add the remainder characters to the previous set * error: raise an IndexError if there are remaining characters .. code:: python reusables.cut("abcdefghi", 2, "error") # Traceback (most recent call last): # IndexError: String of length 9 not divisible by 2 to cut reusables.cut("abcdefghi", 2, "remove") # ['ab', 'cd', 'ef', 'gh'] reusables.cut("abcdefghi", 2, "combine") # ['ab', 'cd', 'ef', 'ghi'] :param string: string to modify :param characters: how many characters to split it into :param trailing: "normal", "remove", "combine", or "error" :return: list of the cut string """
split_str = [string[i:i + characters]
             for i in range(0, len(string), characters)]
if trailing != "normal" and len(split_str[-1]) != characters:
    if trailing.lower() == "remove":
        return split_str[:-1]
    if trailing.lower() == "combine" and len(split_str) >= 2:
        return split_str[:-2] + [split_str[-2] + split_str[-1]]
    if trailing.lower() == "error":
        raise IndexError("String of length {0} not divisible by {1} to"
                         " cut".format(len(string), characters))
return split_str
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def int_to_roman(integer): """ Convert an integer into a Roman numeral string. .. code:: python reusables.int_to_roman(445) # 'CDXLV' :param integer: integer to convert :return: roman string """
if not isinstance(integer, int):
    raise ValueError("Input integer must be of type int")
output = []
while integer > 0:
    for r, i in sorted(_roman_dict.items(), key=lambda x: x[1],
                       reverse=True):
        while integer >= i:
            output.append(r)
            integer -= i
return "".join(output)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def roman_to_int(roman_string): """ Converts a Roman numeral string into an integer. .. code:: python reusables.roman_to_int("XXXVI") # 36 :param roman_string: XVI or similar :return: parsed integer """
roman_string = roman_string.upper().strip()
if "IIII" in roman_string:
    raise ValueError("Malformed roman string")
value = 0
skip_one = False
last_number = None
for i, letter in enumerate(roman_string):
    if letter not in _roman_dict:
        raise ValueError("Malformed roman string")
    if skip_one:
        skip_one = False
        continue
    if i < (len(roman_string) - 1):
        double_check = letter + roman_string[i + 1]
        if double_check in _roman_dict:
            if last_number and _roman_dict[double_check] > last_number:
                raise ValueError("Malformed roman string")
            last_number = _roman_dict[double_check]
            value += _roman_dict[double_check]
            skip_one = True
            continue
    if last_number and _roman_dict[letter] > last_number:
        raise ValueError("Malformed roman string")
    last_number = _roman_dict[letter]
    value += _roman_dict[letter]
return value
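Both Roman numeral solutions assume a module-level _roman_dict that includes the two-letter subtractive pairs. A plausible definition and a round trip (the dict is an assumption, not shown in the tasks):

_roman_dict = {
    'M': 1000, 'CM': 900, 'D': 500, 'CD': 400, 'C': 100, 'XC': 90,
    'L': 50, 'XL': 40, 'X': 10, 'IX': 9, 'V': 5, 'IV': 4, 'I': 1,
}

assert int_to_roman(445) == 'CDXLV'
assert roman_to_int('CDXLV') == 445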
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def task_list(): """ Scans the modules set in RQ_JOBS_MODULES for RQ jobs decorated with @task Compiles a readable list for Job model task choices """
try:
    jobs_module = settings.RQ_JOBS_MODULE
except AttributeError:
    raise ImproperlyConfigured(_("You have to define RQ_JOBS_MODULE in settings.py"))
if isinstance(jobs_module, string_types):
    jobs_modules = (jobs_module,)
elif isinstance(jobs_module, (tuple, list)):
    jobs_modules = jobs_module
else:
    raise ImproperlyConfigured(_("RQ_JOBS_MODULE must be a string or a tuple"))
choices = []
for module in jobs_modules:
    try:
        tasks = importlib.import_module(module)
    except ImportError:
        raise ImproperlyConfigured(_("Can not find module {}").format(module))
    module_choices = [('%s.%s' % (module, x), underscore_to_camelcase(x))
                      for x, y in list(tasks.__dict__.items())
                      if type(y) == FunctionType and hasattr(y, 'delay')]
    choices.extend(module_choices)
choices.sort(key=lambda tup: tup[1])
return choices
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rq_job(self): """The last RQ Job this ran on"""
if not self.rq_id or not self.rq_origin:
    return
try:
    return RQJob.fetch(self.rq_id,
                       connection=get_connection(self.rq_origin))
except NoSuchJobError:
    return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rq_link(self): """Link to Django-RQ status page for this job"""
if self.rq_job:
    url = reverse('rq_job_detail',
                  kwargs={'job_id': self.rq_id,
                          'queue_index': queue_index_by_name(self.rq_origin)})
    return '<a href="{}">{}</a>'.format(url, self.rq_id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fix_module(job): """ Fix for tasks without a module. Provides backwards compatibility with < 0.1.5 """
modules = settings.RQ_JOBS_MODULE
if not type(modules) == tuple:
    modules = [modules]
for module in modules:
    try:
        module_match = importlib.import_module(module)
        if hasattr(module_match, job.task):
            job.task = '{}.{}'.format(module, job.task)
            break
    except ImportError:
        continue
return job
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_checks(self): """Return a list of functions to use when testing values."""
return [
    self.is_date,
    self.is_datetime,
    self.is_integer,
    self.is_float,
    self.default]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def clean_text(self, text):
    '''Clean text using bleach.'''
    if text is None:
        return ''
    text = re.sub(ILLEGAL_CHARACTERS_RE, '', text)
    if '<' in text or '&lt' in text:
        text = clean(text, tags=self.tags, strip=self.strip)
    return unescape(text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def path(self, category = None, image = None, feature = None): """ Constructs the path to categories, images and features. This path function assumes that the following storage scheme is used on the hard disk to access categories, images and features: - categories: /impath/category - images: /impath/category/category_image.png - features: /ftrpath/category/feature/category_image.mat The path function is called to query the location of categories, images and features before they are loaded. Thus, if your features are organized in a different way, you can simply replace this method such that it returns appropriate paths, and the LoadFromDisk loader will use your naming scheme. """
filename = None
if category is not None:
    filename = join(self.impath, str(category))
if image is not None:
    assert category is not None, \
        "The category has to be given if the image is given"
    filename = join(filename, '%s_%s.png' % (str(category), str(image)))
if feature is not None:
    assert category is not None and image is not None, \
        "If a feature name is given the category and image also have to be given."
    filename = join(self.ftrpath, str(category), feature,
                    '%s_%s.mat' % (str(category), str(image)))
return filename
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_image(self, cat, img): """ Loads an image from disk. """
filename = self.path(cat, img)
data = []
if filename.endswith('mat'):
    data = loadmat(filename)['output']
else:
    data = imread(filename)
if self.size is not None:
    return imresize(data, self.size)
else:
    return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_feature(self, cat, img, feature): """ Load a feature from disk. """
filename = self.path(cat, img, feature)
data = loadmat(filename)
name = [k for k in list(data.keys()) if not k.startswith('__')]
if self.size is not None:
    return imresize(data[name.pop()], self.size)
return data[name.pop()]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_image(self, cat, img, data): """Saves a new image."""
filename = self.path(cat, img)
mkdir(filename)
if type(data) == np.ndarray:
    data = Image.fromarray(data).convert('RGB')
data.save(filename)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_feature(self, cat, img, feature, data): """Saves a new feature."""
filename = self.path(cat, img, feature)
mkdir(filename)
savemat(filename, {'output': data})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def randsample(vec, nr_samples, with_replacement = False): """ Draws nr_samples random samples from vec. """
if not with_replacement:
    return np.random.permutation(vec)[0:nr_samples]
else:
    return np.asarray(vec)[np.random.randint(0, len(vec), nr_samples)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dict_fun(data, function): """ Apply a function to all values in a dictionary, return a dictionary with results. Parameters data : dict a dictionary whose values are adequate input to the second argument of this function. function : function a function that takes one argument Returns ------- a dictionary with the same keys as data, such that result[key] = function(data[key]) """
return dict((k, function(v)) for k, v in list(data.items()))
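A quick usage sketch for dict_fun:

lengths = dict_fun({'a': [1, 2, 3], 'b': []}, len)
# {'a': 3, 'b': 0}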
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(path, variable='Datamat'): """ Load datamat at path. Parameters: path : string Absolute path of the file to load from. """
f = h5py.File(path, 'r')
try:
    dm = fromhdf5(f[variable])
finally:
    f.close()
return dm
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter(self, index): #@ReservedAssignment """ Filters a datamat by different aspects. This function is a device to filter the datamat by certain logical conditions. It takes as input a logical array (contains only True or False for every datapoint) and kicks out all datapoints for which the array says False. The logical array can conveniently be created with numpy, e.g.:: index = datamat.x > 10 Parameters: index : array Array-like that contains True for every element that passes the filter; else contains False Returns: datamat : Datamat Instance """
return Datamat(categories=self._categories, datamat=self, index=index)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy(self): """ Returns a copy of the datamat. """
return self.filter(np.ones(self._num_fix).astype(bool))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, path): """ Saves Datamat to path. Parameters: path : string Absolute path of the file to save to. """
f = h5py.File(path, 'w')
try:
    fm_group = f.create_group('Datamat')
    for field in self.fieldnames():
        try:
            fm_group.create_dataset(field, data=self.__dict__[field])
        except (TypeError,) as e:
            # Assuming field is an object array that contains dicts which
            # contain numpy arrays as values
            sub_group = fm_group.create_group(field)
            for i, d in enumerate(self.__dict__[field]):
                index_group = sub_group.create_group(str(i))
                print((field, d))
                for key, value in list(d.items()):
                    index_group.create_dataset(key, data=value)
    for param in self.parameters():
        fm_group.attrs[param] = self.__dict__[param]
finally:
    f.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_param(self, key, value): """ Set the value of a parameter. """
self.__dict__[key] = value
self._parameters[key] = value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def by_field(self, field): """ Returns an iterator that iterates over unique values of field Parameters: field : string Filters the datamat for every unique value in field and yields the filtered datamat. Returns: datamat : Datamat that is filtered according to one of the unique values in 'field'. """
for value in np.unique(self.__dict__[field]):
    yield self.filter(self.__dict__[field] == value)
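A usage sketch, assuming a datamat `dm` with 'category' and 'x' fields (names are illustrative):

for dm_cat in dm.by_field('category'):
    print(dm_cat.category[0], dm_cat.x.mean())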
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_field(self, name, data): """ Add a new field to the datamat. Parameters: name : string Name of the new field data : list Data for the new field, must be same length as all other fields. """
if name in self._fields:
    raise ValueError
if not len(data) == self._num_fix:
    raise ValueError
self._fields.append(name)
self.__dict__[name] = data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_field_like(self, name, like_array): """ Add a new field to the Datamat with the dtype of the like_array and the shape of the like_array except for the first dimension which will be instead the field-length of this Datamat. """
new_shape = list(like_array.shape)
new_shape[0] = len(self)
new_data = ma.empty(new_shape, like_array.dtype)
new_data.mask = True
self.add_field(name, new_data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rm_field(self, name): """ Remove a field from the datamat. Parameters: name : string Name of the field to be removed """
if name not in self._fields:
    raise ValueError
self._fields.remove(name)
del self.__dict__[name]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_parameter(self, name, value): """ Adds a parameter to the existing Datamat. Fails if a parameter with the same name already exists or if the name is otherwise in this object's __dict__ dictionary. """
if name in self._parameters:
    raise ValueError("'%s' is already a parameter" % (name))
elif name in self.__dict__:
    raise ValueError("'%s' conflicts with the Datamat name-space" % (name))
self.__dict__[name] = value
self._parameters[name] = self.__dict__[name]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rm_parameter(self, name): """ Removes a parameter from the existing Datamat. Fails if the parameter doesn't exist. """
if name not in self._parameters:
    raise ValueError("no '%s' parameter found" % (name))
del self._parameters[name]
del self.__dict__[name]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parameter_to_field(self, name): """ Promotes a parameter to a field by creating a new array of same size as the other existing fields, filling it with the current value of the parameter, and then removing that parameter. """
if name not in self._parameters:
    raise ValueError("no '%s' parameter found" % (name))
if self._fields.count(name) > 0:
    raise ValueError("field with name '%s' already exists" % (name))
data = np.array([self._parameters[name]] * self._num_fix)
self.rm_parameter(name)
self.add_field(name, data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def join(self, fm_new, minimal_subset=True): """ Adds content of a new Datamat to this Datamat. If a parameter of the Datamats is not equal or does not exist in one, it is promoted to a field. If the two Datamats have different fields then the elements for the Datamats that did not have the field will be NaN, unless 'minimal_subset' is true, in which case the mismatching fields will simply be deleted. Parameters fm_new : instance of Datamat This Datamat is added to the current one. minimal_subset : if true, remove fields which don't exist in both, instead of using NaNs for missing elements (defaults to True) Capacity to use superset of fields added by rmuil 2012/01/30 """
# Check if parameters are equal. If not, promote them to fields.
'''
for (nm, val) in fm_new._parameters.items():
    if self._parameters.has_key(nm):
        if (val != self._parameters[nm]):
            self.parameter_to_field(nm)
            fm_new.parameter_to_field(nm)
    else:
        fm_new.parameter_to_field(nm)
'''
# Deal with mismatch in the fields
# First those in self that do not exist in new...
orig_fields = self._fields[:]
for field in orig_fields:
    if field not in fm_new._fields:
        if minimal_subset:
            self.rm_field(field)
        else:
            warnings.warn("This option is deprecated. Clean and Filter "
                          "your data before it is joined.",
                          DeprecationWarning)
            fm_new.add_field_like(field, self.field(field))
# ... then those in the new that do not exist in self.
orig_fields = fm_new._fields[:]
for field in orig_fields:
    if field not in self._fields:
        if minimal_subset:
            fm_new.rm_field(field)
        else:
            warnings.warn("This option is deprecated. Clean and Filter "
                          "your data before it is joined.",
                          DeprecationWarning)
            self.add_field_like(field, fm_new.field(field))
if 'SUBJECTINDEX' in self._fields[:]:
    if fm_new.SUBJECTINDEX[0] in self.SUBJECTINDEX:
        fm_new.SUBJECTINDEX[:] = self.SUBJECTINDEX.max() + 1
# Concatenate fields
for field in self._fields:
    self.__dict__[field] = ma.hstack((self.__dict__[field],
                                      fm_new.__dict__[field]))
# Update _num_fix
self._num_fix += fm_new._num_fix
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _draw(self, prev_angle = None, prev_length = None): """ Draws a new length- and angle-difference pair and calculates length and angle absolutes matching the last saccade drawn. Parameters: prev_angle : float, optional The last angle that was drawn in the current trajectory prev_length : float, optional The last length that was drawn in the current trajectory Note: Either both prev_angle and prev_length have to be given or none; if only one parameter is given, it will be neglected. """
if (prev_angle is None) or (prev_length is None):
    (length, angle) = np.unravel_index(
        self.drawFrom('self.firstLenAng_cumsum',
                      self.getrand('self.firstLenAng_cumsum')),
        self.firstLenAng_shape)
    angle = angle - ((self.firstLenAng_shape[1] - 1) / 2)
    angle += 0.5
    length += 0.5
    length *= self.fm.pixels_per_degree
else:
    ind = int(floor(prev_length / self.fm.pixels_per_degree))
    while ind >= len(self.probability_cumsum):
        ind -= 1
    while not (self.probability_cumsum[ind]).any():
        ind -= 1
    J, I = np.unravel_index(
        self.drawFrom('self.probability_cumsum ' + repr(ind),
                      self.getrand('self.probability_cumsum ' + repr(ind))),
        self.full_H1[ind].shape)
    angle = reshift((I - self.full_H1[ind].shape[1] / 2) + prev_angle)
    angle += 0.5
    length = J + 0.5
    length *= self.fm.pixels_per_degree
return angle, length
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sample(self): """ Draws a trajectory length, first coordinates, lengths, angles and length-angle-difference pairs according to the empirical distribution. Each call creates one complete trajectory. """
lengths = []
angles = []
coordinates = []
fix = []
sample_size = int(round(self.trajLen_borders[self.drawFrom(
    'self.trajLen_cumsum', self.getrand('self.trajLen_cumsum'))]))
coordinates.append([0, 0])
fix.append(1)
while len(coordinates) < sample_size:
    if len(lengths) == 0 and len(angles) == 0:
        angle, length = self._draw()
    else:
        angle, length = self._draw(prev_angle=angles[-1],
                                   prev_length=lengths[-1])
    x, y = self._calc_xy(coordinates[-1], angle, length)
    coordinates.append([x, y])
    lengths.append(length)
    angles.append(angle)
    fix.append(fix[-1] + 1)
return coordinates
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def relative_bias(fm, scale_factor = 1, estimator = None): """ Computes the relative bias, i.e. the distribution of saccade angles and amplitudes. Parameters: fm : DataMat The fixation data to use scale_factor : double Returns: 2D probability distribution of saccade angles and amplitudes. """
assert 'fix' in fm.fieldnames(), "Can not work without fixation numbers"
excl = fm.fix - np.roll(fm.fix, 1) != 1
# Now calculate the direction where the NEXT fixation goes to
diff_x = (np.roll(fm.x, 1) - fm.x)[~excl]
diff_y = (np.roll(fm.y, 1) - fm.y)[~excl]
# Make a histogram of diff values
# this specifies left edges of the histogram bins, i.e. fixations between
# ]0 binedge[0]] are included. --> fixations are ceiled
ylim = np.round(scale_factor * fm.image_size[0])
xlim = np.round(scale_factor * fm.image_size[1])
x_steps = np.ceil(2 * xlim) + 1
if x_steps % 2 != 0:
    x_steps += 1
y_steps = np.ceil(2 * ylim) + 1
if y_steps % 2 != 0:
    y_steps += 1
e_x = np.linspace(-xlim, xlim, x_steps)
e_y = np.linspace(-ylim, ylim, y_steps)
samples = np.array(list(zip((scale_factor * diff_y),
                            (scale_factor * diff_x))))
if estimator is None:
    (hist, _) = np.histogramdd(samples, (e_y, e_x))
else:
    hist = estimator(samples, e_y, e_x)
return hist
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_velocity(samplemat, Hz, blinks=None):
    '''
    Compute velocity of eye-movements.

    Samplemat must contain fields 'x' and 'y', specifying the x, y
    coordinates of gaze location. The function assumes that the values
    in x, y are sampled continuously at a rate specified by 'Hz'.
    '''
    Hz = float(Hz)
    distance = ((np.diff(samplemat.x) ** 2) +
                (np.diff(samplemat.y) ** 2)) ** .5
    distance = np.hstack(([distance[0]], distance))
    if blinks is not None:
        distance[blinks[1:]] = np.nan
    win = np.ones((velocity_window_size)) / float(velocity_window_size)
    velocity = np.convolve(distance, win, mode='same')
    velocity = velocity / (velocity_window_size / Hz)
    acceleration = np.diff(velocity) / (1. / Hz)
    acceleration = abs(np.hstack(([acceleration[0]], acceleration)))
    return velocity, acceleration
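A usage sketch on synthetic samples; velocity_window_size is a module-level global assumed by get_velocity (the value 5 here is an arbitrary choice):

import numpy as np
from types import SimpleNamespace

velocity_window_size = 5
samples = SimpleNamespace(x=np.cumsum(np.random.randn(1000)),
                          y=np.cumsum(np.random.randn(1000)))
vel, acc = get_velocity(samples, Hz=200)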
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def fixation_detection(samplemat, saccades, Hz=200, samples2fix=None,
                       respect_trial_borders=False, sample_times=None):
    '''
    Detect fixations from saccades. Fixations are defined as intervals
    between saccades. This function also calculates start and end times
    (in ms) for each fixation.

    Input:
        samplemat: datamat
            Contains the recorded samples and associated metadata.
        saccades: ndarray
            Logical vector that is True for samples that belong to a saccade.
        Hz: Float
            Number of samples per second.
        samples2fix: Dict
            There is usually metadata associated with the samples (e.g. the
            trial number). This dictionary can be used to specify how the
            metadata should be collapsed for one fixation. It contains field
            names from samplemat as keys and functions as values that return
            one value when they are called with all samples for one fixation.
            In addition the function can raise an 'InvalidFixation' exception
            to signal that the fixation should be discarded.
    '''
    if samples2fix is None:
        samples2fix = {}
    fixations = ~saccades
    acc = AccumulatorFactory()
    if not respect_trial_borders:
        borders = np.where(np.diff(fixations.astype(int)))[0] + 1
    else:
        borders = np.where(
            ~(np.diff(fixations.astype(int)) == 0) |
            ~(np.diff(samplemat.trial.astype(int)) == 0))[0] + 1
    fixations = 0 * saccades.copy()
    if not saccades[0]:
        borders = np.hstack(([0], borders))
    for i, (start, end) in enumerate(zip(borders[0::2], borders[1::2])):
        current = {}
        for k in samplemat.fieldnames():
            if k in list(samples2fix.keys()):
                current[k] = samples2fix[k](samplemat, k, start, end)
            else:
                current[k] = np.mean(samplemat.field(k)[start:end])
        current['start_sample'] = start
        current['end_sample'] = end
        fixations[start:end] = 1
        # Calculate start and end time in ms
        if sample_times is None:
            current['start'] = 1000 * start / Hz
            current['end'] = 1000 * end / Hz
        else:
            current['start'] = sample_times[start]
            current['end'] = sample_times[end]
        acc.update(current)
    return acc.get_dm(params=samplemat.parameters()), fixations.astype(bool)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse(parse_obj, agent=None, etag=None, modified=None, inject=False): """Parse a subscription list and return a dict containing the results. :param parse_obj: A file-like object or a string containing a URL, an absolute or relative filename, or an XML document. :type parse_obj: str or file :param agent: User-Agent header to be sent when requesting a URL :type agent: str :param etag: The ETag header to be sent when requesting a URL. :type etag: str :param modified: The Last-Modified header to be sent when requesting a URL. :type modified: str or datetime.datetime :returns: All of the parsed information, webserver HTTP response headers, and any exception encountered. :rtype: dict :py:func:`~listparser.parse` is the only public function exposed by listparser. If *parse_obj* is a URL, the *agent* will identify the software making the request, *etag* will identify the last HTTP ETag header returned by the webserver, and *modified* will identify the last HTTP Last-Modified header returned by the webserver. *agent* and *etag* must be strings, while *modified* can be either a string or a Python *datetime.datetime* object. If *agent* is not provided, the :py:data:`~listparser.USER_AGENT` global variable will be used by default. """
guarantees = common.SuperDict({
    'bozo': 0,
    'feeds': [],
    'lists': [],
    'opportunities': [],
    'meta': common.SuperDict(),
    'version': '',
})
fileobj, info = _mkfile(parse_obj, (agent or USER_AGENT), etag, modified)
guarantees.update(info)
if not fileobj:
    return guarantees

handler = Handler()
handler.harvest.update(guarantees)
parser = xml.sax.make_parser()
parser.setFeature(xml.sax.handler.feature_namespaces, True)
parser.setContentHandler(handler)
parser.setErrorHandler(handler)
if inject:
    fileobj = Injector(fileobj)
try:
    parser.parse(fileobj)
except (SAXParseException, MalformedByteSequenceException):  # pragma: no cover
    # Jython propagates exceptions past the ErrorHandler.
    err = sys.exc_info()[1]
    handler.harvest.bozo = 1
    handler.harvest.bozo_exception = err
finally:
    fileobj.close()

# Test if a DOCTYPE injection is needed
if hasattr(handler.harvest, 'bozo_exception'):
    if 'entity' in handler.harvest.bozo_exception.__str__():
        if not inject:
            return parse(parse_obj, agent, etag, modified, True)
# Make it clear that the XML file is broken
# (if no other exception has been assigned)
if inject and not handler.harvest.bozo:
    handler.harvest.bozo = 1
    handler.harvest.bozo_exception = ListError('undefined entity found')
return handler.harvest
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def fixations(self):
    '''
    Filter the fixmat such that it only contains fixations on images in
    categories that are also in the categories object.
    '''
    if not self._fixations:
        raise RuntimeError('This Images object does not have'
                           ' an associated fixmat')
    if len(list(self._categories.keys())) == 0:
        return None
    else:
        idx = np.zeros(self._fixations.x.shape, dtype='bool')
        for (cat, _) in list(self._categories.items()):
            idx = idx | (self._fixations.category == cat)
        return self._fixations[idx]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def data(self, value): """ Saves a new image to disk """
self.loader.save_image(self.category, self.image, value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fixations(self): """ Returns all fixations that are on this image. A precondition for this to work is that a fixmat is associated with this Image object. """
if not self._fixations:
    raise RuntimeError('This Images object does not have'
                       ' an associated fixmat')
return self._fixations[(self._fixations.category == self.category) &
                       (self._fixations.filenumber == self.image)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate(self): """ Generator for creating the cross-validation slices. Returns A tuple of that contains two fixmats (training and test) and two Category objects (test and train). """
for _ in range(0, self.num_slices):
    # 1. separate fixmat into test and training fixmat
    subjects = np.unique(self.fm.SUBJECTINDEX)
    # cast counts to int so they can be used for slicing (Python 3)
    test_subs = randsample(subjects,
                           int(self.subject_hold_out * len(subjects)))
    train_subs = [x for x in subjects if x not in test_subs]
    test_fm = self.fm[ismember(self.fm.SUBJECTINDEX, test_subs)]
    train_fm = self.fm[ismember(self.fm.SUBJECTINDEX, train_subs)]

    # 2. distribute images
    test_imgs = {}
    train_imgs = {}
    id_test = (test_fm.x < 1) & False
    id_train = (train_fm.x < 1) & False
    for cat in self.categories:
        imgs = cat.images()
        test_imgs.update({cat.category:
                          randsample(imgs,
                                     int(self.image_hold_out * len(imgs))).tolist()})
        train_imgs.update({cat.category:
                           [x for x in imgs
                            if not ismember(x, test_imgs[cat.category])]})
        id_test = id_test | ((ismember(test_fm.filenumber,
                                       test_imgs[cat.category])) &
                             (test_fm.category == cat.category))
        id_train = id_train | ((ismember(train_fm.filenumber,
                                         train_imgs[cat.category])) &
                               (train_fm.category == cat.category))

    # 3. Create categories objects and yield result
    test_stimuli = Categories(self.categories.loader, test_imgs,
                              features=self.categories._features,
                              fixations=test_fm)
    train_stimuli = Categories(self.categories.loader, train_imgs,
                               features=self.categories._features,
                               fixations=train_fm)
    yield (train_fm[id_train], train_stimuli,
           test_fm[id_test], test_stimuli)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def prepare_data(fm, max_back, dur_cap=700):
    '''
    Computes angle and length differences up to a given order and deletes
    suspiciously long fixations.

    Input
        fm: Fixmat
            Fixmat for which to compute angle and length differences.
        max_back: Int
            Computes delta angle and amplitude up to order max_back.
        dur_cap: Int
            Longest allowed fixation duration.
    Output
        fm: Fixmat
            Filtered fixmat that aligns to the other outputs.
        durations: ndarray
            Duration for each fixation in fm.
        forward_angle:
            Angle between previous and next saccade.
    '''
    durations = np.roll(fm.end - fm.start, 1).astype(float)
    angles, lengths, ads, lds = anglendiff(fm, roll=max_back,
                                           return_abs=True)
    # durations and ads are aligned in a way that an entry in ads
    # encodes the angle of the saccade away from a fixation in
    # durations
    forward_angle = abs(reshift(ads[0])).astype(float)
    ads = [abs(reshift(a)) for a in ads]
    # Now filter out weird fixation durations
    id_in = durations > dur_cap
    durations[id_in] = np.nan
    forward_angle[id_in] = np.nan
    return fm, durations, forward_angle, ads, lds
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def saccadic_momentum_effect(durations, forward_angle, summary_stat=nanmean): """ Computes the mean fixation duration at forward angles. """
durations_per_da = np.nan * np.ones((len(e_angle) - 1,))
for i, (bo, b1) in enumerate(zip(e_angle[:-1], e_angle[1:])):
    idx = ((bo <= forward_angle) &
           (forward_angle < b1) &
           (~np.isnan(durations)))
    durations_per_da[i] = summary_stat(durations[idx])
return durations_per_da
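saccadic_momentum_effect (and ior_effect below) bin against module-level edge arrays e_angle and e_dist that are not shown in the tasks; a plausible definition, flagged as an assumption:

import numpy as np
e_angle = np.linspace(0, 180, 19)  # 18 ten-degree angle bins (assumed)
e_dist = np.linspace(-10, 10, 21)  # length-difference bins (assumed)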
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ior_effect(durations, angle_diffs, length_diffs, summary_stat=np.mean, parallel=True, min_samples=20): """ Computes a measure of fixation durations at delta angle and delta length combinations. """
raster = np.empty((len(e_dist) - 1, len(e_angle) - 1), dtype=object)
for a, (a_low, a_upp) in enumerate(zip(e_angle[:-1], e_angle[1:])):
    for d, (d_low, d_upp) in enumerate(zip(e_dist[:-1], e_dist[1:])):
        idx = ((d_low <= length_diffs) & (length_diffs < d_upp) &
               (a_low <= angle_diffs) & (angle_diffs < a_upp))
        if sum(idx) < min_samples:
            raster[d, a] = np.array([np.nan])
        else:
            raster[d, a] = durations[idx]
if parallel:
    p = pool.Pool(3)
    result = p.map(summary_stat, list(raster.flatten()))
    p.terminate()
else:
    result = list(map(summary_stat, list(raster.flatten())))
for idx, value in enumerate(result):
    i, j = np.unravel_index(idx, raster.shape)
    raster[i, j] = value
return raster
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def predict_fixation_duration( durations, angles, length_diffs, dataset=None, params=None): """ Fits a non-linear piecewise regression to fixation durations for a fixmat. Returns corrected fixation durations. """
if dataset is None:
    dataset = np.ones(durations.shape)
corrected_durations = np.nan * np.ones(durations.shape)
for i, ds in enumerate(np.unique(dataset)):
    e = lambda v, x, y, z: (leastsq_dual_model(x, z, *v) - y)
    v0 = [120, 220.0, -.1, 0.5, .1, .1]
    id_ds = dataset == ds
    idnan = ((~np.isnan(angles)) &
             (~np.isnan(durations)) &
             (~np.isnan(length_diffs)))
    v, s = leastsq(e, v0,
                   args=(angles[idnan & id_ds],
                         durations[idnan & id_ds],
                         length_diffs[idnan & id_ds]),
                   maxfev=10000)
    corrected_durations[id_ds] = (durations[id_ds] -
                                  (leastsq_dual_model(angles[id_ds],
                                                      length_diffs[id_ds],
                                                      *v)))
    if params is not None:
        params['v' + str(i)] = v
        params['s' + str(i)] = s
return corrected_durations
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def subject_predictions(fm, field='SUBJECTINDEX',
                        method=predict_fixation_duration, data=None):
    '''
    Calculates the saccadic momentum effect for individual subjects.

    Removes any effect of amplitude differences. The parameters are
    fitted on unbinned data. The effects are computed on binned data.
    See e_dist and e_angle for the binning parameters.
    '''
    if data is None:
        fma, dura, faa, adsa, ldsa = prepare_data(fm, dur_cap=700,
                                                  max_back=5)
        adsa = adsa[0]
        ldsa = ldsa[0]
    else:
        fma, dura, faa, adsa, ldsa = data
    fma = fma.copy()
    sub_effects = []
    sub_predictions = []
    parameters = []
    for i, fmsub in enumerate(np.unique(fma.field(field))):
        id = fma.field(field) == fmsub
        dur, fa, ads, lds = dura[id], faa[id], adsa[id], ldsa[id]
        params = {}
        _ = method(dur, fa, lds, params=params)
        ps = params['v0']
        ld_corrected = leastsq_only_dist(lds, ps[4], ps[5])
        prediction = leastsq_only_angle(fa, ps[0], ps[1], ps[2], ps[3])
        sub_predictions += [saccadic_momentum_effect(prediction, fa)]
        sub_effects += [saccadic_momentum_effect(dur - ld_corrected, fa)]
        parameters += [ps]
    return np.array(sub_effects), np.array(sub_predictions), parameters
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def intersubject_scores(fm, category, predicting_filenumbers, predicting_subjects, predicted_filenumbers, predicted_subjects, controls = True, scale_factor = 1): """ Calculates how well the fixations from a set of subjects on a set of images can be predicted with the fixations from another set of subjects on another set of images. The prediction is carried out by computing a fixation density map from fixations of predicting_subjects subjects on images in predicting_filenumbers. Prediction accuracy is assessed by measures.prediction_scores. Parameters fm : fixmat instance category : int Category from which the fixations are taken. predicting_filenumbers : list List of filenumbers used for prediction, i.e. images where fixations for the prediction are taken from. predicting_subjects : list List of subjects whose fixations on images in predicting_filenumbers are used for the prediction. predicted_filenumbers : list List of images from which the to be predicted fixations are taken. predicted_subjects : list List of subjects used for evaluation, i.e. subjects whose fixations on images in predicted_filenumbers are taken for evaluation. controls : bool, optional If True (default), n_predict subjects are chosen from the fixmat. If False, 1000 fixations are randomly generated and used for testing. scale_factor : int, optional specifies the scaling of the fdm. Default is 1. Returns auc : area under the roc curve for sets of actuals and controls true_pos_rate : ndarray Rate of true positives for every given threshold value. All values appearing in actuals are taken as thresholds. Uses lower sum interpolation. false_pos_rate : ndarray See true_pos_rate but for false positives. """
predicting_fm = fm[(ismember(fm.SUBJECTINDEX, predicting_subjects)) &
                   (ismember(fm.filenumber, predicting_filenumbers)) &
                   (fm.category == category)]
predicted_fm = fm[(ismember(fm.SUBJECTINDEX, predicted_subjects)) &
                  (ismember(fm.filenumber, predicted_filenumbers)) &
                  (fm.category == category)]
try:
    predicting_fdm = compute_fdm(predicting_fm, scale_factor=scale_factor)
except RuntimeError:
    predicting_fdm = None
if controls == True:
    fm_controls = fm[(ismember(fm.SUBJECTINDEX, predicted_subjects)) &
                     ((ismember(fm.filenumber, predicted_filenumbers))
                      != True) &
                     (fm.category == category)]
    return measures.prediction_scores(predicting_fdm, predicted_fm,
                                      controls=(fm_controls.y,
                                                fm_controls.x))
return measures.prediction_scores(predicting_fdm, predicted_fm,
                                  controls=None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def intersubject_scores_random_subjects(fm, category, filenumber, n_train, n_predict, controls=True, scale_factor = 1): """ Calculates how well the fixations of n random subjects on one image can be predicted with the fixations of m other random subjects. Notes Function that uses intersubject_auc for computing auc. Parameters fm : fixmat instance category : int Category from which the fixations are taken. filenumber : int Image from which fixations are taken. n_train : int The number of subjects which are used for prediction. n_predict : int The number of subjects to predict controls : bool, optional If True (default), n_predict subjects are chosen from the fixmat. If False, 1000 fixations are randomly generated and used for testing. scale_factor : int, optional specifies the scaling of the fdm. Default is 1. Returns tuple : prediction scores """
subjects = np.unique(fm.SUBJECTINDEX)
if len(subjects) < n_train + n_predict:
    raise ValueError("Not enough subjects in fixmat")
# draw a random sample of subjects for testing and evaluation, according
# to the specified set sizes (n_train, n_predict)
np.random.shuffle(subjects)
predicted_subjects = subjects[0:n_predict]
predicting_subjects = subjects[n_predict:n_predict + n_train]
assert len(predicting_subjects) == n_train
assert len(predicted_subjects) == n_predict
# the original list-comprehension assert was always truthy; check properly
assert all(x not in predicting_subjects for x in predicted_subjects)
return intersubject_scores(fm, category, [filenumber],
                           predicting_subjects, [filenumber],
                           predicted_subjects, controls, scale_factor)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def upper_bound(fm, nr_subs = None, scale_factor = 1): """ Compute the inter-subject consistency upper bound for a fixmat. Input: fm : a fixmat instance nr_subs : the number of subjects used for the prediction. Defaults to the total number of subjects in the fixmat minus 1 scale_factor : the scale factor of the FDMs. Default is 1. Returns: A list of scores; the list contains one dictionary for each measure. Each dictionary contains one key for each category and the corresponding value is an array with scores for each subject. """
nr_subs_total = len(np.unique(fm.SUBJECTINDEX))
if not nr_subs:
    nr_subs = nr_subs_total - 1
assert (nr_subs < nr_subs_total)
# initialize output structure; every measure gets one dict with
# category numbers as keys and numpy-arrays as values
intersub_scores = []
for measure in range(len(measures.scores)):
    res_dict = {}
    result_vectors = [np.empty(nr_subs_total) + np.nan
                      for _ in np.unique(fm.category)]
    res_dict.update(list(zip(np.unique(fm.category), result_vectors)))
    intersub_scores.append(res_dict)
# compute inter-subject scores for every stimulus, with leave-one-out
# over subjects
for fm_cat in fm.by_field('category'):
    cat = fm_cat.category[0]
    for (sub_counter, sub) in enumerate(np.unique(fm_cat.SUBJECTINDEX)):
        image_scores = []
        for fm_single in fm_cat.by_field('filenumber'):
            predicting_subs = (np.setdiff1d(np.unique(
                fm_single.SUBJECTINDEX), [sub]))
            np.random.shuffle(predicting_subs)
            predicting_subs = predicting_subs[0:nr_subs]
            predicting_fm = fm_single[
                (ismember(fm_single.SUBJECTINDEX, predicting_subs))]
            predicted_fm = fm_single[fm_single.SUBJECTINDEX == sub]
            try:
                predicting_fdm = compute_fdm(predicting_fm,
                                             scale_factor=scale_factor)
            except RuntimeError:
                predicting_fdm = None
            image_scores.append(measures.prediction_scores(
                predicting_fdm, predicted_fm))
        for (measure, score) in enumerate(nanmean(image_scores, 0)):
            intersub_scores[measure][cat][sub_counter] = score
return intersub_scores
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lower_bound(fm, nr_subs = None, nr_imgs = None, scale_factor = 1): """ Compute the spatial bias lower bound for a fixmat. Input: fm : a fixmat instance nr_subs : the number of subjects used for the prediction. Defaults to the total number of subjects in the fixmat minus 1 nr_imgs : the number of images used for prediction. If given, the same number will be used for every category. If not given, leave-one-out will be used in all categories. scale_factor : the scale factor of the FDMs. Default is 1. Returns: A list of spatial bias scores; the list contains one dictionary for each measure. Each dictionary contains one key for each category and the corresponding value is an array with scores for each subject. """
nr_subs_total = len(np.unique(fm.SUBJECTINDEX))
if nr_subs is None:
    nr_subs = nr_subs_total - 1
assert (nr_subs < nr_subs_total)
# initialize output structure; every measure gets one dict with
# category numbers as keys and numpy-arrays as values
sb_scores = []
for measure in range(len(measures.scores)):
    res_dict = {}
    result_vectors = [np.empty(nr_subs_total) + np.nan
                      for _ in np.unique(fm.category)]
    res_dict.update(list(zip(np.unique(fm.category), result_vectors)))
    sb_scores.append(res_dict)
# compute mean spatial bias predictive power for all subjects in all
# categories
for fm_cat in fm.by_field('category'):
    cat = fm_cat.category[0]
    nr_imgs_cat = len(np.unique(fm_cat.filenumber))
    if not nr_imgs:
        nr_imgs_current = nr_imgs_cat - 1
    else:
        nr_imgs_current = nr_imgs
    assert (nr_imgs_current < nr_imgs_cat)
    for (sub_counter, sub) in enumerate(np.unique(fm.SUBJECTINDEX)):
        image_scores = []
        for fm_single in fm_cat.by_field('filenumber'):
            # Iterating by field filenumber makes filenumbers
            # in fm_single unique: just take the first one to get the
            # filenumber for this fixmat
            fn = fm_single.filenumber[0]
            predicting_subs = (np.setdiff1d(np.unique(
                fm_cat.SUBJECTINDEX), [sub]))
            np.random.shuffle(predicting_subs)
            predicting_subs = predicting_subs[0:nr_subs]
            predicting_fns = (np.setdiff1d(np.unique(
                fm_cat.filenumber), [fn]))
            np.random.shuffle(predicting_fns)
            predicting_fns = predicting_fns[0:nr_imgs_current]
            predicting_fm = fm_cat[
                (ismember(fm_cat.SUBJECTINDEX, predicting_subs)) &
                (ismember(fm_cat.filenumber, predicting_fns))]
            predicted_fm = fm_single[fm_single.SUBJECTINDEX == sub]
            try:
                predicting_fdm = compute_fdm(predicting_fm,
                                             scale_factor=scale_factor)
            except RuntimeError:
                predicting_fdm = None
            image_scores.append(measures.prediction_scores(
                predicting_fdm, predicted_fm))
        for (measure, score) in enumerate(nanmean(image_scores, 0)):
            sb_scores[measure][cat][sub_counter] = score
return sb_scores
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ind2sub(ind, dimensions): """ Calculates subscripts for indices into regularly spaced matrixes. """
# check that the index is within range
if ind >= np.prod(dimensions):
    raise RuntimeError("ind2sub: index exceeds array size")
cum_dims = list(dimensions)
cum_dims.reverse()
m = 1
mult = []
for d in cum_dims:
    m = m * d
    mult.append(m)
mult.pop()
mult.reverse()
mult.append(1)
indices = []
for d in mult:
    # use floor division so this also works under Python 3
    indices.append((ind // d) + 1)
    ind = ind - (ind // d) * d
return indices
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sub2ind(indices, dimensions): """ An exemplary sub2ind implementation to create randomization scripts. This function calculates indices from subscripts into regularly spaced matrixes. """
# check that none of the indices exceeds the size of the array
if any([i > j for i, j in zip(indices, dimensions)]):
    raise RuntimeError("sub2ind: an index exceeds its dimension's size")
dims = list(dimensions)
dims.append(1)
dims.remove(dims[0])
dims.reverse()
ind = list(indices)
ind.reverse()
idx = 0
mult = 1
for (cnt, dim) in zip(ind, dims):
    mult = dim * mult
    idx = idx + (cnt - 1) * mult
return idx
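A round-trip sketch for the pair: subscripts are 1-based (MATLAB-style) while the linear index is 0-based, matching the two implementations above:

dims = (3, 4)
for linear in range(12):
    subs = ind2sub(linear, dims)       # e.g. 5 -> [2, 2]
    assert sub2ind(subs, dims) == linear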
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def RestoreTaskStoreFactory(store_class, chunk_size, restore_file, save_file): """ Restores a task store from file. """
intm_results = np.load(restore_file)
intm = intm_results[intm_results.files[0]]
idx = np.isnan(intm).flatten().nonzero()[0]
partitions = math.ceil(len(idx) / float(chunk_size))
task_store = store_class(partitions, idx.tolist(), save_file)
task_store.num_tasks = len(idx)
# Also set up matrices for saving results
for f in intm_results.files:
    task_store.__dict__[f] = intm_results[f]
return task_store
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def xmlrpc_reschedule(self): """ Reschedule all running tasks. """
if not len(self.scheduled_tasks) == 0:
    self.reschedule = list(self.scheduled_tasks.items())
    self.scheduled_tasks = {}
return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def xmlrpc_task_done(self, result): """ Take the results of a computation and put it into the results list. """
(task_id, task_results) = result
del self.scheduled_tasks[task_id]
self.task_store.update_results(task_id, task_results)
self.results += 1
return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def xmlrpc_save2file(self, filename): """ Save results and own state into file. """
# use a context manager so the file is closed even when pickling fails
with open(filename, 'wb') as savefile:
    try:
        pickle.dump({'scheduled': self.scheduled_tasks,
                     'reschedule': self.reschedule}, savefile)
    except pickle.PicklingError:
        return -1
return 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """This function needs to be called to start the computation."""
(task_id, tasks) = self.server.get_task()
self.task_store.from_dict(tasks)
for (index, task) in self.task_store:
    result = self.compute(index, task)
    self.results.append(result)
self.server.task_done((task_id, self.results))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_dict(self, description): """Configures the task store to be the task_store described in description"""
assert (self.ident == description['ident'])
self.partitions = description['partitions']
self.indices = description['indices']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def partition(self): """Partitions all tasks into groups of tasks. A group is represented by a task_store object that indexes a sub- set of tasks."""
step = int(math.ceil(self.num_tasks / float(self.partitions)))
if self.indices is None:
    slice_ind = list(range(0, self.num_tasks, step))
    for start in slice_ind:
        yield self.__class__(self.partitions,
                             list(range(start, start + step)))
else:
    slice_ind = list(range(0, len(self.indices), step))
    for start in slice_ind:
        if start + step <= len(self.indices):
            yield self.__class__(self.partitions,
                                 self.indices[start:start + step])
        else:
            yield self.__class__(self.partitions, self.indices[start:])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fit3d(samples, e_x, e_y, e_z, remove_zeros = False, **kw): """Fits a 3D distribution with splines. Input: samples: Array Array of samples from a probability distribution e_x: Array Edges that define the events in the probability distribution along the x direction. For example, e_x[0] < samples[0] <= e_x[1] picks out all samples that are associated with the first event. e_y: Array See e_x, but for the y direction. remove_zeros: Bool If True, events that are not observed will not be part of the fitting process. If False, those events will be modelled as finfo('float').eps **kw: Arguments that are passed on to spline_bse1d. Returns: distribution: Array An array that gives an estimate of probability for events defined by e. knots: Tuple of arrays Sequence of knots that were used for the spline basis (x,y) """
height, width, depth = len(e_y) - 1, len(e_x) - 1, len(e_z) - 1
(p_est, _) = np.histogramdd(samples, (e_x, e_y, e_z))
p_est = p_est / sum(p_est.flat)
p_est = p_est.flatten()
if remove_zeros:
    non_zero = ~(p_est == 0)
else:
    non_zero = (p_est >= 0)
# spline_base3d returns (basis, knots); only the basis is needed here
basis, _ = spline_base3d(width, height, depth, **kw)
model = linear_model.BayesianRidge()
model.fit(basis[:, non_zero].T, p_est[:, np.newaxis][non_zero, :])
return (model.predict(basis.T).reshape((width, height, depth)),
        p_est.reshape((width, height, depth)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fit2d(samples,e_x, e_y, remove_zeros = False, p_est = None, **kw): """Fits a 2D distribution with splines.

    Input:
        samples: Matrix or list of arrays
            If matrix, it must be of size Nx2, where N is the number
            of observations. If list, it must contain two arrays of
            length N.
        e_x: Array
            Edges that define the events in the probability
            distribution along the x direction. For example,
            e_x[0] < samples[0] <= e_x[1] picks out all samples that
            are associated with the first event.
        e_y: Array
            See e_x, but for the y direction.
        remove_zeros: Bool
            If True, events that are not observed will not be part
            of the fitting process. If False, those events will be
            modelled as finfo('float').eps
        p_est: Matrix, optional
            A precomputed 2D histogram of the samples; if given, it
            is used directly instead of binning samples with e_x
            and e_y.
        **kw: Arguments that are passed on to spline_base1d.

    Returns:
        distribution: Array
            An array that gives an estimate of probability for
            events defined by e_x and e_y.
        knots: Tuple of arrays
            Sequence of knots that were used for the spline basis
            (x, y)
    """
if p_est is None:
    height = len(e_y) - 1
    width = len(e_x) - 1
    (p_est, _) = np.histogramdd(samples, (e_x, e_y))
else:
    p_est = p_est.T
    width, height = p_est.shape
# p_est contains x in dim 1 and y in dim 0
shape = p_est.shape
p_est = (p_est / sum(p_est.flat)).reshape(shape)
mx = p_est.sum(1)
my = p_est.sum(0)
# Transpose hist to have x in dim 0
p_est = p_est.T.flatten()
basis, knots = spline_base2d(width, height, marginal_x=mx,
                             marginal_y=my, **kw)
model = linear_model.BayesianRidge()
if remove_zeros:
    non_zero = ~(p_est == 0)
    model.fit(basis[:, non_zero].T, p_est[non_zero])
else:
    non_zero = (p_est >= 0)
    # p_est is flat at this point, so index the zero bins in 1D
    p_est[~non_zero] = np.finfo(float).eps
    model.fit(basis.T, p_est)
return (model.predict(basis.T).reshape((height, width)),
        p_est.reshape((height, width)), knots)
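A hedged usage sketch (the sample data and bin edges are made up; assumes numpy, scikit-learn and the spline_base2d helper are available):

import numpy as np

samples = np.random.randn(1000, 2) * 3 + 10  # hypothetical 2D samples
e_x = np.linspace(0, 20, 21)
e_y = np.linspace(0, 20, 21)
fdm, p_est, knots = fit2d(samples, e_x, e_y)
assert fdm.shape == (20, 20)  # returned as (height, width)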
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fit1d(samples, e, remove_zeros = False, **kw): """Fits a 1D distribution with splines.

    Input:
        samples: Array
            Array of samples from a probability distribution
        e: Array
            Edges that define the events in the probability
            distribution. For example, e[0] < x <= e[1] is the range
            of values that are associated with the first event.
        **kw: Arguments that are passed on to spline_base1d.

    Returns:
        distribution: Array
            An array that gives an estimate of probability for
            events defined by e.
        knots: Array
            Sequence of knots that were used for the spline basis
    """
samples = samples[~np.isnan(samples)]
length = len(e) - 1
hist, _ = np.histogramdd(samples, (e,))
hist = hist / sum(hist)
basis, knots = spline_base1d(length, marginal=hist, **kw)
non_zero = hist > 0
model = linear_model.BayesianRidge()
if remove_zeros:
    model.fit(basis[non_zero, :], hist[:, np.newaxis][non_zero, :])
else:
    hist[~non_zero] = np.finfo(float).eps
    model.fit(basis, hist[:, np.newaxis])
return model.predict(basis), hist, knots
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def knots_from_marginal(marginal, nr_knots, spline_order): """ Determines knot placement based on a marginal distribution.

    It places knots such that each knot covers the same amount of
    probability mass. Two of the knots are reserved for the borders,
    which are treated separately. For example, a uniform distribution
    with 5 knots will cause the knots to be equally spaced, with 25%
    of the probability mass between each two knots.

    Input:
        marginal: Array
            Estimate of the marginal distribution used to estimate
            knot placement.
        nr_knots: int
            Number of knots to be placed.
        spline_order: int
            Order of the splines.

    Returns:
        knots: Array
            Sequence of knot positions
    """
cumsum = np.cumsum(marginal)
cumsum = cumsum / cumsum.max()
borders = np.linspace(0, 1, nr_knots)
knot_placement = ([0]
                  + np.unique([np.where(cumsum >= b)[0][0]
                               for b in borders[1:-1]]).tolist()
                  + [len(marginal) - 1])
knots = augknt(knot_placement, spline_order)
return knots
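For a uniform marginal the interior knots come out equally spaced, since every interval must enclose the same probability mass. A small illustrative check (assumes numpy and the augknt helper below are in scope):

import numpy as np

marginal = np.ones(100)
knots = knots_from_marginal(marginal, nr_knots=5, spline_order=3)
# interior placement is [0, 24, 49, 74, 99]; augknt() then repeats
# the border knots spline_order extra times on each side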
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spline_base3d( width, height, depth, nr_knots_x = 10.0, nr_knots_y = 10.0, nr_knots_z=10, spline_order = 3, marginal_x = None, marginal_y = None, marginal_z = None): """Computes a set of 3D spline basis functions. For a description of the parameters see spline_base2d. """
if not nr_knots_z < depth:
    raise RuntimeError("Too many knots for size of the base")
basis2d, (knots_x, knots_y) = spline_base2d(height, width, nr_knots_x,
                                            nr_knots_y, spline_order,
                                            marginal_x, marginal_y)
if marginal_z is not None:
    knots_z = knots_from_marginal(marginal_z, nr_knots_z, spline_order)
else:
    knots_z = augknt(np.linspace(0, depth + 1, nr_knots_z), spline_order)
z_eval = np.arange(1, depth + 1).astype(float)
spline_setz = spcol(z_eval, knots_z, spline_order)
bspline = np.zeros((basis2d.shape[0] * len(z_eval), height * width * depth))
basis_nr = 0
for spline_a in spline_setz.T:
    for spline_b in basis2d:
        spline_b = spline_b.reshape((height, width))
        bspline[basis_nr, :] = (spline_b[:, :, np.newaxis] * spline_a).flat
        basis_nr += 1
return bspline, (knots_x, knots_y, knots_z)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spline(x,knots,p,i=0.0): """Evaluates the ith spline basis given by knots on points in x"""
assert p + 1 < len(knots)
# knot indices must stay integers so they can index into knots in N()
return np.array([N(float(u), int(i), int(p), knots) for u in x])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def augknt(knots,order): """Augment knot sequence such that some boundary conditions are met."""
a = ([knots[0]] * order) + list(knots) + ([knots[-1]] * order)
return np.array(a)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def N(u,i,p,knots): """Compute Spline Basis.

    Evaluates the spline basis of order p defined by knots at knot i
    and point u.
    """
if p == 0:
    if knots[i] < u and u <= knots[i + 1]:
        return 1.0
    return 0.0
try:
    k = (float(u - knots[i]) / float(knots[i + p] - knots[i])) \
        * N(u, i, p - 1, knots)
except ZeroDivisionError:
    k = 0.0
try:
    q = (float(knots[i + p + 1] - u)
         / float(knots[i + p + 1] - knots[i + 1])) * N(u, i + 1, p - 1, knots)
except ZeroDivisionError:
    q = 0.0
return float(k + q)
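This is the standard Cox-de Boor recursion, so the basis functions should sum to one inside the support. A small sanity check (assumes augknt from above is in scope, with integer knot indices):

knots = augknt([0, 1, 2, 3, 4], 3)  # 11 knots -> 7 cubic basis functions
u = 2.5
total = sum(N(u, i, 3, knots) for i in range(len(knots) - 3 - 1))
print(round(total, 6))  # ~1.0 (partition of unity)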
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prediction_scores(prediction, fm, **kw): """ Evaluates a prediction against fixations in a fixmat with different measures.

    The default measures which are used are AUC, NSS and
    KL-divergence. This can be changed by setting the list of
    measures with set_scores.

    As different measures need potentially different parameters, the
    kw dictionary can be used to pass arguments to measures. Every
    named argument (except fm and prediction) of a measure that is
    included in kw.keys() will be filled with the value stored in kw.

    Example:
        prediction_scores(prediction, fm, ctr_loc=(y, x))
        In this case the AUC will be computed with control points
        (y, x), because the measure 'roc_model' has 'ctr_loc' as a
        named argument.

    Input:
        prediction : 2D numpy array
            The prediction that should be evaluated
        fm : Fixmat
            The eyetracking data to evaluate against

    Output:
        Tuple of prediction scores. The order of the scores is
        determined by the order of measures.scores.
    """
if prediction is None:
    return [np.NaN for measure in scores]
results = []
for measure in scores:
    (args, _, _, _) = inspect.getargspec(measure)
    if len(args) > 2:
        # Filter the dictionary, such that only the keys that are
        # expected by the measure are in it
        mdict = dict((key, value) for (key, value) in kw.items()
                     if key in args)
        score = measure(prediction, fm, **mdict)
    else:
        score = measure(prediction, fm)
    results.append(score)
return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def kldiv_model(prediction, fm): """ Wraps kldiv functionality for model evaluation.

    Input:
        prediction: 2D matrix
            The model salience map
        fm : fixmat
            Should be filtered for the image corresponding to the
            prediction
    """
(_, r_x) = calc_resize_factor(prediction, fm.image_size)
q = np.array(prediction, copy=True)
q -= np.min(q.flatten())
q /= np.sum(q.flatten())
return kldiv(None, q, distp=fm, scale_factor=r_x)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def kldiv(p, q, distp = None, distq = None, scale_factor = 1): """ Computes the Kullback-Leibler divergence between two distributions.

    Parameters:
        p : Matrix
            The first probability distribution
        q : Matrix
            The second probability distribution
        distp : fixmat
            If p is None, distp is used to compute a FDM which is
            then taken as the 1st probability distribution.
        distq : fixmat
            If q is None, distq is used to compute a FDM which is
            then taken as the 2nd probability distribution.
        scale_factor : double
            Determines the size of the FDM computed from distq or
            distp.
    """
# use "is not None" checks: comparing numpy arrays with != is elementwise
assert q is not None or distq is not None, "Either q or distq have to be given"
assert p is not None or distp is not None, "Either p or distp have to be given"
try:
    if p is None:
        p = compute_fdm(distp, scale_factor=scale_factor)
    if q is None:
        q = compute_fdm(distq, scale_factor=scale_factor)
except RuntimeError:
    return np.NaN
q += np.finfo(q.dtype).eps
p += np.finfo(p.dtype).eps
kl = np.sum(p * (np.log2(p / q)))
return kl
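A quick behavioural check with made-up toy distributions: identical inputs give a divergence of about zero, and the measure is not symmetric. Copies are passed because kldiv() adds eps to its arguments in place:

import numpy as np

p = np.ones((2, 2)) / 4.0
q = np.array([[0.1, 0.2], [0.3, 0.4]])
print(kldiv(p.copy(), p.copy()))                    # ~0.0
print(kldiv(p.copy(), q.copy()),
      kldiv(q.copy(), p.copy()))                    # asymmetric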
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def kldiv_cs_model(prediction, fm): """ Computes Chao-Shen corrected KL-divergence between prediction and the FDM made from fixations in fm.

    Parameters:
        prediction : np.ndarray
            A fixation density map
        fm : FixMat object
    """
# compute histogram of fixations needed for ChaoShen corrected kl-div
# image category must exist (>-1) and image_size must be non-empty
assert len(fm.image_size) == 2 and (fm.image_size[0] > 0) and (fm.image_size[1] > 0)
assert -1 not in fm.category
# check whether fixmat contains fixations
if len(fm.x) == 0:
    return np.NaN
(scale_factor, _) = calc_resize_factor(prediction, fm.image_size)
# this specifies left edges of the histogram bins, i.e. fixations between
# ]0 binedge[0]] are included. --> fixations are ceiled
e_y = np.arange(0, np.round(scale_factor * fm.image_size[0] + 1))
e_x = np.arange(0, np.round(scale_factor * fm.image_size[1] + 1))
samples = np.array(list(zip((scale_factor * fm.y), (scale_factor * fm.x))))
(fdm, _) = np.histogramdd(samples, (e_y, e_x))
# compute ChaoShen corrected kl-div
q = np.array(prediction, copy=True)
q[q == 0] = np.finfo(q.dtype).eps
q /= np.sum(q)
(H, pa, la) = chao_shen(fdm)
q = q[fdm > 0]
cross_entropy = -np.sum((pa * np.log2(q)) / la)
return cross_entropy - H
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def chao_shen(q): """ Computes some terms needed for the Chao-Shen KL correction. """
yx = q[q > 0]  # remove bins with zero counts
n = np.sum(yx)
p = yx.astype(float) / n
f1 = np.sum(yx == 1)  # number of singletons in the sample
if f1 == n:  # avoid C == 0
    f1 -= 1
C = 1 - (float(f1) / n)  # estimated coverage of the sample (float cast
                         # avoids integer division under Python 2)
pa = C * p  # coverage adjusted empirical frequencies
la = 1 - (1 - pa) ** n  # probability to see a bin (species) in the sample
H = -np.sum((pa * np.log2(pa)) / la)
return (H, pa, la)
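A worked toy example with made-up counts: [6, 2, 1, 1] has two singletons out of n = 10 observations, so the estimated coverage is C = 1 - 2/10 = 0.8 and the empirical frequencies are shrunk accordingly:

import numpy as np

counts = np.array([6, 2, 1, 1, 0])
H, pa, la = chao_shen(counts)
print(pa)  # 0.8 * [0.6, 0.2, 0.1, 0.1] = [0.48, 0.16, 0.08, 0.08]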
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def correlation_model(prediction, fm): """ Wraps numpy.corrcoef functionality for model evaluation.

    Input:
        prediction: 2D Matrix
            The model salience map
        fm: fixmat
            Used to compute a FDM to which the prediction is
            compared.
    """
(_, r_x) = calc_resize_factor(prediction, fm.image_size)
fdm = compute_fdm(fm, scale_factor=r_x)
return np.corrcoef(fdm.flatten(), prediction.flatten())[0, 1]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def nss_model(prediction, fm): """ Wraps nss functionality for model evaluation.

    Input:
        prediction: 2D matrix
            The model salience map
        fm : fixmat
            Fixations that define the actuals
    """
(r_y, r_x) = calc_resize_factor(prediction, fm.image_size)
fix = ((np.array(fm.y - 1) * r_y).astype(int),
       (np.array(fm.x - 1) * r_x).astype(int))
return nss(prediction, fix)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def nss(prediction, fix): """ Compute the normalized scanpath salience.

    Input:
        fix : list, l[0] contains y, l[1] contains x
    """
prediction = prediction - np.mean(prediction)
prediction = prediction / np.std(prediction)
return np.mean(prediction[fix[0], fix[1]])
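NSS is the mean z-scored salience at the fixated pixels, so a fixation on the map's single hot pixel scores high while one on the background scores slightly below zero (toy map, made-up fixations):

import numpy as np

salience = np.zeros((10, 10))
salience[5, 5] = 1.0
print(nss(salience, ([5], [5])))  # ~9.95
print(nss(salience, ([0], [0])))  # ~-0.10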
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def roc_model(prediction, fm, ctr_loc = None, ctr_size = None): """ Wraps roc functionality for model evaluation.

    Parameters:
        prediction: 2D array
            The model salience map
        fm : fixmat
            Fixations that define locations of the actuals
        ctr_loc : tuple of (y, x) coordinates, optional
            Allows to specify control points for spatial bias
            correction
        ctr_size : two element tuple, optional
            Specifies the assumed image size of the control
            locations, defaults to fm.image_size
    """
# check if prediction is a valid numpy array
assert type(prediction) == np.ndarray
# check whether scaling preserved aspect ratio
(r_y, r_x) = calc_resize_factor(prediction, fm.image_size)
# read out values in the fdm at actual fixation locations
# .astype(int) floors numbers in np.array
y_index = (r_y * np.array(fm.y - 1)).astype(int)
x_index = (r_x * np.array(fm.x - 1)).astype(int)
actuals = prediction[y_index, x_index]
if not ctr_loc:
    xc = np.random.randint(0, prediction.shape[1], 1000)
    yc = np.random.randint(0, prediction.shape[0], 1000)
    ctr_loc = (yc.astype(int), xc.astype(int))
else:
    if not ctr_size:
        ctr_size = fm.image_size
    else:
        (r_y, r_x) = calc_resize_factor(prediction, ctr_size)
    ctr_loc = ((r_y * np.array(ctr_loc[0])).astype(int),
               (r_x * np.array(ctr_loc[1])).astype(int))
controls = prediction[ctr_loc[0], ctr_loc[1]]
return fast_roc(actuals, controls)[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fast_roc(actuals, controls): """ Approximates the area under the ROC curve for sets of actuals and controls.

    Uses all values appearing in actuals as thresholds and lower sum
    interpolation. Also returns arrays of the true positive rate and
    the false positive rate that can be used for plotting the ROC
    curve.

    Parameters:
        actuals : list
            A list of numeric values for positive observations.
        controls : list
            A list of numeric values for negative observations.
    """
assert type(actuals) is np.ndarray
assert type(controls) is np.ndarray
actuals = np.ravel(actuals)
controls = np.ravel(controls)
if np.isnan(actuals).any():
    raise RuntimeError('NaN found in actuals')
if np.isnan(controls).any():
    raise RuntimeError('NaN found in controls')
thresholds = np.hstack([-np.inf, np.unique(actuals), np.inf])[::-1]
true_pos_rate = np.empty(thresholds.size)
false_pos_rate = np.empty(thresholds.size)
num_act = float(len(actuals))
num_ctr = float(len(controls))
for i, value in enumerate(thresholds):
    true_pos_rate[i] = (actuals >= value).sum() / num_act
    false_pos_rate[i] = (controls >= value).sum() / num_ctr
auc = np.dot(np.diff(false_pos_rate), true_pos_rate[0:-1])
# treat cases where TPR of one is not reached before FPR of one
# by using trapezoidal integration for the last segment
# (add the missing triangle)
if false_pos_rate[-2] == 1:
    auc += (1 - true_pos_rate[-3]) * .5 * (1 - false_pos_rate[-3])
return (auc, true_pos_rate, false_pos_rate)
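Behavioural check with made-up scores: perfectly separable actuals and controls give an AUC of 1.0, while identically distributed ones hover around 0.5:

import numpy as np

actuals = np.array([0.7, 0.8, 0.9])
controls = np.array([0.1, 0.2, 0.3])
auc, tpr, fpr = fast_roc(actuals, controls)
print(auc)  # 1.0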
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def emd_model(prediction, fm): """ Wraps emd functionality for model evaluation.

    Requires: OpenCV python bindings

    Input:
        prediction: the model salience map
        fm : fixmat filtered for the image corresponding to the
            prediction
    """
(_, r_x) = calc_resize_factor(prediction, fm.image_size)
gt = fixmat.compute_fdm(fm, scale_factor=r_x)
return emd(prediction, gt)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def emd(prediction, ground_truth): """ Compute the Earth Mover's Distance between prediction and model.

    This implementation uses OpenCV for doing the actual work.
    Unfortunately, at the time of implementation only the SWIG
    bindings were available and the numpy arrays have to be converted
    by hand. This changes with OpenCV 2.1.
    """
import opencv
if not (prediction.shape == ground_truth.shape):
    raise RuntimeError('Shapes of prediction and ground truth have'
                       + ' to be equal. They are: %s, %s'
                       % (str(prediction.shape), str(ground_truth.shape)))
(x, y) = np.meshgrid(list(range(0, prediction.shape[1])),
                     list(range(0, prediction.shape[0])))
s1 = np.array([x.flatten(), y.flatten(), prediction.flatten()]).T
s2 = np.array([x.flatten(), y.flatten(), ground_truth.flatten()]).T
s1m = opencv.cvCreateMat(s1.shape[0], s2.shape[1], opencv.CV_32FC1)
s2m = opencv.cvCreateMat(s1.shape[0], s2.shape[1], opencv.CV_32FC1)
for r in range(0, s1.shape[0]):
    for c in range(0, s1.shape[1]):
        s1m[r, c] = float(s1[r, c])
        s2m[r, c] = float(s2[r, c])
d = opencv.cvCalcEMD2(s1m, s2m, opencv.CV_DIST_L2)
return d
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_parser(f): """ Gets the parser for the command f; if it does not exist yet, a new one is created. """
_COMMAND_GROUPS[f.__module__].load()
if f.__name__ not in _COMMAND_GROUPS[f.__module__].parsers:
    parser = _COMMAND_GROUPS[f.__module__].parser_generator.add_parser(
        f.__name__, help=f.__doc__, description=f.__doc__)
    parser.set_defaults(func=f)
    _COMMAND_GROUPS[f.__module__].parsers[f.__name__] = parser
return _COMMAND_GROUPS[f.__module__].parsers[f.__name__]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def discoverEndpoint(url, test_urls=True, headers={}, timeout=None, request=None, debug=False): """Discover any WebMention endpoint for a given URL.

    :param url: URL to discover WebMention endpoint for
    :param test_urls: optional flag to test URLs for validation
    :param headers: optional headers to send with any web requests
    :type headers: dict
    :param timeout: optional timeout for web requests
    :type timeout: float
    :param request: optional Requests request object to avoid another GET
    :rtype: tuple (status_code, URL, [debug])
    """
if test_urls:
    URLValidator(message='invalid URL')(url)
# status, webmention
endpointURL = None
debugOutput = []
try:
    if request is not None:
        targetRequest = request
    else:
        targetRequest = requests.get(url, verify=False, headers=headers,
                                     timeout=timeout)
    returnCode = targetRequest.status_code
    debugOutput.append('%s %s' % (returnCode, url))
    if returnCode == requests.codes.ok:
        try:
            linkHeader = parse_link_header(targetRequest.headers['link'])
            endpointURL = linkHeader.get('webmention', '') or \
                linkHeader.get('http://webmention.org', '') or \
                linkHeader.get('http://webmention.org/', '') or \
                linkHeader.get('https://webmention.org', '') or \
                linkHeader.get('https://webmention.org/', '')
            # force searching in the HTML if not found
            if not endpointURL:
                raise AttributeError
            debugOutput.append('found in link headers')
        except (KeyError, AttributeError):
            endpointURL = findEndpoint(targetRequest.text)
            debugOutput.append('found in body')
        if endpointURL is not None:
            endpointURL = urljoin(url, endpointURL)
except (requests.exceptions.RequestException,
        requests.exceptions.ConnectionError,
        requests.exceptions.HTTPError,
        requests.exceptions.URLRequired,
        requests.exceptions.TooManyRedirects,
        requests.exceptions.Timeout):
    debugOutput.append('exception during GET request')
    returnCode = 500
debugOutput.append('endpointURL: %s %s' % (returnCode, endpointURL))
if debug:
    return (returnCode, endpointURL, debugOutput)
else:
    return (returnCode, endpointURL)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def indent_text(string, indent_level=2): """Indent every line of text in a newline-delimited string"""
indented_lines = []
indent_spaces = ' ' * indent_level
for line in string.split('\n'):
    indented_lines.append(indent_spaces + line)
return '\n'.join(indented_lines)
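For example:

print(indent_text('line one\nline two', indent_level=4))
#     line one
#     line two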
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download(url, target, headers=None, trackers=()): """Download a file using requests.

    This is like urllib.request.urlretrieve, but:

    - requests validates SSL certificates by default
    - you can pass tracker objects to e.g. display a progress bar or
      calculate a file hash.
    """
if headers is None:
    headers = {}
headers.setdefault('user-agent', 'requests_download/' + __version__)
r = requests.get(url, headers=headers, stream=True)
r.raise_for_status()
for t in trackers:
    t.on_start(r)
with open(target, 'wb') as f:
    for chunk in r.iter_content(chunk_size=8192):
        if chunk:
            f.write(chunk)
            for t in trackers:
                t.on_chunk(chunk)
for t in trackers:
    t.on_finish()
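A minimal tracker sketch, built only on the on_start/on_chunk/on_finish hooks the loop above calls (the class name and URL are hypothetical, not part of the library):

import hashlib

class HashTracker(object):
    # accumulates an MD5 checksum of the downloaded bytes
    def __init__(self):
        self.md5 = hashlib.md5()

    def on_start(self, response):
        pass  # the response headers are available here if needed

    def on_chunk(self, chunk):
        self.md5.update(chunk)

    def on_finish(self):
        print(self.md5.hexdigest())

# download('https://example.com/file.bin', 'file.bin',
#          trackers=(HashTracker(),))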
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write(parsed_obj, spec=None, filename=None): """Writes an object created by `parse` to either a file or a bytearray. If the object doesn't end on a byte boundary, zeroes are appended to it until it does. """
if not isinstance(parsed_obj, BreadStruct):
    raise ValueError(
        'Object to write must be a structure created by bread.parse')
if filename is not None:
    with open(filename, 'wb') as fp:
        parsed_obj._data_bits[:parsed_obj._length].tofile(fp)
else:
    return bytearray(
        parsed_obj._data_bits[:parsed_obj._length].tobytes())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def deploy_file(file_path, bucket): """ Uploads a file to an S3 bucket, as a public file. """
# Paths look like:
# index.html
# css/bootstrap.min.css
logger.info("Deploying {0}".format(file_path))
# Upload the actual file to file_path
k = Key(bucket)
k.key = file_path
try:
    k.set_contents_from_filename(file_path)
    k.set_acl('public-read')
except socket.error:
    logger.warning("Caught socket.error while trying to upload {0}".format(
        file_path))
    msg = "Please file an issue with alotofeffort if you see this,"
    logger.warning(msg)
    logger.warning("providing as much info as you can.")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def deploy(www_dir, bucket_name): """ Deploy to the configured S3 bucket. """
# Set up the connection to an S3 bucket.
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
# Deploy each changed file in www_dir
os.chdir(www_dir)
for root, dirs, files in os.walk('.'):
    for f in files:
        # Use full relative path. Normalize to remove dot.
        file_path = os.path.normpath(os.path.join(root, f))
        if has_changed_since_last_deploy(file_path, bucket):
            deploy_file(file_path, bucket)
        else:
            logger.info("Skipping {0}".format(file_path))
# Make the whole bucket public
bucket.set_acl('public-read')
# Configure it to be a website
bucket.configure_website('index.html', 'error.html')
# Print the endpoint, so you know the URL
msg = "Your website is now live at {0}".format(
    bucket.get_website_endpoint())
logger.info(msg)
logger.info("If you haven't done so yet, point your domain name there!")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def has_changed_since_last_deploy(file_path, bucket): """ Checks if a file has changed since the last time it was deployed.

    :param file_path: Path to file which should be checked. Should be
        relative from root of bucket.
    :param bucket: Boto S3 bucket object to check against.
    :returns: True if the file has changed, else False.
    """
msg = "Checking if {0} has changed since last deploy.".format(file_path) logger.debug(msg) with open(file_path) as f: data = f.read() file_md5 = hashlib.md5(data.encode('utf-8')).hexdigest() logger.debug("file_md5 is {0}".format(file_md5)) key = bucket.get_key(file_path) # HACK: Boto's md5 property does not work when the file hasn't been # downloaded. The etag works but will break for multi-part uploaded files. # http://stackoverflow.com/questions/16872679/how-to-programmatically- # get-the-md5-checksum-of-amazon-s3-file-using-boto/17607096#17607096 # Also the double quotes around it must be stripped. Sketchy...boto's fault if key: key_md5 = key.etag.replace('"', '').strip() logger.debug("key_md5 is {0}".format(key_md5)) else: logger.debug("File does not exist in bucket") return True if file_md5 == key_md5: logger.debug("File has not changed.") return False logger.debug("File has changed.") return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(): """ Entry point for the package, as defined in setup.py. """
# Log info and above to console
logging.basicConfig(
    format='%(levelname)s: %(message)s', level=logging.INFO)
# Get command line input/output arguments
msg = 'Instantly deploy static HTML sites to S3 at the command line.'
parser = argparse.ArgumentParser(description=msg)
parser.add_argument(
    'www_dir',
    help='Directory containing the HTML files for your website.'
)
parser.add_argument(
    'bucket_name',
    help='Name of S3 bucket to deploy to, e.g. mybucket.'
)
args = parser.parse_args()
# Deploy the site to S3!
deploy(args.www_dir, args.bucket_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_sikuli_process(self, port=None): """ This keyword is used to start the sikuli java process.

    If the library is initialized with mode "OLD", the sikuli java
    process is started automatically. If the library is initialized
    with mode "NEW", this keyword should be used.

    :param port: port of sikuli java process; if value is None or 0,
        a random free port will be used
    :return: None
    """
if port is None or int(port) == 0:
    port = self._get_free_tcp_port()
self.port = port
start_retries = 0
started = False
while start_retries < 5:
    try:
        self._start_sikuli_java_process()
    except RuntimeError as err:
        print('error........%s' % err)
        if self.process:
            self.process.terminate_process()
        self.port = self._get_free_tcp_port()
        start_retries += 1
        continue
    started = True
    break
if not started:
    raise RuntimeError('Start sikuli java process failed!')
self.remote = self._connect_remote_library()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def EXPIRING_TOKEN_LIFESPAN(self): """ Return the allowed lifespan of a token as a TimeDelta object. Defaults to 30 days. """
try:
    val = settings.EXPIRING_TOKEN_LIFESPAN
except AttributeError:
    val = timedelta(days=30)
return val
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def expired(self): """Return boolean indicating token expiration."""
now = timezone.now()
return self.created < now - token_settings.EXPIRING_TOKEN_LIFESPAN
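The comparison in isolation, with made-up timestamps and the 30-day default lifespan:

from datetime import datetime, timedelta

lifespan = timedelta(days=30)
created = datetime(2020, 1, 1)
now = datetime(2020, 2, 15)
print(created < now - lifespan)  # True: the token is older than 30 days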
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process(self): """ Store the actual process in _process. If it doesn't exist yet, create it. """
if not hasattr(self, '_process'):
    self._process = self._get_process()
return self._process
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_process(self): """ Create the process by running the specified command. """
command = self._get_command()
return subprocess.Popen(command, bufsize=-1, close_fds=True,
                        stdout=subprocess.PIPE, stdin=subprocess.PIPE)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tokenize_list(self, text): """ Split a text into separate words. """
return [self.get_record_token(record) for record in self.analyze(text)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_stopword(self, text): """ Determine whether a single word is a stopword, or whether a short phrase is made entirely of stopwords, disregarding context. Use of this function should be avoided; it's better to give the text in context and let the process determine which words are the stopwords. """
found_content_word = False
for record in self.analyze(text):
    if not self.is_stopword_record(record):
        found_content_word = True
        break
return not found_content_word