<SYSTEM_TASK:> Obtain details of a single device <END_TASK> <USER_TASK:> Description: def DeviceGet(self, device_id): """ Obtain details of a single device @param device_id (int) - Device for which to obtain details """
if self.__SenseApiCall__('/devices/{0}'.format(device_id), 'GET'):
    return True
else:
    self.__error__ = "api call unsuccessful"
    return False
<SYSTEM_TASK:> Obtain a list of all sensors attached to a device. <END_TASK> <USER_TASK:> Description: def DeviceSensorsGet(self, device_id, parameters): """ Obtain a list of all sensors attached to a device. @param device_id (int) - Device for which to retrieve sensors @param parameters (dict) - Search parameters @return (bool) - Boolean indicating whether DeviceSensorsGet was successful. """
if self.__SenseApiCall__('/devices/{0}/sensors.json'.format(device_id), 'GET', parameters = parameters):
    return True
else:
    self.__error__ = "api call unsuccessful"
    return False
<SYSTEM_TASK:> Delete a group from CommonSense. <END_TASK> <USER_TASK:> Description: def GroupsDelete(self, group_id): """ Delete a group from CommonSense. @param group_id (int) - group id of group to delete from CommonSense. @return (bool) - Boolean indicating whether GroupsDelete was successful. """
if self.__SenseApiCall__('/groups/{0}.json'.format(group_id), 'DELETE'):
    return True
else:
    self.__error__ = "api call unsuccessful"
    return False
<SYSTEM_TASK:> Add users to a group in CommonSense. <END_TASK> <USER_TASK:> Description: def GroupsUsersPost(self, parameters, group_id): """ Add users to a group in CommonSense. @param parameters (dictionary) - Dictionary containing the users to add. @param group_id (int) - Id of the group to add the users to. @return (bool) - Boolean indicating whether GroupsUsersPost was successful. """
if self.__SenseApiCall__('/groups/{group_id}/users.json'.format(group_id = group_id), 'POST', parameters = parameters):
    return True
else:
    self.__error__ = "api call unsuccessful"
    return False
<SYSTEM_TASK:> Delete a user from a group in CommonSense. <END_TASK> <USER_TASK:> Description: def GroupsUsersDelete(self, group_id, user_id): """ Delete a user from a group in CommonSense. @param group_id (int) - Id of the group to delete a user from. @param user_id (int) - Id of the user to delete from the group. @return (bool) - Boolean indicating whether GroupsUsersDelete was successful. """
if self.__SenseApiCall__('/groups/{group_id}/users/{user_id}.json'.format(group_id = group_id, user_id = user_id), 'DELETE'):
    return True
else:
    self.__error__ = "api call unsuccessful"
    return False
<SYSTEM_TASK:> Share a sensor with a user <END_TASK> <USER_TASK:> Description: def SensorShare(self, sensor_id, parameters): """ Share a sensor with a user @param sensor_id (int) - Id of sensor to be shared @param parameters (dictionary) - Additional parameters for the call @return (bool) - Boolean indicating whether the ShareSensor call was successful """
if not parameters['user']['id']:
    parameters['user'].pop('id')
if not parameters['user']['username']:
    parameters['user'].pop('username')

if self.__SenseApiCall__("/sensors/{0}/users".format(sensor_id), "POST", parameters = parameters):
    return True
else:
    self.__error__ = "api call unsuccessful"
    return False
<SYSTEM_TASK:> Share a number of sensors within a group. <END_TASK> <USER_TASK:> Description: def GroupsSensorsPost(self, group_id, sensors): """ Share a number of sensors within a group. @param group_id (int) - Id of the group to share sensors with @param sensors (dictionary) - Dictionary containing the sensors to share within the groups @return (bool) - Boolean indicating whether the GroupsSensorsPost call was successful """
if self.__SenseApiCall__("/groups/{0}/sensors.json".format(group_id), "POST", parameters = sensors): return True else: self.__error__ = "api call unsuccessful" return False
<SYSTEM_TASK:> Retrieve sensors shared within the group. <END_TASK> <USER_TASK:> Description: def GroupsSensorsGet(self, group_id, parameters): """ Retrieve sensors shared within the group. @param group_id (int) - Id of the group to retrieve sensors from @param parameters (dictionary) - Additional parameters for the call @return (bool) - Boolean indicating whether GroupsSensorsGet was successful """
if self.__SenseApiCall("/groups/{0}/sensors.json".format(group_id), "GET", parameters = parameters): return True else: self.__error__ = "api call unsuccessful" return False
<SYSTEM_TASK:> Stop sharing a sensor within a group <END_TASK> <USER_TASK:> Description: def GroupsSensorsDelete(self, group_id, sensor_id): """ Stop sharing a sensor within a group @param group_id (int) - Id of the group to stop sharing the sensor with @param sensor_id (int) - Id of the sensor to stop sharing @return (bool) - Boolean indicating whether GroupsSensorsDelete was successful """
if self.__SenseApiCall__("/groups/{0}/sensors/{1}.json".format(group_id, sensor_id), "DELETE"): return True else: self.__error__ = "api call unsuccessful" return False
<SYSTEM_TASK:> This method returns the domains of the current user. <END_TASK> <USER_TASK:> Description: def DomainsGet(self, parameters = None, domain_id = -1): """ This method returns the domains of the current user. The list also contains the domains to which the user has not yet been accepted. @param parameters (dictionary) - Dictionary containing the parameters of the request. @return (bool) - Boolean indicating whether DomainsGet was successful. """
url = ''
if parameters is None and domain_id != -1:
    url = '/domains/{0}.json'.format(domain_id)
else:
    url = '/domains.json'

if self.__SenseApiCall__(url, 'GET', parameters = parameters):
    return True
else:
    self.__error__ = "api call unsuccessful"
    return False
<SYSTEM_TASK:> Retrieve users of the specified domain. <END_TASK> <USER_TASK:> Description: def DomainUsersGet(self, domain_id, parameters): """ Retrieve users of the specified domain. @param domain_id (int) - Id of the domain to retrieve users from @param parameters (dict) - Parameters of the api call. @return (bool) - Boolean indicating whether DomainUsersGet was successful. """
if self.__SenseApiCall__('/domains/{0}/users.json'.format(domain_id), 'GET', parameters = parameters):
    return True
else:
    self.__error__ = "api call unsuccessful"
    return False
<SYSTEM_TASK:> This method returns the list of tokens which are available for this domain. <END_TASK> <USER_TASK:> Description: def DomainTokensGet(self, domain_id): """ This method returns the list of tokens which are available for this domain. Only domain managers can list domain tokens. @param domain_id - ID of the domain for which to retrieve tokens @return (bool) - Boolean indicating whether DomainTokensGet was successful """
if self.__SenseApiCall__('/domains/{0}/tokens.json'.format(domain_id), 'GET'):
    return True
else:
    self.__error__ = "api call unsuccessful"
    return False
<SYSTEM_TASK:> This method creates tokens that can be used by users who want to join the domain. <END_TASK> <USER_TASK:> Description: def DomainTokensCreate(self, domain_id, amount): """ This method creates tokens that can be used by users who want to join the domain. Tokens are automatically deleted after usage. Only domain managers can create tokens. """
if self.__SenseApiCall__('/domains/{0}/tokens.json'.format(domain_id), 'POST', parameters = {"amount":amount}):
    return True
else:
    self.__error__ = "api call unsuccessful"
    return False
<SYSTEM_TASK:> List the user's data processors. <END_TASK> <USER_TASK:> Description: def DataProcessorsGet(self, parameters): """ List the user's data processors. @param parameters (dictionary) - Dictionary containing the parameters of the request. @return (bool) - Boolean indicating whether this call was successful. """
if self.__SenseApiCall__('/dataprocessors.json', 'GET', parameters = parameters):
    return True
else:
    self.__error__ = "api call unsuccessful"
    return False
<SYSTEM_TASK:> Delete a data processor in CommonSense. <END_TASK> <USER_TASK:> Description: def DataProcessorsDelete(self, dataProcessorId): """ Delete a data processor in CommonSense. @param dataProcessorId - The id of the data processor that will be deleted. @return (bool) - Boolean indicating whether DataProcessorsDelete was successful. """
if self.__SenseApiCall__('/dataprocessors/{id}.json'.format(id = dataProcessorId), 'DELETE'):
    return True
else:
    self.__error__ = "api call unsuccessful"
    return False
<SYSTEM_TASK:> Iteratively re-weighted least squares estimation routine <END_TASK> <USER_TASK:> Description: def iwls(y, x, family, offset, y_fix, ini_betas=None, tol=1.0e-8, max_iter=200, wi=None): """ Iteratively re-weighted least squares estimation routine Parameters ---------- y : array n*1, dependent variable x : array n*k, design matrix of k independent variables family : family object probability models: Gaussian, Poisson, or Binomial offset : array n*1, the offset variable for each observation. y_fix : array n*1, the fixed intercept value of y for each observation ini_betas : array 1*k, starting values for the k betas within the iteratively weighted least squares routine tol : float tolerance for estimation convergence max_iter : integer maximum number of iterations if convergence not met wi : array n*1, weights to transform observations from location i in GWR Returns ------- betas : array k*1, estimated coefficients mu : array n*1, predicted y values wx : array n*1, final weights used for iwls for GLM n_iter : integer number of iterations when the iwls algorithm terminates w : array n*1, final weights used for iwls for GWR z : array iwls throughput v : array iwls throughput xtx_inv_xt : array iwls throughput used to compute the GWR hat matrix [X'X]^-1 X' """
n_iter = 0
diff = 1.0e6

if ini_betas is None:
    betas = np.zeros((x.shape[1], 1), float)
else:
    betas = ini_betas

if isinstance(family, Binomial):
    y = family.link._clean(y)
if isinstance(family, Poisson):
    y_off = y / offset
    y_off = family.starting_mu(y_off)
    v = family.predict(y_off)
    mu = family.starting_mu(y)
else:
    mu = family.starting_mu(y)
    v = family.predict(mu)

while diff > tol and n_iter < max_iter:
    n_iter += 1
    w = family.weights(mu)
    z = v + (family.link.deriv(mu) * (y - mu))
    w = np.sqrt(w)
    if not isinstance(x, np.ndarray):
        w = sp.csr_matrix(w)
        z = sp.csr_matrix(z)
    wx = spmultiply(x, w, array_out=False)
    wz = spmultiply(z, w, array_out=False)
    if wi is None:
        n_betas = _compute_betas(wz, wx)
    else:
        n_betas, xtx_inv_xt = _compute_betas_gwr(wz, wx, wi)
    v = spdot(x, n_betas)
    mu = family.fitted(v)
    if isinstance(family, Poisson):
        mu = mu * offset
    diff = min(abs(n_betas - betas))
    betas = n_betas

if wi is None:
    return betas, mu, wx, n_iter
else:
    return betas, mu, v, w, z, xtx_inv_xt, n_iter
<SYSTEM_TASK:> Find the next regular number greater than or equal to target. <END_TASK> <USER_TASK:> Description: def _next_regular(target): """ Find the next regular number greater than or equal to target. Regular numbers are composites of the prime factors 2, 3, and 5. Also known as 5-smooth numbers or Hamming numbers, these are the optimal size for inputs to FFTPACK. Target must be a positive integer. """
if target <= 6:
    return target

# Quickly check if it's already a power of 2
if not (target & (target - 1)):
    return target

match = float('inf')  # Anything found will be smaller
p5 = 1
while p5 < target:
    p35 = p5
    while p35 < target:
        # Ceiling integer division, avoiding conversion to float
        # (quotient = ceil(target / p35))
        quotient = -(-target // p35)
        # Quickly find next power of 2 >= quotient
        try:
            p2 = 2 ** ((quotient - 1).bit_length())
        except AttributeError:
            # Fallback for Python <2.7
            p2 = 2 ** _bit_length_26(quotient - 1)

        N = p2 * p35
        if N == target:
            return N
        elif N < match:
            match = N
        p35 *= 3
        if p35 == target:
            return p35
    if p35 < match:
        match = p35
    p5 *= 5
    if p5 == target:
        return p5
if p5 < match:
    match = p5
return match
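A quick sanity check of the search above (hypothetical usage sketch; assumes _next_regular is in scope). The expected values follow directly from the definition of 5-smooth numbers:

assert _next_regular(7) == 8        # 8 = 2**3; 7 itself has the prime factor 7
assert _next_regular(97) == 100     # 100 = 2**2 * 5**2; 98 and 99 contain factors 7 and 11
assert _next_regular(1000) == 1000  # 1000 = 2**3 * 5**3 is already regular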
<SYSTEM_TASK:> Compute the weighted quantile of a 1D numpy array. <END_TASK> <USER_TASK:> Description: def quantile_1D(data, weights, quantile): """ Compute the weighted quantile of a 1D numpy array. Parameters ---------- data : ndarray Input array (one dimension). weights : ndarray Array with the weights, of the same size as `data`. quantile : float Quantile to compute. It must have a value between 0 and 1. Returns ------- quantile_1D : float The output value. """
# Check the data
if not isinstance(data, np.matrix):
    data = np.asarray(data)
if not isinstance(weights, np.matrix):
    weights = np.asarray(weights)
nd = data.ndim
if nd != 1:
    raise TypeError("data must be a one dimensional array")
ndw = weights.ndim
if ndw != 1:
    raise TypeError("weights must be a one dimensional array")
if data.shape != weights.shape:
    raise TypeError("the length of data and weights must be the same")
if ((quantile > 1.) or (quantile < 0.)):
    raise ValueError("quantile must have a value between 0. and 1.")
# Sort the data
ind_sorted = np.argsort(data)
sorted_data = data[ind_sorted]
sorted_weights = weights[ind_sorted]
# Compute the auxiliary arrays
Sn = np.cumsum(sorted_weights)
# TODO: Check that the weights do not sum zero
#assert Sn != 0, "The sum of the weights must not be zero"
Pn = (Sn-0.5*sorted_weights)/Sn[-1]
# Get the value of the weighted median
return np.interp(quantile, Pn, sorted_data)
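A minimal behaviour sketch (assumes quantile_1D above is in scope; the values follow from the interpolation formula Pn = (Sn - 0.5*w)/Sn[-1]):

import numpy as np

data = np.array([1.0, 2.0, 3.0])
# With equal weights this reduces to the ordinary interpolated median:
print(quantile_1D(data, np.array([1.0, 1.0, 1.0]), 0.5))  # -> 2.0
# Up-weighting the largest observation pulls the median toward it:
print(quantile_1D(data, np.array([1.0, 1.0, 2.0]), 0.5))  # -> ~2.33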
<SYSTEM_TASK:> Weighted quantile of an array with respect to the last axis. <END_TASK> <USER_TASK:> Description: def quantile(data, weights, quantile): """ Weighted quantile of an array with respect to the last axis. Parameters ---------- data : ndarray Input array. weights : ndarray Array with the weights. It must have the same size as the last axis of `data`. quantile : float Quantile to compute. It must have a value between 0 and 1. Returns ------- quantile : float The output value. """
# TODO: Allow to specify the axis
nd = data.ndim
if nd == 0:
    raise TypeError("data must have at least one dimension")
elif nd == 1:
    return quantile_1D(data, weights, quantile)
elif nd > 1:
    n = data.shape
    imr = data.reshape((np.prod(n[:-1]), n[-1]))
    result = np.apply_along_axis(quantile_1D, -1, imr, weights, quantile)
    return result.reshape(n[:-1])
<SYSTEM_TASK:> execute a NON-cached, throttled einfo query <END_TASK> <USER_TASK:> Description: def einfo(self, args=None): """ execute a NON-cached, throttled einfo query einfo.fcgi?db=<database> Input: Entrez database (&db) or None (returns info on all Entrez databases) Output: XML containing database statistics Example: Find database statistics for Entrez Protein. QueryService.einfo({'db': 'protein'}) Equivalent HTTP request: https://eutils.ncbi.nlm.nih.gov/entrez/eutils/einfo.fcgi?db=protein :param dict args: dict of query items (optional) :returns: content of reply :rtype: str :raises EutilsRequestError: when NCBI replies, but the request failed (e.g., bogus database name) """
if args is None:
    args = {}
return self._query('/einfo.fcgi', args, skip_cache=True)
<SYSTEM_TASK:> Return whether or not a container is permitted. <END_TASK> <USER_TASK:> Description: def container_permitted(self, name): """Return whether or not a container is permitted. :param name: Container name. :return: ``True`` if container is permitted. :rtype: ``bool`` """
white = self._container_whitelist
black = self._container_blacklist
return name not in black and (not white or name in white)
<SYSTEM_TASK:> Get application media root from real media root URL. <END_TASK> <USER_TASK:> Description: def app_media_url(self): """Get application media root from real media root URL."""
url = None
media_dir = self.CLOUD_BROWSER_STATIC_MEDIA_DIR
if media_dir:
    url = os.path.join(self.MEDIA_URL, media_dir).rstrip('/') + '/'

return url
<SYSTEM_TASK:> Get the mtime associated with a module. If this is a .pyc or .pyo file and <END_TASK> <USER_TASK:> Description: def module_getmtime(filename): """ Get the mtime associated with a module. If this is a .pyc or .pyo file and a corresponding .py file exists, the time of the .py file is returned. :param filename: filename of the module. :returns: mtime or None if the file doesn't exist. """
if os.path.splitext(filename)[1].lower() in (".pyc", ".pyo") and os.path.exists(filename[:-1]):
    return os.path.getmtime(filename[:-1])
if os.path.exists(filename):
    return os.path.getmtime(filename)
return None
<SYSTEM_TASK:> Reload a module if it has changed since we last imported it. This is <END_TASK> <USER_TASK:> Description: def module_reload_changed(key): """ Reload a module if it has changed since we last imported it. This is necessary if module a imports script b, script b is changed, and then module c asks to import script b. :param key: our key used in the WatchList. :returns: True if reloaded. """
imp.acquire_lock()
try:
    modkey = module_sys_modules_key(key)
    if not modkey:
        return False
    found = None
    if modkey:
        for second in WatchList:
            secmodkey = module_sys_modules_key(second)
            if secmodkey and sys.modules[modkey] == sys.modules[secmodkey]:
                found = second
                foundmodkey = secmodkey
                break
    if not found:
        return
    filemtime = module_getmtime(WatchList[found]["file"])
    filemtime = latest_submodule_time(found, filemtime)
    if filemtime > WatchList[found]["time"]:
        tangelo.log("Reloaded %s" % found)
        reload_including_local(sys.modules[foundmodkey])
        for second in WatchList:
            if WatchList[second]["file"] == WatchList[found]["file"]:
                WatchList[second]["time"] = filemtime
finally:
    imp.release_lock()
return True
<SYSTEM_TASK:> Check if a module is in the sys.modules dictionary in some manner. If so, <END_TASK> <USER_TASK:> Description: def module_sys_modules_key(key): """ Check if a module is in the sys.modules dictionary in some manner. If so, return the key used in that dictionary. :param key: our key to the module. :returns: the key in sys.modules or None. """
moduleparts = key.split(".")
for partnum, part in enumerate(moduleparts):
    modkey = ".".join(moduleparts[partnum:])
    if modkey in sys.modules:
        return modkey
return None
<SYSTEM_TASK:> Reload a module. If it isn't found, try to include the local service <END_TASK> <USER_TASK:> Description: def reload_including_local(module): """ Reload a module. If it isn't found, try to include the local service directory. This must be called from a thread that has acquired the import lock. :param module: the module to reload. """
try:
    reload(module)
except ImportError:
    # This can happen if the module was loaded in the immediate script
    # directory.  Add the service path and try again.
    if not hasattr(cherrypy.thread_data, "modulepath"):
        raise
    path = os.path.abspath(cherrypy.thread_data.modulepath)
    root = os.path.abspath(cherrypy.config.get("webroot"))
    if path not in sys.path and (path == root or path.startswith(root + os.path.sep)):
        oldpath = sys.path
        try:
            sys.path = [path] + sys.path
            reload(module)
        finally:
            sys.path = oldpath
    else:
        raise
<SYSTEM_TASK:> Recursively reload submodules which are more recent than a specified <END_TASK> <USER_TASK:> Description: def reload_recent_submodules(module, mtime=0, processed=[]): """ Recursively reload submodules which are more recent than a specified timestamp. To be called from a thread that has acquired the import lock to be thread safe. :param module: the module name. The WatchList is checked for modules that list this as a parent. :param mtime: the latest module time known to this point. :param processed: a list of modules that were processed (to avoid infinite recursion). :returns: True if any submodule was reloaded. """
if module.endswith(".py"):
    module = module[:-3]
if module in processed:
    return False
any_reloaded = False
for key in WatchList:
    if WatchList[key]["parent"] == module:
        reloaded = reload_recent_submodules(key, mtime, processed)
        filemtime = module_getmtime(WatchList[key]["file"])
        filemtime = latest_submodule_time(key, filemtime)
        any_reloaded = any_reloaded or reloaded
        if reloaded or filemtime > WatchList[key]["time"]:
            WatchList[key]["time"] = filemtime
            for second in WatchList:
                if second != key and WatchList[second]["file"] == WatchList[key]["file"]:
                    WatchList[second]["time"] = filemtime
            modkey = module_sys_modules_key(key)
            if modkey:
                try:
                    reload_including_local(sys.modules[modkey])
                    tangelo.log("Reloaded %s" % modkey)
                except ImportError:
                    del sys.modules[modkey]
                    tangelo.log("Asking %s to reimport" % modkey)
            any_reloaded = True
return any_reloaded
<SYSTEM_TASK:> When a module is asked to be imported, check if we have previously imported <END_TASK> <USER_TASK:> Description: def watch_import(name, globals=None, *args, **kwargs): """ When a module is asked to be imported, check if we have previously imported it. If so, check whether its time stamp, that of a companion yaml file, or any of the modules it imports have changed; if they have, reimport the module. :params: see __builtin__.__import__ """
# Don"t monitor builtin modules. types seem special, so don"t monitor it # either. monitor = not imp.is_builtin(name) and name not in ("types", ) # Don"t monitor modules if we don"t know where they came from monitor = monitor and isinstance(globals, dict) and globals.get("__name__") if not monitor: return builtin_import(name, globals, *args, **kwargs) # This will be the dotted module name except for service modules where it # will be the absolute file path. parent = globals["__name__"] key = parent + "." + name module_reload_changed(key) try: module = builtin_import(name, globals, *args, **kwargs) except ImportError: raise if getattr(module, "__file__", None): if key not in WatchList: tangelo.log_info("WATCH", "Monitoring import %s from %s" % (name, parent)) imp.acquire_lock() try: if key not in WatchList: filemtime = module_getmtime(module.__file__) or 0 filemtime = latest_submodule_time(key, filemtime) WatchList[key] = { "time": filemtime } WatchList[key].update({ "parent": parent, "name": name, "file": module.__file__ }) finally: imp.release_lock() return module
<SYSTEM_TASK:> When we ask to fetch a module with optional config file, check time stamps <END_TASK> <USER_TASK:> Description: def watch_module_cache_get(cache, module): """ When we ask to fetch a module with optional config file, check time stamps and dependencies to determine if it should be reloaded or not. :param cache: the cache object that stores whether to check for config files and which files have been loaded. :param module: the path of the module to load. :returns: the loaded module. """
imp.acquire_lock()
try:
    if not hasattr(cache, "timestamps"):
        cache.timestamps = {}
    mtime = os.path.getmtime(module)
    mtime = latest_submodule_time(module, mtime)
    if getattr(cache, "config", False):
        config_file = module[:-2] + "yaml"
        if os.path.exists(config_file):
            # Our timestamp is the latest time of the config file or the
            # module.
            mtime = max(mtime, os.path.getmtime(config_file))
        # If we have a config file and the timestamp is more recent than
        # the recorded timestamp, remove the config file from the list of
        # loaded files so that it will get loaded again.
        if config_file in cache.config_files and mtime > cache.timestamps.get(module, 0):
            del cache.config_files[config_file]
            tangelo.log("WATCH", "Asking to reload config file %s" % config_file)
    # If the timestamp is more recent than the recorded value, remove the
    # module from our records so that it will be loaded again.
    if module in cache.modules and mtime > cache.timestamps.get(module, 0):
        del cache.modules[module]
        tangelo.log("WATCH", "Asking to reload module %s" % module)
    if module not in cache.timestamps:
        tangelo.log_info("WATCH", "Monitoring module %s" % module)
    reload_recent_submodules(module, mtime)
    cache.timestamps[module] = mtime
    service = tangelo_module_cache_get(cache, module)
    # Update our time based on all the modules that we may have just
    # imported.  The times can change from before because python files are
    # compiled, for instance.
    mtime = latest_submodule_time(module, mtime)
    cache.timestamps[module] = mtime
finally:
    imp.release_lock()
return service
<SYSTEM_TASK:> Convert value to integer. <END_TASK> <USER_TASK:> Description: def get_int(value, default, test_fn=None): """Convert value to integer. :param value: Integer value. :param default: Default value on failed conversion. :param test_fn: Constraint function. Use default if returns ``False``. :return: Integer value. :rtype: ``int`` """
try:
    converted = int(value)
except ValueError:
    return default

test_fn = test_fn if test_fn else lambda x: True
return converted if test_fn(converted) else default
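Behaviour sketch for get_int, following the docstring and code above:

assert get_int("25", 10) == 25                      # clean conversion
assert get_int("not-a-number", 10) == 10            # ValueError falls back to default
assert get_int("-5", 10, lambda x: x > 0) == 10     # constraint rejects the value
assert get_int("99", 10, lambda x: x < 100) == 99   # constraint accepts the value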
<SYSTEM_TASK:> Enforces module presence. <END_TASK> <USER_TASK:> Description: def requires(module, name=""): """Enforces module presence. The general use here is to allow conditional imports that may fail (e.g., a required python package is not installed) but still allow the rest of the python package to compile and run fine. If the method wrapped with this decorator is invoked, then a runtime error is generated. :param module: required module (set as variable to ``None`` on import fail) :type module: ``module`` or ``None`` :param name: module name :type name: ``string`` """
def wrapped(method):
    """Call and enforce method."""
    if module is None:
        raise ImproperlyConfigured("Module '%s' is not installed." % name)
    return method

return wrapped
<SYSTEM_TASK:> Try various RFC conversions to ``datetime`` or return ``None``. <END_TASK> <USER_TASK:> Description: def dt_from_header(date_str): """Try various RFC conversions to ``datetime`` or return ``None``. :param date_str: Date string. :type date_str: ``string`` :return: Date time. :rtype: :class:`datetime.datetime` or ``None`` """
convert_fns = (
    dt_from_rfc8601,
    dt_from_rfc1123,
)

for convert_fn in convert_fns:
    try:
        return convert_fn(date_str)
    except ValueError:
        pass

return None
<SYSTEM_TASK:> Rightmost part of path after separator. <END_TASK> <USER_TASK:> Description: def basename(path): """Rightmost part of path after separator."""
base_path = path.strip(SEP)
sep_ind = base_path.rfind(SEP)
if sep_ind < 0:
    return path

return base_path[sep_ind + 1:]
<SYSTEM_TASK:> Split path into container, object. <END_TASK> <USER_TASK:> Description: def path_parts(path): """Split path into container, object. :param path: Path to resource (including container). :type path: `string` :return: Container, storage object tuple. :rtype: `tuple` of `string`, `string` """
path = path if path is not None else ''
container_path = object_path = ''
parts = path_list(path)

if len(parts) >= 1:
    container_path = parts[0]
if len(parts) > 1:
    object_path = path_join(*parts[1:])

return container_path, object_path
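Illustrative splits (assuming SEP == '/' and the path_list/path_join helpers referenced above):

print(path_parts("container/dir/file.txt"))  # -> ('container', 'dir/file.txt')
print(path_parts("container"))               # -> ('container', '')
print(path_parts(""))                        # -> ('', '')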
<SYSTEM_TASK:> Squared sum of total displacements for these atoms. <END_TASK> <USER_TASK:> Description: def collective_dr_squared( self ): """ Squared sum of total displacements for these atoms. Args: None Returns: (Float): The square of the summed total displacements for these atoms. """
return sum( np.square( sum( [ atom.dr for atom in self.atoms ] ) ) )
<SYSTEM_TASK:> Number of these atoms occupying a specific site type. <END_TASK> <USER_TASK:> Description: def occupations( self, site_label ): """ Number of these atoms occupying a specific site type. Args: site_label (Str): Label for the site type being considered. Returns: (Int): Number of atoms occupying sites of type `site_label`. """
return sum( atom.site.label == site_label for atom in self.atoms )
<SYSTEM_TASK:> Convert a string version of a function name to the callable object. <END_TASK> <USER_TASK:> Description: def get_class(class_string): """ Convert a string version of a function name to the callable object. """
try:
    mod_name, class_name = get_mod_func(class_string)
    if class_name != '':
        cls = getattr(__import__(mod_name, {}, {}, ['']), class_name)
        return cls
except (ImportError, AttributeError):
    pass
raise ImportError('Failed to import %s' % class_string)
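Illustrative call (assumes get_mod_func splits 'pkg.mod.ClassName' into ('pkg.mod', 'ClassName'), which is how it is used here):

HttpResponse = get_class('django.http.HttpResponse')  # returns the class object
# get_class('no.such.Thing') would raise ImportError('Failed to import no.such.Thing')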
<SYSTEM_TASK:> Return ``True`` if GS standalone folder object. <END_TASK> <USER_TASK:> Description: def _is_gs_folder(cls, result): """Return ``True`` if GS standalone folder object. GS will create a 0 byte ``<FOLDER NAME>_$folder$`` key as a pseudo-directory place holder if there are no files present. """
return (cls.is_key(result) and result.size == 0 and result.name.endswith(cls._gs_folder_suffix))
<SYSTEM_TASK:> Return ``True`` if result is a prefix object. <END_TASK> <USER_TASK:> Description: def is_prefix(cls, result): """Return ``True`` if result is a prefix object. .. note:: Boto uses the S3 Prefix object for GS prefixes. """
from boto.s3.prefix import Prefix
return isinstance(result, Prefix) or cls._is_gs_folder(result)
<SYSTEM_TASK:> Return whether or not to do translation. <END_TASK> <USER_TASK:> Description: def translate(self, exc): """Return whether or not to do translation."""
from boto.exception import StorageResponseError

if isinstance(exc, StorageResponseError):
    if exc.status == 404:
        return self.error_cls(str(exc))

return None
<SYSTEM_TASK:> Create from ambiguous result. <END_TASK> <USER_TASK:> Description: def from_result(cls, container, result): """Create from ambiguous result."""
if result is None:
    raise errors.NoObjectException
elif cls.is_prefix(result):
    return cls.from_prefix(container, result)
elif cls.is_key(result):
    return cls.from_key(container, result)

raise errors.CloudException("Unknown boto result type: %s" % type(result))
<SYSTEM_TASK:> Create configuration from Django settings or environment. <END_TASK> <USER_TASK:> Description: def from_settings(cls): """Create configuration from Django settings or environment."""
from cloud_browser.app_settings import settings
from django.core.exceptions import ImproperlyConfigured

conn_cls = conn_fn = None
datastore = settings.CLOUD_BROWSER_DATASTORE
if datastore == 'AWS':
    # Try AWS
    from cloud_browser.cloud.aws import AwsConnection
    account = settings.CLOUD_BROWSER_AWS_ACCOUNT
    secret_key = settings.CLOUD_BROWSER_AWS_SECRET_KEY
    if account and secret_key:
        conn_cls = AwsConnection
        conn_fn = lambda: AwsConnection(account, secret_key)

if datastore == 'Google':
    # Try Google Storage
    from cloud_browser.cloud.google import GsConnection
    account = settings.CLOUD_BROWSER_GS_ACCOUNT
    secret_key = settings.CLOUD_BROWSER_GS_SECRET_KEY
    if account and secret_key:
        conn_cls = GsConnection
        conn_fn = lambda: GsConnection(account, secret_key)

elif datastore == 'Rackspace':
    # Try Rackspace
    account = settings.CLOUD_BROWSER_RACKSPACE_ACCOUNT
    secret_key = settings.CLOUD_BROWSER_RACKSPACE_SECRET_KEY
    servicenet = settings.CLOUD_BROWSER_RACKSPACE_SERVICENET
    authurl = settings.CLOUD_BROWSER_RACKSPACE_AUTHURL
    if account and secret_key:
        from cloud_browser.cloud.rackspace import RackspaceConnection
        conn_cls = RackspaceConnection
        conn_fn = lambda: RackspaceConnection(
            account,
            secret_key,
            servicenet=servicenet,
            authurl=authurl)

elif datastore == 'Filesystem':
    # Mock filesystem
    root = settings.CLOUD_BROWSER_FILESYSTEM_ROOT
    if root is not None:
        from cloud_browser.cloud.fs import FilesystemConnection
        conn_cls = FilesystemConnection
        conn_fn = lambda: FilesystemConnection(root)

if conn_cls is None:
    raise ImproperlyConfigured(
        "No suitable credentials found for datastore: %s." % datastore)

# Adjust connection function.
conn_fn = staticmethod(conn_fn)

# Directly cache attributes.
cls.__connection_cls = conn_cls
cls.__connection_fn = conn_fn

return conn_cls, conn_fn
<SYSTEM_TASK:> Return connection class. <END_TASK> <USER_TASK:> Description: def get_connection_cls(cls): """Return connection class. :rtype: :class:`type` """
if cls.__connection_cls is None:
    cls.__connection_cls, _ = cls.from_settings()
return cls.__connection_cls
<SYSTEM_TASK:> Create container from path. <END_TASK> <USER_TASK:> Description: def from_path(cls, conn, path): """Create container from path."""
path = path.strip(SEP)
full_path = os.path.join(conn.abs_root, path)
return cls(conn, path, 0, os.path.getsize(full_path))
<SYSTEM_TASK:> Generate a cubic lattice. <END_TASK> <USER_TASK:> Description: def cubic_lattice( a, b, c, spacing ): """ Generate a cubic lattice. Args: a (Int): Number of lattice repeat units along x. b (Int): Number of lattice repeat units along y. c (Int): Number of lattice repeat units along z. spacing (Float): Distance between lattice sites. Returns: (Lattice): The new lattice """
grid = np.array( list( range( 1, a * b * c + 1 ) ) ).reshape( a, b, c, order='F' )
it = np.nditer( grid, flags=[ 'multi_index' ] )
sites = []
while not it.finished:
    x, y, z = it.multi_index
    r = np.array( [ x, y, z ] ) * spacing
    neighbours = [ np.roll( grid, +1, axis=0 )[x,y,z],
                   np.roll( grid, -1, axis=0 )[x,y,z],
                   np.roll( grid, +1, axis=1 )[x,y,z],
                   np.roll( grid, -1, axis=1 )[x,y,z],
                   np.roll( grid, +1, axis=2 )[x,y,z],
                   np.roll( grid, -1, axis=2 )[x,y,z] ]
    sites.append( lattice_site.Site( int( it[0] ), r, neighbours, 0.0, 'L' ) )
    it.iternext()
return lattice.Lattice( sites, cell_lengths = np.array( [ a, b, c ] ) * spacing )
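Illustrative call (uses the lattice and lattice_site modules referenced by this code): a 4 x 4 x 4 lattice with unit spacing gives 64 sites, each with six periodic nearest neighbours supplied by the np.roll wrap-around.

lat = cubic_lattice( a=4, b=4, c=4, spacing=1.0 )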
<SYSTEM_TASK:> Generate a lattice from a sites file. <END_TASK> <USER_TASK:> Description: def lattice_from_sites_file( site_file, cell_lengths ): """ Generate a lattice from a sites file. Args: site_file (Str): Filename for the file containing the site information. cell_lengths (List(Float,Float,Float)): A list containing the [ x, y, z ] cell lengths. Returns: (Lattice): The new lattice Notes: | The site information file format is: | <number_of_sites> (Int). | Followed by blocks of data separated by double linebreaks; one block per site. | site: <site number> (Int). | centre: <x> <y> <z> (Float,Float,Float). | neighbours: <list of site numbers of neighbouring sites> (List[Int]). | label: <site group label> (Str). | energy: <site occupation energy> (Float). | The energy is optional, and will be set to 0.0 if not included. | Line order within each block is not meaningful. | British and American English spellings for centre|center and neighbour|neighbor are accepted. | An example file can be found in the examples directory. """
sites = []
site_re = re.compile( 'site:\s+([-+]?\d+)' )
r_re = re.compile( 'cent(?:er|re):\s+([-\d\.e]+)\s+([-\d\.e]+)\s+([-\d\.e]+)' )
r_neighbours = re.compile( 'neighbou{0,1}rs:((\s+[-+]?\d+)+)' )
r_label = re.compile( 'label:\s+(\S+)' )
r_energy = re.compile( 'energy:\s([-+\d\.]+)' )
with open( site_file ) as f:
    filein = f.read().split("\n\n")
number_of_sites = int( filein[0] )
for i in range( number_of_sites ):
    block = filein[ i+1 ]
    number = int( site_re.findall( block )[0] )
    r = np.array( [ float(s) for s in r_re.findall( block )[0] ] )
    neighbours = [ int( s ) for s in r_neighbours.findall( block )[0][0].split() ]
    label = r_label.findall( block )[0]
    energy = r_energy.findall( block )
    if energy:
        energy = float( energy[0] )
    else:
        energy = 0.0
    sites.append( lattice_site.Site( number, r, neighbours, energy, label ) )
return lattice.Lattice( sites, cell_lengths = np.array( cell_lengths ) )
<SYSTEM_TASK:> Redirect to a given url while setting the chosen location in the <END_TASK> <USER_TASK:> Description: def set_location(request): """ Redirect to a given url while setting the chosen location in the cookie. The url and the location_id need to be specified in the request parameters. Since this view changes how the user will see the rest of the site, it must only be accessed as a POST request. If called as a GET request, it will redirect to the page in the request (the 'next' parameter) without changing any state. """
next = request.GET.get('next', None) or request.POST.get('next', None)
if not next:
    next = request.META.get('HTTP_REFERER', None)
if not next:
    next = '/'
response = http.HttpResponseRedirect(next)

if request.method == 'POST':
    location_id = request.POST.get('location_id', None) or request.POST.get('location', None)
    if location_id:
        try:
            location = get_class(settings.GEOIP_LOCATION_MODEL).objects.get(pk=location_id)
            storage_class(request=request, response=response).set(location=location, force=True)
        except (ValueError, ObjectDoesNotExist):
            pass
return response
<SYSTEM_TASK:> Returns the number of occupied nearest neighbour sites, classified by site type. <END_TASK> <USER_TASK:> Description: def site_specific_nn_occupation( self ): """ Returns the number of occupied nearest neighbour sites, classified by site type. Args: None Returns: (Dict(Str:Int)): Dictionary of nearest-neighbour occupied site numbers, classified by site label, e.g. { 'A' : 2, 'B' : 1 }. """
to_return = { l : 0 for l in set( ( site.label for site in self.p_neighbours ) ) }
for site in self.p_neighbours:
    if site.is_occupied:
        to_return[ site.label ] += 1
return to_return
<SYSTEM_TASK:> The coordination-number dependent energy for this site. <END_TASK> <USER_TASK:> Description: def cn_occupation_energy( self, delta_occupation=None ): """ The coordination-number dependent energy for this site. Args: delta_occupation (:obj:Dict(Str:Int), optional): A dictionary of a change in (site-type specific) coordination number, e.g. { 'A' : 1, 'B' : -1 }. If this is not None, the coordination-number dependent energy is calculated including these changes in neighbour-site occupations. Defaults to None Returns: (Float): The coordination-number dependent energy for this site. """
nn_occupations = self.site_specific_nn_occupation()
if delta_occupation:
    for site in delta_occupation:
        assert( site in nn_occupations )
        nn_occupations[ site ] += delta_occupation[ site ]
return sum( [ self.cn_occupation_energies[ s ][ n ] for s, n in nn_occupations.items() ] )
<SYSTEM_TASK:> Removes all geodata stored in database. <END_TASK> <USER_TASK:> Description: def clear_database(self): """ Removes all geodata stored in database. Useful for development, never use on production. """
self.logger.info('Removing obsolete geoip from database...')
IpRange.objects.all().delete()
City.objects.all().delete()
Region.objects.all().delete()
Country.objects.all().delete()
<SYSTEM_TASK:> Converts file line into dictionary <END_TASK> <USER_TASK:> Description: def _line_to_dict(self, file, field_names): """ Converts file line into dictionary """
for line in file:
    delimiter = settings.IPGEOBASE_FILE_FIELDS_DELIMITER
    yield self._extract_data_from_line(line, field_names, delimiter)
<SYSTEM_TASK:> Iterate over ip info and extract useful data <END_TASK> <USER_TASK:> Description: def _process_cidr_file(self, file): """ Iterate over ip info and extract useful data """
data = {'cidr': list(), 'countries': set(), 'city_country_mapping': dict()}
allowed_countries = settings.IPGEOBASE_ALLOWED_COUNTRIES
for cidr_info in self._line_to_dict(file, field_names=settings.IPGEOBASE_CIDR_FIELDS):
    city_id = cidr_info['city_id'] if cidr_info['city_id'] != '-' else None
    if city_id is not None:
        data['city_country_mapping'].update({cidr_info['city_id']: cidr_info['country_code']})
    if allowed_countries and cidr_info['country_code'] not in allowed_countries:
        continue
    data['cidr'].append({'start_ip': cidr_info['start_ip'],
                         'end_ip': cidr_info['end_ip'],
                         'country_id': cidr_info['country_code'],
                         'city_id': city_id})
    data['countries'].add(cidr_info['country_code'])
return data
<SYSTEM_TASK:> Iterate over cities info and extract useful data <END_TASK> <USER_TASK:> Description: def _process_cities_file(self, file, city_country_mapping): """ Iterate over cities info and extract useful data """
data = {'all_regions': list(), 'regions': list(), 'cities': list(), 'city_region_mapping': dict()}
allowed_countries = settings.IPGEOBASE_ALLOWED_COUNTRIES
for geo_info in self._line_to_dict(file, field_names=settings.IPGEOBASE_CITIES_FIELDS):
    country_code = self._get_country_code_for_city(geo_info['city_id'], city_country_mapping, data['all_regions'])
    new_region = {'name': geo_info['region_name'], 'country__code': country_code}
    if new_region not in data['all_regions']:
        data['all_regions'].append(new_region)
    if allowed_countries and country_code not in allowed_countries:
        continue
    if new_region not in data['regions']:
        data['regions'].append(new_region)
    data['cities'].append({'region__name': geo_info['region_name'],
                           'name': geo_info['city_name'],
                           'id': geo_info['city_id'],
                           'latitude': Decimal(geo_info['latitude']),
                           'longitude': Decimal(geo_info['longitude'])})
return data
<SYSTEM_TASK:> Update database with new countries, regions and cities <END_TASK> <USER_TASK:> Description: def _update_geography(self, countries, regions, cities, city_country_mapping): """ Update database with new countries, regions and cities """
existing = {
    'cities': list(City.objects.values_list('id', flat=True)),
    'regions': list(Region.objects.values('name', 'country__code')),
    'countries': Country.objects.values_list('code', flat=True)
}
for country_code in countries:
    if country_code not in existing['countries']:
        Country.objects.create(code=country_code, name=ISO_CODES.get(country_code, country_code))
for entry in regions:
    if entry not in existing['regions']:
        Region.objects.create(name=entry['name'], country_id=entry['country__code'])
for entry in cities:
    if int(entry['id']) not in existing['cities']:
        code = city_country_mapping.get(entry['id'])
        if code:
            region = Region.objects.get(name=entry['region__name'], country__code=code)
            City.objects.create(id=entry['id'], name=entry['name'], region=region,
                                latitude=entry.get('latitude'), longitude=entry.get('longitude'))
<SYSTEM_TASK:> The relative probability for a jump between two sites with specific site types and coordination numbers. <END_TASK> <USER_TASK:> Description: def relative_probability( self, l1, l2, c1, c2 ): """ The relative probability for a jump between two sites with specific site types and coordination numbers. Args: l1 (Str): Site label for the initial site. l2 (Str): Site label for the final site. c1 (Int): Coordination number for the initial site. c2 (Int): Coordination number for the final site. Returns: (Float): The relative probability of this jump occurring. """
if self.site_energies:
    site_delta_E = self.site_energies[ l2 ] - self.site_energies[ l1 ]
else:
    site_delta_E = 0.0
if self.nn_energy:
    delta_nn = c2 - c1 - 1 # -1 because the hopping ion is not counted in the final site occupation number
    site_delta_E += delta_nn * self.nn_energy
return metropolis( site_delta_E )
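The metropolis helper is referenced here but not shown. A minimal sketch consistent with its use above, assuming energies are expressed in units of kT, would be:

import math

def metropolis( delta_E ):
    # Sketch of the acceptance rule assumed by relative_probability;
    # not necessarily the library's own definition. Downhill or neutral
    # moves are always accepted; uphill moves with Boltzmann probability.
    if delta_E <= 0.0:
        return 1.0
    return math.exp( -delta_E )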
<SYSTEM_TASK:> Construct a look-up table of relative jump probabilities for a nearest-neighbour interaction Hamiltonian. <END_TASK> <USER_TASK:> Description: def generate_nearest_neighbour_lookup_table( self ): """ Construct a look-up table of relative jump probabilities for a nearest-neighbour interaction Hamiltonian. Args: None. Returns: None. """
self.jump_probability = {}
for site_label_1 in self.connected_site_pairs:
    self.jump_probability[ site_label_1 ] = {}
    for site_label_2 in self.connected_site_pairs[ site_label_1 ]:
        self.jump_probability[ site_label_1 ][ site_label_2 ] = {}
        for coordination_1 in range( self.max_coordination_per_site[ site_label_1 ] ):
            self.jump_probability[ site_label_1 ][ site_label_2 ][ coordination_1 ] = {}
            for coordination_2 in range( 1, self.max_coordination_per_site[ site_label_2 ] + 1 ):
                self.jump_probability[ site_label_1 ][ site_label_2 ][ coordination_1 ][ coordination_2 ] = self.relative_probability( site_label_1, site_label_2, coordination_1, coordination_2 )
<SYSTEM_TASK:> Reinitialise the stored displacements, number of hops, and list of sites visited for this `Atom`. <END_TASK> <USER_TASK:> Description: def reset( self ): """ Reinitialise the stored displacements, number of hops, and list of sites visited for this `Atom`. Args: None Returns: None """
self.number_of_hops = 0
self.dr = np.array( [ 0.0, 0.0, 0.0 ] )
self.summed_dr2 = 0.0
self.sites_visited = [ self._site.number ]
<SYSTEM_TASK:> Find the smallest range containing the given IP. <END_TASK> <USER_TASK:> Description: def by_ip(self, ip): """ Find the smallest range containing the given IP. """
try:
    number = inet_aton(ip)
except Exception:
    raise IpRange.DoesNotExist
try:
    return self.filter(start_ip__lte=number, end_ip__gte=number)\
               .order_by('end_ip', '-start_ip')[0]
except IndexError:
    raise IpRange.DoesNotExist
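Illustrative manager call (model and manager wiring assumed from context); returns the narrowest matching range or raises IpRange.DoesNotExist:

try:
    ip_range = IpRange.objects.by_ip('8.8.8.8')
except IpRange.DoesNotExist:
    ip_range = None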
<SYSTEM_TASK:> Decorate a function that takes strings to one that takes typed values. <END_TASK> <USER_TASK:> Description: def types(**typefuncs): """ Decorate a function that takes strings to one that takes typed values. The decorator's arguments are functions to perform type conversion. The positional and keyword arguments will be mapped to the positional and keyword arguments of the decorated function. This allows web-based service functions, which by design always are passed string arguments, to be declared as functions taking typed arguments instead, eliminating the overhead of having to perform type conversions manually. If type conversion fails for any argument, the wrapped function will return a dict describing the exception that was raised. """
def wrap(f):
    @functools.wraps(f)
    def typed_func(*pargs, **kwargs):
        # Analyze the incoming arguments so we know how to apply the
        # type-conversion functions in `typefuncs`.
        argspec = inspect.getargspec(f)

        # The `args` property contains the list of named arguments passed to
        # f.  Construct a dict mapping from these names to the values that
        # were passed.
        #
        # It is possible that `args` contains names that are not represented
        # in `pargs`, if some of the arguments are passed as keyword
        # arguments.  In this case, the relative shortness of `pargs` will
        # cause the call to zip() to truncate the `args` list, and the
        # keyword-style passed arguments will simply be present in `kwargs`.
        pargs_dict = {name: value for (name, value) in zip(argspec.args, pargs)}

        # Begin converting arguments according to the functions given in
        # `typefuncs`.  If a given name does not appear in `typefuncs`,
        # simply leave it unchanged.  If a name appears in `typefuncs` that
        # does not appear in the argument list, this is considered an error.
        try:
            for name, func in typefuncs.iteritems():
                if name in pargs_dict:
                    pargs_dict[name] = func(pargs_dict[name])
                elif name in kwargs:
                    kwargs[name] = func(kwargs[name])
                else:
                    http_status(400, "Unknown Argument Name")
                    content_type("application/json")
                    return {"error": "'%s' was registered for type conversion but did not appear in the arguments list" % (name)}
        except ValueError as e:
            http_status(400, "Input Value Conversion Failed")
            content_type("application/json")
            return {"error": str(e)}

        # Unroll `pargs` into a list of arguments that are in the correct
        # order.
        pargs = []
        for name in argspec.args:
            try:
                pargs.append(pargs_dict[name])
            except KeyError:
                break

        # Call the wrapped function using the converted arguments.
        return f(*pargs, **kwargs)

    typed_func.typefuncs = typefuncs
    return typed_func

return wrap
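A usage sketch for @types (the names here are illustrative, not from the source):

@types(age=int, factor=float)
def scale_age(age, factor=1.0):
    return age * factor

# A web layer passes strings; the decorator converts them before the call:
scale_age("21", factor="2.5")  # -> 52.5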
<SYSTEM_TASK:> Decorate a function to automatically convert its return type to a string <END_TASK> <USER_TASK:> Description: def return_type(rettype): """ Decorate a function to automatically convert its return type to a string using a custom function. Web-based service functions must return text to the client. Tangelo contains default logic to convert many kinds of values into string, but this decorator allows the service writer to specify custom behavior falling outside of the default. If the conversion fails, an appropriate server error will be raised. """
def wrap(f):
    @functools.wraps(f)
    def converter(*pargs, **kwargs):
        # Run the function to capture the output.
        result = f(*pargs, **kwargs)

        # Convert the result using the return type function.
        try:
            result = rettype(result)
        except ValueError as e:
            http_status(500, "Return Value Conversion Failed")
            content_type("application/json")
            return {"error": str(e)}
        return result

    return converter

return wrap
<SYSTEM_TASK:> Return tuple of underlying exception classes to trap and wrap. <END_TASK> <USER_TASK:> Description: def excepts(cls): """Return tuple of underlying exception classes to trap and wrap. :rtype: ``tuple`` of ``type`` """
if cls._excepts is None:
    cls._excepts = tuple(cls.translations.keys())

return cls._excepts
<SYSTEM_TASK:> Return translation of exception to new class. <END_TASK> <USER_TASK:> Description: def translate(self, exc): """Return translation of exception to new class. Calling code should only raise exception if exception class is passed in, else ``None`` (which signifies no wrapping should be done). """
# Find actual class.
for key in self.translations.keys():
    if isinstance(exc, key):
        # pylint: disable=unsubscriptable-object
        return self.translations[key](str(exc))

return None
<SYSTEM_TASK:> Insert decorator from settings, if any. <END_TASK> <USER_TASK:> Description: def settings_view_decorator(function): """Insert decorator from settings, if any. .. note:: Decorator in ``CLOUD_BROWSER_VIEW_DECORATOR`` can be either a callable or a fully-qualified string path (the latter, which we'll lazy import). """
dec = settings.CLOUD_BROWSER_VIEW_DECORATOR

# Trade-up string to real decorator.
if isinstance(dec, str):
    # Split into module and decorator strings.
    mod_str, _, dec_str = dec.rpartition('.')
    if not (mod_str and dec_str):
        raise ImportError("Unable to import module: %s" % mod_str)

    # Import and try to get decorator function.
    mod = import_module(mod_str)
    if not hasattr(mod, dec_str):
        raise ImportError("Unable to import decorator: %s" % dec)
    dec = getattr(mod, dec_str)

if dec and callable(dec):
    return dec(function)

return function
<SYSTEM_TASK:> View files in a file path. <END_TASK> <USER_TASK:> Description: def browser(request, path='', template="cloud_browser/browser.html"): """View files in a file path. :param request: The request. :param path: Path to resource, including container as first part of path. :param template: Template to render. """
from itertools import islice
try:
    # pylint: disable=redefined-builtin
    from future_builtins import filter
except ImportError:
    # pylint: disable=import-error
    from builtins import filter

# Inputs.
container_path, object_path = path_parts(path)
incoming = request.POST or request.GET or {}

marker = incoming.get('marker', None)
marker_part = incoming.get('marker_part', None)
if marker_part:
    marker = path_join(object_path, marker_part)

# Get and adjust listing limit.
limit_default = settings.CLOUD_BROWSER_DEFAULT_LIST_LIMIT

def limit_test(num):
    return num > 0 and (MAX_LIMIT is None or num <= MAX_LIMIT - 1)

limit = get_int(incoming.get('limit', limit_default), limit_default, limit_test)

# Q1: Get all containers.
#     We optimize here by not individually looking up containers later,
#     instead going through this in-memory list.
# TODO: Should page listed containers with a ``limit`` and ``marker``.
conn = get_connection()
containers = conn.get_containers()

marker_part = None
container = None
objects = None
if container_path != '':
    # Find marked container from list.
    def cont_eq(container):
        return container.name == container_path

    filtered_conts = filter(cont_eq, containers)
    cont_list = list(islice(filtered_conts, 1))
    if not cont_list:
        raise Http404("No container at: %s" % container_path)

    # Q2: Get objects for instant list, plus one to check "next".
    container = cont_list[0]
    objects = container.get_objects(object_path, marker, limit + 1)
    marker = None

    # If over limit, strip last item and set marker.
    if len(objects) == limit + 1:
        objects = objects[:limit]
        marker = objects[-1].name
        marker_part = relpath(marker, object_path)

return render(request, template,
              {'path': path,
               'marker': marker,
               'marker_part': marker_part,
               'limit': limit,
               'breadcrumbs': _breadcrumbs(path),
               'container_path': container_path,
               'containers': containers,
               'container': container,
               'object_path': object_path,
               'objects': objects})
<SYSTEM_TASK:> View single document from path. <END_TASK> <USER_TASK:> Description: def document(_, path=''): """View single document from path. :param path: Path to resource, including container as first part of path. """
container_path, object_path = path_parts(path)
conn = get_connection()
try:
    container = conn.get_container(container_path)
except errors.NoContainerException:
    raise Http404("No container at: %s" % container_path)
except errors.NotPermittedException:
    raise Http404("Access denied for container at: %s" % container_path)

try:
    storage_obj = container.get_object(object_path)
except errors.NoObjectException:
    raise Http404("No object at: %s" % object_path)

# Get content-type and encoding.
content_type = storage_obj.smart_content_type
encoding = storage_obj.smart_content_encoding
response = HttpResponse(content=storage_obj.read(),
                        content_type=content_type)
if encoding not in (None, ''):
    response['Content-Encoding'] = encoding

return response
<SYSTEM_TASK:> Truncate string on character boundary. <END_TASK> <USER_TASK:> Description: def truncatechars(value, num, end_text="..."): """Truncate string on character boundary. .. note:: Django ticket `5025 <http://code.djangoproject.com/ticket/5025>`_ has a patch for a more extensible and robust truncate characters tag filter. Example:: {{ my_variable|truncatechars:22 }} :param value: Value to truncate. :type value: ``string`` :param num: Number of characters to trim to. :type num: ``int`` """
length = None
try:
    length = int(num)
except ValueError:
    pass

if length is not None and len(value) > length:
    return value[:length - len(end_text)] + end_text

return value
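Behaviour sketch; note that the ellipsis counts toward the character budget:

print(truncatechars("The quick brown fox", 10))  # -> 'The qui...'
print(truncatechars("short", 10))                # -> 'short' (unchanged)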
<SYSTEM_TASK:> Get base media URL for application static media. <END_TASK> <USER_TASK:> Description: def cloud_browser_media_url(_, token): """Get base media URL for application static media. Correctly handles whether or not the settings variable ``CLOUD_BROWSER_STATIC_MEDIA_DIR`` is set and served. For example:: <link rel="stylesheet" type="text/css" href="{% cloud_browser_media_url "css/cloud-browser.css" %}" /> """
bits = token.split_contents()
if len(bits) != 2:
    raise TemplateSyntaxError("'%s' takes one argument" % bits[0])

rel_path = bits[1]
return MediaUrlNode(rel_path)
<SYSTEM_TASK:> Reset all counters for this simulation. <END_TASK> <USER_TASK:> Description: def reset( self ): """ Reset all counters for this simulation. Args: None Returns: None """
self.lattice.reset()
for atom in self.atoms.atoms:
    atom.reset()
<SYSTEM_TASK:> Set the number of atoms for the simulation, and populate the simulation lattice. <END_TASK> <USER_TASK:> Description: def set_number_of_atoms( self, n, selected_sites=None ): """ Set the number of atoms for the simulation, and populate the simulation lattice. Args: n (Int): Number of atoms for this simulation. selected_sites (:obj:(List|Set|String), optional): Selects a subset of site types to be populated with atoms. Defaults to None. Returns: None """
self.number_of_atoms = n
self.atoms = species.Species( self.lattice.populate_sites( self.number_of_atoms, selected_sites=selected_sites ) )
<SYSTEM_TASK:> Check whether the simulation has been initialised. <END_TASK> <USER_TASK:> Description: def is_initialised( self ): """ Check whether the simulation has been initialised. Args: None Returns: None """
if not self.lattice:
    raise AttributeError('Running a simulation needs the lattice to be initialised')
if not self.atoms:
    raise AttributeError('Running a simulation needs the atoms to be initialised')
if not self.number_of_jumps and not self.for_time:
    raise AttributeError('Running a simulation needs number_of_jumps or for_time to be set')
<SYSTEM_TASK:> Returns the collective correlation factor, f_I <END_TASK> <USER_TASK:> Description: def old_collective_correlation( self ): """ Returns the collective correlation factor, f_I Args: None Returns: (Float): The collective correlation factor, f_I. Notes: This function assumes that the jump distance between sites has been normalised to a=1. If the jump distance is not equal to 1 then the value returned by this function should be divided by a^2. Even better, use self.collective_correlation """
if self.has_run:
    return self.atoms.collective_dr_squared() / float( self.number_of_jumps )
else:
    return None
<SYSTEM_TASK:> Create a jump-probability look-up table corresponding to the appropriate Hamiltonian. <END_TASK> <USER_TASK:> Description: def setup_lookup_table( self, hamiltonian='nearest-neighbour' ): """ Create a jump-probability look-up table corresponding to the appropriate Hamiltonian. Args: hamiltonian (Str, optional): String specifying the simulation Hamiltonian. Valid values are 'nearest-neighbour' (default) and 'coordination_number'. Returns: None """
expected_hamiltonian_values = [ 'nearest-neighbour', 'coordination_number' ]
if hamiltonian not in expected_hamiltonian_values:
    raise ValueError
self.lattice.jump_lookup_table = lookup_table.LookupTable( self.lattice, hamiltonian )
<SYSTEM_TASK:> Update the lattice state by accepting a specific jump <END_TASK> <USER_TASK:> Description: def update( self, jump ): """ Update the lattice state by accepting a specific jump Args: jump (Jump): The jump that has been accepted. Returns: None. """
atom = jump.initial_site.atom
dr = jump.dr( self.cell_lengths )
#print( "atom {} jumped from site {} to site {}".format( atom.number, jump.initial_site.number, jump.final_site.number ) )
jump.final_site.occupation = atom.number
jump.final_site.atom = atom
jump.final_site.is_occupied = True
jump.initial_site.occupation = 0
jump.initial_site.atom = None
jump.initial_site.is_occupied = False
# TODO: updating atom counters could be contained in an atom.move_to( site ) method
atom.site = jump.final_site
atom.number_of_hops += 1
atom.dr += dr
atom.summed_dr2 += np.dot( dr, dr )
<SYSTEM_TASK:> Populate the lattice sites with a specific number of atoms. <END_TASK> <USER_TASK:> Description: def populate_sites( self, number_of_atoms, selected_sites=None ): """ Populate the lattice sites with a specific number of atoms. Args: number_of_atoms (Int): The number of atoms to populate the lattice sites with. selected_sites (:obj:List, optional): List of site labels if only some sites are to be occupied. Defaults to None. Returns: None """
if number_of_atoms > self.number_of_sites:
    raise ValueError( 'number_of_atoms exceeds number_of_sites' )
if selected_sites:
    atoms = [ atom.Atom( initial_site = site ) for site in random.sample( [ s for s in self.sites if s.label in selected_sites ], number_of_atoms ) ]
else:
    atoms = [ atom.Atom( initial_site = site ) for site in random.sample( self.sites, number_of_atoms ) ]
self.number_of_occupied_sites = number_of_atoms
return atoms
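Because the sites are drawn with random.sample, no site can receive two atoms. A minimal standalone illustration of that guarantee (the string site list is hypothetical, standing in for the lattice's Site objects):

import random

sites = [ 'site_0', 'site_1', 'site_2', 'site_3', 'site_4' ]
# random.sample draws k distinct elements, so each chosen site
# is occupied by exactly one atom.
occupied = random.sample( sites, 3 )
print( occupied )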
<SYSTEM_TASK:> Select a jump at random from all potential jumps, then update the lattice state. <END_TASK> <USER_TASK:> Description: def jump( self ): """ Select a jump at random from all potential jumps, then update the lattice state. Args: None Returns: (Float): The timestep until this jump takes place. """
potential_jumps = self.potential_jumps()
if not potential_jumps:
    raise BlockedLatticeError('No moves are possible in this lattice')
all_transitions = transitions.Transitions( potential_jumps )
random_jump = all_transitions.random()
delta_t = all_transitions.time_to_jump()
self.time += delta_t
self.update_site_occupation_times( delta_t )
self.update( random_jump )
return delta_t
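For reference, a self-contained sketch of the rejection-free KMC step this method performs: pick one event with probability proportional to its rate, then advance the clock by an exponentially distributed residence time. The rate list is illustrative, not part of the lattice_mc API.

import math
import random

def kmc_step( rates ):
    """One rejection-free KMC step over a list of event rates."""
    k_tot = sum( rates )
    x = random.random() * k_tot
    cumulative = 0.0
    chosen = len( rates ) - 1  # guard against floating-point edge cases
    for i, k in enumerate( rates ):
        cumulative += k
        if x < cumulative:
            chosen = i
            break
    # Residence time for a Poisson process with total rate k_tot.
    delta_t = -math.log( random.random() ) / k_tot
    return chosen, delta_t

event, dt = kmc_step( [ 0.5, 1.5, 2.0 ] )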
<SYSTEM_TASK:> Average site occupation for each site type <END_TASK> <USER_TASK:> Description: def site_occupation_statistics( self ): """ Average site occupation for each site type Args: None Returns: (Dict(Str:Float)): Dictionary of occupation statistics, e.g.:: { 'A' : 2.5, 'B' : 25.3 } """
if self.time == 0.0:
    return None
occupation_stats = { label : 0.0 for label in self.site_labels }
for site in self.sites:
    occupation_stats[ site.label ] += site.time_occupied
for label in self.site_labels:
    occupation_stats[ label ] /= self.time
return occupation_stats
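The returned statistic is a time average: total occupied time per label divided by total simulation time, i.e. the mean number of occupied sites of that type at any instant. A toy check of the arithmetic, with invented numbers:

time_occupied = { 'A' : 12.0, 'B' : 2.0 }  # summed over all sites of each type
total_time = 4.0
stats = { label : t / total_time for label, t in time_occupied.items() }
print( stats )  # {'A': 3.0, 'B': 0.5} -- on average 3 'A' sites occupied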
<SYSTEM_TASK:> Set the energies for every site in the lattice according to the site labels. <END_TASK> <USER_TASK:> Description: def set_site_energies( self, energies ): """ Set the energies for every site in the lattice according to the site labels. Args: energies (Dict(Str:Float)): Dictionary of energies for each site label, e.g.:: { 'A' : 1.0, 'B' : 0.0 } Returns: None """
self.site_energies = energies
for site_label in energies:
    for site in self.sites:
        if site.label == site_label:
            site.energy = energies[ site_label ]
<SYSTEM_TASK:> Set the coordination number dependent energies for this lattice. <END_TASK> <USER_TASK:> Description: def set_cn_energies( self, cn_energies ): """ Set the coordination number dependent energies for this lattice. Args: cn_energies (Dict(Str:Dict(Int:Float))): Dictionary of dictionaries specifying the coordination number dependent energies for each site type. e.g.:: { 'A' : { 0 : 0.0, 1 : 1.0, 2 : 2.0 }, 'B' : { 0 : 0.0, 1 : 2.0 } } Returns: None """
for site in self.sites:
    site.set_cn_occupation_energies( cn_energies[ site.label ] )
self.cn_energies = cn_energies
<SYSTEM_TASK:> Returns a dictionary of coordination numbers for each site type. <END_TASK> <USER_TASK:> Description: def site_specific_coordination_numbers( self ): """ Returns a dictionary of coordination numbers for each site type. Args: None Returns: (Dict(Str:List(Int))) : Dictionary of coordination numbers for each site type, e.g.:: { 'A' : [ 2, 4 ], 'B' : [ 2 ] } """
specific_coordination_numbers = {}
for site in self.sites:
    specific_coordination_numbers[ site.label ] = site.site_specific_neighbours()
return specific_coordination_numbers
<SYSTEM_TASK:> Selects a random subset of sites with a specific label and gives them a different label. <END_TASK> <USER_TASK:> Description: def transmute_sites( self, old_site_label, new_site_label, n_sites_to_change ): """ Selects a random subset of sites with a specific label and gives them a different label. Args: old_site_label (String or List(String)): Site label(s) of the sites to be modified. new_site_label (String): Site label to be applied to the modified sites. n_sites_to_change (Int): Number of sites to modify. Returns: None """
selected_sites = self.select_sites( old_site_label )
for site in random.sample( selected_sites, n_sites_to_change ):
    site.label = new_site_label
self.site_labels = set( [ site.label for site in self.sites ] )
<SYSTEM_TASK:> Searches the lattice to find sets of sites that are contiguously neighbouring. <END_TASK> <USER_TASK:> Description: def connected_sites( self, site_labels=None ): """ Searches the lattice to find sets of sites that are contiguously neighbouring. Mutually exclusive sets of contiguous sites are returned as Cluster objects. Args: site_labels (:obj:(List(Str)|Set(Str)|Str), optional): Labels for sites to be considered in the search. This can be a list:: [ 'A', 'B' ] a set:: { 'A', 'B' } or a string:: 'A'. Returns: (List(Cluster)): List of Cluster objects for groups of contiguous sites. """
if site_labels:
    selected_sites = self.select_sites( site_labels )
else:
    selected_sites = self.sites
initial_clusters = [ cluster.Cluster( [ site ] ) for site in selected_sites ]
if site_labels:
    blocking_sites = self.site_labels - set( site_labels )
    for c in initial_clusters:
        c.remove_sites_from_neighbours( blocking_sites )
final_clusters = []
while initial_clusters: # loop until initial_clusters is empty
    this_cluster = initial_clusters.pop(0)
    while this_cluster.neighbours:
        neighbouring_clusters = [ c for c in initial_clusters if this_cluster.is_neighbouring( c ) ]
        for nc in neighbouring_clusters:
            initial_clusters.remove( nc )
            this_cluster = this_cluster.merge( nc )
    final_clusters.append( this_cluster )
return final_clusters
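The merging loop above is a connected-components search. The same idea, expressed as a generic depth-first search over an adjacency map rather than over Cluster objects (a sketch, not the lattice_mc data structures):

def connected_components( adjacency ):
    """adjacency: dict mapping each node to a set of neighbouring nodes."""
    unvisited = set( adjacency )
    components = []
    while unvisited:
        stack = [ unvisited.pop() ]
        component = set( stack )
        while stack:
            node = stack.pop()
            for neighbour in adjacency[ node ]:
                if neighbour in unvisited:
                    unvisited.remove( neighbour )
                    component.add( neighbour )
                    stack.append( neighbour )
        components.append( component )
    return components

print( connected_components( { 1 : { 2 }, 2 : { 1 }, 3 : set() } ) )
# two components: {1, 2} and {3}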
<SYSTEM_TASK:> Selects sites in the lattice with specified labels. <END_TASK> <USER_TASK:> Description: def select_sites( self, site_labels ): """ Selects sites in the lattice with specified labels. Args: site_labels (List(Str)|Set(Str)|Str): Labels of sites to select. This can be a List [ 'A', 'B' ], a Set { 'A', 'B' }, or a String 'A'. Returns: (List(Site)): List of sites with labels given by `site_labels`. """
if type( site_labels ) in ( list, set ):
    selected_sites = [ s for s in self.sites if s.label in site_labels ]
elif type( site_labels ) is str:
    # == rather than is: identity comparison of strings is unreliable.
    selected_sites = [ s for s in self.sites if s.label == site_labels ]
else:
    raise ValueError( str( site_labels ) )
return selected_sites
<SYSTEM_TASK:> Combine two clusters into a single cluster. <END_TASK> <USER_TASK:> Description: def merge( self, other_cluster ): """ Combine two clusters into a single cluster. Args: other_cluster (Cluster): The second cluster to combine. Returns: (Cluster): The combination of both clusters. """
new_cluster = Cluster( self.sites | other_cluster.sites )
new_cluster.neighbours = ( self.neighbours | other_cluster.neighbours ).difference( new_cluster.sites )
return new_cluster
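The set arithmetic is worth spelling out: the merged neighbour set is the union of both neighbour sets, minus any site that now lies inside the merged cluster. A worked example with plain integers standing in for Site objects:

a_sites, a_neighbours = { 1, 2 }, { 3, 4 }
b_sites, b_neighbours = { 3 }, { 2, 5 }
merged_sites = a_sites | b_sites                                     # {1, 2, 3}
merged_neighbours = ( a_neighbours | b_neighbours ) - merged_sites   # {4, 5}
print( merged_sites, merged_neighbours )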
<SYSTEM_TASK:> Finds the six sites with the maximum and minimum coordinates along x, y, and z. <END_TASK> <USER_TASK:> Description: def sites_at_edges( self ): """ Finds the six sites with the maximum and minimum coordinates along x, y, and z. Args: None Returns: (List(List)): In the order [ +x, -x, +y, -y, +z, -z ] """
min_x = min( [ s.r[0] for s in self.sites ] )
max_x = max( [ s.r[0] for s in self.sites ] )
min_y = min( [ s.r[1] for s in self.sites ] )
max_y = max( [ s.r[1] for s in self.sites ] )
min_z = min( [ s.r[2] for s in self.sites ] )
max_z = max( [ s.r[2] for s in self.sites ] )
x_max = [ s for s in self.sites if s.r[0] == max_x ]
x_min = [ s for s in self.sites if s.r[0] == min_x ]
y_max = [ s for s in self.sites if s.r[1] == max_y ]
y_min = [ s for s in self.sites if s.r[1] == min_y ]
z_max = [ s for s in self.sites if s.r[2] == max_z ]
z_min = [ s for s in self.sites if s.r[2] == min_z ]
return ( x_max, x_min, y_max, y_min, z_max, z_min )
<SYSTEM_TASK:> Logical check whether a cluster connects with itself across the periodic boundary conditions. <END_TASK> <USER_TASK:> Description: def is_periodically_contiguous( self ): """ Logical check whether a cluster connects with itself across the simulation periodic boundary conditions. Args: None Returns: (Bool, Bool, Bool): Contiguity along the x, y, and z coordinate axes. """
edges = self.sites_at_edges()
along_x = any( [ s2 in s1.p_neighbours for s1 in edges[0] for s2 in edges[1] ] )
along_y = any( [ s2 in s1.p_neighbours for s1 in edges[2] for s2 in edges[3] ] )
along_z = any( [ s2 in s1.p_neighbours for s1 in edges[4] for s2 in edges[5] ] )
return ( along_x, along_y, along_z )
<SYSTEM_TASK:> Removes sites from the set of neighbouring sites if these have labels in remove_labels. <END_TASK> <USER_TASK:> Description: def remove_sites_from_neighbours( self, remove_labels ): """ Removes sites from the set of neighbouring sites if these have labels in remove_labels. Args: remove_labels (List(Str)|Str): Site label(s) to be removed from the cluster neighbour set. Returns: None """
if type( remove_labels ) is str:
    remove_labels = [ remove_labels ]
self.neighbours = set( n for n in self.neighbours if n.label not in remove_labels )
<SYSTEM_TASK:> Cumulative sum of the relative probabilities for all possible jumps. <END_TASK> <USER_TASK:> Description: def cumulative_probabilities( self ): """ Cumulative sum of the relative probabilities for all possible jumps. Args: None Returns: (np.array): Cumulative sum of relative jump probabilities. """
partition_function = np.sum( self.p )
return np.cumsum( self.p ) / partition_function
<SYSTEM_TASK:> Select a jump at random with appropriate relative probabilities. <END_TASK> <USER_TASK:> Description: def random( self ): """ Select a jump at random with appropriate relative probabilities. Args: None Returns: (Jump): The randomly selected Jump. """
j = np.searchsorted( self.cumulative_probabilities(), random.random() )
return self.jumps[ j ]
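np.searchsorted on the cumulative distribution implements weighted random selection: a uniform draw falls inside an interval whose width equals that jump's relative probability. A quick standalone check of the resulting frequencies:

import numpy as np

p = np.array( [ 0.1, 0.4, 0.5 ] )        # relative jump probabilities
cumulative = np.cumsum( p ) / p.sum()    # [0.1, 0.5, 1.0]
rng = np.random.default_rng( 42 )
counts = np.zeros( 3, dtype=int )
for _ in range( 10000 ):
    counts[ np.searchsorted( cumulative, rng.random() ) ] += 1
print( counts / counts.sum() )  # approximately [0.1, 0.4, 0.5]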
<SYSTEM_TASK:> The timestep until the next jump. <END_TASK> <USER_TASK:> Description: def time_to_jump( self ): """ The timestep until the next jump. Args: None Returns: (Float): The timestep until the next jump. """
k_tot = rate_prefactor * np.sum( self.p )
return -( 1.0 / k_tot ) * math.log( random.random() )
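The expression -ln(u)/k_tot draws from an exponential distribution with mean 1/k_tot, the waiting time between events in a Poisson process with total rate k_tot. A numerical sanity check of that mean, with rate_prefactor set to 1 for illustration:

import math
import random

random.seed( 0 )
k_tot = 2.0
samples = [ -( 1.0 / k_tot ) * math.log( random.random() ) for _ in range( 100000 ) ]
print( sum( samples ) / len( samples ) )  # approximately 1/k_tot = 0.5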
<SYSTEM_TASK:> Get IP from request. <END_TASK> <USER_TASK:> Description: def _get_real_ip(self): """ Get IP from request. :param request: A usual request object :type request: HttpRequest :return: ipv4 string or None """
try:
    # Trying to work with most common proxy headers
    real_ip = self.request.META['HTTP_X_FORWARDED_FOR']
    return real_ip.split(',')[0]
except KeyError:
    return self.request.META['REMOTE_ADDR']
except Exception:
    # Unknown IP
    return None
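The header precedence matters: X-Forwarded-For may carry a comma-separated chain, and only the leftmost entry is the originating client. A standalone sketch of the same logic, with a plain dict standing in for Django's request.META:

def client_ip(meta):
    forwarded = meta.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        # Leftmost entry is the client; later entries are the proxies
        # the request passed through.
        return forwarded.split(',')[0].strip()
    return meta.get('REMOTE_ADDR')

print(client_ip({'HTTP_X_FORWARDED_FOR': '203.0.113.7, 10.0.0.1'}))  # 203.0.113.7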
<SYSTEM_TASK:> Fetches IpRange instance if request IP is found in database. <END_TASK> <USER_TASK:> Description: def _get_ip_range(self): """ Fetches IpRange instance if request IP is found in database. :param request: A usual request object :type request: HttpRequest :return: IpRange object or None """
ip = self._get_real_ip()
try:
    geobase_entry = IpRange.objects.by_ip(ip)
except IpRange.DoesNotExist:
    geobase_entry = None
return geobase_entry
<SYSTEM_TASK:> Get location from cookie. <END_TASK> <USER_TASK:> Description: def _get_stored_location(self): """ Get location from cookie. :param request: A usual request object :type request: HttpRequest :return: Custom location model """
location_storage = storage_class(request=self.request, response=None)
return location_storage.get()
<SYSTEM_TASK:> Create from subdirectory or file info object. <END_TASK> <USER_TASK:> Description: def from_info(cls, container, info_obj): """Create from subdirectory or file info object."""
create_fn = cls.from_subdir if 'subdir' in info_obj else cls.from_file_info
return create_fn(container, info_obj)
<SYSTEM_TASK:> Create from subdirectory info object. <END_TASK> <USER_TASK:> Description: def from_subdir(cls, container, info_obj): """Create from subdirectory info object."""
return cls(container, info_obj['subdir'], obj_type=cls.type_cls.SUBDIR)
<SYSTEM_TASK:> Choose object type from content type. <END_TASK> <USER_TASK:> Description: def choose_type(cls, content_type): """Choose object type from content type."""
return cls.type_cls.SUBDIR if content_type in cls.subdir_types else cls.type_cls.FILE
<SYSTEM_TASK:> The change in system energy if this jump were accepted. <END_TASK> <USER_TASK:> Description: def delta_E( self ): """ The change in system energy if this jump were accepted. Args: None Returns: (Float): delta E """
site_delta_E = self.final_site.energy - self.initial_site.energy
if self.nearest_neighbour_energy:
    site_delta_E += self.nearest_neighbour_delta_E()
if self.coordination_number_energy:
    site_delta_E += self.coordination_number_delta_E()
return site_delta_E
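delta_E typically enters the jump probability through a Boltzmann factor. How lattice_mc maps delta_E to a rate is defined elsewhere (e.g. in the lookup table), so the following Metropolis-style acceptance rule is only a generic sketch, with kT normalised to 1 as an assumption:

import math
import random

def metropolis_accept( delta_E, kT=1.0 ):
    """Accept downhill moves always; uphill moves with Boltzmann probability."""
    if delta_E <= 0.0:
        return True
    return random.random() < math.exp( -delta_E / kT )

print( metropolis_accept( 0.5 ) )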
<SYSTEM_TASK:> Nearest-neighbour interaction contribution to the change in system energy if this jump were accepted. <END_TASK> <USER_TASK:> Description: def nearest_neighbour_delta_E( self ): """ Nearest-neighbour interaction contribution to the change in system energy if this jump were accepted. Args: None Returns: (Float): delta E (nearest-neighbour) """
# -1 because the hopping ion is not counted in the final site occupation number
delta_nn = self.final_site.nn_occupation() - self.initial_site.nn_occupation() - 1
return ( delta_nn * self.nearest_neighbour_energy )
<SYSTEM_TASK:> Coordination-number dependent energy contribution to the change in system energy if this jump were accepted. <END_TASK> <USER_TASK:> Description: def coordination_number_delta_E( self ): """ Coordination-number dependent energy contribution to the change in system energy if this jump were accepted. Args: None Returns: (Float): delta E (coordination-number) """
initial_site_neighbours = [ s for s in self.initial_site.p_neighbours if s.is_occupied ] # excludes final site, since this is always unoccupied
final_site_neighbours = [ s for s in self.final_site.p_neighbours if s.is_occupied and s is not self.initial_site ] # excludes initial site
initial_cn_occupation_energy = ( self.initial_site.cn_occupation_energy()
    + sum( [ site.cn_occupation_energy() for site in initial_site_neighbours ] )
    + sum( [ site.cn_occupation_energy() for site in final_site_neighbours ] ) )
final_cn_occupation_energy = ( self.final_site.cn_occupation_energy( delta_occupation = { self.initial_site.label : -1 } )
    + sum( [ site.cn_occupation_energy( delta_occupation = { self.initial_site.label : -1 } ) for site in initial_site_neighbours ] )
    + sum( [ site.cn_occupation_energy( delta_occupation = { self.final_site.label : +1 } ) for site in final_site_neighbours ] ) )
return ( final_cn_occupation_energy - initial_cn_occupation_energy )
<SYSTEM_TASK:> Particle displacement vector for this jump <END_TASK> <USER_TASK:> Description: def dr( self, cell_lengths ): """ Particle displacement vector for this jump Args: cell_lengths (np.array(x,y,z)): Cell lengths for the orthogonal simulation cell. Returns: (np.array(x,y,z)): dr """
half_cell_lengths = cell_lengths / 2.0
this_dr = self.final_site.r - self.initial_site.r
for i in range( 3 ):
    if this_dr[ i ] > half_cell_lengths[ i ]:
        this_dr[ i ] -= cell_lengths[ i ]
    if this_dr[ i ] < -half_cell_lengths[ i ]:
        this_dr[ i ] += cell_lengths[ i ]
return this_dr
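This is the minimum image convention for an orthorhombic cell. An equivalent vectorised form using np.round, identical to the loop above for displacements shorter than 1.5 cell lengths (which always holds for single jumps):

import numpy as np

def minimum_image( dr, cell_lengths ):
    """Wrap a displacement vector onto its closest periodic image."""
    return dr - cell_lengths * np.round( dr / cell_lengths )

cell = np.array( [ 10.0, 10.0, 10.0 ] )
print( minimum_image( np.array( [ 9.0, -6.0, 2.0 ] ), cell ) )  # [-1.  4.  2.]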
<SYSTEM_TASK:> Relative probability of accepting this jump from a lookup-table. <END_TASK> <USER_TASK:> Description: def relative_probability_from_lookup_table( self, jump_lookup_table ): """ Relative probability of accepting this jump from a lookup-table. Args: jump_lookup_table (LookupTable): the lookup table to be used for this jump. Returns: (Float): relative probability of accepting this jump. """
l1 = self.initial_site.label
l2 = self.final_site.label
c1 = self.initial_site.nn_occupation()
c2 = self.final_site.nn_occupation()
return jump_lookup_table.jump_probability[ l1 ][ l2 ][ c1 ][ c2 ]