<SYSTEM_TASK:> Write a list of indexes. <END_TASK> <USER_TASK:> Description: def _write_indexlist(self, name): """Write a list of indexes."""
d = [self._site.timeline.index(p) for p in getattr(self._site, name)]
self.db.delete('site:{0}'.format(name))
if d:
    self.db.rpush('site:{0}'.format(name), *d)
<SYSTEM_TASK:> Get timeline, reloading the site if needed. <END_TASK> <USER_TASK:> Description: def timeline(self): """Get timeline, reloading the site if needed."""
rev = int(self.db.get('site:rev'))
if rev != self.revision:
    self.reload_site()
return self._timeline
<SYSTEM_TASK:> Get posts, reloading the site if needed. <END_TASK> <USER_TASK:> Description: def posts(self): """Get posts, reloading the site if needed."""
rev = int(self.db.get('site:rev'))
if rev != self.revision:
    self.reload_site()
return self._posts
<SYSTEM_TASK:> Get all_posts, reloading the site if needed. <END_TASK> <USER_TASK:> Description: def all_posts(self): """Get all_posts, reloading the site if needed."""
rev = self.db.get('site:rev')
if int(rev) != self.revision:
    self.reload_site()
return self._all_posts
<SYSTEM_TASK:> Get pages, reloading the site if needed. <END_TASK> <USER_TASK:> Description: def pages(self): """Get pages, reloading the site if needed."""
rev = self.db.get('site:rev')
if int(rev) != self.revision:
    self.reload_site()
return self._pages
<SYSTEM_TASK:> Dictionary of options which affect the curve fitting algorithm. <END_TASK> <USER_TASK:> Description: def options(self): """ Dictionary of options which affect the curve fitting algorithm. Must contain the key `fit_function` which must be set to the function that will perform the fit. All other options are passed as keyword arguments to the `fit_function`. The default options use `scipy.optimize.curve_fit`. If `fit_function` has the special value `lmfit`, then [lmfit][1] is used for the fit and all other options are passed as keyword arguments to [`lmfit.minimize`][2]. When using [lmfit][1], additional control of the fit is obtained by overriding `scipy_data_fitting.Fit.lmfit_fcn2min`. Any other function may be used for `fit_function` that satisfies the following criteria: * Must accept the following non-keyword arguments in this order (even if unused in the fitting function): 1. Function to fit, see `scipy_data_fitting.Fit.function`. 2. Independent values: see `scipy_data_fitting.Data.array`. 3. Dependent values: see `scipy_data_fitting.Data.array`. 4. List of the initial fitting parameter guesses in same order as given by `scipy_data_fitting.Fit.fitting_parameters`. The initial guesses will be scaled by their prefix before being passed. * Can accept any keyword arguments set in `scipy_data_fitting.Fit.options`. For example, this is how one could pass error values to the fitting function. * Must return an object whose first element is a list or array of the values of the fitted parameters (and only those values) in same order as given by `scipy_data_fitting.Fit.fitting_parameters`. Default: #!python { 'fit_function': scipy.optimize.curve_fit, 'maxfev': 1000, } [1]: http://lmfit.github.io/lmfit-py/ [2]: http://lmfit.github.io/lmfit-py/fitting.html#the-minimize-function """
if not hasattr(self, '_options'):
    self._options = {
        'fit_function': scipy.optimize.curve_fit,
        'maxfev': 1000,
    }
return self._options
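A hedged usage sketch of overriding these options (`fit` and `y_errors` are hypothetical names, not from the source). Because the property returns the cached dict itself, its entries can be mutated in place:

fit.options['maxfev'] = 5000                 # tune the scipy backend
fit.options['sigma'] = y_errors              # a curve_fit keyword, passed through
# fit.options['fit_function'] = 'lmfit'      # or switch to the lmfit backend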
<SYSTEM_TASK:> Limits to use for the independent variable whenever <END_TASK> <USER_TASK:> Description: def limits(self): """ Limits to use for the independent variable whenever creating a linspace, plot, etc. Defaults to `(-x, x)` where `x` is the largest absolute value of the data corresponding to the independent variable. If no such values are negative, defaults to `(0, x)` instead. """
if not hasattr(self, '_limits'):
    xmax = max(abs(self.data.array[0]))
    xmin = min(self.data.array[0])
    x_error = self.data.error[0]
    if isinstance(x_error, numpy.ndarray):
        if x_error.ndim == 0:
            xmax = xmax + x_error
    if xmin < 0:
        self._limits = (-xmax, xmax)
    else:
        self._limits = (0, xmax)
return self._limits
<SYSTEM_TASK:> A flat tuple of all values corresponding to `scipy_data_fitting.Fit.fixed_parameters` <END_TASK> <USER_TASK:> Description: def fixed_values(self): """ A flat tuple of all values corresponding to `scipy_data_fitting.Fit.fixed_parameters` and `scipy_data_fitting.Fit.constants` after applying any prefixes. The values mimic the order of those lists. """
values = []
values.extend([
    prefix_factor(param) * param['value']
    for param in self.fixed_parameters
])
values.extend([
    prefix_factor(const) * get_constant(const['value'])
    for const in self.constants
])
return tuple(values)
<SYSTEM_TASK:> The function passed to the `fit_function` specified in `scipy_data_fitting.Fit.options`, <END_TASK> <USER_TASK:> Description: def function(self): """ The function passed to the `fit_function` specified in `scipy_data_fitting.Fit.options`, and used by `scipy_data_fitting.Fit.pointspace` to generate plots, etc. Its number of arguments and their order is determined by items 1, 2, and 3 as listed in `scipy_data_fitting.Fit.all_variables`. All parameter values will be multiplied by their corresponding prefix before being passed to this function. By default, it is a functional form of `scipy_data_fitting.Fit.expression` converted using `scipy_data_fitting.Model.lambdify`. See also `scipy_data_fitting.Fit.lambdify_options`. """
if not hasattr(self, '_function'):
    function = self.model.lambdify(
        self.expression, self.all_variables, **self.lambdify_options)
    self._function = lambda *x: function(*(x + self.fixed_values))
return self._function
<SYSTEM_TASK:> Fits `scipy_data_fitting.Fit.function` to the data and returns <END_TASK> <USER_TASK:> Description: def curve_fit(self): """ Fits `scipy_data_fitting.Fit.function` to the data and returns the output from the specified curve fit function. See `scipy_data_fitting.Fit.options` for details on how to control or override the curve fitting algorithm. """
if not hasattr(self, '_curve_fit'):
    options = self.options.copy()
    fit_function = options.pop('fit_function')
    independent_values = self.data.array[0]
    dependent_values = self.data.array[1]

    if fit_function == 'lmfit':
        self._curve_fit = lmfit.minimize(
            self.lmfit_fcn2min,
            self.lmfit_parameters,
            args=(independent_values, dependent_values, self.data.error),
            **options)
    else:
        p0 = [
            prefix_factor(param) * param['guess']
            for param in self.fitting_parameters
        ]
        self._curve_fit = fit_function(
            self.function, independent_values, dependent_values,
            p0, **options)
return self._curve_fit
<SYSTEM_TASK:> A tuple of fitted values for the `scipy_data_fitting.Fit.fitting_parameters`. <END_TASK> <USER_TASK:> Description: def fitted_parameters(self): """ A tuple of fitted values for the `scipy_data_fitting.Fit.fitting_parameters`. The values in this tuple are not scaled by the prefix, as they are passed back to `scipy_data_fitting.Fit.function`, e.g. in most standard use cases these would be the SI values. If no fitting parameters were specified, this will just return an empty tuple. """
if hasattr(self, '_fitted_parameters'):
    return self._fitted_parameters
if not self.fitting_parameters:
    return tuple()
if self.options['fit_function'] == 'lmfit':
    return tuple(
        self.curve_fit.params[key].value
        for key in sorted(self.curve_fit.params)
    )
else:
    return tuple(self.curve_fit[0])
<SYSTEM_TASK:> A function of the single independent variable after <END_TASK> <USER_TASK:> Description: def fitted_function(self): """ A function of the single independent variable after partially evaluating `scipy_data_fitting.Fit.function` at the `scipy_data_fitting.Fit.fitted_parameters`. """
function = self.function
fitted_parameters = self.fitted_parameters
return lambda x: function(x, *fitted_parameters)
<SYSTEM_TASK:> A list identical to what is set with `scipy_data_fitting.Fit.fitting_parameters`, <END_TASK> <USER_TASK:> Description: def computed_fitting_parameters(self): """ A list identical to what is set with `scipy_data_fitting.Fit.fitting_parameters`, but in each dictionary, the key `value` is added with the fitted value of the quantity. The reported value is scaled by the inverse prefix. """
fitted_parameters = []
for (i, v) in enumerate(self.fitting_parameters):
    param = v.copy()
    param['value'] = self.fitted_parameters[i] * prefix_factor(param)**(-1)
    fitted_parameters.append(param)
return fitted_parameters
<SYSTEM_TASK:> Returns a dictionary with the keys `data` and `fit`. <END_TASK> <USER_TASK:> Description: def pointspace(self, **kwargs): """ Returns a dictionary with the keys `data` and `fit`. `data` is just `scipy_data_fitting.Data.array`. `fit` is a two row [`numpy.ndarray`][1], the first row values correspond to the independent variable and are generated using [`numpy.linspace`][2]. The second row are the values of `scipy_data_fitting.Fit.fitted_function` evaluated on the linspace. For both `fit` and `data`, each row will be scaled by the corresponding inverse prefix if given in `scipy_data_fitting.Fit.independent` or `scipy_data_fitting.Fit.dependent`. Any keyword arguments are passed to [`numpy.linspace`][2]. [1]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html [2]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html """
scale_array = numpy.array([
    [prefix_factor(self.independent)**(-1)],
    [prefix_factor(self.dependent)**(-1)]
])
linspace = numpy.linspace(self.limits[0], self.limits[1], **kwargs)
return {
    'data': self.data.array * scale_array,
    'fit': numpy.array([linspace, self.fitted_function(linspace)]) * scale_array
}
<SYSTEM_TASK:> Write the results of the fit to a json file at `path`. <END_TASK> <USER_TASK:> Description: def to_json(self, path, points=50, meta=None): """ Write the results of the fit to a json file at `path`. `points` will define the length of the `fit` array. If `meta` is given, a `meta` key will be added with the given value. The json object has the form #!text { 'data': [ [x1, y1], [x2, y2], ... ], 'fit': [ [x1, y1], [x2, y2], ... ], 'meta': meta } """
pointspace = self.pointspace(num=points)
fit_points = numpy.dstack(pointspace['fit'])[0]
data_points = numpy.dstack(pointspace['data'])[0]
fit = [[point[0], point[1]] for point in fit_points]
data = [[point[0], point[1]] for point in data_points]
obj = {'data': data, 'fit': fit}
if meta:
    obj['meta'] = meta
# Use a context manager so the file is actually closed
# (the original called `f.close` without parentheses).
with open(path, 'w') as f:
    json.dump(obj, f)
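A hypothetical call, for illustration only (the file name and meta payload are made up):

fit.to_json('fit_result.json', points=100, meta={'sample': 'A1', 'temperature_K': 300})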
<SYSTEM_TASK:> Cleans the notebook to be suitable for inclusion in the docs. <END_TASK> <USER_TASK:> Description: def clean_for_doc(nb): """ Cleans the notebook to be suitable for inclusion in the docs. """
new_cells = []
for cell in nb.worksheets[0].cells:
    # Remove the pylab inline line.
    if "input" in cell and cell["input"].strip() == "%pylab inline":
        continue
    # Remove output resulting from the stream/trace method chaining.
    if "outputs" in cell:
        outputs = [_i for _i in cell["outputs"]
                   if "text" not in _i or
                   not _i["text"].startswith("<obspy.core")]
        cell["outputs"] = outputs
    new_cells.append(cell)
nb.worksheets[0].cells = new_cells
return nb
<SYSTEM_TASK:> Sets the callback function for updating the plot. <END_TASK> <USER_TASK:> Description: def set_call_back(self, func): """Sets the callback function for updating the plot. Implement the logic for reading the serial input in the callback function; any further processing of the signal, if necessary, also has to be done in this callback function."""
self.timer.add_callback(func)
self.timer.start()
<SYSTEM_TASK:> Define your callback function with the decorator @plotter.plot_self. <END_TASK> <USER_TASK:> Description: def plot_self(self, func): """Define your callback function with the decorator @plotter.plot_self. In the callback function, set the data of the lines in the plot using self.lines[i][j].set_data(your data)."""
def func_wrapper():
    func()
    try:
        self.manager.canvas.draw()
    except ValueError as ve:
        print(ve)
    except RuntimeError as rte:
        print(rte)
    except Exception as e:
        print(e)
return func_wrapper
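How the two pieces above might fit together, assuming `plotter` is an instance of this class and `read_serial` is a hypothetical data source:

@plotter.plot_self
def update():
    y = read_serial()                               # hypothetical serial read
    plotter.lines[0][0].set_data(range(len(y)), y)  # as the docstring suggests

plotter.set_call_back(update)  # update() now redraws the canvas on the timer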
<SYSTEM_TASK:> The meat. Filtering using Django model style syntax. <END_TASK> <USER_TASK:> Description: def filter(cls, **kwargs): """ The meat. Filtering using Django model style syntax. All kwargs are translated into attributes on the underlying objects. If the attribute is not found, it looks for a similar key in the tags. There are a couple of comparisons to check against as well: exact: check strict equality iexact: case insensitive exact like: check against regular expression ilike: case insensitive like contains: check if string is found within the attribute icontains: case insensitive contains startswith: check if attribute value starts with the string istartswith: case insensitive startswith endswith: check if attribute value ends with the string iendswith: case insensitive endswith isnull: check if the attribute does not exist >>> ec2.instances.filter(name__startswith='production') [ ... ] """
qs = cls.all()
for key in kwargs:
    # Bind `key` as a default argument so each filter keeps its own key;
    # a late-binding closure here breaks under Python 3's lazy filter().
    qs = filter(lambda i, key=key: make_compare(key, kwargs[key], i), qs)
return qs
<SYSTEM_TASK:> Convert list of key,value lists to dict <END_TASK> <USER_TASK:> Description: def list_of_lists_to_dict(l): """ Convert a list of key,value lists to a dict [['id', 1], ['id', 2], ['id', 3], ['foo', 4]] {'id': [1, 2, 3], 'foo': [4]} """
d = {}
for key, val in l:
    d.setdefault(key, []).append(val)
return d
<SYSTEM_TASK:> Dumps an integer into a base36 string. <END_TASK> <USER_TASK:> Description: def dumps(number): """Dumps an integer into a base36 string. :param number: the 10-based integer. :returns: the base36 string. """
if not isinstance(number, integer_types):
    raise TypeError('number must be an integer')
if number < 0:
    return '-' + dumps(-number)
value = ''
while number != 0:
    number, index = divmod(number, len(alphabet))
    value = alphabet[index] + value
return value or '0'
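For illustration, the inverse operation can lean on int()'s built-in base support; this loads() sketch assumes the module-level `alphabet` is the usual '0123456789abcdefghijklmnopqrstuvwxyz':

def loads(value):
    """Parse a base36 string back into an integer."""
    return int(value, 36)

assert loads(dumps(12345)) == 12345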
<SYSTEM_TASK:> Returns an aiohttp_auth.acl middleware factory for use by the aiohttp <END_TASK> <USER_TASK:> Description: def acl_middleware(callback): """Returns an aiohttp_auth.acl middleware factory for use by the aiohttp application object. Args: callback: This is a callable which takes a user_id (as returned from the auth.get_auth function), and expects a sequence of permitted ACL groups to be returned. This can be an empty tuple to represent no explicit permissions, or None to explicitly forbid this particular user_id. Note that the user_id passed may be None if no authenticated user exists. Returns: An aiohttp middleware factory. """
async def _acl_middleware_factory(app, handler):
    async def _middleware_handler(request):
        # Save the policy in the request
        request[GROUPS_KEY] = callback
        # Call the next handler in the chain
        return await handler(request)
    return _middleware_handler

return _acl_middleware_factory
<SYSTEM_TASK:> Returns the groups that the user in this request has access to. <END_TASK> <USER_TASK:> Description: async def get_user_groups(request): """Returns the groups that the user in this request has access to. This function gets the user id from the auth.get_auth function, and passes it to the ACL callback function to get the groups. Args: request: aiohttp Request object Returns: If the ACL callback function returns None, this function returns None. Otherwise this function returns the sequence of group permissions provided by the callback, plus the Everyone group. If user_id is not None, the AuthenticatedUser group and the user_id are added to the groups returned by the function Raises: RuntimeError: If the ACL middleware is not installed """
acl_callback = request.get(GROUPS_KEY)
if acl_callback is None:
    raise RuntimeError('acl_middleware not installed')

user_id = await get_auth(request)
groups = await acl_callback(user_id)
if groups is None:
    return None

user_groups = (Group.AuthenticatedUser, user_id) if user_id is not None else ()
return set(itertools.chain(groups, (Group.Everyone,), user_groups))
<SYSTEM_TASK:> Returns true if one of the groups in the request has the requested <END_TASK> <USER_TASK:> Description: async def get_permitted(request, permission, context): """Returns true if one of the groups in the request has the requested permission. The function takes a request, a permission to check for and a context. A context is a sequence of ACL tuples which consist of an Allow/Deny action, a group, and a sequence of permissions for that ACL group. For example:: context = [(Permission.Allow, 'view_group', ('view',)), (Permission.Allow, 'edit_group', ('view', 'edit')),] ACL tuple sequences are checked in order; the first tuple that matches a group the user is a member of, and includes the permission passed to the function, is the matching ACL group. If no ACL group is found, the function returns False. Groups and permissions need only be immutable objects, so they can be strings, numbers, enumerations, or other immutable objects. Args: request: aiohttp Request object permission: The specific permission requested. context: A sequence of ACL tuples Returns: The function gets the groups by calling get_user_groups() and returns true if the groups are Allowed the requested permission, false otherwise. Raises: RuntimeError: If the ACL middleware is not installed """
groups = await get_user_groups(request)
if groups is None:
    return False

for action, group, permissions in context:
    if group in groups:
        if permission in permissions:
            return action == Permission.Allow

return False
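A sketch of how a handler might call this directly, reusing the context from the docstring (the handler name is hypothetical; `web` is aiohttp's):

async def edit_page(request):
    context = [(Permission.Allow, 'view_group', ('view',)),
               (Permission.Allow, 'edit_group', ('view', 'edit'))]
    if not await get_permitted(request, 'edit', context):
        raise web.HTTPForbidden()
    ...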
<SYSTEM_TASK:> Return molecular weight of a molecule. <END_TASK> <USER_TASK:> Description: def molecular_weight(elements): """ Return molecular weight of a molecule. Parameters ---------- elements : numpy.ndarray An array of all elements (type: str) in a molecule. Returns ------- numpy.float64 A molecular weight of a molecule. """
return (np.array([atomic_mass[i.upper()] for i in elements]).sum())
<SYSTEM_TASK:> Return element for deciphered atom key. <END_TASK> <USER_TASK:> Description: def decipher_atom_key(atom_key, forcefield): """ Return element for deciphered atom key. This function checks if the forcefield specified by the user is supported and passes the atom key to the appropriate function for deciphering. Parameters ---------- atom_key : str The atom key which is to be deciphered. forcefield : str The forcefield to which the atom key belongs. Returns ------- str A string that is the periodic table element equivalent of the forcefield atom key. """
load_funcs = {
    'DLF': dlf_notation,
    'DL_F': dlf_notation,
    'OPLS': opls_notation,
    'OPLSAA': opls_notation,
    'OPLS2005': opls_notation,
    'OPLS3': opls_notation,
}
if forcefield.upper() in load_funcs.keys():
    return load_funcs[forcefield.upper()](atom_key)
else:
    raise _ForceFieldError(
        ("Unfortunately, '{0}' forcefield is not supported by pyWINDOW."
         " For list of supported forcefields see User's Manual or "
         "MolecularSystem._decipher_atom_keys() function doc string."
         ).format(forcefield))
<SYSTEM_TASK:> Return coordinates translated by some vector. <END_TASK> <USER_TASK:> Description: def shift_com(elements, coordinates, com_adjust=np.zeros(3)): """ Return coordinates translated by some vector. Parameters ---------- elements : numpy.ndarray An array of all elements (type: str) in a molecule. coordinates : numpy.ndarray An array containing molecule's coordinates. com_adjust : numpy.ndarray (default = [0, 0, 0]) Returns ------- numpy.ndarray Translated array of molecule's coordinates. """
com = center_of_mass(elements, coordinates)
com = np.array([com - com_adjust] * coordinates.shape[0])
return coordinates - com
<SYSTEM_TASK:> Return the maximum diameter of a molecule. <END_TASK> <USER_TASK:> Description: def max_dim(elements, coordinates): """ Return the maximum diameter of a molecule. Parameters ---------- elements : numpy.ndarray An array of all elements (type: str) in a molecule. coordinates : numpy.ndarray An array containing molecule's coordinates. Returns ------- tuple The indices of the two atoms that span the maximum dimension, followed by the maximum dimension itself: (i1, i2, maxdim). """
atom_vdw_vertical = np.matrix(
    [[atomic_vdw_radius[i.upper()]] for i in elements])
atom_vdw_horizontal = np.matrix(
    [atomic_vdw_radius[i.upper()] for i in elements])
dist_matrix = euclidean_distances(coordinates, coordinates)
vdw_matrix = atom_vdw_vertical + atom_vdw_horizontal
re_dist_matrix = dist_matrix + vdw_matrix
final_matrix = np.triu(re_dist_matrix)
i1, i2 = np.unravel_index(final_matrix.argmax(), final_matrix.shape)
maxdim = final_matrix[i1, i2]
return i1, i2, maxdim
<SYSTEM_TASK:> Return optimised pore diameter and its COM. <END_TASK> <USER_TASK:> Description: def opt_pore_diameter(elements, coordinates, bounds=None, com=None, **kwargs): """Return optimised pore diameter and its COM."""
args = elements, coordinates
if com is None:
    com = center_of_mass(elements, coordinates)
if bounds is None:
    pore_r = pore_diameter(elements, coordinates, com=com)[0] / 2
    bounds = (
        (com[0] - pore_r, com[0] + pore_r),
        (com[1] - pore_r, com[1] + pore_r),
        (com[2] - pore_r, com[2] + pore_r)
    )
minimisation = minimize(
    correct_pore_diameter, x0=com, args=args, bounds=bounds)
pored = pore_diameter(elements, coordinates, com=minimisation.x)
return (pored[0], pored[1], minimisation.x)
<SYSTEM_TASK:> Return the gyration tensor of a molecule. <END_TASK> <USER_TASK:> Description: def get_gyration_tensor(elements, coordinates): """ Return the gyration tensor of a molecule. The gyration tensor should be invariant to the molecule's position. The known formulas for the gyration tensor have the correction for the centre of mass of the molecule, therefore, the coordinates are first corrected for the centre of mass and essentially shifted to the origin. Parameters ---------- elements : numpy.ndarray The array containing the molecule's elemental data. coordinates : numpy.ndarray The array containing the Cartesian coordinates of the molecule. Returns ------- numpy.ndarray The gyration tensor of a molecule invariant to the molecule's position. """
# First calculate COM for correction.
com = centre_of_mass(elements, coordinates)
# Correct the coordinates for the COM.
coordinates = coordinates - com
# Calculate diagonal and then other values of the matrix.
diag = np.sum(coordinates**2, axis=0)
xy = np.sum(coordinates[:, 0] * coordinates[:, 1])
xz = np.sum(coordinates[:, 0] * coordinates[:, 2])
yz = np.sum(coordinates[:, 1] * coordinates[:, 2])
S = np.array([[diag[0], xy, xz],
              [xy, diag[1], yz],
              [xz, yz, diag[2]]]) / coordinates.shape[0]
return S
<SYSTEM_TASK:> Return the tensor of inertia of a molecule. <END_TASK> <USER_TASK:> Description: def get_inertia_tensor(elements, coordinates): """ Return the tensor of inertia of a molecule. Parameters ---------- elements : numpy.ndarray The array containing the molecule's elemental data. coordinates : numpy.ndarray The array containing the Cartesian coordinates of the molecule. Returns ------- numpy.ndarray The tensor of inertia of a molecule. """
pow2 = coordinates**2
molecular_weight = np.array(
    [[atomic_mass[e.upper()]] for e in elements])

diag_1 = np.sum(molecular_weight * (pow2[:, 1] + pow2[:, 2]))
diag_2 = np.sum(molecular_weight * (pow2[:, 0] + pow2[:, 2]))
diag_3 = np.sum(molecular_weight * (pow2[:, 0] + pow2[:, 1]))

mxy = np.sum(-molecular_weight * coordinates[:, 0] * coordinates[:, 1])
mxz = np.sum(-molecular_weight * coordinates[:, 0] * coordinates[:, 2])
myz = np.sum(-molecular_weight * coordinates[:, 1] * coordinates[:, 2])

inertia_tensor = np.array([[diag_1, mxy, mxz],
                           [mxy, diag_2, myz],
                           [mxz, myz, diag_3]]) / coordinates.shape[0]
return inertia_tensor
<SYSTEM_TASK:> Normalize a vector. <END_TASK> <USER_TASK:> Description: def normalize_vector(vector): """ Normalize a vector. A new vector is returned, the original vector is not modified. Parameters ---------- vector : np.array The vector to be normalized. Returns ------- np.array The normalized vector. """
v = np.divide(vector, np.linalg.norm(vector))
return np.round(v, decimals=4)
<SYSTEM_TASK:> Return a rotation matrix of `angle` radians about `axis`. <END_TASK> <USER_TASK:> Description: def rotation_matrix_arbitrary_axis(angle, axis): """ Return a rotation matrix of `angle` radians about `axis`. Parameters ---------- angle : int or float The size of the rotation in radians. axis : numpy.array A 3 element array which represents a vector. The vector is the axis about which the rotation is carried out. Returns ------- numpy.array A 3x3 array representing a rotation matrix. """
axis = normalize_vector(axis)

a = np.cos(angle / 2)
b, c, d = axis * np.sin(angle / 2)

e11 = np.square(a) + np.square(b) - np.square(c) - np.square(d)
e12 = 2 * (b * c - a * d)
e13 = 2 * (b * d + a * c)

e21 = 2 * (b * c + a * d)
e22 = np.square(a) + np.square(c) - np.square(b) - np.square(d)
e23 = 2 * (c * d - a * b)

e31 = 2 * (b * d - a * c)
e32 = 2 * (c * d + a * b)
e33 = np.square(a) + np.square(d) - np.square(b) - np.square(c)

return np.array([[e11, e12, e13], [e21, e22, e23], [e31, e32, e33]])
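A quick sanity check of the quaternion-derived matrix, assuming numpy is imported as np: rotating the x unit vector by 90 degrees about z should give (approximately) the y unit vector.

R = rotation_matrix_arbitrary_axis(np.pi / 2, np.array([0, 0, 1]))
print(R @ np.array([1, 0, 0]))  # ~[0, 1, 0], up to the 4-decimal rounding in normalize_vector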
<SYSTEM_TASK:> Return crystallographic param. from unit cell lattice matrix. <END_TASK> <USER_TASK:> Description: def lattice_array_to_unit_cell(lattice_array): """Return crystallographic param. from unit cell lattice matrix."""
cell_lengths = np.sqrt(np.sum(lattice_array**2, axis=0))
gamma_r = np.arccos(lattice_array[0][1] / cell_lengths[1])
beta_r = np.arccos(lattice_array[0][2] / cell_lengths[2])
alpha_r = np.arccos(
    lattice_array[1][2] * np.sin(gamma_r) / cell_lengths[2]
    + np.cos(beta_r) * np.cos(gamma_r)
)
cell_angles = [
    np.rad2deg(alpha_r), np.rad2deg(beta_r), np.rad2deg(gamma_r)
]
return np.append(cell_lengths, cell_angles)
<SYSTEM_TASK:> Return a fractional coordinate from a cartesian one. <END_TASK> <USER_TASK:> Description: def fractional_from_cartesian(coordinate, lattice_array): """Return a fractional coordinate from a cartesian one."""
deorthogonalisation_M = np.matrix(np.linalg.inv(lattice_array))
fractional = deorthogonalisation_M * coordinate.reshape(-1, 1)
return np.array(fractional.reshape(1, -1))
<SYSTEM_TASK:> Return cartesian coordinate from a fractional one. <END_TASK> <USER_TASK:> Description: def cartisian_from_fractional(coordinate, lattice_array): """Return cartesian coordinate from a fractional one."""
orthogonalisation_M = np.matrix(lattice_array)
orthogonal = orthogonalisation_M * coordinate.reshape(-1, 1)
return np.array(orthogonal.reshape(1, -1))
<SYSTEM_TASK:> Convert all cartesian coordinates to fractional. <END_TASK> <USER_TASK:> Description: def cart2frac_all(coordinates, lattice_array): """Convert all cartesian coordinates to fractional."""
frac_coordinates = deepcopy(coordinates)
for coord in range(frac_coordinates.shape[0]):
    frac_coordinates[coord] = fractional_from_cartesian(
        frac_coordinates[coord], lattice_array)
return frac_coordinates
<SYSTEM_TASK:> Convert all fractional coordinates to cartesian. <END_TASK> <USER_TASK:> Description: def frac2cart_all(frac_coordinates, lattice_array): """Convert all fractional coordinates to cartesian."""
coordinates = deepcopy(frac_coordinates)
for coord in range(coordinates.shape[0]):
    coordinates[coord] = cartisian_from_fractional(
        coordinates[coord], lattice_array)
return coordinates
<SYSTEM_TASK:> Calculate the angle between two vectors x and y. <END_TASK> <USER_TASK:> Description: def angle_between_vectors(x, y): """Calculate the angle between two vectors x and y."""
first_step = abs(x[0] * y[0] + x[1] * y[1] + x[2] * y[2]) / (
    np.sqrt(x[0]**2 + x[1]**2 + x[2]**2) *
    np.sqrt(y[0]**2 + y[1]**2 + y[2]**2))
second_step = np.arccos(first_step)
return second_step
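Note that the abs() in the numerator folds the cosine into [0, 1], so the returned angle is always the acute angle between the two lines, in [0, pi/2]. For example:

print(angle_between_vectors(np.array([1, 0, 0]), np.array([-1, 0, 0])))  # 0.0, not pi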
<SYSTEM_TASK:> Analyse a sampling vector's path for window analysis purpose. <END_TASK> <USER_TASK:> Description: def vector_analysis(vector, coordinates, elements_vdw, increment=1.0): """Analyse a sampling vector's path for window analysis purpose."""
# Calculate number of chunks if vector length is divided by increment.
chunks = int(np.linalg.norm(vector) // increment)
# Create a single chunk.
chunk = vector / chunks
# Calculate set of points on vector's path every increment.
vector_pathway = np.array([chunk * i for i in range(chunks + 1)])
analysed_vector = np.array([
    np.amin(
        euclidean_distances(coordinates, i.reshape(1, -1)) - elements_vdw)
    for i in vector_pathway
])
if all(i > 0 for i in analysed_vector):
    pos = np.argmin(analysed_vector)
    # As first argument we need to give the distance from the origin.
    dist = np.linalg.norm(chunk * pos)
    return np.array(
        [dist, analysed_vector[pos] * 2, *(chunk * pos), *vector])
<SYSTEM_TASK:> Return negative pore diameter for x and y coordinates optimisation. <END_TASK> <USER_TASK:> Description: def optimise_xy(xy, *args): """Return negative pore diameter for x and y coordinates optimisation."""
z, elements, coordinates = args
window_com = np.array([xy[0], xy[1], z])
return -pore_diameter(elements, coordinates, com=window_com)[0]
<SYSTEM_TASK:> Return pore diameter for coordinates optimisation in z direction. <END_TASK> <USER_TASK:> Description: def optimise_z(z, *args): """Return pore diameter for coordinates optimisation in z direction."""
x, y, elements, coordinates = args
window_com = np.array([x, y, z])
return pore_diameter(elements, coordinates, com=window_com)[0]
<SYSTEM_TASK:> Generate random samples from the distribution <END_TASK> <USER_TASK:> Description: def rand(self, n=1): """ Generate random samples from the distribution Parameters ---------- n : int, optional(default=1) The number of samples to generate Returns ------- out : array_like The generated samples """
if n == 1:
    return self._rand1()
else:
    out = np.empty((n, self._p, self._p))
    for i in range(n):
        out[i] = self._rand1()
    return out
<SYSTEM_TASK:> Generate the matrix A in the Bartlett decomposition <END_TASK> <USER_TASK:> Description: def _genA(self): """ Generate the matrix A in the Bartlett decomposition A is a lower triangular matrix, with A(i, j) ~ sqrt of Chisq(df - i + 1) when i == j ~ Normal() when i > j """
p, df = self._p, self.df
A = np.zeros((p, p))
for i in range(p):
    A[i, i] = sqrt(st.chi2.rvs(df - i))
for j in range(p - 1):
    for i in range(j + 1, p):
        A[i, j] = np.random.randn()
return A
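For context, the Bartlett factor A is typically combined with the Cholesky factor of the scale matrix to produce one Wishart draw. A self-contained sketch of that step, independent of this class (function name and structure are illustrative):

import numpy as np
import scipy.stats as st
from math import sqrt

def wishart_draw(df, scale):
    """One Wishart(df, scale) sample via the Bartlett decomposition."""
    p = scale.shape[0]
    L = np.linalg.cholesky(scale)
    A = np.zeros((p, p))
    for i in range(p):
        A[i, i] = sqrt(st.chi2.rvs(df - i))   # sqrt of chi-square on the diagonal
    for j in range(p - 1):
        for i in range(j + 1, p):
            A[i, j] = np.random.randn()        # standard normals below the diagonal
    LA = L @ A
    return LA @ LA.T                           # W = L A A^T L^T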
<SYSTEM_TASK:> This function opens any type of a readable file and decomposes <END_TASK> <USER_TASK:> Description: def load_file(self, filepath): """ This function opens any type of readable file and decomposes the file object into a list containing, for each line, a list of the line's strings split on whitespace. Parameters ---------- filepath : :class:`str` The full path or a relative path to any type of file. Returns ------- :class:`dict` Returns a dictionary containing the molecular information extracted from the input files. This information will vary with file type and the information stored in it. The data is sorted into lists that contain one feature, for example key atom_id: [atom_id_1, atom_id_2] Over the course of the analysis this dictionary will be updated with new data. """
self.file_path = filepath
_, self.file_type = os.path.splitext(filepath)
_, self.file_name = os.path.split(filepath)
with open(filepath) as ffile:
    self.file_content = ffile.readlines()
return self._load_funcs[self.file_type]()
<SYSTEM_TASK:> Dump a dictionary into a JSON dictionary. <END_TASK> <USER_TASK:> Description: def dump2json(self, obj, filepath, override=False, **kwargs): """ Dump a dictionary into a JSON dictionary. Uses the json.dump() function. Parameters ---------- obj : :class:`dict` A dictionary to be dumped as a JSON file. filepath : :class:`str` The filepath for the dumped file. override : :class:`bool` If True, any file at the filepath will be overwritten. (default=False) """
# We make sure that the object passed by the user is a dictionary.
if not isinstance(obj, dict):
    raise _NotADictionary(
        "This function only accepts dictionaries as input")
# We check if the filepath has a json extension, if not we add it.
if str(filepath[-4:]) != 'json':
    filepath = ".".join((str(filepath), "json"))
# First we check if the file already exists. If yes and the override
# keyword is False (default), we will raise an exception. Otherwise
# the file will be overwritten.
if override is False:
    if os.path.isfile(filepath):
        raise _FileAlreadyExists(
            "The file {0} already exists. Use a different filepath, "
            "or set the 'override' kwarg to True.".format(filepath))
# We dump the object to the json file. Additional kwargs can be passed.
with open(filepath, 'w+') as json_file:
    json.dump(obj, json_file, **kwargs)
<SYSTEM_TASK:> Creates a new object from pretreated input data. <END_TASK> <USER_TASK:> Description: def create(self, data, update=False, **kwargs): """ Creates a new object from pretreated input data. .. code-block:: python DBSession.sacrud(Users).create({'name': 'Vasya', 'sex': 1}) Supports JSON: .. code-block:: python DBSession.sacrud(Users).create('{"name": "Vasya", "sex": 1}') For adding multiple data for m2m or m2o relations, use a key ending in `[]`, e.g.: .. code-block:: python DBSession.sacrud(Users).create( {'name': 'Vasya', 'sex': 1, 'groups[]': ['["id", 1]', '["id", 2]']} ) """
data = unjson(data)
if update is True:
    obj = get_obj_by_request_data(self.session, self.table, data)
else:
    obj = None
return self._add(obj, data, **kwargs)
<SYSTEM_TASK:> Return a list of entries in the table or single <END_TASK> <USER_TASK:> Description: def read(self, *pk): """ Return a list of entries in the table or a single entry if there is a pk. .. code-block:: python # All users DBSession.sacrud(Users).read() # Composite primary_key DBSession.sacrud(User2Groups).read({'user_id': 4, 'group_id': 2}) # Multiple rows primary_keys = [ {'user_id': 4, 'group_id': 2}, {'user_id': 4, 'group_id': 3}, {'user_id': 1, 'group_id': 1}, {'user_id': 19, 'group_id': 2} ] DBSession.sacrud(User2Groups).read(*primary_keys) # JSON using primary_keys = '''[ {"user_id": 4, "group_id": 2}, {"user_id": 4, "group_id": 3}, {"user_id": 1, "group_id": 1}, {"user_id": 19, "group_id": 2} ]''' DBSession.sacrud(User2Groups).read(primary_keys) # Delete DBSession.sacrud(User2Groups).read(*primary_keys)\ .delete(synchronize_session=False) # Same, but works only with a non-composite primary key DBSession.sacrud(Users).read((5, 10)) # as list DBSession.sacrud(Users).read('[5, 10]') # as JSON DBSession.sacrud(Users).read('{"id": 5}') # as JSON explicit pk DBSession.sacrud(Users).read(5, "1", 2) # as *args DBSession.sacrud(Users).read(42) # single """
pk = [unjson(obj) for obj in pk]
if len(pk) == 1:  # like ([1, 2, 3, 4, 5], )
    return get_obj(self.session, self.table, pk[0])
elif len(pk) > 1:  # like (1, 2, 3, 4, 5)
    return get_obj(self.session, self.table, pk)
return self.session.query(self.table)
<SYSTEM_TASK:> Delete the object directly. <END_TASK> <USER_TASK:> Description: def _delete(self, obj, **kwargs): """ Delete the object directly. .. code-block:: python DBSession.sacrud(Users)._delete(UserObj) If you don't need to commit the session: .. code-block:: python DBSession.sacrud(Users, commit=False)._delete(UserObj) """
if isinstance(obj, sqlalchemy.orm.query.Query):
    obj = obj.one()
obj = self.preprocessing(obj=obj).delete()
self.session.delete(obj)
if kwargs.get('commit', self.commit) is True:
    try:
        self.session.commit()
    except AssertionError:
        transaction.commit()
return True
<SYSTEM_TASK:> Checks if the user is authenticated using the token passed as an argument <END_TASK> <USER_TASK:> Description: def authenticated(self, user_token, **validation_context): """Checks if the user is authenticated using the token passed as an argument. :param user_token: string representing the token :param validation_context: Token.validate optional keyword arguments """
token = self.token_storage.get(user_token)
if token and token.validate(user_token, **validation_context):
    return True
return False
<SYSTEM_TASK:> Checks if user represented by token is in group. <END_TASK> <USER_TASK:> Description: def group_authenticated(self, user_token, group): """Checks if user represented by token is in group. :param user_token: string representing token :param group: group's name """
if self.authenticated(user_token):
    token = self.token_storage.get(user_token)
    groups = self.get_groups(token.username)
    if group in groups:
        return True
return False
<SYSTEM_TASK:> Returns the list of groups the user belongs to. <END_TASK> <USER_TASK:> Description: def get_groups(self, username): """Returns the list of groups the user belongs to. :param username: name of the Linux user """
groups = []
for group in grp.getgrall():
    if username in group.gr_mem:
        groups.append(group.gr_name)
return groups
<SYSTEM_TASK:> Decorator which checks if the user is authenticated <END_TASK> <USER_TASK:> Description: def auth_required(self, view): """Decorator which checks if the user is authenticated. Decorator for a Flask view which blocks unauthenticated requests. :param view: Flask's view function """
@functools.wraps(view)
def decorated(*args, **kwargs):
    log.info("Trying to get access to protected resource: '%s'", view.__name__)
    if request.method == 'POST':
        token = request.form['token']
        if self.development or self.authenticated(token):
            return view(*args, **kwargs)
        else:
            log.warning("User has not been authorized to get access "
                        "to resource: %s", view.__name__)
    else:
        log.warning("Bad request type! Expected 'POST', actual '%s'",
                    request.method)
    return abort(403)
return decorated
<SYSTEM_TASK:> Decorator which checks if the user is in the group <END_TASK> <USER_TASK:> Description: def group_required(self, group): """Decorator which checks if the user is in the group. Decorator for a Flask view which blocks requests from unauthenticated users or users who are not members of the specified group. :param group: group's name """
def decorator(view):
    @functools.wraps(view)
    def decorated(*args, **kwargs):
        log.info("Trying to get access to resource: %s protected by group: %s",
                 view.__name__, group)
        if request.method == 'POST':
            token = request.form['token']
            if self.development or self.group_authenticated(token, group):
                return view(*args, **kwargs)
            else:
                log.warning("User has not been authorized to get access "
                            "to resource: %s", view.__name__)
        else:
            log.error("Bad request type! Expected 'POST', actual '%s'",
                      request.method)
        return abort(403)
    return decorated
return decorator
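A hedged usage sketch, assuming `auth` is an instance of this class wired into a Flask app (the route and group name are made up):

@app.route('/admin', methods=['POST'])
@auth.group_required('admins')
def admin_panel():
    return 'secret admin stuff'

Clients would then POST their token in the form field 'token', as both decorators expect.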
<SYSTEM_TASK:> Find corresponding .py name given a .pyc or .pyo <END_TASK> <USER_TASK:> Description: def pyc2py(filename): """ Find corresponding .py name given a .pyc or .pyo """
if re.match(".*py[co]$", filename):
    if PYTHON3:
        return re.sub(r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER,
                      '\\1\\2.py', filename)
    else:
        return filename[:-1]
return filename
<SYSTEM_TASK:> Clear the file cache. If no filename is given clear it entirely. <END_TASK> <USER_TASK:> Description: def clear_file_cache(filename=None): """Clear the file cache. If no filename is given, clear it entirely. If a filename is given, clear just that filename."""
global file_cache, file2file_remap, file2file_remap_lines
if filename is not None:
    if filename in file_cache:
        del file_cache[filename]
else:
    file_cache = {}
    file2file_remap = {}
    file2file_remap_lines = {}
return
<SYSTEM_TASK:> Remove syntax-formatted lines in the cache. Use this <END_TASK> <USER_TASK:> Description: def clear_file_format_cache(): """Remove syntax-formatted lines in the cache. Use this when you change the Pygments syntax or Token formatting and want to redo how files may have previously been syntax marked."""
for fname, cache_info in file_cache.items():
    for fmt, lines in cache_info.lines.items():
        if 'plain' == fmt:
            continue
        file_cache[fname].lines[fmt] = None
<SYSTEM_TASK:> Cache script if it is not already cached. <END_TASK> <USER_TASK:> Description: def cache_script(script, text, opts={}): """Cache script if it is not already cached."""
global script_cache
if script not in script_cache:
    update_script_cache(script, text, opts)
return script
<SYSTEM_TASK:> Cache filename if it is not already cached. <END_TASK> <USER_TASK:> Description: def cache_file(filename, reload_on_change=False, opts=default_opts): """Cache filename if it is not already cached. Return the expanded filename for it in the cache, or None if we cannot find the file."""
filename = pyc2py(filename)
if filename in file_cache:
    if reload_on_change:
        checkcache(filename)
else:
    opts['use_linecache_lines'] = True
    update_cache(filename, opts)
if filename in file_cache:
    return file_cache[filename].path
else:
    return None
<SYSTEM_TASK:> Adds line_map list to the list of associations of from_path to <END_TASK> <USER_TASK:> Description: def remap_file_lines(from_path, to_path, line_map_list): """Adds line_map_list to the list of associations of from_path to to_path."""
from_path = pyc2py(from_path)
cache_file(to_path)
remap_entry = file2file_remap_lines.get(to_path)
if remap_entry:
    new_list = list(remap_entry.from_to_pairs) + list(line_map_list)
else:
    new_list = line_map_list
# FIXME: look for duplicates?
file2file_remap_lines[to_path] = RemapLineEntry(
    from_path, tuple(sorted(new_list, key=lambda t: t[0]))
)
return
<SYSTEM_TASK:> Return the number of lines in filename. If `use_cache_only' is False, <END_TASK> <USER_TASK:> Description: def size(filename, use_cache_only=False): """Return the number of lines in filename. If `use_cache_only' is False, we'll try to fetch the file if it is not cached."""
filename = unmap_file(filename)
if filename not in file_cache:
    if not use_cache_only:
        cache_file(filename)
    if filename not in file_cache:
        return None
return len(file_cache[filename].lines['plain'])
<SYSTEM_TASK:> Return the maximum line number of filename after taking into account <END_TASK> <USER_TASK:> Description: def maxline(filename, use_cache_only=False): """Return the maximum line number of filename after taking into account line remapping. If there is no remapping, this is the same as size()."""
if filename not in file2file_remap_lines:
    return size(filename, use_cache_only)
max_lineno = -1
remap_line_entry = file2file_remap_lines.get(filename)
if not remap_line_entry:
    return size(filename, use_cache_only)
for t in remap_line_entry.from_to_pairs:
    max_lineno = max(max_lineno, t[1])
if max_lineno == -1:
    return size(filename, use_cache_only)
else:
    return max_lineno
<SYSTEM_TASK:> Called to store the ticket data for a request. <END_TASK> <USER_TASK:> Description: async def remember_ticket(self, request, ticket): """Called to store the ticket data for a request. Ticket data is stored in the aiohttp_session object Args: request: aiohttp Request object. ticket: String like object representing the ticket to be stored. """
session = await get_session(request)
session[self.cookie_name] = ticket
<SYSTEM_TASK:> Called to forget the ticket data for a request <END_TASK> <USER_TASK:> Description: async def forget_ticket(self, request): """Called to forget the ticket data for a request. Args: request: aiohttp Request object. """
session = await get_session(request)
session.pop(self.cookie_name, '')
<SYSTEM_TASK:> Called to return the ticket for a request. <END_TASK> <USER_TASK:> Description: async def get_ticket(self, request): """Called to return the ticket for a request. Args: request: aiohttp Request object. Returns: A ticket (string like) object, or None if no ticket is available for the passed request. """
session = await get_session(request)
return session.get(self.cookie_name)
<SYSTEM_TASK:> Returns an aiohttp_auth middleware factory for use by the aiohttp <END_TASK> <USER_TASK:> Description: def auth_middleware(policy): """Returns an aiohttp_auth middleware factory for use by the aiohttp application object. Args: policy: An authentication policy with a base class of AbstractAuthentication. """
assert isinstance(policy, AbstractAuthentication)

async def _auth_middleware_factory(app, handler):
    async def _middleware_handler(request):
        # Save the policy in the request
        request[POLICY_KEY] = policy
        # Call the next handler in the chain
        response = await handler(request)
        # Give the policy a chance to handle the response
        await policy.process_response(request, response)
        return response
    return _middleware_handler

return _auth_middleware_factory
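Wiring this into an application might look like the following sketch. The policy shown is one of the package's concrete ticket policies; the constructor arguments here are illustrative, not taken from the source:

from os import urandom
from aiohttp import web

# Hypothetical policy construction: a random secret and a 60 s ticket lifetime.
policy = CookieTktAuthentication(urandom(32), 60, include_ip=True)
app = web.Application(middlewares=[auth_middleware(policy)])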
<SYSTEM_TASK:> Returns the user_id associated with a particular request. <END_TASK> <USER_TASK:> Description: async def get_auth(request): """Returns the user_id associated with a particular request. Args: request: aiohttp Request object. Returns: The user_id associated with the request, or None if no user is associated with the request. Raises: RuntimeError: Middleware is not installed """
auth_val = request.get(AUTH_KEY)
if auth_val:
    return auth_val

auth_policy = request.get(POLICY_KEY)
if auth_policy is None:
    raise RuntimeError('auth_middleware not installed')

request[AUTH_KEY] = await auth_policy.get(request)
return request[AUTH_KEY]
<SYSTEM_TASK:> Called to store and remember the userid for a request <END_TASK> <USER_TASK:> Description: async def remember(request, user_id): """Called to store and remember the userid for a request Args: request: aiohttp Request object. user_id: String representing the user_id to remember Raises: RuntimeError: Middleware is not installed """
auth_policy = request.get(POLICY_KEY)
if auth_policy is None:
    raise RuntimeError('auth_middleware not installed')
return await auth_policy.remember(request, user_id)
<SYSTEM_TASK:> Called to forget the userid for a request <END_TASK> <USER_TASK:> Description: async def forget(request): """Called to forget the userid for a request Args: request: aiohttp Request object Raises: RuntimeError: Middleware is not installed """
auth_policy = request.get(POLICY_KEY)
if auth_policy is None:
    raise RuntimeError('auth_middleware not installed')
return await auth_policy.forget(request)
<SYSTEM_TASK:> Decorator that will postpone a function's <END_TASK> <USER_TASK:> Description: def debounce(wait): """ Decorator that will postpone a function's execution until after wait seconds have elapsed since the last time it was invoked. """
def decorator(fn):
    def debounced(*args, **kwargs):
        def call_it():
            fn(*args, **kwargs)
        try:
            debounced.t.cancel()
        except AttributeError:
            pass
        debounced.t = threading.Timer(wait, call_it)
        debounced.t.start()
    return debounced
return decorator
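Typical usage, for illustration (the wait time and function are arbitrary):

@debounce(0.5)
def on_keystroke(text):
    print('searching for', text)

# Rapid successive calls keep cancelling the pending timer; only the last
# call within each 0.5 s window actually runs.
for ch in 'hello':
    on_keystroke(ch)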
<SYSTEM_TASK:> Called to perform any processing of the response required. <END_TASK> <USER_TASK:> Description: async def process_response(self, request, response): """Called to perform any processing of the response required. This function stores any cookie data in the COOKIE_AUTH_KEY as a cookie in the response object. If the value is an empty string, the associated cookie is deleted instead. This function requires the response to be an aiohttp Response object, and assumes that the response has not started if the remember or forget functions are called during the request. Args: request: aiohttp Request object. response: response object returned from the handled view Raises: RuntimeError: Raised if response has already started. """
await super().process_response(request, response)
if COOKIE_AUTH_KEY in request:
    if response.started:
        raise RuntimeError("Cannot save cookie into started response")
    cookie = request[COOKIE_AUTH_KEY]
    if cookie == '':
        response.del_cookie(self.cookie_name)
    else:
        response.set_cookie(self.cookie_name, cookie)
<SYSTEM_TASK:> Called to store the userid for a request. <END_TASK> <USER_TASK:> Description: async def remember(self, request, user_id): """Called to store the userid for a request. This function creates a ticket from the request and user_id, and calls the abstract function remember_ticket() to store the ticket. Args: request: aiohttp Request object. user_id: String representing the user_id to remember """
ticket = self._new_ticket(request, user_id)
await self.remember_ticket(request, ticket)
<SYSTEM_TASK:> Gets the user_id for the request. <END_TASK> <USER_TASK:> Description: async def get(self, request): """Gets the user_id for the request. Gets the ticket for the request using the get_ticket() function, and authenticates the ticket. Args: request: aiohttp Request object. Returns: The userid for the request, or None if the ticket is not authenticated. """
ticket = await self.get_ticket(request)
if ticket is None:
    return None

try:
    # Returns a tuple of (user_id, token, userdata, validuntil)
    now = time.time()
    fields = self._ticket.validate(ticket, self._get_ip(request), now)

    # Check if we need to reissue a ticket
    if (self._reissue_time is not None and
            now >= (fields.valid_until - self._reissue_time)):
        # Reissue our ticket, and save it in our request.
        request[_REISSUE_KEY] = self._new_ticket(request, fields.user_id)

    return fields.user_id
except TicketError:
    return None
<SYSTEM_TASK:> If a reissue was requested, only reissue if the response was a <END_TASK> <USER_TASK:> Description: async def process_response(self, request, response): """If a reissue was requested, only reissue if the response was a valid 2xx response. """
if _REISSUE_KEY in request:
    if (response.started or
            not isinstance(response, web.Response) or
            response.status < 200 or response.status > 299):
        return
    await self.remember_ticket(request, request[_REISSUE_KEY])
<SYSTEM_TASK:> Returns a decorator that checks if a user has the requested permission <END_TASK> <USER_TASK:> Description: def acl_required(permission, context): """Returns a decorator that checks if a user has the requested permission from the passed acl context. This function constructs a decorator that can be used to check an aiohttp view for authorization before calling it. It uses the get_permitted() function to check the request against the passed permission and context. If the user does not have the correct permission to run this function, it raises HTTPForbidden. Args: permission: The specific permission requested. context: Either a sequence of ACL tuples, or a callable that returns a sequence of ACL tuples. For more information on ACL tuples, see get_permitted() Returns: A decorator which will check the request passed has the permission for the given context. The decorator will raise HTTPForbidden if the user does not have the correct permissions to access the view. """
def decorator(func):
    @wraps(func)
    async def wrapper(*args):
        request = args[-1]
        # Evaluate a callable context into a local name; rebinding `context`
        # itself inside the closure would raise UnboundLocalError.
        acl_context = context() if callable(context) else context
        if await get_permitted(request, permission, acl_context):
            return await func(*args)
        raise web.HTTPForbidden()
    return wrapper
return decorator
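A sketch of decorating a view with this, reusing the ACL tuple format shown earlier (the view and group names are hypothetical):

context = [(Permission.Allow, 'edit_group', ('edit',))]

@acl_required('edit', context)
async def edit_view(request):
    return web.Response(text='editing allowed')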
<SYSTEM_TASK:> Checks if json-stat version attribute exists and is equal or greater \ <END_TASK> <USER_TASK:> Description: def check_version_2(dataset): """Checks if json-stat version attribute exists and is equal or greater \ than 2.0 for a given dataset. Args: dataset (OrderedDict): data in JSON-stat format, previously \ deserialized to a python object by \ json.load() or json.loads(), Returns: bool: True if version exists and is equal or greater than 2.0, \ False otherwise. For datasets without the version attribute, \ always return False. """
version = dataset.get('version')
return bool(version) and float(version) >= 2.0
<SYSTEM_TASK:> Unnest collection structure extracting all its datasets and converting \ <END_TASK> <USER_TASK:> Description: def unnest_collection(collection, df_list): """Unnest collection structure extracting all its datasets and converting \ them to Pandas Dataframes. Args: collection (OrderedDict): data in JSON-stat format, previously \ deserialized to a python object by \ json.load() or json.loads(), df_list (list): list variable which will contain the converted \ datasets. Returns: Nothing. """
for item in collection['link']['item']:
    if item['class'] == 'dataset':
        df_list.append(Dataset.read(item['href']).write('dataframe'))
    elif item['class'] == 'collection':
        nested_collection = request(item['href'])
        unnest_collection(nested_collection, df_list)
<SYSTEM_TASK:> Get dimensions from input data. <END_TASK> <USER_TASK:> Description: def get_dimensions(js_dict, naming): """Get dimensions from input data. Args: js_dict (dict): dictionary containing dataset data and metadata. naming (string, optional): dimension naming. Possible values: 'label' \ or 'id'. Returns: dimensions (list): list of pandas data frames with dimension \ category data. dim_names (list): list of strings with dimension names. """
dimensions = []
dim_names = []
if check_version_2(js_dict):
    dimension_dict = js_dict
else:
    dimension_dict = js_dict['dimension']
for dim in dimension_dict['id']:
    dim_name = js_dict['dimension'][dim]['label']
    if not dim_name:
        dim_name = dim
    if naming == 'label':
        dim_label = get_dim_label(js_dict, dim)
        dimensions.append(dim_label)
        dim_names.append(dim_name)
    else:
        dim_index = get_dim_index(js_dict, dim)
        dimensions.append(dim_index)
        dim_names.append(dim)
return dimensions, dim_names
<SYSTEM_TASK:> Get label from a given dimension. <END_TASK> <USER_TASK:> Description: def get_dim_label(js_dict, dim, input="dataset"): """Get label from a given dimension. Args: js_dict (dict): dictionary containing dataset data and metadata. dim (string): dimension name obtained from JSON file. Returns: dim_label(pandas.DataFrame): DataFrame with label-based dimension data. """
if input == 'dataset':
    input = js_dict['dimension'][dim]
    label_col = 'label'
elif input == 'dimension':
    label_col = js_dict['label']
    input = js_dict
else:
    raise ValueError

try:
    dim_label = input['category']['label']
except KeyError:
    dim_index = get_dim_index(js_dict, dim)
    dim_label = pd.concat([dim_index['id'], dim_index['id']], axis=1)
    dim_label.columns = ['id', 'label']
else:
    dim_label = pd.DataFrame(
        list(zip(dim_label.keys(), dim_label.values())),
        index=dim_label.keys(), columns=['id', label_col])

# index must be added to dim label so that it can be sorted
try:
    dim_index = input['category']['index']
except KeyError:
    dim_index = pd.DataFrame(
        list(zip([dim_label['id'][0]], [0])),
        index=[0], columns=['id', 'index'])
else:
    if type(dim_index) is list:
        dim_index = pd.DataFrame(
            list(zip(dim_index, range(0, len(dim_index)))),
            index=dim_index, columns=['id', 'index'])
    else:
        dim_index = pd.DataFrame(
            list(zip(dim_index.keys(), dim_index.values())),
            index=dim_index.keys(), columns=['id', 'index'])

dim_label = pd.merge(dim_label, dim_index, on='id').sort_index(by='index')
return dim_label
<SYSTEM_TASK:> Get index from a given dimension. <END_TASK> <USER_TASK:> Description: def get_dim_index(js_dict, dim): """Get index from a given dimension. Args: js_dict (dict): dictionary containing dataset data and metadata. dim (string): dimension name obtained from JSON file. Returns: dim_index (pandas.DataFrame): DataFrame with index-based dimension data. """
try:
    dim_index = js_dict['dimension'][dim]['category']['index']
except KeyError:
    dim_label = get_dim_label(js_dict, dim)
    dim_index = pd.DataFrame(
        list(zip([dim_label['id'][0]], [0])),
        index=[0], columns=['id', 'index'])
else:
    if type(dim_index) is list:
        dim_index = pd.DataFrame(
            list(zip(dim_index, range(0, len(dim_index)))),
            index=dim_index, columns=['id', 'index'])
    else:
        dim_index = pd.DataFrame(
            list(zip(dim_index.keys(), dim_index.values())),
            index=dim_index.keys(), columns=['id', 'index'])
dim_index = dim_index.sort_index(by='index')
return dim_index
<SYSTEM_TASK:> Get values from input data. <END_TASK> <USER_TASK:> Description: def get_values(js_dict, value='value'): """Get values from input data. Args: js_dict (dict): dictionary containing dataset data and metadata. value (string, optional): name of the value column. Defaults to 'value'. Returns: values (list): list of dataset values. """
values = js_dict[value]
if type(values) is list:
    # The original guard `type(values[0]) is not dict or tuple` was always
    # true (`tuple` is truthy), so every list is returned unchanged here.
    return values
# Otherwise we have a dict mapping positions to values (sparse format).
values = {int(key): value for (key, value) in values.items()}
if js_dict.get('size'):
    max_val = np.prod(np.array(js_dict['size']))
else:
    max_val = np.prod(np.array(js_dict['dimension']['size']))
vals = max_val * [None]
for (key, value) in values.items():
    vals[key] = value
return vals
<SYSTEM_TASK:> Generate row dimension values for a pandas dataframe. <END_TASK> <USER_TASK:> Description: def get_df_row(dimensions, naming='label', i=0, record=None): """Generate row dimension values for a pandas dataframe. Args: dimensions (list): list of pandas dataframes with dimension labels \ generated by get_dim_label or get_dim_index methods. naming (string, optional): dimension naming. Possible values: 'label' \ or 'id'. i (int): dimension list iteration index. Default is 0, it's used in the \ recursive calls to the method. record (list): list of values representing a pandas dataframe row, \ except for the value column. Default is empty, it's used \ in the recursive calls to the method. Yields: list: list with pandas dataframe column values except for value column """
check_input(naming)
if i == 0 or record is None:
    record = []

for dimension in dimensions[i][naming]:
    record.append(dimension)
    if len(record) == len(dimensions):
        yield record
    if i + 1 < len(dimensions):
        for row in get_df_row(dimensions, naming, i + 1, record):
            yield row
    if len(record) == i + 1:
        record.pop()
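For intuition, a small sketch of the traversal, assuming get_df_row is importable from the surrounding pyjstat module (the toy dimension frames are made up; note the generator yields and mutates the same list, so copy each row):

import pandas as pd

dims = [pd.DataFrame({'label': ['a', 'b']}),
        pd.DataFrame({'label': ['x', 'y']})]
print([list(row) for row in get_df_row(dims)])
# [['a', 'x'], ['a', 'y'], ['b', 'x'], ['b', 'y']]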
<SYSTEM_TASK:> Decode JSON-stat formatted data into pandas.DataFrame object. <END_TASK> <USER_TASK:> Description: def from_json_stat(datasets, naming='label', value='value'): """Decode JSON-stat formatted data into pandas.DataFrame object. Args: datasets(OrderedDict, list): data in JSON-stat format, previously \ deserialized to a python object by \ json.load() or json.loads(), for example.\ Both List and OrderedDict are accepted \ as inputs. naming(string, optional): dimension naming. Possible values: 'label' \ or 'id'. Defaults to 'label'. value (string, optional): name of the value column. Defaults to 'value'. Returns: results(list): list of pandas.DataFrame with imported data. """
warnings.warn(
    "Shouldn't use this function anymore! Now use read() methods of "
    "Dataset, Collection or Dimension.",
    DeprecationWarning
)
check_input(naming)
results = []
if type(datasets) is list:
    for idx, element in enumerate(datasets):
        for dataset in element:
            js_dict = datasets[idx][dataset]
            results.append(generate_df(js_dict, naming, value))
elif isinstance(datasets, OrderedDict) or type(datasets) is dict or \
        isinstance(datasets, Dataset):
    if 'class' in datasets:
        if datasets['class'] == 'dataset':
            js_dict = datasets
            results.append(generate_df(js_dict, naming, value))
    else:  # 1.00 bundle type
        for dataset in datasets:
            js_dict = datasets[dataset]
            results.append(generate_df(js_dict, naming, value))
return results
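A usage sketch against the public JSON-stat sample dataset; the `from pyjstat import pyjstat` import path is assumed. The function is deprecated in favour of the Dataset/Collection read() methods, but still accepts deserialized payloads.

import json
from collections import OrderedDict
from urllib.request import urlopen

from pyjstat import pyjstat  # assumed import path

with urlopen('https://json-stat.org/samples/oecd-canada.json') as source:
    data = json.load(source, object_pairs_hook=OrderedDict)

dataframes = pyjstat.from_json_stat(data)  # list of pandas.DataFrame
print(dataframes[0].head())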
<SYSTEM_TASK:>
Send a request to a given URL accepting JSON format and return a
deserialized Python object.
<END_TASK>
<USER_TASK:>
Description:
def request(path):
    """Send a request to a given URL accepting JSON format and return a \
    deserialized Python object.

    Args:
        path (str): The URI to be requested.

    Returns:
        response: Deserialized JSON Python object.

    Raises:
        HTTPError: the HTTP error returned by the requested server.
        InvalidURL: an invalid URL has been requested.
        Exception: generic exception.
    """
headers = {'Accept': 'application/json'}
try:
    requested_object = requests.get(path, headers=headers)
    requested_object.raise_for_status()
except requests.exceptions.HTTPError as exception:
    LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +
                 str(exception.response.status_code) + ' ' +
                 str(exception.response.reason) + ' ' + str(path))
    raise
except requests.exceptions.InvalidURL as exception:
    LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))
    raise
except Exception:
    import traceback
    LOGGER.error('Generic exception: ' + traceback.format_exc())
    raise
else:
    response = requested_object.json()
    return response
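A minimal sketch of calling the helper against the public JSON-stat sample endpoint, again assuming the `from pyjstat import pyjstat` import path.

from pyjstat import pyjstat  # assumed import path

data = pyjstat.request('https://json-stat.org/samples/oecd-canada.json')
print(type(data))  # deserialized dict, ready for from_json_stat()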
<SYSTEM_TASK:>
Utility decorator that checks if a user has been authenticated for this
request.
<END_TASK>
<USER_TASK:>
Description:
def auth_required(func):
    """Utility decorator that checks if a user has been authenticated for
    this request.

    Allows views to be decorated like:

        @auth_required
        def view_func(request):
            pass

    providing a simple means to ensure that whoever is calling the function
    has the correct authentication details.

    Args:
        func: Function object being decorated.

    Returns:
        A function object that will raise web.HTTPForbidden() if the passed
        request does not have the correct permissions to access the view.
    """
@wraps(func)
async def wrapper(*args):
    if (await get_auth(args[-1])) is None:
        raise web.HTTPForbidden()
    return await func(*args)
return wrapper
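A hedged usage sketch for an aiohttp application; it assumes an auth middleware is installed so that `get_auth(request)` returns a user identity for authenticated requests.

from aiohttp import web

@auth_required
async def secret_view(request):
    # only reached when get_auth(request) returned a user identity
    return web.Response(text='for authenticated eyes only')

app = web.Application()
app.router.add_get('/secret', secret_view)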
<SYSTEM_TASK:>
Inserts the output of a view, using fully qualified view name,
or view name from urls.py.
<END_TASK>
<USER_TASK:>
Description:
def render_partial(parser, token):
    """
    Inserts the output of a view, using fully qualified view name,
    or view name from urls.py.

      {% render_partial view_name arg[ arg2] k=v [k2=v2...] %}

    IMPORTANT: the calling template must receive a context variable called
    'request' containing the original HttpRequest. This means you should be
    OK with permissions and other session state.

    (Note that every argument will be evaluated against context except for
    the names of any keyword arguments.)
    """
args = []
kwargs = {}
tokens = token.split_contents()
if len(tokens) < 2:
    raise TemplateSyntaxError(
        '%r tag requires one or more arguments' % token.contents.split()[0]
    )
tokens.pop(0)  # tag name
view_name = tokens.pop(0)
for token in tokens:
    equals = token.find('=')
    if equals == -1:
        args.append(token)
    else:
        kwargs[str(token[:equals])] = token[equals + 1:]
return ViewNode(view_name, args, kwargs)
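A sketch of the registration side, assuming this compilation function lives in a `templatetags` module of a Django app; the tag name and the example view path below are illustrative.

from django import template

register = template.Library()

# after registration, templates can call, e.g.:
#   {% render_partial "blog.views.latest_posts" 5 category="news" %}
register.tag('render_partial', render_partial)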
<SYSTEM_TASK:> Render a wavedrom image <END_TASK> <USER_TASK:> Description: def render_wavedrom(self, node, outpath, bname, format): """ Render a wavedrom image """
# Try to convert the node; raise an error including the code on failure.
try:
    svgout = WaveDrom().renderWaveForm(0, json.loads(node['code']))
except JSONDecodeError as e:
    raise SphinxError(
        "Cannot render the following json code: \n{} \n\nError: {}".format(
            node['code'], e))

if not os.path.exists(outpath):
    os.makedirs(outpath)

# SVG can be written directly and is supported by all builders.
if format == 'image/svg+xml':
    fname = "{}.{}".format(bname, "svg")
    fpath = os.path.join(outpath, fname)
    svgout.saveas(fpath)
    return fname

# It gets a bit ugly if the output format is not SVG. We use cairosvg
# because it is the easiest to use (no dependency on installed programs),
# but it only works under Python 3.
try:
    import cairosvg
except ImportError:
    raise SphinxError(__("Cannot import 'cairosvg'. In Python 2 wavedrom "
                         "figures other than svg are not supported, in "
                         "Python 3 ensure 'cairosvg' is installed."))

if format == 'application/pdf':
    fname = "{}.{}".format(bname, "pdf")
    fpath = os.path.join(outpath, fname)
    cairosvg.svg2pdf(svgout.tostring(), write_to=fpath)
    return fname

if format == 'image/png':
    fname = "{}.{}".format(bname, "png")
    fpath = os.path.join(outpath, fname)
    cairosvg.svg2png(svgout.tostring(), write_to=fpath)
    return fname

raise SphinxError("No valid wavedrom conversion supplied")
<SYSTEM_TASK:> Visit the wavedrom node <END_TASK> <USER_TASK:> Description: def visit_wavedrom(self, node): """ Visit the wavedrom node """
format = determine_format(self.builder.supported_image_types)
if format is None:
    raise SphinxError(__("Cannot determine a suitable output format"))

# Create a random filename.
bname = "wavedrom-{}".format(uuid4())
outpath = path.join(self.builder.outdir, self.builder.imagedir)

# Render the wavedrom image.
imgname = render_wavedrom(self, node, outpath, bname, format)

# Now we unpack the image node again. The file was created at the build
# destination, and we can now use the standard visitor for the image node.
# We add the image node as a child and then raise SkipDeparture, which
# will trigger the builder to visit the children.
image_node = node['image_node']
image_node['uri'] = os.path.join(self.builder.imgpath, imgname)
node.append(image_node)

raise nodes.SkipDeparture
<SYSTEM_TASK:>
When the document, and all the links, are fully resolved, we inject one
raw html element that processes the wavedrom diagrams on page load.
<END_TASK>
<USER_TASK:>
Description:
def doctree_resolved(app, doctree, fromdocname):
    """
    When the document, and all the links, are fully resolved, we inject one
    raw html element that runs WaveDrom processing of the diagrams in the
    page's onload event.
    """
# Skip for non-html or if javascript is not inlined
if not app.env.config.wavedrom_html_jsinline:
    return

text = """
<script type="text/javascript">
    function init() {
        WaveDrom.ProcessAll();
    }
    window.onload = init;
</script>"""
doctree.append(nodes.raw(text=text, format='html'))
<SYSTEM_TASK:>
Return a supercell.
<END_TASK>
<USER_TASK:>
Description:
def make_supercell(system, matrix, supercell=[1, 1, 1]):
    """
    Return a supercell.

    This function takes the input unit cell and creates a supercell of it
    that is returned as a new :class:`pywindow.molecular.MolecularSystem`.

    Parameters
    ----------
    system : :attr:`pywindow.molecular.MolecularSystem.system`
        The unit cell for creation of the supercell

    matrix : :class:`numpy.array`
        The unit cell parameters in form of a lattice.

    supercell : :class:`list`, optional
        A list that specifies the size of the supercell in the a, b and c
        direction. (default=[1, 1, 1])

    Returns
    -------
    :class:`pywindow.molecular.MolecularSystem`
        Returns the created supercell as a new :class:`MolecularSystem`.

    """
# The third entry must use supercell[2] (the c direction); the original
# code repeated supercell[1] here.
user_supercell = [[1, supercell[0]], [1, supercell[1]], [1, supercell[2]]]
system = create_supercell(system, matrix, supercell=user_supercell)
return MolecularSystem.load_system(system)
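A usage sketch under stated assumptions: the input file name is hypothetical, the lattice is a made-up 25 Å cubic cell, and `make_supercell` is assumed to be importable from the module that defines it.

import numpy as np
import pywindow as pw

molsys = pw.MolecularSystem.load_file('periodic_structure.pdb')
matrix = np.array([[25.0, 0.0, 0.0],
                   [0.0, 25.0, 0.0],
                   [0.0, 0.0, 25.0]])

# 2x2x2 supercell returned as a new MolecularSystem
supercell = make_supercell(molsys.system, matrix, supercell=[2, 2, 2])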
<SYSTEM_TASK:>
Extract frames from the trajectory file.
<END_TASK>
<USER_TASK:>
Description:
def get_frames(self, frames='all', override=False, **kwargs):
    """
    Extract frames from the trajectory file.

    Depending on the passed parameters a frame, a list of particular
    frames, a range of frames (from, to), or all frames can be extracted
    with this function.

    Parameters
    ----------
    frames : :class:`int` or :class:`list` or :class:`tuple` or :class:`str`
        Specified frame (:class:`int`), or frames (:class:`list`), or
        range (:class:`tuple`), or `all`/`everything` (:class:`str`).
        (default=`all`)

    override : :class:`bool`
        If True, a frame already stored in :attr:`frames` can be
        overridden. (default=False)

    extract_data : :class:`bool`, optional
        If False, a frame is returned as a :class:`str` block as in the
        trajectory file. Otherwise, it is extracted and returned as
        :class:`pywindow.molecular.MolecularSystem`. (default=True)

    swap_atoms : :class:`dict`, optional
        If this kwarg is passed with an appropriate dictionary a
        :func:`pywindow.molecular.MolecularSystem.swap_atom_keys()` will
        be applied to the extracted frame.

    forcefield : :class:`str`, optional
        If this kwarg is passed with appropriate forcefield keyword a
        :func:`pywindow.molecular.MolecularSystem.decipher_atom_keys()`
        will be applied to the extracted frame.

    Returns
    -------
    :class:`pywindow.molecular.MolecularSystem`
        If a single frame is extracted.

    None : :class:`NoneType`
        If more than one frame is extracted, the frames are returned to
        :attr:`frames`

    """
if override is True:
    self.frames = {}
if isinstance(frames, int):
    frame = self._get_frame(
        self.trajectory_map[frames], frames, **kwargs)
    if frames not in self.frames.keys():
        self.frames[frames] = frame
    return frame
if isinstance(frames, list):
    for frame in frames:
        if frame not in self.frames.keys():
            self.frames[frame] = self._get_frame(
                self.trajectory_map[frame], frame, **kwargs)
if isinstance(frames, tuple):
    for frame in range(frames[0], frames[1]):
        if frame not in self.frames.keys():
            self.frames[frame] = self._get_frame(
                self.trajectory_map[frame], frame, **kwargs)
if isinstance(frames, str):
    if frames in ['all', 'everything']:
        for frame in range(0, self.no_of_frames):
            if frame not in self.frames.keys():
                self.frames[frame] = self._get_frame(
                    self.trajectory_map[frame], frame, **kwargs)
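A hedged sketch of pulling frames from a mapped trajectory; the trajectory class and file name are illustrative, and the `swap_atoms` mapping follows the docstring's example.

import pywindow as pw

traj = pw.trajectory.DLPOLY('HISTORY')  # hypothetical trajectory file

frame_0 = traj.get_frames(0)       # a single frame, returned directly
traj.get_frames([5, 10, 15])       # selected frames, stored in traj.frames
traj.get_frames((0, 100), swap_atoms={'he': 'H'})  # a range of frames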
<SYSTEM_TASK:>
Map the trajectory file, storing the byte positions of each frame.
<END_TASK>
<USER_TASK:>
Description:
def _map_trajectory(self):
    """Map the trajectory file, storing the byte positions of each frame
    in the :attr:`trajectory_map` class attribute."""
self.trajectory_map = {}
with open(self.filepath, 'r') as trajectory_file:
    with closing(
            mmap(
                trajectory_file.fileno(), 0,
                access=ACCESS_READ)) as mapped_file:
        progress = 0
        line = 0
        frame = -1
        frame_start = 0
        while progress <= len(mapped_file):
            line = line + 1
            # We read binary data from the mapped file.
            bline = mapped_file.readline()
            # If the bline length equals zero we terminate.
            # We reached the end of the file but still add the last frame!
            if len(bline) == 0:
                frame = frame + 1
                if progress - frame_start > 10:
                    self.trajectory_map[frame] = [frame_start, progress]
                break
            # We need to decode the byte line into a utf-8 string.
            sline = bline.decode("utf-8").strip('\n').split()
            # We extract the map's byte coordinates for each frame.
            if len(sline) == 1 and sline[0] == 'END':
                frame = frame + 1
                self.trajectory_map[frame] = [frame_start, progress]
                frame_start = progress
            # Advance the byte pointer past the current line.
            progress = progress + len(bline)
self.no_of_frames = frame
<SYSTEM_TASK:>
Perform a full structural analysis of a molecule.
<END_TASK>
<USER_TASK:>
Description:
def full_analysis(self, ncpus=1, **kwargs):
    """
    Perform a full structural analysis of a molecule.

    This invokes other methods:

        1. :attr:`molecular_weight()`

        2. :attr:`calculate_centre_of_mass()`

        3. :attr:`calculate_maximum_diameter()`

        4. :attr:`calculate_average_diameter()`

        5. :attr:`calculate_pore_diameter()`

        6. :attr:`calculate_pore_volume()`

        7. :attr:`calculate_pore_diameter_opt()`

        8. :attr:`calculate_pore_volume_opt()`

        9. :attr:`calculate_windows()`

    Parameters
    ----------
    ncpus : :class:`int`
        Number of CPUs used for the parallelised parts of
        :func:`pywindow.utilities.find_windows()`. (default=1=serial)

    Returns
    -------
    :attr:`Molecule.properties`
        The updated :attr:`Molecule.properties` with the results of all
        the methods used.

    """
self.molecular_weight()
self.calculate_centre_of_mass()
self.calculate_maximum_diameter()
self.calculate_average_diameter()
self.calculate_pore_diameter()
self.calculate_pore_volume()
self.calculate_pore_diameter_opt(**kwargs)
self.calculate_pore_volume_opt(**kwargs)
self.calculate_windows(ncpus=ncpus, **kwargs)
# self._circumcircle(**kwargs)
return self.properties
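A minimal end-to-end sketch; the file name is hypothetical and the property keys printed at the end are indicative of what :attr:`Molecule.properties` holds after the run.

import pywindow as pw

molsys = pw.MolecularSystem.load_file('cage_molecule.pdb')
mol = molsys.system_to_molecule()  # treat the whole system as one molecule

results = mol.full_analysis(ncpus=2)
print(results['pore_diameter'])
print(results['windows'])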
<SYSTEM_TASK:> Return the xyz coordinates of the centre of mass of a molecule. <END_TASK> <USER_TASK:> Description: def calculate_centre_of_mass(self): """ Return the xyz coordinates of the centre of mass of a molecule. Returns ------- :class:`numpy.array` The centre of mass of the molecule. """
self.centre_of_mass = center_of_mass(self.elements, self.coordinates)
self.properties['centre_of_mass'] = self.centre_of_mass
return self.centre_of_mass
<SYSTEM_TASK:>
Return the maximum dimension of a molecule.
<END_TASK>
<USER_TASK:>
Description:
def calculate_maximum_diameter(self):
    """
    Return the maximum dimension of a molecule.

    Returns
    -------
    :class:`float`
        The maximum dimension of the molecule.

    """
self.maxd_atom_1, self.maxd_atom_2, self.maximum_diameter = max_dim(
    self.elements, self.coordinates)
self.properties['maximum_diameter'] = {
    'diameter': self.maximum_diameter,
    'atom_1': int(self.maxd_atom_1),
    'atom_2': int(self.maxd_atom_2),
}
return self.maximum_diameter
<SYSTEM_TASK:>
Return the average dimension of a molecule.
<END_TASK>
<USER_TASK:>
Description:
def calculate_average_diameter(self, **kwargs):
    """
    Return the average dimension of a molecule.

    Returns
    -------
    :class:`float`
        The average dimension of the molecule.

    """
self.average_diameter = find_average_diameter(
    self.elements, self.coordinates, **kwargs)
return self.average_diameter
<SYSTEM_TASK:>
Return the diameters of all windows in a molecule.
<END_TASK>
<USER_TASK:>
Description:
def calculate_windows(self, **kwargs):
    """
    Return the diameters of all windows in a molecule.

    This function first finds and then measures the diameters of all the
    windows in the molecule.

    Returns
    -------
    :class:`numpy.array`
        An array of windows' diameters.

    :class:`NoneType`
        If no windows were found.

    """
windows = find_windows(self.elements, self.coordinates, **kwargs)
if windows:
    self.properties.update({
        'windows': {
            'diameters': windows[0],
            'centre_of_mass': windows[1],
        }
    })
    return windows[0]
else:
    self.properties.update(
        {'windows': {'diameters': None, 'centre_of_mass': None}})
    return None
<SYSTEM_TASK:>
Shift a molecule to Origin.
<END_TASK>
<USER_TASK:>
Description:
def shift_to_origin(self, **kwargs):
    """
    Shift a molecule to Origin.

    This function takes the molecule's coordinates and adjusts them so
    that the centre of mass of the molecule coincides with the origin of
    the coordinate system.

    Returns
    -------
    None : :class:`NoneType`

    """
self.coordinates = shift_com(self.elements, self.coordinates, **kwargs)
self._update()
<SYSTEM_TASK:>
Rebuild molecules in a molecular system.
<END_TASK>
<USER_TASK:>
Description:
def rebuild_system(self, override=False, **kwargs):
    """
    Rebuild molecules in a molecular system.

    Parameters
    ----------
    override : :class:`bool`, optional (default=False)
        If False the rebuilt molecular system is returned as a new
        :class:`MolecularSystem`, if True, the current
        :class:`MolecularSystem` is modified.

    """
# First we create a 3x3x3 supercell with the initial unit cell in the
# centre and the 26 unit cell translations around it, to provide all the
# atom positions necessary for the periodic boundary reconstruction step.
supercell_333 = create_supercell(self.system, **kwargs)
# smolsys = self.load_system(supercell_333, self.system_id + '_311')
# smolsys.dump_system(override=True)
discrete = discrete_molecules(self.system, rebuild=supercell_333)
# This function overrides the initial data for 'coordinates',
# 'atom_ids', and 'elements' instances in the 'system' dictionary.
coordinates = np.array([], dtype=np.float64).reshape(0, 3)
atom_ids = np.array([])
elements = np.array([])
for i in discrete:
    coordinates = np.concatenate(
        [coordinates, i['coordinates']], axis=0)
    atom_ids = np.concatenate([atom_ids, i['atom_ids']], axis=0)
    elements = np.concatenate([elements, i['elements']], axis=0)
rebuild_system = {
    'coordinates': coordinates,
    'atom_ids': atom_ids,
    'elements': elements
}
if override is True:
    self.system.update(rebuild_system)
    return None
else:
    return self.load_system(rebuild_system)
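A short sketch of both modes; the input file name is hypothetical.

import pywindow as pw

molsys = pw.MolecularSystem.load_file('periodic_structure.pdb')

rebuilt = molsys.rebuild_system()      # returned as a new MolecularSystem
molsys.rebuild_system(override=True)   # or modify the current one in place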
<SYSTEM_TASK:>
Swap a force field atom id for another user-defined value.
<END_TASK>
<USER_TASK:>
Description:
def swap_atom_keys(self, swap_dict, dict_key='atom_ids'):
    """
    Swap a force field atom id for another user-defined value.

    This modifies all values in
    :attr:`MolecularSystem.system['atom_ids']` that match the criteria.

    This function can be used to decipher a whole forcefield if an
    appropriate dictionary is passed to the function.

    Example
    -------
    In this example all atom ids 'he' will be exchanged to 'H'.

    .. code-block:: python

        pywindow.molecular.MolecularSystem.swap_atom_keys({'he': 'H'})

    Parameters
    ----------
    swap_dict: :class:`dict`
        A dictionary containing force field atom ids (keys) to be swapped
        with the corresponding values.

    dict_key: :class:`str`
        A key in :attr:`MolecularSystem.system` dictionary to perform the
        atom keys swapping operation on. (default='atom_ids')

    Returns
    -------
    None : :class:`NoneType`

    """
# Similar situation to the one from decipher_atom_keys function.
if 'atom_ids' not in self.system.keys():
    dict_key = 'elements'
for atom_key in range(len(self.system[dict_key])):
    for key in swap_dict.keys():
        if self.system[dict_key][atom_key] == key:
            self.system[dict_key][atom_key] = swap_dict[key]
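A usage sketch chaining the swap with a later deciphering step; the file name and the forcefield keyword are illustrative.

import pywindow as pw

molsys = pw.MolecularSystem.load_file('dlpoly_frame.pdb')  # hypothetical

# map the force field's 'he' type back to hydrogen before analysis
molsys.swap_atom_keys({'he': 'H'})
molsys.decipher_atom_keys(forcefield='DLF')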