<SYSTEM_TASK:> Get a download from GBIF. <END_TASK>
<USER_TASK:> Description:
def download_get(key, path=".", **kwargs):
    """
    Get a download from GBIF.

    :param key: [str] A key generated from a request, like that from ``download``
    :param path: [str] Path to write zip file to. Default: ``"."``, with a
        ``.zip`` appended to the end.
    :param kwargs: Further named arguments passed on to ``requests.get``

    Downloads the zip file to a directory you specify on your machine.
    The speed of this function is of course proportional to the size of the
    file to download, and affected by your internet connection speed.

    This function only downloads the file. To open and read it, see
    https://github.com/BelgianBiodiversityPlatform/python-dwca-reader

    Usage::

        from pygbif import occurrences as occ
        occ.download_get("0000066-140928181241064")
        occ.download_get("0003983-140910143529206")
    """
    meta = pygbif.occurrences.download_meta(key)
    if meta['status'] != 'SUCCEEDED':
        raise Exception('download "%s" not of status SUCCEEDED' % key)
    else:
        print('Download file size: %s bytes' % meta['size'])
        url = 'http://api.gbif.org/v1/occurrence/download/request/' + key
        path = "%s/%s.zip" % (path, key)
        gbif_GET_write(url, path, **kwargs)
        print("On disk at " + path)
        return {'path': path, 'size': meta['size'], 'key': key}
<SYSTEM_TASK:> add key, value, type combination of a predicate <END_TASK>
<USER_TASK:> Description:
def add_predicate(self, key, value, predicate_type='equals'):
    """
    add key, value, type combination of a predicate

    :param key: query KEY parameter
    :param value: the value used in the predicate
    :param predicate_type: the type of predicate (e.g. ``equals``)
    """
    if predicate_type not in operators:
        predicate_type = operator_lkup.get(predicate_type)
    if predicate_type:
        self.predicates.append({'type': predicate_type,
                                'key': key,
                                'value': value
                                })
    else:
        raise Exception("predicate type not a valid operator")
<SYSTEM_TASK:> extract values from either file or list <END_TASK>
<USER_TASK:> Description:
def _extract_values(values_list):
    """extract values from either file or list

    :param values_list: list or file name (str) with list of values
    """
    values = []
    # check if file or list of values to iterate
    if isinstance(values_list, str):
        with open(values_list) as ff:
            reading = csv.reader(ff)
            for j in reading:
                values.append(j[0])
    elif isinstance(values_list, list):
        values = values_list
    else:
        raise Exception("input datatype not supported.")
    return values
<SYSTEM_TASK:> add an iterative predicate with a key and set of values <END_TASK>
<USER_TASK:> Description:
def add_iterative_predicate(self, key, values_list):
    """add an iterative predicate with a key and a set of values it can be
    equal to. The individual predicates are specified with the type
    ``equals`` and combined with a type ``or``.

    The main reason for this addition is the inability to use ``in`` as a
    predicate type for multiple taxon_key values
    (cfr. http://dev.gbif.org/issues/browse/POR-2753).

    :param key: API key to use for the query.
    :param values_list: file name or list containing the taxon keys to be
        searched.
    """
    values = self._extract_values(values_list)

    predicate = {'type': 'equals', 'key': key, 'value': None}
    predicates = []
    while values:
        predicate['value'] = values.pop()
        predicates.append(predicate.copy())

    self.predicates.append({'type': 'or', 'predicates': predicates})
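A hedged usage sketch of how these three methods combine into one GBIF download request. The `GbifDownload` import path and its constructor signature `(creator, email)` are assumptions here, and the taxon keys are made up for illustration:

    from pygbif.occurrences.download import GbifDownload  # assumed import path

    req = GbifDownload('my_gbif_user', 'me@example.com')  # assumed signature
    # a single equals-predicate
    req.add_predicate('BASIS_OF_RECORD', 'HUMAN_OBSERVATION')
    # an or-of-equals predicate built from a list (or from a one-column CSV file)
    req.add_iterative_predicate('TAXON_KEY', [2435099, 5231190])

After this, `req.predicates` holds one `equals` clause and one `or` clause wrapping per-value `equals` clauses, matching the nested predicate JSON the GBIF download API consumes.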
<SYSTEM_TASK:> Detect if code is running in a Jupyter Notebook. <END_TASK>
<USER_TASK:> Description:
def _detect_notebook() -> bool:
    """Detect if code is running in a Jupyter Notebook.

    This isn't 100% correct but seems good enough

    Returns
    -------
    bool
        True if it detects this is a notebook, otherwise False.
    """
    try:
        from IPython import get_ipython
        from ipykernel import zmqshell
    except ImportError:
        return False
    kernel = get_ipython()
    try:
        from spyder.utils.ipython.spyder_kernel import SpyderKernel
        if isinstance(kernel.kernel, SpyderKernel):
            return False
    except (ImportError, AttributeError):
        pass
    return isinstance(kernel, zmqshell.ZMQInteractiveShell)
<SYSTEM_TASK:> Merge attributes from two layouts. <END_TASK>
<USER_TASK:> Description:
def _merge_layout(x: go.Layout, y: go.Layout) -> go.Layout:
    """Merge attributes from two layouts."""
    xjson = x.to_plotly_json()
    yjson = y.to_plotly_json()
    if 'shapes' in yjson and 'shapes' in xjson:
        xjson['shapes'] += yjson['shapes']
    yjson.update(xjson)
    return go.Layout(yjson)
<SYSTEM_TASK:> Try to convert pandas objects to datetimes. <END_TASK>
<USER_TASK:> Description:
def _try_pydatetime(x):
    """Try to convert pandas objects to datetimes.

    Plotly doesn't know how to handle them.
    """
    try:
        # for datetimeindex
        x = [y.isoformat() for y in x.to_pydatetime()]
    except AttributeError:
        pass
    try:
        # for generic series
        x = [y.isoformat() for y in x.dt.to_pydatetime()]
    except AttributeError:
        pass
    return x
<SYSTEM_TASK:> Draws a vertical line from `ymin` to `ymax`. <END_TASK>
<USER_TASK:> Description:
def vertical(x, ymin=0, ymax=1, color=None, width=None, dash=None, opacity=None):
    """Draws a vertical line from `ymin` to `ymax`.

    Parameters
    ----------
    x : number
        Abscissa at which to draw the line.
    ymin : number, optional
    ymax : number, optional
    color : str, optional
    width : number, optional
    dash : str, optional
    opacity : float, optional

    Returns
    -------
    Chart
    """
    lineattr = {}
    if color:
        lineattr['color'] = color
    if width:
        lineattr['width'] = width
    if dash:
        lineattr['dash'] = dash
    layout = dict(shapes=[dict(type='line',
                               x0=x, x1=x, y0=ymin, y1=ymax,
                               opacity=opacity, line=lineattr)])
    return Chart(layout=layout)
<SYSTEM_TASK:> Draws a horizontal line from `xmin` to `xmax`. <END_TASK>
<USER_TASK:> Description:
def horizontal(y, xmin=0, xmax=1, color=None, width=None, dash=None, opacity=None):
    """Draws a horizontal line from `xmin` to `xmax`.

    Parameters
    ----------
    y : number
        Ordinate at which to draw the line.
    xmin : number, optional
    xmax : number, optional
    color : str, optional
    width : number, optional
    dash : str, optional
    opacity : float, optional

    Returns
    -------
    Chart
    """
    lineattr = {}
    if color:
        lineattr['color'] = color
    if width:
        lineattr['width'] = width
    if dash:
        lineattr['dash'] = dash
    layout = dict(shapes=[dict(type='line',
                               x0=xmin, x1=xmax, y0=y, y1=y,
                               opacity=opacity, line=lineattr)])
    return Chart(layout=layout)
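A small usage sketch of the two guide-line helpers. It assumes a `line()` trace function exists in the same module (as `scatter()` further down delegates to it) and that `Chart` objects support `+=` for merging, which `_merge_layout()` above and the `plot += line(...)` pattern in `fill_between()` below imply:

    chart = line(x=[0, 1, 2, 3], y=[2, 0, 3, 1])       # line() assumed from this module
    chart += vertical(1.5, color='red', dash='dash')   # guide line at x = 1.5
    chart += horizontal(2.0, xmin=0, xmax=3, width=2)  # guide line at y = 2
    chart.show()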
<SYSTEM_TASK:> Draws dots. <END_TASK>
<USER_TASK:> Description:
def scatter(x=None, y=None, label=None, color=None, width=None, dash=None,
            opacity=None, markersize=6, yaxis=1, fill=None, text="",
            mode='markers'):
    """Draws dots.

    Parameters
    ----------
    x : array-like, optional
    y : array-like, optional
    label : array-like, optional

    Returns
    -------
    Chart
    """
    return line(x=x, y=y, label=label, color=color, width=width, dash=dash,
                opacity=opacity, mode=mode, yaxis=yaxis, fill=fill, text=text,
                markersize=markersize)
<SYSTEM_TASK:> Create a bar chart. <END_TASK>
<USER_TASK:> Description:
def bar(x=None, y=None, label=None, mode='group', yaxis=1, opacity=None):
    """Create a bar chart.

    Parameters
    ----------
    x : array-like, optional
    y : array-like, optional
    label : array-like, optional
    mode : 'group' or 'stack', default 'group'
    opacity : float, optional

    Returns
    -------
    Chart
        A Chart with bar graph data.
    """
    assert x is not None or y is not None, "x or y must be something"
    yn = 'y' + str(yaxis)
    if y is None:
        y = x
        x = None
    if x is None:
        x = np.arange(len(y))
    else:
        x = _try_pydatetime(x)
    x = np.atleast_1d(x)
    y = np.atleast_1d(y)
    if y.ndim == 2:
        if not hasattr(label, '__iter__'):
            if label is None:
                label = _labels()
            else:
                label = _labels(label)
        data = [go.Bar(x=x, y=yy, name=ll, yaxis=yn, opacity=opacity)
                for ll, yy in zip(label, y.T)]
    else:
        data = [go.Bar(x=x, y=y, name=label, yaxis=yn, opacity=opacity)]
    if yaxis == 1:
        return Chart(data=data, layout={'barmode': mode})
    return Chart(data=data,
                 layout={'barmode': mode,
                         'yaxis' + str(yaxis): dict(overlaying='y')})
<SYSTEM_TASK:> Fill to zero. <END_TASK>
<USER_TASK:> Description:
def fill_zero(x=None, y=None, label=None, color=None, width=None, dash=None,
              opacity=None, mode='lines+markers', **kargs):
    """Fill to zero.

    Parameters
    ----------
    x : array-like, optional
    y : array-like, optional
    label : array-like, optional

    Returns
    -------
    Chart
    """
    return line(x=x, y=y, label=label, color=color, width=width, dash=dash,
                opacity=opacity, mode=mode, fill='tozeroy', **kargs)
<SYSTEM_TASK:> Fill between `ylow` and `yhigh`. <END_TASK>
<USER_TASK:> Description:
def fill_between(x=None, ylow=None, yhigh=None, label=None, color=None,
                 width=None, dash=None, opacity=None, mode='lines+markers',
                 **kargs):
    """Fill between `ylow` and `yhigh`.

    Parameters
    ----------
    x : array-like, optional
    ylow : array-like, optional
    yhigh : array-like, optional

    Returns
    -------
    Chart
    """
    plot = line(x=x, y=ylow, label=label, color=color, width=width, dash=dash,
                opacity=opacity, mode=mode, fill=None, **kargs)
    plot += line(x=x, y=yhigh, label=label, color=color, width=width,
                 dash=dash, opacity=opacity, mode=mode, fill='tonexty',
                 **kargs)
    return plot
<SYSTEM_TASK:> Rug chart. <END_TASK>
<USER_TASK:> Description:
def rug(x, label=None, opacity=None):
    """Rug chart.

    Parameters
    ----------
    x : array-like
    label : str, optional
    opacity : float, optional

    Returns
    -------
    Chart
    """
    x = _try_pydatetime(x)
    x = np.atleast_1d(x)
    data = [go.Scatter(x=x, y=np.ones_like(x), name=label, opacity=opacity,
                       mode='markers', marker=dict(symbol='line-ns-open'))]
    layout = dict(barmode='overlay',
                  hovermode='closest',
                  legend=dict(traceorder='reversed'),
                  xaxis1=dict(zeroline=False),
                  yaxis1=dict(domain=[0.85, 1],
                              showline=False,
                              showgrid=False,
                              zeroline=False,
                              anchor='free',
                              position=0.0,
                              showticklabels=False))
    return Chart(data=data, layout=layout)
<SYSTEM_TASK:> Surface plot. <END_TASK>
<USER_TASK:> Description:
def surface(x, y, z):
    """Surface plot.

    Parameters
    ----------
    x : array-like
    y : array-like
    z : array-like

    Returns
    -------
    Chart
    """
    data = [go.Surface(x=x, y=y, z=z)]
    return Chart(data=data)
<SYSTEM_TASK:> 2D Histogram. <END_TASK>
<USER_TASK:> Description:
def hist2d(x, y, label=None, opacity=None):
    """2D Histogram.

    Parameters
    ----------
    x : array-like
    y : array-like
    label : str, optional
    opacity : float, optional

    Returns
    -------
    Chart
    """
    x = np.atleast_1d(x)
    y = np.atleast_1d(y)
    data = [go.Histogram2d(x=x, y=y, opacity=opacity, name=label)]
    return Chart(data=data)
<SYSTEM_TASK:> Set the angle of the y-axis tick labels. <END_TASK>
<USER_TASK:> Description:
def ytickangle(self, angle, index=1):
    """Set the angle of the y-axis tick labels.

    Parameters
    ----------
    angle : int
        Angle in degrees
    index : int, optional
        Y-axis index

    Returns
    -------
    Chart
    """
    self.layout['yaxis' + str(index)]['tickangle'] = angle
    return self
<SYSTEM_TASK:> Set the size of the y-axis label. <END_TASK>
<USER_TASK:> Description:
def ylabelsize(self, size, index=1):
    """Set the size of the y-axis label.

    Parameters
    ----------
    size : int
    index : int, optional
        Y-axis index

    Returns
    -------
    Chart
    """
    self.layout['yaxis' + str(index)]['titlefont']['size'] = size
    return self
<SYSTEM_TASK:> Set yaxis limits. <END_TASK>
<USER_TASK:> Description:
def ylim(self, low, high, index=1):
    """Set yaxis limits.

    Parameters
    ----------
    low : number
    high : number
    index : int, optional

    Returns
    -------
    Chart
    """
    self.layout['yaxis' + str(index)]['range'] = [low, high]
    return self
<SYSTEM_TASK:> Display the chart. <END_TASK>
<USER_TASK:> Description:
def show(self, filename: Optional[str] = None, show_link: bool = True,
         auto_open: bool = True, detect_notebook: bool = True) -> None:
    """Display the chart.

    Parameters
    ----------
    filename : str, optional
        Save plot to this filename, otherwise it's saved to a temporary file.
    show_link : bool, optional
        Show link to plotly.
    auto_open : bool, optional
        Automatically open the plot (in the browser).
    detect_notebook : bool, optional
        Try to detect if we're running in a notebook.
    """
    kargs = {}
    if detect_notebook and _detect_notebook():
        py.init_notebook_mode()
        plot = py.iplot
    else:
        plot = py.plot
        if filename is None:
            filename = NamedTemporaryFile(prefix='plotly', suffix='.html',
                                          delete=False).name
        kargs['filename'] = filename
        kargs['auto_open'] = auto_open
    plot(self, show_link=show_link, **kargs)
<SYSTEM_TASK:> Given a function, it returns a string that pretty much looks how the function signature would be written in python. <END_TASK>
<USER_TASK:> Description:
def get_method_sig(method):
    """
    Given a function, it returns a string that pretty much looks how the
    function signature would be written in python.

    :param method: a python method
    :return: A string describing the python method signature,
        e.g. "my_method(first_arg, second_arg=42, third_arg='something')"
    """
    # The return value of ArgSpec is a bit weird, as the list of arguments and
    # list of defaults are returned in separate arrays.
    # eg: ArgSpec(args=['first_arg', 'second_arg', 'third_arg'],
    #             varargs=None, keywords=None, defaults=(42, 'something'))
    argspec = inspect.getargspec(method)
    arg_index = 0
    args = []

    # Use the args and defaults arrays returned by argspec and find out
    # which arguments have defaults
    for arg in argspec.args:
        default_arg = _get_default_arg(argspec.args, argspec.defaults, arg_index)
        if default_arg.has_default:
            args.append("%s=%s" % (arg, default_arg.default_value))
        else:
            args.append(arg)
        arg_index += 1
    return "%s(%s)" % (method.__name__, ", ".join(args))
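A quick sanity check of `get_method_sig` on a toy function (this assumes the `_get_default_arg` helper, which is not shown here, returns an object with `has_default` and `default_value` attributes, as the loop above implies):

    def my_method(first_arg, second_arg=42, third_arg='something'):
        pass

    print(get_method_sig(my_method))
    # my_method(first_arg, second_arg=42, third_arg='something')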
<SYSTEM_TASK:> `ThenAt` enables you to partially apply many arguments to a function; the returned partial expects a single argument which will be applied at the `n`th position of the original function. <END_TASK>
<USER_TASK:> Description:
def ThenAt(self, n, f, *_args, **kwargs):
    """
    `ThenAt` enables you to partially apply many arguments to a function; the
    returned partial expects a single argument which will be applied at the
    `n`th position of the original function.

    **Arguments**

    * **n**: position at which the created partial will apply its awaited
      argument on the original function.
    * **f**: function from which the partial will be created.
    * **_args & kwargs**: all `*_args` and `**kwargs` will be passed to the
      function `f`.
    * `_return_type = None`: type of the returned `builder`, if `None` it
      will return the same type of the current `builder`. This special kwarg
      will NOT be passed to `f`.

    You can think of `n` as the position through which the value being piped
    down will pass into `f`. Say you have the following expression

        D == fun(A, B, C)

    all the following are equivalent

        from phi import P, Pipe, ThenAt

        D == Pipe(A, ThenAt(1, fun, B, C))
        D == Pipe(B, ThenAt(2, fun, A, C))
        D == Pipe(C, ThenAt(3, fun, A, B))

    you could also use the shortcuts `Then`, `Then2`,..., `Then5`, which are
    more readable

        from phi import P, Pipe

        D == Pipe(A, P.Then(fun, B, C))
        D == Pipe(B, P.Then2(fun, A, C))
        D == Pipe(C, P.Then3(fun, A, B))

    There is a special case not discussed above: `n = 0`. When this happens
    only the arguments given will be applied to `f`; it will return a partial
    that expects a single argument but completely ignores it

        from phi import P

        D == Pipe(None, P.ThenAt(0, fun, A, B, C))
        D == Pipe(None, P.Then0(fun, A, B, C))

    **Examples**

    Max of 6 and the argument:

        from phi import P

        assert 6 == P.Pipe(
            2,
            P.Then(max, 6)
        )

    Previous is equivalent to

        assert 6 == max(2, 6)

    Open a file in read mode (`'r'`)

        from phi import P

        f = P.Pipe(
            "file.txt",
            P.Then(open, 'r')
        )

    Previous is equivalent to

        f = open("file.txt", 'r')

    Split a string by whitespace and then get the length of each word

        from phi import P

        assert [5, 5, 5] == P.Pipe(
            "Again hello world",

            P.Then(str.split, ' ')
            .Then2(map, len)
        )

    Previous is equivalent to

        x = "Again hello world"

        x = str.split(x, ' ')
        x = map(len, x)

        assert [5, 5, 5] == x

    As you see, `Then2` was very useful because `map` accepts an `iterable`
    as its `2nd` parameter. You can rewrite the previous using the
    [PythonBuilder](https://cgarciae.github.io/phi/python_builder.m.html) and
    the `phi.builder.Builder.Obj` object

        from phi import P, Obj

        assert [5, 5, 5] == P.Pipe(
            "Again hello world",
            Obj.split(' '),
            P.map(len)
        )

    **Also see**

    * `phi.builder.Builder.Obj`
    * [PythonBuilder](https://cgarciae.github.io/phi/python_builder.m.html)
    * `phi.builder.Builder.RegisterAt`
    """
    _return_type = None
    n_args = n - 1

    if '_return_type' in kwargs:
        _return_type = kwargs['_return_type']
        del kwargs['_return_type']

    @utils.lift
    def g(x):
        new_args = _args[0:n_args] + (x,) + _args[n_args:] if n_args >= 0 else _args
        return f(*new_args, **kwargs)

    return self.__then__(g, _return_type=_return_type)
<SYSTEM_TASK:> `Seq` is used to express function composition. <END_TASK>
<USER_TASK:> Description:
def Seq(self, *sequence, **kwargs):
    """
    `Seq` is used to express function composition. The expression

        Seq(f, g)

    is equivalent to

        lambda x: g(f(x))

    As you see, it's a little different from the mathematical definition.
    Execution order flows from left to right, which makes reading and
    reasoning about code much easier. This behaviour is based upon the `|>`
    (pipe) operator found in languages like F#, Elixir and Elm. You can pack
    as many expressions as you like and they will be applied in order to the
    data that is passed through them when compiled and executed.

    In general, the following rules apply for Seq:

    **General Sequence**

        Seq(f0, f1, ..., fn-1, fn)

    is equivalent to

        lambda x: fn(fn-1(...(f1(f0(x)))))

    **Single Function**

        Seq(f)

    is equivalent to

        f

    **Identity**

    The empty Seq

        Seq()

    is equivalent to

        lambda x: x

    ### Examples

        from phi import P, Seq

        f = Seq(
            P * 2,
            P + 1,
            P ** 2
        )

        assert f(1) == 9   # ((1 * 2) + 1) ** 2

    The previous example using `P.Pipe`

        from phi import P

        assert 9 == P.Pipe(
            1,
            P * 2,  # 1 * 2 == 2
            P + 1,  # 2 + 1 == 3
            P ** 2  # 3 ** 2 == 9
        )
    """
    fs = [_parse(elem)._f for elem in sequence]

    def g(x, state):
        return functools.reduce(lambda args, f: f(*args), fs, (x, state))

    return self.__then__(g, **kwargs)
<SYSTEM_TASK:> Returns the number of graphemes in the string. <END_TASK>
<USER_TASK:> Description:
def length(string, until=None):
    """
    Returns the number of graphemes in the string.

    Note that this function needs to traverse the full string to calculate
    the length, unlike `len(string)`, and its time consumption is linear to
    the length of the string (up to the `until` value).

    Only counts up to the `until` argument, if given. This is useful when
    testing the length of a string against some limit and the excess length
    is not interesting.

    >>> rainbow_flag = "🏳️‍🌈"
    >>> len(rainbow_flag)
    4
    >>> grapheme.length(rainbow_flag)
    1
    >>> grapheme.length("".join(str(i) for i in range(100)), 30)
    30
    """
    if until is None:
        return sum(1 for _ in GraphemeIterator(string))

    iterator = graphemes(string)
    count = 0
    while True:
        try:
            if count >= until:
                break
            next(iterator)
        except StopIteration:
            break
        else:
            count += 1

    return count
<SYSTEM_TASK:> Returns a substring of the given string, counting graphemes instead of codepoints. <END_TASK>
<USER_TASK:> Description:
def slice(string, start=None, end=None):
    """
    Returns a substring of the given string, counting graphemes instead of
    codepoints.

    Negative indices are currently not supported.

    >>> string = "tamil நி (ni)"

    >>> string[:7]
    'tamil ந'
    >>> grapheme.slice(string, end=7)
    'tamil நி'
    >>> string[7:]
    'ி (ni)'
    >>> grapheme.slice(string, 7)
    ' (ni)'
    """
    if start is None:
        start = 0
    if end is not None and start >= end:
        return ""

    if start < 0:
        raise NotImplementedError("Negative indexing is currently not supported.")

    sum_ = 0
    start_index = None
    for grapheme_index, grapheme_length in enumerate(grapheme_lengths(string)):
        if grapheme_index == start:
            start_index = sum_
        elif grapheme_index == end:
            return string[start_index:sum_]
        sum_ += grapheme_length

    if start_index is not None:
        return string[start_index:]

    return ""
<SYSTEM_TASK:> Returns true if the sequence of graphemes in substring is also present in string. <END_TASK>
<USER_TASK:> Description:
def contains(string, substring):
    """
    Returns true if the sequence of graphemes in substring is also present
    in string.

    This differs from the normal python `in` operator, since the python
    operator will return true if the sequence of codepoints is within the
    other string without considering grapheme boundaries.

    Performance notes: Very fast if `substring not in string`, since that
    also means that the same graphemes can not be in the two strings.
    Otherwise this function has linear time complexity in relation to the
    string length. It will traverse the sequence of graphemes until a match
    is found, so it will generally perform better for grapheme sequences
    that match early.

    >>> "🇸🇪" in "🇪🇸🇪🇪"
    True
    >>> grapheme.contains("🇪🇸🇪🇪", "🇸🇪")
    False
    """
    if substring not in string:
        return False

    substr_graphemes = list(graphemes(substring))

    if len(substr_graphemes) == 0:
        return True
    elif len(substr_graphemes) == 1:
        return substr_graphemes[0] in graphemes(string)
    else:
        str_iter = graphemes(string)
        str_sub_part = []
        for _ in range(len(substr_graphemes)):
            try:
                str_sub_part.append(next(str_iter))
            except StopIteration:
                return False

        for g in str_iter:
            if str_sub_part == substr_graphemes:
                return True

            str_sub_part.append(g)
            str_sub_part.pop(0)
        return str_sub_part == substr_graphemes
<SYSTEM_TASK:> Like str.startswith, but also checks that the string starts with the given prefix's sequence of graphemes. <END_TASK>
<USER_TASK:> Description:
def startswith(string, prefix):
    """
    Like str.startswith, but also checks that the string starts with the
    given prefix's sequence of graphemes.

    str.startswith may return true for a prefix that is not visually
    represented as a prefix if a grapheme cluster is continued after the
    prefix ends.

    >>> grapheme.startswith("✊🏾", "✊")
    False
    >>> "✊🏾".startswith("✊")
    True
    """
    return string.startswith(prefix) and safe_split_index(string, len(prefix)) == len(prefix)
<SYSTEM_TASK:> Like str.endswith, but also checks that the string ends with the given suffix's sequence of graphemes. <END_TASK>
<USER_TASK:> Description:
def endswith(string, suffix):
    """
    Like str.endswith, but also checks that the string ends with the given
    suffix's sequence of graphemes.

    str.endswith may return true for a suffix that is not visually
    represented as a suffix if a grapheme cluster is initiated before the
    suffix starts.

    >>> grapheme.endswith("🏳️‍🌈", "🌈")
    False
    >>> "🏳️‍🌈".endswith("🌈")
    True
    """
    expected_index = len(string) - len(suffix)
    return string.endswith(suffix) and safe_split_index(string, expected_index) == expected_index
<SYSTEM_TASK:> Returns the highest index up to `max_len` at which the given string can be sliced, without breaking a grapheme. <END_TASK>
<USER_TASK:> Description:
def safe_split_index(string, max_len):
    """
    Returns the highest index up to `max_len` at which the given string can
    be sliced without breaking a grapheme.

    This is useful for when you want to split or take a substring from a
    string, and don't really care about the exact grapheme length, but don't
    want to risk breaking existing graphemes.

    This function does not normally traverse the full grapheme sequence up
    to the given length, so it can be used for arbitrarily long strings and
    high `max_len`s. However, some grapheme boundaries depend on the
    previous state, so the worst case performance is O(n). In practice,
    it's only very long non-broken sequences of country flags (represented
    as Regional Indicators) that will perform badly.

    The return value will always be between `0` and `len(string)`.

    >>> string = "tamil நி (ni)"
    >>> i = grapheme.safe_split_index(string, 7)
    >>> i
    6
    >>> string[:i]
    'tamil '
    >>> string[i:]
    'நி (ni)'
    """
    last_index = get_last_certain_break_index(string, max_len)
    for l in grapheme_lengths(string[last_index:]):
        if last_index + l > max_len:
            break
        last_index += l
    return last_index
<SYSTEM_TASK:> Write a header structure into a B1 logfile. <END_TASK>
<USER_TASK:> Description:
def writeB1logfile(filename, data):
    """Write a header structure into a B1 logfile.

    Inputs:
        filename: name of the file.
        data: header dictionary

    Notes:
        exceptions pass through to the caller.
    """
    allkeys = list(data.keys())
    f = open(filename, 'wt', encoding='utf-8')
    for ld in _logfile_data:  # process each line
        linebegin = ld[0]
        fieldnames = ld[1]
        # set the default formatter if it is not given
        if len(ld) < 3:
            formatter = str
        elif ld[2] is None:
            formatter = str
        else:
            formatter = ld[2]
        # this will contain the formatted values.
        formatted = ''
        if isinstance(fieldnames, str):
            # scalar field name, just one field. Formatter should be a
            # callable.
            if fieldnames not in allkeys:
                # this field has already been processed
                continue
            try:
                formatted = formatter(data[fieldnames])
            except KeyError:
                # field not found in param structure
                continue
        elif isinstance(fieldnames, tuple):
            # more than one field names in a tuple. In this case, formatter
            # can be a tuple of callables...
            if all([(fn not in allkeys) for fn in fieldnames]):
                # if all the fields have been processed:
                continue
            if isinstance(formatter, tuple) and len(formatter) == len(fieldnames):
                formatted = ' '.join([ft(data[fn])
                                      for ft, fn in zip(formatter, fieldnames)])
            # ...or a single callable...
            elif not isinstance(formatter, tuple):
                formatted = formatter([data[fn] for fn in fieldnames])
            # ...otherwise raise an exception.
            else:
                raise SyntaxError('Programming error: formatter should be a '
                                  'scalar or a tuple of the same length as '
                                  'the field names in logfile_data.')
        else:  # fieldnames is neither a string, nor a tuple.
            raise SyntaxError('Invalid syntax (programming error) in '
                              'logfile_data in writeparamfile().')
        # try to get the values
        linetowrite = linebegin + ':\t' + formatted + '\n'
        f.write(linetowrite)
        if isinstance(fieldnames, tuple):
            for fn in fieldnames:  # remove the params treated.
                if fn in allkeys:
                    allkeys.remove(fn)
        else:
            if fieldnames in allkeys:
                allkeys.remove(fieldnames)
    # write untreated params
    for k in allkeys:
        linetowrite = k + ':\t' + str(data[k]) + '\n'
        f.write(linetowrite)
    f.close()
<SYSTEM_TASK:> Helper function to interpret lines in an EDF file header. <END_TASK>
<USER_TASK:> Description:
def _readedf_extractline(left, right):
    """Helper function to interpret lines in an EDF file header.
    """
    functions = [int, float,
                 lambda l: float(l.split(None, 1)[0]),
                 lambda l: int(l.split(None, 1)[0]),
                 dateutil.parser.parse,
                 lambda x: str(x)]
    for f in functions:
        try:
            right = f(right)
            break
        except ValueError:
            continue
    return right
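A hedged illustration of the coercion order (int, float, first-token float, first-token int, date, plain string) on typical EDF header values; the header keys below are made up for the example:

    print(_readedf_extractline('Dim_1', '1024'))           # -> 1024 (int)
    print(_readedf_extractline('PSize_1', '0.000130 m'))   # -> 0.00013 (float of the first token)
    print(_readedf_extractline('Date', '2012-01-01'))      # -> datetime.datetime(2012, 1, 1, 0, 0)
    print(_readedf_extractline('Title', 'test exposure'))  # -> 'test exposure' (falls through to str)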
<SYSTEM_TASK:> Read a header from a MarResearch .image file. <END_TASK>
<USER_TASK:> Description:
def readmarheader(filename):
    """Read a header from a MarResearch .image file."""
    with open(filename, 'rb') as f:
        intheader = np.fromstring(f.read(10 * 4), np.int32)
        floatheader = np.fromstring(f.read(15 * 4), '<f4')
        strheader = f.read(24)
        f.read(4)
        otherstrings = [f.read(16) for i in range(29)]
        return {'Xsize': intheader[0],
                'Ysize': intheader[1],
                'MeasTime': intheader[8],
                'BeamPosX': floatheader[7],
                'BeamPosY': floatheader[8],
                'Wavelength': floatheader[9],
                'Dist': floatheader[10],
                '__Origin__': 'MarResearch .image',
                'recordlength': intheader[2],
                'highintensitypixels': intheader[4],
                'highintensityrecords': intheader[5],
                'Date': dateutil.parser.parse(strheader),
                'Detector': 'MARCCD',
                '__particle__': 'photon'}
<SYSTEM_TASK:> Second order polynomial <END_TASK>
<USER_TASK:> Description:
def Square(x, a, b, c):
    """Second order polynomial

    Inputs:
    -------
        ``x``: independent variable
        ``a``: coefficient of the second-order term
        ``b``: coefficient of the first-order term
        ``c``: additive constant

    Formula:
    --------
        ``a*x^2 + b*x + c``
    """
    return a * x ** 2 + b * x + c
<SYSTEM_TASK:> Third order polynomial <END_TASK>
<USER_TASK:> Description:
def Cube(x, a, b, c, d):
    """Third order polynomial

    Inputs:
    -------
        ``x``: independent variable
        ``a``: coefficient of the third-order term
        ``b``: coefficient of the second-order term
        ``c``: coefficient of the first-order term
        ``d``: additive constant

    Formula:
    --------
        ``a*x^3 + b*x^2 + c*x + d``
    """
    return a * x ** 3 + b * x ** 2 + c * x + d
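Since `Square` and `Cube` are plain model functions of the form `f(x, *params)`, they can be fed directly to `scipy.optimize.curve_fit`; a hedged sketch on synthetic data:

    import numpy as np
    from scipy.optimize import curve_fit

    x = np.linspace(-3, 3, 50)
    y = Square(x, 2.0, -1.0, 0.5) + np.random.normal(scale=0.1, size=x.size)
    popt, pcov = curve_fit(Square, x, y)
    print(popt)  # approximately [2.0, -1.0, 0.5]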
<SYSTEM_TASK:> Find all subdirectories of a directory. <END_TASK>
<USER_TASK:> Description:
def find_subdirs(startdir='.', recursion_depth=None):
    """Find all subdirectories of a directory.

    Inputs:
        startdir: directory to start with. Defaults to the current folder.
        recursion_depth: number of levels to traverse. None is infinite.

    Output: a list of absolute names of subfolders.

    Examples:
        >>> find_subdirs('dir', 0)  # returns just ['dir']
        >>> find_subdirs('dir', 1)  # returns all direct (first-level)
        ...                         # subdirs of 'dir'.
    """
    startdir = os.path.expanduser(startdir)
    # depth 0 (or less) means: do not descend into subfolders at all, as the
    # docstring example promises (the original code returned the direct
    # subdirs here as well).
    if recursion_depth is not None and recursion_depth <= 0:
        return [startdir]
    direct_subdirs = [os.path.join(startdir, x) for x in os.listdir(startdir)
                      if os.path.isdir(os.path.join(startdir, x))]
    if recursion_depth is None:
        next_recursion_depth = None
    else:
        next_recursion_depth = recursion_depth - 1
    if (recursion_depth is not None) and (recursion_depth <= 1):
        return [startdir] + direct_subdirs
    else:
        subdirs = []
        for d in direct_subdirs:
            subdirs.extend(find_subdirs(d, next_recursion_depth))
        return [startdir] + subdirs
<SYSTEM_TASK:> Find multiple peaks in the dataset given by vectors x and y. <END_TASK>
<USER_TASK:> Description:
def findpeak_multi(x, y, dy, N, Ntolerance, Nfit=None, curve='Lorentz',
                   return_xfit=False, return_stat=False):
    """Find multiple peaks in the dataset given by vectors x and y.

    Points are searched for in the dataset where the N points before and
    after have strictly lower values than them. To get rid of false
    negatives caused by fluctuations, Ntolerance is introduced. It is the
    number of outlier points to be tolerated, i.e. points on the left-hand
    side of the peak where the growing tendency breaks or on the right-hand
    side where the diminishing tendency breaks. Increasing this number,
    however, gives rise to false positives.

    Inputs:
        x, y, dy: vectors defining the data-set. dy can be None.
        N, Ntolerance: the parameters of the peak-finding routines
        Nfit: the number of points on the left and on the right of the peak
            to be used for least squares refinement of the peak positions.
        curve: the type of the curve to be fitted to the peaks. Can be
            'Lorentz' or 'Gauss'.
        return_xfit: if the abscissa used for fitting is to be returned.
        return_stat: if the fitting statistics is to be returned for each
            peak.

    Outputs:
        position, hwhm, baseline, amplitude, (xfit): lists

    Notes:
        Peaks are identified where the curve grows N points before and
        decreases N points after. On noisy curves Ntolerance may improve
        the results, i.e. it relaxes the 2*N criterion mentioned above.
    """
    if Nfit is None:
        Nfit = N
    # find points where the curve grows for N points before them and
    # decreases for N points after them. To accomplish this, we create
    # an indicator array of the sign of the first derivative.
    sgndiff = np.sign(np.diff(y))
    xdiff = x[:-1]  # associate difference values to the lower 'x' value.
    pix = np.arange(len(x) - 1)  # pixel coordinates
    # create an indicator array as the sum of sgndiff shifted left and
    # right. Whenever an element of this is 2*N, it fulfills the criteria
    # above.
    indicator = np.zeros(len(sgndiff) - 2 * N)
    for i in range(2 * N):
        indicator += np.sign(N - i) * sgndiff[i:-2 * N + i]
    # add the last one, since the indexing is different (would be
    # [2*N:0], which is not what we want)
    indicator += -sgndiff[2 * N:]
    # find the positions (indices) of the peak. The strict criteria is
    # relaxed somewhat by using the Ntolerance value. Note the use of
    # 2*Ntolerance, since each outlier point creates two outliers in
    # sgndiff (-1 instead of +1 and vice versa).
    peakpospix = pix[N:-N][indicator >= 2 * N - 2 * Ntolerance]
    ypeak = y[peakpospix]
    # Now refine the found positions by least-squares fitting. But
    # first we have to sort out other non-peaks, i.e. found points
    # which have other found points with higher values in their [-N,N]
    # neighbourhood.
    pos = []
    ampl = []
    hwhm = []
    baseline = []
    xfit = []
    stat = []
    dy1 = None
    for i in range(len(ypeak)):
        if not [j for j in list(range(i + 1, len(ypeak))) + list(range(0, i))
                if abs(peakpospix[j] - peakpospix[i]) <= N and ypeak[i] < ypeak[j]]:
            # only leave maxima.
            idx = peakpospix[i]
            if dy is not None:
                dy1 = dy[(idx - Nfit):(idx + Nfit + 1)]
            xfit_ = x[(idx - Nfit):(idx + Nfit + 1)]
            pos_, hwhm_, baseline_, ampl_, stat_ = findpeak_single(
                xfit_, y[(idx - Nfit):(idx + Nfit + 1)], dy1,
                position=x[idx], return_stat=True)
            stat.append(stat_)
            xfit.append(xfit_)
            pos.append(pos_)
            ampl.append(ampl_)
            hwhm.append(hwhm_)
            baseline.append(baseline_)
    results = [pos, hwhm, baseline, ampl]
    if return_xfit:
        results.append(xfit)
    if return_stat:
        results.append(stat)
    return tuple(results)
<SYSTEM_TASK:> Open a SPEC file and read its content <END_TASK>
<USER_TASK:> Description:
def readspec(filename, read_scan=None):
    """Open a SPEC file and read its content

    Inputs:
        filename: string
            the file to open
        read_scan: None, 'all' or integer
            the index of scan to be read from the file. If None, no scan
            should be read. If 'all', all scans should be read. If a number,
            just the scan with that number should be read.

    Output: the data in the spec file in a dict.
    """
    with open(filename, 'rt') as f:
        sf = {'motors': [], 'maxscannumber': 0}
        sf['originalfilename'] = filename
        lastscannumber = None
        while True:
            l = f.readline()
            if l.startswith('#F'):
                sf['filename'] = l[2:].strip()
            elif l.startswith('#E'):
                sf['epoch'] = int(l[2:].strip())
                sf['datetime'] = datetime.datetime.fromtimestamp(sf['epoch'])
            elif l.startswith('#D'):
                sf['datestring'] = l[2:].strip()
            elif l.startswith('#C'):
                sf['comment'] = l[2:].strip()
            elif l.startswith('#O'):
                try:
                    l = l.split(None, 1)[1]
                except IndexError:
                    continue
                if 'motors' not in list(sf.keys()):
                    sf['motors'] = []
                sf['motors'].extend([x.strip() for x in l.split(' ')])
            elif not l.strip():
                # empty line, signifies the end of the header part. The next
                # line will be a scan.
                break
        sf['scans'] = {}
        if read_scan is not None:
            if read_scan == 'all':
                nr = None
            else:
                nr = read_scan
            try:
                while True:
                    s = readspecscan(f, nr)
                    if isinstance(s, dict):
                        sf['scans'][s['number']] = s
                        if nr is not None:
                            break
                        sf['maxscannumber'] = max(
                            sf['maxscannumber'], s['number'])
                    elif s is not None:
                        sf['maxscannumber'] = max(sf['maxscannumber'], s)
            except SpecFileEOF:
                pass
        else:
            while True:
                l = f.readline()
                if not l:
                    break
                if l.startswith('#S'):
                    n = int(l[2:].split()[0])
                    sf['maxscannumber'] = max(sf['maxscannumber'], n)
        for n in sf['scans']:
            s = sf['scans'][n]
            s['motors'] = sf['motors']
            if 'comment' not in s:
                s['comment'] = sf['comment']
            if 'positions' not in s:
                s['positions'] = [None] * len(sf['motors'])
    return sf
<SYSTEM_TASK:> Find beam center with the "gravity" method <END_TASK> <USER_TASK:> Description: def findbeam_gravity(data, mask): """Find beam center with the "gravity" method Inputs: data: scattering image mask: mask matrix Output: a vector of length 2 with the x (row) and y (column) coordinates of the origin, starting from 1 """
# for each row and column find the center of gravity data1 = data.copy() # take a copy, because elements will be tampered with data1[mask == 0] = 0 # set masked elements to zero # vector of x (row) coordinates x = np.arange(data1.shape[0]) # vector of y (column) coordinates y = np.arange(data1.shape[1]) # two column vectors, both containing ones. The length of onex and # oney corresponds to length of x and y, respectively. onex = np.ones_like(x) oney = np.ones_like(y) # Multiply the matrix with x. Each element of the resulting column # vector will contain the center of gravity of the corresponding row # in the matrix, multiplied by the "weight". Thus: nix_i=sum_j( A_ij # * x_j). If we divide this by spamx_i=sum_j(A_ij), then we get the # center of gravity. The length of this column vector is len(y). nix = np.dot(x, data1) spamx = np.dot(onex, data1) # indices where both nix and spamx is nonzero. goodx = ((nix != 0) & (spamx != 0)) # trim y, nix and spamx by goodx, eliminate invalid points. nix = nix[goodx] spamx = spamx[goodx] # now do the same for the column direction. niy = np.dot(data1, y) spamy = np.dot(data1, oney) goody = ((niy != 0) & (spamy != 0)) niy = niy[goody] spamy = spamy[goody] # column coordinate of the center in each row will be contained in # ycent, the row coordinate of the center in each column will be # in xcent. ycent = nix / spamx xcent = niy / spamy # return the mean values as the centers. return [xcent.mean(), ycent.mean()]
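A hedged sanity check on synthetic data: a symmetric 2D Gaussian peak should be recovered by the centre-of-gravity method at its true centre (a centre with equal row and column coordinates is used here, so the row/column ordering convention of the return value does not matter for the check):

    import numpy as np

    row, col = np.ogrid[0:128, 0:128]
    data = np.exp(-((row - 60.0) ** 2 + (col - 60.0) ** 2) / 50.0)
    mask = np.ones_like(data)            # nothing masked
    print(findbeam_gravity(data, mask))  # approximately [60.0, 60.0]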
<SYSTEM_TASK:> Find beam center with the "slices" method <END_TASK> <USER_TASK:> Description: def findbeam_slices(data, orig_initial, mask=None, maxiter=0, epsfcn=0.001, dmin=0, dmax=np.inf, sector_width=np.pi / 9.0, extent=10, callback=None): """Find beam center with the "slices" method Inputs: data: scattering matrix orig_initial: estimated value for x (row) and y (column) coordinates of the beam center, starting from 1. mask: mask matrix. If None, nothing will be masked. Otherwise it should be of the same size as data. Nonzero means non-masked. maxiter: maximum number of iterations for scipy.optimize.leastsq epsfcn: input for scipy.optimize.leastsq dmin: disregard pixels nearer to the origin than this dmax: disregard pixels farther from the origin than this sector_width: width of sectors in radians extent: approximate distance of the current and the real origin in pixels. Too high a value makes the fitting procedure unstable. Too low a value does not permit to move away the current origin. callback: callback function (expects no arguments) Output: a vector of length 2 with the x (row) and y (column) coordinates of the origin. """
if mask is None: mask = np.ones(data.shape) data = data.astype(np.double) def targetfunc(orig, data, mask, orig_orig, callback): # integrate four sectors I = [None] * 4 p, Ints, A = radint_nsector(data, None, -1, -1, -1, orig[0] + orig_orig[0], orig[1] + orig_orig[1], mask=mask, phi0=np.pi / 4 - 0.5 * sector_width, dphi=sector_width, Nsector=4) minpix = max(max(p.min(0).tolist()), dmin) maxpix = min(min(p.max(0).tolist()), dmax) if (maxpix < minpix): raise ValueError('The four slices do not overlap! Please give a\ better approximation for the origin or use another centering method.') for i in range(4): I[i] = Ints[:, i][(p[:, i] >= minpix) & (p[:, i] <= maxpix)] ret = ((I[0] - I[2]) ** 2 + (I[1] - I[3]) ** 2) / (maxpix - minpix) if callback is not None: callback() return ret orig = scipy.optimize.leastsq(targetfunc, np.array([extent, extent]), args=(data, 1 - mask.astype(np.uint8), np.array(orig_initial) - extent, callback), maxfev=maxiter, epsfcn=0.01) return orig[0] + np.array(orig_initial) - extent
<SYSTEM_TASK:> Find beam center using azimuthal integration <END_TASK>
<USER_TASK:> Description:
def findbeam_azimuthal(data, orig_initial, mask=None, maxiter=100, Ntheta=50,
                       dmin=0, dmax=np.inf, extent=10, callback=None):
    """Find beam center using azimuthal integration

    Inputs:
        data: scattering matrix
        orig_initial: estimated value for x (row) and y (column) coordinates
            of the beam center, starting from 1.
        mask: mask matrix. If None, nothing will be masked. Otherwise it
            should be of the same size as data. Nonzero means non-masked.
        maxiter: maximum number of iterations for scipy.optimize.fmin
        Ntheta: the number of theta points for the azimuthal integration
        dmin: pixels nearer to the origin than this will be excluded from
            the azimuthal integration
        dmax: pixels farther from the origin than this will be excluded
            from the azimuthal integration
        extent: approximate distance of the current and the real origin in
            pixels. Too high a value makes the fitting procedure unstable.
            Too low a value does not permit to move away the current origin.
        callback: callback function (expects no arguments)

    Output:
        a vector of length 2 with the x and y coordinates of the origin,
        starting from 1
    """
    if mask is None:
        mask = np.ones(data.shape)
    data = data.astype(np.double)

    def targetfunc(orig, data, mask, orig_orig, callback):
        def sinfun(p, x, y):
            return (y - np.sin(x + p[1]) * p[0] - p[2]) / np.sqrt(len(x))
        t, I, a = azimintpix(data, None, orig[0] + orig_orig[0],
                             orig[1] + orig_orig[1], mask.astype('uint8'),
                             Ntheta, dmin, dmax)
        if len(a) > (a > 0).sum():
            raise ValueError('findbeam_azimuthal: non-complete azimuthal '
                             'average, please consider changing dmin, dmax '
                             'and/or orig_initial!')
        p = ((I.max() - I.min()) / 2.0, t[I == I.max()][0], I.mean())
        p = scipy.optimize.leastsq(sinfun, p, (t, I))[0]
        # print("findbeam_azimuthal: orig =", orig, "amplitude =", abs(p[0]))
        if callback is not None:
            callback()
        return abs(p[0])
    orig1 = scipy.optimize.fmin(targetfunc, np.array([extent, extent]),
                                args=(data, 1 - mask,
                                      np.array(orig_initial) - extent,
                                      callback),
                                maxiter=maxiter, disp=0)
    return orig1 + np.array(orig_initial) - extent
<SYSTEM_TASK:> Find beam center using azimuthal integration and folding <END_TASK>
<USER_TASK:> Description:
def findbeam_azimuthal_fold(data, orig_initial, mask=None, maxiter=100,
                            Ntheta=50, dmin=0, dmax=np.inf, extent=10,
                            callback=None):
    """Find beam center using azimuthal integration and folding

    Inputs:
        data: scattering matrix
        orig_initial: estimated value for x (row) and y (column) coordinates
            of the beam center, starting from 1.
        mask: mask matrix. If None, nothing will be masked. Otherwise it
            should be of the same size as data. Nonzero means non-masked.
        maxiter: maximum number of iterations for scipy.optimize.fmin
        Ntheta: the number of theta points for the azimuthal integration.
            Should be even!
        dmin: pixels nearer to the origin than this will be excluded from
            the azimuthal integration
        dmax: pixels farther from the origin than this will be excluded
            from the azimuthal integration
        extent: approximate distance of the current and the real origin in
            pixels. Too high a value makes the fitting procedure unstable.
            Too low a value does not permit to move away the current origin.
        callback: callback function (expects no arguments)

    Output:
        a vector of length 2 with the x and y coordinates of the origin,
        starting from 1
    """
    if Ntheta % 2:
        raise ValueError('Ntheta should be even!')
    if mask is None:
        mask = np.ones_like(data).astype(np.uint8)
    data = data.astype(np.double)
    # the function to minimize is the sum of squared difference of two
    # halves of the azimuthal integral.

    def targetfunc(orig, data, mask, orig_orig, callback):
        I = azimintpix(data, None, orig[0] + orig_orig[0],
                       orig[1] + orig_orig[1], mask, Ntheta, dmin, dmax)[1]
        if callback is not None:
            callback()
        # integer division: Ntheta / 2 would be a float index in Python 3
        return np.sum((I[:Ntheta // 2] - I[Ntheta // 2:]) ** 2) / Ntheta
    orig1 = scipy.optimize.fmin(targetfunc, np.array([extent, extent]),
                                args=(data, 1 - mask,
                                      np.array(orig_initial) - extent,
                                      callback),
                                maxiter=maxiter, disp=0)
    return orig1 + np.array(orig_initial) - extent
<SYSTEM_TASK:> Find beam with 2D weighting of semitransparent beamstop area <END_TASK>
<USER_TASK:> Description:
def findbeam_semitransparent(data, pri, threshold=0.05):
    """Find beam with 2D weighting of semitransparent beamstop area

    Inputs:
        data: scattering matrix
        pri: list of four: [xmin, xmax, ymin, ymax] for the borders of the
            beam area under the semitransparent beamstop. X corresponds to
            the column index (i.e. A[Y, X] is the element of A from the Xth
            column and the Yth row). You can get these by zooming on the
            figure and retrieving the result of axis() (like in Matlab).
        threshold: do not count pixels if their intensity falls below
            max_intensity*threshold. max_intensity is the highest count rate
            in the current row or column, respectively. Set None to disable
            this feature.

    Outputs: bcx, bcy
        the x and y coordinates of the primary beam
    """
    # cast to int: NumPy float results from floor/ceil cannot be used as
    # slice indices in Python 3
    rowmin = int(np.floor(min(pri[2:])))
    rowmax = int(np.ceil(max(pri[2:])))
    colmin = int(np.floor(min(pri[:2])))
    colmax = int(np.ceil(max(pri[:2])))
    if threshold is not None:
        # beam area on the scattering image
        B = data[rowmin:rowmax, colmin:colmax]
        # row and column indices
        Ri = np.arange(rowmin, rowmax)
        Ci = np.arange(colmin, colmax)
        # average over column index, will be a concave curve
        Ravg = B.mean(1)
        # average over row index, will be a concave curve
        Cavg = B.mean(0)
        # find the maxima in both directions and their positions
        maxR = Ravg.max()
        maxRpos = Ravg.argmax()
        maxC = Cavg.max()
        maxCpos = Cavg.argmax()
        # cut off pixels which are smaller than threshold*peak_height
        Rmin = Ri[((Ravg - Ravg[0]) >= ((maxR - Ravg[0]) * threshold)) &
                  (Ri < maxRpos)][0]
        Rmax = Ri[((Ravg - Ravg[-1]) >= ((maxR - Ravg[-1]) * threshold)) &
                  (Ri > maxRpos)][-1]
        Cmin = Ci[((Cavg - Cavg[0]) >= ((maxC - Cavg[0]) * threshold)) &
                  (Ci < maxCpos)][0]
        Cmax = Ci[((Cavg - Cavg[-1]) >= ((maxC - Cavg[-1]) * threshold)) &
                  (Ci > maxCpos)][-1]
    else:
        Rmin = rowmin
        Rmax = rowmax
        Cmin = colmin
        Cmax = colmax
    d = data[Rmin:Rmax + 1, Cmin:Cmax + 1]
    x = np.arange(Rmin, Rmax + 1)
    y = np.arange(Cmin, Cmax + 1)
    bcx = (d.sum(1) * x).sum() / d.sum()
    bcy = (d.sum(0) * y).sum() / d.sum()
    return bcx, bcy
<SYSTEM_TASK:> Find the beam by minimizing the width of a peak in the radial average. <END_TASK>
<USER_TASK:> Description:
def findbeam_radialpeak(data, orig_initial, mask, rmin, rmax, maxiter=100,
                        drive_by='amplitude', extent=10, callback=None):
    """Find the beam by minimizing the width of a peak in the radial
    average.

    Inputs:
        data: scattering matrix
        orig_initial: first guess for the origin
        mask: mask matrix. Nonzero is non-masked.
        rmin, rmax: distance from the origin (in pixels) of the peak range.
        drive_by: 'hwhm' to minimize the hwhm of the peak or 'amplitude' to
            maximize the peak amplitude
        extent: approximate distance of the current and the real origin in
            pixels. Too high a value makes the fitting procedure unstable.
            Too low a value does not permit to move away the current origin.
        callback: callback function (expects no arguments)

    Outputs: the beam coordinates

    Notes: A Gaussian will be fitted.
    """
    orig_initial = np.array(orig_initial)
    mask = 1 - mask.astype(np.uint8)
    data = data.astype(np.double)
    pix = np.arange(rmin * 1.0, rmax * 1.0, 1)
    if drive_by.lower() == 'hwhm':
        def targetfunc(orig, data, mask, orig_orig, callback):
            I = radintpix(data, None, orig[0] + orig_orig[0],
                          orig[1] + orig_orig[1], mask, pix)[1]
            hwhm = float(misc.findpeak_single(pix, I)[1])
            if callback is not None:
                callback()
            return abs(hwhm)
    elif drive_by.lower() == 'amplitude':
        def targetfunc(orig, data, mask, orig_orig, callback):
            I = radintpix(data, None, orig[0] + orig_orig[0],
                          orig[1] + orig_orig[1], mask, pix)[1]
            fp = misc.findpeak_single(pix, I)
            height = -float(fp[2] + fp[3])
            if callback is not None:
                callback()
            return height
    else:
        raise ValueError('Invalid argument for drive_by: %s' % drive_by)
    orig1 = scipy.optimize.fmin(targetfunc, np.array([extent, extent]),
                                args=(data, mask, orig_initial - extent,
                                      callback),
                                maxiter=maxiter, disp=0)
    return np.array(orig_initial) - extent + orig1
<SYSTEM_TASK:> Calculate a scaling factor, by which this curve is to be multiplied to best fit the other one. <END_TASK>
<USER_TASK:> Description:
def scalefactor(self, other, qmin=None, qmax=None, Npoints=None):
    """Calculate a scaling factor, by which this curve is to be multiplied
    to best fit the other one.

    Inputs:
        other: the other curve (an instance of GeneralCurve or of a subclass
            of it)
        qmin: lower cut-off (None to determine the common range
            automatically)
        qmax: upper cut-off (None to determine the common range
            automatically)
        Npoints: number of points to use in the common x-range (None
            defaults to the lowest value among the two datasets)

    Outputs:
        The scaling factor determined by interpolating both datasets to the
        same abscissa and calculating the ratio of their integrals,
        calculated by the trapezoid formula. Error propagation is taken into
        account.
    """
    if qmin is None:
        qmin = max(self.q.min(), other.q.min())
    if qmax is None:
        # bug fix: the result was assigned to an unused 'xmax', leaving qmax None
        qmax = min(self.q.max(), other.q.max())
    data1 = self.trim(qmin, qmax)
    data2 = other.trim(qmin, qmax)
    if Npoints is None:
        Npoints = min(len(data1), len(data2))
    commonx = np.linspace(max(data1.q.min(), data2.q.min()),
                          min(data2.q.max(), data1.q.max()), Npoints)
    data1 = data1.interpolate(commonx)
    data2 = data2.interpolate(commonx)
    return nonlinear_odr(data1.Intensity, data2.Intensity,
                         data1.Error, data2.Error,
                         lambda x, a: a * x, [1])[0]
<SYSTEM_TASK:> Insert fixed parameters in a covariance matrix <END_TASK>
<USER_TASK:> Description:
def _substitute_fixed_parameters_covar(self, covar):
    """Insert fixed parameters in a covariance matrix"""
    covar_resolved = np.empty((len(self._fixed_parameters),
                               len(self._fixed_parameters)))
    indices_of_fixed_parameters = [i for i in range(len(self.parameters()))
                                   if self._fixed_parameters[i] is not None]
    indices_of_free_parameters = [i for i in range(len(self.parameters()))
                                  if self._fixed_parameters[i] is None]
    for i in range(covar_resolved.shape[0]):
        if i in indices_of_fixed_parameters:
            # the i-th parameter was fixed: the row and column corresponding
            # to it carry no uncertainty, thus they are set to zero.
            covar_resolved[i, :] = 0
            continue
        for j in range(covar_resolved.shape[1]):
            if j in indices_of_fixed_parameters:
                covar_resolved[:, j] = 0
                continue
            covar_resolved[i, j] = covar[indices_of_free_parameters.index(i),
                                         indices_of_free_parameters.index(j)]
    return covar_resolved
<SYSTEM_TASK:> Load a radial scattering curve <END_TASK>
<USER_TASK:> Description:
def loadcurve(self, fsn: int) -> classes2.Curve:
    """Load a radial scattering curve"""
    return classes2.Curve.new_from_file(
        self.find_file(self._exposureclass + '_%05d.txt' % fsn))
<SYSTEM_TASK:> Save the intensity and error matrices to a file <END_TASK>
<USER_TASK:> Description:
def writeint2dnorm(filename, Intensity, Error=None):
    """Save the intensity and error matrices to a file

    Inputs
    ------
    filename: string
        the name of the file
    Intensity: np.ndarray
        the intensity matrix
    Error: np.ndarray, optional
        the error matrix (can be ``None``, if no error matrix is to be
        saved)

    Output
    ------
    None
    """
    whattosave = {'Intensity': Intensity}
    if Error is not None:
        whattosave['Error'] = Error
    if filename.upper().endswith('.NPZ'):
        np.savez(filename, **whattosave)
    elif filename.upper().endswith('.MAT'):
        scipy.io.savemat(filename, whattosave)
    else:  # text file
        np.savetxt(filename, Intensity)
        if Error is not None:
            name, ext = os.path.splitext(filename)
            np.savetxt(name + '_error' + ext, Error)
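A hedged usage sketch showing how the file extension selects the container format:

    import numpy as np

    I = np.random.rand(8, 8)
    E = 0.1 * I
    writeint2dnorm('intensity.npz', I, E)  # one .npz with 'Intensity' and 'Error'
    writeint2dnorm('intensity.mat', I, E)  # MATLAB .mat file
    writeint2dnorm('intensity.txt', I, E)  # intensity.txt plus intensity_error.txt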
<SYSTEM_TASK:> Read a version 2 Bessy Data File <END_TASK>
<USER_TASK:> Description:
def readbdfv2(filename, bdfext='.bdf', bhfext='.bhf'):
    """Read a version 2 Bessy Data File

    Inputs
    ------
    filename: string
        the name of the input file. One can give the complete header or
        datafile name or just the base name without the extensions.
    bdfext: string, optional
        the extension of the data file
    bhfext: string, optional
        the extension of the header file

    Output
    ------
    the data structure in a dict. Header is loaded implicitly.

    Notes
    -----
    BDFv2 header and scattering data are stored separately in the header
    and the data files. Given the file name both are loaded.
    """
    datas = header.readbhfv2(filename, True, bdfext, bhfext)
    return datas
<SYSTEM_TASK:> Read a two-dimensional scattering pattern from a MarResearch .image file. <END_TASK>
<USER_TASK:> Description:
def readmar(filename):
    """Read a two-dimensional scattering pattern from a MarResearch .image
    file.
    """
    hed = header.readmarheader(filename)
    with open(filename, 'rb') as f:
        h = f.read(hed['recordlength'])
        data = np.fromstring(f.read(2 * hed['Xsize'] * hed['Ysize']),
                             '<u2').astype(np.float64)
    if hed['highintensitypixels'] > 0:
        raise NotImplementedError(
            'Intensities over 65535 are not yet supported!')
    data = data.reshape(hed['Xsize'], hed['Ysize'])
    return data, hed
<SYSTEM_TASK:> Write a version 2 Bessy Data File <END_TASK>
<USER_TASK:> Description:
def writebdfv2(filename, bdf, bdfext='.bdf', bhfext='.bhf'):
    """Write a version 2 Bessy Data File

    Inputs
    ------
    filename: string
        the name of the output file. One can give the complete header or
        datafile name or just the base name without the extensions.
    bdf: dict
        the BDF structure (in the same format as loaded by ``readbdfv2()``)
    bdfext: string, optional
        the extension of the data file
    bhfext: string, optional
        the extension of the header file

    Output
    ------
    None

    Notes
    -----
    BDFv2 header and scattering data are stored separately in the header
    and the data files. Given the file name both are saved.
    """
    if filename.endswith(bdfext):
        basename = filename[:-len(bdfext)]
    elif filename.endswith(bhfext):
        basename = filename[:-len(bhfext)]
    else:
        basename = filename
    header.writebhfv2(basename + '.bhf', bdf)
    f = open(basename + '.bdf', 'wb')
    keys = ['RAWDATA', 'RAWERROR', 'CORRDATA', 'CORRERROR', 'NANDATA']
    keys.extend([x for x in list(bdf.keys())
                 if isinstance(bdf[x], np.ndarray) and x not in keys])
    for k in keys:
        if k not in list(bdf.keys()):
            continue
        # encode the header line: the data file is opened in binary mode
        f.write(('#%s[%d:%d]\n' % (k, bdf['xdim'], bdf['ydim'])).encode('utf-8'))
        f.write(np.rot90(bdf[k], 3).astype('float32').tostring(order='F'))
    f.close()
<SYSTEM_TASK:> Fill up missing padding in a string. <END_TASK>
<USER_TASK:> Description:
def fill_padding(padded_string):
    # type: (bytes) -> bytes
    """
    Fill up missing padding in a string.

    This function makes sure that the string has a length which is a
    multiple of 4, and if not, fills the missing places with dots.

    :param str padded_string: string to be decoded that might miss padding
        dots.
    :return: properly padded string
    :rtype: str
    """
    length = len(padded_string)
    reminder = len(padded_string) % 4
    if reminder:
        return padded_string.ljust(length + 4 - reminder, b'.')
    return padded_string
<SYSTEM_TASK:> Decode the result of querystringsafe_base64_encode or a regular base64. <END_TASK>
<USER_TASK:> Description:
def decode(encoded):
    # type: (bytes) -> bytes
    """
    Decode the result of querystringsafe_base64_encode or a regular base64.

    .. note ::
        As a regular base64 string does not contain dots, replacing dots
        with equal signs does basically nothing to it. Also,
        base64.urlsafe_b64decode can decode both safe and unsafe base64.
        Therefore this function may also be used to decode the regular
        base64.

    :param (str, unicode) encoded: querystringsafe_base64 string or unicode
    :rtype: str, bytes
    :return: decoded string
    """
    padded_string = fill_padding(encoded)
    return urlsafe_b64decode(padded_string.replace(b'.', b'='))
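A hedged round-trip sketch; the matching `encode()` of this library is not shown in this section, so the replacement of `=` padding with `.` is done by hand here:

    from base64 import urlsafe_b64encode

    payload = b'some binary\x00data'
    encoded = urlsafe_b64encode(payload).replace(b'=', b'.')
    assert decode(encoded) == payload                     # dot-padded input
    assert decode(encoded.rstrip(b'.')) == payload        # stripped padding is refilled
    assert decode(urlsafe_b64encode(payload)) == payload  # plain base64 also decodes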
<SYSTEM_TASK:> Flatten a dict. <END_TASK>
<USER_TASK:> Description:
def flatten_hierarchical_dict(original_dict, separator='.',
                              max_recursion_depth=None):
    """Flatten a dict.

    Inputs
    ------
    original_dict: dict
        the dictionary to flatten
    separator: string, optional
        the separator item in the keys of the flattened dictionary
    max_recursion_depth: positive integer, optional
        the number of recursions to be done. None is infinite.

    Output
    ------
    the flattened dictionary

    Notes
    -----
    Each element of `original_dict` which is not an instance of `dict` (or
    of a subclass of it) is kept as is. The others are treated as follows.
    If ``original_dict['key_dict']`` is an instance of `dict` (or of a
    subclass of `dict`), a corresponding key of the form
    ``key_dict<separator><key_in_key_dict>`` will be created in
    ``original_dict`` with the value of
    ``original_dict['key_dict']['key_in_key_dict']``. If that value is a
    subclass of `dict` as well, the same procedure is repeated until the
    maximum recursion depth is reached.

    Only string keys are supported.
    """
    if max_recursion_depth is not None and max_recursion_depth <= 0:
        # we reached the maximum recursion depth, refuse to go further
        return original_dict
    if max_recursion_depth is None:
        next_recursion_depth = None
    else:
        next_recursion_depth = max_recursion_depth - 1
    dict1 = {}
    for k in original_dict:
        if not isinstance(original_dict[k], dict):
            dict1[k] = original_dict[k]
        else:
            dict_recursed = flatten_hierarchical_dict(
                original_dict[k], separator, next_recursion_depth)
            dict1.update(dict([(k + separator + x, dict_recursed[x])
                               for x in dict_recursed]))
    return dict1
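A hedged example of the flattening and of the `max_recursion_depth` cut-off:

    nested = {'a': 1, 'b': {'c': 2, 'd': {'e': 3}}}

    print(flatten_hierarchical_dict(nested))
    # {'a': 1, 'b.c': 2, 'b.d.e': 3}

    print(flatten_hierarchical_dict(nested, max_recursion_depth=1))
    # {'a': 1, 'b.c': 2, 'b.d': {'e': 3}}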
<SYSTEM_TASK:> Do a Shull-Roess fitting on the scattering data. <END_TASK>
<USER_TASK:> Description:
def fit_shullroess(q, Intensity, Error, R0=None, r=None):
    """Do a Shull-Roess fitting on the scattering data.

    Inputs:
        q: np.ndarray[ndim=1]
            vector of the q values (4*pi*sin(theta)/lambda)
        Intensity: np.ndarray[ndim=1]
            Intensity vector
        Error: np.ndarray[ndim=1]
            Error of the intensity (absolute uncertainty, 1sigma)
        R0: scalar
            first guess for the mean radius (None to autodetermine, default)
        r: np.ndarray[ndim=1]
            vector of the abscissa of the resulting size distribution (None
            to autodetermine, default)

    Output:
        A: ErrorValue
            the fitted value of the intensity scaling factor
        r0: the r0 parameter of the maxwellian size distribution
        n: the n parameter of the maxwellian size distribution
        r: the abscissa of the fitted size distribution
        maxw: the size distribution
        stat: the statistics dictionary, returned by nlsq_fit()

    Note: This first searches for r0, which best linearizes the
        log(Intensity) vs. log(q**2+3/r0**2) relation. After this is found,
        the parameters of the fitted line give the parameters of a
        Maxwellian-like particle size distribution function. After it a
        proper least squares fitting is carried out, using the obtained
        values as initial parameters.
    """
    q = np.array(q)
    Intensity = np.array(Intensity)
    Error = np.array(Error)
    if R0 is None:
        r0s = np.linspace(1, 2 * np.pi / q.min(), 1000)

        def naive_fit_chi2(q, Intensity, r0):
            p = np.polyfit(np.log(q ** 2 + 3 / r0 ** 2),
                           np.log(Intensity), 1)
            return ((np.polyval(p, q) - Intensity) ** 2).sum() / (len(q) - 3)
        chi2 = np.array([naive_fit_chi2(q, Intensity, r0)
                         for r0 in r0s.tolist()])
        R0 = r0s[chi2 == chi2.min()][0]

    def naive_fit(q, Intensity, r0):
        p = np.polyfit(np.log(q ** 2 + 3 / r0 ** 2), np.log(Intensity), 1)
        return np.exp(p[1]), -2 * p[0] - 4
    K, n = naive_fit(q, Intensity, R0)

    def SR_function(q, A, r0, n):
        return A * (q ** 2 + 3 / r0 ** 2) ** (-(n + 4.) * 0.5)
    p, dp, statdict = easylsq.nlsq_fit(q, Intensity, Error, SR_function,
                                       (K, R0, n))
    n = ErrorValue(p[2], dp[2])
    r0 = ErrorValue(p[1], dp[1])
    A = ErrorValue(p[0], dp[0])
    if r is None:
        r = np.linspace(np.pi / q.max(), np.pi / q.min(), 1000)
    return A, r0, n, r, maxwellian(r, r0, n), statdict
<SYSTEM_TASK:> Find file in multiple directories. <END_TASK> <USER_TASK:> Description: def findfileindirs(filename, dirs=None, use_pythonpath=True, use_searchpath=True, notfound_is_fatal=True, notfound_val=None): """Find file in multiple directories. Inputs: filename: the file name to be searched for. dirs: list of folders or None use_pythonpath: use the Python module search path use_searchpath: use the sastool search path. notfound_is_fatal: if an exception is to be raised if the file cannot be found. notfound_val: the value which should be returned if the file is not found (only relevant if notfound_is_fatal is False) Outputs: the full path of the file. Notes: if filename is an absolute path by itself, folders in 'dirs' won't be checked, only the existence of the file will be verified. """
if os.path.isabs(filename): if os.path.exists(filename): return filename elif notfound_is_fatal: raise IOError('File ' + filename + ' not found.') else: return notfound_val if dirs is None: dirs = [] dirs = normalize_listargument(dirs) if not dirs: # dirs is empty dirs = ['.'] if use_pythonpath: dirs.extend(sys.path) if use_searchpath: dirs.extend(sastool_search_path) # expand ~ and ~user constructs dirs = [os.path.expanduser(d) for d in dirs] logger.debug('Searching for file %s in several folders: %s' % (filename, ', '.join(dirs))) for d in dirs: if os.path.exists(os.path.join(d, filename)): logger.debug('Found file %s in folder %s.' % (filename, d)) return os.path.join(d, filename) logger.debug('Not found file %s in any folders.' % filename) if notfound_is_fatal: raise IOError('File %s not found in any of the directories.' % filename) else: return notfound_val
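For example (the folder names are hypothetical)::

    # look for 'mask.mat' in the current directory, the given folders, the
    # Python module search path and the sastool search path; return None
    # instead of raising if it cannot be found anywhere
    maskpath = findfileindirs('mask.mat', dirs=['~/measurements', '/data/masks'],
                              notfound_is_fatal=False)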
<SYSTEM_TASK:> Calculate the two-theta matrix for a scattering matrix <END_TASK> <USER_TASK:> Description: def twotheta(matrix, bcx, bcy, pixsizeperdist): """Calculate the two-theta matrix for a scattering matrix Inputs: matrix: only the shape of it is needed bcx, bcy: beam position (counting from 0; x is row, y is column index) pixsizeperdist: the pixel size divided by the sample-to-detector distance Outputs: the two theta matrix, same shape as 'matrix'. """
col, row = np.meshgrid(list(range(matrix.shape[1])), list(range(matrix.shape[0]))) return np.arctan(np.sqrt((row - bcx) ** 2 + (col - bcy) ** 2) * pixsizeperdist)
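For instance, with 0.05 mm pixels at 1500 mm sample-to-detector distance (values chosen only for illustration)::

    import numpy as np
    img = np.empty((512, 512))
    tth = twotheta(img, bcx=256.0, bcy=256.0, pixsizeperdist=0.05 / 1500.0)
    tth[256, 256]  # 0.0 at the beam centre; increases radially outwards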
<SYSTEM_TASK:> Solid-angle correction for two-dimensional SAS images <END_TASK> <USER_TASK:> Description: def solidangle(twotheta, sampletodetectordistance, pixelsize=None): """Solid-angle correction for two-dimensional SAS images Inputs: twotheta: matrix of two-theta values sampletodetectordistance: sample-to-detector distance pixelsize: the pixel size in mm The output matrix is of the same shape as twotheta. The scattering intensity matrix should be multiplied by it. """
if pixelsize is None: pixelsize = 1 return sampletodetectordistance ** 2 / np.cos(twotheta) ** 3 / pixelsize ** 2
<SYSTEM_TASK:> Solid-angle correction for two-dimensional SAS images with error propagation <END_TASK> <USER_TASK:> Description: def solidangle_errorprop(twotheta, dtwotheta, sampletodetectordistance, dsampletodetectordistance, pixelsize=None): """Solid-angle correction for two-dimensional SAS images with error propagation Inputs: twotheta: matrix of two-theta values dtwotheta: matrix of absolute error of two-theta values sampletodetectordistance: sample-to-detector distance dsampletodetectordistance: absolute error of sample-to-detector distance pixelsize: the pixel size in mm (optional; defaults to 1, i.e. no pixel-area normalization) Outputs two matrices of the same shape as twotheta. The scattering intensity matrix should be multiplied by the first one. The second one is the propagated error of the first one. """
SAC = solidangle(twotheta, sampletodetectordistance, pixelsize) if pixelsize is None: pixelsize = 1 return (SAC, (sampletodetectordistance * (4 * dsampletodetectordistance ** 2 * np.cos(twotheta) ** 2 + 9 * dtwotheta ** 2 * sampletodetectordistance ** 2 * np.sin(twotheta) ** 2) ** 0.5 / np.cos(twotheta) ** 4) / pixelsize ** 2)
<SYSTEM_TASK:> Correction for angle-dependent absorption of the sample <END_TASK> <USER_TASK:> Description: def angledependentabsorption(twotheta, transmission): """Correction for angle-dependent absorption of the sample Inputs: twotheta: matrix of two-theta values transmission: the transmission of the sample (I_after/I_before, or exp(-mu*d)) The output matrix is of the same shape as twotheta. The scattering intensity matrix should be multiplied by it. Note that this does not correct for sample transmission by itself, as the 2*theta -> 0 limit of this matrix is unity. Twotheta==0 and transmission==1 cases are handled correctly (the limit is 1 in both cases). """
cor = np.ones(twotheta.shape) if transmission == 1: return cor mud = -np.log(transmission) cor[twotheta > 0] = transmission * mud * (1 - 1 / np.cos(twotheta[twotheta > 0])) / (np.exp(-mud / np.cos(twotheta[twotheta > 0])) - np.exp(-mud)) return cor
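A quick sanity check of the limiting behaviour noted above (array values illustrative)::

    import numpy as np
    tth = np.array([[0.0, 0.1, 0.2]])
    angledependentabsorption(tth, 1.0)  # all ones: a fully transparent sample needs no correction
    angledependentabsorption(tth, 0.5)[0, 0]  # 1.0: the correction tends to unity at zero angle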
<SYSTEM_TASK:> Correction for angle-dependent absorption of the sample with error propagation <END_TASK> <USER_TASK:> Description: def angledependentabsorption_errorprop(twotheta, dtwotheta, transmission, dtransmission): """Correction for angle-dependent absorption of the sample with error propagation Inputs: twotheta: matrix of two-theta values dtwotheta: matrix of absolute error of two-theta values transmission: the transmission of the sample (I_after/I_before, or exp(-mu*d)) dtransmission: the absolute error of the transmission of the sample Two matrices are returned: the first one is the correction (intensity matrix should be multiplied by it), the second is its absolute error. """
# error propagation formula calculated using sympy return (angledependentabsorption(twotheta, transmission), _calc_angledependentabsorption_error(twotheta, dtwotheta, transmission, dtransmission))
<SYSTEM_TASK:> Correction for the angle dependent absorption of air in the scattered <END_TASK> <USER_TASK:> Description: def angledependentairtransmission(twotheta, mu_air, sampletodetectordistance): """Correction for the angle dependent absorption of air in the scattered beam path. Inputs: twotheta: matrix of two-theta values mu_air: the linear absorption coefficient of air sampletodetectordistance: sample-to-detector distance 1/mu_air and sampletodetectordistance should have the same dimension The scattering intensity matrix should be multiplied by the resulting correction matrix."""
return np.exp(mu_air * sampletodetectordistance / np.cos(twotheta))
<SYSTEM_TASK:> Correction for the angle dependent absorption of air in the scattered <END_TASK> <USER_TASK:> Description: def angledependentairtransmission_errorprop(twotheta, dtwotheta, mu_air, dmu_air, sampletodetectordistance, dsampletodetectordistance): """Correction for the angle dependent absorption of air in the scattered beam path, with error propagation Inputs: twotheta: matrix of two-theta values dtwotheta: absolute error matrix of two-theta mu_air: the linear absorption coefficient of air dmu_air: error of the linear absorption coefficient of air sampletodetectordistance: sample-to-detector distance dsampletodetectordistance: error of the sample-to-detector distance 1/mu_air and sampletodetectordistance should have the same dimension The scattering intensity matrix should be multiplied by the resulting correction matrix."""
return (np.exp(mu_air * sampletodetectordistance / np.cos(twotheta)), np.sqrt(dmu_air ** 2 * sampletodetectordistance ** 2 * np.exp(2 * mu_air * sampletodetectordistance / np.cos(twotheta)) / np.cos(twotheta) ** 2 + dsampletodetectordistance ** 2 * mu_air ** 2 * np.exp(2 * mu_air * sampletodetectordistance / np.cos(twotheta)) / np.cos(twotheta) ** 2 + dtwotheta ** 2 * mu_air ** 2 * sampletodetectordistance ** 2 * np.exp(2 * mu_air * sampletodetectordistance / np.cos(twotheta)) * np.sin(twotheta) ** 2 / np.cos(twotheta) ** 4) )
<SYSTEM_TASK:> Find file in the path <END_TASK> <USER_TASK:> Description: def find_file(self, filename: str, strip_path: bool = True, what='exposure') -> str: """Find file in the path"""
if what == 'exposure':
    path = self._path
elif what == 'header':
    path = self._headerpath
elif what == 'mask':
    path = self._maskpath
else:
    path = self._path
tried = []
if strip_path:
    filename = os.path.split(filename)[-1]
for d in path:
    candidate = os.path.join(d, filename)
    tried.append(candidate)  # record every candidate so the error message is informative
    if os.path.exists(candidate):
        return candidate
raise FileNotFoundError('Not found: {}. Tried: {}'.format(filename, ', '.join(tried)))
<SYSTEM_TASK:> Search a file or directory relative to the base path <END_TASK> <USER_TASK:> Description: def get_subpath(self, subpath: str): """Search a file or directory relative to the base path"""
for d in self._path:
    if os.path.exists(os.path.join(d, subpath)):
        return os.path.join(d, subpath)
raise FileNotFoundError(subpath)
<SYSTEM_TASK:> Calculate the sum of pixels, not counting the masked ones if only_valid is True. <END_TASK> <USER_TASK:> Description: def sum(self, only_valid=True) -> ErrorValue: """Calculate the sum of pixels, not counting the masked ones if only_valid is True."""
if not only_valid: mask = 1 else: mask = self.mask return ErrorValue((self.intensity * mask).sum(), ((self.error * mask) ** 2).sum() ** 0.5)
<SYSTEM_TASK:> Calculate the mean of the pixels, not counting the masked ones if only_valid is True. <END_TASK> <USER_TASK:> Description: def mean(self, only_valid=True) -> ErrorValue: """Calculate the mean of the pixels, not counting the masked ones if only_valid is True."""
if not only_valid: intensity = self.intensity error = self.error else: intensity = self.intensity[self.mask] error = self.error[self.mask] return ErrorValue(intensity.mean(), (error ** 2).mean() ** 0.5)
<SYSTEM_TASK:> Return the q coordinates of a given pixel. <END_TASK> <USER_TASK:> Description: def pixel_to_q(self, row: float, column: float): """Return the q coordinates of a given pixel. Inputs: row: float the row (vertical) coordinate of the pixel column: float the column (horizontal) coordinate of the pixel Coordinates are 0-based and calculated from the top left corner. """
qrow = 4 * np.pi * np.sin( 0.5 * np.arctan( (row - float(self.header.beamcentery)) * float(self.header.pixelsizey) / float(self.header.distance))) / float(self.header.wavelength) qcol = 4 * np.pi * np.sin(0.5 * np.arctan( (column - float(self.header.beamcenterx)) * float(self.header.pixelsizex) / float(self.header.distance))) / float(self.header.wavelength) return qrow, qcol
<SYSTEM_TASK:> Do a radial averaging <END_TASK> <USER_TASK:> Description: def radial_average(self, qrange=None, pixel=False, returnmask=False, errorpropagation=3, abscissa_errorpropagation=3, raw_result=False) -> Curve: """Do a radial averaging Inputs: qrange: the q-range. If None, auto-determine. If 'linear', auto-determine with linear spacing (same as None). If 'log', auto-determine with log10 spacing. pixel: do a pixel-integration (instead of q) returnmask: if the effective mask matrix is to be returned. errorpropagation: the type of error propagation (3: highest of squared or std-dev, 2: squared, 1: linear, 0: independent measurements of the same quantity) abscissa_errorpropagation: the type of the error propagation in the abscissa (3: highest of squared or std-dev, 2: squared, 1: linear, 0: independent measurements of the same quantity) raw_result: if True, do not pack the result in a SASCurve, return the individual np.ndarrays. Outputs: the one-dimensional curve as an instance of SASCurve (if pixel is False) or SASPixelCurve (if pixel is True), if raw_result is False. Otherwise the q (or pixel), dq (or dpixel), I, dI, area vectors. In both cases the effective mask matrix is appended to the output if returnmask is True. """
if isinstance(qrange, str):
    if qrange == 'linear':
        qrange = None
        autoqrange_linear = True
    elif qrange == 'log':
        qrange = None
        autoqrange_linear = False
    else:
        raise ValueError(
            'Value given for qrange ("%s") not understood.' % qrange)
else:
    autoqrange_linear = True  # irrelevant when qrange is given explicitly
if pixel:
    abscissa_kind = 3
else:
    abscissa_kind = 0
res = radint_fullq_errorprop(self.intensity, self.error,
                             self.header.wavelength.val, self.header.wavelength.err,
                             self.header.distance.val, self.header.distance.err,
                             self.header.pixelsizey.val, self.header.pixelsizex.val,
                             self.header.beamcentery.val, self.header.beamcentery.err,
                             self.header.beamcenterx.val, self.header.beamcenterx.err,
                             (self.mask == 0).astype(np.uint8), qrange,
                             returnmask=returnmask, errorpropagation=errorpropagation,
                             autoqrange_linear=autoqrange_linear, abscissa_kind=abscissa_kind,
                             abscissa_errorpropagation=abscissa_errorpropagation)
q, dq, I, E, area = res[:5]
if not raw_result:
    c = Curve(q, I, E, dq)
    if returnmask:
        return c, res[5]
    else:
        return c
else:
    if returnmask:
        return q, dq, I, E, area, res[5]
    else:
        return q, dq, I, E, area
<SYSTEM_TASK:> Extend the mask with the image elements where the intensity is negative. <END_TASK> <USER_TASK:> Description: def mask_negative(self): """Extend the mask with the image elements where the intensity is negative."""
self.mask = np.logical_and(self.mask, ~(self.intensity < 0))
<SYSTEM_TASK:> Do a simultaneous nonlinear least-squares fit and return the fitted <END_TASK> <USER_TASK:> Description: def simultaneous_nonlinear_leastsquares(xs, ys, dys, func, params_inits, verbose=False, **kwargs): """Do a simultaneous nonlinear least-squares fit and return the fitted parameters as instances of ErrorValue. Input: ------ `xs`: tuple of abscissa vectors (1d numpy ndarrays) `ys`: tuple of ordinate vectors (1d numpy ndarrays) `dys`: tuple of the errors of ordinate vectors (1d numpy ndarrays or Nones) `func`: fitting function (the same for all the datasets) `params_inits`: tuples of *lists* or *tuples* (not numpy ndarrays!) of the initial values of the parameters to be fitted. The special value `None` signifies that the corresponding parameter is the same as in the previous dataset. Of course, none of the parameters of the first dataset can be None. `verbose`: if various messages useful for debugging should be printed on stdout. additional keyword arguments get forwarded to nlsq_fit() Output: ------- `parset1, parset2 ...`: tuples of fitted parameters corresponding to curve1, curve2, etc. Each tuple contains the values of the fitted parameters as instances of ErrorValue, in the same order as they are in `params_inits`. `statdict`: statistics dictionary. This is of the same form as in `nlsq_fit`, except that func_value is a sequence of one-dimensional np.ndarrays containing the best-fitting function values for each curve. """
p, dp, statdict = simultaneous_nlsq_fit(xs, ys, dys, func, params_inits, verbose, **kwargs) params = [[ErrorValue(p_, dp_) for (p_, dp_) in zip(pcurrent, dpcurrent)] for (pcurrent, dpcurrent) in zip(p, dp)] return tuple(params + [statdict])
<SYSTEM_TASK:> Make a string representation of the value and its uncertainty. <END_TASK> <USER_TASK:> Description: def tostring(self: 'ErrorValue', extra_digits: int = 0, plusminus: str = ' +/- ', fmt: str = None) -> str: """Make a string representation of the value and its uncertainty. Inputs: ------- ``extra_digits``: integer how many extra digits should be shown (plus or minus, zero means that the number of digits should be defined by the magnitude of the uncertainty). ``plusminus``: string the character sequence to be inserted in place of '+/-' including delimiting whitespace. ``fmt``: string or None how to format the output. Currently only strings ending in 'tex' are supported, which render ascii-exponentials (i.e. 3.1415e-2) into a format which is more appropriate to TeX. Outputs: -------- the string representation. """
if isinstance(fmt, str) and fmt.lower().endswith('tex'):
    # raw string avoids invalid-escape-sequence warnings in the regex
    return re.subn(r'(\d*)(\.(\d)*)?[eE]([+-]?\d+)',
                   lambda m: (r'$%s%s\cdot 10^{%s}$' % (m.group(1), m.group(2), m.group(4))).replace('None', ''),
                   self.tostring(extra_digits=extra_digits, plusminus=plusminus, fmt=None))[0]
if isinstance(self.val, numbers.Real):
    try:
        Ndigits = -int(math.floor(math.log10(self.err))) + extra_digits
    except (OverflowError, ValueError):
        return str(self.val) + plusminus + str(self.err)
    else:
        return str(round(self.val, Ndigits)) + plusminus + str(round(self.err, Ndigits))
# honour the requested plusminus separator in the non-numeric fallback, too
return str(self.val) + plusminus + str(self.err)
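For example, assuming an ``ErrorValue`` constructed with ``val=3.1415926`` and ``err=0.031``::

    v = ErrorValue(3.1415926, 0.031)
    v.tostring()                  # '3.14 +/- 0.03'
    v.tostring(extra_digits=1)    # '3.142 +/- 0.031'
    v.tostring(plusminus=' ± ')   # '3.14 ± 0.03'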
<SYSTEM_TASK:> Evaluate a function with error propagation. <END_TASK> <USER_TASK:> Description: def evalfunc(cls, func, *args, **kwargs): """Evaluate a function with error propagation. Inputs: ------- ``func``: callable this is the function to be evaluated. Should return either a number or a np.ndarray. ``*args``: other positional arguments of func. Arguments which are not instances of `ErrorValue` are taken as constants. keyword arguments supported: ``NMC``: number of Monte-Carlo steps. If not defined, defaults to 1000 ``exceptions_to_repeat``: list of exception types to ignore: if one of these is raised the given MC step is repeated once again. Notice that this might induce an infinite loop! The exception types in this list should be subclasses of ``Exception``. ``exceptions_to_skip``: list of exception types to skip: if one of these is raised the given MC step is skipped, never to be repeated. The exception types in this list should be subclasses of ``Exception``. Output: ------- ``result``: an `ErrorValue` with the result. The error is estimated via a Monte-Carlo approach to Gaussian error propagation. """
def do_random(x):
    if isinstance(x, cls):
        return x.random()
    else:
        return x
if 'NMC' not in kwargs:
    kwargs['NMC'] = 1000
if 'exceptions_to_skip' not in kwargs:
    kwargs['exceptions_to_skip'] = []
if 'exceptions_to_repeat' not in kwargs:
    kwargs['exceptions_to_repeat'] = []
meanvalue = func(*args)  # this way we get either a number or a np.array
stdcollector = meanvalue * 0
mciters = 0
while mciters < kwargs['NMC']:
    try:  # IGNORE:W0142
        stdcollector += (func(*[do_random(a) for a in args]) - meanvalue) ** 2
        mciters += 1
    except Exception as e:  # IGNORE:W0703
        if any(isinstance(e, etype) for etype in kwargs['exceptions_to_skip']):
            kwargs['NMC'] -= 1
        elif any(isinstance(e, etype) for etype in kwargs['exceptions_to_repeat']):
            pass
        else:
            raise
# sample standard deviation of the MC results: sqrt(sum/(N-1)), not sqrt(sum)/(N-1)
return cls(meanvalue, (stdcollector / (kwargs['NMC'] - 1)) ** 0.5)
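A hedged sketch (relies on ``ErrorValue`` arithmetic and its ``random()`` method defined elsewhere in this module)::

    x = ErrorValue(2.0, 0.1)
    y = ErrorValue.evalfunc(lambda a: a ** 2 + 1, x, NMC=2000)
    # y.val is close to 5; y.err is close to 0.4, the linear error
    # propagation estimate |df/dx|*err = 2*2.0*0.1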
<SYSTEM_TASK:> Generalized Guinier scattering <END_TASK> <USER_TASK:> Description: def GeneralGuinier(q, G, Rg, s): """Generalized Guinier scattering Inputs: ------- ``q``: independent variable ``G``: factor ``Rg``: radius of gyration ``s``: dimensionality parameter (can be 1, 2, 3) Formula: -------- ``G/q**(3-s)*exp(-(q^2*Rg^2)/s)`` """
return G / q ** (3 - s) * np.exp(-(q * Rg) ** 2 / s)
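With ``s = 3`` the expression reduces to the classical Guinier law; a quick numerical check (values illustrative)::

    import numpy as np
    q = np.linspace(0.01, 0.2, 50)
    np.allclose(GeneralGuinier(q, 1.0, 20.0, 3), np.exp(-q ** 2 * 20.0 ** 2 / 3))  # True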
<SYSTEM_TASK:> Empirical Guinier-Porod scattering <END_TASK> <USER_TASK:> Description: def GuinierPorod(q, G, Rg, alpha): """Empirical Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``G``: factor of the Guinier-branch ``Rg``: radius of gyration ``alpha``: power-law exponent Formula: -------- ``G * exp(-q^2*Rg^2/3)`` if ``q<q_sep`` and ``a*q^alpha`` otherwise. ``q_sep`` and ``a`` are determined from conditions of smoothness at the cross-over. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """
return GuinierPorodMulti(q, G, Rg, alpha)
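A usage sketch; note that under the ``a*q^alpha`` convention above a decaying Porod branch needs a negative exponent (values illustrative)::

    import numpy as np
    q = np.logspace(-2, 0, 200)
    I = GuinierPorod(q, G=1.0, Rg=30.0, alpha=-4.0)
    # smooth cross-over from a Guinier knee to a q^-4 Porod tail; the
    # cross-over point is determined internally from the smoothness conditions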
<SYSTEM_TASK:> Empirical Porod-Guinier scattering <END_TASK> <USER_TASK:> Description: def PorodGuinier(q, a, alpha, Rg): """Empirical Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``a``: factor of the power-law branch ``alpha``: power-law exponent ``Rg``: radius of gyration Formula: -------- ``G * exp(-q^2*Rg^2/3)`` if ``q>q_sep`` and ``a*q^alpha`` otherwise. ``q_sep`` and ``G`` are determined from conditions of smoothness at the cross-over. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """
return PorodGuinierMulti(q, a, alpha, Rg)
<SYSTEM_TASK:> Empirical Porod-Guinier-Porod scattering <END_TASK> <USER_TASK:> Description: def PorodGuinierPorod(q, a, alpha, Rg, beta): """Empirical Porod-Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``a``: factor of the first power-law branch ``alpha``: exponent of the first power-law branch ``Rg``: radius of gyration ``beta``: exponent of the second power-law branch Formula: -------- ``a*q^alpha`` if ``q<q_sep1``. ``G * exp(-q^2*Rg^2/3)`` if ``q_sep1<q<q_sep2`` and ``b*q^beta`` if ``q_sep2<q``. ``q_sep1``, ``q_sep2``, ``G`` and ``b`` are determined from conditions of smoothness at the cross-overs. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """
return PorodGuinierMulti(q, a, alpha, Rg, beta)
<SYSTEM_TASK:> Empirical Guinier-Porod-Guinier scattering <END_TASK> <USER_TASK:> Description: def GuinierPorodGuinier(q, G, Rg1, alpha, Rg2): """Empirical Guinier-Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``G``: factor for the first Guinier-branch ``Rg1``: the first radius of gyration ``alpha``: the power-law exponent ``Rg2``: the second radius of gyration Formula: -------- ``G*exp(-q^2*Rg1^2/3)`` if ``q<q_sep1``. ``A*q^alpha`` if ``q_sep1 <= q <=q_sep2``. ``G2*exp(-q^2*Rg2^2/3)`` if ``q_sep2<q``. The parameters ``A``,``G2``, ``q_sep1``, ``q_sep2`` are determined from conditions of smoothness at the cross-overs. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """
return GuinierPorodMulti(q, G, Rg1, alpha, Rg2)
<SYSTEM_TASK:> Damped power-law <END_TASK> <USER_TASK:> Description: def DampedPowerlaw(q, a, alpha, sigma): """Damped power-law Inputs: ------- ``q``: independent variable ``a``: factor ``alpha``: exponent ``sigma``: hwhm of the damping Gaussian Formula: -------- ``a*q^alpha*exp(-q^2/(2*sigma^2))`` """
return a * q ** alpha * np.exp(-q ** 2 / (2 * sigma ** 2))
<SYSTEM_TASK:> Sum of a Power-law, a Guinier-Porod curve and a constant. <END_TASK> <USER_TASK:> Description: def PowerlawGuinierPorodConst(q, A, alpha, G, Rg, beta, C): """Sum of a Power-law, a Guinier-Porod curve and a constant. Inputs: ------- ``q``: independent variable (momentum transfer) ``A``: scaling factor of the power-law ``alpha``: power-law exponent ``G``: scaling factor of the Guinier-Porod curve ``Rg``: Radius of gyration ``beta``: power-law exponent of the Guinier-Porod curve ``C``: additive constant Formula: -------- ``A*q^alpha + GuinierPorod(q,G,Rg,beta) + C`` """
return PowerlawPlusConstant(q, A, alpha, C) + GuinierPorod(q, G, Rg, beta)
<SYSTEM_TASK:> Empirical multi-part Guinier-Porod scattering <END_TASK> <USER_TASK:> Description: def GuinierPorodMulti(q, G, *Rgsalphas): """Empirical multi-part Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``G``: factor for the first Guinier-branch other arguments: [Rg1, alpha1, Rg2, alpha2, Rg3 ...] the radii of gyration and power-law exponents of the consecutive parts Formula: -------- The intensity is a piecewise function with continuous first derivatives. The separating points in ``q`` between the consecutive parts and the intensity factors of them (except the first) are determined from conditions of smoothness (continuity of the function and its first derivative) at the border points of the intervals. Guinier-type (``G*exp(-q^2*Rg1^2/3)``) and Power-law type (``A*q^alpha``) parts follow each other in alternating sequence. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """
scalefactor = G
funcs = [lambda q: Guinier(q, G, Rgsalphas[0])]
indices = np.ones_like(q, dtype=bool)  # np.bool was removed from NumPy; use the builtin
constraints = []
for i in range(1, len(Rgsalphas)):
    if i % 2:
        # Rgsalphas[i] is an exponent, Rgsalphas[i-1] is a radius of gyration
        qsep = _PGgen_qsep(Rgsalphas[i], Rgsalphas[i - 1], 3)
        scalefactor = _PGgen_A(Rgsalphas[i], Rgsalphas[i - 1], 3, scalefactor)
        funcs.append(lambda q, a=scalefactor, alpha=Rgsalphas[i]: Powerlaw(q, a, alpha))
    else:
        # Rgsalphas[i] is a radius of gyration, Rgsalphas[i-1] is a power-law exponent
        qsep = _PGgen_qsep(Rgsalphas[i - 1], Rgsalphas[i], 3)
        scalefactor = _PGgen_G(Rgsalphas[i - 1], Rgsalphas[i], 3, scalefactor)
        funcs.append(lambda q, G=scalefactor, Rg=Rgsalphas[i]: Guinier(q, G, Rg))
    # this belongs to the previous
    constraints.append(indices & (q < qsep))
    indices[q < qsep] = False
constraints.append(indices)
return np.piecewise(q, constraints, funcs)
<SYSTEM_TASK:> Empirical multi-part Porod-Guinier scattering <END_TASK> <USER_TASK:> Description: def PorodGuinierMulti(q, A, *alphasRgs): """Empirical multi-part Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``A``: factor for the first Power-law-branch other arguments: [alpha1, Rg1, alpha2, Rg2, alpha3 ...] the radii of gyration and power-law exponents of the consecutive parts Formula: -------- The intensity is a piecewise function with continuous first derivatives. The separating points in ``q`` between the consecutive parts and the intensity factors of them (except the first) are determined from conditions of smoothness (continuity of the function and its first derivative) at the border points of the intervals. Guinier-type (``G*exp(-q^2*Rg1^2/3)``) and Power-law type (``A*q^alpha``) parts follow each other in alternating sequence. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """
scalefactor = A
funcs = [lambda q: Powerlaw(q, A, alphasRgs[0])]
indices = np.ones_like(q, dtype=bool)
constraints = []
for i in range(1, len(alphasRgs)):
    if i % 2:
        # alphasRgs[i] is a radius of gyration, alphasRgs[i-1] is a power-law exponent
        qsep = _PGgen_qsep(alphasRgs[i - 1], alphasRgs[i], 3)
        scalefactor = _PGgen_G(alphasRgs[i - 1], alphasRgs[i], 3, scalefactor)
        funcs.append(lambda q, G=scalefactor, Rg=alphasRgs[i]: Guinier(q, G, Rg))
    else:
        # alphasRgs[i] is an exponent, alphasRgs[i-1] is a radius of gyration
        qsep = _PGgen_qsep(alphasRgs[i], alphasRgs[i - 1], 3)
        scalefactor = _PGgen_A(alphasRgs[i], alphasRgs[i - 1], 3, scalefactor)
        funcs.append(lambda q, a=scalefactor, alpha=alphasRgs[i]: a * q ** alpha)
    # this belongs to the previous
    constraints.append(indices & (q < qsep))
    indices[q < qsep] = False
constraints.append(indices)
return np.piecewise(q, constraints, funcs)
<SYSTEM_TASK:> Empirical generalized multi-part Guinier-Porod scattering <END_TASK> <USER_TASK:> Description: def GeneralGuinierPorod(q, factor, *args, **kwargs): """Empirical generalized multi-part Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``factor``: factor for the first branch other arguments (*args): the defining arguments of the consecutive parts: radius of gyration (``Rg``) and dimensionality parameter (``s``) for Guinier and exponent (``alpha``) for power-law parts. supported keyword arguments: ``startswithguinier``: True if the first segment is a Guinier-type scattering (this is the default) or False if it is a power-law Formula: -------- The intensity is a piecewise function with continuous first derivatives. The separating points in ``q`` between the consecutive parts and the intensity factors of them (except the first) are determined from conditions of smoothness (continuity of the function and its first derivative) at the border points of the intervals. Guinier-type (``G*q**(3-s)*exp(-q^2*Rg1^2/s)``) and Power-law type (``A*q^alpha``) parts follow each other in alternating sequence. The exact number of parts is determined from the number of positional arguments (*args). Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """
if kwargs.get('startswithguinier', True):
    funcs = [lambda q, A=factor: GeneralGuinier(q, A, args[0], args[1])]
    i = 2
    guiniernext = False
else:
    funcs = [lambda q, A=factor: Powerlaw(q, A, args[0])]
    i = 1
    guiniernext = True
indices = np.ones_like(q, dtype=bool)
constraints = []
while i < len(args):
    if guiniernext:
        # args[i] is a radius of gyration, args[i+1] is a dimensionality parameter, args[i-1] is a power-law exponent
        qsep = _PGgen_qsep(args[i - 1], args[i], args[i + 1])
        factor = _PGgen_G(args[i - 1], args[i], args[i + 1], factor)
        funcs.append(lambda q, G=factor, Rg=args[i], s=args[i + 1]: GeneralGuinier(q, G, Rg, s))
        guiniernext = False
        i += 2
    else:
        # args[i] is an exponent, args[i-2] is a radius of gyration, args[i-1] is a dimensionality parameter
        qsep = _PGgen_qsep(args[i], args[i - 2], args[i - 1])
        factor = _PGgen_A(args[i], args[i - 2], args[i - 1], factor)
        funcs.append(lambda q, a=factor, alpha=args[i]: a * q ** alpha)
        guiniernext = True
        i += 1
    # this belongs to the previous
    constraints.append(indices & (q < qsep))
    indices[q < qsep] = False
constraints.append(indices)
return np.piecewise(q, constraints, funcs)
<SYSTEM_TASK:> Scattering intensity of a generalized excluded-volume Gaussian chain <END_TASK> <USER_TASK:> Description: def ExcludedVolumeChain(q, Rg, nu): """Scattering intensity of a generalized excluded-volume Gaussian chain Inputs: ------- ``q``: independent variable ``Rg``: radius of gyration ``nu``: excluded volume exponent Formula: -------- ``(u^(0.5/nu)*gamma(0.5/nu)*gammainc_lower(0.5/nu,u) - gamma(1/nu)*gammainc_lower(1/nu,u)) / (nu*u^(1/nu))`` where ``u = q^2*Rg^2*(2*nu+1)*(2*nu+2)/6`` is the reduced scattering variable, ``gamma(x)`` is the gamma function and ``gammainc_lower(a,t)`` is the regularized lower incomplete gamma function (as computed by ``scipy.special.gammainc``), so that ``gamma(a)*gammainc_lower(a,t)`` gives the unregularized lower incomplete gamma function. Literature: ----------- SASFit manual 6. nov. 2010. Equation (3.60b) """
u = (q * Rg) ** 2 * (2 * nu + 1) * (2 * nu + 2) / 6. return (u ** (0.5 / nu) * gamma(0.5 / nu) * gammainc(0.5 / nu, u) - gamma(1. / nu) * gammainc(1. / nu, u)) / (nu * u ** (1. / nu))
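For ``nu = 0.5`` (a Gaussian chain) the model reduces to the Debye function; a numerical sanity check::

    import numpy as np
    q = np.linspace(0.01, 0.5, 50)
    Rg = 30.0
    x = (q * Rg) ** 2
    debye = 2 * (np.exp(-x) + x - 1) / x ** 2
    np.allclose(ExcludedVolumeChain(q, Rg, 0.5), debye)  # True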
<SYSTEM_TASK:> Borue-Erukhimovich model of microphase separation in polyelectrolytes <END_TASK> <USER_TASK:> Description: def BorueErukhimovich(q, C, r0, s, t): """Borue-Erukhimovich model of microphase separation in polyelectrolytes Inputs: ------- ``q``: independent variable ``C``: scaling factor ``r0``: typical el.stat. screening length ``s``: dimensionless charge concentration ``t``: dimensionless temperature Formula: -------- ``C*(x^2+s)/((x^2+s)(x^2+t)+1)`` where ``x=q*r0`` Literature: ----------- o Borue and Erukhimovich. Macromolecules (1988) 21 (11) 3240-3249 o Shibayama and Tanaka. J. Chem. Phys (1995) 102 (23) 9392 o Moussaid et. al. J. Phys II (France) (1993) 3 (4) 573-594 o Ermi and Amis. Macromolecules (1997) 30 (22) 6937-6942 """
x = q * r0 return C * (x ** 2 + s) / ((x ** 2 + s) * (x ** 2 + t) + 1)
<SYSTEM_TASK:> Borue-Erukhimovich model ending in a power-law. <END_TASK> <USER_TASK:> Description: def BorueErukhimovich_Powerlaw(q, C, r0, s, t, nu): """Borue-Erukhimovich model ending in a power-law. Inputs: ------- ``q``: independent variable ``C``: scaling factor ``r0``: typical el.stat. screening length ``s``: dimensionless charge concentration ``t``: dimensionless temperature ``nu``: excluded volume parameter Formula: -------- ``C*(x^2+s)/((x^2+s)(x^2+t)+1)`` where ``x=q*r0`` if ``q<qsep`` ``A*q^(-1/nu)``if ``q>qsep`` ``A`` and ``qsep`` are determined from conditions of smoothness at the cross-over. """
def get_xsep(alpha, s, t):
    A = alpha + 2
    B = 2 * s * alpha + t * alpha + 4 * s
    C = s * t * alpha + alpha + alpha * s ** 2 + alpha * s * t - 2 + 2 * s ** 2
    D = alpha * s ** 2 * t + alpha * s
    r = np.roots([A, B, C, D])
    # np.roots can return complex values; comparing those with 0 raises a
    # TypeError, so keep only the positive real roots before selecting one
    r = np.real(r[np.isreal(r) & (np.real(r) > 0)])
    return r[0] ** 0.5

def get_A(C, xsep, s, t, nu):
    # power-law prefactor ensuring continuity at the cross-over point
    return C * (xsep ** 2 + s) / ((xsep ** 2 + s) * (xsep ** 2 + t) + 1) * xsep ** (1.0 / nu)

x = q * r0
xsep = get_xsep(-1.0 / nu, s, t)
A = get_A(C, xsep, s, t, nu)
return np.piecewise(q, (x < xsep, x >= xsep),
                    (lambda a: BorueErukhimovich(a, C, r0, s, t),
                     lambda a: A * (a * r0) ** (-1.0 / nu)))
<SYSTEM_TASK:> Tag the current commit with the current version. <END_TASK> <USER_TASK:> Description: def tag(message): # type: () -> None """ Tag the current commit with the current version. """
release_ver = versioning.current() message = message or 'v{} release'.format(release_ver) with conf.within_proj_dir(): log.info("Creating release tag") git.tag( author=git.latest_commit().author, name='v{}'.format(release_ver), message=message, )
<SYSTEM_TASK:> Lint python files. <END_TASK> <USER_TASK:> Description: def lint(exclude, skip_untracked, commit_only): # type: (List[str], bool, bool) -> None """ Lint python files. Args: exclude (list[str]): A list of glob string patterns to test against. If the file/path matches any of those patterns, it will be filtered out. skip_untracked (bool): If set to **True** it will skip all files not tracked by git. commit_only (bool): Only lint files that are staged for commit. """
exclude = list(exclude) + conf.get('lint.exclude', []) runner = LintRunner(exclude, skip_untracked, commit_only) if not runner.run(): exit(1)
<SYSTEM_TASK:> Decorator for defining lint tools. <END_TASK> <USER_TASK:> Description: def tool(name): # type: (str) -> FunctionType """ Decorator for defining lint tools. Args: name (str): The name of the tool. This name will be used to identify the tool in `pelconf.yaml`. """
global g_tools def decorator(fn): # pylint: disable=missing-docstring # type: (FunctionType) -> FunctionType g_tools[name] = fn return fn return decorator
<SYSTEM_TASK:> Run code checks using pep8. <END_TASK> <USER_TASK:> Description: def pep8_check(files): # type: (List[str]) -> int """ Run code checks using pep8. Args: files (list[str]): A list of files to check Returns: int: The exit code returned by pep8; **0** means all files passed the checks. pep8 tool is **very** fast. Especially compared to pylint and the bigger the code base the bigger the difference. If you want to reduce check times you might disable all pep8 checks in pylint and use pep8 for that. This way you use pylint only for the more advanced checks (the number of checks enabled in pylint will make a visible difference in its run times). """
files = fs.wrap_paths(files) cfg_path = conf.get_path('lint.pep8_cfg', 'ops/tools/pep8.ini') pep8_cmd = 'pep8 --config {} {}'.format(cfg_path, files) return shell.run(pep8_cmd, exit_on_error=False).return_code
<SYSTEM_TASK:> Run code checks using pylint. <END_TASK> <USER_TASK:> Description: def pylint_check(files): # type: (List[str]) -> int """ Run code checks using pylint. Args: files (list[str]): A list of files to check Returns: int: The exit code returned by pylint; **0** means all files passed the checks. """
files = fs.wrap_paths(files) cfg_path = conf.get_path('lint.pylint_cfg', 'ops/tools/pylint.ini') pylint_cmd = 'pylint --rcfile {} {}'.format(cfg_path, files) return shell.run(pylint_cmd, exit_on_error=False).return_code
<SYSTEM_TASK:> Run all linters and report results. <END_TASK> <USER_TASK:> Description: def run(self): # type: () -> bool """ Run all linters and report results. Returns: bool: **True** if all checks were successful, **False** otherwise. """
with util.timed_block() as t: files = self._collect_files() log.info("Collected <33>{} <32>files in <33>{}s".format( len(files), t.elapsed_s )) if self.verbose: for p in files: log.info(" <0>{}", p) # No files to lint - return success if empty runs are allowed. if not files: return self.allow_empty with util.timed_block() as t: results = self._run_checks(files) log.info("Code checked in <33>{}s", t.elapsed_s) success = True for name, retcodes in results.items(): if any(x != 0 for x in retcodes): success = False log.err("<35>{} <31>failed with: <33>{}".format( name, retcodes )) return success
<SYSTEM_TASK:> Set Isogeo base URLs according to platform. <END_TASK> <USER_TASK:> Description: def set_base_url(self, platform: str = "prod"): """Set Isogeo base URLs according to platform. :param str platform: platform to use. Options: * prod [DEFAULT] * qa * int """
platform = platform.lower() self.platform = platform if platform == "prod": ssl = True logging.debug("Using production platform.") elif platform == "qa": ssl = False logging.debug("Using Quality Assurance platform (reduced perfs).") else: logging.error( "Platform must be one of: {}".format(" | ".join(self.API_URLS.keys())) ) raise ValueError( 3, "Platform must be one of: {}".format(" | ".join(self.API_URLS.keys())), ) # method ending return ( platform.lower(), self.API_URLS.get(platform), self.APP_URLS.get(platform), self.CSW_URLS.get(platform), self.MNG_URLS.get(platform), self.OC_URLS.get(platform), ssl, )
<SYSTEM_TASK:> Convert a metadata UUID to its URI equivalent. And conversely. <END_TASK> <USER_TASK:> Description: def convert_uuid(self, in_uuid: str = str, mode: int = 0): """Convert a metadata UUID to its URI equivalent. And conversely. :param str in_uuid: UUID or URI to convert :param int mode: conversion direction. Options: * 0 to HEX * 1 to URN (RFC4122) * 2 to URN (Isogeo specific style) """
# parameters check if not isinstance(in_uuid, str): raise TypeError("'in_uuid' expected a str value.") else: pass if not checker.check_is_uuid(in_uuid): raise ValueError("{} is not a correct UUID".format(in_uuid)) else: pass if not isinstance(mode, int): raise TypeError("'mode' expects an integer value") else: pass # handle Isogeo specific UUID in XML exports if "isogeo:metadata" in in_uuid: in_uuid = "urn:uuid:{}".format(in_uuid.split(":")[-1]) logging.debug("Isogeo UUUID URN spotted: {}".format(in_uuid)) else: pass # operate if mode == 0: return uuid.UUID(in_uuid).hex elif mode == 1: return uuid.UUID(in_uuid).urn elif mode == 2: urn = uuid.UUID(in_uuid).urn return "urn:isogeo:metadata:uuid:{}".format(urn.split(":")[2]) else: raise ValueError("'mode' must be one of: 0 | 1 | 2")
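For example (``IsogeoUtils`` is assumed to be the class exposing this method)::

    utils = IsogeoUtils()
    utils.convert_uuid("urn:uuid:0269803d-50c0-4faa-83f0-9580c00a2644", mode=0)
    # '0269803d50c04faa83f09580c00a2644'
    utils.convert_uuid("0269803d50c04faa83f09580c00a2644", mode=2)
    # 'urn:isogeo:metadata:uuid:0269803d-50c0-4faa-83f0-9580c00a2644'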
<SYSTEM_TASK:> Pull out the character set, encoding, and encoded text from the input <END_TASK> <USER_TASK:> Description: def encoded_words_to_text(self, in_encoded_words: str): """Pull out the character set, encoding, and encoded text from the input encoded words. Next, it decodes the encoded words into a byte string, using either the quopri module or base64 module as determined by the encoding. Finally, it decodes the byte string using the character set and returns the result. See: - https://github.com/isogeo/isogeo-api-py-minsdk/issues/32 - https://dmorgan.info/posts/encoded-word-syntax/ :param str in_encoded_words: base64 or quopri encoded character string. """
# handle RFC2047 quoting
if '"' in in_encoded_words:
    in_encoded_words = in_encoded_words.strip('"')
# regex: =?charset?encoding?encoded_text?= (B is base64, Q is quoted-printable;
# the character class must be [BQ], since [B|Q] would also match a literal '|')
encoded_word_regex = r"=\?{1}(.+)\?{1}([BQ])\?{1}(.+)\?{1}="
# pull out
try:
    charset, encoding, encoded_text = re.match(
        encoded_word_regex, in_encoded_words
    ).groups()
except AttributeError:
    logging.debug("Input text was not encoded into base64 or quopri")
    return in_encoded_words
# decode depending on encoding
if encoding == "B":
    byte_string = base64.b64decode(encoded_text)
elif encoding == "Q":
    byte_string = quopri.decodestring(encoded_text)
return byte_string.decode(charset)
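For example, with a base64 ("B") and a quoted-printable ("Q") encoded word (``utils`` is an assumed ``IsogeoUtils`` instance, as above)::

    utils.encoded_words_to_text("=?utf-8?B?SGVsbG8gSXNvZ2Vv?=")
    # 'Hello Isogeo'
    utils.encoded_words_to_text("=?iso-8859-1?Q?Caf=E9?=")
    # 'Café'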
<SYSTEM_TASK:> Get Isogeo components versions. Authentication not required. <END_TASK> <USER_TASK:> Description: def get_isogeo_version(self, component: str = "api", prot: str = "https"): """Get Isogeo components versions. Authentication not required. :param str component: which platform component. Options: * api [default] * db * app """
# which component if component == "api": version_url = "{}://v1.{}.isogeo.com/about".format(prot, self.api_url) elif component == "db": version_url = "{}://v1.{}.isogeo.com/about/database".format( prot, self.api_url ) elif component == "app" and self.platform == "prod": version_url = "https://app.isogeo.com/about" elif component == "app" and self.platform == "qa": version_url = "https://qa-isogeo-app.azurewebsites.net/about" else: raise ValueError( "Component value must be one of: " "api [default], db, app." ) # send request version_req = requests.get(version_url, proxies=self.proxies, verify=self.ssl) # checking response checker.check_api_response(version_req) # end of method return version_req.json().get("version")
<SYSTEM_TASK:> Constructs the view URL of a metadata. <END_TASK> <USER_TASK:> Description: def get_view_url(self, webapp: str = "oc", **kwargs): """Constructs the view URL of a metadata. :param str webapp: web app destination :param dict kwargs: web app specific parameters. For example see WEBAPPS """
# build webapp URL depending on the chosen webapp
if webapp in self.WEBAPPS:
    webapp_args = self.WEBAPPS.get(webapp).get("args")
    # check kwargs parameters
    if set(webapp_args) <= set(kwargs):
        # construct and return url
        url = self.WEBAPPS.get(webapp).get("url")
        return url.format(**kwargs)
    else:
        raise TypeError(
            "'{}' webapp expects {} argument(s): {}."
            " Args passed: {}".format(
                webapp, len(webapp_args), webapp_args, kwargs
            )
        )
else:
    # note the argument order: the webapp name first, then the known webapps
    raise ValueError(
        "'{}' is not a recognized webapp among: {}."
        " Try to register it.".format(webapp, self.WEBAPPS.keys())
    )
<SYSTEM_TASK:> Register a new WEBAPP to use with the view URL builder. <END_TASK> <USER_TASK:> Description: def register_webapp(self, webapp_name: str, webapp_args: list, webapp_url: str): """Register a new WEBAPP to use with the view URL builder. :param str webapp_name: name of the web app to register :param list webapp_args: dynamic arguments to complete the URL. Typically 'md_id'. :param str webapp_url: URL of the web app to register with args tags to replace. Example: 'https://www.ppige-npdc.fr/portail/geocatalogue?uuid={md_id}' """
# check parameters for arg in webapp_args: if arg not in webapp_url: raise ValueError( "Inconsistent web app arguments and URL." " It should contain arguments to replace" " dynamically. Example: 'http://webapp.com" "/isogeo?metadata={md_id}'" ) # register self.WEBAPPS[webapp_name] = {"args": webapp_args, "url": webapp_url}
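Registering the web app from the docstring example and building a view URL with it (``utils`` is an assumed ``IsogeoUtils`` instance, as above)::

    utils.register_webapp(
        webapp_name="ppige",
        webapp_args=["md_id"],
        webapp_url="https://www.ppige-npdc.fr/portail/geocatalogue?uuid={md_id}",
    )
    utils.get_view_url(webapp="ppige", md_id="0269803d50c04faa83f09580c00a2644")
    # 'https://www.ppige-npdc.fr/portail/geocatalogue?uuid=0269803d50c04faa83f09580c00a2644'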
<SYSTEM_TASK:> Simple helper to handle pagination. Returns the number of pages for a <END_TASK> <USER_TASK:> Description: def pages_counter(self, total: int, page_size: int = 100) -> int: """Simple helper to handle pagination. Returns the number of pages for a given number of results. :param int total: count of metadata in a search request :param int page_size: count of metadata to display in each page """
if total <= page_size: count_pages = 1 else: if (total % page_size) == 0: count_pages = total / page_size else: count_pages = (total / page_size) + 1 # method ending return int(count_pages)
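For example (``utils`` as above)::

    utils.pages_counter(total=50)                  # 1
    utils.pages_counter(total=101)                 # 2
    utils.pages_counter(total=250, page_size=50)   # 5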
<SYSTEM_TASK:> Extend share model with additional information. <END_TASK> <USER_TASK:> Description: def share_extender(self, share: dict, results_filtered: dict): """Extend share model with additional information. :param dict share: share returned by API :param dict results_filtered: filtered search result """
# add share administration URL
creator_id = share.get("_creator").get("_tag")[6:]
share["admin_url"] = "{}/groups/{}/admin/shares/{}".format(
    self.app_url, creator_id, share.get("_id")
)
# check if OpenCatalog is activated
opencat_url = "{}/s/{}/{}".format(
    self.oc_url, share.get("_id"), share.get("urlToken")
)
if requests.head(opencat_url):
    share["oc_url"] = opencat_url
else:
    pass
# add the metadata ids as a list (not a generator, so it can be iterated
# more than once and serialized)
share["mds_ids"] = [i.get("_id") for i in results_filtered]
return share
<SYSTEM_TASK:> Loads API credentials from a file, JSON or INI. <END_TASK> <USER_TASK:> Description: def credentials_loader(self, in_credentials: str = "client_secrets.json") -> dict: """Loads API credentials from a file, JSON or INI. :param str in_credentials: path to the credentials file. By default, look for a client_secrets.json file. """
accepted_extensions = (".ini", ".json") # checks if not path.isfile(in_credentials): raise IOError("Credentials file doesn't exist: {}".format(in_credentials)) else: in_credentials = path.normpath(in_credentials) if path.splitext(in_credentials)[1] not in accepted_extensions: raise ValueError( "Extension of credentials file must be one of {}".format( accepted_extensions ) ) else: kind = path.splitext(in_credentials)[1] # load, check and set if kind == ".json": with open(in_credentials, "r") as f: in_auth = json.loads(f.read()) # check structure heads = ("installed", "web") if not set(in_auth).intersection(set(heads)): raise ValueError( "Input JSON structure is not as expected." " First key must be one of: {}".format(heads) ) # set if "web" in in_auth: # json structure for group application auth_settings = in_auth.get("web") out_auth = { "auth_mode": "group", "client_id": auth_settings.get("client_id"), "client_secret": auth_settings.get("client_secret"), # if not specified, must be a former file then set classic scope "scopes": auth_settings.get("scopes", ["resources:read"]), "uri_auth": auth_settings.get("auth_uri"), "uri_token": auth_settings.get("token_uri"), "uri_base": self.get_url_base_from_url_token( auth_settings.get("token_uri") ), "uri_redirect": None, } else: # assuming in_auth == 'installed' auth_settings = in_auth.get("installed") out_auth = { "auth_mode": "user", "client_id": auth_settings.get("client_id"), "client_secret": auth_settings.get("client_secret"), # if not specified, must be a former file then set classic scope "scopes": auth_settings.get("scopes", ["resources:read"]), "uri_auth": auth_settings.get("auth_uri"), "uri_token": auth_settings.get("token_uri"), "uri_base": self.get_url_base_from_url_token( auth_settings.get("token_uri") ), "uri_redirect": auth_settings.get("redirect_uris", None), } else: # assuming file is an .ini ini_parser = ConfigParser() ini_parser.read(in_credentials) # check structure if "auth" in ini_parser._sections: auth_settings = ini_parser["auth"] else: raise ValueError( "Input INI structure is not as expected." " Section of credentials must be named: auth" ) # set out_auth = { "auth_mode": auth_settings.get("CLIENT_TYPE"), "client_id": auth_settings.get("CLIENT_ID"), "client_secret": auth_settings.get("CLIENT_SECRET"), "uri_auth": auth_settings.get("URI_AUTH"), "uri_token": auth_settings.get("URI_TOKEN"), "uri_base": self.get_url_base_from_url_token( auth_settings.get("URI_TOKEN") ), "uri_redirect": auth_settings.get("URI_REDIRECT"), } # method ending return out_auth