text_prompt
stringlengths 157
13.1k
| code_prompt
stringlengths 7
19.8k
⌀ |
---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def vertical(x, ymin=0, ymax=1, color=None, width=None, dash=None, opacity=None):
    """Draws a vertical line at `x` from `ymin` to `ymax`.

    Parameters
    ----------
    x : number
        X coordinate of the line.
    ymin : int, optional
        Lower end of the line (paper/axis coordinate).
    ymax : int, optional
        Upper end of the line.
    color : str, optional
        Line color.
    width : number, optional
        Line width.
    dash : str, optional
        Plotly dash style (e.g. 'dash', 'dot').
    opacity : number, optional
        Line opacity.

    Returns
    -------
    Chart
    """
    # Only forward line attributes that were actually supplied.
    lineattr = {}
    if color:
        lineattr['color'] = color
    if width:
        lineattr['width'] = width
    if dash:
        lineattr['dash'] = dash
    # A vertical line is expressed as a layout 'shape' with x0 == x1.
    layout = dict(
        shapes=[dict(type='line', x0=x, x1=x, y0=ymin, y1=ymax, opacity=opacity, line=lineattr)]
    )
    return Chart(layout=layout)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def horizontal(y, xmin=0, xmax=1, color=None, width=None, dash=None, opacity=None):
    """Draws a horizontal line at `y` from `xmin` to `xmax`.

    Parameters
    ----------
    y : number
        Y coordinate of the line.
    xmin : int, optional
    xmax : int, optional
    color : str, optional
    width : number, optional
    dash : str, optional
    opacity : number, optional

    Returns
    -------
    Chart
    """
    # Collect only the style options the caller actually provided.
    line_style = {}
    for key, value in (('color', color), ('width', width), ('dash', dash)):
        if value:
            line_style[key] = value
    shape = dict(type='line', x0=xmin, x1=xmax, y0=y, y1=y, opacity=opacity, line=line_style)
    return Chart(layout=dict(shapes=[shape]))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def line3d(
    x, y, z, label=None, color=None, width=None, dash=None, opacity=None, mode='lines+markers'
):
    """Create a 3d line chart.

    Parameters
    ----------
    x, y, z : array-like
        Coordinates; must all have the same shape. 2-D input produces one
        trace per column.
    label : str or iterable of str, optional
    color : str, optional
    width : number, optional
    dash : str, optional
    opacity : number, optional
    mode : str, optional
        Plotly scatter mode.

    Returns
    -------
    Chart
    """
    x = np.atleast_1d(x)
    y = np.atleast_1d(y)
    z = np.atleast_1d(z)
    assert x.shape == y.shape
    assert y.shape == z.shape
    line_style = {}
    for key, value in (('color', color), ('width', width), ('dash', dash)):
        if value:
            line_style[key] = value
    if y.ndim == 2:
        # One trace per column; synthesize labels when none were given.
        if not hasattr(label, '__iter__'):
            label = _labels() if label is None else _labels(label)
        data = [
            go.Scatter3d(x=xi, y=yi, z=zi, name=name, line=line_style, mode=mode, opacity=opacity)
            for name, xi, yi, zi in zip(label, x.T, y.T, z.T)
        ]
    else:
        data = [go.Scatter3d(x=x, y=y, z=z, name=label, line=line_style, mode=mode, opacity=opacity)]
    return Chart(data=data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scatter(
    x=None,
    y=None,
    label=None,
    color=None,
    width=None,
    dash=None,
    opacity=None,
    markersize=6,
    yaxis=1,
    fill=None,
    text="",
    mode='markers',
):
    """Draws dots.

    Thin wrapper around `line` that defaults to a marker-only mode.

    Parameters
    ----------
    x : array-like, optional
    y : array-like, optional
    label : array-like, optional

    Returns
    -------
    Chart
    """
    options = dict(
        x=x,
        y=y,
        label=label,
        color=color,
        width=width,
        dash=dash,
        opacity=opacity,
        mode=mode,
        yaxis=yaxis,
        fill=fill,
        text=text,
        markersize=markersize,
    )
    return line(**options)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bar(x=None, y=None, label=None, mode='group', yaxis=1, opacity=None):
    """Create a bar chart.

    Parameters
    ----------
    x : array-like, optional
        Categories / positions. If only `x` is given it is treated as `y`.
    y : array-like, optional
        Bar heights; 2-D input produces one trace per column.
    label : str or iterable of str, optional
    mode : 'group' or 'stack', default 'group'
    yaxis : int, optional
        Y-axis index; axes other than 1 overlay the primary axis.
    opacity : number, optional

    Returns
    -------
    Chart
        A Chart with bar graph data.
    """
    assert x is not None or y is not None, "x or y must be something"
    axis_name = 'y' + str(yaxis)
    # A single positional argument is interpreted as the y-data.
    if y is None:
        x, y = None, x
    if x is None:
        x = np.arange(len(y))
    else:
        x = _try_pydatetime(x)
    x = np.atleast_1d(x)
    y = np.atleast_1d(y)
    if y.ndim == 2:
        if not hasattr(label, '__iter__'):
            label = _labels() if label is None else _labels(label)
        data = [
            go.Bar(x=x, y=col, name=name, yaxis=axis_name, opacity=opacity)
            for name, col in zip(label, y.T)
        ]
    else:
        data = [go.Bar(x=x, y=y, name=label, yaxis=axis_name, opacity=opacity)]
    layout = {'barmode': mode}
    if yaxis != 1:
        layout['yaxis' + str(yaxis)] = dict(overlaying='y')
    return Chart(data=data, layout=layout)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def heatmap(z, x=None, y=None, colorscale='Viridis'):
    """Create a heatmap.

    Parameters
    ----------
    z : array-like
        Matrix of values to color.
    x : array-like, optional
    y : array-like, optional
    colorscale : str, optional
        Plotly colorscale name.

    Returns
    -------
    Chart
    """
    trace = go.Heatmap(z=np.atleast_1d(z), x=x, y=y, colorscale=colorscale)
    return Chart(data=[trace])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fill_zero(
    x=None, y=None, label=None, color=None, width=None, dash=None, opacity=None,
    mode='lines+markers', **kargs
):
    """Draw a line and fill the area between it and zero.

    Parameters
    ----------
    x : array-like, optional
    y : array-like, optional
    label : str, optional

    Returns
    -------
    Chart
    """
    # Delegates to `line`; the only difference is the fixed fill mode.
    return line(
        x=x, y=y, label=label, color=color, width=width, dash=dash,
        opacity=opacity, mode=mode, fill='tozeroy', **kargs
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fill_between(
    x=None, ylow=None, yhigh=None, label=None, color=None, width=None, dash=None,
    opacity=None, mode='lines+markers', **kargs
):
    """Fill the area between `ylow` and `yhigh`.

    Parameters
    ----------
    x : array-like, optional
    ylow : array-like, optional
        Lower boundary curve.
    yhigh : array-like, optional
        Upper boundary curve.

    Returns
    -------
    Chart
    """
    def trace(yvals, fill_mode):
        # Both traces share every style option; only y-data and fill differ.
        return line(
            x=x, y=yvals, label=label, color=color, width=width, dash=dash,
            opacity=opacity, mode=mode, fill=fill_mode, **kargs
        )

    chart = trace(ylow, None)
    chart += trace(yhigh, 'tonexty')  # fills down to the previous trace
    return chart
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rug(x, label=None, opacity=None):
    """Rug chart: one short tick mark per data point along the x-axis.

    Parameters
    ----------
    x : array-like
        Positions of the rug marks.
    label : str, optional
    opacity : number, optional

    Returns
    -------
    Chart
    """
    values = np.atleast_1d(_try_pydatetime(x))
    trace = go.Scatter(
        x=values,
        y=np.ones_like(values),
        name=label,
        opacity=opacity,
        mode='markers',
        marker=dict(symbol='line-ns-open'),  # vertical tick marker
    )
    # Confine the rug to a thin strip at the top of the plotting area.
    rug_axis = dict(
        domain=[0.85, 1],
        showline=False,
        showgrid=False,
        zeroline=False,
        anchor='free',
        position=0.0,
        showticklabels=False,
    )
    layout = dict(
        barmode='overlay',
        hovermode='closest',
        legend=dict(traceorder='reversed'),
        xaxis1=dict(zeroline=False),
        yaxis1=rug_axis,
    )
    return Chart(data=[trace], layout=layout)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def surface(x, y, z):
    """Surface plot.

    Parameters
    ----------
    x : array-like, optional
    y : array-like, optional
    z : array-like, optional

    Returns
    -------
    Chart
    """
    return Chart(data=[go.Surface(x=x, y=y, z=z)])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hist2d(x, y, label=None, opacity=None):
    """2D histogram.

    Parameters
    ----------
    x : array-like, optional
    y : array-like, optional
    label : str, optional
    opacity : float, optional

    Returns
    -------
    Chart
    """
    trace = go.Histogram2d(
        x=np.atleast_1d(x),
        y=np.atleast_1d(y),
        opacity=opacity,
        name=label,
    )
    return Chart(data=[trace])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ytickangle(self, angle, index=1):
    """Set the angle of the y-axis tick labels.

    Parameters
    ----------
    angle : int
        Angle in degrees.
    index : int, optional
        Y-axis index.

    Returns
    -------
    Chart
    """
    axis = 'yaxis{}'.format(index)
    self.layout[axis]['tickangle'] = angle
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ylabelsize(self, size, index=1):
    """Set the font size of the y-axis label.

    Parameters
    ----------
    size : int
        Font size in points.
    index : int, optional
        Y-axis index.

    Returns
    -------
    Chart
    """
    axis = 'yaxis{}'.format(index)
    self.layout[axis]['titlefont']['size'] = size
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def yticksize(self, size, index=1):
    """Set the y-axis tick label font size.

    Parameters
    ----------
    size : int
        Font size in points.
    index : int, optional
        Y-axis index.

    Returns
    -------
    Chart
    """
    axis = 'yaxis{}'.format(index)
    self.layout[axis]['tickfont']['size'] = size
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ytickvals(self, values, index=1):
    """Set the y-axis tick values.

    Parameters
    ----------
    values : array-like
        Positions at which ticks are drawn.
    index : int, optional
        Y-axis index.

    Returns
    -------
    Chart
    """
    axis = 'yaxis{}'.format(index)
    self.layout[axis]['tickvals'] = values
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def yticktext(self, labels, index=1):
    """Set the y-axis tick labels.

    Parameters
    ----------
    labels : array-like
        Text shown at each tick position.
    index : int, optional
        Y-axis index.

    Returns
    -------
    Chart
    """
    axis = 'yaxis{}'.format(index)
    self.layout[axis]['ticktext'] = labels
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ylim(self, low, high, index=1):
    """Set the y-axis limits.

    Parameters
    ----------
    low : number
        Lower bound.
    high : number
        Upper bound.
    index : int, optional
        Y-axis index.

    Returns
    -------
    Chart
    """
    axis = 'yaxis{}'.format(index)
    self.layout[axis]['range'] = [low, high]
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ydtick(self, dtick, index=1):
    """Set the y-axis tick spacing.

    Parameters
    ----------
    dtick : number
        Distance between consecutive ticks.
    index : int, optional
        Y-axis index.

    Returns
    -------
    Chart
    """
    axis = 'yaxis{}'.format(index)
    self.layout[axis]['dtick'] = dtick
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ynticks(self, nticks, index=1):
    """Set the number of y-axis ticks.

    Parameters
    ----------
    nticks : int
        Maximum number of ticks.
    index : int, optional
        Y-axis index.

    Returns
    -------
    Chart
    """
    axis = 'yaxis{}'.format(index)
    self.layout[axis]['nticks'] = nticks
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def show(
    self,
    filename: Optional[str] = None,
    show_link: bool = True,
    auto_open: bool = True,
    detect_notebook: bool = True,
) -> None:
    """Display the chart.

    Parameters
    ----------
    filename : str, optional
        Save plot to this filename, otherwise it's saved to a temporary file.
    show_link : bool, optional
        Show link to plotly.
    auto_open : bool, optional
        Automatically open the plot (in the browser).
    detect_notebook : bool, optional
        Try to detect if we're running in a notebook.
    """
    kargs = {}
    if detect_notebook and _detect_notebook():
        # Inline rendering inside the notebook; no output file involved.
        py.init_notebook_mode()
        plot = py.iplot
    else:
        plot = py.plot
        # NOTE(review): filename/auto_open are only forwarded on the py.plot
        # path — presumably because iplot renders inline; confirm against
        # the plotly offline API in use.
        if filename is None:
            filename = NamedTemporaryFile(prefix='plotly', suffix='.html', delete=False).name
        kargs['filename'] = filename
        kargs['auto_open'] = auto_open
    plot(self, show_link=show_link, **kargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(
    self,
    filename: Optional[str] = None,
    show_link: bool = True,
    auto_open: bool = False,
    output: str = 'file',
    plotlyjs: bool = True,
) -> str:
    """Save the chart to an html file.

    Returns
    -------
    str
        The filename written (a temporary file if none was given).
    """
    target = filename
    if target is None:
        target = NamedTemporaryFile(prefix='plotly', suffix='.html', delete=False).name
    # NOTE: this doesn't work for output 'div'
    py.plot(
        self,
        show_link=show_link,
        filename=target,
        auto_open=auto_open,
        output_type=output,
        include_plotlyjs=plotlyjs,
    )
    return target
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_method_sig(method):
    """Return a string that pretty much looks how the function signature
    would be written in Python.

    :param method: a Python function or method
    :return: a string describing the Python method signature,
        e.g. ``"my_method(first_arg, second_arg=42, third_arg=something)"``.
        Note that default values are rendered with ``str()``, so string
        defaults appear without quotes.
    """
    # inspect.getargspec was deprecated and removed in Python 3.11;
    # getfullargspec is the drop-in replacement for plain positional
    # arguments with defaults, and lets us drop the manual index
    # arithmetic of the old _get_default_arg helper.
    spec = inspect.getfullargspec(method)
    defaults = spec.defaults or ()
    # Defaults align with the *last* len(defaults) arguments.
    first_default = len(spec.args) - len(defaults)
    args = []
    for i, arg in enumerate(spec.args):
        if i >= first_default:
            args.append("%s=%s" % (arg, defaults[i - first_default]))
        else:
            args.append(arg)
    return "%s(%s)" % (method.__name__, ", ".join(args))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ThenAt(self, n, f, *_args, **kwargs):
    """Partially apply `f` with `*_args`/`**kwargs`, returning a builder step
    whose awaited single argument is inserted at the `n`-th (1-based)
    position of `f`'s argument list.

    With `n == 0` the piped value is ignored and `f` is called with the
    given arguments only.

    The special kwarg `_return_type` selects the type of the returned
    builder (default: same type as the current builder); it is NOT passed
    to `f`.
    """
    # Extract the builder-control kwarg before forwarding the rest to f.
    _return_type = kwargs.pop('_return_type', None)
    insert_at = n - 1

    @utils.lift
    def partial(x):
        if insert_at >= 0:
            call_args = _args[:insert_at] + (x,) + _args[insert_at:]
        else:
            # n == 0: the awaited value is deliberately dropped.
            call_args = _args
        return f(*call_args, **kwargs)

    return self.__then__(partial, _return_type=_return_type)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Seq(self, *sequence, **kwargs):
    """Express left-to-right function composition.

    ``Seq(f, g)`` behaves like ``lambda x: g(f(x))`` — execution flows
    left to right, like the ``|>`` operator in F#/Elixir/Elm.
    ``Seq(f)`` is equivalent to ``f`` and ``Seq()`` is the identity.
    """
    steps = [_parse(expr)._f for expr in sequence]

    def composed(x, state):
        # Thread (value, state) through every step in order.
        acc = (x, state)
        for step in steps:
            acc = step(*acc)
        return acc

    return self.__then__(composed, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Write(self, *state_args, **state_dict):
    """See `phi.dsl.Expression.Read`."""
    # At least one state variable must be named, and at most one
    # keyword-style `name=expression` pair is allowed.
    if len(state_dict) + len(state_args) < 1:
        raise Exception("Please include at-least 1 state variable, got {0} and {1}".format(state_args, state_dict))
    if len(state_dict) > 1:
        raise Exception("Please include at-most 1 keyword argument expression, got {0}".format(state_dict))

    if state_dict:
        # `name=expr`: evaluate expr first, then store its result under name.
        state_key = next(iter(state_dict.keys()))
        write_expr = state_dict[state_key]
        state_args += (state_key,)
        expr = self >> write_expr
    else:
        expr = self

    def g(x, state):
        updated = utils.merge(state, {key: x for key in state_args})
        # side effect for convenience
        _StateContextManager.REFS.update(updated)
        return x, updated

    return expr.__then__(g)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Else(self, *else_exprs, **kwargs):
    """See `phi.dsl.Expression.If`."""
    # Compile the else-branch expressions into a single function and
    # attach it to the pending If AST, then rebuild the conditional.
    else_fn = E.Seq(*else_exprs)._f
    updated_ast = _add_else(self._ast, else_fn)
    return self._root.__then__(_compile_if(updated_ast), **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def length(string, until=None):
    """Return the number of graphemes in the string.

    Unlike ``len(string)`` this must traverse the whole string, so it is
    linear in the string length (up to `until`). If `until` is given,
    counting stops there — useful for testing a length against a limit
    without paying for the excess.
    """
    if until is None:
        return sum(1 for _ in GraphemeIterator(string))
    grapheme_iter = graphemes(string)
    count = 0
    while count < until:
        try:
            next(grapheme_iter)
        except StopIteration:
            # Fewer than `until` graphemes in the string.
            break
        count += 1
    return count
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def slice(string, start=None, end=None):
    """
    Returns a substring of the given string, counting graphemes instead
    of codepoints.

    Negative indices are currently not supported and raise
    NotImplementedError.
    """
    if start is None:
        start = 0
    if end is not None and start >= end:
        return ""
    if start < 0:
        raise NotImplementedError("Negative indexing is currently not supported.")
    # sum_ tracks the codepoint offset reached so far; start_index records
    # the codepoint offset where grapheme number `start` begins (None until
    # that grapheme is reached).
    sum_ = 0
    start_index = None
    for grapheme_index, grapheme_length in enumerate(grapheme_lengths(string)):
        if grapheme_index == start:
            start_index = sum_
        elif grapheme_index == end:
            # Reached grapheme `end`: slice up to (not including) it.
            return string[start_index:sum_]
        sum_ += grapheme_length
    if start_index is not None:
        # `end` was None or beyond the last grapheme: slice to the end.
        return string[start_index:]
    # `start` was past the end of the string.
    return ""
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contains(string, substring):
    """
    Returns true if the sequence of graphemes in `substring` is also
    present in `string`.

    This differs from the normal python `in` operator: `in` returns true
    when the codepoint sequence matches, without considering grapheme
    boundaries.

    Performance notes: very fast when `substring not in string` (then the
    graphemes cannot match either); otherwise linear in the string length,
    stopping at the first match.
    """
    # Cheap necessary condition on raw codepoints before grapheme work.
    if substring not in string:
        return False
    substr_graphemes = list(graphemes(substring))
    if len(substr_graphemes) == 0:
        # Empty substring is trivially contained.
        return True
    elif len(substr_graphemes) == 1:
        return substr_graphemes[0] in graphemes(string)
    else:
        # Slide a window of len(substr_graphemes) graphemes over `string`
        # and compare it to the substring's grapheme list at each step.
        str_iter = graphemes(string)
        str_sub_part = []
        for _ in range(len(substr_graphemes)):
            try:
                str_sub_part.append(next(str_iter))
            except StopIteration:
                # `string` has fewer graphemes than `substring`.
                return False
        for g in str_iter:
            if str_sub_part == substr_graphemes:
                return True
            str_sub_part.append(g)
            str_sub_part.pop(0)
        # Final window still needs checking after the loop ends.
        return str_sub_part == substr_graphemes
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def startswith(string, prefix):
    """Like str.startswith, but also checks that the string starts with the
    given prefix's sequence of *graphemes*.

    str.startswith may return true for a prefix that is not visually a
    prefix when a grapheme cluster continues past where the prefix ends.
    """
    if not string.startswith(prefix):
        return False
    # The prefix boundary must also be a safe grapheme boundary.
    boundary = len(prefix)
    return safe_split_index(string, boundary) == boundary
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def endswith(string, suffix):
    """Like str.endswith, but also checks that the string ends with the
    given suffix's sequence of *graphemes*.

    str.endswith may return true for a suffix that is not visually a
    suffix when a grapheme cluster was initiated before the suffix starts.
    """
    if not string.endswith(suffix):
        return False
    # The suffix boundary must also be a safe grapheme boundary.
    split_at = len(string) - len(suffix)
    return safe_split_index(string, split_at) == split_at
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def safe_split_index(string, max_len):
    """Return the highest index up to `max_len` at which `string` can be
    sliced without breaking a grapheme.

    Normally does not traverse the full grapheme sequence up to the given
    length, but some grapheme boundaries depend on previous state, so the
    worst case is O(n). The result is always between 0 and len(string).
    """
    # Jump to the last boundary that is certain without context, then walk
    # grapheme by grapheme until the next step would pass max_len.
    index = get_last_certain_break_index(string, max_len)
    for grapheme_len in grapheme_lengths(string[index:]):
        if index + grapheme_len > max_len:
            break
        index += grapheme_len
    return index
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def writeB1logfile(filename, data):
    """Write a header structure into a B1 logfile.

    Inputs:
        filename: name of the file.
        data: header dictionary

    Notes:
        exceptions pass through to the caller.
    """
    allkeys = list(data.keys())
    # 'with' guarantees the file handle is closed even when a formatter
    # raises (the original opened/closed manually and could leak on error).
    with open(filename, 'wt', encoding='utf-8') as f:
        for ld in _logfile_data:  # process each line
            linebegin = ld[0]
            fieldnames = ld[1]
            # set the default formatter if it is not given
            if len(ld) < 3 or ld[2] is None:
                formatter = str
            else:
                formatter = ld[2]
            # this will contain the formatted values.
            formatted = ''
            if isinstance(fieldnames, str):
                # scalar field name, just one field. Formatter should be a
                # callable.
                if fieldnames not in allkeys:
                    # this field has already been processed
                    continue
                try:
                    formatted = formatter(data[fieldnames])
                except KeyError:
                    # field not found in param structure
                    continue
            elif isinstance(fieldnames, tuple):
                # more than one field name in a tuple. In this case,
                # formatter can be a tuple of callables...
                if all(fn not in allkeys for fn in fieldnames):
                    # if all the fields have been processed:
                    continue
                if isinstance(formatter, tuple) and len(formatter) == len(fieldnames):
                    formatted = ' '.join([ft(data[fn])
                                          for ft, fn in zip(formatter, fieldnames)])
                # ...or a single callable...
                elif not isinstance(formatter, tuple):
                    formatted = formatter([data[fn] for fn in fieldnames])
                # ...otherwise raise an exception.
                else:
                    raise SyntaxError('Programming error: formatter should be a scalar or a tuple of the same length as the field names in logfile_data.')
            else:  # fieldnames is neither a string, nor a tuple.
                raise SyntaxError(
                    'Invalid syntax (programming error) in logfile_data in writeparamfile().')
            f.write(linebegin + ':\t' + formatted + '\n')
            # remove the params treated so they are not written twice.
            if isinstance(fieldnames, tuple):
                for fn in fieldnames:
                    if fn in allkeys:
                        allkeys.remove(fn)
            else:
                if fieldnames in allkeys:
                    allkeys.remove(fieldnames)
        # write untreated params
        for k in allkeys:
            f.write(k + ':\t' + str(data[k]) + '\n')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _readedf_extractline(left, right):
    """Helper function to interpret lines in an EDF file header.

    Tries progressively looser conversions on `right` and returns the
    first that succeeds; falls back to the raw value. `left` (the key)
    is accepted for interface compatibility but unused.
    """
    converters = [
        int,
        float,
        lambda l: float(l.split(None, 1)[0]),
        lambda l: int(l.split(None, 1)[0]),
        dateutil.parser.parse,
        lambda x: str(x),  # last resort: always succeeds
    ]
    for convert in converters:
        try:
            return convert(right)
        except ValueError:
            continue
    return right
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readmarheader(filename):
    """Read a header from a MarResearch .image file.

    Inputs:
        filename: name of the .image file.

    Returns:
        dict with geometry, timing and identification fields extracted
        from the fixed-layout binary header.
    """
    with open(filename, 'rb') as f:
        # np.fromstring is deprecated for binary input (and removed in
        # newer NumPy); np.frombuffer is the drop-in replacement.
        intheader = np.frombuffer(f.read(10 * 4), np.int32)
        floatheader = np.frombuffer(f.read(15 * 4), '<f4')
        strheader = f.read(24)
        f.read(4)  # skip 4 unused bytes
        # 29 fixed-width string fields follow; read them to document the
        # layout even though only the fields below are returned.
        otherstrings = [f.read(16) for i in range(29)]
        return {'Xsize': intheader[0], 'Ysize': intheader[1], 'MeasTime': intheader[8],
                'BeamPosX': floatheader[7], 'BeamPosY': floatheader[8],
                'Wavelength': floatheader[9], 'Dist': floatheader[10],
                '__Origin__': 'MarResearch .image', 'recordlength': intheader[2],
                'highintensitypixels': intheader[4],
                'highintensityrecords': intheader[5],
                'Date': dateutil.parser.parse(strheader),
                'Detector': 'MARCCD', '__particle__': 'photon'}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Square(x, a, b, c):
    """Second order polynomial.

    Inputs:
        x: independent variable
        a: coefficient of the second-order term
        b: coefficient of the first-order term
        c: additive constant

    Formula:
        a*x^2 + b*x + c
    """
    quadratic = a * x ** 2
    linear = b * x
    return quadratic + linear + c
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Cube(x, a, b, c, d):
    """Third order polynomial.

    Inputs:
        x: independent variable
        a: coefficient of the third-order term
        b: coefficient of the second-order term
        c: coefficient of the first-order term
        d: additive constant

    Formula:
        a*x^3 + b*x^2 + c*x + d
    """
    cubic = a * x ** 3
    quadratic = b * x ** 2
    linear = c * x
    return cubic + quadratic + linear + d
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def LogNormal(x, a, mu, sigma):
    """PDF of a log-normal distribution.

    Inputs:
        x: independent variable
        a: amplitude
        mu: center parameter
        sigma: width parameter

    Formula:
        a / (2*pi*sigma^2*x^2)^0.5 * exp(-(log(x)-mu)^2 / (2*sigma^2))
    """
    variance = sigma ** 2
    amplitude = a / np.sqrt(2 * np.pi * variance * x ** 2)
    return amplitude * np.exp(-(np.log(x) - mu) ** 2 / (2 * variance))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_subdirs(startdir='.', recursion_depth=None):
    """Find all subdirectories of a directory.

    Inputs:
        startdir: directory to start with. Defaults to the current folder.
        recursion_depth: number of levels to traverse. None is infinite.

    Output:
        a list of absolute names of subfolders, starting with `startdir`
        itself.
    """
    startdir = os.path.expanduser(startdir)
    direct_subdirs = []
    for entry in os.listdir(startdir):
        full = os.path.join(startdir, entry)
        if os.path.isdir(full):
            direct_subdirs.append(full)
    # Depth 1 (or less) means: this directory plus its immediate children.
    if recursion_depth is not None and recursion_depth <= 1:
        return [startdir] + direct_subdirs
    next_depth = None if recursion_depth is None else recursion_depth - 1
    result = [startdir]
    for sub in direct_subdirs:
        result.extend(find_subdirs(sub, next_depth))
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def findpeak_multi(x, y, dy, N, Ntolerance, Nfit=None, curve='Lorentz', return_xfit=False, return_stat=False):
    """Find multiple peaks in the dataset given by vectors x and y.

    Points are searched for in the dataset where the N points before and
    after have strictly lower values than them. To get rid of false
    negatives caused by fluctuations, Ntolerance is introduced. It is the
    number of outlier points to be tolerated, i.e. points on the left-hand
    side of the peak where the growing tendency breaks, or on the
    right-hand side where the diminishing tendency breaks. Increasing this
    number, however, gives rise to false positives.

    Inputs:
        x, y, dy: vectors defining the data-set. dy can be None.
        N, Ntolerance: the parameters of the peak-finding routine (see above)
        Nfit: the number of points on the left and on the right of the peak
            to be used for least-squares refinement of the peak positions.
            Defaults to N.
        curve: the type of the curve to be fitted to the peaks: 'Lorentz'
            or 'Gauss'. NOTE(review): this argument is currently not
            forwarded to findpeak_single() below -- confirm whether it
            should be.
        return_xfit: if the abscissa values used for fitting are to be
            returned.
        return_stat: if the fitting statistics are to be returned for each
            peak.

    Outputs: position, hwhm, baseline, amplitude[, xfit][, stat] -- lists,
        one element per found peak.

    Notes:
        Peaks are identified where the curve grows N points before and
        decreases N points after. On noisy curves Ntolerance may improve
        the results, i.e. relaxes the 2*N criterion mentioned above.
    """
    if Nfit is None:
        Nfit = N
    # find points where the curve grows for N points before them and
    # decreases for N points after them. To accomplish this, we create
    # an indicator array of the sign of the first derivative.
    sgndiff = np.sign(np.diff(y))
    xdiff = x[:-1]  # associate difference values to the lower 'x' value.
    pix = np.arange(len(x) - 1)  # pixel coordinates create an indicator
    # array as the sum of sgndiff shifted left and right. whenever an
    # element of this is 2*N, it fulfills the criteria above.
    indicator = np.zeros(len(sgndiff) - 2 * N)
    for i in range(2 * N):
        # np.sign(N - i) is +1 for the left (growing) side and -1 for the
        # right (decreasing) side of the candidate point.
        indicator += np.sign(N - i) * sgndiff[i:-2 * N + i]
    # add the last one, since the indexing is different (would be
    # [2*N:0], which is not what we want)
    indicator += -sgndiff[2 * N:]
    # find the positions (indices) of the peak. The strict criteria is
    # relaxed somewhat by using the Ntolerance value. Note the use of
    # 2*Ntolerance, since each outlier point creates two outliers in
    # sgndiff (-1 insted of +1 and vice versa).
    peakpospix = pix[N:-N][indicator >= 2 * N - 2 * Ntolerance]
    ypeak = y[peakpospix]
    # Now refine the found positions by least-squares fitting. But
    # first we have to sort out other non-peaks, i.e. found points
    # which have other found points with higher values in their [-N,N]
    # neighbourhood.
    pos = []; ampl = []; hwhm = []; baseline = []; xfit = []; stat = []
    dy1 = None
    for i in range(len(ypeak)):
        # keep candidate i only if no other candidate within +-N pixels
        # has a higher y value
        if not [j for j in list(range(i + 1, len(ypeak))) + list(range(0, i)) if abs(peakpospix[j] - peakpospix[i]) <= N and ypeak[i] < ypeak[j]]:
            # only leave maxima.
            idx = peakpospix[i]
            if dy is not None:
                dy1 = dy[(idx - Nfit):(idx + Nfit + 1)]
            xfit_ = x[(idx - Nfit):(idx + Nfit + 1)]
            # refine this peak on a +-Nfit window around the raw maximum
            pos_, hwhm_, baseline_, ampl_, stat_ = findpeak_single(xfit_, y[(idx - Nfit):(idx + Nfit + 1)], dy1, position=x[idx], return_stat=True)
            stat.append(stat_)
            xfit.append(xfit_)
            pos.append(pos_)
            ampl.append(ampl_)
            hwhm.append(hwhm_)
            baseline.append(baseline_)
    results = [pos, hwhm, baseline, ampl]
    if return_xfit:
        results.append(xfit)
    if return_stat:
        results.append(stat)
    return tuple(results)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readspec(filename, read_scan=None):
    """Open a SPEC file and read its content.

    Inputs:
        filename: string
            the file to open
        read_scan: None, 'all' or integer
            the index of the scan to be read from the file. If None, no
            scan is read. If 'all', all scans are read. If a number, just
            the scan with that number is read.

    Output:
        the data in the spec file in a dict.
    """
    with open(filename, 'rt') as f:
        sf = {'motors': [], 'maxscannumber': 0}
        sf['originalfilename'] = filename
        # NOTE(review): lastscannumber is never used below -- presumably a
        # leftover; confirm before removing.
        lastscannumber = None
        # --- header part: read '#X' lines until the first empty line ---
        while True:
            l = f.readline()
            if l.startswith('#F'):
                # original file name as recorded by SPEC
                sf['filename'] = l[2:].strip()
            elif l.startswith('#E'):
                # epoch (seconds since the UNIX epoch)
                sf['epoch'] = int(l[2:].strip())
                sf['datetime'] = datetime.datetime.fromtimestamp(sf['epoch'])
            elif l.startswith('#D'):
                sf['datestring'] = l[2:].strip()
            elif l.startswith('#C'):
                sf['comment'] = l[2:].strip()
            elif l.startswith('#O'):
                # motor-name line: '#On <name> <name> ...'
                try:
                    l = l.split(None, 1)[1]
                except IndexError:
                    # '#On' line without any motor names: skip it
                    continue
                if 'motors' not in list(sf.keys()):
                    sf['motors'] = []
                sf['motors'].extend([x.strip() for x in l.split(' ')])
            elif not l.strip():
                # empty line, signifies the end of the header part. The next
                # line will be a scan.
                break
        sf['scans'] = {}
        if read_scan is not None:
            if read_scan == 'all':
                nr = None  # readspecscan() reads the next scan when nr is None
            else:
                nr = read_scan
            try:
                while True:
                    s = readspecscan(f, nr)
                    if isinstance(s, dict):
                        sf['scans'][s['number']] = s
                        if nr is not None:
                            # the single requested scan has been found
                            break
                        sf['maxscannumber'] = max(
                            sf['maxscannumber'], s['number'])
                    elif s is not None:
                        # readspecscan() returned just a scan number (scan
                        # skipped): still track the maximum
                        sf['maxscannumber'] = max(sf['maxscannumber'], s)
            except SpecFileEOF:
                pass
        else:
            # no scan data requested: only scan the rest of the file for
            # '#S' lines to determine the highest scan number
            while True:
                l = f.readline()
                if not l:
                    break
                if l.startswith('#S'):
                    n = int(l[2:].split()[0])
                    sf['maxscannumber'] = max(sf['maxscannumber'], n)
        # propagate file-level metadata into each loaded scan
        for n in sf['scans']:
            s = sf['scans'][n]
            s['motors'] = sf['motors']
            if 'comment' not in s:
                s['comment'] = sf['comment']
            if 'positions' not in s:
                s['positions'] = [None] * len(sf['motors'])
    return sf
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def energy(self) -> ErrorValue:
    """X-ray photon energy computed from the wavelength.

    Uses E = h*c/lambda with CODATA constants; assumes self.wavelength is
    in nm (hence the division by scipy.constants.nano) -- TODO confirm.
    """
    # physical_constants entries are (value, unit, uncertainty); [0::2]
    # picks (value, uncertainty) for the ErrorValue constructor.
    lightspeed = ErrorValue(*(scipy.constants.physical_constants['speed of light in vacuum'][0::2]))
    planck = ErrorValue(*(scipy.constants.physical_constants['Planck constant in eV s'][0::2]))
    return lightspeed * planck / scipy.constants.nano / self.wavelength
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def findbeam_gravity(data, mask):
    """Find the beam center with the "gravity" method.

    Inputs:
        data: scattering image
        mask: mask matrix (nonzero means valid pixel)

    Output:
        a list of two coordinates of the beam center (same convention as
        before: [mean column-wise center, mean row-wise center]).
    """
    # Work on a masked copy: invalid pixels must not contribute weight.
    weighted = data.copy()
    weighted[mask == 0] = 0
    rowcoord = np.arange(weighted.shape[0])
    colcoord = np.arange(weighted.shape[1])
    # For every column: first moment along rows and the total weight.
    # Their ratio is the center of gravity (row index) of that column.
    moment_by_col = np.dot(rowcoord, weighted)
    weight_by_col = weighted.sum(axis=0)
    valid_cols = (moment_by_col != 0) & (weight_by_col != 0)
    row_centers = moment_by_col[valid_cols] / weight_by_col[valid_cols]
    # For every row: first moment along columns and the total weight.
    moment_by_row = np.dot(weighted, colcoord)
    weight_by_row = weighted.sum(axis=1)
    valid_rows = (moment_by_row != 0) & (weight_by_row != 0)
    col_centers = moment_by_row[valid_rows] / weight_by_row[valid_rows]
    # Average the per-row / per-column centers.
    return [col_centers.mean(), row_centers.mean()]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def findbeam_slices(data, orig_initial, mask=None, maxiter=0, epsfcn=0.001, dmin=0, dmax=np.inf, sector_width=np.pi / 9.0, extent=10, callback=None):
    """Find beam center with the "slices" method.

    Inputs:
        data: scattering matrix
        orig_initial: estimated value for x (row) and y (column)
            coordinates of the beam center, starting from 1.
        mask: mask matrix. If None, nothing will be masked. Otherwise it
            should be of the same size as data. Nonzero means non-masked.
        maxiter: maximum number of function evaluations for
            scipy.optimize.leastsq (passed as maxfev)
        epsfcn: epsfcn input for scipy.optimize.leastsq
        dmin: disregard pixels nearer to the origin than this
        dmax: disregard pixels farther from the origin than this
        sector_width: width of sectors in radians
        extent: approximate distance of the current and the real origin in
            pixels. Too high a value makes the fitting procedure unstable.
            Too low a value does not permit to move away the current origin.
        callback: callback function (expects no arguments)

    Output:
        a vector of length 2 with the x (row) and y (column) coordinates
        of the origin.
    """
    if mask is None:
        mask = np.ones(data.shape)
    data = data.astype(np.double)

    def targetfunc(orig, data, mask, orig_orig, callback):
        # integrate four sectors around the current origin estimate;
        # opposite sectors must coincide when the origin is correct
        I = [None] * 4
        p, Ints, A = radint_nsector(data, None, -1, -1, -1, orig[0] + orig_orig[0], orig[1] + orig_orig[1], mask=mask,
                                    phi0=np.pi / 4 - 0.5 * sector_width, dphi=sector_width,
                                    Nsector=4)
        # common radial range of the four sectors, clipped to [dmin, dmax]
        minpix = max(max(p.min(0).tolist()), dmin)
        maxpix = min(min(p.max(0).tolist()), dmax)
        if (maxpix < minpix):
            raise ValueError('The four slices do not overlap! Please give a\
 better approximation for the origin or use another centering method.')
        for i in range(4):
            I[i] = Ints[:, i][(p[:, i] >= minpix) & (p[:, i] <= maxpix)]
        # squared difference of opposite sector pairs, normalized by range
        ret = ((I[0] - I[2]) ** 2 + (I[1] - I[3]) ** 2) / (maxpix - minpix)
        if callback is not None:
            callback()
        return ret

    # BUGFIX: the epsfcn argument was ignored and a hard-coded 0.01 was
    # passed to leastsq; honour the parameter instead.
    orig = scipy.optimize.leastsq(targetfunc, np.array([extent, extent]),
                                  args=(data, 1 - mask.astype(np.uint8),
                                        np.array(orig_initial) - extent, callback),
                                  maxfev=maxiter, epsfcn=epsfcn)
    return orig[0] + np.array(orig_initial) - extent
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def findbeam_azimuthal(data, orig_initial, mask=None, maxiter=100, Ntheta=50, dmin=0, dmax=np.inf, extent=10, callback=None):
    """Find the beam center using azimuthal integration.

    Inputs:
        data: scattering matrix
        orig_initial: estimated value for x (row) and y (column)
            coordinates of the beam center, starting from 1.
        mask: mask matrix. If None, nothing will be masked. Otherwise it
            should be of the same size as data. Nonzero means non-masked.
        maxiter: maximum number of iterations for scipy.optimize.fmin
        Ntheta: the number of theta points for the azimuthal integration
        dmin, dmax: pixels nearer to / farther from the origin than these
            will be excluded from the azimuthal integration
        extent: approximate distance of the current and the real origin in
            pixels. Too high a value makes the fitting procedure unstable.
            Too low a value does not permit to move away the current origin.
        callback: callback function (expects no arguments)

    Output:
        a vector of length 2 with the x and y coordinates of the origin,
        starting from 1.
    """
    if mask is None:
        mask = np.ones(data.shape)
    data = data.astype(np.double)

    def targetfunc(shift, data, mask, origin_offset, callback):
        # A mis-centred beam makes the azimuthal curve oscillate with the
        # azimuth angle; fit a sine and minimize its amplitude.
        def sinfun(p, x, y):
            return (y - np.sin(x + p[1]) * p[0] - p[2]) / np.sqrt(len(x))
        theta, curve, area = azimintpix(
            data, None, shift[0] + origin_offset[0],
            shift[1] + origin_offset[1], mask.astype('uint8'),
            Ntheta, dmin, dmax)
        if len(area) > (area > 0).sum():
            raise ValueError('findbeam_azimuthal: non-complete azimuthal average, please consider changing dmin, dmax and/or orig_initial!')
        guess = ((curve.max() - curve.min()) / 2.0,
                 theta[curve == curve.max()][0],
                 curve.mean())
        fitted = scipy.optimize.leastsq(sinfun, guess, (theta, curve))[0]
        if callback is not None:
            callback()
        return abs(fitted[0])

    best = scipy.optimize.fmin(targetfunc, np.array([extent, extent]),
                               args=(data, 1 - mask,
                                     np.array(orig_initial) - extent, callback),
                               maxiter=maxiter, disp=0)
    return best + np.array(orig_initial) - extent
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def findbeam_azimuthal_fold(data, orig_initial, mask=None, maxiter=100, Ntheta=50, dmin=0, dmax=np.inf, extent=10, callback=None):
    """Find beam center using azimuthal integration and folding.

    Inputs:
        data: scattering matrix
        orig_initial: estimated value for x (row) and y (column)
            coordinates of the beam center, starting from 1.
        mask: mask matrix. If None, nothing will be masked. Otherwise it
            should be of the same size as data. Nonzero means non-masked.
        maxiter: maximum number of iterations for scipy.optimize.fmin
        Ntheta: the number of theta points for the azimuthal integration.
            Should be even!
        dmin, dmax: pixels nearer to / farther from the origin than these
            will be excluded from the azimuthal integration
        extent: approximate distance of the current and the real origin in
            pixels. Too high a value makes the fitting procedure unstable.
            Too low a value does not permit to move away the current origin.
        callback: callback function (expects no arguments)

    Output:
        a vector of length 2 with the x and y coordinates of the origin,
        starting from 1.
    """
    if Ntheta % 2:
        raise ValueError('Ntheta should be even!')
    if mask is None:
        mask = np.ones_like(data).astype(np.uint8)
    data = data.astype(np.double)

    # the function to minimize is the sum of squared difference of the two
    # halves of the azimuthal integral (the curve is periodic by pi when
    # the origin is correct).
    def targetfunc(orig, data, mask, orig_orig, callback):
        I = azimintpix(data, None, orig[0] + orig_orig[0],
                       orig[1] + orig_orig[1], mask, Ntheta, dmin, dmax)[1]
        if callback is not None:
            callback()
        # BUGFIX: Ntheta / 2 is a float on Python 3 and cannot be used as
        # an index; use floor division.
        half = Ntheta // 2
        return np.sum((I[:half] - I[half:]) ** 2) / Ntheta

    orig1 = scipy.optimize.fmin(targetfunc, np.array([extent, extent]),
                                args=(data, 1 - mask, np.array(orig_initial) - extent, callback),
                                maxiter=maxiter, disp=0)
    return orig1 + np.array(orig_initial) - extent
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def findbeam_semitransparent(data, pri, threshold=0.05):
    """Find beam with 2D weighting of the semitransparent beamstop area.

    Inputs:
        data: scattering matrix
        pri: list of four: [xmin, xmax, ymin, ymax] for the borders of the
            beam area under the semitransparent beamstop. X corresponds to
            the column index (i.e. A[Y, X] is the element of A from the
            Xth column and the Yth row). You can get these by zooming on
            the figure and retrieving the result of axis() (as in Matlab).
        threshold: do not count pixels if their intensity falls below
            max_intensity*threshold. max_intensity is the highest count
            rate in the current row or column, respectively. Set None to
            disable this feature.

    Outputs: bcx, bcy
        the x (row) and y (column) coordinates of the primary beam
    """
    # BUGFIX: np.floor/np.ceil return floats, which are not accepted as
    # array indices by modern NumPy; convert to int explicitly.
    rowmin = int(np.floor(min(pri[2:])))
    rowmax = int(np.ceil(max(pri[2:])))
    colmin = int(np.floor(min(pri[:2])))
    colmax = int(np.ceil(max(pri[:2])))
    if threshold is not None:
        # beam area on the scattering image
        B = data[rowmin:rowmax, colmin:colmax]
        # row and column indices
        Ri = np.arange(rowmin, rowmax)
        Ci = np.arange(colmin, colmax)
        Ravg = B.mean(1)  # average over column index, will be a concave curve
        Cavg = B.mean(0)  # average over row index, will be a concave curve
        # find the maxima in both directions and their positions
        maxR = Ravg.max()
        maxRpos = Ravg.argmax()
        maxC = Cavg.max()
        maxCpos = Cavg.argmax()
        # cut off pixels which are smaller than threshold*peak_height
        Rmin = Ri[
            ((Ravg - Ravg[0]) >= ((maxR - Ravg[0]) * threshold)) & (Ri < maxRpos)][0]
        Rmax = Ri[
            ((Ravg - Ravg[-1]) >= ((maxR - Ravg[-1]) * threshold)) & (Ri > maxRpos)][-1]
        Cmin = Ci[
            ((Cavg - Cavg[0]) >= ((maxC - Cavg[0]) * threshold)) & (Ci < maxCpos)][0]
        Cmax = Ci[
            ((Cavg - Cavg[-1]) >= ((maxC - Cavg[-1]) * threshold)) & (Ci > maxCpos)][-1]
    else:
        Rmin = rowmin
        Rmax = rowmax
        Cmin = colmin
        Cmax = colmax
    # centroid (center of gravity) of the selected beam area
    d = data[Rmin:Rmax + 1, Cmin:Cmax + 1]
    x = np.arange(Rmin, Rmax + 1)
    y = np.arange(Cmin, Cmax + 1)
    bcx = (d.sum(1) * x).sum() / d.sum()
    bcy = (d.sum(0) * y).sum() / d.sum()
    return bcx, bcy
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def findbeam_radialpeak(data, orig_initial, mask, rmin, rmax, maxiter=100, drive_by='amplitude', extent=10, callback=None):
    """Find the beam by optimizing a peak in the radial average.

    Inputs:
        data: scattering matrix
        orig_initial: first guess for the origin
        mask: mask matrix. Nonzero is non-masked.
        rmin, rmax: distance from the origin (in pixels) of the peak range.
        maxiter: maximum number of iterations for scipy.optimize.fmin
        drive_by: 'hwhm' to minimize the hwhm of the peak or 'amplitude'
            to maximize the peak amplitude
        extent: approximate distance of the current and the real origin in
            pixels. Too high a value makes the fitting procedure unstable.
            Too low a value does not permit to move away the current origin.
        callback: callback function (expects no arguments)

    Outputs: the beam coordinates

    Notes: a Gaussian will be fitted.
    """
    orig_initial = np.array(orig_initial)
    invmask = 1 - mask.astype(np.uint8)
    data = data.astype(np.double)
    pix = np.arange(rmin * 1.0, rmax * 1.0, 1)
    mode = drive_by.lower()
    if mode not in ('hwhm', 'amplitude'):
        raise ValueError('Invalid argument for drive_by %s' % drive_by)

    def targetfunc(shift, data, invmask, origin_offset, callback):
        radial = radintpix(
            data, None, shift[0] + origin_offset[0],
            shift[1] + origin_offset[1], invmask, pix)[1]
        peak = misc.findpeak_single(pix, radial)
        if mode == 'hwhm':
            # minimize the half width at half maximum
            value = abs(float(peak[1]))
        else:
            # maximize baseline + amplitude, i.e. minimize its negative
            value = -float(peak[2] + peak[3])
        if callback is not None:
            callback()
        return value

    best = scipy.optimize.fmin(targetfunc, np.array([extent, extent]),
                               args=(data, invmask, orig_initial - extent, callback),
                               maxiter=maxiter, disp=0)
    return orig_initial - extent + best
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scalefactor(self, other, qmin=None, qmax=None, Npoints=None):
    """Calculate a scaling factor, by which this curve is to be multiplied
    to best fit the other one.

    Inputs:
        other: the other curve (an instance of GeneralCurve or of a
            subclass of it)
        qmin: lower cut-off (None to determine the common range
            automatically)
        qmax: upper cut-off (None to determine the common range
            automatically)
        Npoints: number of points to use in the common x-range (None
            defaults to the lowest value among the two datasets)

    Outputs:
        The scaling factor determined by interpolating both datasets to
        the same abscissa and fitting the ratio of their intensities.
        Error propagation is taken into account (orthogonal distance
        regression).
    """
    if qmin is None:
        qmin = max(self.q.min(), other.q.min())
    if qmax is None:
        # BUGFIX: the computed upper bound was assigned to a stray local
        # 'xmax', leaving qmax as None in the trim() calls below.
        qmax = min(self.q.max(), other.q.max())
    data1 = self.trim(qmin, qmax)
    data2 = other.trim(qmin, qmax)
    if Npoints is None:
        Npoints = min(len(data1), len(data2))
    # interpolate both curves onto a common abscissa
    commonx = np.linspace(
        max(data1.q.min(), data2.q.min()), min(data2.q.max(), data1.q.max()), Npoints)
    data1 = data1.interpolate(commonx)
    data2 = data2.interpolate(commonx)
    # fit I2 = a * I1; the factor 'a' is the result
    return nonlinear_odr(data1.Intensity, data2.Intensity, data1.Error, data2.Error, lambda x, a: a * x, [1])[0]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _substitute_fixed_parameters_covar(self, covar):
    """Expand a free-parameter covariance matrix to full parameter count.

    Rows and columns belonging to fixed parameters are filled with zeros;
    the entries for free parameters are copied from `covar` (which is the
    covariance matrix of the free parameters only).
    """
    nparams = len(self._fixed_parameters)
    # positions of the free parameters in the full parameter vector
    free_indices = [i for i in range(len(self.parameters()))
                    if self._fixed_parameters[i] is None]
    expanded = np.zeros((nparams, nparams))
    for a, i in enumerate(free_indices):
        for b, j in enumerate(free_indices):
            expanded[i, j] = covar[a, b]
    return expanded
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loadmask(self, filename: str) -> np.ndarray:
    """Load a mask matrix from a Matlab(R) .mat file.

    The first variable whose name neither starts nor ends with an
    underscore is taken as the mask; it is returned as a boolean array.
    """
    mask = scipy.io.loadmat(self.find_file(filename, what='mask'))
    maskkey = [k for k in mask.keys() if not (k.startswith('_') or k.endswith('_'))][0]
    # BUGFIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the correct dtype argument.
    return mask[maskkey].astype(bool)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loadcurve(self, fsn: int) -> classes2.Curve:
    """Load a radial scattering curve for the given file sequence number."""
    filename = self.find_file('{}_{:05d}.txt'.format(self._exposureclass, fsn))
    return classes2.Curve.new_from_file(filename)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def writeint2dnorm(filename, Intensity, Error=None):
    """Save the intensity and (optionally) the error matrix to a file.

    Inputs
    ------
    filename: string
        the name of the file; the extension decides the format
        (.npz, .mat, or plain text otherwise)
    Intensity: np.ndarray
        the intensity matrix
    Error: np.ndarray, optional
        the error matrix (can be ``None``, if no error matrix is to be
        saved)

    Output
    ------
    None
    """
    payload = {'Intensity': Intensity}
    if Error is not None:
        payload['Error'] = Error
    upper = filename.upper()
    if upper.endswith('.NPZ'):
        np.savez(filename, **payload)
    elif upper.endswith('.MAT'):
        scipy.io.savemat(filename, payload)
    else:
        # plain text: the error matrix goes into a companion
        # "<name>_error<ext>" file
        np.savetxt(filename, Intensity)
        if Error is not None:
            base, extension = os.path.splitext(filename)
            np.savetxt(base + '_error' + extension, Error)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readbdfv2(filename, bdfext='.bdf', bhfext='.bhf'):
    """Read a version 2 Bessy Data File.

    Inputs
    ------
    filename: string
        the name of the input file. One can give the complete header or
        datafile name or just the base name without the extensions.
    bdfext: string, optional
        the extension of the data file
    bhfext: string, optional
        the extension of the header file

    Output
    ------
    the data structure in a dict. The header is loaded implicitly.

    Notes
    -----
    BDFv2 header and scattering data are stored separately in the header
    and the data files. Given the file name both are loaded.
    """
    # readbhfv2 loads the data file too when its second argument is True
    return header.readbhfv2(filename, True, bdfext, bhfext)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readmar(filename):
    """Read a two-dimensional scattering pattern from a MarResearch
    .image file.

    Inputs:
        filename: the name of the .image file

    Outputs: (data, hed)
        data: the intensity matrix as float64
        hed: the header dict as returned by header.readmarheader()

    Raises:
        NotImplementedError: if the file contains pixels above 65535
            counts (the overflow table is not handled yet).
    """
    hed = header.readmarheader(filename)
    with open(filename, 'rb') as f:
        f.read(hed['recordlength'])  # skip the header record
        # BUGFIX: np.fromstring is deprecated (removed in NumPy 2.0);
        # np.frombuffer is the supported replacement for bytes input.
        data = np.frombuffer(
            f.read(2 * hed['Xsize'] * hed['Ysize']), '<u2').astype(np.float64)
    if hed['highintensitypixels'] > 0:
        raise NotImplementedError(
            'Intensities over 65535 are not yet supported!')
    data = data.reshape(hed['Xsize'], hed['Ysize'])
    return data, hed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def writebdfv2(filename, bdf, bdfext='.bdf', bhfext='.bhf'):
    """Write a version 2 Bessy Data File.

    Inputs
    ------
    filename: string
        the name of the output file. One can give the complete header or
        datafile name or just the base name without the extensions.
    bdf: dict
        the BDF structure (in the same format as loaded by ``readbdfv2()``)
    bdfext: string, optional
        the extension of the data file
    bhfext: string, optional
        the extension of the header file

    Output
    ------
    None

    Notes
    -----
    BDFv2 header and scattering data are stored separately in the header
    and the data files. Given the file name both are saved.
    """
    if filename.endswith(bdfext):
        basename = filename[:-len(bdfext)]
    elif filename.endswith(bhfext):
        basename = filename[:-len(bhfext)]
    else:
        basename = filename
    header.writebhfv2(basename + '.bhf', bdf)
    # well-known matrices first, then any other ndarray found in the dict
    keys = ['RAWDATA', 'RAWERROR', 'CORRDATA', 'CORRERROR', 'NANDATA']
    keys.extend(
        [x for x in list(bdf.keys()) if isinstance(bdf[x], np.ndarray) and x not in keys])
    # BUGFIX: the file is opened in binary mode, so the section headers
    # must be encoded before writing (writing str raised TypeError on
    # Python 3); ndarray.tostring() is deprecated in favour of tobytes().
    # The context manager also guarantees the file is closed on error.
    with open(basename + '.bdf', 'wb') as f:
        for k in keys:
            if k not in bdf:
                continue
            f.write(('#%s[%d:%d]\n' % (k, bdf['xdim'], bdf['ydim'])).encode('ascii'))
            f.write(np.rot90(bdf[k], 3).astype('float32').tobytes(order='F'))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fill_padding(padded_string):
    # type: (bytes) -> bytes
    """Fill up missing padding in a string.

    This function makes sure that the string has a length which is a
    multiple of 4 and, if not, fills the missing places with dots.

    :param str padded_string: string to be decoded that might miss padding
        dots.
    :return: properly padded string
    :rtype: str
    """
    missing = -len(padded_string) % 4
    if not missing:
        return padded_string
    return padded_string + b'.' * missing
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decode(encoded):
    # type: (bytes) -> bytes
    """Decode the result of querystringsafe_base64_encode or a regular
    base64.

    .. note ::
        As a regular base64 string does not contain dots, replacing dots
        with equal signs does basically nothing to it. Also,
        base64.urlsafe_b64decode allows decoding both safe and unsafe
        base64. Therefore this function may also be used to decode a
        regular base64 string.

    :param (str, unicode) encoded: querystringsafe_base64 string or unicode
    :rtype: str, bytes
    :return: decoded string
    """
    # restore the '='-padding (dots are the URL-safe stand-in) and decode
    return urlsafe_b64decode(fill_padding(encoded).replace(b'.', b'='))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flatten_hierarchical_dict(original_dict, separator='.', max_recursion_depth=None):
    """Flatten a hierarchical dict.

    Inputs
    ------
    original_dict: dict
        the dictionary to flatten
    separator: string, optional
        the separator item in the keys of the flattened dictionary
    max_recursion_depth: positive integer, optional
        the number of recursions to be done. None is infinite.

    Output
    ------
    the flattened dictionary

    Notes
    -----
    Each element of `original_dict` which is not an instance of `dict` (or
    of a subclass of it) is kept as is. For a sub-dict, keys of the form
    ``key_dict<separator><key_in_key_dict>`` are created, recursively,
    until the maximum recursion depth is reached. Only string keys are
    supported.
    """
    if max_recursion_depth is not None and max_recursion_depth <= 0:
        # maximum recursion depth reached: leave this subtree untouched
        return original_dict
    remaining = None if max_recursion_depth is None else max_recursion_depth - 1
    flattened = {}
    for key, value in original_dict.items():
        if isinstance(value, dict):
            subdict = flatten_hierarchical_dict(value, separator, remaining)
            for subkey, subvalue in subdict.items():
                flattened[key + separator + subkey] = subvalue
        else:
            flattened[key] = value
    return flattened
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit_shullroess(q, Intensity, Error, R0=None, r=None):
    """Do a Shull-Roess fitting on the scattering data.

    Inputs:
        q: np.ndarray[ndim=1]
            vector of the q values (4*pi*sin(theta)/lambda)
        Intensity: np.ndarray[ndim=1]
            Intensity vector
        Error: np.ndarray[ndim=1]
            Error of the intensity (absolute uncertainty, 1sigma)
        R0: scalar
            first guess for the mean radius (None to autodetermine,
            default)
        r: np.ndarray[ndim=1]
            vector of the abscissa of the resulting size distribution
            (None to autodetermine, default)

    Output:
        A: ErrorValue, the fitted intensity scaling factor
        r0: the r0 parameter of the maxwellian size distribution
        n: the n parameter of the maxwellian size distribution
        r: the abscissa of the fitted size distribution
        maxw: the size distribution
        stat: the statistics dictionary, returned by nlsq_fit()

    Note: This first searches for r0, which best linearizes the
    log(Intensity) vs. log(q**2+3/r0**2) relation. After this is found,
    the parameters of the fitted line give the parameters of a
    Maxwellian-like particle size distribution function. After it a
    proper least squares fitting is carried out, using the obtained
    values as initial parameters.
    """
    q = np.array(q)
    Intensity = np.array(Intensity)
    Error = np.array(Error)
    if R0 is None:
        # scan candidate r0 values up to the largest resolvable size
        r0s = np.linspace(1, 2 * np.pi / q.min(), 1000)
        def naive_fit_chi2(q, Intensity, r0):
            # reduced chi^2 of a linear fit in the linearized coordinates
            p = np.polyfit(np.log(q ** 2 + 3 / r0 ** 2), np.log(Intensity), 1)
            return ((np.polyval(p, q) - Intensity) ** 2).sum() / (len(q) - 3)
        chi2 = np.array([naive_fit_chi2(q, Intensity, r0) for r0 in r0s.tolist()])
        # pick the r0 which linearizes the data best
        R0 = r0s[chi2 == chi2.min()][0]
    def naive_fit(q, Intensity, r0):
        # slope/intercept of the linearized fit give amplitude and exponent
        p = np.polyfit(np.log(q ** 2 + 3 / r0 ** 2), np.log(Intensity), 1)
        return np.exp(p[1]), -2 * p[0] - 4
    K, n = naive_fit(q, Intensity, R0)
    def SR_function(q, A, r0, n):
        # the Shull-Roess model curve
        return A * (q ** 2 + 3 / r0 ** 2) ** (-(n + 4.) * 0.5)
    # refine (A, r0, n) with a proper weighted least-squares fit
    p, dp, statdict = easylsq.nlsq_fit(q, Intensity, Error, SR_function, (K, R0, n))
    n = ErrorValue(p[2], dp[2])
    r0 = ErrorValue(p[1], dp[1])
    A = ErrorValue(p[0], dp[0])
    if r is None:
        # default abscissa: the size range resolvable by the q range
        r = np.linspace(np.pi / q.max(), np.pi / q.min(), 1000)
    return A, r0, n, r, maxwellian(r, r0, n), statdict
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def findfileindirs(filename, dirs=None, use_pythonpath=True, use_searchpath=True, notfound_is_fatal=True, notfound_val=None):
    """Find a file in multiple directories.

    Inputs:
        filename: the file name to be searched for.
        dirs: list of folders or None
        use_pythonpath: also search the Python module search path
        use_searchpath: also search the sastool search path
        notfound_is_fatal: if an exception is to be raised if the file
            cannot be found.
        notfound_val: the value which should be returned if the file is
            not found (only relevant if notfound_is_fatal is False)

    Outputs: the full path of the file.

    Notes:
        if filename is an absolute path by itself, folders in 'dirs' won't
        be checked, only the existence of the file will be verified.
    """
    if os.path.isabs(filename):
        # absolute path: only check existence, never search the folders
        if os.path.exists(filename):
            return filename
        if notfound_is_fatal:
            raise IOError('File ' + filename + ' not found.')
        return notfound_val
    dirs = normalize_listargument([] if dirs is None else dirs)
    if not dirs:  # nothing given: fall back to the current folder
        dirs = ['.']
    if use_pythonpath:
        dirs.extend(sys.path)
    if use_searchpath:
        dirs.extend(sastool_search_path)
    # expand ~ and ~user constructs
    dirs = [os.path.expanduser(d) for d in dirs]
    logger.debug('Searching for file %s in several folders: %s' % (filename, ', '.join(dirs)))
    for folder in dirs:
        candidate = os.path.join(folder, filename)
        if os.path.exists(candidate):
            logger.debug('Found file %s in folder %s.' % (filename, folder))
            return candidate
    logger.debug('Not found file %s in any folders.' % filename)
    if notfound_is_fatal:
        raise IOError('File %s not found in any of the directories.' % filename)
    return notfound_val
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def twotheta(matrix, bcx, bcy, pixsizeperdist):
"""Calculate the two-theta matrix for a scattering matrix Inputs: matrix: only the shape of it is needed bcx, bcy: beam position (counting from 0; x is row, y is column index) pixsizeperdist: the pixel size divided by the sample-to-detector distance Outputs: the two theta matrix, same shape as 'matrix'. """ |
col, row = np.meshgrid(list(range(matrix.shape[1])), list(range(matrix.shape[0])))
return np.arctan(np.sqrt((row - bcx) ** 2 + (col - bcy) ** 2) * pixsizeperdist) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def solidangle(twotheta, sampletodetectordistance, pixelsize=None):
"""Solid-angle correction for two-dimensional SAS images Inputs: twotheta: matrix of two-theta values sampletodetectordistance: sample-to-detector distance pixelsize: the pixel size in mm The output matrix is of the same shape as twotheta. The scattering intensity matrix should be multiplied by it. """ |
if pixelsize is None:
pixelsize = 1
return sampletodetectordistance ** 2 / np.cos(twotheta) ** 3 / pixelsize ** 2 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def solidangle_errorprop(twotheta, dtwotheta, sampletodetectordistance, dsampletodetectordistance, pixelsize=None):
"""Solid-angle correction for two-dimensional SAS images with error propagation Inputs: twotheta: matrix of two-theta values dtwotheta: matrix of absolute error of two-theta values sampletodetectordistance: sample-to-detector distance dsampletodetectordistance: absolute error of sample-to-detector distance Outputs two matrices of the same shape as twotheta. The scattering intensity matrix should be multiplied by the first one. The second one is the propagated error of the first one. """ |
SAC = solidangle(twotheta, sampletodetectordistance, pixelsize)
if pixelsize is None:
pixelsize = 1
return (SAC,
(sampletodetectordistance * (4 * dsampletodetectordistance ** 2 * np.cos(twotheta) ** 2 +
9 * dtwotheta ** 2 * sampletodetectordistance ** 2 * np.sin(twotheta) ** 2) ** 0.5
/ np.cos(twotheta) ** 4) / pixelsize ** 2) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def angledependentabsorption(twotheta, transmission):
"""Correction for angle-dependent absorption of the sample Inputs: twotheta: matrix of two-theta values transmission: the transmission of the sample (I_after/I_before, or exp(-mu*d)) The output matrix is of the same shape as twotheta. The scattering intensity matrix should be multiplied by it. Note, that this does not corrects for sample transmission by itself, as the 2*theta -> 0 limit of this matrix is unity. Twotheta==0 and transmission==1 cases are handled correctly (the limit is 1 in both cases). """ |
cor = np.ones(twotheta.shape)
if transmission == 1:
return cor
mud = -np.log(transmission)
cor[twotheta > 0] = transmission * mud * (1 - 1 / np.cos(twotheta[twotheta > 0])) / (np.exp(-mud / np.cos(twotheta[twotheta > 0])) - np.exp(-mud))
return cor |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def angledependentabsorption_errorprop(twotheta, dtwotheta, transmission, dtransmission):
"""Correction for angle-dependent absorption of the sample with error propagation Inputs: twotheta: matrix of two-theta values dtwotheta: matrix of absolute error of two-theta values transmission: the transmission of the sample (I_after/I_before, or exp(-mu*d)) dtransmission: the absolute error of the transmission of the sample Two matrices are returned: the first one is the correction (intensity matrix should be multiplied by it), the second is its absolute error. """ |
# error propagation formula calculated using sympy
return (angledependentabsorption(twotheta, transmission),
_calc_angledependentabsorption_error(twotheta, dtwotheta, transmission, dtransmission)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def angledependentairtransmission(twotheta, mu_air, sampletodetectordistance):
"""Correction for the angle dependent absorption of air in the scattered beam path. Inputs: twotheta: matrix of two-theta values mu_air: the linear absorption coefficient of air sampletodetectordistance: sample-to-detector distance 1/mu_air and sampletodetectordistance should have the same dimension The scattering intensity matrix should be multiplied by the resulting correction matrix.""" |
return np.exp(mu_air * sampletodetectordistance / np.cos(twotheta)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def angledependentairtransmission_errorprop(twotheta, dtwotheta, mu_air, dmu_air, sampletodetectordistance, dsampletodetectordistance):
"""Correction for the angle dependent absorption of air in the scattered beam path, with error propagation Inputs: twotheta: matrix of two-theta values dtwotheta: absolute error matrix of two-theta mu_air: the linear absorption coefficient of air dmu_air: error of the linear absorption coefficient of air sampletodetectordistance: sample-to-detector distance dsampletodetectordistance: error of the sample-to-detector distance 1/mu_air and sampletodetectordistance should have the same dimension The scattering intensity matrix should be multiplied by the resulting correction matrix.""" |
return (np.exp(mu_air * sampletodetectordistance / np.cos(twotheta)),
np.sqrt(dmu_air ** 2 * sampletodetectordistance ** 2 *
np.exp(2 * mu_air * sampletodetectordistance / np.cos(twotheta))
/ np.cos(twotheta) ** 2 + dsampletodetectordistance ** 2 *
mu_air ** 2 * np.exp(2 * mu_air * sampletodetectordistance /
np.cos(twotheta)) /
np.cos(twotheta) ** 2 + dtwotheta ** 2 * mu_air ** 2 *
sampletodetectordistance ** 2 *
np.exp(2 * mu_air * sampletodetectordistance / np.cos(twotheta))
* np.sin(twotheta) ** 2 / np.cos(twotheta) ** 4)
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_file(self, filename: str, strip_path: bool = True, what='exposure') -> str: """Find file in the path""" |
if what == 'exposure':
path = self._path
elif what == 'header':
path = self._headerpath
elif what == 'mask':
path = self._maskpath
else:
path = self._path
tried = []
if strip_path:
filename = os.path.split(filename)[-1]
for d in path:
if os.path.exists(os.path.join(d, filename)):
tried.append(os.path.join(d, filename))
return os.path.join(d, filename)
raise FileNotFoundError('Not found: {}. Tried: {}'.format(filename, ', '.join(tried))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_subpath(self, subpath: str):
"""Search a file or directory relative to the base path""" |
for d in self._path:
if os.path.exists(os.path.join(d, subpath)):
return os.path.join(d, subpath)
raise FileNotFoundError |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sum(self, only_valid=True) -> ErrorValue: """Calculate the sum of pixels, not counting the masked ones if only_valid is True.""" |
if not only_valid:
mask = 1
else:
mask = self.mask
return ErrorValue((self.intensity * mask).sum(),
((self.error * mask) ** 2).sum() ** 0.5) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mean(self, only_valid=True) -> ErrorValue: """Calculate the mean of the pixels, not counting the masked ones if only_valid is True.""" |
if not only_valid:
intensity = self.intensity
error = self.error
else:
intensity = self.intensity[self.mask]
error = self.error[self.mask]
return ErrorValue(intensity.mean(),
(error ** 2).mean() ** 0.5) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def twotheta(self) -> ErrorValue: """Calculate the two-theta array""" |
row, column = np.ogrid[0:self.shape[0], 0:self.shape[1]]
rho = (((self.header.beamcentery - row) * self.header.pixelsizey) ** 2 +
((self.header.beamcenterx - column) * self.header.pixelsizex) ** 2) ** 0.5
assert isinstance(self.header.pixelsizex, ErrorValue)
assert isinstance(self.header.pixelsizey, ErrorValue)
assert isinstance(rho, ErrorValue)
return (rho / self.header.distance).arctan() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pixel_to_q(self, row: float, column: float):
"""Return the q coordinates of a given pixel. Inputs: row: float the row (vertical) coordinate of the pixel column: float the column (horizontal) coordinate of the pixel Coordinates are 0-based and calculated from the top left corner. """ |
qrow = 4 * np.pi * np.sin(
0.5 * np.arctan(
(row - float(self.header.beamcentery)) *
float(self.header.pixelsizey) /
float(self.header.distance))) / float(self.header.wavelength)
qcol = 4 * np.pi * np.sin(0.5 * np.arctan(
(column - float(self.header.beamcenterx)) *
float(self.header.pixelsizex) /
float(self.header.distance))) / float(self.header.wavelength)
return qrow, qcol |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def radial_average(self, qrange=None, pixel=False, returnmask=False, errorpropagation=3, abscissa_errorpropagation=3, raw_result=False) -> Curve: """Do a radial averaging Inputs: qrange: the q-range. If None, auto-determine. If 'linear', auto-determine with linear spacing (same as None). If 'log', auto-determine with log10 spacing. pixel: do a pixel-integration (instead of q) returnmask: if the effective mask matrix is to be returned. errorpropagation: the type of error propagation (3: highest of squared or std-dev, 2: squared, 1: linear, 0: independent measurements of the same quantity) abscissa_errorpropagation: the type of the error propagation in the abscissa (3: highest of squared or std-dev, 2: squared, 1: linear, 0: independent measurements of the same quantity) raw_result: if True, do not pack the result in a SASCurve, return the individual np.ndarrays. Outputs: the one-dimensional curve as an instance of SASCurve (if pixel is False) or SASPixelCurve (if pixel is True), if raw_result was True. otherwise the q (or pixel), dq (or dpixel), I, dI, area vectors the mask matrix (if returnmask was True) """ |
        # NOTE(review): retmask is assigned but never used below.
        retmask = None
        # Decode the string forms of qrange into (qrange=None, spacing flag).
        if isinstance(qrange, str):
            if qrange == 'linear':
                qrange = None
                autoqrange_linear = True
            elif qrange == 'log':
                qrange = None
                autoqrange_linear = False
            else:
                raise ValueError(
                    'Value given for qrange (''%s'') not understood.' % qrange)
        else:
            autoqrange_linear = True  # whatever
        # abscissa_kind selects the integrator output: 3 = pixel, 0 = q.
        if pixel:
            abscissa_kind = 3
        else:
            abscissa_kind = 0
        # Call the low-level integrator. The mask convention is inverted here:
        # radint_fullq_errorprop expects nonzero == masked, hence (mask == 0).
        res = radint_fullq_errorprop(self.intensity, self.error, self.header.wavelength.val,
                                     self.header.wavelength.err, self.header.distance.val,
                                     self.header.distance.err, self.header.pixelsizey.val,
                                     self.header.pixelsizex.val, self.header.beamcentery.val,
                                     self.header.beamcentery.err, self.header.beamcenterx.val,
                                     self.header.beamcenterx.err, (self.mask == 0).astype(np.uint8),
                                     qrange, returnmask=returnmask, errorpropagation=errorpropagation,
                                     autoqrange_linear=autoqrange_linear, abscissa_kind=abscissa_kind,
                                     abscissa_errorpropagation=abscissa_errorpropagation)
        # res[5] (present only when returnmask=True) is the effective mask.
        q, dq, I, E, area = res[:5]
        if not raw_result:
            # pack the vectors into a Curve object
            c = Curve(q, I, E, dq)
            if returnmask:
                return c, res[5]
            else:
                return c
        else:
            if returnmask:
                return q, dq, I, E, area, res[5]
            else:
                return q, dq, I, E, area |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mask_negative(self):
"""Extend the mask with the image elements where the intensity is negative.""" |
self.mask = np.logical_and(self.mask, ~(self.intensity < 0)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def distance(self) -> ErrorValue: """Sample-to-detector distance""" |
if 'DistCalibrated' in self._data:
dist = self._data['DistCalibrated']
else:
dist = self._data["Dist"]
if 'DistCalibratedError' in self._data:
disterr = self._data['DistCalibratedError']
elif 'DistError' in self._data:
disterr = self._data['DistError']
else:
disterr = 0.0
return ErrorValue(dist, disterr) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def simultaneous_nonlinear_leastsquares(xs, ys, dys, func, params_inits, verbose=False, **kwargs):
"""Do a simultaneous nonlinear least-squares fit and return the fitted parameters as instances of ErrorValue. Input: ------ `xs`: tuple of abscissa vectors (1d numpy ndarrays) `ys`: tuple of ordinate vectors (1d numpy ndarrays) `dys`: tuple of the errors of ordinate vectors (1d numpy ndarrays or Nones) `func`: fitting function (the same for all the datasets) `params_init`: tuples of *lists* or *tuples* (not numpy ndarrays!) of the initial values of the parameters to be fitted. The special value `None` signifies that the corresponding parameter is the same as in the previous dataset. Of course, none of the parameters of the first dataset can be None. `verbose`: if various messages useful for debugging should be printed on stdout. additional keyword arguments get forwarded to nlsq_fit() Output: ------- curve2, etc. Each tuple contains the values of the fitted parameters as instances of ErrorValue, in the same order as they are in `params_init`. `statdict`: statistics dictionary. This is of the same form as in `nlsq_fit`, except that func_value is a sequence of one-dimensional np.ndarrays containing the best-fitting function values for each curve. """ |
p, dp, statdict = simultaneous_nlsq_fit(xs, ys, dys, func, params_inits,
verbose, **kwargs)
params = [[ErrorValue(p_, dp_) for (p_, dp_) in zip(pcurrent, dpcurrent)]
for (pcurrent, dpcurrent) in zip(p, dp)]
return tuple(params + [statdict]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tostring(self: 'ErrorValue', extra_digits: int = 0, plusminus: str = ' +/- ', fmt: str = None) -> str: """Make a string representation of the value and its uncertainty. Inputs: ------- ``extra_digits``: integer how many extra digits should be shown (plus or minus, zero means that the number of digits should be defined by the magnitude of the uncertainty). ``plusminus``: string the character sequence to be inserted in place of '+/-' including delimiting whitespace. ``fmt``: string or None how to format the output. Currently only strings ending in 'tex' are supported, which render ascii-exponentials (i.e. 3.1415e-2) into a format which is more appropriate to TeX. Outputs: -------- the string representation. """ |
        # TeX output: render the plain representation first, then rewrite every
        # "...e±NN" exponent into "\cdot 10^{NN}". The .replace('None', '')
        # removes the literal 'None' that %s produces for absent regex groups.
        if isinstance(fmt, str) and fmt.lower().endswith('tex'):
            return re.subn('(\d*)(\.(\d)*)?[eE]([+-]?\d+)',
                           lambda m: (r'$%s%s\cdot 10^{%s}$' % (m.group(1), m.group(2), m.group(4))).replace('None',
                                                                                                            ''),
                           self.tostring(extra_digits=extra_digits, plusminus=plusminus, fmt=None))[0]
        if isinstance(self.val, numbers.Real):
            try:
                # digits after the decimal point, set by the error's magnitude
                Ndigits = -int(math.floor(math.log10(self.err))) + extra_digits
            except (OverflowError, ValueError):
                # err is zero, inf or nan: rounding is meaningless, use str()
                return str(self.val) + plusminus + str(self.err)
            else:
                return str(round(self.val, Ndigits)) + plusminus + str(round(self.err, Ndigits))
        # non-real values: plain fallback representation
        return str(self.val) + ' +/- ' + str(self.err) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def evalfunc(cls, func, *args, **kwargs):
"""Evaluate a function with error propagation. Inputs: ------- ``func``: callable this is the function to be evaluated. Should return either a number or a np.ndarray. ``*args``: other positional arguments of func. Arguments which are not instances of `ErrorValue` are taken as constants. keyword arguments supported: ``NMC``: number of Monte-Carlo steps. If not defined, defaults to 1000 ``exceptions_to_retry``: list of exception types to ignore: if one of these is raised the given MC step is repeated once again. Notice that this might induce an infinite loop! The exception types in this list should be subclasses of ``Exception``. ``exceptions_to_skip``: list of exception types to skip: if one of these is raised the given MC step is skipped, never to be repeated. The exception types in this list should be subclasses of ``Exception``. Output: ------- ``result``: an `ErrorValue` with the result. The error is estimated via a Monte-Carlo approach to Gaussian error propagation. """ |
        def do_random(x):
            # Draw a random sample for ErrorValue arguments; constants pass through.
            if isinstance(x, cls):
                return x.random()
            else:
                return x
        if 'NMC' not in kwargs:
            kwargs['NMC'] = 1000
        if 'exceptions_to_skip' not in kwargs:
            kwargs['exceptions_to_skip'] = []
        # NOTE(review): the docstring documents ``exceptions_to_retry`` but the
        # code reads ``exceptions_to_repeat`` -- confirm which keyword callers use.
        if 'exceptions_to_repeat' not in kwargs:
            kwargs['exceptions_to_repeat'] = []
        meanvalue = func(*args)
        # this way we get either a number or a np.array
        stdcollector = meanvalue * 0
        mciters = 0
        while mciters < kwargs['NMC']:
            try:
                # IGNORE:W0142
                stdcollector += (func(*[do_random(a)
                                        for a in args]) - meanvalue) ** 2
                mciters += 1
            except Exception as e:  # IGNORE:W0703
                if any(isinstance(e, etype) for etype in kwargs['exceptions_to_skip']):
                    # skip this MC step for good: shrink the target step count
                    kwargs['NMC'] -= 1
                elif any(isinstance(e, etype) for etype in kwargs['exceptions_to_repeat']):
                    pass
                else:
                    raise
        # NOTE(review): the usual MC std estimate is (sum/(N-1))**0.5; here the
        # division happens after the square root -- confirm this is intended.
        return cls(meanvalue, stdcollector ** 0.5 / (kwargs['NMC'] - 1)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GeneralGuinier(q, G, Rg, s):
"""Generalized Guinier scattering Inputs: ------- ``q``: independent variable ``G``: factor ``Rg``: radius of gyration ``s``: dimensionality parameter (can be 1, 2, 3) Formula: -------- ``G/q**(3-s)*exp(-(q^2*Rg^2)/s)`` """ |
return G / q ** (3 - s) * np.exp(-(q * Rg) ** 2 / s) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GuinierPorod(q, G, Rg, alpha):
"""Empirical Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``G``: factor of the Guinier-branch ``Rg``: radius of gyration ``alpha``: power-law exponent Formula: -------- ``G * exp(-q^2*Rg^2/3)`` if ``q<q_sep`` and ``a*q^alpha`` otherwise. ``q_sep`` and ``a`` are determined from conditions of smoothness at the cross-over. Literature: B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ |
return GuinierPorodMulti(q, G, Rg, alpha) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def PorodGuinier(q, a, alpha, Rg):
"""Empirical Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``a``: factor of the power-law branch ``alpha``: power-law exponent ``Rg``: radius of gyration Formula: -------- ``G * exp(-q^2*Rg^2/3)`` if ``q>q_sep`` and ``a*q^alpha`` otherwise. ``q_sep`` and ``G`` are determined from conditions of smoothness at the cross-over. Literature: B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ |
return PorodGuinierMulti(q, a, alpha, Rg) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def PorodGuinierPorod(q, a, alpha, Rg, beta):
"""Empirical Porod-Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``a``: factor of the first power-law branch ``alpha``: exponent of the first power-law branch ``Rg``: radius of gyration ``beta``: exponent of the second power-law branch Formula: -------- ``a*q^alpha`` if ``q<q_sep1``. ``G * exp(-q^2*Rg^2/3)`` if ``q_sep1<q<q_sep2`` and ``b*q^beta`` if ``q_sep2<q``. ``q_sep1``, ``q_sep2``, ``G`` and ``b`` are determined from conditions of smoothness at the cross-overs. Literature: B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ |
return PorodGuinierMulti(q, a, alpha, Rg, beta) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GuinierPorodGuinier(q, G, Rg1, alpha, Rg2):
"""Empirical Guinier-Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``G``: factor for the first Guinier-branch ``Rg1``: the first radius of gyration ``alpha``: the power-law exponent ``Rg2``: the second radius of gyration Formula: -------- ``G*exp(-q^2*Rg1^2/3)`` if ``q<q_sep1``. ``A*q^alpha`` if ``q_sep1 <= q <=q_sep2``. ``G2*exp(-q^2*Rg2^2/3)`` if ``q_sep2<q``. The parameters ``A``,``G2``, ``q_sep1``, ``q_sep2`` are determined from conditions of smoothness at the cross-overs. Literature: B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ |
return GuinierPorodMulti(q, G, Rg1, alpha, Rg2) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def DampedPowerlaw(q, a, alpha, sigma):
"""Damped power-law Inputs: ------- ``q``: independent variable ``a``: factor ``alpha``: exponent ``sigma``: hwhm of the damping Gaussian Formula: -------- ``a*q^alpha*exp(-q^2/(2*sigma^2))`` """ |
return a * q ** alpha * np.exp(-q ** 2 / (2 * sigma ** 2)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def PowerlawGuinierPorodConst(q, A, alpha, G, Rg, beta, C):
"""Sum of a Power-law, a Guinier-Porod curve and a constant. Inputs: ------- ``q``: independent variable (momentum transfer) ``A``: scaling factor of the power-law ``alpha``: power-law exponent ``G``: scaling factor of the Guinier-Porod curve ``Rg``: Radius of gyration ``beta``: power-law exponent of the Guinier-Porod curve ``C``: additive constant Formula: -------- ``A*q^alpha + GuinierPorod(q,G,Rg,beta) + C`` """ |
return PowerlawPlusConstant(q, A, alpha, C) + GuinierPorod(q, G, Rg, beta) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GuinierPorodMulti(q, G, *Rgsalphas):
"""Empirical multi-part Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``G``: factor for the first Guinier-branch gyration and power-law exponents of the consecutive parts Formula: -------- The intensity is a piecewise function with continuous first derivatives. The separating points in ``q`` between the consecutive parts and the intensity factors of them (except the first) are determined from conditions of smoothness (continuity of the function and its first derivative) at the border points of the intervals. Guinier-type (``G*exp(-q^2*Rg1^2/3)``) and Power-law type (``A*q^alpha``) parts follow each other in alternating sequence. Literature: B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ |
scalefactor = G
funcs = [lambda q: Guinier(q, G, Rgsalphas[0])]
indices = np.ones_like(q, dtype=np.bool)
constraints = []
for i in range(1, len(Rgsalphas)):
if i % 2:
# Rgsalphas[i] is an exponent, Rgsalphas[i-1] is a radius of gyration
qsep = _PGgen_qsep(Rgsalphas[i], Rgsalphas[i - 1], 3)
scalefactor = _PGgen_A(Rgsalphas[i], Rgsalphas[i - 1], 3, scalefactor)
funcs.append(lambda q, a=scalefactor, alpha=Rgsalphas[i]: Powerlaw(q, a, alpha))
else:
# Rgsalphas[i] is a radius of gyration, Rgsalphas[i-1] is a power-law exponent
qsep = _PGgen_qsep(Rgsalphas[i - 1], Rgsalphas[i], 3)
scalefactor = _PGgen_G(Rgsalphas[i - 1], Rgsalphas[i], 3, scalefactor)
funcs.append(lambda q, G=scalefactor, Rg=Rgsalphas[i]: Guinier(q, G, Rg))
# this belongs to the previous
constraints.append(indices & (q < qsep))
indices[q < qsep] = False
constraints.append(indices)
return np.piecewise(q, constraints, funcs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def PorodGuinierMulti(q, A, *alphasRgs):
"""Empirical multi-part Porod-Guinier scattering Inputs: ------- ``q``: independent variable ``A``: factor for the first Power-law-branch gyration and power-law exponents of the consecutive parts Formula: -------- The intensity is a piecewise function with continuous first derivatives. The separating points in ``q`` between the consecutive parts and the intensity factors of them (except the first) are determined from conditions of smoothness (continuity of the function and its first derivative) at the border points of the intervals. Guinier-type (``G*exp(-q^2*Rg1^2/3)``) and Power-law type (``A*q^alpha``) parts follow each other in alternating sequence. Literature: B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ |
scalefactor = A
funcs = [lambda q: Powerlaw(q, A, alphasRgs[0])]
indices = np.ones_like(q, dtype=np.bool)
constraints = []
for i in range(1, len(alphasRgs)):
if i % 2:
# alphasRgs[i] is a radius of gyration, alphasRgs[i-1] is a power-law exponent
qsep = _PGgen_qsep(alphasRgs[i - 1], alphasRgs[i], 3)
scalefactor = _PGgen_G(alphasRgs[i - 1], alphasRgs[i], 3, scalefactor)
funcs.append(lambda q, G=scalefactor, Rg=alphasRgs[i]: Guinier(q, G, Rg))
else:
# alphasRgs[i] is an exponent, alphasRgs[i-1] is a radius of gyration
qsep = _PGgen_qsep(alphasRgs[i], alphasRgs[i - 1], 3)
scalefactor = _PGgen_A(alphasRgs[i], alphasRgs[i - 1], 3, scalefactor)
funcs.append(lambda q, a=scalefactor, alpha=alphasRgs[i]: a * q ** alpha)
# this belongs to the previous
constraints.append(indices & (q < qsep))
indices[q < qsep] = False
constraints.append(indices)
return np.piecewise(q, constraints, funcs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GeneralGuinierPorod(q, factor, *args, **kwargs):
"""Empirical generalized multi-part Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``factor``: factor for the first branch other arguments (*args):
the defining arguments of the consecutive parts: radius of gyration (``Rg``) and dimensionality parameter (``s``) for Guinier and exponent (``alpha``) for power-law parts. supported keyword arguments: ``startswithguinier``: True if the first segment is a Guinier-type scattering (this is the default) or False if it is a power-law Formula: -------- The intensity is a piecewise function with continuous first derivatives. The separating points in ``q`` between the consecutive parts and the intensity factors of them (except the first) are determined from conditions of smoothness (continuity of the function and its first derivative) at the border points of the intervals. Guinier-type (``G*q**(3-s)*exp(-q^2*Rg1^2/s)``) and Power-law type (``A*q^alpha``) parts follow each other in alternating sequence. The exact number of parts is determined from the number of positional arguments (*args). Literature: B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719. """ |
if kwargs.get('startswithguinier', True):
funcs = [lambda q, A = factor:GeneralGuinier(q, A, args[0], args[1])]
i = 2
guiniernext = False
else:
funcs = [lambda q, A = factor: Powerlaw(q, A, args[0])]
i = 1
guiniernext = True
indices = np.ones_like(q, dtype=np.bool)
constraints = []
while i < len(args):
if guiniernext:
# args[i] is a radius of gyration, args[i+1] is a dimensionality parameter, args[i-1] is a power-law exponent
qsep = _PGgen_qsep(args[i - 1], args[i], args[i + 1])
factor = _PGgen_G(args[i - 1], args[i], args[i + 1], factor)
funcs.append(lambda q, G=factor, Rg=args[i], s=args[i + 1]: GeneralGuinier(q, G, Rg, s))
guiniernext = False
i += 2
else:
# args[i] is an exponent, args[i-2] is a radius of gyration, args[i-1] is a dimensionality parameter
qsep = _PGgen_qsep(args[i], args[i - 2], args[i - 1])
factor = _PGgen_A(args[i], args[i - 2], args[i - 1], factor)
funcs.append(lambda q, a=factor, alpha=args[i]: a * q ** alpha)
guiniernext = True
i += 1
# this belongs to the previous
constraints.append(indices & (q < qsep))
indices[q < qsep] = False
constraints.append(indices)
return np.piecewise(q, constraints, funcs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ExcludedVolumeChain(q, Rg, nu):
"""Scattering intensity of a generalized excluded-volume Gaussian chain Inputs: ------- ``q``: independent variable ``Rg``: radius of gyration ``nu``: excluded volume exponent Formula: -------- ``(u^(1/nu)*gamma(0.5/nu)*gammainc_lower(0.5/nu,u)- gamma(1/nu)*gammainc_lower(1/nu,u)) / (nu*u^(1/nu))`` where ``u = q^2*Rg^2*(2*nu+1)*(2*nu+2)/6`` is the reduced scattering variable, ``gamma(x)`` is the gamma function and ``gammainc_lower(x,t)`` is the lower incomplete gamma function. Literature: SASFit manual 6. nov. 2010. Equation (3.60b) """ |
u = (q * Rg) ** 2 * (2 * nu + 1) * (2 * nu + 2) / 6.
return (u ** (0.5 / nu) * gamma(0.5 / nu) * gammainc(0.5 / nu, u) -
gamma(1. / nu) * gammainc(1. / nu, u)) / (nu * u ** (1. / nu)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def BorueErukhimovich(q, C, r0, s, t):
"""Borue-Erukhimovich model of microphase separation in polyelectrolytes Inputs: ------- ``q``: independent variable ``C``: scaling factor ``r0``: typical el.stat. screening length ``s``: dimensionless charge concentration ``t``: dimensionless temperature Formula: -------- ``C*(x^2+s)/((x^2+s)(x^2+t)+1)`` where ``x=q*r0`` Literature: o Borue and Erukhimovich. Macromolecules (1988) 21 (11) 3240-3249 o Shibayama and Tanaka. J. Chem. Phys (1995) 102 (23) 9392 o Moussaid et. al. J. Phys II (France) (1993) 3 (4) 573-594 o Ermi and Amis. Macromolecules (1997) 30 (22) 6937-6942 """ |
x = q * r0
return C * (x ** 2 + s) / ((x ** 2 + s) * (x ** 2 + t) + 1) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def BorueErukhimovich_Powerlaw(q, C, r0, s, t, nu):
"""Borue-Erukhimovich model ending in a power-law. Inputs: ------- ``q``: independent variable ``C``: scaling factor ``r0``: typical el.stat. screening length ``s``: dimensionless charge concentration ``t``: dimensionless temperature ``nu``: excluded volume parameter Formula: -------- ``C*(x^2+s)/((x^2+s)(x^2+t)+1)`` where ``x=q*r0`` if ``q<qsep`` ``A*q^(-1/nu)``if ``q>qsep`` ``A`` and ``qsep`` are determined from conditions of smoothness at the cross-over. """ |
    def get_xsep(alpha, s, t):
        # Cross-over point: coefficients of the cubic (in x^2) obtained by
        # matching value and logarithmic slope of the B-E form to A*x^alpha.
        A = alpha + 2
        B = 2 * s * alpha + t * alpha + 4 * s
        C = s * t * alpha + alpha + alpha * s ** 2 + alpha * s * t - 2 + 2 * s ** 2
        D = alpha * s ** 2 * t + alpha * s
        r = np.roots([A, B, C, D])
        #print "get_xsep: ", alpha, s, t, r
        # first positive root of the cubic in x^2; sqrt gives x itself
        return r[r > 0][0] ** 0.5
    # power-law prefactor ensuring continuity at the cross-over
    get_B = lambda C, xsep, s, t, nu:C * (xsep ** 2 + s) / ((xsep ** 2 + s) * (xsep ** 2 + t) + 1) * xsep ** (1.0 / nu)
    x = q * r0
    # np.roots may return a complex root with negligible imaginary part
    xsep = np.real_if_close(get_xsep(-1.0 / nu, s, t))
    A = get_B(C, xsep, s, t, nu)
    # B-E model below the cross-over, power law above it
    return np.piecewise(q, (x < xsep, x >= xsep),
                        (lambda a:BorueErukhimovich(a, C, r0, s, t),
                         lambda a:A * (a * r0) ** (-1.0 / nu))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sample(self, data, interval):
    '''Extract a single patch from a transformed data dict.

    Parameters
    ----------
    data : dict
        A data dict as produced by pumpp.Pump.transform

    interval : slice
        The time interval to sample

    Returns
    -------
    data_slice : dict
        `data` restricted to `interval`, with one observation chosen
        uniformly at random along the leading (observation) axis.
    '''
    patch = dict()
    for field, array in data.items():
        # Validity masks are bookkeeping only; never sampled
        if '_valid' in field:
            continue

        selector = [slice(None)] * array.ndim

        # Pick one observation at random, but keep the axis
        # (slice of length 1) rather than dropping it
        chosen = self.rng.randint(0, array.shape[0])
        selector[0] = slice(chosen, chosen + 1)

        # Restrict every time-like axis of this field to the interval
        for axis in self._time[field]:
            selector[axis] = interval

        patch[field] = array[tuple(selector)]

    return patch
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def indices(self, data):
    '''Generate patch start indices.

    Parameters
    ----------
    data : dict of np.ndarray
        As produced by pumpp.transform

    Yields
    ------
    start : int >= 0
        The start index of a sample patch, advancing by `self.stride`
        up to (but not including) the last full-length patch position.
    '''
    total = self.data_duration(data)
    yield from range(0, total - self.duration, self.stride)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scope(self, key):
    '''Apply the name scope to a key.

    Parameters
    ----------
    key : string

    Returns
    -------
    `name/key` if `name` is not `None`; otherwise, `key`.
    '''
    return key if self.name is None else '{:s}/{:s}'.format(self.name, key)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register(self, field, shape, dtype):
    '''Register a field as a tensor with specified shape and type.

    A `Tensor` of the given shape and type will be registered in this
    object's `fields` dict under the scoped field name.

    Parameters
    ----------
    field : str
        The name of the field

    shape : iterable of `int` or `None`
        The shape of the output variable.
        This does not include a dimension for multiple outputs.
        `None` may be used to indicate variable-length outputs

    dtype : type
        The data type of the field

    Raises
    ------
    ParameterError
        If dtype or shape are improperly specified
    '''
    if not isinstance(dtype, type):
        raise ParameterError('dtype={} must be a type'.format(dtype))

    shape_is_valid = (isinstance(shape, Iterable) and
                      all([dim is None or isinstance(dim, int)
                           for dim in shape]))
    if not shape_is_valid:
        raise ParameterError('shape={} must be an iterable of integers'.format(shape))

    self.fields[self.scope(field)] = Tensor(tuple(shape), dtype)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(self, data):
    '''Merge an array of output dictionaries into a single dictionary
    with properly scoped names.

    Parameters
    ----------
    data : list of dict
        Output dicts as produced by `pumpp.task.BaseTaskTransformer.transform`
        or `pumpp.feature.FeatureExtractor.transform`.

    Returns
    -------
    data_out : dict
        All elements of the input dicts are stacked along the 0 axis,
        and keys are re-mapped by `scope`.
    '''
    # Collect the union of keys across all input dicts
    all_keys = set()
    for d in data:
        all_keys.update(d)

    merged = dict()
    for key in all_keys:
        stacked = np.stack([np.asarray(d[key]) for d in data], axis=0)
        merged[self.scope(key)] = stacked

    return merged
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(self, operator):
    '''Add an operator to the Slicer.

    Parameters
    ----------
    operator : Scope (TaskTransformer or FeatureExtractor)
        The new operator to add

    Raises
    ------
    ParameterError
        If `operator` is not a `Scope`.
    '''
    if not isinstance(operator, Scope):
        raise ParameterError('Operator {} must be a TaskTransformer '
                             'or FeatureExtractor'.format(operator))

    for field in operator.fields:
        # A `None` entry in the shape marks a variable-length
        # (time-like) axis.  Axis indices are offset by 1 because
        # axis 0 is reserved for batching.
        time_axes = []
        for axis, dim in enumerate(operator.fields[field].shape, 1):
            if dim is None:
                time_axes.append(axis)
        self._time[field] = time_axes
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def data_duration(self, data):
    '''Compute the valid data duration of a dict.

    Parameters
    ----------
    data : dict
        As produced by pumpp.transform

    Returns
    -------
    length : int
        The minimum temporal extent of a dynamic observation in data
    '''
    # Gather the extent of every time-like axis registered for each field
    lengths = [data[field].shape[axis]
               for field in self._time
               for axis in self._time.get(field, [])]
    return min(lengths)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.