repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (list) | docstring (string, 1-46.9k chars) | docstring_tokens (list) | language (1 class) | partition (3 classes)
---|---|---|---|---|---|---|---|---
bokeh/bokeh | bokeh/io/showing.py | https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/io/showing.py#L46-L145 | def show(obj, browser=None, new="tab", notebook_handle=False, notebook_url="localhost:8888", **kw):
''' Immediately display a Bokeh object or application.
:func:`show` may be called multiple times in a single Jupyter notebook
cell to display multiple objects. The objects are displayed in order.
Args:
obj (LayoutDOM or Application or callable) :
A Bokeh object to display.
Bokeh plots, widgets, layouts (i.e. rows and columns) may be
passed to ``show`` in order to display them. When ``output_file``
has been called, the output will be to an HTML file, which is also
opened in a new browser window or tab. When ``output_notebook``
has been called in a Jupyter notebook, the output will be inline
in the associated notebook output cell.
In a Jupyter notebook, a Bokeh application or callable may also
be passed. A callable will be turned into an Application using a
``FunctionHandler``. The application will be run and displayed
inline in the associated notebook output cell.
browser (str, optional) :
Specify the browser to use to open output files (default: None)
For file output, the **browser** argument allows for specifying
which browser to display in, e.g. "safari", "firefox", "opera",
"windows-default". Not all platforms may support this option, see
the documentation for the standard library webbrowser_ module for
more information
new (str, optional) :
Specify the browser mode to use for output files (default: "tab")
For file output, opens or raises the browser window showing the
current output file. If **new** is 'tab', then opens a new tab.
If **new** is 'window', then opens a new window.
notebook_handle (bool, optional) :
Whether to create a notebook interaction handle (default: False)
For notebook output, toggles whether a handle which can be used
with ``push_notebook`` is returned. Note that notebook handles
only apply to standalone plots, layouts, etc. They do not apply
when showing Applications in the notebook.
notebook_url (URL, optional) :
Location of the Jupyter notebook page (default: "localhost:8888")
When showing Bokeh applications, the Bokeh server must be
explicitly configured to allow connections originating from
different URLs. This parameter defaults to the standard notebook
host and port. If you are running on a different location, you
will need to supply this value for the application to display
properly. If no protocol is supplied in the URL, e.g. if it is
of the form "localhost:8888", then "http" will be used.
``notebook_url`` can also be a function that takes one int for the
bound server port. If the port is provided, the function needs
to generate the full public URL to the bokeh server. If None
is passed, the function is to generate the origin URL.
Some parameters are only useful when certain output modes are active:
* The ``browser`` and ``new`` parameters only apply when ``output_file``
is active.
* The ``notebook_handle`` parameter only applies when ``output_notebook``
is active, and non-Application objects are being shown. It is only supported in Jupyter notebooks;
an exception is raised for other notebook types when it is True.
* The ``notebook_url`` parameter only applies when showing Bokeh
Applications in a Jupyter notebook.
* Any additional keyword arguments are passed to :class:`~bokeh.server.Server` when
showing a Bokeh app (added in version 1.1)
Returns:
When in a Jupyter notebook (with ``output_notebook`` enabled)
and ``notebook_handle=True``, returns a handle that can be used by
``push_notebook``, None otherwise.
.. _webbrowser: https://docs.python.org/2/library/webbrowser.html
'''
state = curstate()
is_application = getattr(obj, '_is_a_bokeh_application_class', False)
if not (isinstance(obj, LayoutDOM) or is_application or callable(obj)):
raise ValueError(_BAD_SHOW_MSG)
# TODO (bev) check callable signature more thoroughly
# This ugliness is to prevent importing bokeh.application (which would bring
# in Tornado) just in order to show a non-server object
if is_application or callable(obj):
return run_notebook_hook(state.notebook_type, 'app', obj, state, notebook_url, **kw)
return _show_with_state(obj, state, browser, new, notebook_handle=notebook_handle) | [
"def",
"show",
"(",
"obj",
",",
"browser",
"=",
"None",
",",
"new",
"=",
"\"tab\"",
",",
"notebook_handle",
"=",
"False",
",",
"notebook_url",
"=",
"\"localhost:8888\"",
",",
"*",
"*",
"kw",
")",
":",
"state",
"=",
"curstate",
"(",
")",
"is_application",
"=",
"getattr",
"(",
"obj",
",",
"'_is_a_bokeh_application_class'",
",",
"False",
")",
"if",
"not",
"(",
"isinstance",
"(",
"obj",
",",
"LayoutDOM",
")",
"or",
"is_application",
"or",
"callable",
"(",
"obj",
")",
")",
":",
"raise",
"ValueError",
"(",
"_BAD_SHOW_MSG",
")",
"# TODO (bev) check callable signature more thoroughly",
"# This ugliness is to prevent importing bokeh.application (which would bring",
"# in Tornado) just in order to show a non-server object",
"if",
"is_application",
"or",
"callable",
"(",
"obj",
")",
":",
"return",
"run_notebook_hook",
"(",
"state",
".",
"notebook_type",
",",
"'app'",
",",
"obj",
",",
"state",
",",
"notebook_url",
",",
"*",
"*",
"kw",
")",
"return",
"_show_with_state",
"(",
"obj",
",",
"state",
",",
"browser",
",",
"new",
",",
"notebook_handle",
"=",
"notebook_handle",
")"
]
| Immediately display a Bokeh object or application.
:func:`show` may be called multiple times in a single Jupyter notebook
cell to display multiple objects. The objects are displayed in order.
Args:
obj (LayoutDOM or Application or callable) :
A Bokeh object to display.
Bokeh plots, widgets, layouts (i.e. rows and columns) may be
passed to ``show`` in order to display them. When ``output_file``
has been called, the output will be to an HTML file, which is also
opened in a new browser window or tab. When ``output_notebook``
has been called in a Jupyter notebook, the output will be inline
in the associated notebook output cell.
In a Jupyter notebook, a Bokeh application or callable may also
be passed. A callable will be turned into an Application using a
``FunctionHandler``. The application will be run and displayed
inline in the associated notebook output cell.
browser (str, optional) :
Specify the browser to use to open output files (default: None)
For file output, the **browser** argument allows for specifying
which browser to display in, e.g. "safari", "firefox", "opera",
"windows-default". Not all platforms may support this option, see
the documentation for the standard library webbrowser_ module for
more information
new (str, optional) :
Specify the browser mode to use for output files (default: "tab")
For file output, opens or raises the browser window showing the
current output file. If **new** is 'tab', then opens a new tab.
If **new** is 'window', then opens a new window.
notebook_handle (bool, optional) :
Whether to create a notebook interaction handle (default: False)
For notebook output, toggles whether a handle which can be used
with ``push_notebook`` is returned. Note that notebook handles
only apply to standalone plots, layouts, etc. They do not apply
when showing Applications in the notebook.
notebook_url (URL, optional) :
Location of the Jupyter notebook page (default: "localhost:8888")
When showing Bokeh applications, the Bokeh server must be
explicitly configured to allow connections originating from
different URLs. This parameter defaults to the standard notebook
host and port. If you are running on a different location, you
will need to supply this value for the application to display
properly. If no protocol is supplied in the URL, e.g. if it is
of the form "localhost:8888", then "http" will be used.
``notebook_url`` can also be a function that takes one int for the
bound server port. If the port is provided, the function needs
to generate the full public URL to the bokeh server. If None
is passed, the function is to generate the origin URL.
Some parameters are only useful when certain output modes are active:
* The ``browser`` and ``new`` parameters only apply when ``output_file``
is active.
* The ``notebook_handle`` parameter only applies when ``output_notebook``
is active, and non-Application objects are being shown. It is only supported in Jupyter notebooks;
an exception is raised for other notebook types when it is True.
* The ``notebook_url`` parameter only applies when showing Bokeh
Applications in a Jupyter notebook.
* Any additional keyword arguments are passed to :class:`~bokeh.server.Server` when
showing a Bokeh app (added in version 1.1)
Returns:
When in a Jupyter notebook (with ``output_notebook`` enabled)
and ``notebook_handle=True``, returns a handle that can be used by
``push_notebook``, None otherwise.
.. _webbrowser: https://docs.python.org/2/library/webbrowser.html | [
"Immediately",
"display",
"a",
"Bokeh",
"object",
"or",
"application",
"."
]
| python | train |
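
A minimal usage sketch for the `show` record above, assuming Bokeh is installed; the figure construction (`bokeh.plotting.figure`) and the output file name are illustrative additions, not part of the record:

```python
# File-output path described in the docstring above; assumes Bokeh is installed.
from bokeh.plotting import figure
from bokeh.io import output_file, show

output_file("lines.html")                          # route output to an HTML file
p = figure(title="demo", x_axis_label="x", y_axis_label="y")
p.line([1, 2, 3, 4], [4, 2, 5, 1], line_width=2)

# browser/new only apply because output_file is active; a specific browser
# (e.g. browser="firefox") can be passed where the stdlib webbrowser module
# knows about it.
show(p, new="tab")
```
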
marrow/WebCore | web/server/cherrypy_.py | https://github.com/marrow/WebCore/blob/38d50f8022ca62976a1e5ff23f7714bd647b6532/web/server/cherrypy_.py#L14-L27 | def serve(application, host='127.0.0.1', port=8080):
"""CherryPy-based WSGI-HTTP server."""
# Instantiate the server with our configuration and application.
server = CherryPyWSGIServer((host, int(port)), application, server_name=host)
# Try to be handy as many terminals allow clicking links.
print("serving on http://{0}:{1}".format(host, port))
# Bind and launch the server; this is a blocking operation.
try:
server.start()
except KeyboardInterrupt:
server.stop() | [
"def",
"serve",
"(",
"application",
",",
"host",
"=",
"'127.0.0.1'",
",",
"port",
"=",
"8080",
")",
":",
"# Instantiate the server with our configuration and application.",
"server",
"=",
"CherryPyWSGIServer",
"(",
"(",
"host",
",",
"int",
"(",
"port",
")",
")",
",",
"application",
",",
"server_name",
"=",
"host",
")",
"# Try to be handy as many terminals allow clicking links.",
"print",
"(",
"\"serving on http://{0}:{1}\"",
".",
"format",
"(",
"host",
",",
"port",
")",
")",
"# Bind and launch the server; this is a blocking operation.",
"try",
":",
"server",
".",
"start",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"server",
".",
"stop",
"(",
")"
]
| CherryPy-based WSGI-HTTP server. | [
"CherryPy",
"-",
"based",
"WSGI",
"-",
"HTTP",
"server",
"."
]
| python | train |
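
A sketch of driving the `serve` helper above with a trivial WSGI callable. The import path is taken from the record's module path and assumes the WebCore package (with its CherryPy server support) is installed; the `hello` app is illustrative:

```python
# Any WSGI application object works; this one returns a plain-text greeting.
def hello(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'Hello from CherryPyWSGIServer\n']

from web.server.cherrypy_ import serve   # module path as given in the record

serve(hello, host='127.0.0.1', port=8080)   # blocks until Ctrl+C (KeyboardInterrupt)
```
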
raymondEhlers/pachyderm | pachyderm/generic_config.py | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/generic_config.py#L451-L466 | def iterate_with_selected_objects(analysis_objects: Mapping[Any, Any], **selections: Mapping[str, Any]) -> Iterator[Tuple[Any, Any]]:
""" Iterate over an analysis dictionary with selected attributes.
Args:
analysis_objects: Analysis objects dictionary.
selections: Keyword arguments used to select attributes from the analysis dictionary.
Yields:
object: Matching analysis object.
"""
for key_index, obj in analysis_objects.items():
# If selections is empty, we return every object. If it's not empty, then we only want to return
# objects which are selected in through the selections.
selected_obj = not selections or all([getattr(key_index, selector) == selected_value for selector, selected_value in selections.items()])
if selected_obj:
yield key_index, obj | [
"def",
"iterate_with_selected_objects",
"(",
"analysis_objects",
":",
"Mapping",
"[",
"Any",
",",
"Any",
"]",
",",
"*",
"*",
"selections",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"Iterator",
"[",
"Tuple",
"[",
"Any",
",",
"Any",
"]",
"]",
":",
"for",
"key_index",
",",
"obj",
"in",
"analysis_objects",
".",
"items",
"(",
")",
":",
"# If selections is empty, we return every object. If it's not empty, then we only want to return",
"# objects which are selected in through the selections.",
"selected_obj",
"=",
"not",
"selections",
"or",
"all",
"(",
"[",
"getattr",
"(",
"key_index",
",",
"selector",
")",
"==",
"selected_value",
"for",
"selector",
",",
"selected_value",
"in",
"selections",
".",
"items",
"(",
")",
"]",
")",
"if",
"selected_obj",
":",
"yield",
"key_index",
",",
"obj"
]
| Iterate over an analysis dictionary with selected attributes.
Args:
analysis_objects: Analysis objects dictionary.
selections: Keyword arguments used to select attributes from the analysis dictionary.
Yields:
object: Matching analysis object. | [
"Iterate",
"over",
"an",
"analysis",
"dictionary",
"with",
"selected",
"attributes",
"."
]
| python | train |
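
A usage sketch for `iterate_with_selected_objects`, assuming pachyderm is installed; the `KeyIndex` namedtuple and the analyses mapping are made up for illustration:

```python
from collections import namedtuple
from pachyderm.generic_config import iterate_with_selected_objects  # path from the record

KeyIndex = namedtuple("KeyIndex", ["collision_system", "energy"])
analyses = {
    KeyIndex("PbPb", 2.76): "analysis_a",
    KeyIndex("PbPb", 5.02): "analysis_b",
    KeyIndex("pp", 5.02): "analysis_c",
}

# Only objects whose key has collision_system == "PbPb" are yielded; with no
# selections, every (key, object) pair would be returned.
for key_index, obj in iterate_with_selected_objects(analyses, collision_system="PbPb"):
    print(key_index, obj)
```
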
improbable-research/keanu | keanu-python/keanu/vertex/generated.py | https://github.com/improbable-research/keanu/blob/73189a8f569078e156168e795f82c7366c59574b/keanu-python/keanu/vertex/generated.py#L347-L354 | def Multiplication(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Multiplies one vertex by another
:param left: vertex to be multiplied
:param right: vertex to be multiplied
"""
return Double(context.jvm_view().MultiplicationVertex, label, cast_to_double_vertex(left), cast_to_double_vertex(right)) | [
"def",
"Multiplication",
"(",
"left",
":",
"vertex_constructor_param_types",
",",
"right",
":",
"vertex_constructor_param_types",
",",
"label",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"Vertex",
":",
"return",
"Double",
"(",
"context",
".",
"jvm_view",
"(",
")",
".",
"MultiplicationVertex",
",",
"label",
",",
"cast_to_double_vertex",
"(",
"left",
")",
",",
"cast_to_double_vertex",
"(",
"right",
")",
")"
]
| Multiplies one vertex by another
:param left: vertex to be multiplied
:param right: vertex to be multiplied | [
"Multiplies",
"one",
"vertex",
"by",
"another",
":",
"param",
"left",
":",
"vertex",
"to",
"be",
"multiplied",
":",
"param",
"right",
":",
"vertex",
"to",
"be",
"multiplied"
]
| python | train |
pandas-dev/pandas | pandas/io/excel/_util.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/excel/_util.py#L89-L119 | def _range2cols(areas):
"""
Convert comma separated list of column names and ranges to indices.
Parameters
----------
areas : str
A string containing a sequence of column ranges (or areas).
Returns
-------
cols : list
A list of 0-based column indices.
Examples
--------
>>> _range2cols('A:E')
[0, 1, 2, 3, 4]
>>> _range2cols('A,C,Z:AB')
[0, 2, 25, 26, 27]
"""
cols = []
for rng in areas.split(","):
if ":" in rng:
rng = rng.split(":")
cols.extend(lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1))
else:
cols.append(_excel2num(rng))
return cols | [
"def",
"_range2cols",
"(",
"areas",
")",
":",
"cols",
"=",
"[",
"]",
"for",
"rng",
"in",
"areas",
".",
"split",
"(",
"\",\"",
")",
":",
"if",
"\":\"",
"in",
"rng",
":",
"rng",
"=",
"rng",
".",
"split",
"(",
"\":\"",
")",
"cols",
".",
"extend",
"(",
"lrange",
"(",
"_excel2num",
"(",
"rng",
"[",
"0",
"]",
")",
",",
"_excel2num",
"(",
"rng",
"[",
"1",
"]",
")",
"+",
"1",
")",
")",
"else",
":",
"cols",
".",
"append",
"(",
"_excel2num",
"(",
"rng",
")",
")",
"return",
"cols"
]
| Convert comma separated list of column names and ranges to indices.
Parameters
----------
areas : str
A string containing a sequence of column ranges (or areas).
Returns
-------
cols : list
A list of 0-based column indices.
Examples
--------
>>> _range2cols('A:E')
[0, 1, 2, 3, 4]
>>> _range2cols('A,C,Z:AB')
[0, 2, 25, 26, 27] | [
"Convert",
"comma",
"separated",
"list",
"of",
"column",
"names",
"and",
"ranges",
"to",
"indices",
"."
]
| python | train |
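
The helper above backs the string form of `usecols` in `pandas.read_excel`. A standalone sketch of the same parsing, with a minimal stand-in for `_excel2num` (which the record references but does not show):

```python
def excel2num(col):
    """Convert an Excel column name like 'AB' to a 0-based index (stand-in for _excel2num)."""
    index = 0
    for ch in col.strip().upper():
        index = index * 26 + (ord(ch) - ord('A') + 1)
    return index - 1

def range2cols(areas):
    """Parse 'A,C,Z:AB' into a list of 0-based column indices."""
    cols = []
    for rng in areas.split(','):
        if ':' in rng:
            start, end = rng.split(':')
            cols.extend(range(excel2num(start), excel2num(end) + 1))
        else:
            cols.append(excel2num(rng))
    return cols

print(range2cols('A:E'))       # [0, 1, 2, 3, 4]
print(range2cols('A,C,Z:AB'))  # [0, 2, 25, 26, 27]
```
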
pyviz/holoviews | holoviews/plotting/util.py | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/util.py#L194-L204 | def is_dynamic_overlay(dmap):
"""
Traverses a DynamicMap graph and determines if any components
were overlaid dynamically (i.e. by * on a DynamicMap).
"""
if not isinstance(dmap, DynamicMap):
return False
elif dmap.callback._is_overlay:
return True
else:
return any(is_dynamic_overlay(dm) for dm in dmap.callback.inputs) | [
"def",
"is_dynamic_overlay",
"(",
"dmap",
")",
":",
"if",
"not",
"isinstance",
"(",
"dmap",
",",
"DynamicMap",
")",
":",
"return",
"False",
"elif",
"dmap",
".",
"callback",
".",
"_is_overlay",
":",
"return",
"True",
"else",
":",
"return",
"any",
"(",
"is_dynamic_overlay",
"(",
"dm",
")",
"for",
"dm",
"in",
"dmap",
".",
"callback",
".",
"inputs",
")"
]
| Traverses a DynamicMap graph and determines if any components
were overlaid dynamically (i.e. by * on a DynamicMap). | [
"Traverses",
"a",
"DynamicMap",
"graph",
"and",
"determines",
"if",
"any",
"components",
"were",
"overlaid",
"dynamically",
"(",
"i",
".",
"e",
".",
"by",
"*",
"on",
"a",
"DynamicMap",
")",
"."
]
| python | train |
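
A hedged sketch of when `is_dynamic_overlay` returns True, assuming HoloViews is installed; the private helper is imported from the module path given in the record, and the curve callback is illustrative:

```python
import holoviews as hv
from holoviews.plotting.util import is_dynamic_overlay

def curve(scale):
    return hv.Curve([(x, scale * x) for x in range(10)])

dmap_a = hv.DynamicMap(curve, kdims='scale')
dmap_b = hv.DynamicMap(curve, kdims='scale')

print(is_dynamic_overlay(dmap_a))           # False: no dynamic overlay involved
print(is_dynamic_overlay(dmap_a * dmap_b))  # True: * on DynamicMaps overlays dynamically
```
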
RJT1990/pyflux | pyflux/inference/metropolis_hastings.py | https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/inference/metropolis_hastings.py#L66-L100 | def tune_scale(acceptance, scale):
""" Tunes scale for M-H algorithm
Parameters
----------
acceptance : float
The most recent acceptance rate
scale : float
The current scale parameter
Returns
----------
scale : float
An adjusted scale parameter
Notes
----------
Ross : Initially did this by trial and error, then refined by looking at other
implementations, so some credit here to PyMC3 which became a guideline for this.
"""
if acceptance > 0.8:
scale *= 2.0
elif acceptance <= 0.8 and acceptance > 0.4:
scale *= 1.3
elif acceptance < 0.234 and acceptance > 0.1:
scale *= (1/1.3)
elif acceptance <= 0.1 and acceptance > 0.05:
scale *= 0.4
elif acceptance <= 0.05 and acceptance > 0.01:
scale *= 0.2
elif acceptance <= 0.01:
scale *= 0.1
return scale | [
"def",
"tune_scale",
"(",
"acceptance",
",",
"scale",
")",
":",
"if",
"acceptance",
">",
"0.8",
":",
"scale",
"*=",
"2.0",
"elif",
"acceptance",
"<=",
"0.8",
"and",
"acceptance",
">",
"0.4",
":",
"scale",
"*=",
"1.3",
"elif",
"acceptance",
"<",
"0.234",
"and",
"acceptance",
">",
"0.1",
":",
"scale",
"*=",
"(",
"1",
"/",
"1.3",
")",
"elif",
"acceptance",
"<=",
"0.1",
"and",
"acceptance",
">",
"0.05",
":",
"scale",
"*=",
"0.4",
"elif",
"acceptance",
"<=",
"0.05",
"and",
"acceptance",
">",
"0.01",
":",
"scale",
"*=",
"0.2",
"elif",
"acceptance",
"<=",
"0.01",
":",
"scale",
"*=",
"0.1",
"return",
"scale"
]
| Tunes scale for M-H algorithm
Parameters
----------
acceptance : float
The most recent acceptance rate
scale : float
The current scale parameter
Returns
----------
scale : float
An adjusted scale parameter
Notes
----------
Ross : Initially did this by trial and error, then refined by looking at other
implementations, so some credit here to PyMC3 which became a guideline for this. | [
"Tunes",
"scale",
"for",
"M",
"-",
"H",
"algorithm"
]
| python | train |
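
A standalone demo of the tuner's behaviour; the function body is copied from the record above so the loop runs without pyflux installed, and the acceptance rates are arbitrary sample values:

```python
def tune_scale(acceptance, scale):
    # Copied from the record above (pyflux.inference.metropolis_hastings).
    if acceptance > 0.8:
        scale *= 2.0
    elif acceptance <= 0.8 and acceptance > 0.4:
        scale *= 1.3
    elif acceptance < 0.234 and acceptance > 0.1:
        scale *= (1 / 1.3)
    elif acceptance <= 0.1 and acceptance > 0.05:
        scale *= 0.4
    elif acceptance <= 0.05 and acceptance > 0.01:
        scale *= 0.2
    elif acceptance <= 0.01:
        scale *= 0.1
    return scale

scale = 1.0
for acceptance in (0.95, 0.60, 0.30, 0.15, 0.07, 0.02, 0.005):
    scale = tune_scale(acceptance, scale)
    # Rates in [0.234, 0.4] leave the scale unchanged: the window the sampler aims for.
    print(f"acceptance={acceptance:5.3f} -> scale={scale:.4f}")
```
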
numenta/nupic | src/nupic/encoders/date.py | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/date.py#L274-L349 | def getEncodedValues(self, input):
""" See method description in base.py """
if input == SENTINEL_VALUE_FOR_MISSING_DATA:
return numpy.array([None])
assert isinstance(input, datetime.datetime)
values = []
# -------------------------------------------------------------------------
# Get the scalar values for each sub-field
timetuple = input.timetuple()
timeOfDay = timetuple.tm_hour + float(timetuple.tm_min)/60.0
if self.seasonEncoder is not None:
dayOfYear = timetuple.tm_yday
# input.timetuple() computes the day of year 1 based, so convert to 0 based
values.append(dayOfYear-1)
if self.dayOfWeekEncoder is not None:
dayOfWeek = timetuple.tm_wday + timeOfDay / 24.0
values.append(dayOfWeek)
if self.weekendEncoder is not None:
# saturday, sunday or friday evening
if timetuple.tm_wday == 6 or timetuple.tm_wday == 5 \
or (timetuple.tm_wday == 4 and timeOfDay > 18):
weekend = 1
else:
weekend = 0
values.append(weekend)
if self.customDaysEncoder is not None:
if timetuple.tm_wday in self.customDays:
customDay = 1
else:
customDay = 0
values.append(customDay)
if self.holidayEncoder is not None:
# A "continuous" binary value. = 1 on the holiday itself and smooth ramp
# 0->1 on the day before the holiday and 1->0 on the day after the holiday.
# Currently the only holiday we know about is December 25
# holidays is a list of holidays that occur on a fixed date every year
if len(self.holidays) == 0:
holidays = [(12, 25)]
else:
holidays = self.holidays
val = 0
for h in holidays:
# hdate is midnight on the holiday
if len(h) == 3:
hdate = datetime.datetime(h[0], h[1], h[2], 0, 0, 0)
else:
hdate = datetime.datetime(timetuple.tm_year, h[0], h[1], 0, 0, 0)
if input > hdate:
diff = input - hdate
if diff.days == 0:
# return 1 on the holiday itself
val = 1
break
elif diff.days == 1:
# ramp smoothly from 1 -> 0 on the next day
val = 1.0 - (float(diff.seconds) / 86400)
break
else:
diff = hdate - input
if diff.days == 0:
# ramp smoothly from 0 -> 1 on the previous day
val = 1.0 - (float(diff.seconds) / 86400)
values.append(val)
if self.timeOfDayEncoder is not None:
values.append(timeOfDay)
return values | [
"def",
"getEncodedValues",
"(",
"self",
",",
"input",
")",
":",
"if",
"input",
"==",
"SENTINEL_VALUE_FOR_MISSING_DATA",
":",
"return",
"numpy",
".",
"array",
"(",
"[",
"None",
"]",
")",
"assert",
"isinstance",
"(",
"input",
",",
"datetime",
".",
"datetime",
")",
"values",
"=",
"[",
"]",
"# -------------------------------------------------------------------------",
"# Get the scalar values for each sub-field",
"timetuple",
"=",
"input",
".",
"timetuple",
"(",
")",
"timeOfDay",
"=",
"timetuple",
".",
"tm_hour",
"+",
"float",
"(",
"timetuple",
".",
"tm_min",
")",
"/",
"60.0",
"if",
"self",
".",
"seasonEncoder",
"is",
"not",
"None",
":",
"dayOfYear",
"=",
"timetuple",
".",
"tm_yday",
"# input.timetuple() computes the day of year 1 based, so convert to 0 based",
"values",
".",
"append",
"(",
"dayOfYear",
"-",
"1",
")",
"if",
"self",
".",
"dayOfWeekEncoder",
"is",
"not",
"None",
":",
"dayOfWeek",
"=",
"timetuple",
".",
"tm_wday",
"+",
"timeOfDay",
"/",
"24.0",
"values",
".",
"append",
"(",
"dayOfWeek",
")",
"if",
"self",
".",
"weekendEncoder",
"is",
"not",
"None",
":",
"# saturday, sunday or friday evening",
"if",
"timetuple",
".",
"tm_wday",
"==",
"6",
"or",
"timetuple",
".",
"tm_wday",
"==",
"5",
"or",
"(",
"timetuple",
".",
"tm_wday",
"==",
"4",
"and",
"timeOfDay",
">",
"18",
")",
":",
"weekend",
"=",
"1",
"else",
":",
"weekend",
"=",
"0",
"values",
".",
"append",
"(",
"weekend",
")",
"if",
"self",
".",
"customDaysEncoder",
"is",
"not",
"None",
":",
"if",
"timetuple",
".",
"tm_wday",
"in",
"self",
".",
"customDays",
":",
"customDay",
"=",
"1",
"else",
":",
"customDay",
"=",
"0",
"values",
".",
"append",
"(",
"customDay",
")",
"if",
"self",
".",
"holidayEncoder",
"is",
"not",
"None",
":",
"# A \"continuous\" binary value. = 1 on the holiday itself and smooth ramp",
"# 0->1 on the day before the holiday and 1->0 on the day after the holiday.",
"# Currently the only holiday we know about is December 25",
"# holidays is a list of holidays that occur on a fixed date every year",
"if",
"len",
"(",
"self",
".",
"holidays",
")",
"==",
"0",
":",
"holidays",
"=",
"[",
"(",
"12",
",",
"25",
")",
"]",
"else",
":",
"holidays",
"=",
"self",
".",
"holidays",
"val",
"=",
"0",
"for",
"h",
"in",
"holidays",
":",
"# hdate is midnight on the holiday",
"if",
"len",
"(",
"h",
")",
"==",
"3",
":",
"hdate",
"=",
"datetime",
".",
"datetime",
"(",
"h",
"[",
"0",
"]",
",",
"h",
"[",
"1",
"]",
",",
"h",
"[",
"2",
"]",
",",
"0",
",",
"0",
",",
"0",
")",
"else",
":",
"hdate",
"=",
"datetime",
".",
"datetime",
"(",
"timetuple",
".",
"tm_year",
",",
"h",
"[",
"0",
"]",
",",
"h",
"[",
"1",
"]",
",",
"0",
",",
"0",
",",
"0",
")",
"if",
"input",
">",
"hdate",
":",
"diff",
"=",
"input",
"-",
"hdate",
"if",
"diff",
".",
"days",
"==",
"0",
":",
"# return 1 on the holiday itself",
"val",
"=",
"1",
"break",
"elif",
"diff",
".",
"days",
"==",
"1",
":",
"# ramp smoothly from 1 -> 0 on the next day",
"val",
"=",
"1.0",
"-",
"(",
"float",
"(",
"diff",
".",
"seconds",
")",
"/",
"86400",
")",
"break",
"else",
":",
"diff",
"=",
"hdate",
"-",
"input",
"if",
"diff",
".",
"days",
"==",
"0",
":",
"# ramp smoothly from 0 -> 1 on the previous day",
"val",
"=",
"1.0",
"-",
"(",
"float",
"(",
"diff",
".",
"seconds",
")",
"/",
"86400",
")",
"values",
".",
"append",
"(",
"val",
")",
"if",
"self",
".",
"timeOfDayEncoder",
"is",
"not",
"None",
":",
"values",
".",
"append",
"(",
"timeOfDay",
")",
"return",
"values"
]
| See method description in base.py | [
"See",
"method",
"description",
"in",
"base",
".",
"py"
]
| python | valid |
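
A self-contained sketch mirroring the scalar sub-field arithmetic above for one timestamp, so the behaviour can be inspected without nupic; the custom-day and holiday defaults are illustrative:

```python
import datetime

def date_subfields(dt, custom_days=(5, 6), holidays=((12, 25),)):
    """Mirror the per-sub-field scalar values computed by getEncodedValues."""
    tt = dt.timetuple()
    time_of_day = tt.tm_hour + tt.tm_min / 60.0
    season = tt.tm_yday - 1                        # 0-based day of year
    day_of_week = tt.tm_wday + time_of_day / 24.0  # Monday=0, fractional by hour
    weekend = int(tt.tm_wday in (5, 6) or (tt.tm_wday == 4 and time_of_day > 18))
    custom = int(tt.tm_wday in custom_days)
    holiday_val = 0.0
    for month, day in holidays:
        hdate = datetime.datetime(tt.tm_year, month, day)
        if dt > hdate:
            diff = dt - hdate
            if diff.days == 0:                     # on the holiday itself
                holiday_val = 1.0
            elif diff.days == 1:                   # day after: ramp 1 -> 0
                holiday_val = 1.0 - diff.seconds / 86400.0
        else:
            diff = hdate - dt
            if diff.days == 0:                     # day before: ramp 0 -> 1
                holiday_val = 1.0 - diff.seconds / 86400.0
    return season, day_of_week, weekend, custom, holiday_val, time_of_day

# Six hours before midnight on Dec 25 -> the holiday value has ramped up to 0.75.
print(date_subfields(datetime.datetime(2023, 12, 24, 18, 0)))
```
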
sorgerlab/indra | indra/sources/geneways/find_full_text_sentence.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/find_full_text_sentence.py#L142-L169 | def sentence_matches(self, sentence_text):
"""Returns true iff the sentence contains this mention's upstream
and downstream participants, and if one of the stemmed verbs in
the sentence is the same as the stemmed action type."""
has_upstream = False
has_downstream = False
has_verb = False
# Get the first word of the action type and assume this is the verb
# (Ex. get depends for depends on)
actiontype_words = word_tokenize(self.mention.actiontype)
actiontype_verb_stemmed = stem(actiontype_words[0])
words = word_tokenize(sentence_text)
if self.string_matches_sans_whitespace(sentence_text.lower(),
self.mention.upstream.lower()):
has_upstream = True
if self.string_matches_sans_whitespace(sentence_text.lower(),
self.mention.downstream.lower()):
has_downstream = True
for word in words:
if actiontype_verb_stemmed == stem(word):
has_verb = True
return has_upstream and has_downstream and has_verb | [
"def",
"sentence_matches",
"(",
"self",
",",
"sentence_text",
")",
":",
"has_upstream",
"=",
"False",
"has_downstream",
"=",
"False",
"has_verb",
"=",
"False",
"# Get the first word of the action type and assume this is the verb",
"# (Ex. get depends for depends on)",
"actiontype_words",
"=",
"word_tokenize",
"(",
"self",
".",
"mention",
".",
"actiontype",
")",
"actiontype_verb_stemmed",
"=",
"stem",
"(",
"actiontype_words",
"[",
"0",
"]",
")",
"words",
"=",
"word_tokenize",
"(",
"sentence_text",
")",
"if",
"self",
".",
"string_matches_sans_whitespace",
"(",
"sentence_text",
".",
"lower",
"(",
")",
",",
"self",
".",
"mention",
".",
"upstream",
".",
"lower",
"(",
")",
")",
":",
"has_upstream",
"=",
"True",
"if",
"self",
".",
"string_matches_sans_whitespace",
"(",
"sentence_text",
".",
"lower",
"(",
")",
",",
"self",
".",
"mention",
".",
"downstream",
".",
"lower",
"(",
")",
")",
":",
"has_downstream",
"=",
"True",
"for",
"word",
"in",
"words",
":",
"if",
"actiontype_verb_stemmed",
"==",
"stem",
"(",
"word",
")",
":",
"has_verb",
"=",
"True",
"return",
"has_upstream",
"and",
"has_downstream",
"and",
"has_verb"
]
| Returns true iff the sentence contains this mention's upstream
and downstream participants, and if one of the stemmed verbs in
the sentence is the same as the stemmed action type. | [
"Returns",
"true",
"iff",
"the",
"sentence",
"contains",
"this",
"mention",
"s",
"upstream",
"and",
"downstream",
"participants",
"and",
"if",
"one",
"of",
"the",
"stemmed",
"verbs",
"in",
"the",
"sentence",
"is",
"the",
"same",
"as",
"the",
"stemmed",
"action",
"type",
"."
]
| python | train |
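
A standalone sketch of the matching idea above, substituting NLTK's PorterStemmer for the module's own `stem` helper and a plain substring test for the whitespace-insensitive matching (both simplifying assumptions):

```python
# Requires nltk plus its 'punkt' tokenizer data (nltk.download('punkt')).
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import word_tokenize

stemmer = PorterStemmer()

def matches(sentence, upstream, downstream, actiontype):
    verb = stemmer.stem(word_tokenize(actiontype)[0])   # first word of the action type
    text = sentence.lower()
    has_upstream = upstream.lower() in text
    has_downstream = downstream.lower() in text
    has_verb = any(stemmer.stem(w) == verb for w in word_tokenize(sentence))
    return has_upstream and has_downstream and has_verb

print(matches("MEK binds ERK in the cytosol.", "MEK", "ERK", "binds to"))   # True
```
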
fermiPy/fermipy | fermipy/gtanalysis.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtanalysis.py#L511-L549 | def create(cls, infile, config=None, params=None, mask=None):
"""Create a new instance of GTAnalysis from an analysis output file
generated with `~fermipy.GTAnalysis.write_roi`. By default
the new instance will inherit the configuration of the saved
analysis instance. The configuration may be overriden by
passing a configuration file path with the ``config``
argument.
Parameters
----------
infile : str
Path to the ROI results file.
config : str
Path to a configuration file. This will override the
configuration in the ROI results file.
params : str
Path to a yaml file with updated parameter values
mask : str
Path to a fits file with an updated mask
"""
infile = os.path.abspath(infile)
roi_file, roi_data = utils.load_data(infile)
if config is None:
config = roi_data['config']
validate = False
else:
validate = True
gta = cls(config, validate=validate)
gta.setup(init_sources=False)
gta.load_roi(infile, params=params, mask=mask)
return gta | [
"def",
"create",
"(",
"cls",
",",
"infile",
",",
"config",
"=",
"None",
",",
"params",
"=",
"None",
",",
"mask",
"=",
"None",
")",
":",
"infile",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"infile",
")",
"roi_file",
",",
"roi_data",
"=",
"utils",
".",
"load_data",
"(",
"infile",
")",
"if",
"config",
"is",
"None",
":",
"config",
"=",
"roi_data",
"[",
"'config'",
"]",
"validate",
"=",
"False",
"else",
":",
"validate",
"=",
"True",
"gta",
"=",
"cls",
"(",
"config",
",",
"validate",
"=",
"validate",
")",
"gta",
".",
"setup",
"(",
"init_sources",
"=",
"False",
")",
"gta",
".",
"load_roi",
"(",
"infile",
",",
"params",
"=",
"params",
",",
"mask",
"=",
"mask",
")",
"return",
"gta"
]
| Create a new instance of GTAnalysis from an analysis output file
generated with `~fermipy.GTAnalysis.write_roi`. By default
the new instance will inherit the configuration of the saved
analysis instance. The configuration may be overriden by
passing a configuration file path with the ``config``
argument.
Parameters
----------
infile : str
Path to the ROI results file.
config : str
Path to a configuration file. This will override the
configuration in the ROI results file.
params : str
Path to a yaml file with updated parameter values
mask : str
Path to a fits file with an updated mask | [
"Create",
"a",
"new",
"instance",
"of",
"GTAnalysis",
"from",
"an",
"analysis",
"output",
"file",
"generated",
"with",
"~fermipy",
".",
"GTAnalysis",
".",
"write_roi",
".",
"By",
"default",
"the",
"new",
"instance",
"will",
"inherit",
"the",
"configuration",
"of",
"the",
"saved",
"analysis",
"instance",
".",
"The",
"configuration",
"may",
"be",
"overriden",
"by",
"passing",
"a",
"configuration",
"file",
"path",
"with",
"the",
"config",
"argument",
"."
]
| python | train |
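
A hedged usage sketch for the classmethod above; the file names are placeholders, and a working Fermipy installation (plus an ROI file written by `write_roi`) is assumed:

```python
from fermipy.gtanalysis import GTAnalysis

# Inherit the configuration stored with the saved analysis...
gta = GTAnalysis.create('roi.npy')

# ...or override the stored configuration and parameter values.
gta_alt = GTAnalysis.create('roi.npy', config='config.yaml', params='params.yaml')
```
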
SoCo/SoCo | soco/groups.py | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/groups.py#L106-L116 | def short_label(self):
"""str: A short description of the group.
>>> device.group.short_label
'Kitchen + 1'
"""
group_names = sorted([m.player_name for m in self.members])
group_label = group_names[0]
if len(group_names) > 1:
group_label += " + {}".format(len(group_names) - 1)
return group_label | [
"def",
"short_label",
"(",
"self",
")",
":",
"group_names",
"=",
"sorted",
"(",
"[",
"m",
".",
"player_name",
"for",
"m",
"in",
"self",
".",
"members",
"]",
")",
"group_label",
"=",
"group_names",
"[",
"0",
"]",
"if",
"len",
"(",
"group_names",
")",
">",
"1",
":",
"group_label",
"+=",
"\" + {}\"",
".",
"format",
"(",
"len",
"(",
"group_names",
")",
"-",
"1",
")",
"return",
"group_label"
]
| str: A short description of the group.
>>> device.group.short_label
'Kitchen + 1' | [
"str",
":",
"A",
"short",
"description",
"of",
"the",
"group",
"."
]
| python | train |
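
A hedged usage sketch for `short_label`, assuming the SoCo library is installed and a Sonos player is reachable at the (made-up) IP address below:

```python
from soco import SoCo

device = SoCo('192.168.1.68')     # IP address is a placeholder
print(device.group.short_label)   # e.g. "Kitchen + 1" when two players are grouped
```
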
pypyr/pypyr-cli | pypyr/utils/filesystem.py | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L56-L156 | def files_in_to_out(self, in_path, out_path=None):
"""Write in files to out, calling the line_handler on each line.
Calls file_in_to_out under the hood to format the in_path payload. The
formatting processing is done by the self.formatter instance.
Args:
in_path: str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
out_path: str or path-like. Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
Top tip: Path-like objects strip the trailing slash. If
you want to pass in a dir that does not exist yet as
out-path with a trailing /, you should be passing it as a
str to preserve the /.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None.
"""
in_paths = get_glob(in_path)
in_count = len(in_paths)
if in_count == 0:
logger.debug(f'in path found {in_count} paths.')
else:
logger.debug(f'in path found {in_count} paths:')
for path in in_paths:
logger.debug(f'{path}')
logger.debug(
'herewith ends the paths. will now process each file.')
if in_paths:
# derive the destination directory, ensure it's ready for writing
basedir_out = None
is_outfile_name_known = False
if out_path:
# outpath could be a file, or a dir
pathlib_out = Path(out_path)
# yep, Path() strips trailing /, hence check original string
if isinstance(out_path, str) and out_path.endswith(os.sep):
# ensure dir - mimic posix mkdir -p
pathlib_out.mkdir(parents=True, exist_ok=True)
basedir_out = pathlib_out
elif pathlib_out.is_dir():
basedir_out = pathlib_out
else:
if len(in_paths) > 1:
raise Error(
f'{in_path} resolves to {len(in_paths)} files, '
'but you specified only a single file as out '
f'{out_path}. If the outpath is meant to be a '
'directory, put a / at the end.')
# at this point it must be a file (not dir) path
# make sure that the parent dir exists
basedir_out = pathlib_out.parent
basedir_out.parent.mkdir(parents=True, exist_ok=True)
is_outfile_name_known = True
# loop through all the in files and write them to the out dir
file_counter = 0
is_edit = False
for path in in_paths:
actual_in = Path(path)
# recursive glob returns dirs too, only interested in files
if actual_in.is_file():
if basedir_out:
if is_outfile_name_known:
actual_out = pathlib_out
else:
# default to original src file name if only out dir
# specified without an out file name
actual_out = basedir_out.joinpath(actual_in.name)
logger.debug(f"writing {path} to {actual_out}")
self.in_to_out(in_path=actual_in, out_path=actual_out)
else:
logger.debug(f"editing {path}")
self.in_to_out(in_path=actual_in)
is_edit = True
file_counter += 1
if is_edit:
logger.info(
f"edited & wrote {file_counter} file(s) at {in_path}")
else:
logger.info(
f"read {in_path}, formatted and wrote {file_counter} "
f"file(s) to {out_path}")
else:
logger.info(f"{in_path} found no files") | [
"def",
"files_in_to_out",
"(",
"self",
",",
"in_path",
",",
"out_path",
"=",
"None",
")",
":",
"in_paths",
"=",
"get_glob",
"(",
"in_path",
")",
"in_count",
"=",
"len",
"(",
"in_paths",
")",
"if",
"in_count",
"==",
"0",
":",
"logger",
".",
"debug",
"(",
"f'in path found {in_count} paths.'",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"f'in path found {in_count} paths:'",
")",
"for",
"path",
"in",
"in_paths",
":",
"logger",
".",
"debug",
"(",
"f'{path}'",
")",
"logger",
".",
"debug",
"(",
"'herewith ends the paths. will now process each file.'",
")",
"if",
"in_paths",
":",
"# derive the destination directory, ensure it's ready for writing",
"basedir_out",
"=",
"None",
"is_outfile_name_known",
"=",
"False",
"if",
"out_path",
":",
"# outpath could be a file, or a dir",
"pathlib_out",
"=",
"Path",
"(",
"out_path",
")",
"# yep, Path() strips trailing /, hence check original string",
"if",
"isinstance",
"(",
"out_path",
",",
"str",
")",
"and",
"out_path",
".",
"endswith",
"(",
"os",
".",
"sep",
")",
":",
"# ensure dir - mimic posix mkdir -p",
"pathlib_out",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"basedir_out",
"=",
"pathlib_out",
"elif",
"pathlib_out",
".",
"is_dir",
"(",
")",
":",
"basedir_out",
"=",
"pathlib_out",
"else",
":",
"if",
"len",
"(",
"in_paths",
")",
">",
"1",
":",
"raise",
"Error",
"(",
"f'{in_path} resolves to {len(in_paths)} files, '",
"'but you specified only a single file as out '",
"f'{out_path}. If the outpath is meant to be a '",
"'directory, put a / at the end.'",
")",
"# at this point it must be a file (not dir) path",
"# make sure that the parent dir exists",
"basedir_out",
"=",
"pathlib_out",
".",
"parent",
"basedir_out",
".",
"parent",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"is_outfile_name_known",
"=",
"True",
"# loop through all the in files and write them to the out dir",
"file_counter",
"=",
"0",
"is_edit",
"=",
"False",
"for",
"path",
"in",
"in_paths",
":",
"actual_in",
"=",
"Path",
"(",
"path",
")",
"# recursive glob returns dirs too, only interested in files",
"if",
"actual_in",
".",
"is_file",
"(",
")",
":",
"if",
"basedir_out",
":",
"if",
"is_outfile_name_known",
":",
"actual_out",
"=",
"pathlib_out",
"else",
":",
"# default to original src file name if only out dir",
"# specified without an out file name",
"actual_out",
"=",
"basedir_out",
".",
"joinpath",
"(",
"actual_in",
".",
"name",
")",
"logger",
".",
"debug",
"(",
"f\"writing {path} to {actual_out}\"",
")",
"self",
".",
"in_to_out",
"(",
"in_path",
"=",
"actual_in",
",",
"out_path",
"=",
"actual_out",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"f\"editing {path}\"",
")",
"self",
".",
"in_to_out",
"(",
"in_path",
"=",
"actual_in",
")",
"is_edit",
"=",
"True",
"file_counter",
"+=",
"1",
"if",
"is_edit",
":",
"logger",
".",
"info",
"(",
"f\"edited & wrote {file_counter} file(s) at {in_path}\"",
")",
"else",
":",
"logger",
".",
"info",
"(",
"f\"read {in_path}, formatted and wrote {file_counter} \"",
"f\"file(s) to {out_path}\"",
")",
"else",
":",
"logger",
".",
"info",
"(",
"f\"{in_path} found no files\"",
")"
]
| Write in files to out, calling the line_handler on each line.
Calls file_in_to_out under the hood to format the in_path payload. The
formatting processing is done by the self.formatter instance.
Args:
in_path: str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
out_path: str or path-like. Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
Top tip: Path-like objects strip the trailing slash. If
you want to pass in a dir that does not exist yet as
out-path with a trailing /, you should be passing it as a
str to preserve the /.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None. | [
"Write",
"in",
"files",
"to",
"out",
"calling",
"the",
"line_handler",
"on",
"each",
"line",
"."
]
| python | train |
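
The docstring's "Top tip" about trailing separators can be checked directly with the standard library, no pypyr required; it is why the method inspects the string form of `out_path` before deciding it names a directory:

```python
import os
from pathlib import Path

out_path = 'build/out/'
print(str(Path(out_path)))                    # 'build/out' -- Path() drops the trailing slash
print(out_path.endswith(os.sep))              # True on POSIX -> treated as a directory
print(str(Path(out_path)).endswith(os.sep))   # False -> would be mistaken for a file name
```
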
jorgeecardona/dynect | dynect/__init__.py | https://github.com/jorgeecardona/dynect/blob/d2cd85bc510f00108a3a5bfe515f45daae15a482/dynect/__init__.py#L242-L251 | def add_address(self, fqdn, address, ttl=0):
" Add a new address to a domain."
data = {'rdata': {'address': address}, 'ttl': str(ttl)}
# Make request.
response = self.post('/REST/ARecord/%s/%s' % (
self.zone, fqdn), data=data)
return Address(self, data=response.content['data']) | [
"def",
"add_address",
"(",
"self",
",",
"fqdn",
",",
"address",
",",
"ttl",
"=",
"0",
")",
":",
"data",
"=",
"{",
"'rdata'",
":",
"{",
"'address'",
":",
"address",
"}",
",",
"'ttl'",
":",
"str",
"(",
"ttl",
")",
"}",
"# Make request.",
"response",
"=",
"self",
".",
"post",
"(",
"'/REST/ARecord/%s/%s'",
"%",
"(",
"self",
".",
"zone",
",",
"fqdn",
")",
",",
"data",
"=",
"data",
")",
"return",
"Address",
"(",
"self",
",",
"data",
"=",
"response",
".",
"content",
"[",
"'data'",
"]",
")"
]
| Add a new address to a domain. | [
"Add",
"a",
"new",
"address",
"to",
"a",
"domain",
"."
]
| python | train |
adamcharnock/django-hordak | hordak/models/core.py | https://github.com/adamcharnock/django-hordak/blob/0ffcad1d3b388b860c8c47fde12aa40df213066f/hordak/models/core.py#L366-L370 | def sum_to_balance(self):
"""Sum the Legs of the QuerySet to get a `Balance`_ object
"""
result = self.values("amount_currency").annotate(total=models.Sum("amount"))
return Balance([Money(r["total"], r["amount_currency"]) for r in result]) | [
"def",
"sum_to_balance",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"values",
"(",
"\"amount_currency\"",
")",
".",
"annotate",
"(",
"total",
"=",
"models",
".",
"Sum",
"(",
"\"amount\"",
")",
")",
"return",
"Balance",
"(",
"[",
"Money",
"(",
"r",
"[",
"\"total\"",
"]",
",",
"r",
"[",
"\"amount_currency\"",
"]",
")",
"for",
"r",
"in",
"result",
"]",
")"
]
| Sum the Legs of the QuerySet to get a `Balance`_ object | [
"Sum",
"the",
"Legs",
"of",
"the",
"QuerySet",
"to",
"get",
"a",
"Balance",
"_",
"object"
]
| python | train |
JasonKessler/scattertext | scattertext/features/PhraseMachinePhrases.py | https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/features/PhraseMachinePhrases.py#L12-L25 | def get_feats(self, doc):
'''
Parameters
----------
doc, Spacy Doc
Returns
-------
Counter noun chunk -> count
'''
ngram_counter = Counter()
for sent in doc.sents:
ngram_counter += _phrase_counts(sent)
return ngram_counter | [
"def",
"get_feats",
"(",
"self",
",",
"doc",
")",
":",
"ngram_counter",
"=",
"Counter",
"(",
")",
"for",
"sent",
"in",
"doc",
".",
"sents",
":",
"ngram_counter",
"+=",
"_phrase_counts",
"(",
"sent",
")",
"return",
"ngram_counter"
]
| Parameters
----------
doc, Spacy Doc
Returns
-------
Counter noun chunk -> count | [
"Parameters",
"----------",
"doc",
"Spacy",
"Doc"
]
| python | train |
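
A hedged usage sketch for `get_feats`; the class and import path are assumed to match the record's module name, and scattertext, spaCy (with an English model) and the phrasemachine dependency are assumed to be installed:

```python
import spacy
from scattertext.features.PhraseMachinePhrases import PhraseMachinePhrases  # assumed import

nlp = spacy.load('en_core_web_sm')    # requires the en_core_web_sm model
doc = nlp("Gene expression profiling identified novel tumor suppressor genes.")

counts = PhraseMachinePhrases().get_feats(doc)   # Counter: noun phrase -> count
print(counts.most_common(5))
```
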
streamlink/streamlink | src/streamlink/plugins/crunchyroll.py | https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/plugins/crunchyroll.py#L222-L239 | def get_info(self, media_id, fields=None, schema=None):
"""
Returns the data for a certain media item.
:param media_id: id that identifies the media item to be accessed.
:param fields: list of the media"s field to be returned. By default the
API returns some fields, but others are not returned unless they are
explicity asked for. I have no real documentation on the fields, but
they all seem to start with the "media." prefix (e.g. media.name,
media.stream_data).
:param schema: validation schema to use
"""
params = {"media_id": media_id}
if fields:
params["fields"] = ",".join(fields)
return self._api_call("info", params, schema=schema) | [
"def",
"get_info",
"(",
"self",
",",
"media_id",
",",
"fields",
"=",
"None",
",",
"schema",
"=",
"None",
")",
":",
"params",
"=",
"{",
"\"media_id\"",
":",
"media_id",
"}",
"if",
"fields",
":",
"params",
"[",
"\"fields\"",
"]",
"=",
"\",\"",
".",
"join",
"(",
"fields",
")",
"return",
"self",
".",
"_api_call",
"(",
"\"info\"",
",",
"params",
",",
"schema",
"=",
"schema",
")"
]
| Returns the data for a certain media item.
:param media_id: id that identifies the media item to be accessed.
:param fields: list of the media"s field to be returned. By default the
API returns some fields, but others are not returned unless they are
explicitly asked for. I have no real documentation on the fields, but
they all seem to start with the "media." prefix (e.g. media.name,
media.stream_data).
:param schema: validation schema to use | [
"Returns",
"the",
"data",
"for",
"a",
"certain",
"media",
"item",
"."
]
| python | test |
Komnomnomnom/swigibpy | swigibpy.py | https://github.com/Komnomnomnom/swigibpy/blob/cfd307fdbfaffabc69a2dc037538d7e34a8b8daf/swigibpy.py#L2591-L2593 | def realtimeBar(self, reqId, time, open, high, low, close, volume, wap, count):
"""realtimeBar(EWrapper self, TickerId reqId, long time, double open, double high, double low, double close, long volume, double wap, int count)"""
return _swigibpy.EWrapper_realtimeBar(self, reqId, time, open, high, low, close, volume, wap, count) | [
"def",
"realtimeBar",
"(",
"self",
",",
"reqId",
",",
"time",
",",
"open",
",",
"high",
",",
"low",
",",
"close",
",",
"volume",
",",
"wap",
",",
"count",
")",
":",
"return",
"_swigibpy",
".",
"EWrapper_realtimeBar",
"(",
"self",
",",
"reqId",
",",
"time",
",",
"open",
",",
"high",
",",
"low",
",",
"close",
",",
"volume",
",",
"wap",
",",
"count",
")"
]
| realtimeBar(EWrapper self, TickerId reqId, long time, double open, double high, double low, double close, long volume, double wap, int count) | [
"realtimeBar",
"(",
"EWrapper",
"self",
"TickerId",
"reqId",
"long",
"time",
"double",
"open",
"double",
"high",
"double",
"low",
"double",
"close",
"long",
"volume",
"double",
"wap",
"int",
"count",
")"
]
| python | train |
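
The generated wrapper above is normally used by subclassing `EWrapper` and overriding the callback; a minimal sketch (the printing logic is illustrative):

```python
from swigibpy import EWrapper

class MyWrapper(EWrapper):
    def realtimeBar(self, reqId, time, open, high, low, close, volume, wap, count):
        # Invoked by TWS once per real-time bar for the subscribed request id.
        print(f"bar {reqId}: t={time} O={open} H={high} L={low} C={close} V={volume}")
```
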
PyconUK/ConferenceScheduler | src/conference_scheduler/scheduler.py | https://github.com/PyconUK/ConferenceScheduler/blob/fb139f0ef2eab5ac8f4919aa4994d94d4e040030/src/conference_scheduler/scheduler.py#L31-L102 | def heuristic(events,
slots,
objective_function=None,
algorithm=heu.hill_climber,
initial_solution=None,
initial_solution_algorithm_kwargs={},
objective_function_algorithm_kwargs={},
**kwargs):
"""
Compute a schedule using a heuristic
Parameters
----------
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
algorithm : callable
a heuristic algorithm from conference_scheduler.heuristics
initial_solution_algorithm_kwargs : dict
kwargs for the heuristic algorithm for the initial solution
objective_function_algorithm_kwargs : dict
kwargs for the heuristic algorithm for the objective function (if
necessary.
objective_function: callable
from lp_problem.objective_functions
kwargs : keyword arguments
arguments for the objective function
Returns
-------
list
A list of tuples giving the event and slot index (for the given
events and slots lists) for all scheduled items.
Example
-------
For a solution where
* event 0 is scheduled in slot 1
* event 1 is scheduled in slot 4
* event 2 is scheduled in slot 5
the resulting list would be::
[(0, 1), (1, 4), (2, 5)]
"""
def count_violations(array):
return len(list(val.array_violations(array, events, slots)))
if initial_solution is None:
X = heu.get_initial_array(events=events, slots=slots)
X = algorithm(initial_array=X,
objective_function=count_violations,
lower_bound=0,
**initial_solution_algorithm_kwargs)
else:
X = initial_solution
if objective_function is not None:
kwargs["beta"] = float('inf')
def func(array):
return objective_function(
events=events, slots=slots, X=array, **kwargs)
X = algorithm(initial_array=X,
objective_function=func,
acceptance_criteria=count_violations,
**objective_function_algorithm_kwargs)
return list(zip(*np.nonzero(X))) | [
"def",
"heuristic",
"(",
"events",
",",
"slots",
",",
"objective_function",
"=",
"None",
",",
"algorithm",
"=",
"heu",
".",
"hill_climber",
",",
"initial_solution",
"=",
"None",
",",
"initial_solution_algorithm_kwargs",
"=",
"{",
"}",
",",
"objective_function_algorithm_kwargs",
"=",
"{",
"}",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"count_violations",
"(",
"array",
")",
":",
"return",
"len",
"(",
"list",
"(",
"val",
".",
"array_violations",
"(",
"array",
",",
"events",
",",
"slots",
")",
")",
")",
"if",
"initial_solution",
"is",
"None",
":",
"X",
"=",
"heu",
".",
"get_initial_array",
"(",
"events",
"=",
"events",
",",
"slots",
"=",
"slots",
")",
"X",
"=",
"algorithm",
"(",
"initial_array",
"=",
"X",
",",
"objective_function",
"=",
"count_violations",
",",
"lower_bound",
"=",
"0",
",",
"*",
"*",
"initial_solution_algorithm_kwargs",
")",
"else",
":",
"X",
"=",
"initial_solution",
"if",
"objective_function",
"is",
"not",
"None",
":",
"kwargs",
"[",
"\"beta\"",
"]",
"=",
"float",
"(",
"'inf'",
")",
"def",
"func",
"(",
"array",
")",
":",
"return",
"objective_function",
"(",
"events",
"=",
"events",
",",
"slots",
"=",
"slots",
",",
"X",
"=",
"array",
",",
"*",
"*",
"kwargs",
")",
"X",
"=",
"algorithm",
"(",
"initial_array",
"=",
"X",
",",
"objective_function",
"=",
"func",
",",
"acceptance_criteria",
"=",
"count_violations",
",",
"*",
"*",
"objective_function_algorithm_kwargs",
")",
"return",
"list",
"(",
"zip",
"(",
"*",
"np",
".",
"nonzero",
"(",
"X",
")",
")",
")"
]
| Compute a schedule using a heuristic
Parameters
----------
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
algorithm : callable
a heuristic algorithm from conference_scheduler.heuristics
initial_solution_algorithm_kwargs : dict
kwargs for the heuristic algorithm for the initial solution
objective_function_algorithm_kwargs : dict
kwargs for the heuristic algorithm for the objective function (if
necessary).
objective_function: callable
from lp_problem.objective_functions
kwargs : keyword arguments
arguments for the objective function
Returns
-------
list
A list of tuples giving the event and slot index (for the given
events and slots lists) for all scheduled items.
Example
-------
For a solution where
* event 0 is scheduled in slot 1
* event 1 is scheduled in slot 4
* event 2 is scheduled in slot 5
the resulting list would be::
[(0, 1), (1, 4), (2, 5)] | [
"Compute",
"a",
"schedule",
"using",
"a",
"heuristic"
]
| python | train |
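
A standalone sketch of how to read the return value described above: each tuple pairs an index into `events` with an index into `slots`. The labels below are placeholders, not the library's Event/Slot objects:

```python
solution = [(0, 1), (1, 4), (2, 5)]          # the docstring's example output
event_names = ['Talk A', 'Talk B', 'Workshop C']
slot_names = ['Mon 09:00', 'Mon 10:00', 'Mon 11:00',
              'Tue 09:00', 'Tue 10:00', 'Tue 11:00']

for event_idx, slot_idx in solution:
    print(f"{event_names[event_idx]} -> {slot_names[slot_idx]}")
```
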
gwastro/pycbc | pycbc/workflow/jobsetup.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/jobsetup.py#L118-L177 | def select_generic_executable(workflow, exe_tag):
""" Returns a class that is appropriate for setting up jobs to run executables
having specific tags in the workflow config.
Executables should not be "specialized" jobs fitting into one of the
select_XXX_class functions above, i.e. not a matched filter or template
bank job, which require extra setup.
Parameters
----------
workflow : pycbc.workflow.core.Workflow
The Workflow instance.
exe_tag : string
The name of the config section storing options for this executable and
the option giving the executable path in the [executables] section.
Returns
--------
exe_class : Sub-class of pycbc.workflow.core.Executable that holds utility
functions appropriate for the given executable. Instances of the class
('jobs') **must** have a method job.create_node()
"""
exe_path = workflow.cp.get("executables", exe_tag)
exe_name = os.path.basename(exe_path)
exe_to_class_map = {
'ligolw_add' : LigolwAddExecutable,
'ligolw_cbc_sstinca' : LigolwSSthincaExecutable,
'pycbc_sqlite_simplify' : PycbcSqliteSimplifyExecutable,
'ligolw_cbc_cluster_coincs': SQLInOutExecutable,
'ligolw_cbc_repop_coinc' : SQLInOutExecutable,
'repop_coinc_expfit' : SQLInOutExecutable,
'ligolw_cbc_dbinjfind' : SQLInOutExecutable,
'lalapps_inspinj' : LalappsInspinjExecutable,
'pycbc_dark_vs_bright_injections' : PycbcDarkVsBrightInjectionsExecutable,
'pycbc_timeslides' : PycbcTimeslidesExecutable,
'pycbc_compute_durations' : ComputeDurationsExecutable,
'pycbc_calculate_far' : PycbcCalculateFarExecutable,
"pycbc_run_sqlite" : SQLInOutExecutable,
# FIXME: We may end up with more than one class for using ligolw_sqlite
# How to deal with this?
"ligolw_sqlite" : ExtractToXMLExecutable,
"pycbc_inspinjfind" : InspinjfindExecutable,
"pycbc_pickle_horizon_distances" : PycbcPickleHorizonDistsExecutable,
"pycbc_combine_likelihood" : PycbcCombineLikelihoodExecutable,
"pycbc_gen_ranking_data" : PycbcGenerateRankingDataExecutable,
"pycbc_calculate_likelihood" : PycbcCalculateLikelihoodExecutable,
"gstlal_inspiral_marginalize_likelihood" : GstlalMarginalizeLikelihoodExecutable,
"pycbc_compute_far_from_snr_chisq_histograms" : GstlalFarfromsnrchisqhistExecutable,
"gstlal_inspiral_plot_sensitivity" : GstlalPlotSensitivity,
"gstlal_inspiral_plot_background" : GstlalPlotBackground,
"gstlal_inspiral_plotsummary" : GstlalPlotSummary,
"gstlal_inspiral_summary_page" : GstlalSummaryPage,
"pycbc_condition_strain" : PycbcConditionStrainExecutable
}
try:
return exe_to_class_map[exe_name]
except KeyError:
# Should we try some sort of default class??
raise NotImplementedError(
"No job class exists for executable %s, exiting" % exe_name) | [
"def",
"select_generic_executable",
"(",
"workflow",
",",
"exe_tag",
")",
":",
"exe_path",
"=",
"workflow",
".",
"cp",
".",
"get",
"(",
"\"executables\"",
",",
"exe_tag",
")",
"exe_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"exe_path",
")",
"exe_to_class_map",
"=",
"{",
"'ligolw_add'",
":",
"LigolwAddExecutable",
",",
"'ligolw_cbc_sstinca'",
":",
"LigolwSSthincaExecutable",
",",
"'pycbc_sqlite_simplify'",
":",
"PycbcSqliteSimplifyExecutable",
",",
"'ligolw_cbc_cluster_coincs'",
":",
"SQLInOutExecutable",
",",
"'ligolw_cbc_repop_coinc'",
":",
"SQLInOutExecutable",
",",
"'repop_coinc_expfit'",
":",
"SQLInOutExecutable",
",",
"'ligolw_cbc_dbinjfind'",
":",
"SQLInOutExecutable",
",",
"'lalapps_inspinj'",
":",
"LalappsInspinjExecutable",
",",
"'pycbc_dark_vs_bright_injections'",
":",
"PycbcDarkVsBrightInjectionsExecutable",
",",
"'pycbc_timeslides'",
":",
"PycbcTimeslidesExecutable",
",",
"'pycbc_compute_durations'",
":",
"ComputeDurationsExecutable",
",",
"'pycbc_calculate_far'",
":",
"PycbcCalculateFarExecutable",
",",
"\"pycbc_run_sqlite\"",
":",
"SQLInOutExecutable",
",",
"# FIXME: We may end up with more than one class for using ligolw_sqlite",
"# How to deal with this?",
"\"ligolw_sqlite\"",
":",
"ExtractToXMLExecutable",
",",
"\"pycbc_inspinjfind\"",
":",
"InspinjfindExecutable",
",",
"\"pycbc_pickle_horizon_distances\"",
":",
"PycbcPickleHorizonDistsExecutable",
",",
"\"pycbc_combine_likelihood\"",
":",
"PycbcCombineLikelihoodExecutable",
",",
"\"pycbc_gen_ranking_data\"",
":",
"PycbcGenerateRankingDataExecutable",
",",
"\"pycbc_calculate_likelihood\"",
":",
"PycbcCalculateLikelihoodExecutable",
",",
"\"gstlal_inspiral_marginalize_likelihood\"",
":",
"GstlalMarginalizeLikelihoodExecutable",
",",
"\"pycbc_compute_far_from_snr_chisq_histograms\"",
":",
"GstlalFarfromsnrchisqhistExecutable",
",",
"\"gstlal_inspiral_plot_sensitivity\"",
":",
"GstlalPlotSensitivity",
",",
"\"gstlal_inspiral_plot_background\"",
":",
"GstlalPlotBackground",
",",
"\"gstlal_inspiral_plotsummary\"",
":",
"GstlalPlotSummary",
",",
"\"gstlal_inspiral_summary_page\"",
":",
"GstlalSummaryPage",
",",
"\"pycbc_condition_strain\"",
":",
"PycbcConditionStrainExecutable",
"}",
"try",
":",
"return",
"exe_to_class_map",
"[",
"exe_name",
"]",
"except",
"KeyError",
":",
"# Should we try some sort of default class??",
"raise",
"NotImplementedError",
"(",
"\"No job class exists for executable %s, exiting\"",
"%",
"exe_name",
")"
]
| Returns a class that is appropriate for setting up jobs to run executables
having specific tags in the workflow config.
Executables should not be "specialized" jobs fitting into one of the
select_XXX_class functions above, i.e. not a matched filter or template
bank job, which require extra setup.
Parameters
----------
workflow : pycbc.workflow.core.Workflow
The Workflow instance.
exe_tag : string
The name of the config section storing options for this executable and
the option giving the executable path in the [executables] section.
Returns
--------
exe_class : Sub-class of pycbc.workflow.core.Executable that holds utility
functions appropriate for the given executable. Instances of the class
('jobs') **must** have a method job.create_node() | [
"Returns",
"a",
"class",
"that",
"is",
"appropriate",
"for",
"setting",
"up",
"jobs",
"to",
"run",
"executables",
"having",
"specific",
"tags",
"in",
"the",
"workflow",
"config",
".",
"Executables",
"should",
"not",
"be",
"specialized",
"jobs",
"fitting",
"into",
"one",
"of",
"the",
"select_XXX_class",
"functions",
"above",
"i",
".",
"e",
".",
"not",
"a",
"matched",
"filter",
"or",
"template",
"bank",
"job",
"which",
"require",
"extra",
"setup",
"."
]
| python | train |
apache/incubator-heron | heron/tools/cli/src/python/jars.py | https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/cli/src/python/jars.py#L28-L36 | def pick(dirname, pattern):
'''
Get the topology jars
:param dirname:
:param pattern:
:return:
'''
file_list = fnmatch.filter(os.listdir(dirname), pattern)
return file_list[0] if file_list else None | [
"def",
"pick",
"(",
"dirname",
",",
"pattern",
")",
":",
"file_list",
"=",
"fnmatch",
".",
"filter",
"(",
"os",
".",
"listdir",
"(",
"dirname",
")",
",",
"pattern",
")",
"return",
"file_list",
"[",
"0",
"]",
"if",
"file_list",
"else",
"None"
]
| Get the topology jars
:param dirname:
:param pattern:
:return: | [
"Get",
"the",
"topology",
"jars",
":",
"param",
"dirname",
":",
":",
"param",
"pattern",
":",
":",
"return",
":"
]
| python | valid |
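
A standalone demo of the glob-style selection above; the function body is copied from the record so it runs without Heron, and the jar names are made up:

```python
import fnmatch
import os
import tempfile

def pick(dirname, pattern):
    file_list = fnmatch.filter(os.listdir(dirname), pattern)
    return file_list[0] if file_list else None

with tempfile.TemporaryDirectory() as d:
    for name in ('heron-api-0.20.jar', 'heron-storm-0.20.jar', 'README.md'):
        open(os.path.join(d, name), 'w').close()
    print(pick(d, 'heron-*.jar'))   # one of the heron jars (listdir order is arbitrary)
    print(pick(d, '*.zip'))         # None
```
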
project-ncl/pnc-cli | pnc_cli/buildrecords.py | https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/buildrecords.py#L111-L117 | def list_dependency_artifacts(id, page_size=200, page_index=0, sort="", q=""):
"""
List dependency artifacts associated with a BuildRecord
"""
data = list_dependency_artifacts_raw(id, page_size, page_index, sort, q)
if data:
return utils.format_json_list(data) | [
"def",
"list_dependency_artifacts",
"(",
"id",
",",
"page_size",
"=",
"200",
",",
"page_index",
"=",
"0",
",",
"sort",
"=",
"\"\"",
",",
"q",
"=",
"\"\"",
")",
":",
"data",
"=",
"list_dependency_artifacts_raw",
"(",
"id",
",",
"page_size",
",",
"page_index",
",",
"sort",
",",
"q",
")",
"if",
"data",
":",
"return",
"utils",
".",
"format_json_list",
"(",
"data",
")"
]
| List dependency artifacts associated with a BuildRecord | [
"List",
"dependency",
"artifacts",
"associated",
"with",
"a",
"BuildRecord"
]
| python | train |
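A short usage sketch for list_dependency_artifacts() from the record above; the BuildRecord id and paging values are made-up examples:

    from pnc_cli import buildrecords  # module path taken from the record

    # Fetch up to 50 dependency artifacts for a hypothetical BuildRecord with id 1234.
    artifacts = buildrecords.list_dependency_artifacts(1234, page_size=50, page_index=0)
    if artifacts:
        print(artifacts)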
Unidata/MetPy | metpy/io/_tools.py | https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/io/_tools.py#L123-L125 | def unpack(self, s):
"""Parse bytes and return a namedtuple."""
return self._create(super(NamedStruct, self).unpack(s)) | [
"def",
"unpack",
"(",
"self",
",",
"s",
")",
":",
"return",
"self",
".",
"_create",
"(",
"super",
"(",
"NamedStruct",
",",
"self",
")",
".",
"unpack",
"(",
"s",
")",
")"
]
| Parse bytes and return a namedtuple. | [
"Parse",
"bytes",
"and",
"return",
"a",
"namedtuple",
"."
]
| python | train |
pydata/xarray | xarray/core/indexes.py | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/indexes.py#L39-L58 | def default_indexes(
coords: Mapping[Any, Variable],
dims: Iterable,
) -> 'OrderedDict[Any, pd.Index]':
"""Default indexes for a Dataset/DataArray.
Parameters
----------
coords : Mapping[Any, xarray.Variable]
Coordinate variables from which to draw default indexes.
dims : iterable
Iterable of dimension names.
Returns
-------
Mapping from indexing keys (levels/dimension names) to indexes used for
indexing along that dimension.
"""
return OrderedDict((key, coords[key].to_index())
for key in dims if key in coords) | [
"def",
"default_indexes",
"(",
"coords",
":",
"Mapping",
"[",
"Any",
",",
"Variable",
"]",
",",
"dims",
":",
"Iterable",
",",
")",
"->",
"'OrderedDict[Any, pd.Index]'",
":",
"return",
"OrderedDict",
"(",
"(",
"key",
",",
"coords",
"[",
"key",
"]",
".",
"to_index",
"(",
")",
")",
"for",
"key",
"in",
"dims",
"if",
"key",
"in",
"coords",
")"
]
| Default indexes for a Dataset/DataArray.
Parameters
----------
coords : Mapping[Any, xarray.Variable]
Coordinate variables from which to draw default indexes.
dims : iterable
Iterable of dimension names.
Returns
-------
Mapping from indexing keys (levels/dimension names) to indexes used for
indexing along that dimension. | [
"Default",
"indexes",
"for",
"a",
"Dataset",
"/",
"DataArray",
"."
]
| python | train |
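An illustrative call to default_indexes() from the record above. The function lives in an internal xarray module, so the import follows the record's path and may change between releases:

    import xarray as xr
    from xarray.core.indexes import default_indexes  # internal module, per the record's path

    # One dimension coordinate 'x'; the result maps 'x' to a pandas Index built via to_index().
    coords = {'x': xr.Variable(('x',), [10, 20, 30])}
    indexes = default_indexes(coords, dims=['x'])
    print(indexes['x'])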
AtomHash/evernode | evernode/classes/json.py | https://github.com/AtomHash/evernode/blob/b2fb91555fb937a3f3eba41db56dee26f9b034be/evernode/classes/json.py#L55-L58 | def from_file(file_path) -> dict:
""" Load JSON file """
with io.open(file_path, 'r', encoding='utf-8') as json_stream:
return Json.parse(json_stream, True) | [
"def",
"from_file",
"(",
"file_path",
")",
"->",
"dict",
":",
"with",
"io",
".",
"open",
"(",
"file_path",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"json_stream",
":",
"return",
"Json",
".",
"parse",
"(",
"json_stream",
",",
"True",
")"
]
| Load JSON file | [
"Load",
"JSON",
"file"
]
| python | train |
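A brief usage sketch of the static Json.from_file() helper above; the import path is assumed from the file path in the record, and 'config.json' is a placeholder:

    from evernode.classes.json import Json  # assumed import path

    # Read a UTF-8 JSON file and parse it into a dict.
    config = Json.from_file('config.json')
    print(config)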
alex-kostirin/pyatomac | atomac/ldtpd/core.py | https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/ldtpd/core.py#L257-L288 | def getobjectinfo(self, window_name, object_name):
"""
Get object properties.
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: list of properties
@rtype: list
"""
try:
obj_info = self._get_object_map(window_name, object_name,
wait_for_object=False)
except atomac._a11y.ErrorInvalidUIElement:
# During the test, when the window closed and reopened
# ErrorInvalidUIElement exception will be thrown
self._windows = {}
# Call the method again, after updating apps
obj_info = self._get_object_map(window_name, object_name,
wait_for_object=False)
props = []
if obj_info:
for obj_prop in obj_info.keys():
if not obj_info[obj_prop] or obj_prop == "obj":
# Don't add object handle to the list
continue
props.append(obj_prop)
return props | [
"def",
"getobjectinfo",
"(",
"self",
",",
"window_name",
",",
"object_name",
")",
":",
"try",
":",
"obj_info",
"=",
"self",
".",
"_get_object_map",
"(",
"window_name",
",",
"object_name",
",",
"wait_for_object",
"=",
"False",
")",
"except",
"atomac",
".",
"_a11y",
".",
"ErrorInvalidUIElement",
":",
"# During the test, when the window closed and reopened",
"# ErrorInvalidUIElement exception will be thrown",
"self",
".",
"_windows",
"=",
"{",
"}",
"# Call the method again, after updating apps",
"obj_info",
"=",
"self",
".",
"_get_object_map",
"(",
"window_name",
",",
"object_name",
",",
"wait_for_object",
"=",
"False",
")",
"props",
"=",
"[",
"]",
"if",
"obj_info",
":",
"for",
"obj_prop",
"in",
"obj_info",
".",
"keys",
"(",
")",
":",
"if",
"not",
"obj_info",
"[",
"obj_prop",
"]",
"or",
"obj_prop",
"==",
"\"obj\"",
":",
"# Don't add object handle to the list",
"continue",
"props",
".",
"append",
"(",
"obj_prop",
")",
"return",
"props"
]
| Get object properties.
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: list of properties
@rtype: list | [
"Get",
"object",
"properties",
"."
]
| python | valid |
dylanaraps/pywal | pywal/sequences.py | https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/sequences.py#L38-L69 | def create_sequences(colors, vte_fix=False):
"""Create the escape sequences."""
alpha = colors["alpha"]
# Colors 0-15.
sequences = [set_color(index, colors["colors"]["color%s" % index])
for index in range(16)]
# Special colors.
# Source: https://goo.gl/KcoQgP
# 10 = foreground, 11 = background, 12 = cursor foregound
# 13 = mouse foreground, 708 = background border color.
sequences.extend([
set_special(10, colors["special"]["foreground"], "g"),
set_special(11, colors["special"]["background"], "h", alpha),
set_special(12, colors["special"]["cursor"], "l"),
set_special(13, colors["special"]["foreground"], "j"),
set_special(17, colors["special"]["foreground"], "k"),
set_special(19, colors["special"]["background"], "m"),
set_color(232, colors["special"]["background"]),
set_color(256, colors["special"]["foreground"])
])
if not vte_fix:
sequences.extend(
set_special(708, colors["special"]["background"], "", alpha)
)
if OS == "Darwin":
sequences += set_iterm_tab_color(colors["special"]["background"])
return "".join(sequences) | [
"def",
"create_sequences",
"(",
"colors",
",",
"vte_fix",
"=",
"False",
")",
":",
"alpha",
"=",
"colors",
"[",
"\"alpha\"",
"]",
"# Colors 0-15.",
"sequences",
"=",
"[",
"set_color",
"(",
"index",
",",
"colors",
"[",
"\"colors\"",
"]",
"[",
"\"color%s\"",
"%",
"index",
"]",
")",
"for",
"index",
"in",
"range",
"(",
"16",
")",
"]",
"# Special colors.",
"# Source: https://goo.gl/KcoQgP",
"# 10 = foreground, 11 = background, 12 = cursor foregound",
"# 13 = mouse foreground, 708 = background border color.",
"sequences",
".",
"extend",
"(",
"[",
"set_special",
"(",
"10",
",",
"colors",
"[",
"\"special\"",
"]",
"[",
"\"foreground\"",
"]",
",",
"\"g\"",
")",
",",
"set_special",
"(",
"11",
",",
"colors",
"[",
"\"special\"",
"]",
"[",
"\"background\"",
"]",
",",
"\"h\"",
",",
"alpha",
")",
",",
"set_special",
"(",
"12",
",",
"colors",
"[",
"\"special\"",
"]",
"[",
"\"cursor\"",
"]",
",",
"\"l\"",
")",
",",
"set_special",
"(",
"13",
",",
"colors",
"[",
"\"special\"",
"]",
"[",
"\"foreground\"",
"]",
",",
"\"j\"",
")",
",",
"set_special",
"(",
"17",
",",
"colors",
"[",
"\"special\"",
"]",
"[",
"\"foreground\"",
"]",
",",
"\"k\"",
")",
",",
"set_special",
"(",
"19",
",",
"colors",
"[",
"\"special\"",
"]",
"[",
"\"background\"",
"]",
",",
"\"m\"",
")",
",",
"set_color",
"(",
"232",
",",
"colors",
"[",
"\"special\"",
"]",
"[",
"\"background\"",
"]",
")",
",",
"set_color",
"(",
"256",
",",
"colors",
"[",
"\"special\"",
"]",
"[",
"\"foreground\"",
"]",
")",
"]",
")",
"if",
"not",
"vte_fix",
":",
"sequences",
".",
"extend",
"(",
"set_special",
"(",
"708",
",",
"colors",
"[",
"\"special\"",
"]",
"[",
"\"background\"",
"]",
",",
"\"\"",
",",
"alpha",
")",
")",
"if",
"OS",
"==",
"\"Darwin\"",
":",
"sequences",
"+=",
"set_iterm_tab_color",
"(",
"colors",
"[",
"\"special\"",
"]",
"[",
"\"background\"",
"]",
")",
"return",
"\"\"",
".",
"join",
"(",
"sequences",
")"
]
| Create the escape sequences. | [
"Create",
"the",
"escape",
"sequences",
"."
]
| python | train |
ValvePython/steam | steam/client/builtins/leaderboards.py | https://github.com/ValvePython/steam/blob/2de1364c47598410b572114e6129eab8fff71d5b/steam/client/builtins/leaderboards.py#L164-L186 | def get_iter(self, times, seconds, chunk_size=2000):
"""Make a iterator over the entries
See :class:`steam.util.throttle.ConstantRateLimit` for ``times`` and ``seconds`` parameters.
:param chunk_size: number of entries per request
:type chunk_size: :class:`int`
:returns: generator object
:rtype: :class:`generator`
The iterator essentially buffers ``chunk_size`` number of entries, and ensures
we are not sending messages too fast.
For example, the ``__iter__`` method on this class uses ``get_iter(1, 1, 2000)``
"""
def entry_generator():
with ConstantRateLimit(times, seconds, sleep_func=self._steam.sleep) as r:
for entries in chunks(self, chunk_size):
if not entries:
return
for entry in entries:
yield entry
r.wait()
return entry_generator() | [
"def",
"get_iter",
"(",
"self",
",",
"times",
",",
"seconds",
",",
"chunk_size",
"=",
"2000",
")",
":",
"def",
"entry_generator",
"(",
")",
":",
"with",
"ConstantRateLimit",
"(",
"times",
",",
"seconds",
",",
"sleep_func",
"=",
"self",
".",
"_steam",
".",
"sleep",
")",
"as",
"r",
":",
"for",
"entries",
"in",
"chunks",
"(",
"self",
",",
"chunk_size",
")",
":",
"if",
"not",
"entries",
":",
"return",
"for",
"entry",
"in",
"entries",
":",
"yield",
"entry",
"r",
".",
"wait",
"(",
")",
"return",
"entry_generator",
"(",
")"
]
| Make an iterator over the entries
See :class:`steam.util.throttle.ConstantRateLimit` for ``times`` and ``seconds`` parameters.
:param chunk_size: number of entries per request
:type chunk_size: :class:`int`
:returns: generator object
:rtype: :class:`generator`
The iterator essentially buffers ``chunk_size`` number of entries, and ensures
we are not sending messages too fast.
For example, the ``__iter__`` method on this class uses ``get_iter(1, 1, 2000)`` | [
"Make",
"a",
"iterator",
"over",
"the",
"entries"
]
| python | train |
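A hedged usage sketch of get_iter() from the record above, assuming `board` is an already-obtained leaderboard object exposing this method (its construction is not shown in the record):

    # Iterate over all leaderboard entries, requesting 2000 entries at most once per second,
    # mirroring the defaults the record's docstring says __iter__ uses.
    for entry in board.get_iter(times=1, seconds=1, chunk_size=2000):
        print(entry)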
NASA-AMMOS/AIT-Core | ait/core/tlm.py | https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/tlm.py#L342-L353 | def encode(self, value):
"""Encodes the given value according to this FieldDefinition."""
if type(value) == str and self.enum and value in self.enum:
value = self.enum[value]
if type(value) == int:
if self.shift > 0:
value <<= self.shift
if self.mask is not None:
value &= self.mask
return self.type.encode(value) if self.type else bytearray() | [
"def",
"encode",
"(",
"self",
",",
"value",
")",
":",
"if",
"type",
"(",
"value",
")",
"==",
"str",
"and",
"self",
".",
"enum",
"and",
"value",
"in",
"self",
".",
"enum",
":",
"value",
"=",
"self",
".",
"enum",
"[",
"value",
"]",
"if",
"type",
"(",
"value",
")",
"==",
"int",
":",
"if",
"self",
".",
"shift",
">",
"0",
":",
"value",
"<<=",
"self",
".",
"shift",
"if",
"self",
".",
"mask",
"is",
"not",
"None",
":",
"value",
"&=",
"self",
".",
"mask",
"return",
"self",
".",
"type",
".",
"encode",
"(",
"value",
")",
"if",
"self",
".",
"type",
"else",
"bytearray",
"(",
")"
]
| Encodes the given value according to this FieldDefinition. | [
"Encodes",
"the",
"given",
"value",
"according",
"to",
"this",
"FieldDefinition",
"."
]
| python | train |
morepath/more.jwtauth | more/jwtauth/refresh.py | https://github.com/morepath/more.jwtauth/blob/1c3c5731612069a092e44cf612641c05edf1f083/more/jwtauth/refresh.py#L4-L21 | def verify_refresh_request(request):
"""
Wrapper around JWTIdentityPolicy.verify_refresh which verifies
that the request to refresh the token is valid.
If valid, it returns the userid, which can be used to
create an updated identity with ``remember_identity``.
Otherwise it raises an exception based on InvalidTokenError.
:param request: request object
:type request: :class:`morepath.Request`
:returns: userid
:raises: InvalidTokenError, ExpiredSignatureError, DecodeError,
MissingRequiredClaimError
"""
jwtauth_settings = request.app.settings.jwtauth.__dict__.copy()
identity_policy = JWTIdentityPolicy(**jwtauth_settings)
return identity_policy.verify_refresh(request) | [
"def",
"verify_refresh_request",
"(",
"request",
")",
":",
"jwtauth_settings",
"=",
"request",
".",
"app",
".",
"settings",
".",
"jwtauth",
".",
"__dict__",
".",
"copy",
"(",
")",
"identity_policy",
"=",
"JWTIdentityPolicy",
"(",
"*",
"*",
"jwtauth_settings",
")",
"return",
"identity_policy",
".",
"verify_refresh",
"(",
"request",
")"
]
| Wrapper around JWTIdentityPolicy.verify_refresh which verifies
that the request to refresh the token is valid.
If valid, it returns the userid, which can be used to
create an updated identity with ``remember_identity``.
Otherwise it raises an exception based on InvalidTokenError.
:param request: request object
:type request: :class:`morepath.Request`
:returns: userid
:raises: InvalidTokenError, ExpiredSignatureError, DecodeError,
MissingRequiredClaimError | [
"Wrapper",
"around",
"JWTIdentityPolicy",
".",
"verify_refresh",
"which",
"verify",
"if",
"the",
"request",
"to",
"refresh",
"the",
"token",
"is",
"valid",
".",
"If",
"valid",
"it",
"returns",
"the",
"userid",
"which",
"can",
"be",
"used",
"to",
"create",
"to",
"create",
"an",
"updated",
"identity",
"with",
"remember_identity",
".",
"Otherwise",
"it",
"raises",
"an",
"exception",
"based",
"on",
"InvalidTokenError",
"."
]
| python | train |
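A sketch of how verify_refresh_request() above might be used inside a morepath view; the view wiring (decorator, model) is omitted and only the call itself comes from the record:

    from more.jwtauth.refresh import verify_refresh_request  # module path from the record

    def refresh_view(self, request):
        # Raises an InvalidTokenError subclass when the refresh request is not valid.
        userid = verify_refresh_request(request)
        # ... re-issue credentials for `userid`, e.g. via remember_identity(...)
        return userid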
lambdalisue/maidenhair | src/maidenhair/parsers/base.py | https://github.com/lambdalisue/maidenhair/blob/d5095c1087d1f4d71cc57410492151d2803a9f0d/src/maidenhair/parsers/base.py#L33-L49 | def load(self, filename, **kwargs):
"""
Parse a file specified with the filename and return a numpy array
Parameters
----------
filename : string
A path of a file
Returns
-------
ndarray
An instance of numpy array
"""
with open(filename, 'r') as f:
return self.parse(f, **kwargs) | [
"def",
"load",
"(",
"self",
",",
"filename",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"f",
":",
"return",
"self",
".",
"parse",
"(",
"f",
",",
"*",
"*",
"kwargs",
")"
]
| Parse a file specified with the filename and return a numpy array
Parameters
----------
filename : string
A path of a file
Returns
-------
ndarray
An instance of numpy array | [
"Parse",
"a",
"file",
"specified",
"with",
"the",
"filename",
"and",
"return",
"an",
"numpy",
"array"
]
| python | train |
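A minimal sketch showing how load() above is typically reached through a concrete parser; the base class and import names are assumptions, since the record only shows the base class method:

    from maidenhair.parsers.base import BaseParser  # base class name is an assumption

    class WhitespaceParser(BaseParser):             # hypothetical concrete subclass
        def parse(self, stream, **kwargs):
            return [line.split() for line in stream]

    data = WhitespaceParser().load('measurement.txt')  # placeholder filename
    print(data)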
openxc/openxc-python | openxc/sources/base.py | https://github.com/openxc/openxc-python/blob/4becb4a6310bd658c125195ef6ffea4deaf7d7e7/openxc/sources/base.py#L141-L163 | def run(self):
"""Continuously read data from the source and attempt to parse a valid
message from the buffer of bytes. When a message is parsed, passes it
off to the callback if one is set.
"""
message_buffer = b""
while self.running:
try:
message_buffer += self.source.read_logs()
except DataSourceError as e:
if self.running:
LOG.warn("Can't read logs from data source -- stopping: %s", e)
break
except NotImplementedError as e:
LOG.info("%s doesn't support logging" % self)
break
while True:
if "\x00" not in message_buffer:
break
record, _, remainder = message_buffer.partition(b"\x00")
self.record(record)
message_buffer = remainder | [
"def",
"run",
"(",
"self",
")",
":",
"message_buffer",
"=",
"b\"\"",
"while",
"self",
".",
"running",
":",
"try",
":",
"message_buffer",
"+=",
"self",
".",
"source",
".",
"read_logs",
"(",
")",
"except",
"DataSourceError",
"as",
"e",
":",
"if",
"self",
".",
"running",
":",
"LOG",
".",
"warn",
"(",
"\"Can't read logs from data source -- stopping: %s\"",
",",
"e",
")",
"break",
"except",
"NotImplementedError",
"as",
"e",
":",
"LOG",
".",
"info",
"(",
"\"%s doesn't support logging\"",
"%",
"self",
")",
"break",
"while",
"True",
":",
"if",
"\"\\x00\"",
"not",
"in",
"message_buffer",
":",
"break",
"record",
",",
"_",
",",
"remainder",
"=",
"message_buffer",
".",
"partition",
"(",
"b\"\\x00\"",
")",
"self",
".",
"record",
"(",
"record",
")",
"message_buffer",
"=",
"remainder"
]
| Continuously read data from the source and attempt to parse a valid
message from the buffer of bytes. When a message is parsed, passes it
off to the callback if one is set. | [
"Continuously",
"read",
"data",
"from",
"the",
"source",
"and",
"attempt",
"to",
"parse",
"a",
"valid",
"message",
"from",
"the",
"buffer",
"of",
"bytes",
".",
"When",
"a",
"message",
"is",
"parsed",
"passes",
"it",
"off",
"to",
"the",
"callback",
"if",
"one",
"is",
"set",
"."
]
| python | train |
SwissDataScienceCenter/renku-python | renku/cli/workflow.py | https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/cli/workflow.py#L120-L123 | def rename(client, old, new, force):
"""Rename the workflow named <old> to <new>."""
from renku.models.refs import LinkReference
LinkReference(client=client, name=_ref(old)).rename(_ref(new), force=force) | [
"def",
"rename",
"(",
"client",
",",
"old",
",",
"new",
",",
"force",
")",
":",
"from",
"renku",
".",
"models",
".",
"refs",
"import",
"LinkReference",
"LinkReference",
"(",
"client",
"=",
"client",
",",
"name",
"=",
"_ref",
"(",
"old",
")",
")",
".",
"rename",
"(",
"_ref",
"(",
"new",
")",
",",
"force",
"=",
"force",
")"
]
| Rename the workflow named <old> to <new>. | [
"Rename",
"the",
"workflow",
"named",
"<old",
">",
"to",
"<new",
">",
"."
]
| python | train |
mandiant/ioc_writer | ioc_writer/ioc_common.py | https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/ioc_writer/ioc_common.py#L237-L250 | def make_fileitem_peinfo_detectedentrypointsignature_name(entrypoint_name, condition='is', negate=False,
preserve_case=False):
"""
Create a node for FileItem/PEInfo/DetectedEntryPointSignature/Name
:return: An IndicatorItem represented as an Element node
"""
document = 'FileItem'
search = 'FileItem/PEInfo/DetectedEntryPointSignature/Name'
content_type = 'string'
content = entrypoint_name
ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content,
negate=negate, preserve_case=preserve_case)
return ii_node | [
"def",
"make_fileitem_peinfo_detectedentrypointsignature_name",
"(",
"entrypoint_name",
",",
"condition",
"=",
"'is'",
",",
"negate",
"=",
"False",
",",
"preserve_case",
"=",
"False",
")",
":",
"document",
"=",
"'FileItem'",
"search",
"=",
"'FileItem/PEInfo/DetectedEntryPointSignature/Name'",
"content_type",
"=",
"'string'",
"content",
"=",
"entrypoint_name",
"ii_node",
"=",
"ioc_api",
".",
"make_indicatoritem_node",
"(",
"condition",
",",
"document",
",",
"search",
",",
"content_type",
",",
"content",
",",
"negate",
"=",
"negate",
",",
"preserve_case",
"=",
"preserve_case",
")",
"return",
"ii_node"
]
| Create a node for FileItem/PEInfo/DetectedEntryPointSignature/Name
:return: An IndicatorItem represented as an Element node | [
"Create",
"a",
"node",
"for",
"FileItem",
"/",
"PEInfo",
"/",
"DetectedEntryPointSignature",
"/",
"Name",
":",
"return",
":",
"A",
"IndicatorItem",
"represented",
"as",
"an",
"Element",
"node"
]
| python | train |
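A direct usage sketch for the helper above; 'UPX' is just an example entry point signature name:

    from ioc_writer.ioc_common import make_fileitem_peinfo_detectedentrypointsignature_name

    # Build an IndicatorItem element matching DetectedEntryPointSignature/Name == 'UPX'.
    node = make_fileitem_peinfo_detectedentrypointsignature_name('UPX', condition='is')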
numenta/htmresearch | htmresearch/frameworks/poirazi_neuron_model/data_tools.py | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/poirazi_neuron_model/data_tools.py#L317-L329 | def generate_RF_bins(data, dim = 40, num_bins = 10):
"""
Generates bins for the encoder. Bins are designed to have equal frequency,
per Poirazi & Mel (2001), which requires reading the data once.
Bins are represented as the intervals dividing them.
"""
intervals = []
for i in range(dim):
current_dim_data = [data[x][i] for x in range(len(data))]
current_dim_data = numpy.sort(current_dim_data)
intervals.append([current_dim_data[int(len(current_dim_data)*x/num_bins)]
for x in range(1, num_bins)])
return intervals | [
"def",
"generate_RF_bins",
"(",
"data",
",",
"dim",
"=",
"40",
",",
"num_bins",
"=",
"10",
")",
":",
"intervals",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"dim",
")",
":",
"current_dim_data",
"=",
"[",
"data",
"[",
"x",
"]",
"[",
"i",
"]",
"for",
"x",
"in",
"range",
"(",
"len",
"(",
"data",
")",
")",
"]",
"current_dim_data",
"=",
"numpy",
".",
"sort",
"(",
"current_dim_data",
")",
"intervals",
".",
"append",
"(",
"[",
"current_dim_data",
"[",
"int",
"(",
"len",
"(",
"current_dim_data",
")",
"*",
"x",
"/",
"num_bins",
")",
"]",
"for",
"x",
"in",
"range",
"(",
"1",
",",
"num_bins",
")",
"]",
")",
"return",
"intervals"
]
| Generates bins for the encoder. Bins are designed to have equal frequency,
per Poirazi & Mel (2001), which requires reading the data once.
Bins are represented as the intervals dividing them. | [
"Generates",
"bins",
"for",
"the",
"encoder",
".",
"Bins",
"are",
"designed",
"to",
"have",
"equal",
"frequency",
"per",
"Poirazi",
"&",
"Mel",
"(",
"2001",
")",
"which",
"requires",
"reading",
"the",
"data",
"once",
".",
"Bins",
"are",
"represented",
"as",
"the",
"intervals",
"dividing",
"them",
"."
]
| python | train |
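A small, self-contained sketch of generate_RF_bins() above on toy data; the import path follows the record and the numpy data is random:

    import numpy
    from htmresearch.frameworks.poirazi_neuron_model.data_tools import generate_RF_bins

    # 500 random samples with 40 dimensions; with num_bins=10 each dimension
    # gets 9 interval boundaries (equal-frequency bins).
    data = numpy.random.rand(500, 40)
    intervals = generate_RF_bins(data, dim=40, num_bins=10)
    print(len(intervals), len(intervals[0]))  # 40, 9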
gebn/wood | wood/__init__.py | https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/__init__.py#L63-L88 | def compare(left: Union[str, pathlib.Path, _Entity],
right: Union[str, pathlib.Path, _Entity]) -> Comparison:
"""
Compare two paths.
:param left: The left side or "before" entity.
:param right: The right side or "after" entity.
:return: A comparison detailing what has changed from the left side to the
right side.
"""
def normalise(param: Union[str, pathlib.Path, _Entity]) -> _Entity:
"""
Turns any one of a number of types of input into an entity.
:param param: The input - either a path string, a path object, or a
full blown entity.
:return: The input param as an entity.
"""
if isinstance(param, str):
param = pathlib.Path(param)
if isinstance(param, pathlib.Path):
param = _Entity.from_path(param)
return param
return Comparison.compare(normalise(left), normalise(right)) | [
"def",
"compare",
"(",
"left",
":",
"Union",
"[",
"str",
",",
"pathlib",
".",
"Path",
",",
"_Entity",
"]",
",",
"right",
":",
"Union",
"[",
"str",
",",
"pathlib",
".",
"Path",
",",
"_Entity",
"]",
")",
"->",
"Comparison",
":",
"def",
"normalise",
"(",
"param",
":",
"Union",
"[",
"str",
",",
"pathlib",
".",
"Path",
",",
"_Entity",
"]",
")",
"->",
"_Entity",
":",
"\"\"\"\n Turns any one of a number of types of input into an entity.\n\n :param param: The input - either a path string, a path object, or a\n full blown entity.\n :return: The input param as an entity.\n \"\"\"",
"if",
"isinstance",
"(",
"param",
",",
"str",
")",
":",
"param",
"=",
"pathlib",
".",
"Path",
"(",
"param",
")",
"if",
"isinstance",
"(",
"param",
",",
"pathlib",
".",
"Path",
")",
":",
"param",
"=",
"_Entity",
".",
"from_path",
"(",
"param",
")",
"return",
"param",
"return",
"Comparison",
".",
"compare",
"(",
"normalise",
"(",
"left",
")",
",",
"normalise",
"(",
"right",
")",
")"
]
| Compare two paths.
:param left: The left side or "before" entity.
:param right: The right side or "after" entity.
:return: A comparison detailing what has changed from the left side to the
right side. | [
"Compare",
"two",
"paths",
"."
]
| python | train |
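A quick usage sketch of the module-level compare() above; the directory paths are placeholders:

    import wood

    # Compare two directory trees and get a Comparison describing what changed
    # from the left ("before") side to the right ("after") side.
    comparison = wood.compare('site/before', 'site/after')
    print(comparison)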
jayclassless/basicserial | src/basicserial/__init__.py | https://github.com/jayclassless/basicserial/blob/da779edd955ba1009d14fae4e5926e29ad112b9d/src/basicserial/__init__.py#L372-L393 | def from_toml(value, native_datetimes=True):
"""
Deserializes the given value from TOML.
:param value: the value to deserialize
:type value: str
:param native_datetimes:
whether or not strings that look like dates/times should be
automatically cast to the native objects, or left as strings; if not
specified, defaults to ``True``
:type native_datetimes: bool
"""
if not toml:
raise NotImplementedError('No supported TOML library available')
result = toml.loads(value)
if native_datetimes:
result = convert_datetimes(result)
return result | [
"def",
"from_toml",
"(",
"value",
",",
"native_datetimes",
"=",
"True",
")",
":",
"if",
"not",
"toml",
":",
"raise",
"NotImplementedError",
"(",
"'No supported TOML library available'",
")",
"result",
"=",
"toml",
".",
"loads",
"(",
"value",
")",
"if",
"native_datetimes",
":",
"result",
"=",
"convert_datetimes",
"(",
"result",
")",
"return",
"result"
]
| Deserializes the given value from TOML.
:param value: the value to deserialize
:type value: str
:param native_datetimes:
whether or not strings that look like dates/times should be
automatically cast to the native objects, or left as strings; if not
specified, defaults to ``True``
:type native_datetimes: bool | [
"Deserializes",
"the",
"given",
"value",
"from",
"TOML",
"."
]
| python | train |
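A tiny usage sketch of from_toml() above; it assumes a supported TOML library is installed, otherwise the function raises NotImplementedError:

    from basicserial import from_toml  # defined in basicserial/__init__.py per the record

    doc = from_toml('answer = 42')
    print(doc['answer'])  # 42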
estnltk/estnltk | estnltk/prettyprinter/marker.py | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/marker.py#L100-L132 | def create_tags_with_concatenated_css_classes(tags):
"""Function that creates <mark> tags such that they are not overlapping.
In order to do this, it concatenates the css classes and stores the concatenated
result in new tags.
"""
current_classes = set()
result = []
for pos, group in group_tags_at_same_position(tags):
opening, closing = get_opening_closing_tags(group)
# handle closing tags at current position
closing_added = False
if len(closing) > 0:
closing_tag = Tag(pos, False, '')
for tag in closing:
current_classes.remove(tag.css_class)
result.append(closing_tag)
closing_added = True
# handle opening tags at current position
opening_added = False
if len(opening) > 0:
# handle the begin of an overlap
if not closing_added and len(current_classes) > 0:
result.append(Tag(pos, False, ''))
for tag in opening:
current_classes.add(tag.css_class)
opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
result.append(opening_tag)
opening_added = True
# handle the end of an overlap
if closing_added and not opening_added and len(current_classes) > 0:
opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
result.append(opening_tag)
return result | [
"def",
"create_tags_with_concatenated_css_classes",
"(",
"tags",
")",
":",
"current_classes",
"=",
"set",
"(",
")",
"result",
"=",
"[",
"]",
"for",
"pos",
",",
"group",
"in",
"group_tags_at_same_position",
"(",
"tags",
")",
":",
"opening",
",",
"closing",
"=",
"get_opening_closing_tags",
"(",
"group",
")",
"# handle closing tags at current position",
"closing_added",
"=",
"False",
"if",
"len",
"(",
"closing",
")",
">",
"0",
":",
"closing_tag",
"=",
"Tag",
"(",
"pos",
",",
"False",
",",
"''",
")",
"for",
"tag",
"in",
"closing",
":",
"current_classes",
".",
"remove",
"(",
"tag",
".",
"css_class",
")",
"result",
".",
"append",
"(",
"closing_tag",
")",
"closing_added",
"=",
"True",
"# handle opening tags at current position",
"opening_added",
"=",
"False",
"if",
"len",
"(",
"opening",
")",
">",
"0",
":",
"# handle the begin of an overlap",
"if",
"not",
"closing_added",
"and",
"len",
"(",
"current_classes",
")",
">",
"0",
":",
"result",
".",
"append",
"(",
"Tag",
"(",
"pos",
",",
"False",
",",
"''",
")",
")",
"for",
"tag",
"in",
"opening",
":",
"current_classes",
".",
"add",
"(",
"tag",
".",
"css_class",
")",
"opening_tag",
"=",
"Tag",
"(",
"pos",
",",
"True",
",",
"' '",
".",
"join",
"(",
"sorted",
"(",
"current_classes",
")",
")",
")",
"result",
".",
"append",
"(",
"opening_tag",
")",
"opening_added",
"=",
"True",
"# handle the end of an overlap",
"if",
"closing_added",
"and",
"not",
"opening_added",
"and",
"len",
"(",
"current_classes",
")",
">",
"0",
":",
"opening_tag",
"=",
"Tag",
"(",
"pos",
",",
"True",
",",
"' '",
".",
"join",
"(",
"sorted",
"(",
"current_classes",
")",
")",
")",
"result",
".",
"append",
"(",
"opening_tag",
")",
"return",
"result"
]
| Function that creates <mark> tags such that they are not overlapping.
In order to do this, it concatenates the css classes and stores the concatenated
result in new tags. | [
"Function",
"that",
"creates",
"<mark",
">",
"tags",
"such",
"that",
"they",
"are",
"not",
"overlapping",
".",
"In",
"order",
"to",
"do",
"this",
"it",
"concatenates",
"the",
"css",
"classes",
"and",
"stores",
"the",
"concatenated",
"result",
"in",
"new",
"tags",
"."
]
| python | train |
alextricity25/dwell_in_you_richly | diyr/utils/bible.py | https://github.com/alextricity25/dwell_in_you_richly/blob/e705e1bc4fc0b8d2aa25680dfc432762b361c783/diyr/utils/bible.py#L122-L141 | def get_chapter(self, book_name, book_chapter, cache_chapter = True):
"""
Returns a chapter of the bible, first checking to see if that
chapter is on disk. If not, then it attempts to fetch it from
the internet.
NOTE: This is a public facing method. If the method signature changes,
then it needs to be documented and backwards-compatibility
needs to be preserved.
"""
try:
logging.debug("Attempting to read chapter from disk")
verses_list = self._get_ondisk_chapter(book_name, book_chapter)
except Exception as e:
logging.debug("Could not read file from disk. Attempting the internet..")
logging.debug(e.message)
verses_list = self._get_online_chapter(book_name, book_chapter,
cache_chapter = cache_chapter)
return verses_list | [
"def",
"get_chapter",
"(",
"self",
",",
"book_name",
",",
"book_chapter",
",",
"cache_chapter",
"=",
"True",
")",
":",
"try",
":",
"logging",
".",
"debug",
"(",
"\"Attempting to read chapter from disk\"",
")",
"verses_list",
"=",
"self",
".",
"_get_ondisk_chapter",
"(",
"book_name",
",",
"book_chapter",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"debug",
"(",
"\"Could not read file from disk. Attempting the internet..\"",
")",
"logging",
".",
"debug",
"(",
"e",
".",
"message",
")",
"verses_list",
"=",
"self",
".",
"_get_online_chapter",
"(",
"book_name",
",",
"book_chapter",
",",
"cache_chapter",
"=",
"cache_chapter",
")",
"return",
"verses_list"
]
| Returns a chapter of the bible, first checking to see if that
chapter is on disk. If not, then it attempts to fetch it from
the internet.
NOTE: This is a public facing method. If the method signature changes,
then it needs to be documented and backwards-compatibility
needs to be preserved. | [
"Returns",
"a",
"chapter",
"of",
"the",
"bible",
"first",
"checking",
"to",
"see",
"if",
"that",
"chapter",
"is",
"on",
"disk",
".",
"If",
"not",
"hen",
"it",
"attempts",
"to",
"fetch",
"it",
"from",
"the",
"internet",
"."
]
| python | train |
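A hedged usage sketch of get_chapter() above; the Bible class name and its constructor arguments are assumptions based on the module path, since only the method body is shown:

    from diyr.utils.bible import Bible  # assumed class and import names

    bible = Bible()
    verses = bible.get_chapter('John', 3, cache_chapter=True)
    for verse in verses:
        print(verse)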
jaywink/federation | federation/entities/diaspora/entities.py | https://github.com/jaywink/federation/blob/59d31bb37e662891dbea72c1dee05dc53146c78b/federation/entities/diaspora/entities.py#L36-L47 | def to_xml(self):
"""Convert to XML message."""
element = etree.Element(self._tag_name)
struct_to_xml(element, [
{"text": self.raw_content},
{"guid": self.guid},
{"author": self.handle},
{"public": "true" if self.public else "false"},
{"created_at": format_dt(self.created_at)},
{"provider_display_name": self.provider_display_name},
])
return element | [
"def",
"to_xml",
"(",
"self",
")",
":",
"element",
"=",
"etree",
".",
"Element",
"(",
"self",
".",
"_tag_name",
")",
"struct_to_xml",
"(",
"element",
",",
"[",
"{",
"\"text\"",
":",
"self",
".",
"raw_content",
"}",
",",
"{",
"\"guid\"",
":",
"self",
".",
"guid",
"}",
",",
"{",
"\"author\"",
":",
"self",
".",
"handle",
"}",
",",
"{",
"\"public\"",
":",
"\"true\"",
"if",
"self",
".",
"public",
"else",
"\"false\"",
"}",
",",
"{",
"\"created_at\"",
":",
"format_dt",
"(",
"self",
".",
"created_at",
")",
"}",
",",
"{",
"\"provider_display_name\"",
":",
"self",
".",
"provider_display_name",
"}",
",",
"]",
")",
"return",
"element"
]
| Convert to XML message. | [
"Convert",
"to",
"XML",
"message",
"."
]
| python | train |
ActionAgile/trellostats | trellostats/cli.py | https://github.com/ActionAgile/trellostats/blob/695039ba9a787d0fdb71ec90cee52193ca98e489/trellostats/cli.py#L79-L100 | def report(ctx, board, done, output):
ctx.obj['board_id'] = board
ts = TrelloStats(ctx.obj)
"""
Reporting mode - Daily snapshots of a board for ongoing reporting:
-> trellis report --board=87hiudhw
--spend
--revenue
--done=Done
"""
ct = cycle_time(ts, board, done)
env = get_env()
# Get all render functions from the module and filter out the ones we don't want.
render_functions = [target for target in
dir(sys.modules['trellostats.reports'])
if target.startswith("render_") and
target.endswith(output)]
for render_func in render_functions:
print globals()[render_func](env, **dict(cycle_time=ct)) | [
"def",
"report",
"(",
"ctx",
",",
"board",
",",
"done",
",",
"output",
")",
":",
"ctx",
".",
"obj",
"[",
"'board_id'",
"]",
"=",
"board",
"ts",
"=",
"TrelloStats",
"(",
"ctx",
".",
"obj",
")",
"ct",
"=",
"cycle_time",
"(",
"ts",
",",
"board",
",",
"done",
")",
"env",
"=",
"get_env",
"(",
")",
"# Get all render functions from the module and filter out the ones we don't want.",
"render_functions",
"=",
"[",
"target",
"for",
"target",
"in",
"dir",
"(",
"sys",
".",
"modules",
"[",
"'trellostats.reports'",
"]",
")",
"if",
"target",
".",
"startswith",
"(",
"\"render_\"",
")",
"and",
"target",
".",
"endswith",
"(",
"output",
")",
"]",
"for",
"render_func",
"in",
"render_functions",
":",
"print",
"globals",
"(",
")",
"[",
"render_func",
"]",
"(",
"env",
",",
"*",
"*",
"dict",
"(",
"cycle_time",
"=",
"ct",
")",
")"
]
| Reporting mode - Daily snapshots of a board for ongoing reporting:
-> trellis report --board=87hiudhw
--spend
--revenue
--done=Done | [
"Reporting",
"mode",
"-",
"Daily",
"snapshots",
"of",
"a",
"board",
"for",
"ongoing",
"reporting",
":",
"-",
">",
"trellis",
"report",
"--",
"board",
"=",
"87hiudhw",
"--",
"spend",
"--",
"revenue",
"--",
"done",
"=",
"Done"
]
| python | train |
spotify/gordon-janitor | gordon_janitor/main.py | https://github.com/spotify/gordon-janitor/blob/e0df2002caf3aac528818743d8d0717790957044/gordon_janitor/main.py#L76-L98 | def setup(config_root=''):
"""
Service configuration and logging setup.
Configuration defined in ``gordon-janitor-user.toml`` will overwrite
``gordon-janitor.toml``.
Args:
config_root (str): where configuration should load from,
defaults to current working directory.
Returns:
A dict for Gordon service configuration
"""
config = _load_config(root=config_root)
logging_config = config.get('core', {}).get('logging', {})
log_level = logging_config.get('level', 'INFO').upper()
log_handlers = logging_config.get('handlers') or ['syslog']
ulogger.setup_logging(
progname='gordon-janitor', level=log_level, handlers=log_handlers)
return config | [
"def",
"setup",
"(",
"config_root",
"=",
"''",
")",
":",
"config",
"=",
"_load_config",
"(",
"root",
"=",
"config_root",
")",
"logging_config",
"=",
"config",
".",
"get",
"(",
"'core'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'logging'",
",",
"{",
"}",
")",
"log_level",
"=",
"logging_config",
".",
"get",
"(",
"'level'",
",",
"'INFO'",
")",
".",
"upper",
"(",
")",
"log_handlers",
"=",
"logging_config",
".",
"get",
"(",
"'handlers'",
")",
"or",
"[",
"'syslog'",
"]",
"ulogger",
".",
"setup_logging",
"(",
"progname",
"=",
"'gordon-janitor'",
",",
"level",
"=",
"log_level",
",",
"handlers",
"=",
"log_handlers",
")",
"return",
"config"
]
| Service configuration and logging setup.
Configuration defined in ``gordon-janitor-user.toml`` will overwrite
``gordon-janitor.toml``.
Args:
config_root (str): where configuration should load from,
defaults to current working directory.
Returns:
A dict for Gordon service configuration | [
"Service",
"configuration",
"and",
"logging",
"setup",
"."
]
| python | train |
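An illustrative call to setup() from the record above; the configuration directory is a placeholder:

    from gordon_janitor.main import setup  # module path from the record

    # Load gordon-janitor.toml / gordon-janitor-user.toml from the given directory,
    # configure logging, and return the merged config dict.
    config = setup(config_root='/etc/gordon-janitor')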
openego/ding0 | ding0/core/network/grids.py | https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/network/grids.py#L526-L655 | def set_default_branch_type(self, debug=False):
""" Determines default branch type according to grid district's peak load and standard equipment.
Args
----
debug: bool, defaults to False
If True, information is printed during process
Returns
-------
:pandas:`pandas.Series<series>`
default branch type: pandas Series object. If no appropriate type is found, return largest possible one.
:pandas:`pandas.Series<series>`
default branch type max: pandas Series object. Largest available line/cable type
Notes
-----
Parameter values for cables and lines are taken from [#]_, [#]_ and [#]_.
Lines are chosen to have 60 % load relative to their nominal capacity according to [#]_.
Decision on usage of overhead lines vs. cables is determined by load density of the considered region. Urban
areas usually are equipped with underground cables whereas rural areas often have overhead lines as MV
distribution system [#]_.
References
----------
.. [#] Klaus Heuck et al., "Elektrische Energieversorgung", Vieweg+Teubner, Wiesbaden, 2007
.. [#] René Flosdorff et al., "Elektrische Energieverteilung", Vieweg+Teubner, 2005
.. [#] Südkabel GmbH, "Einadrige VPE-isolierte Mittelspannungskabel",
http://www.suedkabel.de/cms/upload/pdf/Garnituren/Einadrige_VPE-isolierte_Mittelspannungskabel.pdf, 2017
.. [#] Deutsche Energie-Agentur GmbH (dena), "dena-Verteilnetzstudie. Ausbau- und Innovationsbedarf der
Stromverteilnetze in Deutschland bis 2030.", 2012
.. [#] Tao, X., "Automatisierte Grundsatzplanung von
Mittelspannungsnetzen", Dissertation, RWTH Aachen, 2007
"""
# decide whether cable or line is used (initially for entire grid) and set grid's attribute
if self.v_level == 20:
self.default_branch_kind = 'line'
elif self.v_level == 10:
self.default_branch_kind = 'cable'
# get power factor for loads
cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
# get max. count of half rings per MV grid district
mv_half_ring_count_max = int(cfg_ding0.get('mv_routing_tech_constraints',
'mv_half_ring_count_max'))
#mv_half_ring_count_max=20
# load cable/line assumptions, file_names and parameter
if self.default_branch_kind == 'line':
load_factor_normal = float(cfg_ding0.get('assumptions',
'load_factor_mv_line_lc_normal'))
branch_parameters = self.network.static_data['MV_overhead_lines']
# load cables as well to use it within settlements
branch_parameters_settle = self.network.static_data['MV_cables']
# select types with appropriate voltage level
branch_parameters_settle = branch_parameters_settle[branch_parameters_settle['U_n'] == self.v_level]
elif self.default_branch_kind == 'cable':
load_factor_normal = float(cfg_ding0.get('assumptions',
'load_factor_mv_cable_lc_normal'))
branch_parameters = self.network.static_data['MV_cables']
else:
raise ValueError('Grid\'s default_branch_kind is invalid, could not set branch parameters.')
# select appropriate branch params according to voltage level, sorted ascending by max. current
# use <240mm2 only (ca. 420A) for initial rings and for disambiguation of agg. LA
branch_parameters = branch_parameters[branch_parameters['U_n'] == self.v_level]
branch_parameters = branch_parameters[branch_parameters['reinforce_only'] == 0].sort_values('I_max_th')
# get largest line/cable type
branch_type_max = branch_parameters.loc[branch_parameters['I_max_th'].idxmax()]
# set aggregation flag using largest available line/cable
self.set_nodes_aggregation_flag(branch_type_max['I_max_th'] * load_factor_normal)
# calc peak current sum (= "virtual" current) of whole grid (I = S / sqrt(3) / U) excluding load areas of type
# satellite and aggregated
peak_current_sum = ((self.grid_district.peak_load -
self.grid_district.peak_load_satellites -
self.grid_district.peak_load_aggregated) /
cos_phi_load /
(3**0.5) / self.v_level) # units: kVA / kV = A
branch_type_settle = branch_type_settle_max = None
# search the smallest possible line/cable for MV grid district in equipment datasets for all load areas
# excluding those of type satellite and aggregated
for idx, row in branch_parameters.iterrows():
# calc number of required rings using peak current sum of grid district,
# load factor and max. current of line/cable
half_ring_count = round(peak_current_sum / (row['I_max_th'] * load_factor_normal))
if debug:
logger.debug('=== Selection of default branch type in {} ==='.format(self))
logger.debug('Peak load= {} kVA'.format(self.grid_district.peak_load))
logger.debug('Peak current={}'.format(peak_current_sum))
logger.debug('I_max_th={}'.format(row['I_max_th']))
logger.debug('Half ring count={}'.format(half_ring_count))
# if count of half rings is below or equal max. allowed count, use current branch type as default
if half_ring_count <= mv_half_ring_count_max:
if self.default_branch_kind == 'line':
# take only cables that can handle at least the current of the line
branch_parameters_settle_filter = branch_parameters_settle[\
branch_parameters_settle['I_max_th'] - row['I_max_th'] > 0]
# get cable type with similar (but greater) I_max_th
# note: only grids with lines as default branch kind get cables in settlements
# (not required in grids with cables as default branch kind)
branch_type_settle = branch_parameters_settle_filter.loc[\
branch_parameters_settle_filter['I_max_th'].idxmin()]
return row, branch_type_max, branch_type_settle
# no equipment was found, return largest available line/cable
if debug:
logger.debug('No appropriate line/cable type could be found for '
'{}, declare some load areas as aggregated.'.format(self))
if self.default_branch_kind == 'line':
branch_type_settle_max = branch_parameters_settle.loc[branch_parameters_settle['I_max_th'].idxmax()]
return branch_type_max, branch_type_max, branch_type_settle_max | [
"def",
"set_default_branch_type",
"(",
"self",
",",
"debug",
"=",
"False",
")",
":",
"# decide whether cable or line is used (initially for entire grid) and set grid's attribute",
"if",
"self",
".",
"v_level",
"==",
"20",
":",
"self",
".",
"default_branch_kind",
"=",
"'line'",
"elif",
"self",
".",
"v_level",
"==",
"10",
":",
"self",
".",
"default_branch_kind",
"=",
"'cable'",
"# get power factor for loads",
"cos_phi_load",
"=",
"cfg_ding0",
".",
"get",
"(",
"'assumptions'",
",",
"'cos_phi_load'",
")",
"# get max. count of half rings per MV grid district",
"mv_half_ring_count_max",
"=",
"int",
"(",
"cfg_ding0",
".",
"get",
"(",
"'mv_routing_tech_constraints'",
",",
"'mv_half_ring_count_max'",
")",
")",
"#mv_half_ring_count_max=20",
"# load cable/line assumptions, file_names and parameter",
"if",
"self",
".",
"default_branch_kind",
"==",
"'line'",
":",
"load_factor_normal",
"=",
"float",
"(",
"cfg_ding0",
".",
"get",
"(",
"'assumptions'",
",",
"'load_factor_mv_line_lc_normal'",
")",
")",
"branch_parameters",
"=",
"self",
".",
"network",
".",
"static_data",
"[",
"'MV_overhead_lines'",
"]",
"# load cables as well to use it within settlements",
"branch_parameters_settle",
"=",
"self",
".",
"network",
".",
"static_data",
"[",
"'MV_cables'",
"]",
"# select types with appropriate voltage level",
"branch_parameters_settle",
"=",
"branch_parameters_settle",
"[",
"branch_parameters_settle",
"[",
"'U_n'",
"]",
"==",
"self",
".",
"v_level",
"]",
"elif",
"self",
".",
"default_branch_kind",
"==",
"'cable'",
":",
"load_factor_normal",
"=",
"float",
"(",
"cfg_ding0",
".",
"get",
"(",
"'assumptions'",
",",
"'load_factor_mv_cable_lc_normal'",
")",
")",
"branch_parameters",
"=",
"self",
".",
"network",
".",
"static_data",
"[",
"'MV_cables'",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Grid\\'s default_branch_kind is invalid, could not set branch parameters.'",
")",
"# select appropriate branch params according to voltage level, sorted ascending by max. current",
"# use <240mm2 only (ca. 420A) for initial rings and for disambiguation of agg. LA",
"branch_parameters",
"=",
"branch_parameters",
"[",
"branch_parameters",
"[",
"'U_n'",
"]",
"==",
"self",
".",
"v_level",
"]",
"branch_parameters",
"=",
"branch_parameters",
"[",
"branch_parameters",
"[",
"'reinforce_only'",
"]",
"==",
"0",
"]",
".",
"sort_values",
"(",
"'I_max_th'",
")",
"# get largest line/cable type",
"branch_type_max",
"=",
"branch_parameters",
".",
"loc",
"[",
"branch_parameters",
"[",
"'I_max_th'",
"]",
".",
"idxmax",
"(",
")",
"]",
"# set aggregation flag using largest available line/cable",
"self",
".",
"set_nodes_aggregation_flag",
"(",
"branch_type_max",
"[",
"'I_max_th'",
"]",
"*",
"load_factor_normal",
")",
"# calc peak current sum (= \"virtual\" current) of whole grid (I = S / sqrt(3) / U) excluding load areas of type",
"# satellite and aggregated",
"peak_current_sum",
"=",
"(",
"(",
"self",
".",
"grid_district",
".",
"peak_load",
"-",
"self",
".",
"grid_district",
".",
"peak_load_satellites",
"-",
"self",
".",
"grid_district",
".",
"peak_load_aggregated",
")",
"/",
"cos_phi_load",
"/",
"(",
"3",
"**",
"0.5",
")",
"/",
"self",
".",
"v_level",
")",
"# units: kVA / kV = A",
"branch_type_settle",
"=",
"branch_type_settle_max",
"=",
"None",
"# search the smallest possible line/cable for MV grid district in equipment datasets for all load areas",
"# excluding those of type satellite and aggregated",
"for",
"idx",
",",
"row",
"in",
"branch_parameters",
".",
"iterrows",
"(",
")",
":",
"# calc number of required rings using peak current sum of grid district,",
"# load factor and max. current of line/cable",
"half_ring_count",
"=",
"round",
"(",
"peak_current_sum",
"/",
"(",
"row",
"[",
"'I_max_th'",
"]",
"*",
"load_factor_normal",
")",
")",
"if",
"debug",
":",
"logger",
".",
"debug",
"(",
"'=== Selection of default branch type in {} ==='",
".",
"format",
"(",
"self",
")",
")",
"logger",
".",
"debug",
"(",
"'Peak load= {} kVA'",
".",
"format",
"(",
"self",
".",
"grid_district",
".",
"peak_load",
")",
")",
"logger",
".",
"debug",
"(",
"'Peak current={}'",
".",
"format",
"(",
"peak_current_sum",
")",
")",
"logger",
".",
"debug",
"(",
"'I_max_th={}'",
".",
"format",
"(",
"row",
"[",
"'I_max_th'",
"]",
")",
")",
"logger",
".",
"debug",
"(",
"'Half ring count={}'",
".",
"format",
"(",
"half_ring_count",
")",
")",
"# if count of half rings is below or equal max. allowed count, use current branch type as default",
"if",
"half_ring_count",
"<=",
"mv_half_ring_count_max",
":",
"if",
"self",
".",
"default_branch_kind",
"==",
"'line'",
":",
"# take only cables that can handle at least the current of the line",
"branch_parameters_settle_filter",
"=",
"branch_parameters_settle",
"[",
"branch_parameters_settle",
"[",
"'I_max_th'",
"]",
"-",
"row",
"[",
"'I_max_th'",
"]",
">",
"0",
"]",
"# get cable type with similar (but greater) I_max_th",
"# note: only grids with lines as default branch kind get cables in settlements",
"# (not required in grids with cables as default branch kind)",
"branch_type_settle",
"=",
"branch_parameters_settle_filter",
".",
"loc",
"[",
"branch_parameters_settle_filter",
"[",
"'I_max_th'",
"]",
".",
"idxmin",
"(",
")",
"]",
"return",
"row",
",",
"branch_type_max",
",",
"branch_type_settle",
"# no equipment was found, return largest available line/cable",
"if",
"debug",
":",
"logger",
".",
"debug",
"(",
"'No appropriate line/cable type could be found for '",
"'{}, declare some load areas as aggregated.'",
".",
"format",
"(",
"self",
")",
")",
"if",
"self",
".",
"default_branch_kind",
"==",
"'line'",
":",
"branch_type_settle_max",
"=",
"branch_parameters_settle",
".",
"loc",
"[",
"branch_parameters_settle",
"[",
"'I_max_th'",
"]",
".",
"idxmax",
"(",
")",
"]",
"return",
"branch_type_max",
",",
"branch_type_max",
",",
"branch_type_settle_max"
]
| Determines default branch type according to grid district's peak load and standard equipment.
Args
----
debug: bool, defaults to False
If True, information is printed during process
Returns
-------
:pandas:`pandas.Series<series>`
default branch type: pandas Series object. If no appropriate type is found, return largest possible one.
:pandas:`pandas.Series<series>`
default branch type max: pandas Series object. Largest available line/cable type
Notes
-----
Parameter values for cables and lines are taken from [#]_, [#]_ and [#]_.
Lines are chosen to have 60 % load relative to their nominal capacity according to [#]_.
Decision on usage of overhead lines vs. cables is determined by load density of the considered region. Urban
areas usually are equipped with underground cables whereas rural areas often have overhead lines as MV
distribution system [#]_.
References
----------
.. [#] Klaus Heuck et al., "Elektrische Energieversorgung", Vieweg+Teubner, Wiesbaden, 2007
.. [#] René Flosdorff et al., "Elektrische Energieverteilung", Vieweg+Teubner, 2005
.. [#] Südkabel GmbH, "Einadrige VPE-isolierte Mittelspannungskabel",
http://www.suedkabel.de/cms/upload/pdf/Garnituren/Einadrige_VPE-isolierte_Mittelspannungskabel.pdf, 2017
.. [#] Deutsche Energie-Agentur GmbH (dena), "dena-Verteilnetzstudie. Ausbau- und Innovationsbedarf der
Stromverteilnetze in Deutschland bis 2030.", 2012
.. [#] Tao, X., "Automatisierte Grundsatzplanung von
Mittelspannungsnetzen", Dissertation, RWTH Aachen, 2007 | [
"Determines",
"default",
"branch",
"type",
"according",
"to",
"grid",
"district",
"s",
"peak",
"load",
"and",
"standard",
"equipment",
"."
]
| python | train |
cs50/python-cs50 | src/cs50/cs50.py | https://github.com/cs50/python-cs50/blob/f987e9036bcf1bf60adf50a2827cc2cd5b9fd08a/src/cs50/cs50.py#L91-L109 | def get_float(prompt=None):
"""
Read a line of text from standard input and return the equivalent float
as precisely as possible; if text does not represent a double, user is
prompted to retry. If line can't be read, return None.
"""
while True:
s = get_string(prompt)
if s is None:
return None
if len(s) > 0 and re.search(r"^[+-]?\d*(?:\.\d*)?$", s):
try:
return float(s)
except ValueError:
pass
# Temporarily here for backwards compatibility
if prompt is None:
print("Retry: ", end="") | [
"def",
"get_float",
"(",
"prompt",
"=",
"None",
")",
":",
"while",
"True",
":",
"s",
"=",
"get_string",
"(",
"prompt",
")",
"if",
"s",
"is",
"None",
":",
"return",
"None",
"if",
"len",
"(",
"s",
")",
">",
"0",
"and",
"re",
".",
"search",
"(",
"r\"^[+-]?\\d*(?:\\.\\d*)?$\"",
",",
"s",
")",
":",
"try",
":",
"return",
"float",
"(",
"s",
")",
"except",
"ValueError",
":",
"pass",
"# Temporarily here for backwards compatibility",
"if",
"prompt",
"is",
"None",
":",
"print",
"(",
"\"Retry: \"",
",",
"end",
"=",
"\"\"",
")"
]
| Read a line of text from standard input and return the equivalent float
as precisely as possible; if text does not represent a double, user is
prompted to retry. If line can't be read, return None. | [
"Read",
"a",
"line",
"of",
"text",
"from",
"standard",
"input",
"and",
"return",
"the",
"equivalent",
"float",
"as",
"precisely",
"as",
"possible",
";",
"if",
"text",
"does",
"not",
"represent",
"a",
"double",
"user",
"is",
"prompted",
"to",
"retry",
".",
"If",
"line",
"can",
"t",
"be",
"read",
"return",
"None",
"."
]
| python | train |
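A short usage sketch of get_float() above, as it would appear in a CS50-style script:

    from cs50 import get_float

    # Re-prompts until the input parses as a float; returns None if input can't be read.
    price = get_float("Price: ")
    if price is not None:
        print(f"With tax: {price * 1.0625:.2f}")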
brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_hardware.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_hardware.py#L428-L440 | def get_flexports_output_flexport_list_port_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_flexports = ET.Element("get_flexports")
config = get_flexports
output = ET.SubElement(get_flexports, "output")
flexport_list = ET.SubElement(output, "flexport-list")
port_id = ET.SubElement(flexport_list, "port-id")
port_id.text = kwargs.pop('port_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"get_flexports_output_flexport_list_port_id",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_flexports",
"=",
"ET",
".",
"Element",
"(",
"\"get_flexports\"",
")",
"config",
"=",
"get_flexports",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"get_flexports",
",",
"\"output\"",
")",
"flexport_list",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"flexport-list\"",
")",
"port_id",
"=",
"ET",
".",
"SubElement",
"(",
"flexport_list",
",",
"\"port-id\"",
")",
"port_id",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'port_id'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
]
| Auto Generated Code | [
"Auto",
"Generated",
"Code"
]
| python | train |
JasonKessler/scattertext | scattertext/representations/EmbeddingsResolver.py | https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/representations/EmbeddingsResolver.py#L81-L97 | def project(self, projection_model=None):
'''
:param projection_model: sklearn unsupervised model (e.g., PCA) by default the recommended model is umap.UMAP,
which requires UMAP to be installed
:return: array, shape (num dimension, vocab size)
'''
if self.embeddings_ is None:
raise Exception("Run set_embeddings_model or set_embeddings to get embeddings")
if projection_model is None:
try:
import umap
except:
raise Exception("Please install umap (pip install umap-learn) to use the default projection_model.")
projection_model = umap.UMAP(min_dist=0.5, metric='cosine')
axes = projection_model.fit_transform(self.embeddings_)
return axes | [
"def",
"project",
"(",
"self",
",",
"projection_model",
"=",
"None",
")",
":",
"if",
"self",
".",
"embeddings_",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Run set_embeddings_model or set_embeddings to get embeddings\"",
")",
"if",
"projection_model",
"is",
"None",
":",
"try",
":",
"import",
"umap",
"except",
":",
"raise",
"Exception",
"(",
"\"Please install umap (pip install umap-learn) to use the default projection_model.\"",
")",
"projection_model",
"=",
"umap",
".",
"UMAP",
"(",
"min_dist",
"=",
"0.5",
",",
"metric",
"=",
"'cosine'",
")",
"axes",
"=",
"projection_model",
".",
"fit_transform",
"(",
"self",
".",
"embeddings_",
")",
"return",
"axes"
]
| :param projection_model: sklearn unsupervised model (e.g., PCA) by default the recommended model is umap.UMAP,
which requires UMAP to be installed
:return: array, shape (num dimension, vocab size) | [
":",
"param",
"projection_model",
":",
"sklearn",
"unsupervised",
"model",
"(",
"e",
".",
"g",
".",
"PCA",
")",
"by",
"default",
"the",
"recommended",
"model",
"is",
"umap",
".",
"UMAP",
"which",
"requires",
"UMAP",
"in",
"to",
"be",
"installed"
]
| python | train |
saltstack/salt | salt/runners/net.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/net.py#L121-L132 | def _get_net_runner_opts():
'''
Return the net.find runner options.
'''
runner_opts = __opts__.get('runners', {}).get('net.find', {})
return {
'target': runner_opts.get('target', _DEFAULT_TARGET),
'expr_form': runner_opts.get('expr_form', _DEFAULT_EXPR_FORM),
'ignore_interfaces': runner_opts.get('ignore_interfaces', _DEFAULT_IGNORE_INTF),
'display': runner_opts.get('display', _DEFAULT_DISPLAY),
'outputter': runner_opts.get('outputter', _DEFAULT_OUTPUTTER),
} | [
"def",
"_get_net_runner_opts",
"(",
")",
":",
"runner_opts",
"=",
"__opts__",
".",
"get",
"(",
"'runners'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'net.find'",
",",
"{",
"}",
")",
"return",
"{",
"'target'",
":",
"runner_opts",
".",
"get",
"(",
"'target'",
",",
"_DEFAULT_TARGET",
")",
",",
"'expr_form'",
":",
"runner_opts",
".",
"get",
"(",
"'expr_form'",
",",
"_DEFAULT_EXPR_FORM",
")",
",",
"'ignore_interfaces'",
":",
"runner_opts",
".",
"get",
"(",
"'ignore_interfaces'",
",",
"_DEFAULT_IGNORE_INTF",
")",
",",
"'display'",
":",
"runner_opts",
".",
"get",
"(",
"'display'",
",",
"_DEFAULT_DISPLAY",
")",
",",
"'outputter'",
":",
"runner_opts",
".",
"get",
"(",
"'outputter'",
",",
"_DEFAULT_OUTPUTTER",
")",
",",
"}"
]
| Return the net.find runner options. | [
"Return",
"the",
"net",
".",
"find",
"runner",
"options",
"."
]
| python | train |
gabstopper/smc-python | smc/routing/route_map.py | https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/routing/route_map.py#L229-L307 | def create(self, name, action='permit', goto=None, finish=False,
call=None, comment=None, add_pos=None, after=None,
before=None, **match_condition):
"""
Create a route map rule. You can provide match conditions
by using keyword arguments specifying the required types.
You can also create the route map rule and add match conditions
after.
:param str name: name for this rule
:param str action: permit or deny
:param str goto: specify a rule section to goto after if there
is a match condition. This will override the finish parameter
:param bool finish: finish stops the processing after a match condition.
If finish is False, processing will continue to the next rule.
:param RouteMap call: call another route map after matching.
:param str comment: optional comment for the rule
:param int add_pos: position to insert the rule, starting with position 1. If
the position value is greater than the number of rules, the rule is inserted at
the bottom. If add_pos is not provided, rule is inserted in position 1. Mutually
exclusive with ``after`` and ``before`` params.
:param str after: Rule tag to add this rule after. Mutually exclusive with ``add_pos``
and ``before`` params.
:param str before: Rule tag to add this rule before. Mutually exclusive with ``add_pos``
and ``after`` params.
:param match_condition: keyword values identifying initial
values for the match condition. Valid keyword arguments
are 'access_list', 'next_hop', 'metric' and 'peer_address'.
You can also optionally pass the keyword 'match_condition'
with an instance of MatchCondition.
:raises CreateRuleFailed: failure to insert rule with reason
:raises ElementNotFound: if references elements in a match condition
this can be raised when the element specified is not found.
.. seealso:: :class:`~MatchCondition` for valid elements and
expected values for each type.
"""
json = {'name': name,
'action': action,
'finish': finish,
'goto': goto.href if goto else None,
'call_route_map_ref': None if not call else call.href,
'comment': comment}
if not match_condition:
json.update(match_condition=[])
else:
if 'match_condition' in match_condition:
conditions = match_condition.pop('match_condition')
else:
conditions = MatchCondition()
if 'peer_address' in match_condition:
conditions.add_peer_address(
match_condition.pop('peer_address'))
if 'next_hop' in match_condition:
conditions.add_next_hop(
match_condition.pop('next_hop'))
if 'metric' in match_condition:
conditions.add_metric(
match_condition.pop('metric'))
if 'access_list' in match_condition:
conditions.add_access_list(
match_condition.pop('access_list'))
json.update(match_condition=conditions.conditions)
params = None
href = self.href
if add_pos is not None:
href = self.add_at_position(add_pos)
elif before or after:
params = self.add_before_after(before, after)
return ElementCreator(
self.__class__,
exception=CreateRuleFailed,
href=href,
params=params,
json=json) | [
"def",
"create",
"(",
"self",
",",
"name",
",",
"action",
"=",
"'permit'",
",",
"goto",
"=",
"None",
",",
"finish",
"=",
"False",
",",
"call",
"=",
"None",
",",
"comment",
"=",
"None",
",",
"add_pos",
"=",
"None",
",",
"after",
"=",
"None",
",",
"before",
"=",
"None",
",",
"*",
"*",
"match_condition",
")",
":",
"json",
"=",
"{",
"'name'",
":",
"name",
",",
"'action'",
":",
"action",
",",
"'finish'",
":",
"finish",
",",
"'goto'",
":",
"goto",
".",
"href",
"if",
"goto",
"else",
"None",
",",
"'call_route_map_ref'",
":",
"None",
"if",
"not",
"call",
"else",
"call",
".",
"href",
",",
"'comment'",
":",
"comment",
"}",
"if",
"not",
"match_condition",
":",
"json",
".",
"update",
"(",
"match_condition",
"=",
"[",
"]",
")",
"else",
":",
"if",
"'match_condition'",
"in",
"match_condition",
":",
"conditions",
"=",
"match_condition",
".",
"pop",
"(",
"'match_condition'",
")",
"else",
":",
"conditions",
"=",
"MatchCondition",
"(",
")",
"if",
"'peer_address'",
"in",
"match_condition",
":",
"conditions",
".",
"add_peer_address",
"(",
"match_condition",
".",
"pop",
"(",
"'peer_address'",
")",
")",
"if",
"'next_hop'",
"in",
"match_condition",
":",
"conditions",
".",
"add_next_hop",
"(",
"match_condition",
".",
"pop",
"(",
"'next_hop'",
")",
")",
"if",
"'metric'",
"in",
"match_condition",
":",
"conditions",
".",
"add_metric",
"(",
"match_condition",
".",
"pop",
"(",
"'metric'",
")",
")",
"if",
"'access_list'",
"in",
"match_condition",
":",
"conditions",
".",
"add_access_list",
"(",
"match_condition",
".",
"pop",
"(",
"'access_list'",
")",
")",
"json",
".",
"update",
"(",
"match_condition",
"=",
"conditions",
".",
"conditions",
")",
"params",
"=",
"None",
"href",
"=",
"self",
".",
"href",
"if",
"add_pos",
"is",
"not",
"None",
":",
"href",
"=",
"self",
".",
"add_at_position",
"(",
"add_pos",
")",
"elif",
"before",
"or",
"after",
":",
"params",
"=",
"self",
".",
"add_before_after",
"(",
"before",
",",
"after",
")",
"return",
"ElementCreator",
"(",
"self",
".",
"__class__",
",",
"exception",
"=",
"CreateRuleFailed",
",",
"href",
"=",
"href",
",",
"params",
"=",
"params",
",",
"json",
"=",
"json",
")"
]
| Create a route map rule. You can provide match conditions
by using keyword arguments specifying the required types.
You can also create the route map rule and add match conditions
after.
:param str name: name for this rule
:param str action: permit or deny
:param str goto: specify a rule section to goto after if there
is a match condition. This will override the finish parameter
:param bool finish: finish stops the processing after a match condition.
If finish is False, processing will continue to the next rule.
:param RouteMap call: call another route map after matching.
:param str comment: optional comment for the rule
:param int add_pos: position to insert the rule, starting with position 1. If
the position value is greater than the number of rules, the rule is inserted at
the bottom. If add_pos is not provided, rule is inserted in position 1. Mutually
exclusive with ``after`` and ``before`` params.
:param str after: Rule tag to add this rule after. Mutually exclusive with ``add_pos``
and ``before`` params.
:param str before: Rule tag to add this rule before. Mutually exclusive with ``add_pos``
and ``after`` params.
:param match_condition: keyword values identifying initial
values for the match condition. Valid keyword arguments
are 'access_list', 'next_hop', 'metric' and 'peer_address'.
You can also optionally pass the keyword 'match_condition'
with an instance of MatchCondition.
:raises CreateRuleFailed: failure to insert rule with reason
:raises ElementNotFound: if references elements in a match condition
this can be raised when the element specified is not found.
.. seealso:: :class:`~MatchCondition` for valid elements and
expected values for each type. | [
"Create",
"a",
"route",
"map",
"rule",
".",
"You",
"can",
"provide",
"match",
"conditions",
"by",
"using",
"keyword",
"arguments",
"specifying",
"the",
"required",
"types",
".",
"You",
"can",
"also",
"create",
"the",
"route",
"map",
"rule",
"and",
"add",
"match",
"conditions",
"after",
".",
":",
"param",
"str",
"name",
":",
"name",
"for",
"this",
"rule",
":",
"param",
"str",
"action",
":",
"permit",
"or",
"deny",
":",
"param",
"str",
"goto",
":",
"specify",
"a",
"rule",
"section",
"to",
"goto",
"after",
"if",
"there",
"is",
"a",
"match",
"condition",
".",
"This",
"will",
"override",
"the",
"finish",
"parameter",
":",
"param",
"bool",
"finish",
":",
"finish",
"stops",
"the",
"processing",
"after",
"a",
"match",
"condition",
".",
"If",
"finish",
"is",
"False",
"processing",
"will",
"continue",
"to",
"the",
"next",
"rule",
".",
":",
"param",
"RouteMap",
"call",
":",
"call",
"another",
"route",
"map",
"after",
"matching",
".",
":",
"param",
"str",
"comment",
":",
"optional",
"comment",
"for",
"the",
"rule",
":",
"param",
"int",
"add_pos",
":",
"position",
"to",
"insert",
"the",
"rule",
"starting",
"with",
"position",
"1",
".",
"If",
"the",
"position",
"value",
"is",
"greater",
"than",
"the",
"number",
"of",
"rules",
"the",
"rule",
"is",
"inserted",
"at",
"the",
"bottom",
".",
"If",
"add_pos",
"is",
"not",
"provided",
"rule",
"is",
"inserted",
"in",
"position",
"1",
".",
"Mutually",
"exclusive",
"with",
"after",
"and",
"before",
"params",
".",
":",
"param",
"str",
"after",
":",
"Rule",
"tag",
"to",
"add",
"this",
"rule",
"after",
".",
"Mutually",
"exclusive",
"with",
"add_pos",
"and",
"before",
"params",
".",
":",
"param",
"str",
"before",
":",
"Rule",
"tag",
"to",
"add",
"this",
"rule",
"before",
".",
"Mutually",
"exclusive",
"with",
"add_pos",
"and",
"after",
"params",
".",
":",
"param",
"match_condition",
":",
"keyword",
"values",
"identifying",
"initial",
"values",
"for",
"the",
"match",
"condition",
".",
"Valid",
"keyword",
"arguments",
"are",
"access_list",
"next_hop",
"metric",
"and",
"peer_address",
".",
"You",
"can",
"also",
"optionally",
"pass",
"the",
"keyword",
"match_condition",
"with",
"an",
"instance",
"of",
"MatchCondition",
".",
":",
"raises",
"CreateRuleFailed",
":",
"failure",
"to",
"insert",
"rule",
"with",
"reason",
":",
"raises",
"ElementNotFound",
":",
"if",
"references",
"elements",
"in",
"a",
"match",
"condition",
"this",
"can",
"be",
"raised",
"when",
"the",
"element",
"specified",
"is",
"not",
"found",
".",
"..",
"seealso",
"::",
":",
"class",
":",
"~MatchCondition",
"for",
"valid",
"elements",
"and",
"expected",
"values",
"for",
"each",
"type",
"."
]
| python | train |
gwastro/pycbc | pycbc/transforms.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/transforms.py#L1686-L1711 | def read_transforms_from_config(cp, section="transforms"):
"""Returns a list of PyCBC transform instances for a section in the
given configuration file.
If the transforms are nested (i.e., the output of one transform is the
input of another), the returned list will be sorted by the order of the
nests.
Parameters
----------
cp : WorkflowConfigParser
An open config file to read.
section : {"transforms", string}
Prefix on section names from which to retrieve the transforms.
Returns
-------
list
A list of the parsed transforms.
"""
trans = []
for subsection in cp.get_subsections(section):
name = cp.get_opt_tag(section, "name", subsection)
t = transforms[name].from_config(cp, section, subsection)
trans.append(t)
return order_transforms(trans) | [
"def",
"read_transforms_from_config",
"(",
"cp",
",",
"section",
"=",
"\"transforms\"",
")",
":",
"trans",
"=",
"[",
"]",
"for",
"subsection",
"in",
"cp",
".",
"get_subsections",
"(",
"section",
")",
":",
"name",
"=",
"cp",
".",
"get_opt_tag",
"(",
"section",
",",
"\"name\"",
",",
"subsection",
")",
"t",
"=",
"transforms",
"[",
"name",
"]",
".",
"from_config",
"(",
"cp",
",",
"section",
",",
"subsection",
")",
"trans",
".",
"append",
"(",
"t",
")",
"return",
"order_transforms",
"(",
"trans",
")"
]
| Returns a list of PyCBC transform instances for a section in the
given configuration file.
If the transforms are nested (i.e., the output of one transform is the
input of another), the returned list will be sorted by the order of the
nests.
Parameters
----------
cp : WorkflowConfigParser
An open config file to read.
section : {"transforms", string}
Prefix on section names from which to retrieve the transforms.
Returns
-------
list
A list of the parsed transforms. | [
"Returns",
"a",
"list",
"of",
"PyCBC",
"transform",
"instances",
"for",
"a",
"section",
"in",
"the",
"given",
"configuration",
"file",
"."
]
| python | train |
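
A sketch of the configuration layout read_transforms_from_config expects; the [transforms] section prefix and the name option come from the code above, while the tag and transform name are placeholders:

    # Each [transforms-<tag>] subsection supplies a "name" option keying into the
    # module-level `transforms` registry; the tag and name below are placeholders.
    config_text = """
    [transforms-tag1]
    name = some_registered_transform
    """
    # In practice this text would be loaded into a pycbc WorkflowConfigParser and
    # passed as `cp`; get_subsections("transforms") then yields "tag1" and
    # from_config() builds the corresponding transform instance.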
fedelemantuano/tika-app-python | tikapp/utils.py | https://github.com/fedelemantuano/tika-app-python/blob/9a462aa611af2032306c78a9c996c8545288c212/tikapp/utils.py#L87-L113 | def write_payload(payload=None, objectInput=None):
"""
This function writes a base64 payload or file object on disk.
Args:
payload (string): payload in base64
objectInput (object): file object/standard input to analyze
Returns:
Path of file
"""
temp = tempfile.mkstemp()[1]
log.debug("Write payload in temp file {!r}".format(temp))
with open(temp, 'wb') as f:
if payload:
payload = base64.b64decode(payload)
elif objectInput:
if six.PY3:
payload = objectInput.buffer.read()
elif six.PY2:
payload = objectInput.read()
f.write(payload)
return temp | [
"def",
"write_payload",
"(",
"payload",
"=",
"None",
",",
"objectInput",
"=",
"None",
")",
":",
"temp",
"=",
"tempfile",
".",
"mkstemp",
"(",
")",
"[",
"1",
"]",
"log",
".",
"debug",
"(",
"\"Write payload in temp file {!r}\"",
".",
"format",
"(",
"temp",
")",
")",
"with",
"open",
"(",
"temp",
",",
"'wb'",
")",
"as",
"f",
":",
"if",
"payload",
":",
"payload",
"=",
"base64",
".",
"b64decode",
"(",
"payload",
")",
"elif",
"objectInput",
":",
"if",
"six",
".",
"PY3",
":",
"payload",
"=",
"objectInput",
".",
"buffer",
".",
"read",
"(",
")",
"elif",
"six",
".",
"PY2",
":",
"payload",
"=",
"objectInput",
".",
"read",
"(",
")",
"f",
".",
"write",
"(",
"payload",
")",
"return",
"temp"
]
| This function writes a base64 payload or file object on disk.
Args:
payload (string): payload in base64
objectInput (object): file object/standard input to analyze
Returns:
Path of file | [
"This",
"function",
"writes",
"a",
"base64",
"payload",
"or",
"file",
"object",
"on",
"disk",
"."
]
| python | train |
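
A usage sketch for write_payload; the import path follows the record's tikapp/utils.py layout and the payload is a base64-encoded string:

    import base64, os
    from tikapp.utils import write_payload

    temp_path = write_payload(payload=base64.b64encode(b"hello world"))
    with open(temp_path, "rb") as f:
        assert f.read() == b"hello world"
    os.remove(temp_path)        # the helper leaves the temp file for the caller to clean up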
cdgriffith/Reusables | reusables/cli.py | https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/cli.py#L204-L231 | def cp(src, dst, overwrite=False):
"""
Copy files to a new location.
:param src: list (or string) of paths of files to copy
:param dst: file or folder to copy item(s) to
:param overwrite: IF the file already exists, should I overwrite it?
"""
if not isinstance(src, list):
src = [src]
dst = os.path.expanduser(dst)
dst_folder = os.path.isdir(dst)
if len(src) > 1 and not dst_folder:
raise OSError("Cannot copy multiple item to same file")
for item in src:
source = os.path.expanduser(item)
destination = (dst if not dst_folder else
os.path.join(dst, os.path.basename(source)))
if not overwrite and os.path.exists(destination):
_logger.warning("Not replacing {0} with {1}, overwrite not enabled"
"".format(destination, source))
continue
shutil.copy(source, destination) | [
"def",
"cp",
"(",
"src",
",",
"dst",
",",
"overwrite",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"src",
",",
"list",
")",
":",
"src",
"=",
"[",
"src",
"]",
"dst",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"dst",
")",
"dst_folder",
"=",
"os",
".",
"path",
".",
"isdir",
"(",
"dst",
")",
"if",
"len",
"(",
"src",
")",
">",
"1",
"and",
"not",
"dst_folder",
":",
"raise",
"OSError",
"(",
"\"Cannot copy multiple item to same file\"",
")",
"for",
"item",
"in",
"src",
":",
"source",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"item",
")",
"destination",
"=",
"(",
"dst",
"if",
"not",
"dst_folder",
"else",
"os",
".",
"path",
".",
"join",
"(",
"dst",
",",
"os",
".",
"path",
".",
"basename",
"(",
"source",
")",
")",
")",
"if",
"not",
"overwrite",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"destination",
")",
":",
"_logger",
".",
"warning",
"(",
"\"Not replacing {0} with {1}, overwrite not enabled\"",
"\"\"",
".",
"format",
"(",
"destination",
",",
"source",
")",
")",
"continue",
"shutil",
".",
"copy",
"(",
"source",
",",
"destination",
")"
]
| Copy files to a new location.
:param src: list (or string) of paths of files to copy
:param dst: file or folder to copy item(s) to
:param overwrite: IF the file already exists, should I overwrite it? | [
"Copy",
"files",
"to",
"a",
"new",
"location",
"."
]
| python | train |
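
A usage sketch for cp, importing it from reusables.cli as the record's path suggests; the paths are temporary files created only for the example:

    import os, tempfile
    from reusables.cli import cp

    src_dir, dst_dir = tempfile.mkdtemp(), tempfile.mkdtemp()
    src = os.path.join(src_dir, "hello.txt")
    with open(src, "w") as f:
        f.write("hi")
    cp(src, dst_dir)                      # copy a single file into a directory
    assert os.path.exists(os.path.join(dst_dir, "hello.txt"))
    cp(src, dst_dir)                      # logged and skipped: overwrite defaults to False
    cp(src, dst_dir, overwrite=True)      # replaces the existing copy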
Jammy2211/PyAutoLens | autolens/data/array/scaled_array.py | https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/scaled_array.py#L351-L371 | def resized_scaled_array_from_array(self, new_shape, new_centre_pixels=None, new_centre_arcsec=None):
"""resized the array to a new shape and at a new origin.
Parameters
-----------
new_shape : (int, int)
The new two-dimensional shape of the array.
"""
if new_centre_pixels is None and new_centre_arcsec is None:
new_centre = (-1, -1) # In Numba, the input origin must be the same image type as the origin, thus we cannot
# pass 'None' and instead use the tuple (-1, -1).
elif new_centre_pixels is not None and new_centre_arcsec is None:
new_centre = new_centre_pixels
elif new_centre_pixels is None and new_centre_arcsec is not None:
new_centre = self.arc_second_coordinates_to_pixel_coordinates(arc_second_coordinates=new_centre_arcsec)
else:
raise exc.DataException('You have supplied two centres (pixels and arc-seconds) to the resize scaled'
'array function')
return self.new_with_array(array=array_util.resized_array_2d_from_array_2d_and_resized_shape(
array_2d=self, resized_shape=new_shape, origin=new_centre)) | [
"def",
"resized_scaled_array_from_array",
"(",
"self",
",",
"new_shape",
",",
"new_centre_pixels",
"=",
"None",
",",
"new_centre_arcsec",
"=",
"None",
")",
":",
"if",
"new_centre_pixels",
"is",
"None",
"and",
"new_centre_arcsec",
"is",
"None",
":",
"new_centre",
"=",
"(",
"-",
"1",
",",
"-",
"1",
")",
"# In Numba, the input origin must be the same image type as the origin, thus we cannot",
"# pass 'None' and instead use the tuple (-1, -1).",
"elif",
"new_centre_pixels",
"is",
"not",
"None",
"and",
"new_centre_arcsec",
"is",
"None",
":",
"new_centre",
"=",
"new_centre_pixels",
"elif",
"new_centre_pixels",
"is",
"None",
"and",
"new_centre_arcsec",
"is",
"not",
"None",
":",
"new_centre",
"=",
"self",
".",
"arc_second_coordinates_to_pixel_coordinates",
"(",
"arc_second_coordinates",
"=",
"new_centre_arcsec",
")",
"else",
":",
"raise",
"exc",
".",
"DataException",
"(",
"'You have supplied two centres (pixels and arc-seconds) to the resize scaled'",
"'array function'",
")",
"return",
"self",
".",
"new_with_array",
"(",
"array",
"=",
"array_util",
".",
"resized_array_2d_from_array_2d_and_resized_shape",
"(",
"array_2d",
"=",
"self",
",",
"resized_shape",
"=",
"new_shape",
",",
"origin",
"=",
"new_centre",
")",
")"
]
| resized the array to a new shape and at a new origin.
Parameters
-----------
new_shape : (int, int)
The new two-dimensional shape of the array. | [
"resized",
"the",
"array",
"to",
"a",
"new",
"shape",
"and",
"at",
"a",
"new",
"origin",
"."
]
| python | valid |
iotile/coretools | iotileemulate/iotile/emulate/internal/rpc_queue.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/internal/rpc_queue.py#L44-L58 | def put_task(self, func, args, response):
"""Place a task onto the RPC queue.
This temporary functionality will go away but it lets you run a
task synchronously with RPC dispatch by placing it onto the
RPC queue.
Args:
func (callable): The function to execute
args (iterable): The function arguments
response (GenericResponse): The response object to signal the
result on.
"""
self._rpc_queue.put_nowait((func, args, response)) | [
"def",
"put_task",
"(",
"self",
",",
"func",
",",
"args",
",",
"response",
")",
":",
"self",
".",
"_rpc_queue",
".",
"put_nowait",
"(",
"(",
"func",
",",
"args",
",",
"response",
")",
")"
]
| Place a task onto the RPC queue.
This temporary functionality will go away but it lets you run a
task synchronously with RPC dispatch by placing it onto the
RPC queue.
Args:
func (callable): The function to execute
args (iterable): The function arguments
response (GenericResponse): The response object to signal the
result on. | [
"Place",
"a",
"task",
"onto",
"the",
"RPC",
"queue",
"."
]
| python | train |
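
A generic asyncio sketch of the dispatch pattern put_task describes (an illustration, not the coretools implementation): (func, args, response) tuples are consumed by a single worker so they run serialized, and the result is signalled back on the response object:

    import asyncio

    async def worker(queue):
        while True:
            func, args, response = await queue.get()
            try:
                response.set_result(func(*args))
            except Exception as exc:          # propagate failures through the response
                response.set_exception(exc)
            queue.task_done()

    async def main():
        queue = asyncio.Queue()
        runner = asyncio.create_task(worker(queue))
        response = asyncio.get_running_loop().create_future()
        queue.put_nowait((sum, ([1, 2, 3],), response))   # mirrors put_task(func, args, response)
        print(await response)                             # 6
        runner.cancel()

    asyncio.run(main())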
sentinel-hub/sentinelhub-py | sentinelhub/aws.py | https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/aws.py#L534-L557 | def get_aws_index(self):
"""
Returns tile index on AWS. If `tile_index` was not set during class initialization it will be determined
according to existing tiles on AWS.
:return: Index of tile on AWS
:rtype: int
"""
if self.aws_index is not None:
return self.aws_index
tile_info_list = get_tile_info(self.tile_name, self.datetime, all_tiles=True)
if not tile_info_list:
raise ValueError('Cannot find aws_index for specified tile and time')
if self.data_source is DataSource.SENTINEL2_L2A:
for tile_info in sorted(tile_info_list, key=self._parse_aws_index):
try:
self.aws_index = self._parse_aws_index(tile_info)
self.get_tile_info()
return self.aws_index
except AwsDownloadFailedException:
pass
return self._parse_aws_index(tile_info_list[0]) | [
"def",
"get_aws_index",
"(",
"self",
")",
":",
"if",
"self",
".",
"aws_index",
"is",
"not",
"None",
":",
"return",
"self",
".",
"aws_index",
"tile_info_list",
"=",
"get_tile_info",
"(",
"self",
".",
"tile_name",
",",
"self",
".",
"datetime",
",",
"all_tiles",
"=",
"True",
")",
"if",
"not",
"tile_info_list",
":",
"raise",
"ValueError",
"(",
"'Cannot find aws_index for specified tile and time'",
")",
"if",
"self",
".",
"data_source",
"is",
"DataSource",
".",
"SENTINEL2_L2A",
":",
"for",
"tile_info",
"in",
"sorted",
"(",
"tile_info_list",
",",
"key",
"=",
"self",
".",
"_parse_aws_index",
")",
":",
"try",
":",
"self",
".",
"aws_index",
"=",
"self",
".",
"_parse_aws_index",
"(",
"tile_info",
")",
"self",
".",
"get_tile_info",
"(",
")",
"return",
"self",
".",
"aws_index",
"except",
"AwsDownloadFailedException",
":",
"pass",
"return",
"self",
".",
"_parse_aws_index",
"(",
"tile_info_list",
"[",
"0",
"]",
")"
]
| Returns tile index on AWS. If `tile_index` was not set during class initialization it will be determined
according to existing tiles on AWS.
:return: Index of tile on AWS
:rtype: int | [
"Returns",
"tile",
"index",
"on",
"AWS",
".",
"If",
"tile_index",
"was",
"not",
"set",
"during",
"class",
"initialization",
"it",
"will",
"be",
"determined",
"according",
"to",
"existing",
"tiles",
"on",
"AWS",
"."
]
| python | train |
FPGAwars/apio | apio/commands/examples.py | https://github.com/FPGAwars/apio/blob/5c6310f11a061a760764c6b5847bfb431dc3d0bc/apio/commands/examples.py#L29-L45 | def cli(ctx, list, dir, files, project_dir, sayno):
"""Manage verilog examples.\n
Install with `apio install examples`"""
exit_code = 0
if list:
exit_code = Examples().list_examples()
elif dir:
exit_code = Examples().copy_example_dir(dir, project_dir, sayno)
elif files:
exit_code = Examples().copy_example_files(files, project_dir, sayno)
else:
click.secho(ctx.get_help())
click.secho(Examples().examples_of_use_cad())
ctx.exit(exit_code) | [
"def",
"cli",
"(",
"ctx",
",",
"list",
",",
"dir",
",",
"files",
",",
"project_dir",
",",
"sayno",
")",
":",
"exit_code",
"=",
"0",
"if",
"list",
":",
"exit_code",
"=",
"Examples",
"(",
")",
".",
"list_examples",
"(",
")",
"elif",
"dir",
":",
"exit_code",
"=",
"Examples",
"(",
")",
".",
"copy_example_dir",
"(",
"dir",
",",
"project_dir",
",",
"sayno",
")",
"elif",
"files",
":",
"exit_code",
"=",
"Examples",
"(",
")",
".",
"copy_example_files",
"(",
"files",
",",
"project_dir",
",",
"sayno",
")",
"else",
":",
"click",
".",
"secho",
"(",
"ctx",
".",
"get_help",
"(",
")",
")",
"click",
".",
"secho",
"(",
"Examples",
"(",
")",
".",
"examples_of_use_cad",
"(",
")",
")",
"ctx",
".",
"exit",
"(",
"exit_code",
")"
]
| Manage verilog examples.\n
Install with `apio install examples` | [
"Manage",
"verilog",
"examples",
".",
"\\",
"n",
"Install",
"with",
"apio",
"install",
"examples"
]
| python | train |
trailofbits/manticore | manticore/native/cpu/x86.py | https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L529-L548 | def _get_flags(self, reg):
""" Build EFLAGS/RFLAGS from flags """
def make_symbolic(flag_expr):
register_size = 32 if reg == 'EFLAGS' else 64
value, offset = flag_expr
return Operators.ITEBV(register_size, value,
BitVecConstant(register_size, 1 << offset),
BitVecConstant(register_size, 0))
flags = []
for flag, offset in self._flags.items():
flags.append((self._registers[flag], offset))
if any(issymbolic(flag) for flag, offset in flags):
res = reduce(operator.or_, map(make_symbolic, flags))
else:
res = 0
for flag, offset in flags:
res += flag << offset
return res | [
"def",
"_get_flags",
"(",
"self",
",",
"reg",
")",
":",
"def",
"make_symbolic",
"(",
"flag_expr",
")",
":",
"register_size",
"=",
"32",
"if",
"reg",
"==",
"'EFLAGS'",
"else",
"64",
"value",
",",
"offset",
"=",
"flag_expr",
"return",
"Operators",
".",
"ITEBV",
"(",
"register_size",
",",
"value",
",",
"BitVecConstant",
"(",
"register_size",
",",
"1",
"<<",
"offset",
")",
",",
"BitVecConstant",
"(",
"register_size",
",",
"0",
")",
")",
"flags",
"=",
"[",
"]",
"for",
"flag",
",",
"offset",
"in",
"self",
".",
"_flags",
".",
"items",
"(",
")",
":",
"flags",
".",
"append",
"(",
"(",
"self",
".",
"_registers",
"[",
"flag",
"]",
",",
"offset",
")",
")",
"if",
"any",
"(",
"issymbolic",
"(",
"flag",
")",
"for",
"flag",
",",
"offset",
"in",
"flags",
")",
":",
"res",
"=",
"reduce",
"(",
"operator",
".",
"or_",
",",
"map",
"(",
"make_symbolic",
",",
"flags",
")",
")",
"else",
":",
"res",
"=",
"0",
"for",
"flag",
",",
"offset",
"in",
"flags",
":",
"res",
"+=",
"flag",
"<<",
"offset",
"return",
"res"
]
| Build EFLAGS/RFLAGS from flags | [
"Build",
"EFLAGS",
"/",
"RFLAGS",
"from",
"flags"
]
| python | valid |
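
A concrete-path illustration of what _get_flags computes when no flag is symbolic: each flag bit is shifted to its offset and summed. The offsets follow the standard x86 EFLAGS layout; the flag values are arbitrary examples:

    flags = {'CF': (1, 0), 'PF': (0, 2), 'AF': (0, 4),
             'ZF': (1, 6), 'SF': (0, 7), 'OF': (0, 11)}
    eflags = 0
    for value, offset in flags.values():
        eflags += value << offset
    print(hex(eflags))                    # 0x41 -> CF and ZF set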
infothrill/python-dyndnsc | dyndnsc/plugins/manager.py | https://github.com/infothrill/python-dyndnsc/blob/2196d48aa6098da9835a7611fbdb0b5f0fbf51e4/dyndnsc/plugins/manager.py#L129-L135 | def add_plugin(self, plugin):
"""Add the given plugin."""
# allow plugins loaded via entry points to override builtin plugins
new_name = self.plugin_name(plugin)
self._plugins[:] = [p for p in self._plugins
if self.plugin_name(p) != new_name]
self._plugins.append(plugin) | [
"def",
"add_plugin",
"(",
"self",
",",
"plugin",
")",
":",
"# allow plugins loaded via entry points to override builtin plugins",
"new_name",
"=",
"self",
".",
"plugin_name",
"(",
"plugin",
")",
"self",
".",
"_plugins",
"[",
":",
"]",
"=",
"[",
"p",
"for",
"p",
"in",
"self",
".",
"_plugins",
"if",
"self",
".",
"plugin_name",
"(",
"p",
")",
"!=",
"new_name",
"]",
"self",
".",
"_plugins",
".",
"append",
"(",
"plugin",
")"
]
| Add the given plugin. | [
"Add",
"the",
"given",
"plugin",
"."
]
| python | train |
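
A self-contained illustration of the override-by-name behaviour add_plugin implements (TinyManager is a stand-in written for this sketch, not the dyndnsc manager): adding a plugin whose name matches an existing one replaces it rather than duplicating it:

    class TinyManager:
        def __init__(self):
            self._plugins = []

        def plugin_name(self, plugin):
            return plugin.__class__.__name__.lower()

        def add_plugin(self, plugin):
            new_name = self.plugin_name(plugin)
            self._plugins[:] = [p for p in self._plugins
                                if self.plugin_name(p) != new_name]
            self._plugins.append(plugin)

    class Notifier:
        pass

    manager = TinyManager()
    manager.add_plugin(Notifier())
    manager.add_plugin(Notifier())        # same name: replaces the earlier instance
    assert len(manager._plugins) == 1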
openstack/proliantutils | proliantutils/ilo/ris.py | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L790-L807 | def _perform_power_op(self, oper):
"""Perform requested power operation.
:param oper: Type of power button press to simulate.
Supported values: 'ON', 'ForceOff', 'ForceRestart' and
'Nmi'
:raises: IloError, on an error from iLO.
"""
power_settings = {"Action": "Reset",
"ResetType": oper}
systems_uri = "/rest/v1/Systems/1"
status, headers, response = self._rest_post(systems_uri, None,
power_settings)
if status >= 300:
msg = self._get_extended_error(response)
raise exception.IloError(msg) | [
"def",
"_perform_power_op",
"(",
"self",
",",
"oper",
")",
":",
"power_settings",
"=",
"{",
"\"Action\"",
":",
"\"Reset\"",
",",
"\"ResetType\"",
":",
"oper",
"}",
"systems_uri",
"=",
"\"/rest/v1/Systems/1\"",
"status",
",",
"headers",
",",
"response",
"=",
"self",
".",
"_rest_post",
"(",
"systems_uri",
",",
"None",
",",
"power_settings",
")",
"if",
"status",
">=",
"300",
":",
"msg",
"=",
"self",
".",
"_get_extended_error",
"(",
"response",
")",
"raise",
"exception",
".",
"IloError",
"(",
"msg",
")"
]
| Perform requested power operation.
:param oper: Type of power button press to simulate.
Supported values: 'ON', 'ForceOff', 'ForceRestart' and
'Nmi'
:raises: IloError, on an error from iLO. | [
"Perform",
"requested",
"power",
"operation",
"."
]
| python | train |
tensorflow/tensor2tensor | tensor2tensor/models/video/savp.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/savp.py#L228-L262 | def get_gan_loss(self, true_frames, gen_frames, name):
"""Get the discriminator + generator loss at every step.
This performs a 1:1 update of the discriminator and generator at every
step.
Args:
true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be ground truth.
gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be fake.
name: discriminator scope.
Returns:
loss: 0-D Tensor, with d_loss + g_loss
"""
# D - STEP
with tf.variable_scope("%s_discriminator" % name, reuse=tf.AUTO_REUSE):
gan_d_loss, _, fake_logits_stop = self.d_step(
true_frames, gen_frames)
# G - STEP
with tf.variable_scope("%s_discriminator" % name, reuse=True):
gan_g_loss_pos_d, gan_g_loss_neg_d = self.g_step(
gen_frames, fake_logits_stop)
gan_g_loss = gan_g_loss_pos_d + gan_g_loss_neg_d
tf.summary.scalar("gan_loss_%s" % name, gan_g_loss_pos_d + gan_d_loss)
if self.hparams.gan_optimization == "joint":
gan_loss = gan_g_loss + gan_d_loss
else:
curr_step = self.get_iteration_num()
gan_loss = tf.cond(
tf.logical_not(curr_step % 2 == 0), lambda: gan_g_loss,
lambda: gan_d_loss)
return gan_loss | [
"def",
"get_gan_loss",
"(",
"self",
",",
"true_frames",
",",
"gen_frames",
",",
"name",
")",
":",
"# D - STEP",
"with",
"tf",
".",
"variable_scope",
"(",
"\"%s_discriminator\"",
"%",
"name",
",",
"reuse",
"=",
"tf",
".",
"AUTO_REUSE",
")",
":",
"gan_d_loss",
",",
"_",
",",
"fake_logits_stop",
"=",
"self",
".",
"d_step",
"(",
"true_frames",
",",
"gen_frames",
")",
"# G - STEP",
"with",
"tf",
".",
"variable_scope",
"(",
"\"%s_discriminator\"",
"%",
"name",
",",
"reuse",
"=",
"True",
")",
":",
"gan_g_loss_pos_d",
",",
"gan_g_loss_neg_d",
"=",
"self",
".",
"g_step",
"(",
"gen_frames",
",",
"fake_logits_stop",
")",
"gan_g_loss",
"=",
"gan_g_loss_pos_d",
"+",
"gan_g_loss_neg_d",
"tf",
".",
"summary",
".",
"scalar",
"(",
"\"gan_loss_%s\"",
"%",
"name",
",",
"gan_g_loss_pos_d",
"+",
"gan_d_loss",
")",
"if",
"self",
".",
"hparams",
".",
"gan_optimization",
"==",
"\"joint\"",
":",
"gan_loss",
"=",
"gan_g_loss",
"+",
"gan_d_loss",
"else",
":",
"curr_step",
"=",
"self",
".",
"get_iteration_num",
"(",
")",
"gan_loss",
"=",
"tf",
".",
"cond",
"(",
"tf",
".",
"logical_not",
"(",
"curr_step",
"%",
"2",
"==",
"0",
")",
",",
"lambda",
":",
"gan_g_loss",
",",
"lambda",
":",
"gan_d_loss",
")",
"return",
"gan_loss"
]
| Get the discriminator + generator loss at every step.
This performs a 1:1 update of the discriminator and generator at every
step.
Args:
true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be ground truth.
gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be fake.
name: discriminator scope.
Returns:
loss: 0-D Tensor, with d_loss + g_loss | [
"Get",
"the",
"discriminator",
"+",
"generator",
"loss",
"at",
"every",
"step",
"."
]
| python | train |
saltstack/salt | salt/modules/boto_s3_bucket.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_s3_bucket.py#L845-L866 | def delete_cors(Bucket,
region=None, key=None, keyid=None, profile=None):
'''
Delete the CORS configuration for the given bucket
Returns {deleted: true} if CORS was deleted and returns
{deleted: False} if CORS was not deleted.
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.delete_cors my_bucket
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_bucket_cors(Bucket=Bucket)
return {'deleted': True, 'name': Bucket}
except ClientError as e:
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)} | [
"def",
"delete_cors",
"(",
"Bucket",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"try",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"conn",
".",
"delete_bucket_cors",
"(",
"Bucket",
"=",
"Bucket",
")",
"return",
"{",
"'deleted'",
":",
"True",
",",
"'name'",
":",
"Bucket",
"}",
"except",
"ClientError",
"as",
"e",
":",
"return",
"{",
"'deleted'",
":",
"False",
",",
"'error'",
":",
"__utils__",
"[",
"'boto3.get_error'",
"]",
"(",
"e",
")",
"}"
]
| Delete the CORS configuration for the given bucket
Returns {deleted: true} if CORS was deleted and returns
{deleted: False} if CORS was not deleted.
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.delete_cors my_bucket | [
"Delete",
"the",
"CORS",
"configuration",
"for",
"the",
"given",
"bucket"
]
| python | train |
housecanary/hc-api-python | housecanary/excel/analytics_data_excel.py | https://github.com/housecanary/hc-api-python/blob/2bb9e2208b34e8617575de45934357ee33b8531c/housecanary/excel/analytics_data_excel.py#L222-L297 | def process_data(key, data_list, result_info_key, identifier_keys):
""" Given a key as the endpoint name, pulls the data for that endpoint out
of the data_list for each address, processes the data into a more
excel-friendly format and returns that data.
Args:
key: the endpoint name of the data to process
data_list: the main data list to take the data from
result_info_key: the key in api_data dicts that contains the data results
identifier_keys: the list of keys used as requested identifiers
(address, zipcode, block_id, etc)
Returns:
A list of dicts (rows) to be written to a worksheet
"""
master_data = []
for item_data in data_list:
data = item_data[key]
if data is None:
current_item_data = {}
else:
if key == 'property/value':
current_item_data = data['value']
elif key == 'property/details':
top_level_keys = ['property', 'assessment']
current_item_data = flatten_top_level_keys(data, top_level_keys)
elif key == 'property/school':
current_item_data = data['school']
school_list = []
for school_type_key in current_item_data:
schools = current_item_data[school_type_key]
for school in schools:
school['school_type'] = school_type_key
school['school_address'] = school['address']
school['school_zipcode'] = school['zipcode']
school_list.append(school)
current_item_data = school_list
elif key == 'property/value_forecast':
current_item_data = {}
for month_key in data:
current_item_data[month_key] = data[month_key]['value']
elif key in ['property/value_within_block', 'property/rental_value_within_block']:
current_item_data = flatten_top_level_keys(data, [
'housecanary_value_percentile_range',
'housecanary_value_sqft_percentile_range',
'client_value_percentile_range',
'client_value_sqft_percentile_range'
])
elif key in ['property/zip_details', 'zip/details']:
top_level_keys = ['multi_family', 'single_family']
current_item_data = flatten_top_level_keys(data, top_level_keys)
else:
current_item_data = data
if isinstance(current_item_data, dict):
_set_identifier_fields(current_item_data, item_data, result_info_key, identifier_keys)
master_data.append(current_item_data)
else:
# it's a list
for item in current_item_data:
_set_identifier_fields(item, item_data, result_info_key, identifier_keys)
master_data.extend(current_item_data)
return master_data | [
"def",
"process_data",
"(",
"key",
",",
"data_list",
",",
"result_info_key",
",",
"identifier_keys",
")",
":",
"master_data",
"=",
"[",
"]",
"for",
"item_data",
"in",
"data_list",
":",
"data",
"=",
"item_data",
"[",
"key",
"]",
"if",
"data",
"is",
"None",
":",
"current_item_data",
"=",
"{",
"}",
"else",
":",
"if",
"key",
"==",
"'property/value'",
":",
"current_item_data",
"=",
"data",
"[",
"'value'",
"]",
"elif",
"key",
"==",
"'property/details'",
":",
"top_level_keys",
"=",
"[",
"'property'",
",",
"'assessment'",
"]",
"current_item_data",
"=",
"flatten_top_level_keys",
"(",
"data",
",",
"top_level_keys",
")",
"elif",
"key",
"==",
"'property/school'",
":",
"current_item_data",
"=",
"data",
"[",
"'school'",
"]",
"school_list",
"=",
"[",
"]",
"for",
"school_type_key",
"in",
"current_item_data",
":",
"schools",
"=",
"current_item_data",
"[",
"school_type_key",
"]",
"for",
"school",
"in",
"schools",
":",
"school",
"[",
"'school_type'",
"]",
"=",
"school_type_key",
"school",
"[",
"'school_address'",
"]",
"=",
"school",
"[",
"'address'",
"]",
"school",
"[",
"'school_zipcode'",
"]",
"=",
"school",
"[",
"'zipcode'",
"]",
"school_list",
".",
"append",
"(",
"school",
")",
"current_item_data",
"=",
"school_list",
"elif",
"key",
"==",
"'property/value_forecast'",
":",
"current_item_data",
"=",
"{",
"}",
"for",
"month_key",
"in",
"data",
":",
"current_item_data",
"[",
"month_key",
"]",
"=",
"data",
"[",
"month_key",
"]",
"[",
"'value'",
"]",
"elif",
"key",
"in",
"[",
"'property/value_within_block'",
",",
"'property/rental_value_within_block'",
"]",
":",
"current_item_data",
"=",
"flatten_top_level_keys",
"(",
"data",
",",
"[",
"'housecanary_value_percentile_range'",
",",
"'housecanary_value_sqft_percentile_range'",
",",
"'client_value_percentile_range'",
",",
"'client_value_sqft_percentile_range'",
"]",
")",
"elif",
"key",
"in",
"[",
"'property/zip_details'",
",",
"'zip/details'",
"]",
":",
"top_level_keys",
"=",
"[",
"'multi_family'",
",",
"'single_family'",
"]",
"current_item_data",
"=",
"flatten_top_level_keys",
"(",
"data",
",",
"top_level_keys",
")",
"else",
":",
"current_item_data",
"=",
"data",
"if",
"isinstance",
"(",
"current_item_data",
",",
"dict",
")",
":",
"_set_identifier_fields",
"(",
"current_item_data",
",",
"item_data",
",",
"result_info_key",
",",
"identifier_keys",
")",
"master_data",
".",
"append",
"(",
"current_item_data",
")",
"else",
":",
"# it's a list",
"for",
"item",
"in",
"current_item_data",
":",
"_set_identifier_fields",
"(",
"item",
",",
"item_data",
",",
"result_info_key",
",",
"identifier_keys",
")",
"master_data",
".",
"extend",
"(",
"current_item_data",
")",
"return",
"master_data"
]
| Given a key as the endpoint name, pulls the data for that endpoint out
of the data_list for each address, processes the data into a more
excel-friendly format and returns that data.
Args:
key: the endpoint name of the data to process
data_list: the main data list to take the data from
result_info_key: the key in api_data dicts that contains the data results
identifier_keys: the list of keys used as requested identifiers
(address, zipcode, block_id, etc)
Returns:
A list of dicts (rows) to be written to a worksheet | [
"Given",
"a",
"key",
"as",
"the",
"endpoint",
"name",
"pulls",
"the",
"data",
"for",
"that",
"endpoint",
"out",
"of",
"the",
"data_list",
"for",
"each",
"address",
"processes",
"the",
"data",
"into",
"a",
"more",
"excel",
"-",
"friendly",
"format",
"and",
"returns",
"that",
"data",
"."
]
| python | train |
aparsons/threadfix_api | threadfix_api/threadfix.py | https://github.com/aparsons/threadfix_api/blob/76fd1bd26e9ac863636112cd30d733543807ff7d/threadfix_api/threadfix.py#L244-L251 | def get_waf_rules_by_application(self, waf_id, application_id):
"""
Returns the WAF rule text for one or all of the applications in a WAF. If the application id is -1, it will get
rules for all apps. If the application is a valid application id, rules will be generated for that application.
:param waf_id: WAF identifier.
:param application_id: Application identifier.
"""
return self._request('GET', 'rest/wafs/' + str(waf_id) + '/rules/app/' + str(application_id)) | [
"def",
"get_waf_rules_by_application",
"(",
"self",
",",
"waf_id",
",",
"application_id",
")",
":",
"return",
"self",
".",
"_request",
"(",
"'GET'",
",",
"'rest/wafs/'",
"+",
"str",
"(",
"waf_id",
")",
"+",
"'/rules/app/'",
"+",
"str",
"(",
"application_id",
")",
")"
]
| Returns the WAF rule text for one or all of the applications in a WAF. If the application id is -1, it will get
rules for all apps. If the application is a valid application id, rules will be generated for that application.
:param waf_id: WAF identifier.
:param application_id: Application identifier. | [
"Returns",
"the",
"WAF",
"rule",
"text",
"for",
"one",
"or",
"all",
"of",
"the",
"applications",
"in",
"a",
"WAF",
".",
"If",
"the",
"application",
"id",
"is",
"-",
"1",
"it",
"will",
"get",
"rules",
"for",
"all",
"apps",
".",
"If",
"the",
"application",
"is",
"a",
"valid",
"application",
"id",
"rules",
"will",
"be",
"generated",
"for",
"that",
"application",
".",
":",
"param",
"waf_id",
":",
"WAF",
"identifier",
".",
":",
"param",
"application_id",
":",
"Application",
"identifier",
"."
]
| python | train |
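
A hedged usage sketch for get_waf_rules_by_application; the client construction and the response attributes follow the project's README and are assumptions here, and the host, key and ids are placeholders:

    from threadfix_api import threadfix

    tf = threadfix.ThreadFixAPI('https://threadfix.example.com:8443/threadfix', 'api-key')  # assumed constructor
    response = tf.get_waf_rules_by_application(1, -1)   # application_id -1 -> rules for all apps
    if response.success:                                 # response attributes assumed from the README
        print(response.data)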
karan/TPB | tpb/tpb.py | https://github.com/karan/TPB/blob/f424a73a10d4bcf4e363d7e7e8cb915a3a057671/tpb/tpb.py#L29-L41 | def self_if_parameters(func):
"""
If any parameter is given, the method's bound object is returned after
executing the function. Else the function's result is returned.
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
result = func(self, *args, **kwargs)
if args or kwargs:
return self
else:
return result
return wrapper | [
"def",
"self_if_parameters",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"args",
"or",
"kwargs",
":",
"return",
"self",
"else",
":",
"return",
"result",
"return",
"wrapper"
]
| If any parameter is given, the method's bound object is returned after
executing the function. Else the function's result is returned. | [
"If",
"any",
"parameter",
"is",
"given",
"the",
"method",
"s",
"binded",
"object",
"is",
"returned",
"after",
"executing",
"the",
"function",
".",
"Else",
"the",
"function",
"s",
"result",
"is",
"returned",
"."
]
| python | train |
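
A usage sketch for self_if_parameters; the decorator is copied from the record so the example is self-contained, and Query is a made-up class showing the getter/setter chaining the docstring describes:

    from functools import wraps

    def self_if_parameters(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            result = func(self, *args, **kwargs)
            if args or kwargs:
                return self
            else:
                return result
        return wrapper

    class Query:
        def __init__(self):
            self._page = 0

        @self_if_parameters
        def page(self, number=None):
            if number is None:
                return self._page
            self._page = number

    q = Query()
    assert q.page(3) is q       # called with an argument: mutates and returns self
    assert q.page() == 3        # called without arguments: returns the stored value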
alan-turing-institute/topic-modelling-tools | topicmodels/LDA/gibbs.py | https://github.com/alan-turing-institute/topic-modelling-tools/blob/f0cf90cdd06f1072e824b446f201c7469b9de5df/topicmodels/LDA/gibbs.py#L196-L204 | def perplexity(self):
"""
Compute perplexity for each sample.
"""
return samplers_lda.perplexity_comp(self.docid, self.tokens,
self.tt, self.dt, self.N,
self.K, self.samples) | [
"def",
"perplexity",
"(",
"self",
")",
":",
"return",
"samplers_lda",
".",
"perplexity_comp",
"(",
"self",
".",
"docid",
",",
"self",
".",
"tokens",
",",
"self",
".",
"tt",
",",
"self",
".",
"dt",
",",
"self",
".",
"N",
",",
"self",
".",
"K",
",",
"self",
".",
"samples",
")"
]
| Compute perplexity for each sample. | [
"Compute",
"perplexity",
"for",
"each",
"sample",
"."
]
| python | train |
minio/minio-py | minio/api.py | https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/api.py#L758-L838 | def put_object(self, bucket_name, object_name, data, length,
content_type='application/octet-stream',
metadata=None, sse=None, progress=None,
part_size=DEFAULT_PART_SIZE):
"""
Add a new object to the cloud storage server.
NOTE: Maximum object size supported by this API is 5TiB.
Examples:
file_stat = os.stat('hello.txt')
with open('hello.txt', 'rb') as data:
minio.put_object('foo', 'bar', data, file_stat.st_size, 'text/plain')
- For length lesser than 5MB put_object automatically
does single Put operation.
- For length larger than 5MB put_object automatically
does resumable multipart operation.
:param bucket_name: Bucket of new object.
:param object_name: Name of new object.
:param data: Contents to upload.
:param length: Total length of object.
:param content_type: mime type of object as a string.
:param metadata: Any additional metadata to be uploaded along
with your PUT request.
:param progress: A progress object
:param part_size: Multipart part size
:return: etag
"""
is_valid_sse_object(sse)
is_valid_bucket_name(bucket_name)
is_non_empty_string(object_name)
if progress:
if not isinstance(progress, Thread):
raise TypeError('Progress object should inherit the thread.')
# Set progress bar length and object name before upload
progress.set_meta(total_length=length, object_name=object_name)
if not callable(getattr(data, 'read')):
raise ValueError(
'Invalid input data does not implement a callable read() method')
if length > (part_size * MAX_MULTIPART_COUNT):
raise InvalidArgumentError('Part size * max_parts(10000) is '
' lesser than input length.')
if part_size < MIN_PART_SIZE:
raise InvalidArgumentError('Input part size is smaller '
' than allowed minimum of 5MiB.')
if part_size > MAX_PART_SIZE:
raise InvalidArgumentError('Input part size is bigger '
' than allowed maximum of 5GiB.')
if not metadata:
metadata = {}
metadata = amzprefix_user_metadata(metadata)
metadata['Content-Type'] = 'application/octet-stream' if \
not content_type else content_type
if length > part_size:
return self._stream_put_object(bucket_name, object_name,
data, length, metadata=metadata,
sse=sse, progress=progress,
part_size=part_size)
current_data = data.read(length)
if len(current_data) != length:
raise InvalidArgumentError(
'Could not read {} bytes from data to upload'.format(length)
)
return self._do_put_object(bucket_name, object_name,
current_data, len(current_data),
metadata=metadata, sse=sse,
progress=progress) | [
"def",
"put_object",
"(",
"self",
",",
"bucket_name",
",",
"object_name",
",",
"data",
",",
"length",
",",
"content_type",
"=",
"'application/octet-stream'",
",",
"metadata",
"=",
"None",
",",
"sse",
"=",
"None",
",",
"progress",
"=",
"None",
",",
"part_size",
"=",
"DEFAULT_PART_SIZE",
")",
":",
"is_valid_sse_object",
"(",
"sse",
")",
"is_valid_bucket_name",
"(",
"bucket_name",
")",
"is_non_empty_string",
"(",
"object_name",
")",
"if",
"progress",
":",
"if",
"not",
"isinstance",
"(",
"progress",
",",
"Thread",
")",
":",
"raise",
"TypeError",
"(",
"'Progress object should inherit the thread.'",
")",
"# Set progress bar length and object name before upload",
"progress",
".",
"set_meta",
"(",
"total_length",
"=",
"length",
",",
"object_name",
"=",
"object_name",
")",
"if",
"not",
"callable",
"(",
"getattr",
"(",
"data",
",",
"'read'",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Invalid input data does not implement a callable read() method'",
")",
"if",
"length",
">",
"(",
"part_size",
"*",
"MAX_MULTIPART_COUNT",
")",
":",
"raise",
"InvalidArgumentError",
"(",
"'Part size * max_parts(10000) is '",
"' lesser than input length.'",
")",
"if",
"part_size",
"<",
"MIN_PART_SIZE",
":",
"raise",
"InvalidArgumentError",
"(",
"'Input part size is smaller '",
"' than allowed minimum of 5MiB.'",
")",
"if",
"part_size",
">",
"MAX_PART_SIZE",
":",
"raise",
"InvalidArgumentError",
"(",
"'Input part size is bigger '",
"' than allowed maximum of 5GiB.'",
")",
"if",
"not",
"metadata",
":",
"metadata",
"=",
"{",
"}",
"metadata",
"=",
"amzprefix_user_metadata",
"(",
"metadata",
")",
"metadata",
"[",
"'Content-Type'",
"]",
"=",
"'application/octet-stream'",
"if",
"not",
"content_type",
"else",
"content_type",
"if",
"length",
">",
"part_size",
":",
"return",
"self",
".",
"_stream_put_object",
"(",
"bucket_name",
",",
"object_name",
",",
"data",
",",
"length",
",",
"metadata",
"=",
"metadata",
",",
"sse",
"=",
"sse",
",",
"progress",
"=",
"progress",
",",
"part_size",
"=",
"part_size",
")",
"current_data",
"=",
"data",
".",
"read",
"(",
"length",
")",
"if",
"len",
"(",
"current_data",
")",
"!=",
"length",
":",
"raise",
"InvalidArgumentError",
"(",
"'Could not read {} bytes from data to upload'",
".",
"format",
"(",
"length",
")",
")",
"return",
"self",
".",
"_do_put_object",
"(",
"bucket_name",
",",
"object_name",
",",
"current_data",
",",
"len",
"(",
"current_data",
")",
",",
"metadata",
"=",
"metadata",
",",
"sse",
"=",
"sse",
",",
"progress",
"=",
"progress",
")"
]
| Add a new object to the cloud storage server.
NOTE: Maximum object size supported by this API is 5TiB.
Examples:
file_stat = os.stat('hello.txt')
with open('hello.txt', 'rb') as data:
minio.put_object('foo', 'bar', data, file_stat.st_size, 'text/plain')
- For length lesser than 5MB put_object automatically
does single Put operation.
- For length larger than 5MB put_object automatically
does resumable multipart operation.
:param bucket_name: Bucket of new object.
:param object_name: Name of new object.
:param data: Contents to upload.
:param length: Total length of object.
:param content_type: mime type of object as a string.
:param metadata: Any additional metadata to be uploaded along
with your PUT request.
:param progress: A progress object
:param part_size: Multipart part size
:return: etag | [
"Add",
"a",
"new",
"object",
"to",
"the",
"cloud",
"storage",
"server",
"."
]
| python | train |
AkihikoITOH/capybara | capybara/virtualenv/lib/python2.7/site-packages/flask/app.py | https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/flask/app.py#L827-L837 | def save_session(self, session, response):
"""Saves the session if it needs updates. For the default
implementation, check :meth:`open_session`. Instead of overriding this
method we recommend replacing the :class:`session_interface`.
:param session: the session to be saved (a
:class:`~werkzeug.contrib.securecookie.SecureCookie`
object)
:param response: an instance of :attr:`response_class`
"""
return self.session_interface.save_session(self, session, response) | [
"def",
"save_session",
"(",
"self",
",",
"session",
",",
"response",
")",
":",
"return",
"self",
".",
"session_interface",
".",
"save_session",
"(",
"self",
",",
"session",
",",
"response",
")"
]
| Saves the session if it needs updates. For the default
implementation, check :meth:`open_session`. Instead of overriding this
method we recommend replacing the :class:`session_interface`.
:param session: the session to be saved (a
:class:`~werkzeug.contrib.securecookie.SecureCookie`
object)
:param response: an instance of :attr:`response_class` | [
"Saves",
"the",
"session",
"if",
"it",
"needs",
"updates",
".",
"For",
"the",
"default",
"implementation",
"check",
":",
"meth",
":",
"open_session",
".",
"Instead",
"of",
"overriding",
"this",
"method",
"we",
"recommend",
"replacing",
"the",
":",
"class",
":",
"session_interface",
"."
]
| python | test |
softlayer/softlayer-python | SoftLayer/CLI/ssl/add.py | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/ssl/add.py#L23-L42 | def cli(env, crt, csr, icc, key, notes):
"""Add and upload SSL certificate details."""
template = {
'intermediateCertificate': '',
'certificateSigningRequest': '',
'notes': notes,
}
template['certificate'] = open(crt).read()
template['privateKey'] = open(key).read()
if csr:
body = open(csr).read()
template['certificateSigningRequest'] = body
if icc:
body = open(icc).read()
template['intermediateCertificate'] = body
manager = SoftLayer.SSLManager(env.client)
manager.add_certificate(template) | [
"def",
"cli",
"(",
"env",
",",
"crt",
",",
"csr",
",",
"icc",
",",
"key",
",",
"notes",
")",
":",
"template",
"=",
"{",
"'intermediateCertificate'",
":",
"''",
",",
"'certificateSigningRequest'",
":",
"''",
",",
"'notes'",
":",
"notes",
",",
"}",
"template",
"[",
"'certificate'",
"]",
"=",
"open",
"(",
"crt",
")",
".",
"read",
"(",
")",
"template",
"[",
"'privateKey'",
"]",
"=",
"open",
"(",
"key",
")",
".",
"read",
"(",
")",
"if",
"csr",
":",
"body",
"=",
"open",
"(",
"csr",
")",
".",
"read",
"(",
")",
"template",
"[",
"'certificateSigningRequest'",
"]",
"=",
"body",
"if",
"icc",
":",
"body",
"=",
"open",
"(",
"icc",
")",
".",
"read",
"(",
")",
"template",
"[",
"'intermediateCertificate'",
"]",
"=",
"body",
"manager",
"=",
"SoftLayer",
".",
"SSLManager",
"(",
"env",
".",
"client",
")",
"manager",
".",
"add_certificate",
"(",
"template",
")"
]
| Add and upload SSL certificate details. | [
"Add",
"and",
"upload",
"SSL",
"certificate",
"details",
"."
]
| python | train |
cirruscluster/cirruscluster | cirruscluster/cluster/mapr.py | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/cluster/mapr.py#L365-L376 | def __StartMaster(self):
""" Starts a master node, configures it, and starts services. """
num_masters = len(self.cluster.get_instances_in_role("master", "running"))
assert(num_masters < 1)
logging.info( "waiting for masters to start")
if self.config.master_on_spot_instances:
self.__LaunchSpotMasterInstances()
else:
self.__LaunchOnDemandMasterInstances()
time.sleep(1)
self.__ConfigureMaster()
return True | [
"def",
"__StartMaster",
"(",
"self",
")",
":",
"num_masters",
"=",
"len",
"(",
"self",
".",
"cluster",
".",
"get_instances_in_role",
"(",
"\"master\"",
",",
"\"running\"",
")",
")",
"assert",
"(",
"num_masters",
"<",
"1",
")",
"logging",
".",
"info",
"(",
"\"waiting for masters to start\"",
")",
"if",
"self",
".",
"config",
".",
"master_on_spot_instances",
":",
"self",
".",
"__LaunchSpotMasterInstances",
"(",
")",
"else",
":",
"self",
".",
"__LaunchOnDemandMasterInstances",
"(",
")",
"time",
".",
"sleep",
"(",
"1",
")",
"self",
".",
"__ConfigureMaster",
"(",
")",
"return",
"True"
]
| Starts a master node, configures it, and starts services. | [
"Starts",
"a",
"master",
"node",
"configures",
"it",
"and",
"starts",
"services",
"."
]
| python | train |
globus/globus-cli | globus_cli/helpers/auth_flows.py | https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/helpers/auth_flows.py#L26-L62 | def do_link_auth_flow(session_params=None, force_new_client=False):
"""
Prompts the user with a link to authenticate with globus auth
and authorize the CLI to act on their behalf.
"""
session_params = session_params or {}
# get the ConfidentialApp client object
auth_client = internal_auth_client(
requires_instance=True, force_new_client=force_new_client
)
# start the Confidential App Grant flow
auth_client.oauth2_start_flow(
redirect_uri=auth_client.base_url + "v2/web/auth-code",
refresh_tokens=True,
requested_scopes=SCOPES,
)
# prompt
additional_params = {"prompt": "login"}
additional_params.update(session_params)
linkprompt = "Please authenticate with Globus here"
safeprint(
"{0}:\n{1}\n{2}\n{1}\n".format(
linkprompt,
"-" * len(linkprompt),
auth_client.oauth2_get_authorize_url(additional_params=additional_params),
)
)
# come back with auth code
auth_code = click.prompt("Enter the resulting Authorization Code here").strip()
# finish auth flow
exchange_code_and_store_config(auth_client, auth_code)
return True | [
"def",
"do_link_auth_flow",
"(",
"session_params",
"=",
"None",
",",
"force_new_client",
"=",
"False",
")",
":",
"session_params",
"=",
"session_params",
"or",
"{",
"}",
"# get the ConfidentialApp client object",
"auth_client",
"=",
"internal_auth_client",
"(",
"requires_instance",
"=",
"True",
",",
"force_new_client",
"=",
"force_new_client",
")",
"# start the Confidential App Grant flow",
"auth_client",
".",
"oauth2_start_flow",
"(",
"redirect_uri",
"=",
"auth_client",
".",
"base_url",
"+",
"\"v2/web/auth-code\"",
",",
"refresh_tokens",
"=",
"True",
",",
"requested_scopes",
"=",
"SCOPES",
",",
")",
"# prompt",
"additional_params",
"=",
"{",
"\"prompt\"",
":",
"\"login\"",
"}",
"additional_params",
".",
"update",
"(",
"session_params",
")",
"linkprompt",
"=",
"\"Please authenticate with Globus here\"",
"safeprint",
"(",
"\"{0}:\\n{1}\\n{2}\\n{1}\\n\"",
".",
"format",
"(",
"linkprompt",
",",
"\"-\"",
"*",
"len",
"(",
"linkprompt",
")",
",",
"auth_client",
".",
"oauth2_get_authorize_url",
"(",
"additional_params",
"=",
"additional_params",
")",
",",
")",
")",
"# come back with auth code",
"auth_code",
"=",
"click",
".",
"prompt",
"(",
"\"Enter the resulting Authorization Code here\"",
")",
".",
"strip",
"(",
")",
"# finish auth flow",
"exchange_code_and_store_config",
"(",
"auth_client",
",",
"auth_code",
")",
"return",
"True"
]
| Prompts the user with a link to authenticate with globus auth
and authorize the CLI to act on their behalf. | [
"Prompts",
"the",
"user",
"with",
"a",
"link",
"to",
"authenticate",
"with",
"globus",
"auth",
"and",
"authorize",
"the",
"CLI",
"to",
"act",
"on",
"their",
"behalf",
"."
]
| python | train |
teepark/greenhouse | greenhouse/emulation/__init__.py | https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/emulation/__init__.py#L26-L72 | def patched(module_name):
"""import and return a named module with patches applied locally only
this function returns a module after importing it in such a way that it
will operate cooperatively, but not overriding the module globally.
>>> green_httplib = patched("httplib")
>>> # using green_httplib will only block greenlets
>>> import httplib
>>> # using httplib will block threads/processes
>>> # both can exist simultaneously
:param module_name:
the module's name that is to be imported. this can be a dot-delimited
name, in which case the module at the end of the path is the one that
will be returned
:type module_name: str
:returns:
the module indicated by module_name, imported so that it will not block
globally, but also not touching existing global modules
"""
if module_name in _patchers:
return _patched_copy(module_name, _patchers[module_name])
# grab the unpatched version of the module for posterity
old_module = sys.modules.pop(module_name, None)
# apply all the standard library patches we have
saved = [(module_name, old_module)]
for name, patch in _patchers.iteritems():
new_mod = _patched_copy(name, patch)
saved.append((name, sys.modules.pop(name)))
sys.modules[name] = new_mod
try:
# import the requested module with patches in place
result = __import__(module_name, {}, {}, module_name.rsplit(".", 1)[0])
finally:
# put all the original modules back as they were
for name, old_mod in saved:
if old_mod is None:
sys.modules.pop(name, None)
else:
sys.modules[name] = old_mod
return result | [
"def",
"patched",
"(",
"module_name",
")",
":",
"if",
"module_name",
"in",
"_patchers",
":",
"return",
"_patched_copy",
"(",
"module_name",
",",
"_patchers",
"[",
"module_name",
"]",
")",
"# grab the unpatched version of the module for posterity",
"old_module",
"=",
"sys",
".",
"modules",
".",
"pop",
"(",
"module_name",
",",
"None",
")",
"# apply all the standard library patches we have",
"saved",
"=",
"[",
"(",
"module_name",
",",
"old_module",
")",
"]",
"for",
"name",
",",
"patch",
"in",
"_patchers",
".",
"iteritems",
"(",
")",
":",
"new_mod",
"=",
"_patched_copy",
"(",
"name",
",",
"patch",
")",
"saved",
".",
"append",
"(",
"(",
"name",
",",
"sys",
".",
"modules",
".",
"pop",
"(",
"name",
")",
")",
")",
"sys",
".",
"modules",
"[",
"name",
"]",
"=",
"new_mod",
"try",
":",
"# import the requested module with patches in place",
"result",
"=",
"__import__",
"(",
"module_name",
",",
"{",
"}",
",",
"{",
"}",
",",
"module_name",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
")",
"finally",
":",
"# put all the original modules back as they were",
"for",
"name",
",",
"old_mod",
"in",
"saved",
":",
"if",
"old_mod",
"is",
"None",
":",
"sys",
".",
"modules",
".",
"pop",
"(",
"name",
",",
"None",
")",
"else",
":",
"sys",
".",
"modules",
"[",
"name",
"]",
"=",
"old_mod",
"return",
"result"
]
| import and return a named module with patches applied locally only
this function returns a module after importing it in such a way that it
will operate cooperatively, but not overriding the module globally.
>>> green_httplib = patched("httplib")
>>> # using green_httplib will only block greenlets
>>> import httplib
>>> # using httplib will block threads/processes
>>> # both can exist simultaneously
:param module_name:
the module's name that is to be imported. this can be a dot-delimited
name, in which case the module at the end of the path is the one that
will be returned
:type module_name: str
:returns:
the module indicated by module_name, imported so that it will not block
globally, but also not touching existing global modules | [
"import",
"and",
"return",
"a",
"named",
"module",
"with",
"patches",
"applied",
"locally",
"only"
]
| python | train |
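The heart of the function above is the save/swap/import/restore dance on sys.modules. Here is a stripped-down sketch of just that dance, using only the standard library; `replacements` stands in for the patched module objects that greenhouse would build from its `_patchers` table.

import sys

def import_with_temporary_modules(module_name, replacements):
    # install stand-in modules, remembering whatever was there before
    saved = []
    for name, stand_in in replacements.items():
        saved.append((name, sys.modules.pop(name, None)))
        sys.modules[name] = stand_in
    try:
        # same __import__ call shape as patched() above
        return __import__(module_name, {}, {}, module_name.rsplit(".", 1)[0])
    finally:
        # put the original modules back exactly as they were
        for name, old_mod in saved:
            if old_mod is None:
                sys.modules.pop(name, None)
            else:
                sys.modules[name] = old_mod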
mushkevych/scheduler | synergy/workers/abstract_cli_worker.py | https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/workers/abstract_cli_worker.py#L27-L49 | def _poll_process(self):
""" between death of a process and its actual termination lies poorly documented requirement -
<purging process' io pipes and reading exit status>.
this can be done either by os.wait() or process.wait()
:return tuple (boolean: alive, int: return_code) """
try:
self.logger.warning(self.cli_process.stderr.read())
self.logger.info(self.cli_process.stdout.read())
return_code = self.cli_process.wait(timeout=0.01)
if return_code is None:
# process is already terminated
self.logger.info('Process {0} is terminated'.format(self.process_name))
else:
# process is terminated; possibly by OS
self.logger.info('Process {0} got terminated. Cleaning up'.format(self.process_name))
self.cli_process = None
return False, return_code
except TimeoutExpired:
# process is alive and OK
return True, None
except Exception:
self.logger.error('Exception on polling: {0}'.format(self.process_name), exc_info=True)
return False, 999 | [
"def",
"_poll_process",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"self",
".",
"cli_process",
".",
"stderr",
".",
"read",
"(",
")",
")",
"self",
".",
"logger",
".",
"info",
"(",
"self",
".",
"cli_process",
".",
"stdout",
".",
"read",
"(",
")",
")",
"return_code",
"=",
"self",
".",
"cli_process",
".",
"wait",
"(",
"timeout",
"=",
"0.01",
")",
"if",
"return_code",
"is",
"None",
":",
"# process is already terminated",
"self",
".",
"logger",
".",
"info",
"(",
"'Process {0} is terminated'",
".",
"format",
"(",
"self",
".",
"process_name",
")",
")",
"else",
":",
"# process is terminated; possibly by OS",
"self",
".",
"logger",
".",
"info",
"(",
"'Process {0} got terminated. Cleaning up'",
".",
"format",
"(",
"self",
".",
"process_name",
")",
")",
"self",
".",
"cli_process",
"=",
"None",
"return",
"False",
",",
"return_code",
"except",
"TimeoutExpired",
":",
"# process is alive and OK",
"return",
"True",
",",
"None",
"except",
"Exception",
":",
"self",
".",
"logger",
".",
"error",
"(",
"'Exception on polling: {0}'",
".",
"format",
"(",
"self",
".",
"process_name",
")",
",",
"exc_info",
"=",
"True",
")",
"return",
"False",
",",
"999"
]
| between death of a process and its actual termination lies poorly documented requirement -
<purging process' io pipes and reading exit status>.
this can be done either by os.wait() or process.wait()
:return tuple (boolean: alive, int: return_code) | [
"between",
"death",
"of",
"a",
"process",
"and",
"its",
"actual",
"termination",
"lies",
"poorly",
"documented",
"requirement",
"-",
"<purging",
"process",
"io",
"pipes",
"and",
"reading",
"exit",
"status",
">",
".",
"this",
"can",
"be",
"done",
"either",
"by",
"os",
".",
"wait",
"()",
"or",
"process",
".",
"wait",
"()",
":",
"return",
"tuple",
"(",
"boolean",
":",
"alive",
"int",
":",
"return_code",
")"
]
| python | train |
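The poll above boils down to subprocess.Popen.wait() with a tiny timeout: TimeoutExpired means the child is still alive, anything else reaps it and yields the exit status. A self-contained version of that check, assuming only the standard library and a POSIX `sleep` binary for the demo:

import subprocess
import time

def poll_once(proc):
    """Return (alive, return_code) using the wait-with-tiny-timeout trick."""
    try:
        return_code = proc.wait(timeout=0.01)   # also purges the exit status
    except subprocess.TimeoutExpired:
        return True, None                       # still running
    return False, return_code                   # exited

if __name__ == "__main__":
    proc = subprocess.Popen(["sleep", "1"])
    print(poll_once(proc))    # (True, None) while the child is still running
    time.sleep(1.1)
    print(poll_once(proc))    # (False, 0) once it has exited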
tanghaibao/jcvi | jcvi/apps/ks.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/ks.py#L412-L467 | def prepare(args):
"""
%prog prepare pairsfile cdsfile [pepfile] -o paired.cds.fasta
Pick sequences from cdsfile to form pairs, ready to be calculated. The
pairsfile can be generated from formats.blast.cscore(). The first two
columns contain the pair.
"""
from jcvi.formats.fasta import Fasta
p = OptionParser(prepare.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
outfile = opts.outfile
if len(args) == 2:
pairsfile, cdsfile = args
pepfile = None
elif len(args) == 3:
pairsfile, cdsfile, pepfile = args
else:
sys.exit(not p.print_help())
f = Fasta(cdsfile)
fp = open(pairsfile)
fw = must_open(outfile, "w")
if pepfile:
assert outfile != "stdout", "Please specify outfile name."
f2 = Fasta(pepfile)
fw2 = must_open(outfile + ".pep", "w")
for row in fp:
if row[0] == '#':
continue
a, b = row.split()[:2]
if a == b:
logging.debug("Self pairs found: {0} - {1}. Ignored".format(a, b))
continue
if a not in f:
a = find_first_isoform(a, f)
assert a, a
if b not in f:
b = find_first_isoform(b, f)
assert b, b
acds = f[a]
bcds = f[b]
SeqIO.write((acds, bcds), fw, "fasta")
if pepfile:
apep = f2[a]
bpep = f2[b]
SeqIO.write((apep, bpep), fw2, "fasta")
fw.close()
if pepfile:
fw2.close() | [
"def",
"prepare",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"formats",
".",
"fasta",
"import",
"Fasta",
"p",
"=",
"OptionParser",
"(",
"prepare",
".",
"__doc__",
")",
"p",
".",
"set_outfile",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"outfile",
"=",
"opts",
".",
"outfile",
"if",
"len",
"(",
"args",
")",
"==",
"2",
":",
"pairsfile",
",",
"cdsfile",
"=",
"args",
"pepfile",
"=",
"None",
"elif",
"len",
"(",
"args",
")",
"==",
"3",
":",
"pairsfile",
",",
"cdsfile",
",",
"pepfile",
"=",
"args",
"else",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"f",
"=",
"Fasta",
"(",
"cdsfile",
")",
"fp",
"=",
"open",
"(",
"pairsfile",
")",
"fw",
"=",
"must_open",
"(",
"outfile",
",",
"\"w\"",
")",
"if",
"pepfile",
":",
"assert",
"outfile",
"!=",
"\"stdout\"",
",",
"\"Please specify outfile name.\"",
"f2",
"=",
"Fasta",
"(",
"pepfile",
")",
"fw2",
"=",
"must_open",
"(",
"outfile",
"+",
"\".pep\"",
",",
"\"w\"",
")",
"for",
"row",
"in",
"fp",
":",
"if",
"row",
"[",
"0",
"]",
"==",
"'#'",
":",
"continue",
"a",
",",
"b",
"=",
"row",
".",
"split",
"(",
")",
"[",
":",
"2",
"]",
"if",
"a",
"==",
"b",
":",
"logging",
".",
"debug",
"(",
"\"Self pairs found: {0} - {1}. Ignored\"",
".",
"format",
"(",
"a",
",",
"b",
")",
")",
"continue",
"if",
"a",
"not",
"in",
"f",
":",
"a",
"=",
"find_first_isoform",
"(",
"a",
",",
"f",
")",
"assert",
"a",
",",
"a",
"if",
"b",
"not",
"in",
"f",
":",
"b",
"=",
"find_first_isoform",
"(",
"b",
",",
"f",
")",
"assert",
"b",
",",
"b",
"acds",
"=",
"f",
"[",
"a",
"]",
"bcds",
"=",
"f",
"[",
"b",
"]",
"SeqIO",
".",
"write",
"(",
"(",
"acds",
",",
"bcds",
")",
",",
"fw",
",",
"\"fasta\"",
")",
"if",
"pepfile",
":",
"apep",
"=",
"f2",
"[",
"a",
"]",
"bpep",
"=",
"f2",
"[",
"b",
"]",
"SeqIO",
".",
"write",
"(",
"(",
"apep",
",",
"bpep",
")",
",",
"fw2",
",",
"\"fasta\"",
")",
"fw",
".",
"close",
"(",
")",
"if",
"pepfile",
":",
"fw2",
".",
"close",
"(",
")"
]
| %prog prepare pairsfile cdsfile [pepfile] -o paired.cds.fasta
Pick sequences from cdsfile to form pairs, ready to be calculated. The
pairsfile can be generated from formats.blast.cscore(). The first two
columns contain the pair. | [
"%prog",
"prepare",
"pairsfile",
"cdsfile",
"[",
"pepfile",
"]",
"-",
"o",
"paired",
".",
"cds",
".",
"fasta"
]
| python | train |
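The pairing step above reduces to: load the CDS FASTA into a lookup table, read two IDs per line from the pairs file, and write each pair out as a two-record FASTA chunk. A minimal sketch with plain Biopython, leaving out the isoform fallback and the optional peptide file; `write_paired_cds` and its arguments are illustrative names, not part of jcvi.

from Bio import SeqIO

def write_paired_cds(pairsfile, cdsfile, outfile):
    cds = SeqIO.to_dict(SeqIO.parse(cdsfile, "fasta"))
    with open(pairsfile) as fp, open(outfile, "w") as fw:
        for row in fp:
            if row.startswith("#"):
                continue
            a, b = row.split()[:2]
            if a == b or a not in cds or b not in cds:
                continue   # prepare() above also tries an isoform lookup here
            SeqIO.write((cds[a], cds[b]), fw, "fasta")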
josuebrunel/yahoo-oauth | yahoo_oauth/yahoo_oauth.py | https://github.com/josuebrunel/yahoo-oauth/blob/40eff7809366850c46e1a3340469044f33cd1713/yahoo_oauth/yahoo_oauth.py#L147-L156 | def generate_oauth2_headers(self):
"""Generates header for oauth2
"""
encoded_credentials = base64.b64encode(('{0}:{1}'.format(self.consumer_key,self.consumer_secret)).encode('utf-8'))
headers={
'Authorization':'Basic {0}'.format(encoded_credentials.decode('utf-8')),
'Content-Type': 'application/x-www-form-urlencoded'
}
return headers | [
"def",
"generate_oauth2_headers",
"(",
"self",
")",
":",
"encoded_credentials",
"=",
"base64",
".",
"b64encode",
"(",
"(",
"'{0}:{1}'",
".",
"format",
"(",
"self",
".",
"consumer_key",
",",
"self",
".",
"consumer_secret",
")",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"headers",
"=",
"{",
"'Authorization'",
":",
"'Basic {0}'",
".",
"format",
"(",
"encoded_credentials",
".",
"decode",
"(",
"'utf-8'",
")",
")",
",",
"'Content-Type'",
":",
"'application/x-www-form-urlencoded'",
"}",
"return",
"headers"
]
| Generates header for oauth2 | [
"Generates",
"header",
"for",
"oauth2"
]
| python | valid |
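The header built above is ordinary HTTP Basic authentication: base64-encode "key:secret" and prefix it with "Basic ". A stand-alone, standard-library-only version with a made-up key/secret pair:

import base64

def basic_auth_headers(consumer_key, consumer_secret):
    credentials = "{0}:{1}".format(consumer_key, consumer_secret).encode("utf-8")
    encoded = base64.b64encode(credentials).decode("utf-8")
    return {
        "Authorization": "Basic {0}".format(encoded),
        "Content-Type": "application/x-www-form-urlencoded",
    }

print(basic_auth_headers("my-key", "my-secret")["Authorization"])
# Basic bXkta2V5Om15LXNlY3JldA==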
jepegit/cellpy | cellpy/readers/cellreader.py | https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/readers/cellreader.py#L3054-L3065 | def get_number_of_cycles(self, dataset_number=None, steptable=None):
"""Get the number of cycles in the test."""
if steptable is None:
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
d = self.datasets[dataset_number].dfdata
no_cycles = np.amax(d[self.headers_normal.cycle_index_txt])
else:
no_cycles = np.amax(steptable[self.headers_step_table.cycle])
return no_cycles | [
"def",
"get_number_of_cycles",
"(",
"self",
",",
"dataset_number",
"=",
"None",
",",
"steptable",
"=",
"None",
")",
":",
"if",
"steptable",
"is",
"None",
":",
"dataset_number",
"=",
"self",
".",
"_validate_dataset_number",
"(",
"dataset_number",
")",
"if",
"dataset_number",
"is",
"None",
":",
"self",
".",
"_report_empty_dataset",
"(",
")",
"return",
"d",
"=",
"self",
".",
"datasets",
"[",
"dataset_number",
"]",
".",
"dfdata",
"no_cycles",
"=",
"np",
".",
"amax",
"(",
"d",
"[",
"self",
".",
"headers_normal",
".",
"cycle_index_txt",
"]",
")",
"else",
":",
"no_cycles",
"=",
"np",
".",
"amax",
"(",
"steptable",
"[",
"self",
".",
"headers_step_table",
".",
"cycle",
"]",
")",
"return",
"no_cycles"
]
| Get the number of cycles in the test. | [
"Get",
"the",
"number",
"of",
"cycles",
"in",
"the",
"test",
"."
]
| python | train |
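The cycle count above is simply the maximum of the cycle-index column of whichever table is used (the raw data or the step table). A tiny stand-alone illustration with numpy; the column name is a placeholder, not cellpy's real header constant.

import numpy as np

data = {"cycle_index": np.array([1, 1, 2, 2, 3, 3, 3])}
no_cycles = np.amax(data["cycle_index"])   # highest cycle number seen
print(no_cycles)                           # 3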
marcotcr/lime | lime/lime_text.py | https://github.com/marcotcr/lime/blob/08133d47df00ed918e22005e0c98f6eefd5a1d71/lime/lime_text.py#L182-L199 | def _segment_with_tokens(text, tokens):
"""Segment a string around the tokens created by a passed-in tokenizer"""
list_form = []
text_ptr = 0
for token in tokens:
inter_token_string = []
while not text[text_ptr:].startswith(token):
inter_token_string.append(text[text_ptr])
text_ptr += 1
if text_ptr >= len(text):
raise ValueError("Tokenization produced tokens that do not belong in string!")
text_ptr += len(token)
if inter_token_string:
list_form.append(''.join(inter_token_string))
list_form.append(token)
if text_ptr < len(text):
list_form.append(text[text_ptr:])
return list_form | [
"def",
"_segment_with_tokens",
"(",
"text",
",",
"tokens",
")",
":",
"list_form",
"=",
"[",
"]",
"text_ptr",
"=",
"0",
"for",
"token",
"in",
"tokens",
":",
"inter_token_string",
"=",
"[",
"]",
"while",
"not",
"text",
"[",
"text_ptr",
":",
"]",
".",
"startswith",
"(",
"token",
")",
":",
"inter_token_string",
".",
"append",
"(",
"text",
"[",
"text_ptr",
"]",
")",
"text_ptr",
"+=",
"1",
"if",
"text_ptr",
">=",
"len",
"(",
"text",
")",
":",
"raise",
"ValueError",
"(",
"\"Tokenization produced tokens that do not belong in string!\"",
")",
"text_ptr",
"+=",
"len",
"(",
"token",
")",
"if",
"inter_token_string",
":",
"list_form",
".",
"append",
"(",
"''",
".",
"join",
"(",
"inter_token_string",
")",
")",
"list_form",
".",
"append",
"(",
"token",
")",
"if",
"text_ptr",
"<",
"len",
"(",
"text",
")",
":",
"list_form",
".",
"append",
"(",
"text",
"[",
"text_ptr",
":",
"]",
")",
"return",
"list_form"
]
| Segment a string around the tokens created by a passed-in tokenizer | [
"Segment",
"a",
"string",
"around",
"the",
"tokens",
"created",
"by",
"a",
"passed",
"-",
"in",
"tokenizer"
]
| python | train |
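A quick check of what the segmentation above produces, assuming the helper is callable as a plain function (in lime_text it lives inside a class as a static helper): tokens are kept verbatim and the text between them is preserved as separate list entries, so joining the list reproduces the original string.

text = "good movie, great acting!"
tokens = ["good", "movie", "great", "acting"]
segments = _segment_with_tokens(text, tokens)
print(segments)
# ['good', ' ', 'movie', ', ', 'great', ' ', 'acting', '!']
print("".join(segments) == text)   # True: nothing is lost in the round trip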
pypyr/pypyr-cli | pypyr/steps/dsl/fileinoutrewriter.py | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/dsl/fileinoutrewriter.py#L133-L141 | def run_step(self):
"""Write in to out, replacing strings per the replace_pairs."""
formatted_replacements = self.context.get_formatted_iterable(
self.replace_pairs)
iter = StreamReplacePairsRewriterStep.iter_replace_strings(
formatted_replacements)
rewriter = StreamRewriter(iter)
super().run_step(rewriter) | [
"def",
"run_step",
"(",
"self",
")",
":",
"formatted_replacements",
"=",
"self",
".",
"context",
".",
"get_formatted_iterable",
"(",
"self",
".",
"replace_pairs",
")",
"iter",
"=",
"StreamReplacePairsRewriterStep",
".",
"iter_replace_strings",
"(",
"formatted_replacements",
")",
"rewriter",
"=",
"StreamRewriter",
"(",
"iter",
")",
"super",
"(",
")",
".",
"run_step",
"(",
"rewriter",
")"
]
| Write in to out, replacing strings per the replace_pairs. | [
"Write",
"in",
"to",
"out",
"replacing",
"strings",
"per",
"the",
"replace_pairs",
"."
]
| python | train |
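The step above only wires things together: format the replace pairs, turn them into a line-rewriting iterator, and hand that to a StreamRewriter. Below is a guess at the shape of such an iterator factory, written stand-alone; the names and behaviour are illustrative, not pypyr's actual iter_replace_strings implementation.

def iter_replace_strings(replacements):
    def replace_iter(lines):
        for line in lines:
            for search, replace in replacements.items():
                line = line.replace(search, replace)
            yield line
    return replace_iter

rewrite = iter_replace_strings({"{env}": "prod", "{region}": "eu-west-1"})
print(list(rewrite(["deploy to {env} in {region}\n"])))
# ['deploy to prod in eu-west-1\n']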
hydpy-dev/hydpy | hydpy/auxs/armatools.py | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/armatools.py#L725-L740 | def plot(self, threshold=None, **kwargs):
"""Barplot of the ARMA response."""
try:
# Works under matplotlib 3.
pyplot.bar(x=self.ma.delays+.5, height=self.response,
width=1., fill=False, **kwargs)
except TypeError: # pragma: no cover
# Works under matplotlib 2.
pyplot.bar(left=self.ma.delays+.5, height=self.response,
width=1., fill=False, **kwargs)
pyplot.xlabel('time')
pyplot.ylabel('response')
if threshold is not None:
cumsum = numpy.cumsum(self.response)
idx = numpy.where(cumsum > threshold*cumsum[-1])[0][0]
pyplot.xlim(0., idx) | [
"def",
"plot",
"(",
"self",
",",
"threshold",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"# Works under matplotlib 3.",
"pyplot",
".",
"bar",
"(",
"x",
"=",
"self",
".",
"ma",
".",
"delays",
"+",
".5",
",",
"height",
"=",
"self",
".",
"response",
",",
"width",
"=",
"1.",
",",
"fill",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"except",
"TypeError",
":",
"# pragma: no cover",
"# Works under matplotlib 2.",
"pyplot",
".",
"bar",
"(",
"left",
"=",
"self",
".",
"ma",
".",
"delays",
"+",
".5",
",",
"height",
"=",
"self",
".",
"response",
",",
"width",
"=",
"1.",
",",
"fill",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"pyplot",
".",
"xlabel",
"(",
"'time'",
")",
"pyplot",
".",
"ylabel",
"(",
"'response'",
")",
"if",
"threshold",
"is",
"not",
"None",
":",
"cumsum",
"=",
"numpy",
".",
"cumsum",
"(",
"self",
".",
"response",
")",
"idx",
"=",
"numpy",
".",
"where",
"(",
"cumsum",
">",
"threshold",
"*",
"cumsum",
"[",
"-",
"1",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"pyplot",
".",
"xlim",
"(",
"0.",
",",
"idx",
")"
]
| Barplot of the ARMA response. | [
"Barplot",
"of",
"the",
"ARMA",
"response",
"."
]
| python | train |
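The try/except in the plot above is a matplotlib compatibility shim: the row treats bar(x=...) as the newer spelling and bar(left=...) as the matplotlib-2 one. The same shim, stand-alone with made-up data:

import numpy as np
from matplotlib import pyplot

delays = np.arange(5)
response = np.array([0.1, 0.4, 0.3, 0.15, 0.05])
try:
    pyplot.bar(x=delays + .5, height=response, width=1., fill=False)
except TypeError:   # older matplotlib only accepts left=
    pyplot.bar(left=delays + .5, height=response, width=1., fill=False)
pyplot.xlabel('time')
pyplot.ylabel('response')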
google/grr | grr/server/grr_response_server/console_utils.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/console_utils.py#L491-L538 | def ExportClientsByKeywords(keywords, filename, token=None):
r"""A script to export clients summaries selected by a keyword search.
This script does a client search for machines matching all of keywords and
writes a .csv summary of the results to filename. Multi-value fields are '\n'
separated.
Args:
keywords: a list of keywords to search for
filename: the name of the file to write to, will be replaced if already
present
token: datastore token.
"""
index = client_index.CreateClientIndex(token=token)
client_list = index.LookupClients(keywords)
logging.info("found %d clients", len(client_list))
if not client_list:
return
writer = csv.DictWriter([
u"client_id",
u"hostname",
u"last_seen",
u"os",
u"os_release",
u"os_version",
u"users",
u"ips",
u"macs",
])
writer.WriteHeader()
for client in aff4.FACTORY.MultiOpen(client_list, token=token):
s = client.Schema
writer.WriteRow({
u"client_id": client.urn.Basename(),
u"hostname": client.Get(s.HOSTNAME),
u"os": client.Get(s.SYSTEM),
u"os_release": client.Get(s.OS_RELEASE),
u"os_version": client.Get(s.OS_VERSION),
u"ips": client.Get(s.HOST_IPS),
u"macs": client.Get(s.MAC_ADDRESS),
u"users": "\n".join(client.Get(s.USERNAMES, [])),
u"last_seen": client.Get(s.PING),
})
with io.open(filename, "w") as csv_out:
csv_out.write(writer.Content()) | [
"def",
"ExportClientsByKeywords",
"(",
"keywords",
",",
"filename",
",",
"token",
"=",
"None",
")",
":",
"index",
"=",
"client_index",
".",
"CreateClientIndex",
"(",
"token",
"=",
"token",
")",
"client_list",
"=",
"index",
".",
"LookupClients",
"(",
"keywords",
")",
"logging",
".",
"info",
"(",
"\"found %d clients\"",
",",
"len",
"(",
"client_list",
")",
")",
"if",
"not",
"client_list",
":",
"return",
"writer",
"=",
"csv",
".",
"DictWriter",
"(",
"[",
"u\"client_id\"",
",",
"u\"hostname\"",
",",
"u\"last_seen\"",
",",
"u\"os\"",
",",
"u\"os_release\"",
",",
"u\"os_version\"",
",",
"u\"users\"",
",",
"u\"ips\"",
",",
"u\"macs\"",
",",
"]",
")",
"writer",
".",
"WriteHeader",
"(",
")",
"for",
"client",
"in",
"aff4",
".",
"FACTORY",
".",
"MultiOpen",
"(",
"client_list",
",",
"token",
"=",
"token",
")",
":",
"s",
"=",
"client",
".",
"Schema",
"writer",
".",
"WriteRow",
"(",
"{",
"u\"client_id\"",
":",
"client",
".",
"urn",
".",
"Basename",
"(",
")",
",",
"u\"hostname\"",
":",
"client",
".",
"Get",
"(",
"s",
".",
"HOSTNAME",
")",
",",
"u\"os\"",
":",
"client",
".",
"Get",
"(",
"s",
".",
"SYSTEM",
")",
",",
"u\"os_release\"",
":",
"client",
".",
"Get",
"(",
"s",
".",
"OS_RELEASE",
")",
",",
"u\"os_version\"",
":",
"client",
".",
"Get",
"(",
"s",
".",
"OS_VERSION",
")",
",",
"u\"ips\"",
":",
"client",
".",
"Get",
"(",
"s",
".",
"HOST_IPS",
")",
",",
"u\"macs\"",
":",
"client",
".",
"Get",
"(",
"s",
".",
"MAC_ADDRESS",
")",
",",
"u\"users\"",
":",
"\"\\n\"",
".",
"join",
"(",
"client",
".",
"Get",
"(",
"s",
".",
"USERNAMES",
",",
"[",
"]",
")",
")",
",",
"u\"last_seen\"",
":",
"client",
".",
"Get",
"(",
"s",
".",
"PING",
")",
",",
"}",
")",
"with",
"io",
".",
"open",
"(",
"filename",
",",
"\"w\"",
")",
"as",
"csv_out",
":",
"csv_out",
".",
"write",
"(",
"writer",
".",
"Content",
"(",
")",
")"
]
| r"""A script to export clients summaries selected by a keyword search.
This script does a client search for machines matching all of keywords and
writes a .csv summary of the results to filename. Multi-value fields are '\n'
separated.
Args:
keywords: a list of keywords to search for
filename: the name of the file to write to, will be replaced if already
present
token: datastore token. | [
"r",
"A",
"script",
"to",
"export",
"clients",
"summaries",
"selected",
"by",
"a",
"keyword",
"search",
"."
]
| python | train |
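The csv object above is GRR's own wrapper (WriteHeader/WriteRow/Content), not the standard library. The same export shape with plain csv.DictWriter looks roughly like this, where `clients` is assumed to be an iterable of already-extracted dicts keyed by the same column names:

import csv
import io

FIELDNAMES = ["client_id", "hostname", "last_seen", "os", "os_release",
              "os_version", "users", "ips", "macs"]

def write_client_summaries(clients, filename):
    with io.open(filename, "w", newline="") as csv_out:
        writer = csv.DictWriter(csv_out, fieldnames=FIELDNAMES)
        writer.writeheader()
        for client in clients:
            writer.writerow(client)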
Vito2015/pyextend | pyextend/core/log.py | https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/log.py#L192-L212 | def set_logger(name=None, filename=None, mode='a', level='NOTSET:NOTSET',
fmt=
'%(asctime)s %(filename)s:%(lineno)d [PID:%(process)-5d THD:%(thread)-5d %(levelname)-7s] %(message)s',
# fmt='[%(levelname)s] %(asctime)s %(message)s',
backup_count=5, limit=20480, when=None, with_filehandler=True):
"""Configure the global logger."""
level = level.split(':')
if len(level) == 1: # Both set to the same level
s_level = f_level = level[0]
else:
s_level = level[0] # StreamHandler log level
f_level = level[1] # FileHandler log level
init_logger(name=name)
add_streamhandler(s_level, fmt)
if with_filehandler:
add_filehandler(f_level, fmt, filename, mode, backup_count, limit, when)
# Import the common log functions for convenient
import_log_funcs() | [
"def",
"set_logger",
"(",
"name",
"=",
"None",
",",
"filename",
"=",
"None",
",",
"mode",
"=",
"'a'",
",",
"level",
"=",
"'NOTSET:NOTSET'",
",",
"fmt",
"=",
"'%(asctime)s %(filename)s:%(lineno)d [PID:%(process)-5d THD:%(thread)-5d %(levelname)-7s] %(message)s'",
",",
"# fmt='[%(levelname)s] %(asctime)s %(message)s',",
"backup_count",
"=",
"5",
",",
"limit",
"=",
"20480",
",",
"when",
"=",
"None",
",",
"with_filehandler",
"=",
"True",
")",
":",
"level",
"=",
"level",
".",
"split",
"(",
"':'",
")",
"if",
"len",
"(",
"level",
")",
"==",
"1",
":",
"# Both set to the same level",
"s_level",
"=",
"f_level",
"=",
"level",
"[",
"0",
"]",
"else",
":",
"s_level",
"=",
"level",
"[",
"0",
"]",
"# StreamHandler log level",
"f_level",
"=",
"level",
"[",
"1",
"]",
"# FileHandler log level",
"init_logger",
"(",
"name",
"=",
"name",
")",
"add_streamhandler",
"(",
"s_level",
",",
"fmt",
")",
"if",
"with_filehandler",
":",
"add_filehandler",
"(",
"f_level",
",",
"fmt",
",",
"filename",
",",
"mode",
",",
"backup_count",
",",
"limit",
",",
"when",
")",
"# Import the common log functions for convenient",
"import_log_funcs",
"(",
")"
]
| Configure the global logger. | [
"Configure",
"the",
"global",
"logger",
"."
]
| python | train |
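The 'NOTSET:NOTSET' string above packs two log levels into one value: the stream-handler level before the colon and the file-handler level after it, with a single value meaning "use it for both". That split, isolated:

def split_levels(level):
    parts = level.split(':')
    if len(parts) == 1:          # one value covers both handlers
        return parts[0], parts[0]
    return parts[0], parts[1]    # (stream level, file level)

print(split_levels('DEBUG'))          # ('DEBUG', 'DEBUG')
print(split_levels('INFO:WARNING'))   # ('INFO', 'WARNING')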
openstack/python-scciclient | scciclient/irmc/elcm.py | https://github.com/openstack/python-scciclient/blob/4585ce2f76853b9773fb190ca0cfff0aa04a7cf8/scciclient/irmc/elcm.py#L974-L1000 | def _create_raid_adapter_profile(irmc_info):
"""Attempt delete exist adapter then create new raid adapter on the server.
:param irmc_info: node info
:returns: result: a dict with following values:
{
'raid_config': <data of raid adapter profile>,
'warning': <warning message if there is>
}
"""
try:
# Attempt erase exist adapter on BM Server
elcm_profile_delete(irmc_info, PROFILE_RAID_CONFIG)
except ELCMProfileNotFound:
# Ignore this error as it's not an error in this case
pass
session = elcm_profile_create(irmc_info, PARAM_PATH_RAID_CONFIG)
# Monitoring currently session until done.
session_timeout = irmc_info.get('irmc_raid_session_timeout',
RAID_CONFIG_SESSION_TIMEOUT)
return _process_session_data(irmc_info, 'CONFIG_RAID',
session['Session']['Id'],
session_timeout) | [
"def",
"_create_raid_adapter_profile",
"(",
"irmc_info",
")",
":",
"try",
":",
"# Attempt erase exist adapter on BM Server",
"elcm_profile_delete",
"(",
"irmc_info",
",",
"PROFILE_RAID_CONFIG",
")",
"except",
"ELCMProfileNotFound",
":",
"# Ignore this error as it's not an error in this case",
"pass",
"session",
"=",
"elcm_profile_create",
"(",
"irmc_info",
",",
"PARAM_PATH_RAID_CONFIG",
")",
"# Monitoring currently session until done.",
"session_timeout",
"=",
"irmc_info",
".",
"get",
"(",
"'irmc_raid_session_timeout'",
",",
"RAID_CONFIG_SESSION_TIMEOUT",
")",
"return",
"_process_session_data",
"(",
"irmc_info",
",",
"'CONFIG_RAID'",
",",
"session",
"[",
"'Session'",
"]",
"[",
"'Id'",
"]",
",",
"session_timeout",
")"
]
| Attempt delete exist adapter then create new raid adapter on the server.
:param irmc_info: node info
:returns: result: a dict with following values:
{
'raid_config': <data of raid adapter profile>,
'warning': <warning message if there is>
} | [
"Attempt",
"delete",
"exist",
"adapter",
"then",
"create",
"new",
"raid",
"adapter",
"on",
"the",
"server",
"."
]
| python | train |
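The call above follows a delete-if-present, then create-and-poll pattern: a missing profile is not an error, and the new session is watched until it finishes or times out. A generic sketch of that pattern; every name below (delete_profile, create_profile, wait_for_session, ProfileNotFound) is a placeholder passed in by the caller, not scciclient's real API.

DEFAULT_SESSION_TIMEOUT = 1800   # placeholder default, in seconds

def recreate_profile(node_info, profile_name, delete_profile, create_profile,
                     wait_for_session, ProfileNotFound):
    try:
        delete_profile(node_info, profile_name)   # best-effort cleanup
    except ProfileNotFound:
        pass                                      # nothing to delete is fine
    session = create_profile(node_info, profile_name)
    timeout = node_info.get('session_timeout', DEFAULT_SESSION_TIMEOUT)
    return wait_for_session(node_info, session['Session']['Id'], timeout)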
apache/incubator-mxnet | python/mxnet/image/detection.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/image/detection.py#L235-L250 | def _check_satisfy_constraints(self, label, xmin, ymin, xmax, ymax, width, height):
"""Check if constrains are satisfied"""
if (xmax - xmin) * (ymax - ymin) < 2:
return False # only 1 pixel
x1 = float(xmin) / width
y1 = float(ymin) / height
x2 = float(xmax) / width
y2 = float(ymax) / height
object_areas = self._calculate_areas(label[:, 1:])
valid_objects = np.where(object_areas * width * height > 2)[0]
if valid_objects.size < 1:
return False
intersects = self._intersect(label[valid_objects, 1:], x1, y1, x2, y2)
coverages = self._calculate_areas(intersects) / object_areas[valid_objects]
coverages = coverages[np.where(coverages > 0)[0]]
return coverages.size > 0 and np.amin(coverages) > self.min_object_covered | [
"def",
"_check_satisfy_constraints",
"(",
"self",
",",
"label",
",",
"xmin",
",",
"ymin",
",",
"xmax",
",",
"ymax",
",",
"width",
",",
"height",
")",
":",
"if",
"(",
"xmax",
"-",
"xmin",
")",
"*",
"(",
"ymax",
"-",
"ymin",
")",
"<",
"2",
":",
"return",
"False",
"# only 1 pixel",
"x1",
"=",
"float",
"(",
"xmin",
")",
"/",
"width",
"y1",
"=",
"float",
"(",
"ymin",
")",
"/",
"height",
"x2",
"=",
"float",
"(",
"xmax",
")",
"/",
"width",
"y2",
"=",
"float",
"(",
"ymax",
")",
"/",
"height",
"object_areas",
"=",
"self",
".",
"_calculate_areas",
"(",
"label",
"[",
":",
",",
"1",
":",
"]",
")",
"valid_objects",
"=",
"np",
".",
"where",
"(",
"object_areas",
"*",
"width",
"*",
"height",
">",
"2",
")",
"[",
"0",
"]",
"if",
"valid_objects",
".",
"size",
"<",
"1",
":",
"return",
"False",
"intersects",
"=",
"self",
".",
"_intersect",
"(",
"label",
"[",
"valid_objects",
",",
"1",
":",
"]",
",",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
")",
"coverages",
"=",
"self",
".",
"_calculate_areas",
"(",
"intersects",
")",
"/",
"object_areas",
"[",
"valid_objects",
"]",
"coverages",
"=",
"coverages",
"[",
"np",
".",
"where",
"(",
"coverages",
">",
"0",
")",
"[",
"0",
"]",
"]",
"return",
"coverages",
".",
"size",
">",
"0",
"and",
"np",
".",
"amin",
"(",
"coverages",
")",
">",
"self",
".",
"min_object_covered"
]
| Check if constrains are satisfied | [
"Check",
"if",
"constrains",
"are",
"satisfied"
]
| python | train |
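The constraint check above asks one question: of the objects that overlap the crop at all, is the least-covered one still covered by more than min_object_covered? A stand-alone numpy version of that coverage test for boxes given as [x1, y1, x2, y2] rows in normalized coordinates (a simplification of the row's label layout):

import numpy as np

def min_coverage_ok(boxes, crop, min_object_covered=0.5):
    x1 = np.maximum(boxes[:, 0], crop[0])
    y1 = np.maximum(boxes[:, 1], crop[1])
    x2 = np.minimum(boxes[:, 2], crop[2])
    y2 = np.minimum(boxes[:, 3], crop[3])
    inter = np.clip(x2 - x1, 0, None) * np.clip(y2 - y1, 0, None)
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    coverages = inter / areas
    coverages = coverages[coverages > 0]          # ignore non-overlapping objects
    return coverages.size > 0 and np.amin(coverages) > min_object_covered

boxes = np.array([[0.1, 0.1, 0.4, 0.4], [0.6, 0.6, 0.9, 0.9]])
print(min_coverage_ok(boxes, crop=(0.0, 0.0, 0.5, 0.5)))
# True: the first box is fully inside the crop, the second never overlaps it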
SiLab-Bonn/pyBAR | pybar/analysis/analyze_raw_data.py | https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analyze_raw_data.py#L643-L921 | def interpret_word_table(self, analyzed_data_file=None, use_settings_from_file=True, fei4b=None):
'''Interprets the raw data word table of all given raw data files with the c++ library.
Creates the h5 output file and PDF plots.
Parameters
----------
analyzed_data_file : string
The file name of the output analyzed data file. If None, the output analyzed data file
specified during initialization is taken.
use_settings_from_file : boolean
True if the needed parameters should be extracted from the raw data file
fei4b : boolean
True if the raw data is from FE-I4B.
'''
logging.info('Interpreting raw data file(s): ' + (', ').join(self.files_dict.keys()))
if self._create_meta_word_index:
meta_word = np.empty((self._chunk_size,), dtype=dtype_from_descr(data_struct.MetaInfoWordTable))
self.interpreter.set_meta_data_word_index(meta_word)
self.interpreter.reset_event_variables()
self.interpreter.reset_counters()
self.meta_data = analysis_utils.combine_meta_data(self.files_dict, meta_data_v2=self.interpreter.meta_table_v2)
if self.meta_data is None or self.meta_data.shape[0] == 0:
raise analysis_utils.IncompleteInputError('Meta data is empty. Stopping interpretation.')
self.interpreter.set_meta_data(self.meta_data) # tell interpreter the word index per readout to be able to calculate the event number per read out
meta_data_size = self.meta_data.shape[0]
self.meta_event_index = np.zeros((meta_data_size,), dtype=[('metaEventIndex', np.uint64)]) # this array is filled by the interpreter and holds the event number per read out
self.interpreter.set_meta_event_data(self.meta_event_index) # tell the interpreter the data container to write the meta event index to
if self.scan_parameters is None:
self.histogram.set_no_scan_parameter()
else:
self.scan_parameter_index = analysis_utils.get_scan_parameters_index(self.scan_parameters) # an array that labels unique scan parameter combinations
self.histogram.add_scan_parameter(self.scan_parameter_index) # just add an index for the different scan parameter combinations
if self._create_cluster_size_hist: # Cluster size result histogram
self._cluster_size_hist = np.zeros(shape=(6, ), dtype=np.uint32)
if self._create_cluster_tot_hist: # Cluster tot/size result histogram
self._cluster_tot_hist = np.zeros(shape=(16, 6), dtype=np.uint32)
close_analyzed_data_file = False
if analyzed_data_file is not None: # if an output file name is specified create new file for analyzed data
if self.is_open(self.out_file_h5) and os.path.abspath(analyzed_data_file) == os.path.abspath(self.out_file_h5.filename):
out_file_h5 = self.out_file_h5
else:
# normalize path
analyzed_data_file = os.path.abspath(analyzed_data_file)
if os.path.splitext(analyzed_data_file)[1].lower() != ".h5":
analyzed_data_file = os.path.splitext(analyzed_data_file)[0] + ".h5"
out_file_h5 = tb.open_file(analyzed_data_file, mode="w", title="Interpreted FE-I4 raw data")
close_analyzed_data_file = True
elif self.is_open(self.out_file_h5):
out_file_h5 = self.out_file_h5
else:
out_file_h5 = None
tmp_out_file_h5 = self.out_file_h5
if not self.is_open(self.out_file_h5) and self.is_open(out_file_h5):
close_analyzed_data_file = False
tmp_out_file_h5 = out_file_h5
self.out_file_h5 = out_file_h5
if self.is_open(self.out_file_h5):
self._analyzed_data_file = self.out_file_h5.filename
else:
self._analyzed_data_file = None
if self._analyzed_data_file is not None:
if self._create_hit_table is True:
description = data_struct.HitInfoTable().columns.copy()
hit_table = self.out_file_h5.create_table(self.out_file_h5.root, name='Hits', description=description, title='hit_data', filters=self._filter_table, chunkshape=(self._chunk_size / 100,))
if self._create_meta_word_index is True:
meta_word_index_table = self.out_file_h5.create_table(self.out_file_h5.root, name='EventMetaData', description=data_struct.MetaInfoWordTable, title='event_meta_data', filters=self._filter_table, chunkshape=(self._chunk_size / 10,))
if self._create_cluster_table:
cluster_table = self.out_file_h5.create_table(self.out_file_h5.root, name='Cluster', description=data_struct.ClusterInfoTable, title='Cluster data', filters=self._filter_table, expectedrows=self._chunk_size)
if self._create_cluster_hit_table:
description = data_struct.ClusterHitInfoTable().columns.copy()
cluster_hit_table = self.out_file_h5.create_table(self.out_file_h5.root, name='ClusterHits', description=description, title='cluster_hit_data', filters=self._filter_table, expectedrows=self._chunk_size)
logging.info("Interpreting raw data...")
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=analysis_utils.get_total_n_data_words(self.files_dict), term_width=80)
progress_bar.start()
total_words = 0
for file_index, raw_data_file in enumerate(self.files_dict.keys()): # loop over all raw data files
self.interpreter.reset_meta_data_counter()
with tb.open_file(raw_data_file, mode="r") as in_file_h5:
if use_settings_from_file:
self._deduce_settings_from_file(in_file_h5)
else:
self.fei4b = fei4b
if self.interpreter.meta_table_v2:
index_start = in_file_h5.root.meta_data.read(field='index_start')
index_stop = in_file_h5.root.meta_data.read(field='index_stop')
else:
index_start = in_file_h5.root.meta_data.read(field='start_index')
index_stop = in_file_h5.root.meta_data.read(field='stop_index')
bad_word_index = set()
# Check for bad data
if self._correct_corrupted_data:
tw = 2147483648 # trigger word
dh = 15269888 # data header
is_fe_data_header = logical_and(is_fe_word, is_data_header)
found_first_trigger = False
readout_slices = np.column_stack((index_start, index_stop))
previous_prepend_data_headers = None
prepend_data_headers = None
last_good_readout_index = None
last_index_with_event_data = None
for read_out_index, (index_start, index_stop) in enumerate(readout_slices):
try:
raw_data = in_file_h5.root.raw_data.read(index_start, index_stop)
except OverflowError, e:
pass
except tb.exceptions.HDF5ExtError:
break
# previous data chunk had bad data, check for good data
if (index_start - 1) in bad_word_index:
bad_data, current_prepend_data_headers, _ , _ = check_bad_data(raw_data, prepend_data_headers=1, trig_count=None)
if bad_data:
bad_word_index = bad_word_index.union(range(index_start, index_stop))
else:
# logging.info("found good data in %s from index %d to %d (chunk %d, length %d)" % (in_file_h5.filename, index_start, index_stop, read_out_index, (index_stop - index_start)))
if last_good_readout_index + 1 == read_out_index - 1:
logging.warning("found bad data in %s from index %d to %d (chunk %d, length %d)" % (in_file_h5.filename, readout_slices[last_good_readout_index][1], readout_slices[read_out_index - 1][1], last_good_readout_index + 1, (readout_slices[read_out_index - 1][1] - readout_slices[last_good_readout_index][1])))
else:
logging.warning("found bad data in %s from index %d to %d (chunk %d to %d, length %d)" % (in_file_h5.filename, readout_slices[last_good_readout_index][1], readout_slices[read_out_index - 1][1], last_good_readout_index + 1, read_out_index - 1, (readout_slices[read_out_index - 1][1] - readout_slices[last_good_readout_index][1])))
previous_good_raw_data = in_file_h5.root.raw_data.read(readout_slices[last_good_readout_index][0], readout_slices[last_good_readout_index][1] - 1)
previous_bad_raw_data = in_file_h5.root.raw_data.read(readout_slices[last_good_readout_index][1] - 1, readout_slices[read_out_index - 1][1])
fixed_raw_data, _ = fix_raw_data(previous_bad_raw_data, lsb_byte=None)
fixed_raw_data = np.r_[previous_good_raw_data, fixed_raw_data, raw_data]
_, prepend_data_headers, n_triggers, n_dh = check_bad_data(fixed_raw_data, prepend_data_headers=previous_prepend_data_headers, trig_count=self.trig_count)
last_good_readout_index = read_out_index
if n_triggers != 0 or n_dh != 0:
last_index_with_event_data = read_out_index
last_event_data_prepend_data_headers = prepend_data_headers
fixed_previous_raw_data = np.r_[previous_good_raw_data, fixed_raw_data]
_, previous_prepend_data_headers, _ , _ = check_bad_data(fixed_previous_raw_data, prepend_data_headers=previous_prepend_data_headers, trig_count=self.trig_count)
# check for bad data
else:
# workaround for first data chunk, might have missing trigger in some rare cases (already fixed in firmware)
if read_out_index == 0 and (np.any(is_trigger_word(raw_data) >= 1) or np.any(is_fe_data_header(raw_data) >= 1)):
bad_data, current_prepend_data_headers, n_triggers , n_dh = check_bad_data(raw_data, prepend_data_headers=1, trig_count=None)
# check for full last event in data
if current_prepend_data_headers == self.trig_count:
current_prepend_data_headers = None
# usually check for bad data happens here
else:
bad_data, current_prepend_data_headers, n_triggers , n_dh = check_bad_data(raw_data, prepend_data_headers=prepend_data_headers, trig_count=self.trig_count)
# do additional check with follow up data chunk and decide whether current chunk is defect or not
if bad_data:
if read_out_index == 0:
fixed_raw_data_chunk, _ = fix_raw_data(raw_data, lsb_byte=None)
fixed_raw_data_list = [fixed_raw_data_chunk]
else:
previous_raw_data = in_file_h5.root.raw_data.read(*readout_slices[read_out_index - 1])
raw_data_with_previous_data_word = np.r_[previous_raw_data[-1], raw_data]
fixed_raw_data_chunk, _ = fix_raw_data(raw_data_with_previous_data_word, lsb_byte=None)
fixed_raw_data = np.r_[previous_raw_data[:-1], fixed_raw_data_chunk]
# last data word of chunk before broken chunk might be a trigger word or data header which cannot be recovered
fixed_raw_data_with_tw = np.r_[previous_raw_data[:-1], tw, fixed_raw_data_chunk]
fixed_raw_data_with_dh = np.r_[previous_raw_data[:-1], dh, fixed_raw_data_chunk]
fixed_raw_data_list = [fixed_raw_data, fixed_raw_data_with_tw, fixed_raw_data_with_dh]
bad_fixed_data, _, _ , _ = check_bad_data(fixed_raw_data_with_dh, prepend_data_headers=previous_prepend_data_headers, trig_count=self.trig_count)
bad_fixed_data = map(lambda data: check_bad_data(data, prepend_data_headers=previous_prepend_data_headers, trig_count=self.trig_count)[0], fixed_raw_data_list)
if not all(bad_fixed_data): # good fixed data
# last word in chunk before current chunk is also bad
if index_start != 0:
bad_word_index.add(index_start - 1)
# adding all word from current chunk
bad_word_index = bad_word_index.union(range(index_start, index_stop))
last_good_readout_index = read_out_index - 1
else:
# a previous chunk might be broken and the last data word becomes a trigger word, so do additional checks
if last_index_with_event_data and last_event_data_prepend_data_headers != read_out_index:
before_bad_raw_data = in_file_h5.root.raw_data.read(readout_slices[last_index_with_event_data - 1][0], readout_slices[last_index_with_event_data - 1][1] - 1)
previous_bad_raw_data = in_file_h5.root.raw_data.read(readout_slices[last_index_with_event_data][0] - 1, readout_slices[last_index_with_event_data][1])
fixed_raw_data, _ = fix_raw_data(previous_bad_raw_data, lsb_byte=None)
previous_good_raw_data = in_file_h5.root.raw_data.read(readout_slices[last_index_with_event_data][1], readout_slices[read_out_index - 1][1])
fixed_raw_data = np.r_[before_bad_raw_data, fixed_raw_data, previous_good_raw_data, raw_data]
bad_fixed_previous_data, current_prepend_data_headers, _, _ = check_bad_data(fixed_raw_data, prepend_data_headers=last_event_data_prepend_data_headers, trig_count=self.trig_count)
if not bad_fixed_previous_data:
logging.warning("found bad data in %s from index %d to %d (chunk %d, length %d)" % (in_file_h5.filename, readout_slices[last_index_with_event_data][0], readout_slices[last_index_with_event_data][1], last_index_with_event_data, (readout_slices[last_index_with_event_data][1] - readout_slices[last_index_with_event_data][0])))
bad_word_index = bad_word_index.union(range(readout_slices[last_index_with_event_data][0] - 1, readout_slices[last_index_with_event_data][1]))
else:
logging.warning("found bad data which cannot be corrected in %s from index %d to %d (chunk %d, length %d)" % (in_file_h5.filename, index_start, index_stop, read_out_index, (index_stop - index_start)))
else:
logging.warning("found bad data which cannot be corrected in %s from index %d to %d (chunk %d, length %d)" % (in_file_h5.filename, index_start, index_stop, read_out_index, (index_stop - index_start)))
if n_triggers != 0 or n_dh != 0:
last_index_with_event_data = read_out_index
last_event_data_prepend_data_headers = prepend_data_headers
if not bad_data or (bad_data and bad_fixed_data):
previous_prepend_data_headers = prepend_data_headers
prepend_data_headers = current_prepend_data_headers
consecutive_bad_words_list = consecutive(sorted(bad_word_index))
lsb_byte = None
# Loop over raw data in chunks
for word_index in range(0, in_file_h5.root.raw_data.shape[0], self._chunk_size): # loop over all words in the actual raw data file
try:
raw_data = in_file_h5.root.raw_data.read(word_index, word_index + self._chunk_size)
except OverflowError, e:
logging.error('%s: 2^31 xrange() limitation in 32-bit Python', e)
except tb.exceptions.HDF5ExtError:
logging.warning('Raw data file %s has missing raw data. Continue raw data analysis.', in_file_h5.filename)
break
total_words += raw_data.shape[0]
# fix bad data
if self._correct_corrupted_data:
# increase word shift for every bad data chunk in raw data chunk
word_shift = 0
chunk_indices = np.arange(word_index, word_index + self._chunk_size)
for consecutive_bad_word_indices in consecutive_bad_words_list:
selected_words = np.intersect1d(consecutive_bad_word_indices, chunk_indices, assume_unique=True)
if selected_words.shape[0]:
fixed_raw_data, lsb_byte = fix_raw_data(raw_data[selected_words - word_index - word_shift], lsb_byte=lsb_byte)
raw_data = np.r_[raw_data[:selected_words[0] - word_index - word_shift], fixed_raw_data, raw_data[selected_words[-1] - word_index + 1 - word_shift:]]
# check if last word of bad data chunk in current raw data chunk
if consecutive_bad_word_indices[-1] in selected_words:
lsb_byte = None
# word shift by removing data word at the beginning of each defect chunk
word_shift += 1
# bad data chunk is at the end of current raw data chunk
else:
break
self.interpreter.interpret_raw_data(raw_data) # interpret the raw data
# store remaining buffered event in the interpreter at the end of the last file
if file_index == len(self.files_dict.keys()) - 1 and word_index == range(0, in_file_h5.root.raw_data.shape[0], self._chunk_size)[-1]: # store hits of the latest event of the last file
self.interpreter.store_event()
hits = self.interpreter.get_hits()
if self.scan_parameters is not None:
nEventIndex = self.interpreter.get_n_meta_data_event()
self.histogram.add_meta_event_index(self.meta_event_index, nEventIndex)
if self.is_histogram_hits():
self.histogram_hits(hits)
if self.is_cluster_hits():
cluster_hits, clusters = self.cluster_hits(hits)
if self._create_cluster_hit_table:
cluster_hit_table.append(cluster_hits)
if self._create_cluster_table:
cluster_table.append(clusters)
if self._create_cluster_size_hist:
if clusters['size'].shape[0] > 0 and np.max(clusters['size']) + 1 > self._cluster_size_hist.shape[0]:
self._cluster_size_hist.resize(np.max(clusters['size']) + 1)
self._cluster_size_hist += fast_analysis_utils.hist_1d_index(clusters['size'], shape=self._cluster_size_hist.shape)
if self._create_cluster_tot_hist:
if clusters['tot'].shape[0] > 0 and np.max(clusters['tot']) + 1 > self._cluster_tot_hist.shape[0]:
self._cluster_tot_hist.resize((np.max(clusters['tot']) + 1, self._cluster_tot_hist.shape[1]))
if clusters['size'].shape[0] > 0 and np.max(clusters['size']) + 1 > self._cluster_tot_hist.shape[1]:
self._cluster_tot_hist.resize((self._cluster_tot_hist.shape[0], np.max(clusters['size']) + 1))
self._cluster_tot_hist += fast_analysis_utils.hist_2d_index(clusters['tot'], clusters['size'], shape=self._cluster_tot_hist.shape)
if self._analyzed_data_file is not None and self._create_hit_table:
hit_table.append(hits)
if self._analyzed_data_file is not None and self._create_meta_word_index:
size = self.interpreter.get_n_meta_data_word()
meta_word_index_table.append(meta_word[:size])
if total_words <= progress_bar.maxval: # Otherwise exception is thrown
progress_bar.update(total_words)
self.out_file_h5.flush()
progress_bar.finish()
self._create_additional_data()
if close_analyzed_data_file:
self.out_file_h5.close()
self.out_file_h5 = None
self.out_file_h5 = out_file_h5
if self.is_open(self.out_file_h5):
self._analyzed_data_file = self.out_file_h5.filename
else:
self._analyzed_data_file = None | [
"def",
"interpret_word_table",
"(",
"self",
",",
"analyzed_data_file",
"=",
"None",
",",
"use_settings_from_file",
"=",
"True",
",",
"fei4b",
"=",
"None",
")",
":",
"logging",
".",
"info",
"(",
"'Interpreting raw data file(s): '",
"+",
"(",
"', '",
")",
".",
"join",
"(",
"self",
".",
"files_dict",
".",
"keys",
"(",
")",
")",
")",
"if",
"self",
".",
"_create_meta_word_index",
":",
"meta_word",
"=",
"np",
".",
"empty",
"(",
"(",
"self",
".",
"_chunk_size",
",",
")",
",",
"dtype",
"=",
"dtype_from_descr",
"(",
"data_struct",
".",
"MetaInfoWordTable",
")",
")",
"self",
".",
"interpreter",
".",
"set_meta_data_word_index",
"(",
"meta_word",
")",
"self",
".",
"interpreter",
".",
"reset_event_variables",
"(",
")",
"self",
".",
"interpreter",
".",
"reset_counters",
"(",
")",
"self",
".",
"meta_data",
"=",
"analysis_utils",
".",
"combine_meta_data",
"(",
"self",
".",
"files_dict",
",",
"meta_data_v2",
"=",
"self",
".",
"interpreter",
".",
"meta_table_v2",
")",
"if",
"self",
".",
"meta_data",
"is",
"None",
"or",
"self",
".",
"meta_data",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
":",
"raise",
"analysis_utils",
".",
"IncompleteInputError",
"(",
"'Meta data is empty. Stopping interpretation.'",
")",
"self",
".",
"interpreter",
".",
"set_meta_data",
"(",
"self",
".",
"meta_data",
")",
"# tell interpreter the word index per readout to be able to calculate the event number per read out",
"meta_data_size",
"=",
"self",
".",
"meta_data",
".",
"shape",
"[",
"0",
"]",
"self",
".",
"meta_event_index",
"=",
"np",
".",
"zeros",
"(",
"(",
"meta_data_size",
",",
")",
",",
"dtype",
"=",
"[",
"(",
"'metaEventIndex'",
",",
"np",
".",
"uint64",
")",
"]",
")",
"# this array is filled by the interpreter and holds the event number per read out",
"self",
".",
"interpreter",
".",
"set_meta_event_data",
"(",
"self",
".",
"meta_event_index",
")",
"# tell the interpreter the data container to write the meta event index to",
"if",
"self",
".",
"scan_parameters",
"is",
"None",
":",
"self",
".",
"histogram",
".",
"set_no_scan_parameter",
"(",
")",
"else",
":",
"self",
".",
"scan_parameter_index",
"=",
"analysis_utils",
".",
"get_scan_parameters_index",
"(",
"self",
".",
"scan_parameters",
")",
"# a array that labels unique scan parameter combinations",
"self",
".",
"histogram",
".",
"add_scan_parameter",
"(",
"self",
".",
"scan_parameter_index",
")",
"# just add an index for the different scan parameter combinations",
"if",
"self",
".",
"_create_cluster_size_hist",
":",
"# Cluster size result histogram",
"self",
".",
"_cluster_size_hist",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"6",
",",
")",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
"if",
"self",
".",
"_create_cluster_tot_hist",
":",
"# Cluster tot/size result histogram",
"self",
".",
"_cluster_tot_hist",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"16",
",",
"6",
")",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
"close_analyzed_data_file",
"=",
"False",
"if",
"analyzed_data_file",
"is",
"not",
"None",
":",
"# if an output file name is specified create new file for analyzed data",
"if",
"self",
".",
"is_open",
"(",
"self",
".",
"out_file_h5",
")",
"and",
"os",
".",
"path",
".",
"abspath",
"(",
"analyzed_data_file",
")",
"==",
"os",
".",
"path",
".",
"abspath",
"(",
"self",
".",
"out_file_h5",
".",
"filename",
")",
":",
"out_file_h5",
"=",
"self",
".",
"out_file_h5",
"else",
":",
"# normalize path",
"analyzed_data_file",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"analyzed_data_file",
")",
"if",
"os",
".",
"path",
".",
"splitext",
"(",
"analyzed_data_file",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"!=",
"\".h5\"",
":",
"analyzed_data_file",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"analyzed_data_file",
")",
"[",
"0",
"]",
"+",
"\".h5\"",
"out_file_h5",
"=",
"tb",
".",
"open_file",
"(",
"analyzed_data_file",
",",
"mode",
"=",
"\"w\"",
",",
"title",
"=",
"\"Interpreted FE-I4 raw data\"",
")",
"close_analyzed_data_file",
"=",
"True",
"elif",
"self",
".",
"is_open",
"(",
"self",
".",
"out_file_h5",
")",
":",
"out_file_h5",
"=",
"self",
".",
"out_file_h5",
"else",
":",
"out_file_h5",
"=",
"None",
"tmp_out_file_h5",
"=",
"self",
".",
"out_file_h5",
"if",
"not",
"self",
".",
"is_open",
"(",
"self",
".",
"out_file_h5",
")",
"and",
"self",
".",
"is_open",
"(",
"out_file_h5",
")",
":",
"close_analyzed_data_file",
"=",
"False",
"tmp_out_file_h5",
"=",
"out_file_h5",
"self",
".",
"out_file_h5",
"=",
"out_file_h5",
"if",
"self",
".",
"is_open",
"(",
"self",
".",
"out_file_h5",
")",
":",
"self",
".",
"_analyzed_data_file",
"=",
"self",
".",
"out_file_h5",
".",
"filename",
"else",
":",
"self",
".",
"_analyzed_data_file",
"is",
"None",
"if",
"self",
".",
"_analyzed_data_file",
"is",
"not",
"None",
":",
"if",
"self",
".",
"_create_hit_table",
"is",
"True",
":",
"description",
"=",
"data_struct",
".",
"HitInfoTable",
"(",
")",
".",
"columns",
".",
"copy",
"(",
")",
"hit_table",
"=",
"self",
".",
"out_file_h5",
".",
"create_table",
"(",
"self",
".",
"out_file_h5",
".",
"root",
",",
"name",
"=",
"'Hits'",
",",
"description",
"=",
"description",
",",
"title",
"=",
"'hit_data'",
",",
"filters",
"=",
"self",
".",
"_filter_table",
",",
"chunkshape",
"=",
"(",
"self",
".",
"_chunk_size",
"/",
"100",
",",
")",
")",
"if",
"self",
".",
"_create_meta_word_index",
"is",
"True",
":",
"meta_word_index_table",
"=",
"self",
".",
"out_file_h5",
".",
"create_table",
"(",
"self",
".",
"out_file_h5",
".",
"root",
",",
"name",
"=",
"'EventMetaData'",
",",
"description",
"=",
"data_struct",
".",
"MetaInfoWordTable",
",",
"title",
"=",
"'event_meta_data'",
",",
"filters",
"=",
"self",
".",
"_filter_table",
",",
"chunkshape",
"=",
"(",
"self",
".",
"_chunk_size",
"/",
"10",
",",
")",
")",
"if",
"self",
".",
"_create_cluster_table",
":",
"cluster_table",
"=",
"self",
".",
"out_file_h5",
".",
"create_table",
"(",
"self",
".",
"out_file_h5",
".",
"root",
",",
"name",
"=",
"'Cluster'",
",",
"description",
"=",
"data_struct",
".",
"ClusterInfoTable",
",",
"title",
"=",
"'Cluster data'",
",",
"filters",
"=",
"self",
".",
"_filter_table",
",",
"expectedrows",
"=",
"self",
".",
"_chunk_size",
")",
"if",
"self",
".",
"_create_cluster_hit_table",
":",
"description",
"=",
"data_struct",
".",
"ClusterHitInfoTable",
"(",
")",
".",
"columns",
".",
"copy",
"(",
")",
"cluster_hit_table",
"=",
"self",
".",
"out_file_h5",
".",
"create_table",
"(",
"self",
".",
"out_file_h5",
".",
"root",
",",
"name",
"=",
"'ClusterHits'",
",",
"description",
"=",
"description",
",",
"title",
"=",
"'cluster_hit_data'",
",",
"filters",
"=",
"self",
".",
"_filter_table",
",",
"expectedrows",
"=",
"self",
".",
"_chunk_size",
")",
"logging",
".",
"info",
"(",
"\"Interpreting raw data...\"",
")",
"progress_bar",
"=",
"progressbar",
".",
"ProgressBar",
"(",
"widgets",
"=",
"[",
"''",
",",
"progressbar",
".",
"Percentage",
"(",
")",
",",
"' '",
",",
"progressbar",
".",
"Bar",
"(",
"marker",
"=",
"'*'",
",",
"left",
"=",
"'|'",
",",
"right",
"=",
"'|'",
")",
",",
"' '",
",",
"progressbar",
".",
"AdaptiveETA",
"(",
")",
"]",
",",
"maxval",
"=",
"analysis_utils",
".",
"get_total_n_data_words",
"(",
"self",
".",
"files_dict",
")",
",",
"term_width",
"=",
"80",
")",
"progress_bar",
".",
"start",
"(",
")",
"total_words",
"=",
"0",
"for",
"file_index",
",",
"raw_data_file",
"in",
"enumerate",
"(",
"self",
".",
"files_dict",
".",
"keys",
"(",
")",
")",
":",
"# loop over all raw data files",
"self",
".",
"interpreter",
".",
"reset_meta_data_counter",
"(",
")",
"with",
"tb",
".",
"open_file",
"(",
"raw_data_file",
",",
"mode",
"=",
"\"r\"",
")",
"as",
"in_file_h5",
":",
"if",
"use_settings_from_file",
":",
"self",
".",
"_deduce_settings_from_file",
"(",
"in_file_h5",
")",
"else",
":",
"self",
".",
"fei4b",
"=",
"fei4b",
"if",
"self",
".",
"interpreter",
".",
"meta_table_v2",
":",
"index_start",
"=",
"in_file_h5",
".",
"root",
".",
"meta_data",
".",
"read",
"(",
"field",
"=",
"'index_start'",
")",
"index_stop",
"=",
"in_file_h5",
".",
"root",
".",
"meta_data",
".",
"read",
"(",
"field",
"=",
"'index_stop'",
")",
"else",
":",
"index_start",
"=",
"in_file_h5",
".",
"root",
".",
"meta_data",
".",
"read",
"(",
"field",
"=",
"'start_index'",
")",
"index_stop",
"=",
"in_file_h5",
".",
"root",
".",
"meta_data",
".",
"read",
"(",
"field",
"=",
"'stop_index'",
")",
"bad_word_index",
"=",
"set",
"(",
")",
"# Check for bad data",
"if",
"self",
".",
"_correct_corrupted_data",
":",
"tw",
"=",
"2147483648",
"# trigger word",
"dh",
"=",
"15269888",
"# data header",
"is_fe_data_header",
"=",
"logical_and",
"(",
"is_fe_word",
",",
"is_data_header",
")",
"found_first_trigger",
"=",
"False",
"readout_slices",
"=",
"np",
".",
"column_stack",
"(",
"(",
"index_start",
",",
"index_stop",
")",
")",
"previous_prepend_data_headers",
"=",
"None",
"prepend_data_headers",
"=",
"None",
"last_good_readout_index",
"=",
"None",
"last_index_with_event_data",
"=",
"None",
"for",
"read_out_index",
",",
"(",
"index_start",
",",
"index_stop",
")",
"in",
"enumerate",
"(",
"readout_slices",
")",
":",
"try",
":",
"raw_data",
"=",
"in_file_h5",
".",
"root",
".",
"raw_data",
".",
"read",
"(",
"index_start",
",",
"index_stop",
")",
"except",
"OverflowError",
",",
"e",
":",
"pass",
"except",
"tb",
".",
"exceptions",
".",
"HDF5ExtError",
":",
"break",
"# previous data chunk had bad data, check for good data",
"if",
"(",
"index_start",
"-",
"1",
")",
"in",
"bad_word_index",
":",
"bad_data",
",",
"current_prepend_data_headers",
",",
"_",
",",
"_",
"=",
"check_bad_data",
"(",
"raw_data",
",",
"prepend_data_headers",
"=",
"1",
",",
"trig_count",
"=",
"None",
")",
"if",
"bad_data",
":",
"bad_word_index",
"=",
"bad_word_index",
".",
"union",
"(",
"range",
"(",
"index_start",
",",
"index_stop",
")",
")",
"else",
":",
"# logging.info(\"found good data in %s from index %d to %d (chunk %d, length %d)\" % (in_file_h5.filename, index_start, index_stop, read_out_index, (index_stop - index_start)))",
"if",
"last_good_readout_index",
"+",
"1",
"==",
"read_out_index",
"-",
"1",
":",
"logging",
".",
"warning",
"(",
"\"found bad data in %s from index %d to %d (chunk %d, length %d)\"",
"%",
"(",
"in_file_h5",
".",
"filename",
",",
"readout_slices",
"[",
"last_good_readout_index",
"]",
"[",
"1",
"]",
",",
"readout_slices",
"[",
"read_out_index",
"-",
"1",
"]",
"[",
"1",
"]",
",",
"last_good_readout_index",
"+",
"1",
",",
"(",
"readout_slices",
"[",
"read_out_index",
"-",
"1",
"]",
"[",
"1",
"]",
"-",
"readout_slices",
"[",
"last_good_readout_index",
"]",
"[",
"1",
"]",
")",
")",
")",
"else",
":",
"logging",
".",
"warning",
"(",
"\"found bad data in %s from index %d to %d (chunk %d to %d, length %d)\"",
"%",
"(",
"in_file_h5",
".",
"filename",
",",
"readout_slices",
"[",
"last_good_readout_index",
"]",
"[",
"1",
"]",
",",
"readout_slices",
"[",
"read_out_index",
"-",
"1",
"]",
"[",
"1",
"]",
",",
"last_good_readout_index",
"+",
"1",
",",
"read_out_index",
"-",
"1",
",",
"(",
"readout_slices",
"[",
"read_out_index",
"-",
"1",
"]",
"[",
"1",
"]",
"-",
"readout_slices",
"[",
"last_good_readout_index",
"]",
"[",
"1",
"]",
")",
")",
")",
"previous_good_raw_data",
"=",
"in_file_h5",
".",
"root",
".",
"raw_data",
".",
"read",
"(",
"readout_slices",
"[",
"last_good_readout_index",
"]",
"[",
"0",
"]",
",",
"readout_slices",
"[",
"last_good_readout_index",
"]",
"[",
"1",
"]",
"-",
"1",
")",
"previous_bad_raw_data",
"=",
"in_file_h5",
".",
"root",
".",
"raw_data",
".",
"read",
"(",
"readout_slices",
"[",
"last_good_readout_index",
"]",
"[",
"1",
"]",
"-",
"1",
",",
"readout_slices",
"[",
"read_out_index",
"-",
"1",
"]",
"[",
"1",
"]",
")",
"fixed_raw_data",
",",
"_",
"=",
"fix_raw_data",
"(",
"previous_bad_raw_data",
",",
"lsb_byte",
"=",
"None",
")",
"fixed_raw_data",
"=",
"np",
".",
"r_",
"[",
"previous_good_raw_data",
",",
"fixed_raw_data",
",",
"raw_data",
"]",
"_",
",",
"prepend_data_headers",
",",
"n_triggers",
",",
"n_dh",
"=",
"check_bad_data",
"(",
"fixed_raw_data",
",",
"prepend_data_headers",
"=",
"previous_prepend_data_headers",
",",
"trig_count",
"=",
"self",
".",
"trig_count",
")",
"last_good_readout_index",
"=",
"read_out_index",
"if",
"n_triggers",
"!=",
"0",
"or",
"n_dh",
"!=",
"0",
":",
"last_index_with_event_data",
"=",
"read_out_index",
"last_event_data_prepend_data_headers",
"=",
"prepend_data_headers",
"fixed_previous_raw_data",
"=",
"np",
".",
"r_",
"[",
"previous_good_raw_data",
",",
"fixed_raw_data",
"]",
"_",
",",
"previous_prepend_data_headers",
",",
"_",
",",
"_",
"=",
"check_bad_data",
"(",
"fixed_previous_raw_data",
",",
"prepend_data_headers",
"=",
"previous_prepend_data_headers",
",",
"trig_count",
"=",
"self",
".",
"trig_count",
")",
"# check for bad data",
"else",
":",
"# workaround for first data chunk, might have missing trigger in some rare cases (already fixed in firmware)",
"if",
"read_out_index",
"==",
"0",
"and",
"(",
"np",
".",
"any",
"(",
"is_trigger_word",
"(",
"raw_data",
")",
">=",
"1",
")",
"or",
"np",
".",
"any",
"(",
"is_fe_data_header",
"(",
"raw_data",
")",
">=",
"1",
")",
")",
":",
"bad_data",
",",
"current_prepend_data_headers",
",",
"n_triggers",
",",
"n_dh",
"=",
"check_bad_data",
"(",
"raw_data",
",",
"prepend_data_headers",
"=",
"1",
",",
"trig_count",
"=",
"None",
")",
"# check for full last event in data",
"if",
"current_prepend_data_headers",
"==",
"self",
".",
"trig_count",
":",
"current_prepend_data_headers",
"=",
"None",
"# usually check for bad data happens here",
"else",
":",
"bad_data",
",",
"current_prepend_data_headers",
",",
"n_triggers",
",",
"n_dh",
"=",
"check_bad_data",
"(",
"raw_data",
",",
"prepend_data_headers",
"=",
"prepend_data_headers",
",",
"trig_count",
"=",
"self",
".",
"trig_count",
")",
"# do additional check with follow up data chunk and decide whether current chunk is defect or not",
"if",
"bad_data",
":",
"if",
"read_out_index",
"==",
"0",
":",
"fixed_raw_data_chunk",
",",
"_",
"=",
"fix_raw_data",
"(",
"raw_data",
",",
"lsb_byte",
"=",
"None",
")",
"fixed_raw_data_list",
"=",
"[",
"fixed_raw_data_chunk",
"]",
"else",
":",
"previous_raw_data",
"=",
"in_file_h5",
".",
"root",
".",
"raw_data",
".",
"read",
"(",
"*",
"readout_slices",
"[",
"read_out_index",
"-",
"1",
"]",
")",
"raw_data_with_previous_data_word",
"=",
"np",
".",
"r_",
"[",
"previous_raw_data",
"[",
"-",
"1",
"]",
",",
"raw_data",
"]",
"fixed_raw_data_chunk",
",",
"_",
"=",
"fix_raw_data",
"(",
"raw_data_with_previous_data_word",
",",
"lsb_byte",
"=",
"None",
")",
"fixed_raw_data",
"=",
"np",
".",
"r_",
"[",
"previous_raw_data",
"[",
":",
"-",
"1",
"]",
",",
"fixed_raw_data_chunk",
"]",
"# last data word of chunk before broken chunk migh be a trigger word or data header which cannot be recovered",
"fixed_raw_data_with_tw",
"=",
"np",
".",
"r_",
"[",
"previous_raw_data",
"[",
":",
"-",
"1",
"]",
",",
"tw",
",",
"fixed_raw_data_chunk",
"]",
"fixed_raw_data_with_dh",
"=",
"np",
".",
"r_",
"[",
"previous_raw_data",
"[",
":",
"-",
"1",
"]",
",",
"dh",
",",
"fixed_raw_data_chunk",
"]",
"fixed_raw_data_list",
"=",
"[",
"fixed_raw_data",
",",
"fixed_raw_data_with_tw",
",",
"fixed_raw_data_with_dh",
"]",
"bad_fixed_data",
",",
"_",
",",
"_",
",",
"_",
"=",
"check_bad_data",
"(",
"fixed_raw_data_with_dh",
",",
"prepend_data_headers",
"=",
"previous_prepend_data_headers",
",",
"trig_count",
"=",
"self",
".",
"trig_count",
")",
"bad_fixed_data",
"=",
"map",
"(",
"lambda",
"data",
":",
"check_bad_data",
"(",
"data",
",",
"prepend_data_headers",
"=",
"previous_prepend_data_headers",
",",
"trig_count",
"=",
"self",
".",
"trig_count",
")",
"[",
"0",
"]",
",",
"fixed_raw_data_list",
")",
"if",
"not",
"all",
"(",
"bad_fixed_data",
")",
":",
"# good fixed data",
"# last word in chunk before currrent chunk is also bad",
"if",
"index_start",
"!=",
"0",
":",
"bad_word_index",
".",
"add",
"(",
"index_start",
"-",
"1",
")",
"# adding all word from current chunk",
"bad_word_index",
"=",
"bad_word_index",
".",
"union",
"(",
"range",
"(",
"index_start",
",",
"index_stop",
")",
")",
"last_good_readout_index",
"=",
"read_out_index",
"-",
"1",
"else",
":",
"# a previous chunk might be broken and the last data word becomes a trigger word, so do additional checks",
"if",
"last_index_with_event_data",
"and",
"last_event_data_prepend_data_headers",
"!=",
"read_out_index",
":",
"before_bad_raw_data",
"=",
"in_file_h5",
".",
"root",
".",
"raw_data",
".",
"read",
"(",
"readout_slices",
"[",
"last_index_with_event_data",
"-",
"1",
"]",
"[",
"0",
"]",
",",
"readout_slices",
"[",
"last_index_with_event_data",
"-",
"1",
"]",
"[",
"1",
"]",
"-",
"1",
")",
"previous_bad_raw_data",
"=",
"in_file_h5",
".",
"root",
".",
"raw_data",
".",
"read",
"(",
"readout_slices",
"[",
"last_index_with_event_data",
"]",
"[",
"0",
"]",
"-",
"1",
",",
"readout_slices",
"[",
"last_index_with_event_data",
"]",
"[",
"1",
"]",
")",
"fixed_raw_data",
",",
"_",
"=",
"fix_raw_data",
"(",
"previous_bad_raw_data",
",",
"lsb_byte",
"=",
"None",
")",
"previous_good_raw_data",
"=",
"in_file_h5",
".",
"root",
".",
"raw_data",
".",
"read",
"(",
"readout_slices",
"[",
"last_index_with_event_data",
"]",
"[",
"1",
"]",
",",
"readout_slices",
"[",
"read_out_index",
"-",
"1",
"]",
"[",
"1",
"]",
")",
"fixed_raw_data",
"=",
"np",
".",
"r_",
"[",
"before_bad_raw_data",
",",
"fixed_raw_data",
",",
"previous_good_raw_data",
",",
"raw_data",
"]",
"bad_fixed_previous_data",
",",
"current_prepend_data_headers",
",",
"_",
",",
"_",
"=",
"check_bad_data",
"(",
"fixed_raw_data",
",",
"prepend_data_headers",
"=",
"last_event_data_prepend_data_headers",
",",
"trig_count",
"=",
"self",
".",
"trig_count",
")",
"if",
"not",
"bad_fixed_previous_data",
":",
"logging",
".",
"warning",
"(",
"\"found bad data in %s from index %d to %d (chunk %d, length %d)\"",
"%",
"(",
"in_file_h5",
".",
"filename",
",",
"readout_slices",
"[",
"last_index_with_event_data",
"]",
"[",
"0",
"]",
",",
"readout_slices",
"[",
"last_index_with_event_data",
"]",
"[",
"1",
"]",
",",
"last_index_with_event_data",
",",
"(",
"readout_slices",
"[",
"last_index_with_event_data",
"]",
"[",
"1",
"]",
"-",
"readout_slices",
"[",
"last_index_with_event_data",
"]",
"[",
"0",
"]",
")",
")",
")",
"bad_word_index",
"=",
"bad_word_index",
".",
"union",
"(",
"range",
"(",
"readout_slices",
"[",
"last_index_with_event_data",
"]",
"[",
"0",
"]",
"-",
"1",
",",
"readout_slices",
"[",
"last_index_with_event_data",
"]",
"[",
"1",
"]",
")",
")",
"else",
":",
"logging",
".",
"warning",
"(",
"\"found bad data which cannot be corrected in %s from index %d to %d (chunk %d, length %d)\"",
"%",
"(",
"in_file_h5",
".",
"filename",
",",
"index_start",
",",
"index_stop",
",",
"read_out_index",
",",
"(",
"index_stop",
"-",
"index_start",
")",
")",
")",
"else",
":",
"logging",
".",
"warning",
"(",
"\"found bad data which cannot be corrected in %s from index %d to %d (chunk %d, length %d)\"",
"%",
"(",
"in_file_h5",
".",
"filename",
",",
"index_start",
",",
"index_stop",
",",
"read_out_index",
",",
"(",
"index_stop",
"-",
"index_start",
")",
")",
")",
"if",
"n_triggers",
"!=",
"0",
"or",
"n_dh",
"!=",
"0",
":",
"last_index_with_event_data",
"=",
"read_out_index",
"last_event_data_prepend_data_headers",
"=",
"prepend_data_headers",
"if",
"not",
"bad_data",
"or",
"(",
"bad_data",
"and",
"bad_fixed_data",
")",
":",
"previous_prepend_data_headers",
"=",
"prepend_data_headers",
"prepend_data_headers",
"=",
"current_prepend_data_headers",
"consecutive_bad_words_list",
"=",
"consecutive",
"(",
"sorted",
"(",
"bad_word_index",
")",
")",
"lsb_byte",
"=",
"None",
"# Loop over raw data in chunks",
"for",
"word_index",
"in",
"range",
"(",
"0",
",",
"in_file_h5",
".",
"root",
".",
"raw_data",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"_chunk_size",
")",
":",
"# loop over all words in the actual raw data file",
"try",
":",
"raw_data",
"=",
"in_file_h5",
".",
"root",
".",
"raw_data",
".",
"read",
"(",
"word_index",
",",
"word_index",
"+",
"self",
".",
"_chunk_size",
")",
"except",
"OverflowError",
",",
"e",
":",
"logging",
".",
"error",
"(",
"'%s: 2^31 xrange() limitation in 32-bit Python'",
",",
"e",
")",
"except",
"tb",
".",
"exceptions",
".",
"HDF5ExtError",
":",
"logging",
".",
"warning",
"(",
"'Raw data file %s has missing raw data. Continue raw data analysis.'",
",",
"in_file_h5",
".",
"filename",
")",
"break",
"total_words",
"+=",
"raw_data",
".",
"shape",
"[",
"0",
"]",
"# fix bad data",
"if",
"self",
".",
"_correct_corrupted_data",
":",
"# increase word shift for every bad data chunk in raw data chunk",
"word_shift",
"=",
"0",
"chunk_indices",
"=",
"np",
".",
"arange",
"(",
"word_index",
",",
"word_index",
"+",
"self",
".",
"_chunk_size",
")",
"for",
"consecutive_bad_word_indices",
"in",
"consecutive_bad_words_list",
":",
"selected_words",
"=",
"np",
".",
"intersect1d",
"(",
"consecutive_bad_word_indices",
",",
"chunk_indices",
",",
"assume_unique",
"=",
"True",
")",
"if",
"selected_words",
".",
"shape",
"[",
"0",
"]",
":",
"fixed_raw_data",
",",
"lsb_byte",
"=",
"fix_raw_data",
"(",
"raw_data",
"[",
"selected_words",
"-",
"word_index",
"-",
"word_shift",
"]",
",",
"lsb_byte",
"=",
"lsb_byte",
")",
"raw_data",
"=",
"np",
".",
"r_",
"[",
"raw_data",
"[",
":",
"selected_words",
"[",
"0",
"]",
"-",
"word_index",
"-",
"word_shift",
"]",
",",
"fixed_raw_data",
",",
"raw_data",
"[",
"selected_words",
"[",
"-",
"1",
"]",
"-",
"word_index",
"+",
"1",
"-",
"word_shift",
":",
"]",
"]",
"# check if last word of bad data chunk in current raw data chunk",
"if",
"consecutive_bad_word_indices",
"[",
"-",
"1",
"]",
"in",
"selected_words",
":",
"lsb_byte",
"=",
"None",
"# word shift by removing data word at the beginning of each defect chunk",
"word_shift",
"+=",
"1",
"# bad data chunk is at the end of current raw data chunk",
"else",
":",
"break",
"self",
".",
"interpreter",
".",
"interpret_raw_data",
"(",
"raw_data",
")",
"# interpret the raw data",
"# store remaining buffered event in the interpreter at the end of the last file",
"if",
"file_index",
"==",
"len",
"(",
"self",
".",
"files_dict",
".",
"keys",
"(",
")",
")",
"-",
"1",
"and",
"word_index",
"==",
"range",
"(",
"0",
",",
"in_file_h5",
".",
"root",
".",
"raw_data",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"_chunk_size",
")",
"[",
"-",
"1",
"]",
":",
"# store hits of the latest event of the last file",
"self",
".",
"interpreter",
".",
"store_event",
"(",
")",
"hits",
"=",
"self",
".",
"interpreter",
".",
"get_hits",
"(",
")",
"if",
"self",
".",
"scan_parameters",
"is",
"not",
"None",
":",
"nEventIndex",
"=",
"self",
".",
"interpreter",
".",
"get_n_meta_data_event",
"(",
")",
"self",
".",
"histogram",
".",
"add_meta_event_index",
"(",
"self",
".",
"meta_event_index",
",",
"nEventIndex",
")",
"if",
"self",
".",
"is_histogram_hits",
"(",
")",
":",
"self",
".",
"histogram_hits",
"(",
"hits",
")",
"if",
"self",
".",
"is_cluster_hits",
"(",
")",
":",
"cluster_hits",
",",
"clusters",
"=",
"self",
".",
"cluster_hits",
"(",
"hits",
")",
"if",
"self",
".",
"_create_cluster_hit_table",
":",
"cluster_hit_table",
".",
"append",
"(",
"cluster_hits",
")",
"if",
"self",
".",
"_create_cluster_table",
":",
"cluster_table",
".",
"append",
"(",
"clusters",
")",
"if",
"self",
".",
"_create_cluster_size_hist",
":",
"if",
"clusters",
"[",
"'size'",
"]",
".",
"shape",
"[",
"0",
"]",
">",
"0",
"and",
"np",
".",
"max",
"(",
"clusters",
"[",
"'size'",
"]",
")",
"+",
"1",
">",
"self",
".",
"_cluster_size_hist",
".",
"shape",
"[",
"0",
"]",
":",
"self",
".",
"_cluster_size_hist",
".",
"resize",
"(",
"np",
".",
"max",
"(",
"clusters",
"[",
"'size'",
"]",
")",
"+",
"1",
")",
"self",
".",
"_cluster_size_hist",
"+=",
"fast_analysis_utils",
".",
"hist_1d_index",
"(",
"clusters",
"[",
"'size'",
"]",
",",
"shape",
"=",
"self",
".",
"_cluster_size_hist",
".",
"shape",
")",
"if",
"self",
".",
"_create_cluster_tot_hist",
":",
"if",
"clusters",
"[",
"'tot'",
"]",
".",
"shape",
"[",
"0",
"]",
">",
"0",
"and",
"np",
".",
"max",
"(",
"clusters",
"[",
"'tot'",
"]",
")",
"+",
"1",
">",
"self",
".",
"_cluster_tot_hist",
".",
"shape",
"[",
"0",
"]",
":",
"self",
".",
"_cluster_tot_hist",
".",
"resize",
"(",
"(",
"np",
".",
"max",
"(",
"clusters",
"[",
"'tot'",
"]",
")",
"+",
"1",
",",
"self",
".",
"_cluster_tot_hist",
".",
"shape",
"[",
"1",
"]",
")",
")",
"if",
"clusters",
"[",
"'size'",
"]",
".",
"shape",
"[",
"0",
"]",
">",
"0",
"and",
"np",
".",
"max",
"(",
"clusters",
"[",
"'size'",
"]",
")",
"+",
"1",
">",
"self",
".",
"_cluster_tot_hist",
".",
"shape",
"[",
"1",
"]",
":",
"self",
".",
"_cluster_tot_hist",
".",
"resize",
"(",
"(",
"self",
".",
"_cluster_tot_hist",
".",
"shape",
"[",
"0",
"]",
",",
"np",
".",
"max",
"(",
"clusters",
"[",
"'size'",
"]",
")",
"+",
"1",
")",
")",
"self",
".",
"_cluster_tot_hist",
"+=",
"fast_analysis_utils",
".",
"hist_2d_index",
"(",
"clusters",
"[",
"'tot'",
"]",
",",
"clusters",
"[",
"'size'",
"]",
",",
"shape",
"=",
"self",
".",
"_cluster_tot_hist",
".",
"shape",
")",
"if",
"self",
".",
"_analyzed_data_file",
"is",
"not",
"None",
"and",
"self",
".",
"_create_hit_table",
":",
"hit_table",
".",
"append",
"(",
"hits",
")",
"if",
"self",
".",
"_analyzed_data_file",
"is",
"not",
"None",
"and",
"self",
".",
"_create_meta_word_index",
":",
"size",
"=",
"self",
".",
"interpreter",
".",
"get_n_meta_data_word",
"(",
")",
"meta_word_index_table",
".",
"append",
"(",
"meta_word",
"[",
":",
"size",
"]",
")",
"if",
"total_words",
"<=",
"progress_bar",
".",
"maxval",
":",
"# Otherwise exception is thrown",
"progress_bar",
".",
"update",
"(",
"total_words",
")",
"self",
".",
"out_file_h5",
".",
"flush",
"(",
")",
"progress_bar",
".",
"finish",
"(",
")",
"self",
".",
"_create_additional_data",
"(",
")",
"if",
"close_analyzed_data_file",
":",
"self",
".",
"out_file_h5",
".",
"close",
"(",
")",
"self",
".",
"out_file_h5",
"=",
"None",
"self",
".",
"out_file_h5",
"=",
"out_file_h5",
"if",
"self",
".",
"is_open",
"(",
"self",
".",
"out_file_h5",
")",
":",
"self",
".",
"_analyzed_data_file",
"=",
"self",
".",
"out_file_h5",
".",
"filename",
"else",
":",
"self",
".",
"_analyzed_data_file",
"=",
"None"
]
| Interprets the raw data word table of all given raw data files with the c++ library.
Creates the h5 output file and PDF plots.
Parameters
----------
analyzed_data_file : string
The file name of the output analyzed data file. If None, the output analyzed data file
specified during initialization is taken.
use_settings_from_file : boolean
True if the needed parameters should be extracted from the raw data file
fei4b : boolean
True if the raw data is from FE-I4B. | [
"Interprets",
"the",
"raw",
"data",
"word",
"table",
"of",
"all",
"given",
"raw",
"data",
"files",
"with",
"the",
"c",
"++",
"library",
".",
"Creates",
"the",
"h5",
"output",
"file",
"and",
"PDF",
"plots",
"."
]
| python | train |
IAMconsortium/pyam | pyam/core.py | https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1084-L1131 | def load_metadata(self, path, *args, **kwargs):
"""Load metadata exported from `pyam.IamDataFrame` instance
Parameters
----------
path: string
xlsx file with metadata exported from `pyam.IamDataFrame` instance
"""
if not os.path.exists(path):
raise ValueError("no metadata file '" + path + "' found!")
if path.endswith('csv'):
df = pd.read_csv(path, *args, **kwargs)
else:
xl = pd.ExcelFile(path)
if len(xl.sheet_names) > 1 and 'sheet_name' not in kwargs:
kwargs['sheet_name'] = 'meta'
df = pd.read_excel(path, *args, **kwargs)
req_cols = ['model', 'scenario', 'exclude']
if not set(req_cols).issubset(set(df.columns)):
e = 'File `{}` does not have required columns ({})!'
raise ValueError(e.format(path, req_cols))
# set index, filter to relevant scenarios from imported metadata file
df.set_index(META_IDX, inplace=True)
idx = self.meta.index.intersection(df.index)
n_invalid = len(df) - len(idx)
if n_invalid > 0:
msg = 'Ignoring {} scenario{} from imported metadata'
logger().info(msg.format(n_invalid, 's' if n_invalid > 1 else ''))
if idx.empty:
raise ValueError('No valid scenarios in imported metadata file!')
df = df.loc[idx]
# Merge in imported metadata
msg = 'Importing metadata for {} scenario{} (for total of {})'
logger().info(msg.format(len(df), 's' if len(df) > 1 else '',
len(self.meta)))
for col in df.columns:
self._new_meta_column(col)
self.meta[col] = df[col].combine_first(self.meta[col])
# set column `exclude` to bool
self.meta.exclude = self.meta.exclude.astype('bool') | [
"def",
"load_metadata",
"(",
"self",
",",
"path",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"raise",
"ValueError",
"(",
"\"no metadata file '\"",
"+",
"path",
"+",
"\"' found!\"",
")",
"if",
"path",
".",
"endswith",
"(",
"'csv'",
")",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"path",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"xl",
"=",
"pd",
".",
"ExcelFile",
"(",
"path",
")",
"if",
"len",
"(",
"xl",
".",
"sheet_names",
")",
">",
"1",
"and",
"'sheet_name'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'sheet_name'",
"]",
"=",
"'meta'",
"df",
"=",
"pd",
".",
"read_excel",
"(",
"path",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"req_cols",
"=",
"[",
"'model'",
",",
"'scenario'",
",",
"'exclude'",
"]",
"if",
"not",
"set",
"(",
"req_cols",
")",
".",
"issubset",
"(",
"set",
"(",
"df",
".",
"columns",
")",
")",
":",
"e",
"=",
"'File `{}` does not have required columns ({})!'",
"raise",
"ValueError",
"(",
"e",
".",
"format",
"(",
"path",
",",
"req_cols",
")",
")",
"# set index, filter to relevant scenarios from imported metadata file",
"df",
".",
"set_index",
"(",
"META_IDX",
",",
"inplace",
"=",
"True",
")",
"idx",
"=",
"self",
".",
"meta",
".",
"index",
".",
"intersection",
"(",
"df",
".",
"index",
")",
"n_invalid",
"=",
"len",
"(",
"df",
")",
"-",
"len",
"(",
"idx",
")",
"if",
"n_invalid",
">",
"0",
":",
"msg",
"=",
"'Ignoring {} scenario{} from imported metadata'",
"logger",
"(",
")",
".",
"info",
"(",
"msg",
".",
"format",
"(",
"n_invalid",
",",
"'s'",
"if",
"n_invalid",
">",
"1",
"else",
"''",
")",
")",
"if",
"idx",
".",
"empty",
":",
"raise",
"ValueError",
"(",
"'No valid scenarios in imported metadata file!'",
")",
"df",
"=",
"df",
".",
"loc",
"[",
"idx",
"]",
"# Merge in imported metadata",
"msg",
"=",
"'Importing metadata for {} scenario{} (for total of {})'",
"logger",
"(",
")",
".",
"info",
"(",
"msg",
".",
"format",
"(",
"len",
"(",
"df",
")",
",",
"'s'",
"if",
"len",
"(",
"df",
")",
">",
"1",
"else",
"''",
",",
"len",
"(",
"self",
".",
"meta",
")",
")",
")",
"for",
"col",
"in",
"df",
".",
"columns",
":",
"self",
".",
"_new_meta_column",
"(",
"col",
")",
"self",
".",
"meta",
"[",
"col",
"]",
"=",
"df",
"[",
"col",
"]",
".",
"combine_first",
"(",
"self",
".",
"meta",
"[",
"col",
"]",
")",
"# set column `exclude` to bool",
"self",
".",
"meta",
".",
"exclude",
"=",
"self",
".",
"meta",
".",
"exclude",
".",
"astype",
"(",
"'bool'",
")"
]
| Load metadata exported from `pyam.IamDataFrame` instance
Parameters
----------
path: string
xlsx file with metadata exported from `pyam.IamDataFrame` instance | [
"Load",
"metadata",
"exported",
"from",
"pyam",
".",
"IamDataFrame",
"instance"
]
| python | train |
happyleavesaoc/python-limitlessled | limitlessled/group/rgbww.py | https://github.com/happyleavesaoc/python-limitlessled/blob/70307c2bf8c91430a99579d2ad18b228ec7a8488/limitlessled/group/rgbww.py#L157-L195 | def transition(self, duration,
color=None, brightness=None, temperature=None):
""" Transition wrapper.
Short-circuit transition as necessary.
:param duration: Time to transition.
:param color: Transition to this color.
:param brightness: Transition to this brightness.
:param temperature: Transition to this temperature.
"""
if color and temperature is not None:
raise ValueError("Cannot transition to color and temperature "
"simultaneously.")
# Transition to white immediately.
if color == RGB_WHITE:
self.white()
# Transition away from white immediately.
elif self.color == RGB_WHITE and color is not None:
self.color = color
# Transition immediately if duration is zero.
if duration == 0:
if brightness is not None:
self.brightness = brightness
if color:
self.color = color
if temperature is not None:
self.temperature = temperature
return
# Perform transition
if color and color != self.color:
self._transition(duration, brightness,
hue=hue_of_color(color),
saturation=saturation_of_color(color))
elif temperature != self.temperature:
self._transition(duration, brightness, temperature=temperature)
elif brightness != self.brightness:
self._transition(duration, brightness) | [
"def",
"transition",
"(",
"self",
",",
"duration",
",",
"color",
"=",
"None",
",",
"brightness",
"=",
"None",
",",
"temperature",
"=",
"None",
")",
":",
"if",
"color",
"and",
"temperature",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cannot transition to color and temperature \"",
"\"simultaneously.\"",
")",
"# Transition to white immediately.",
"if",
"color",
"==",
"RGB_WHITE",
":",
"self",
".",
"white",
"(",
")",
"# Transition away from white immediately.",
"elif",
"self",
".",
"color",
"==",
"RGB_WHITE",
"and",
"color",
"is",
"not",
"None",
":",
"self",
".",
"color",
"=",
"color",
"# Transition immediately if duration is zero.",
"if",
"duration",
"==",
"0",
":",
"if",
"brightness",
"is",
"not",
"None",
":",
"self",
".",
"brightness",
"=",
"brightness",
"if",
"color",
":",
"self",
".",
"color",
"=",
"color",
"if",
"temperature",
"is",
"not",
"None",
":",
"self",
".",
"temperature",
"=",
"temperature",
"return",
"# Perform transition",
"if",
"color",
"and",
"color",
"!=",
"self",
".",
"color",
":",
"self",
".",
"_transition",
"(",
"duration",
",",
"brightness",
",",
"hue",
"=",
"hue_of_color",
"(",
"color",
")",
",",
"saturation",
"=",
"saturation_of_color",
"(",
"color",
")",
")",
"elif",
"temperature",
"!=",
"self",
".",
"temperature",
":",
"self",
".",
"_transition",
"(",
"duration",
",",
"brightness",
",",
"temperature",
"=",
"temperature",
")",
"elif",
"brightness",
"!=",
"self",
".",
"brightness",
":",
"self",
".",
"_transition",
"(",
"duration",
",",
"brightness",
")"
]
| Transition wrapper.
Short-circuit transition as necessary.
:param duration: Time to transition.
:param color: Transition to this color.
:param brightness: Transition to this brightness.
:param temperature: Transition to this temperature. | [
"Transition",
"wrapper",
"."
]
| python | train |
kensho-technologies/graphql-compiler | graphql_compiler/compiler/ir_lowering_common.py | https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_common.py#L14-L29 | def merge_consecutive_filter_clauses(ir_blocks):
"""Merge consecutive Filter(x), Filter(y) blocks into Filter(x && y) block."""
if not ir_blocks:
return ir_blocks
new_ir_blocks = [ir_blocks[0]]
for block in ir_blocks[1:]:
last_block = new_ir_blocks[-1]
if isinstance(last_block, Filter) and isinstance(block, Filter):
new_ir_blocks[-1] = Filter(
BinaryComposition(u'&&', last_block.predicate, block.predicate))
else:
new_ir_blocks.append(block)
return new_ir_blocks | [
"def",
"merge_consecutive_filter_clauses",
"(",
"ir_blocks",
")",
":",
"if",
"not",
"ir_blocks",
":",
"return",
"ir_blocks",
"new_ir_blocks",
"=",
"[",
"ir_blocks",
"[",
"0",
"]",
"]",
"for",
"block",
"in",
"ir_blocks",
"[",
"1",
":",
"]",
":",
"last_block",
"=",
"new_ir_blocks",
"[",
"-",
"1",
"]",
"if",
"isinstance",
"(",
"last_block",
",",
"Filter",
")",
"and",
"isinstance",
"(",
"block",
",",
"Filter",
")",
":",
"new_ir_blocks",
"[",
"-",
"1",
"]",
"=",
"Filter",
"(",
"BinaryComposition",
"(",
"u'&&'",
",",
"last_block",
".",
"predicate",
",",
"block",
".",
"predicate",
")",
")",
"else",
":",
"new_ir_blocks",
".",
"append",
"(",
"block",
")",
"return",
"new_ir_blocks"
]
| Merge consecutive Filter(x), Filter(y) blocks into Filter(x && y) block. | [
"Merge",
"consecutive",
"Filter",
"(",
"x",
")",
"Filter",
"(",
"y",
")",
"blocks",
"into",
"Filter",
"(",
"x",
"&&",
"y",
")",
"block",
"."
]
| python | train |
cltk/cltk | cltk/phonology/syllabify.py | https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/phonology/syllabify.py#L330-L379 | def legal_onsets(self, syllables):
"""
Filters syllable respecting the legality principle
:param syllables: str list
Example:
The method scans for invalid syllable onsets:
>>> s = Syllabifier(["i", "u", "y"], ["o", "ø", "e"], ["a"], ["r"], ["l"], ["m", "n"], ["f", "v", "s", "h"], ["k", "g", "b", "p", "t", "d"])
>>> s.set_invalid_onsets(['lm'])
>>> s.legal_onsets(['a', 'lma', 'tigr'])
['al', 'ma', 'tigr']
You can also define invalid syllable ultima:
>>> s.set_invalid_ultima(['gr'])
>>> s.legal_onsets(['al', 'ma', 'ti', 'gr'])
['al', 'ma', 'tigr']
"""
vowels = self.vowels
for i in range(1, len(syllables)):
onset = ""
for letter in syllables[i]:
if letter in vowels:
break
onset += letter
for j in range(len(onset)):
# Check whether the given onset is valid
if onset[j:] not in self.invalid_onsets:
syllables[i - 1] += onset[:j]
syllables[i] = syllables[i][j:]
break
# Check whether ultima is invalid
if syllables[-1] in self.invalid_ultima:
syllables[-2] += syllables[-1]
syllables = syllables[:-1]
return syllables | [
"def",
"legal_onsets",
"(",
"self",
",",
"syllables",
")",
":",
"vowels",
"=",
"self",
".",
"vowels",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"syllables",
")",
")",
":",
"onset",
"=",
"\"\"",
"for",
"letter",
"in",
"syllables",
"[",
"i",
"]",
":",
"if",
"letter",
"in",
"vowels",
":",
"break",
"onset",
"+=",
"letter",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"onset",
")",
")",
":",
"# Check whether the given onset is valid",
"if",
"onset",
"[",
"j",
":",
"]",
"not",
"in",
"self",
".",
"invalid_onsets",
":",
"syllables",
"[",
"i",
"-",
"1",
"]",
"+=",
"onset",
"[",
":",
"j",
"]",
"syllables",
"[",
"i",
"]",
"=",
"syllables",
"[",
"i",
"]",
"[",
"j",
":",
"]",
"break",
"# Check whether ultima is invalid",
"if",
"syllables",
"[",
"-",
"1",
"]",
"in",
"self",
".",
"invalid_ultima",
":",
"syllables",
"[",
"-",
"2",
"]",
"+=",
"syllables",
"[",
"-",
"1",
"]",
"syllables",
"=",
"syllables",
"[",
":",
"-",
"1",
"]",
"return",
"syllables"
]
| Filters syllable respecting the legality principle
:param syllables: str list
Example:
The method scans for invalid syllable onsets:
>>> s = Syllabifier(["i", "u", "y"], ["o", "ø", "e"], ["a"], ["r"], ["l"], ["m", "n"], ["f", "v", "s", "h"], ["k", "g", "b", "p", "t", "d"])
>>> s.set_invalid_onsets(['lm'])
>>> s.legal_onsets(['a', 'lma', 'tigr'])
['al', 'ma', 'tigr']
You can also define invalid syllable ultima:
>>> s.set_invalid_ultima(['gr'])
>>> s.legal_onsets(['al', 'ma', 'ti', 'gr'])
['al', 'ma', 'tigr'] | [
"Filters",
"syllable",
"respecting",
"the",
"legality",
"principle",
":",
"param",
"syllables",
":",
"str",
"list"
]
| python | train |
erikrose/more-itertools | more_itertools/more.py | https://github.com/erikrose/more-itertools/blob/6a91b4e25c8e12fcf9fc2b53cf8ee0fba293e6f9/more_itertools/more.py#L715-L746 | def substrings_indexes(seq, reverse=False):
"""Yield all substrings and their positions in *seq*
The items yielded will be a tuple of the form ``(substr, i, j)``, where
``substr == seq[i:j]``.
This function only works for iterables that support slicing, such as
``str`` objects.
>>> for item in substrings_indexes('more'):
... print(item)
('m', 0, 1)
('o', 1, 2)
('r', 2, 3)
('e', 3, 4)
('mo', 0, 2)
('or', 1, 3)
('re', 2, 4)
('mor', 0, 3)
('ore', 1, 4)
('more', 0, 4)
Set *reverse* to ``True`` to yield the same items in the opposite order.
"""
r = range(1, len(seq) + 1)
if reverse:
r = reversed(r)
return (
(seq[i:i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
) | [
"def",
"substrings_indexes",
"(",
"seq",
",",
"reverse",
"=",
"False",
")",
":",
"r",
"=",
"range",
"(",
"1",
",",
"len",
"(",
"seq",
")",
"+",
"1",
")",
"if",
"reverse",
":",
"r",
"=",
"reversed",
"(",
"r",
")",
"return",
"(",
"(",
"seq",
"[",
"i",
":",
"i",
"+",
"L",
"]",
",",
"i",
",",
"i",
"+",
"L",
")",
"for",
"L",
"in",
"r",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"seq",
")",
"-",
"L",
"+",
"1",
")",
")"
]
| Yield all substrings and their positions in *seq*
The items yielded will be a tuple of the form ``(substr, i, j)``, where
``substr == seq[i:j]``.
This function only works for iterables that support slicing, such as
``str`` objects.
>>> for item in substrings_indexes('more'):
... print(item)
('m', 0, 1)
('o', 1, 2)
('r', 2, 3)
('e', 3, 4)
('mo', 0, 2)
('or', 1, 3)
('re', 2, 4)
('mor', 0, 3)
('ore', 1, 4)
('more', 0, 4)
Set *reverse* to ``True`` to yield the same items in the opposite order. | [
"Yield",
"all",
"substrings",
"and",
"their",
"positions",
"in",
"*",
"seq",
"*"
]
| python | train |
rwl/pylon | pylon/case.py | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/case.py#L925-L929 | def save_matpower(self, fd):
""" Serialize the case as a MATPOWER data file.
"""
from pylon.io import MATPOWERWriter
MATPOWERWriter(self).write(fd) | [
"def",
"save_matpower",
"(",
"self",
",",
"fd",
")",
":",
"from",
"pylon",
".",
"io",
"import",
"MATPOWERWriter",
"MATPOWERWriter",
"(",
"self",
")",
".",
"write",
"(",
"fd",
")"
]
| Serialize the case as a MATPOWER data file. | [
"Serialize",
"the",
"case",
"as",
"a",
"MATPOWER",
"data",
"file",
"."
]
| python | train |
port-zero/mite | mite/mite.py | https://github.com/port-zero/mite/blob/b5fa941f60bf43e04ef654ed580ed7ef91211c22/mite/mite.py#L229-L237 | def create_customer(self, name, **kwargs):
"""
Creates a customer with a name. All other parameters are optional. They
are: `note`, `active_hourly_rate`, `hourly_rate`,
`hourly_rates_per_service`, and `archived`.
"""
data = self._wrap_dict("customer", kwargs)
data["customer"]["name"] = name
return self.post("/customers.json", data=data) | [
"def",
"create_customer",
"(",
"self",
",",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"self",
".",
"_wrap_dict",
"(",
"\"customer\"",
",",
"kwargs",
")",
"data",
"[",
"\"customer\"",
"]",
"[",
"\"name\"",
"]",
"=",
"name",
"return",
"self",
".",
"post",
"(",
"\"/customers.json\"",
",",
"data",
"=",
"data",
")"
]
| Creates a customer with a name. All other parameters are optional. They
are: `note`, `active_hourly_rate`, `hourly_rate`,
`hourly_rates_per_service`, and `archived`. | [
"Creates",
"a",
"customer",
"with",
"a",
"name",
".",
"All",
"other",
"parameters",
"are",
"optional",
".",
"They",
"are",
":",
"note",
"active_hourly_rate",
"hourly_rate",
"hourly_rates_per_service",
"and",
"archived",
"."
]
| python | train |
tonybaloney/wily | wily/commands/index.py | https://github.com/tonybaloney/wily/blob/bae259354a91b57d56603f0ca7403186f086a84c/wily/commands/index.py#L13-L61 | def index(config, include_message=False):
"""
Show information about the cache and runtime.
:param config: The wily configuration
:type config: :namedtuple:`wily.config.WilyConfig`
:param include_message: Include revision messages
:type include_message: ``bool``
"""
state = State(config=config)
logger.debug("Running show command")
logger.info("--------Configuration---------")
logger.info(f"Path: {config.path}")
logger.info(f"Archiver: {config.archiver}")
logger.info(f"Operators: {config.operators}")
logger.info("")
logger.info("-----------History------------")
data = []
for archiver in state.archivers:
for rev in state.index[archiver].revisions:
if include_message:
data.append(
(
format_revision(rev.revision.key),
rev.revision.author_name,
rev.revision.message[:MAX_MESSAGE_WIDTH],
format_date(rev.revision.date),
)
)
else:
data.append(
(
format_revision(rev.revision.key),
rev.revision.author_name,
format_date(rev.revision.date),
)
)
if include_message:
headers = ("Revision", "Author", "Message", "Date")
else:
headers = ("Revision", "Author", "Date")
print(
tabulate.tabulate(
headers=headers, tabular_data=data, tablefmt=DEFAULT_GRID_STYLE
)
) | [
"def",
"index",
"(",
"config",
",",
"include_message",
"=",
"False",
")",
":",
"state",
"=",
"State",
"(",
"config",
"=",
"config",
")",
"logger",
".",
"debug",
"(",
"\"Running show command\"",
")",
"logger",
".",
"info",
"(",
"\"--------Configuration---------\"",
")",
"logger",
".",
"info",
"(",
"f\"Path: {config.path}\"",
")",
"logger",
".",
"info",
"(",
"f\"Archiver: {config.archiver}\"",
")",
"logger",
".",
"info",
"(",
"f\"Operators: {config.operators}\"",
")",
"logger",
".",
"info",
"(",
"\"\"",
")",
"logger",
".",
"info",
"(",
"\"-----------History------------\"",
")",
"data",
"=",
"[",
"]",
"for",
"archiver",
"in",
"state",
".",
"archivers",
":",
"for",
"rev",
"in",
"state",
".",
"index",
"[",
"archiver",
"]",
".",
"revisions",
":",
"if",
"include_message",
":",
"data",
".",
"append",
"(",
"(",
"format_revision",
"(",
"rev",
".",
"revision",
".",
"key",
")",
",",
"rev",
".",
"revision",
".",
"author_name",
",",
"rev",
".",
"revision",
".",
"message",
"[",
":",
"MAX_MESSAGE_WIDTH",
"]",
",",
"format_date",
"(",
"rev",
".",
"revision",
".",
"date",
")",
",",
")",
")",
"else",
":",
"data",
".",
"append",
"(",
"(",
"format_revision",
"(",
"rev",
".",
"revision",
".",
"key",
")",
",",
"rev",
".",
"revision",
".",
"author_name",
",",
"format_date",
"(",
"rev",
".",
"revision",
".",
"date",
")",
",",
")",
")",
"if",
"include_message",
":",
"headers",
"=",
"(",
"\"Revision\"",
",",
"\"Author\"",
",",
"\"Message\"",
",",
"\"Date\"",
")",
"else",
":",
"headers",
"=",
"(",
"\"Revision\"",
",",
"\"Author\"",
",",
"\"Date\"",
")",
"print",
"(",
"tabulate",
".",
"tabulate",
"(",
"headers",
"=",
"headers",
",",
"tabular_data",
"=",
"data",
",",
"tablefmt",
"=",
"DEFAULT_GRID_STYLE",
")",
")"
]
| Show information about the cache and runtime.
:param config: The wily configuration
:type config: :namedtuple:`wily.config.WilyConfig`
:param include_message: Include revision messages
:type include_message: ``bool`` | [
"Show",
"information",
"about",
"the",
"cache",
"and",
"runtime",
"."
]
| python | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/tools/common.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/common.py#L163-L168 | def set (self, id, param, value):
""" Sets the value of a configuration parameter. """
assert isinstance(id, basestring)
assert isinstance(param, basestring)
assert is_iterable_typed(value, basestring)
self.params_.setdefault(param, {})[id] = value | [
"def",
"set",
"(",
"self",
",",
"id",
",",
"param",
",",
"value",
")",
":",
"assert",
"isinstance",
"(",
"id",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"param",
",",
"basestring",
")",
"assert",
"is_iterable_typed",
"(",
"value",
",",
"basestring",
")",
"self",
".",
"params_",
".",
"setdefault",
"(",
"param",
",",
"{",
"}",
")",
"[",
"id",
"]",
"=",
"value"
]
| Sets the value of a configuration parameter. | [
"Sets",
"the",
"value",
"of",
"a",
"configuration",
"parameter",
"."
]
| python | train |
rigetti/grove | grove/tomography/tomography.py | https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/tomography/tomography.py#L111-L135 | def is_functional(cls):
"""
Checks lazily whether a convex solver is installed that handles positivity constraints.
:return: True if a solver supporting positivity constraints is installed.
:rtype: bool
"""
if not cls._tested:
cls._tested = True
np.random.seed(SEED)
test_problem_dimension = 10
mat = np.random.randn(test_problem_dimension, test_problem_dimension)
posmat = mat.dot(mat.T)
posvar = cvxpy.Variable(test_problem_dimension, test_problem_dimension)
prob = cvxpy.Problem(cvxpy.Minimize((cvxpy.trace(posmat * posvar)
+ cvxpy.norm(posvar))),
[posvar >> 0, cvxpy.trace(posvar) >= 1.])
try:
prob.solve(SOLVER)
cls._functional = True
except cvxpy.SolverError: # pragma no coverage
_log.warning("No convex SDP solver found. You will not be able to solve"
" tomography problems with matrix positivity constraints.")
return cls._functional | [
"def",
"is_functional",
"(",
"cls",
")",
":",
"if",
"not",
"cls",
".",
"_tested",
":",
"cls",
".",
"_tested",
"=",
"True",
"np",
".",
"random",
".",
"seed",
"(",
"SEED",
")",
"test_problem_dimension",
"=",
"10",
"mat",
"=",
"np",
".",
"random",
".",
"randn",
"(",
"test_problem_dimension",
",",
"test_problem_dimension",
")",
"posmat",
"=",
"mat",
".",
"dot",
"(",
"mat",
".",
"T",
")",
"posvar",
"=",
"cvxpy",
".",
"Variable",
"(",
"test_problem_dimension",
",",
"test_problem_dimension",
")",
"prob",
"=",
"cvxpy",
".",
"Problem",
"(",
"cvxpy",
".",
"Minimize",
"(",
"(",
"cvxpy",
".",
"trace",
"(",
"posmat",
"*",
"posvar",
")",
"+",
"cvxpy",
".",
"norm",
"(",
"posvar",
")",
")",
")",
",",
"[",
"posvar",
">>",
"0",
",",
"cvxpy",
".",
"trace",
"(",
"posvar",
")",
">=",
"1.",
"]",
")",
"try",
":",
"prob",
".",
"solve",
"(",
"SOLVER",
")",
"cls",
".",
"_functional",
"=",
"True",
"except",
"cvxpy",
".",
"SolverError",
":",
"# pragma no coverage",
"_log",
".",
"warning",
"(",
"\"No convex SDP solver found. You will not be able to solve\"",
"\" tomography problems with matrix positivity constraints.\"",
")",
"return",
"cls",
".",
"_functional"
]
| Checks lazily whether a convex solver is installed that handles positivity constraints.
:return: True if a solver supporting positivity constraints is installed.
:rtype: bool | [
"Checks",
"lazily",
"whether",
"a",
"convex",
"solver",
"is",
"installed",
"that",
"handles",
"positivity",
"constraints",
"."
]
| python | train |
SwoopSearch/pyaddress | address/address.py | https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L377-L393 | def check_street_suffix(self, token):
"""
Attempts to match a street suffix. If found, it will return the abbreviation, with the first letter capitalized
and a period after it. E.g. "St." or "Ave."
"""
# Suffix must come before street
# print "Suffix check", token, "suffix", self.street_suffix, "street", self.street
if self.street_suffix is None and self.street is None:
# print "upper", token.upper()
if token.upper() in self.parser.suffixes.keys():
suffix = self.parser.suffixes[token.upper()]
self.street_suffix = self._clean(suffix.capitalize() + '.')
return True
elif token.upper() in self.parser.suffixes.values():
self.street_suffix = self._clean(token.capitalize() + '.')
return True
return False | [
"def",
"check_street_suffix",
"(",
"self",
",",
"token",
")",
":",
"# Suffix must come before street",
"# print \"Suffix check\", token, \"suffix\", self.street_suffix, \"street\", self.street",
"if",
"self",
".",
"street_suffix",
"is",
"None",
"and",
"self",
".",
"street",
"is",
"None",
":",
"# print \"upper\", token.upper()",
"if",
"token",
".",
"upper",
"(",
")",
"in",
"self",
".",
"parser",
".",
"suffixes",
".",
"keys",
"(",
")",
":",
"suffix",
"=",
"self",
".",
"parser",
".",
"suffixes",
"[",
"token",
".",
"upper",
"(",
")",
"]",
"self",
".",
"street_suffix",
"=",
"self",
".",
"_clean",
"(",
"suffix",
".",
"capitalize",
"(",
")",
"+",
"'.'",
")",
"return",
"True",
"elif",
"token",
".",
"upper",
"(",
")",
"in",
"self",
".",
"parser",
".",
"suffixes",
".",
"values",
"(",
")",
":",
"self",
".",
"street_suffix",
"=",
"self",
".",
"_clean",
"(",
"token",
".",
"capitalize",
"(",
")",
"+",
"'.'",
")",
"return",
"True",
"return",
"False"
]
| Attempts to match a street suffix. If found, it will return the abbreviation, with the first letter capitalized
and a period after it. E.g. "St." or "Ave." | [
"Attempts",
"to",
"match",
"a",
"street",
"suffix",
".",
"If",
"found",
"it",
"will",
"return",
"the",
"abbreviation",
"with",
"the",
"first",
"letter",
"capitalized",
"and",
"a",
"period",
"after",
"it",
".",
"E",
".",
"g",
".",
"St",
".",
"or",
"Ave",
"."
]
| python | train |
bbangert/lettuce_webdriver | lettuce_webdriver/util.py | https://github.com/bbangert/lettuce_webdriver/blob/d11f8531c43bb7150c316e0dc4ccd083617becf7/lettuce_webdriver/util.py#L79-L85 | def _elements(self):
"""
The cached list of elements.
"""
if not hasattr(self, '_elements_cached'):
setattr(self, '_elements_cached', list(self._select()))
return self._elements_cached | [
"def",
"_elements",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_elements_cached'",
")",
":",
"setattr",
"(",
"self",
",",
"'_elements_cached'",
",",
"list",
"(",
"self",
".",
"_select",
"(",
")",
")",
")",
"return",
"self",
".",
"_elements_cached"
]
| The cached list of elements. | [
"The",
"cached",
"list",
"of",
"elements",
"."
]
| python | train |
AmanoTeam/amanobot | amanobot/aio/__init__.py | https://github.com/AmanoTeam/amanobot/blob/fe546e2e294eec88e637da0b2567c7e7e8662437/amanobot/aio/__init__.py#L521-L530 | async def editMessageMedia(self, msg_identifier, media,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagemedia
:param msg_identifier: Same as ``msg_identifier`` in :meth:`amanobot.aio.Bot.editMessageText`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return await self._api_request('editMessageMedia', _rectify(p)) | [
"async",
"def",
"editMessageMedia",
"(",
"self",
",",
"msg_identifier",
",",
"media",
",",
"reply_markup",
"=",
"None",
")",
":",
"p",
"=",
"_strip",
"(",
"locals",
"(",
")",
",",
"more",
"=",
"[",
"'msg_identifier'",
"]",
")",
"p",
".",
"update",
"(",
"_dismantle_message_identifier",
"(",
"msg_identifier",
")",
")",
"return",
"await",
"self",
".",
"_api_request",
"(",
"'editMessageMedia'",
",",
"_rectify",
"(",
"p",
")",
")"
]
| See: https://core.telegram.org/bots/api#editmessagemedia
:param msg_identifier: Same as ``msg_identifier`` in :meth:`amanobot.aio.Bot.editMessageText` | [
"See",
":",
"https",
":",
"//",
"core",
".",
"telegram",
".",
"org",
"/",
"bots",
"/",
"api#editmessagemedia"
]
| python | train |
MycroftAI/mycroft-precise | precise/util.py | https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/util.py#L31-L33 | def buffer_to_audio(buffer: bytes) -> np.ndarray:
"""Convert a raw mono audio byte string to numpy array of floats"""
return np.fromstring(buffer, dtype='<i2').astype(np.float32, order='C') / 32768.0 | [
"def",
"buffer_to_audio",
"(",
"buffer",
":",
"bytes",
")",
"->",
"np",
".",
"ndarray",
":",
"return",
"np",
".",
"fromstring",
"(",
"buffer",
",",
"dtype",
"=",
"'<i2'",
")",
".",
"astype",
"(",
"np",
".",
"float32",
",",
"order",
"=",
"'C'",
")",
"/",
"32768.0"
]
| Convert a raw mono audio byte string to numpy array of floats | [
"Convert",
"a",
"raw",
"mono",
"audio",
"byte",
"string",
"to",
"numpy",
"array",
"of",
"floats"
]
| python | train |
lwcook/horsetail-matching | horsetailmatching/hm.py | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/hm.py#L921-L936 | def _matrix_integration(q, h, t):
''' Returns the dp metric for a single horsetail
curve at a given value of the epistemic uncertainties'''
N = len(q)
# correction if CDF has gone out of trapezium range
if h[-1] < 0.9: h[-1] = 1.0
W = np.zeros([N, N])
for i in range(N):
W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)])
dp = (q - t).T.dot(W).dot(q - t)
return dp | [
"def",
"_matrix_integration",
"(",
"q",
",",
"h",
",",
"t",
")",
":",
"N",
"=",
"len",
"(",
"q",
")",
"# correction if CDF has gone out of trapezium range",
"if",
"h",
"[",
"-",
"1",
"]",
"<",
"0.9",
":",
"h",
"[",
"-",
"1",
"]",
"=",
"1.0",
"W",
"=",
"np",
".",
"zeros",
"(",
"[",
"N",
",",
"N",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"N",
")",
":",
"W",
"[",
"i",
",",
"i",
"]",
"=",
"0.5",
"*",
"(",
"h",
"[",
"min",
"(",
"i",
"+",
"1",
",",
"N",
"-",
"1",
")",
"]",
"-",
"h",
"[",
"max",
"(",
"i",
"-",
"1",
",",
"0",
")",
"]",
")",
"dp",
"=",
"(",
"q",
"-",
"t",
")",
".",
"T",
".",
"dot",
"(",
"W",
")",
".",
"dot",
"(",
"q",
"-",
"t",
")",
"return",
"dp"
]
| Returns the dp metric for a single horsetail
curve at a given value of the epistemic uncertainties | [
"Returns",
"the",
"dp",
"metric",
"for",
"a",
"single",
"horsetail",
"curve",
"at",
"a",
"given",
"value",
"of",
"the",
"epistemic",
"uncertainties"
]
| python | train |
wright-group/WrightTools | WrightTools/artists/_helpers.py | https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/artists/_helpers.py#L53-L75 | def _title(fig, title, subtitle="", *, margin=1, fontsize=20, subfontsize=18):
"""Add a title to a figure.
Parameters
----------
fig : matplotlib Figure
Figure.
title : string
Title.
subtitle : string
Subtitle.
margin : number (optional)
Distance from top of plot, in inches. Default is 1.
fontsize : number (optional)
Title fontsize. Default is 20.
subfontsize : number (optional)
Subtitle fontsize. Default is 18.
"""
fig.suptitle(title, fontsize=fontsize)
height = fig.get_figheight() # inches
distance = margin / 2. # distance from top of plot, in inches
ratio = 1 - distance / height
fig.text(0.5, ratio, subtitle, fontsize=subfontsize, ha="center", va="top") | [
"def",
"_title",
"(",
"fig",
",",
"title",
",",
"subtitle",
"=",
"\"\"",
",",
"*",
",",
"margin",
"=",
"1",
",",
"fontsize",
"=",
"20",
",",
"subfontsize",
"=",
"18",
")",
":",
"fig",
".",
"suptitle",
"(",
"title",
",",
"fontsize",
"=",
"fontsize",
")",
"height",
"=",
"fig",
".",
"get_figheight",
"(",
")",
"# inches",
"distance",
"=",
"margin",
"/",
"2.",
"# distance from top of plot, in inches",
"ratio",
"=",
"1",
"-",
"distance",
"/",
"height",
"fig",
".",
"text",
"(",
"0.5",
",",
"ratio",
",",
"subtitle",
",",
"fontsize",
"=",
"subfontsize",
",",
"ha",
"=",
"\"center\"",
",",
"va",
"=",
"\"top\"",
")"
]
| Add a title to a figure.
Parameters
----------
fig : matplotlib Figure
Figure.
title : string
Title.
subtitle : string
Subtitle.
margin : number (optional)
Distance from top of plot, in inches. Default is 1.
fontsize : number (optional)
Title fontsize. Default is 20.
subfontsize : number (optional)
Subtitle fontsize. Default is 18. | [
"Add",
"a",
"title",
"to",
"a",
"figure",
"."
]
| python | train |
log2timeline/plaso | plaso/analysis/chrome_extension.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/analysis/chrome_extension.py#L154-L207 | def ExamineEvent(self, mediator, event):
"""Analyzes an event.
Args:
mediator (AnalysisMediator): mediates interactions between analysis
plugins and other components, such as storage and dfvfs.
event (EventObject): event to examine.
"""
# Only interested in filesystem events.
if event.data_type != 'fs:stat':
return
filename = getattr(event, 'filename', None)
if not filename:
return
# Determine if we have a Chrome extension ID.
if 'chrome' not in filename.lower():
return
if not self._sep:
self._sep = self._GetPathSegmentSeparator(filename)
if '{0:s}Extensions{0:s}'.format(self._sep) not in filename:
return
# Now we have extension IDs, let's check if we've got the
# folder, nothing else.
paths = filename.split(self._sep)
if paths[-2] != 'Extensions':
return
extension_identifier = paths[-1]
if extension_identifier == 'Temp':
return
# Get the user and ID.
user = mediator.GetUsernameForPath(filename)
# We still want this information in here, so that we can
# manually deduce the username.
if not user:
if len(filename) > 25:
user = 'Not found ({0:s}...)'.format(filename[0:25])
else:
user = 'Not found ({0:s})'.format(filename)
extension_string = self._GetTitleFromChromeWebStore(extension_identifier)
if not extension_string:
extension_string = extension_identifier
self._results.setdefault(user, [])
if (extension_string, extension_identifier) not in self._results[user]:
self._results[user].append((extension_string, extension_identifier)) | [
"def",
"ExamineEvent",
"(",
"self",
",",
"mediator",
",",
"event",
")",
":",
"# Only interested in filesystem events.",
"if",
"event",
".",
"data_type",
"!=",
"'fs:stat'",
":",
"return",
"filename",
"=",
"getattr",
"(",
"event",
",",
"'filename'",
",",
"None",
")",
"if",
"not",
"filename",
":",
"return",
"# Determine if we have a Chrome extension ID.",
"if",
"'chrome'",
"not",
"in",
"filename",
".",
"lower",
"(",
")",
":",
"return",
"if",
"not",
"self",
".",
"_sep",
":",
"self",
".",
"_sep",
"=",
"self",
".",
"_GetPathSegmentSeparator",
"(",
"filename",
")",
"if",
"'{0:s}Extensions{0:s}'",
".",
"format",
"(",
"self",
".",
"_sep",
")",
"not",
"in",
"filename",
":",
"return",
"# Now we have extension IDs, let's check if we've got the",
"# folder, nothing else.",
"paths",
"=",
"filename",
".",
"split",
"(",
"self",
".",
"_sep",
")",
"if",
"paths",
"[",
"-",
"2",
"]",
"!=",
"'Extensions'",
":",
"return",
"extension_identifier",
"=",
"paths",
"[",
"-",
"1",
"]",
"if",
"extension_identifier",
"==",
"'Temp'",
":",
"return",
"# Get the user and ID.",
"user",
"=",
"mediator",
".",
"GetUsernameForPath",
"(",
"filename",
")",
"# We still want this information in here, so that we can",
"# manually deduce the username.",
"if",
"not",
"user",
":",
"if",
"len",
"(",
"filename",
")",
">",
"25",
":",
"user",
"=",
"'Not found ({0:s}...)'",
".",
"format",
"(",
"filename",
"[",
"0",
":",
"25",
"]",
")",
"else",
":",
"user",
"=",
"'Not found ({0:s})'",
".",
"format",
"(",
"filename",
")",
"extension_string",
"=",
"self",
".",
"_GetTitleFromChromeWebStore",
"(",
"extension_identifier",
")",
"if",
"not",
"extension_string",
":",
"extension_string",
"=",
"extension_identifier",
"self",
".",
"_results",
".",
"setdefault",
"(",
"user",
",",
"[",
"]",
")",
"if",
"(",
"extension_string",
",",
"extension_identifier",
")",
"not",
"in",
"self",
".",
"_results",
"[",
"user",
"]",
":",
"self",
".",
"_results",
"[",
"user",
"]",
".",
"append",
"(",
"(",
"extension_string",
",",
"extension_identifier",
")",
")"
]
| Analyzes an event.
Args:
mediator (AnalysisMediator): mediates interactions between analysis
plugins and other components, such as storage and dfvfs.
event (EventObject): event to examine. | [
"Analyzes",
"an",
"event",
"."
]
| python | train |
twilio/twilio-python | twilio/base/serialize.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/base/serialize.py#L22-L32 | def iso8601_datetime(d):
"""
Return a string representation of a date that the Twilio API understands
Format is YYYY-MM-DD. Returns None if d is not a string, datetime, or date
"""
if d == values.unset:
return d
elif isinstance(d, datetime.datetime) or isinstance(d, datetime.date):
return d.strftime('%Y-%m-%dT%H:%M:%SZ')
elif isinstance(d, str):
return d | [
"def",
"iso8601_datetime",
"(",
"d",
")",
":",
"if",
"d",
"==",
"values",
".",
"unset",
":",
"return",
"d",
"elif",
"isinstance",
"(",
"d",
",",
"datetime",
".",
"datetime",
")",
"or",
"isinstance",
"(",
"d",
",",
"datetime",
".",
"date",
")",
":",
"return",
"d",
".",
"strftime",
"(",
"'%Y-%m-%dT%H:%M:%SZ'",
")",
"elif",
"isinstance",
"(",
"d",
",",
"str",
")",
":",
"return",
"d"
]
| Return a string representation of a date that the Twilio API understands
Format is YYYY-MM-DD. Returns None if d is not a string, datetime, or date | [
"Return",
"a",
"string",
"representation",
"of",
"a",
"date",
"that",
"the",
"Twilio",
"API",
"understands",
"Format",
"is",
"YYYY",
"-",
"MM",
"-",
"DD",
".",
"Returns",
"None",
"if",
"d",
"is",
"not",
"a",
"string",
"datetime",
"or",
"date"
]
| python | train |
lobocv/pyperform | pyperform/tools.py | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/tools.py#L48-L64 | def remove_decorators(src):
""" Remove decorators from the source code """
src = src.strip()
src_lines = src.splitlines()
multi_line = False
n_deleted = 0
for n in range(len(src_lines)):
line = src_lines[n - n_deleted].strip()
if (line.startswith('@') and 'Benchmark' in line) or multi_line:
del src_lines[n - n_deleted]
n_deleted += 1
if line.endswith(')'):
multi_line = False
else:
multi_line = True
setup_src = '\n'.join(src_lines)
return setup_src | [
"def",
"remove_decorators",
"(",
"src",
")",
":",
"src",
"=",
"src",
".",
"strip",
"(",
")",
"src_lines",
"=",
"src",
".",
"splitlines",
"(",
")",
"multi_line",
"=",
"False",
"n_deleted",
"=",
"0",
"for",
"n",
"in",
"range",
"(",
"len",
"(",
"src_lines",
")",
")",
":",
"line",
"=",
"src_lines",
"[",
"n",
"-",
"n_deleted",
"]",
".",
"strip",
"(",
")",
"if",
"(",
"line",
".",
"startswith",
"(",
"'@'",
")",
"and",
"'Benchmark'",
"in",
"line",
")",
"or",
"multi_line",
":",
"del",
"src_lines",
"[",
"n",
"-",
"n_deleted",
"]",
"n_deleted",
"+=",
"1",
"if",
"line",
".",
"endswith",
"(",
"')'",
")",
":",
"multi_line",
"=",
"False",
"else",
":",
"multi_line",
"=",
"True",
"setup_src",
"=",
"'\\n'",
".",
"join",
"(",
"src_lines",
")",
"return",
"setup_src"
]
| Remove decorators from the source code | [
"Remove",
"decorators",
"from",
"the",
"source",
"code"
]
| python | train |