Dataset columns and value ranges:

Column             Type       Value range
repo               string     length 7–55
path               string     length 4–223
url                string     length 87–315
code               string     length 75–104k
code_tokens        list
docstring          string     length 1–46.9k
docstring_tokens   list
language           string     1 class
partition          string     3 classes
avg_line_len       float64    7.91–980
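The columns above describe one record per extracted function: repository, file path, source URL, the raw code, its token list, the docstring and its tokens, plus language, partition and average line length. As a rough illustration only, the sketch below tallies records with this schema by partition; it assumes the rows are stored as JSON Lines with exactly these field names, and the file name functions.jsonl is hypothetical.

import json

# Minimal sketch (assumption): the records shown below are stored as JSON Lines,
# one object per line, with the field names listed in the schema above.
# The file name "functions.jsonl" is hypothetical.
def partition_counts(path="functions.jsonl"):
    """Count records per partition (the schema notes 3 partition values)."""
    counts = {}
    with open(path) as handle:
        for line in handle:
            record = json.loads(line)
            part = record["partition"]
            counts[part] = counts.get(part, 0) + 1
    return counts

if __name__ == "__main__":
    print(partition_counts())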
theiviaxx/Frog
frog/views/tag.py
https://github.com/theiviaxx/Frog/blob/a9475463a8eed1323fe3ef5d51f9751fb1dc9edd/frog/views/tag.py#L97-L118
def post(request):
    """Creates a tag object

    :param name: Name for tag
    :type name: str
    :returns: json
    """
    res = Result()
    data = request.POST or json.loads(request.body)['body']
    name = data.get('name', None)
    if not name:
        res.isError = True
        res.message = "No name given"

        return JsonResponse(res.asDict())

    tag = Tag.objects.get_or_create(name=name.lower())[0]
    res.append(tag.json())

    return JsonResponse(res.asDict())
[ "def", "post", "(", "request", ")", ":", "res", "=", "Result", "(", ")", "data", "=", "request", ".", "POST", "or", "json", ".", "loads", "(", "request", ".", "body", ")", "[", "'body'", "]", "name", "=", "data", ".", "get", "(", "'name'", ",", "None", ")", "if", "not", "name", ":", "res", ".", "isError", "=", "True", "res", ".", "message", "=", "\"No name given\"", "return", "JsonResponse", "(", "res", ".", "asDict", "(", ")", ")", "tag", "=", "Tag", ".", "objects", ".", "get_or_create", "(", "name", "=", "name", ".", "lower", "(", ")", ")", "[", "0", "]", "res", ".", "append", "(", "tag", ".", "json", "(", ")", ")", "return", "JsonResponse", "(", "res", ".", "asDict", "(", ")", ")" ]
Creates a tag object :param name: Name for tag :type name: str :returns: json
[ "Creates", "a", "tag", "object" ]
python
train
21.454545
makinacorpus/django-geojson
djgeojson/serializers.py
https://github.com/makinacorpus/django-geojson/blob/42fa800eea6502c1271c0cfa73c03c2bd499b536/djgeojson/serializers.py#L448-L476
def serialize(self, queryset, **options):
    """
    Serialize a queryset.
    """
    self.options = options
    self.stream = options.get("stream", StringIO())
    self.primary_key = options.get("primary_key", None)
    self.properties = options.get("properties")
    self.geometry_field = options.get("geometry_field", "geom")
    self.use_natural_keys = options.get("use_natural_keys", False)
    self.bbox = options.get("bbox", None)
    self.bbox_auto = options.get("bbox_auto", None)
    self.srid = options.get("srid", GEOJSON_DEFAULT_SRID)
    self.crs = options.get("crs", True)

    self.start_serialization()
    if ValuesQuerySet is not None and isinstance(queryset, ValuesQuerySet):
        self.serialize_values_queryset(queryset)
    elif isinstance(queryset, list):
        self.serialize_object_list(queryset)
    elif isinstance(queryset, QuerySet):
        self.serialize_queryset(queryset)
    self.end_serialization()
    return self.getvalue()
[ "def", "serialize", "(", "self", ",", "queryset", ",", "*", "*", "options", ")", ":", "self", ".", "options", "=", "options", "self", ".", "stream", "=", "options", ".", "get", "(", "\"stream\"", ",", "StringIO", "(", ")", ")", "self", ".", "primary_key", "=", "options", ".", "get", "(", "\"primary_key\"", ",", "None", ")", "self", ".", "properties", "=", "options", ".", "get", "(", "\"properties\"", ")", "self", ".", "geometry_field", "=", "options", ".", "get", "(", "\"geometry_field\"", ",", "\"geom\"", ")", "self", ".", "use_natural_keys", "=", "options", ".", "get", "(", "\"use_natural_keys\"", ",", "False", ")", "self", ".", "bbox", "=", "options", ".", "get", "(", "\"bbox\"", ",", "None", ")", "self", ".", "bbox_auto", "=", "options", ".", "get", "(", "\"bbox_auto\"", ",", "None", ")", "self", ".", "srid", "=", "options", ".", "get", "(", "\"srid\"", ",", "GEOJSON_DEFAULT_SRID", ")", "self", ".", "crs", "=", "options", ".", "get", "(", "\"crs\"", ",", "True", ")", "self", ".", "start_serialization", "(", ")", "if", "ValuesQuerySet", "is", "not", "None", "and", "isinstance", "(", "queryset", ",", "ValuesQuerySet", ")", ":", "self", ".", "serialize_values_queryset", "(", "queryset", ")", "elif", "isinstance", "(", "queryset", ",", "list", ")", ":", "self", ".", "serialize_object_list", "(", "queryset", ")", "elif", "isinstance", "(", "queryset", ",", "QuerySet", ")", ":", "self", ".", "serialize_queryset", "(", "queryset", ")", "self", ".", "end_serialization", "(", ")", "return", "self", ".", "getvalue", "(", ")" ]
Serialize a queryset.
[ "Serialize", "a", "queryset", "." ]
python
train
35.586207
gplepage/gvar
src/gvar/__init__.py
https://github.com/gplepage/gvar/blob/d6671697319eb6280de3793c9a1c2b616c6f2ae0/src/gvar/__init__.py#L1109-L1209
def make_plot(
    self, count, plot=None, show=False, plottype='probability',
    bar=dict(alpha=0.15, color='b', linewidth=1.0, edgecolor='b'),
    errorbar=dict(fmt='b.'),
    gaussian=dict(ls='--', c='r')
    ):
    """ Convert histogram counts in array ``count`` into a plot.

    Args:
        count (array): Array of histogram counts (see
            :meth:`PDFHistogram.count`).
        plot (plotter): :mod:`matplotlib` plotting window. If ``None``
            uses the default window. Default is ``None``.
        show (boolean): Displayes plot if ``True``; otherwise returns the
            plot. Default is ``False``.
        plottype (str): The probabilities in each bin are plotted if
            ``plottype='probability'`` (default). The average probability
            density is plot if ``plottype='density'``. The cumulative
            probability is plotted if ``plottype=cumulative``.
        bar (dictionary): Additional plotting arguments for the bar graph
            showing the histogram. This part of the plot is omitted
            if ``bar=None``.
        errorbar (dictionary): Additional plotting arguments for the
            errorbar graph, showing error bars on the histogram. This part
            of the plot is omitted if ``errorbar=None``.
        gaussian (dictionary): Additional plotting arguments for the plot
            of the Gaussian probability for the |GVar| (``g``) specified
            in the initialization. This part of the plot is omitted if
            ``gaussian=None`` or if no ``g`` was specified.
    """
    if numpy.ndim(count) != 1:
        raise ValueError('count must have dimension 1')
    if plot is None:
        import matplotlib.pyplot as plot
    if len(count) == len(self.midpoints) + 2:
        norm = numpy.sum(count)
        data = numpy.asarray(count[1:-1]) / norm
    elif len(count) != len(self.midpoints):
        raise ValueError(
            'wrong data length: %s != %s' % (len(count), len(self.midpoints))
            )
    else:
        data = numpy.asarray(count)
    if plottype == 'cumulative':
        data = numpy.cumsum(data)
        data = numpy.array([0.] + data.tolist())
        data_sdev = sdev(data)
        if not numpy.all(data_sdev == 0.0):
            data_mean = mean(data)
            plot.errorbar(self.bins, data_mean, data_sdev, **errorbar)
            if bar is not None:
                plot.fill_between(self.bins, 0, data_mean, **bar)
        # mean, +- 1 sigma lines
        plot.plot([self.bins[0], self.bins[-1]], [0.5, 0.5], 'k:')
        plot.plot([self.bins[0], self.bins[-1]], [0.158655254, 0.158655254], 'k:')
        plot.plot([self.bins[0], self.bins[-1]], [0.841344746, 0.841344746], 'k:')
    else:
        if plottype == 'density':
            data = data / self.widths
        if errorbar is not None:
            data_sdev = sdev(data)
            if not numpy.all(data_sdev == 0.0):
                data_mean = mean(data)
                plot.errorbar(self.midpoints, data_mean, data_sdev, **errorbar)
        if bar is not None:
            plot.bar(self.bins[:-1], mean(data), width=self.widths, align='edge', **bar)
    if gaussian is not None and self.g is not None:
        # spline goes through the errorbar points for gaussian stats
        if plottype == 'cumulative':
            x = numpy.array(self.bins.tolist() + self.midpoints.tolist())
            x.sort()
            dx = (x - self.g.mean) / self.g.sdev
            y = (erf(dx / 2**0.5) + 1) / 2.
            yspline = cspline.CSpline(x, y)
            plot.ylabel('cumulative probability')
            plot.ylim(0, 1.0)
        elif plottype in ['density', 'probability']:
            x = self.bins
            dx = (x - self.g.mean) / self.g.sdev
            y = (erf(dx / 2**0.5) + 1) / 2.
            x = self.midpoints
            y = (y[1:] - y[:-1])
            if plottype == 'density':
                y /= self.widths
                plot.ylabel('probability density')
            else:
                plot.ylabel('probability')
            yspline = cspline.CSpline(x, y)
        else:
            raise ValueError('unknown plottype: ' + str(plottype))
        if len(x) < 100:
            ny = int(100. / len(x) + 0.5) * len(x)
        else:
            ny = len(x)
        xplot = numpy.linspace(x[0], x[-1], ny)
        plot.plot(xplot, yspline(xplot), **gaussian)
    if show:
        plot.show()
    return plot
[ "def", "make_plot", "(", "self", ",", "count", ",", "plot", "=", "None", ",", "show", "=", "False", ",", "plottype", "=", "'probability'", ",", "bar", "=", "dict", "(", "alpha", "=", "0.15", ",", "color", "=", "'b'", ",", "linewidth", "=", "1.0", ",", "edgecolor", "=", "'b'", ")", ",", "errorbar", "=", "dict", "(", "fmt", "=", "'b.'", ")", ",", "gaussian", "=", "dict", "(", "ls", "=", "'--'", ",", "c", "=", "'r'", ")", ")", ":", "if", "numpy", ".", "ndim", "(", "count", ")", "!=", "1", ":", "raise", "ValueError", "(", "'count must have dimension 1'", ")", "if", "plot", "is", "None", ":", "import", "matplotlib", ".", "pyplot", "as", "plot", "if", "len", "(", "count", ")", "==", "len", "(", "self", ".", "midpoints", ")", "+", "2", ":", "norm", "=", "numpy", ".", "sum", "(", "count", ")", "data", "=", "numpy", ".", "asarray", "(", "count", "[", "1", ":", "-", "1", "]", ")", "/", "norm", "elif", "len", "(", "count", ")", "!=", "len", "(", "self", ".", "midpoints", ")", ":", "raise", "ValueError", "(", "'wrong data length: %s != %s'", "%", "(", "len", "(", "count", ")", ",", "len", "(", "self", ".", "midpoints", ")", ")", ")", "else", ":", "data", "=", "numpy", ".", "asarray", "(", "count", ")", "if", "plottype", "==", "'cumulative'", ":", "data", "=", "numpy", ".", "cumsum", "(", "data", ")", "data", "=", "numpy", ".", "array", "(", "[", "0.", "]", "+", "data", ".", "tolist", "(", ")", ")", "data_sdev", "=", "sdev", "(", "data", ")", "if", "not", "numpy", ".", "all", "(", "data_sdev", "==", "0.0", ")", ":", "data_mean", "=", "mean", "(", "data", ")", "plot", ".", "errorbar", "(", "self", ".", "bins", ",", "data_mean", ",", "data_sdev", ",", "*", "*", "errorbar", ")", "if", "bar", "is", "not", "None", ":", "plot", ".", "fill_between", "(", "self", ".", "bins", ",", "0", ",", "data_mean", ",", "*", "*", "bar", ")", "# mean, +- 1 sigma lines", "plot", ".", "plot", "(", "[", "self", ".", "bins", "[", "0", "]", ",", "self", ".", "bins", "[", "-", "1", "]", "]", ",", "[", "0.5", ",", "0.5", "]", ",", "'k:'", ")", "plot", ".", "plot", "(", "[", "self", ".", "bins", "[", "0", "]", ",", "self", ".", "bins", "[", "-", "1", "]", "]", ",", "[", "0.158655254", ",", "0.158655254", "]", ",", "'k:'", ")", "plot", ".", "plot", "(", "[", "self", ".", "bins", "[", "0", "]", ",", "self", ".", "bins", "[", "-", "1", "]", "]", ",", "[", "0.841344746", ",", "0.841344746", "]", ",", "'k:'", ")", "else", ":", "if", "plottype", "==", "'density'", ":", "data", "=", "data", "/", "self", ".", "widths", "if", "errorbar", "is", "not", "None", ":", "data_sdev", "=", "sdev", "(", "data", ")", "if", "not", "numpy", ".", "all", "(", "data_sdev", "==", "0.0", ")", ":", "data_mean", "=", "mean", "(", "data", ")", "plot", ".", "errorbar", "(", "self", ".", "midpoints", ",", "data_mean", ",", "data_sdev", ",", "*", "*", "errorbar", ")", "if", "bar", "is", "not", "None", ":", "plot", ".", "bar", "(", "self", ".", "bins", "[", ":", "-", "1", "]", ",", "mean", "(", "data", ")", ",", "width", "=", "self", ".", "widths", ",", "align", "=", "'edge'", ",", "*", "*", "bar", ")", "if", "gaussian", "is", "not", "None", "and", "self", ".", "g", "is", "not", "None", ":", "# spline goes through the errorbar points for gaussian stats", "if", "plottype", "==", "'cumulative'", ":", "x", "=", "numpy", ".", "array", "(", "self", ".", "bins", ".", "tolist", "(", ")", "+", "self", ".", "midpoints", ".", "tolist", "(", ")", ")", "x", ".", "sort", "(", ")", "dx", "=", "(", "x", "-", "self", ".", "g", ".", "mean", ")", "/", "self", ".", "g", ".", 
"sdev", "y", "=", "(", "erf", "(", "dx", "/", "2", "**", "0.5", ")", "+", "1", ")", "/", "2.", "yspline", "=", "cspline", ".", "CSpline", "(", "x", ",", "y", ")", "plot", ".", "ylabel", "(", "'cumulative probability'", ")", "plot", ".", "ylim", "(", "0", ",", "1.0", ")", "elif", "plottype", "in", "[", "'density'", ",", "'probability'", "]", ":", "x", "=", "self", ".", "bins", "dx", "=", "(", "x", "-", "self", ".", "g", ".", "mean", ")", "/", "self", ".", "g", ".", "sdev", "y", "=", "(", "erf", "(", "dx", "/", "2", "**", "0.5", ")", "+", "1", ")", "/", "2.", "x", "=", "self", ".", "midpoints", "y", "=", "(", "y", "[", "1", ":", "]", "-", "y", "[", ":", "-", "1", "]", ")", "if", "plottype", "==", "'density'", ":", "y", "/=", "self", ".", "widths", "plot", ".", "ylabel", "(", "'probability density'", ")", "else", ":", "plot", ".", "ylabel", "(", "'probability'", ")", "yspline", "=", "cspline", ".", "CSpline", "(", "x", ",", "y", ")", "else", ":", "raise", "ValueError", "(", "'unknown plottype: '", "+", "str", "(", "plottype", ")", ")", "if", "len", "(", "x", ")", "<", "100", ":", "ny", "=", "int", "(", "100.", "/", "len", "(", "x", ")", "+", "0.5", ")", "*", "len", "(", "x", ")", "else", ":", "ny", "=", "len", "(", "x", ")", "xplot", "=", "numpy", ".", "linspace", "(", "x", "[", "0", "]", ",", "x", "[", "-", "1", "]", ",", "ny", ")", "plot", ".", "plot", "(", "xplot", ",", "yspline", "(", "xplot", ")", ",", "*", "*", "gaussian", ")", "if", "show", ":", "plot", ".", "show", "(", ")", "return", "plot" ]
Convert histogram counts in array ``count`` into a plot. Args: count (array): Array of histogram counts (see :meth:`PDFHistogram.count`). plot (plotter): :mod:`matplotlib` plotting window. If ``None`` uses the default window. Default is ``None``. show (boolean): Displayes plot if ``True``; otherwise returns the plot. Default is ``False``. plottype (str): The probabilities in each bin are plotted if ``plottype='probability'`` (default). The average probability density is plot if ``plottype='density'``. The cumulative probability is plotted if ``plottype=cumulative``. bar (dictionary): Additional plotting arguments for the bar graph showing the histogram. This part of the plot is omitted if ``bar=None``. errorbar (dictionary): Additional plotting arguments for the errorbar graph, showing error bars on the histogram. This part of the plot is omitted if ``errorbar=None``. gaussian (dictionary): Additional plotting arguments for the plot of the Gaussian probability for the |GVar| (``g``) specified in the initialization. This part of the plot is omitted if ``gaussian=None`` or if no ``g`` was specified.
[ "Convert", "histogram", "counts", "in", "array", "count", "into", "a", "plot", "." ]
python
train
47.049505
globality-corp/microcosm-postgres
microcosm_postgres/operations.py
https://github.com/globality-corp/microcosm-postgres/blob/43dd793b1fc9b84e4056700f350e79e0df5ff501/microcosm_postgres/operations.py#L35-L43
def create_all(graph):
    """
    Create all database tables.

    """
    head = get_current_head(graph)
    if head is None:
        Model.metadata.create_all(graph.postgres)
        stamp_head(graph)
[ "def", "create_all", "(", "graph", ")", ":", "head", "=", "get_current_head", "(", "graph", ")", "if", "head", "is", "None", ":", "Model", ".", "metadata", ".", "create_all", "(", "graph", ".", "postgres", ")", "stamp_head", "(", "graph", ")" ]
Create all database tables.
[ "Create", "all", "database", "tables", "." ]
python
train
21.666667
danpoland/pyramid-restful-framework
pyramid_restful/routers.py
https://github.com/danpoland/pyramid-restful-framework/blob/4d8c9db44b1869c3d1fdd59ca304c3166473fcbb/pyramid_restful/routers.py#L205-L217
def get_method_map(self, viewset, method_map):
    """
    Given a viewset, and a mapping of http methods to actions,
    return a new mapping which only includes any mappings that
    are actually implemented by the viewset.
    """
    bound_methods = {}

    for method, action in method_map.items():
        if hasattr(viewset, action):
            bound_methods[method] = action

    return bound_methods
[ "def", "get_method_map", "(", "self", ",", "viewset", ",", "method_map", ")", ":", "bound_methods", "=", "{", "}", "for", "method", ",", "action", "in", "method_map", ".", "items", "(", ")", ":", "if", "hasattr", "(", "viewset", ",", "action", ")", ":", "bound_methods", "[", "method", "]", "=", "action", "return", "bound_methods" ]
Given a viewset, and a mapping of http methods to actions, return a new mapping which only includes any mappings that are actually implemented by the viewset.
[ "Given", "a", "viewset", "and", "a", "mapping", "of", "http", "methods", "to", "actions", "return", "a", "new", "mapping", "which", "only", "includes", "any", "mappings", "that", "are", "actually", "implemented", "by", "the", "viewset", "." ]
python
train
33.076923
ONSdigital/sdx-common
sdx/common/logger_config.py
https://github.com/ONSdigital/sdx-common/blob/815f6a116d41fddae182943d821dc5f582a9af69/sdx/common/logger_config.py#L5-L38
def logger_initial_config(service_name=None,
                          log_level=None,
                          logger_format=None,
                          logger_date_format=None):
    '''Set initial logging configurations.

    :param service_name: Name of the service
    :type logger: String
    :param log_level: A string or integer corresponding to a Python logging level
    :type log_level: String
    :param logger_format: A string defining the format of the logs
    :type log_level: String
    :param logger_date_format: A string defining the format of the date/time in the logs
    :type log_level: String
    :rtype: None
    '''
    if not log_level:
        log_level = os.getenv('LOGGING_LEVEL', 'DEBUG')
    if not logger_format:
        logger_format = (
            "%(asctime)s.%(msecs)06dZ|"
            "%(levelname)s: {}: %(message)s"
        ).format(service_name)
    if not logger_date_format:
        logger_date_format = os.getenv('LOGGING_DATE_FORMAT', "%Y-%m-%dT%H:%M:%S")

    logging.basicConfig(level=log_level,
                        format=logger_format,
                        datefmt=logger_date_format)
[ "def", "logger_initial_config", "(", "service_name", "=", "None", ",", "log_level", "=", "None", ",", "logger_format", "=", "None", ",", "logger_date_format", "=", "None", ")", ":", "if", "not", "log_level", ":", "log_level", "=", "os", ".", "getenv", "(", "'LOGGING_LEVEL'", ",", "'DEBUG'", ")", "if", "not", "logger_format", ":", "logger_format", "=", "(", "\"%(asctime)s.%(msecs)06dZ|\"", "\"%(levelname)s: {}: %(message)s\"", ")", ".", "format", "(", "service_name", ")", "if", "not", "logger_date_format", ":", "logger_date_format", "=", "os", ".", "getenv", "(", "'LOGGING_DATE_FORMAT'", ",", "\"%Y-%m-%dT%H:%M:%S\"", ")", "logging", ".", "basicConfig", "(", "level", "=", "log_level", ",", "format", "=", "logger_format", ",", "datefmt", "=", "logger_date_format", ")" ]
Set initial logging configurations. :param service_name: Name of the service :type logger: String :param log_level: A string or integer corresponding to a Python logging level :type log_level: String :param logger_format: A string defining the format of the logs :type log_level: String :param logger_date_format: A string defining the format of the date/time in the logs :type log_level: String :rtype: None
[ "Set", "initial", "logging", "configurations", "." ]
python
train
32.852941
disqus/django-mailviews
mailviews/messages.py
https://github.com/disqus/django-mailviews/blob/9993d5e911d545b3bc038433986c5f6812e7e965/mailviews/messages.py#L149-L162
def render_subject(self, context):
    """
    Renders the message subject for the given context.

    The context data is automatically unescaped to avoid rendering HTML
    entities in ``text/plain`` content.

    :param context: The context to use when rendering the subject template.
    :type context: :class:`~django.template.Context`
    :returns: A rendered subject.
    :rtype: :class:`str`
    """
    rendered = self.subject_template.render(unescape(context))
    return rendered.strip()
[ "def", "render_subject", "(", "self", ",", "context", ")", ":", "rendered", "=", "self", ".", "subject_template", ".", "render", "(", "unescape", "(", "context", ")", ")", "return", "rendered", ".", "strip", "(", ")" ]
Renders the message subject for the given context. The context data is automatically unescaped to avoid rendering HTML entities in ``text/plain`` content. :param context: The context to use when rendering the subject template. :type context: :class:`~django.template.Context` :returns: A rendered subject. :rtype: :class:`str`
[ "Renders", "the", "message", "subject", "for", "the", "given", "context", "." ]
python
valid
37.785714
CamDavidsonPilon/lifelines
lifelines/fitters/__init__.py
https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/fitters/__init__.py#L986-L1003
def hazard_at_times(self, times, label=None):
    """
    Return a Pandas series of the predicted hazard at specific times.

    Parameters
    -----------
    times: iterable or float
      values to return the hazard at.
    label: string, optional
      Rename the series returned. Useful for plotting.

    Returns
    --------
    pd.Series
    """
    label = coalesce(label, self._label)
    return pd.Series(self._hazard(self._fitted_parameters_, times), index=_to_array(times), name=label)
[ "def", "hazard_at_times", "(", "self", ",", "times", ",", "label", "=", "None", ")", ":", "label", "=", "coalesce", "(", "label", ",", "self", ".", "_label", ")", "return", "pd", ".", "Series", "(", "self", ".", "_hazard", "(", "self", ".", "_fitted_parameters_", ",", "times", ")", ",", "index", "=", "_to_array", "(", "times", ")", ",", "name", "=", "label", ")" ]
Return a Pandas series of the predicted hazard at specific times. Parameters ----------- times: iterable or float values to return the hazard at. label: string, optional Rename the series returned. Useful for plotting. Returns -------- pd.Series
[ "Return", "a", "Pandas", "series", "of", "the", "predicted", "hazard", "at", "specific", "times", "." ]
python
train
29.888889
greenbone/ospd
ospd/ospd.py
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/ospd.py#L1057-L1073
def handle_delete_scan_command(self, scan_et):
    """ Handles <delete_scan> command.

    @return: Response string for <delete_scan> command.
    """
    scan_id = scan_et.attrib.get('scan_id')
    if scan_id is None:
        return simple_response_str('delete_scan', 404, 'No scan_id attribute')

    if not self.scan_exists(scan_id):
        text = "Failed to find scan '{0}'".format(scan_id)
        return simple_response_str('delete_scan', 404, text)

    self.check_scan_process(scan_id)
    if self.delete_scan(scan_id):
        return simple_response_str('delete_scan', 200, 'OK')

    raise OSPDError('Scan in progress', 'delete_scan')
[ "def", "handle_delete_scan_command", "(", "self", ",", "scan_et", ")", ":", "scan_id", "=", "scan_et", ".", "attrib", ".", "get", "(", "'scan_id'", ")", "if", "scan_id", "is", "None", ":", "return", "simple_response_str", "(", "'delete_scan'", ",", "404", ",", "'No scan_id attribute'", ")", "if", "not", "self", ".", "scan_exists", "(", "scan_id", ")", ":", "text", "=", "\"Failed to find scan '{0}'\"", ".", "format", "(", "scan_id", ")", "return", "simple_response_str", "(", "'delete_scan'", ",", "404", ",", "text", ")", "self", ".", "check_scan_process", "(", "scan_id", ")", "if", "self", ".", "delete_scan", "(", "scan_id", ")", ":", "return", "simple_response_str", "(", "'delete_scan'", ",", "200", ",", "'OK'", ")", "raise", "OSPDError", "(", "'Scan in progress'", ",", "'delete_scan'", ")" ]
Handles <delete_scan> command. @return: Response string for <delete_scan> command.
[ "Handles", "<delete_scan", ">", "command", "." ]
python
train
42.235294
Jajcus/pyxmpp2
pyxmpp2/streamsasl.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/streamsasl.py#L334-L346
def _process_sasl_failure(self, stream, element):
    """Process incoming <sasl:failure/> element.

    [initiating entity only]
    """
    _unused = stream
    if not self.authenticator:
        logger.debug("Unexpected SASL response")
        return False
    logger.debug("SASL authentication failed: {0!r}".format(
                                        element_to_unicode(element)))
    raise SASLAuthenticationFailed("SASL authentication failed")
[ "def", "_process_sasl_failure", "(", "self", ",", "stream", ",", "element", ")", ":", "_unused", "=", "stream", "if", "not", "self", ".", "authenticator", ":", "logger", ".", "debug", "(", "\"Unexpected SASL response\"", ")", "return", "False", "logger", ".", "debug", "(", "\"SASL authentication failed: {0!r}\"", ".", "format", "(", "element_to_unicode", "(", "element", ")", ")", ")", "raise", "SASLAuthenticationFailed", "(", "\"SASL authentication failed\"", ")" ]
Process incoming <sasl:failure/> element. [initiating entity only]
[ "Process", "incoming", "<sasl", ":", "failure", "/", ">", "element", "." ]
python
valid
37.461538
SuperCowPowers/workbench
workbench/clients/customer_report.py
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/clients/customer_report.py#L7-L20
def run():
    """This client generates customer reports on all the samples in workbench."""

    # Grab server args
    args = client_helper.grab_server_args()

    # Start up workbench connection
    workbench = zerorpc.Client(timeout=300, heartbeat=60)
    workbench.connect('tcp://'+args['server']+':'+args['port'])

    all_set = workbench.generate_sample_set()
    results = workbench.set_work_request('view_customer', all_set)
    for customer in results:
        print customer['customer']
[ "def", "run", "(", ")", ":", "# Grab server args", "args", "=", "client_helper", ".", "grab_server_args", "(", ")", "# Start up workbench connection", "workbench", "=", "zerorpc", ".", "Client", "(", "timeout", "=", "300", ",", "heartbeat", "=", "60", ")", "workbench", ".", "connect", "(", "'tcp://'", "+", "args", "[", "'server'", "]", "+", "':'", "+", "args", "[", "'port'", "]", ")", "all_set", "=", "workbench", ".", "generate_sample_set", "(", ")", "results", "=", "workbench", ".", "set_work_request", "(", "'view_customer'", ",", "all_set", ")", "for", "customer", "in", "results", ":", "print", "customer", "[", "'customer'", "]" ]
This client generates customer reports on all the samples in workbench.
[ "This", "client", "generates", "customer", "reports", "on", "all", "the", "samples", "in", "workbench", "." ]
python
train
34.857143
Pelagicore/qface
qface/helper/generic.py
https://github.com/Pelagicore/qface/blob/7f60e91e3a91a7cb04cfacbc9ce80f43df444853/qface/helper/generic.py#L5-L12
def jsonify(symbol):
    """ returns json format for symbol """
    try:  # all symbols have a toJson method, try it
        return json.dumps(symbol.toJson(), indent=' ')
    except AttributeError:
        pass
    return json.dumps(symbol, indent=' ')
[ "def", "jsonify", "(", "symbol", ")", ":", "try", ":", "# all symbols have a toJson method, try it", "return", "json", ".", "dumps", "(", "symbol", ".", "toJson", "(", ")", ",", "indent", "=", "' '", ")", "except", "AttributeError", ":", "pass", "return", "json", ".", "dumps", "(", "symbol", ",", "indent", "=", "' '", ")" ]
returns json format for symbol
[ "returns", "json", "format", "for", "symbol" ]
python
train
31.875
TylerTemp/docpie
docpie/parser.py
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/parser.py#L478-L521
def parse_names_and_default(self):
    """parse for `parse_content`
    {title: [('-a, --all=STH', 'default'), ...]}"""
    result = {}
    for title, text in self.formal_content.items():
        if not text:
            result[title] = []
            continue
        logger.debug('\n' + text)
        collect = []
        to_list = text.splitlines()

        # parse first line. Should NEVER failed.
        # this will ensure in `[default: xxx]`,
        # the `xxx`(e.g: `\t`, `,`) will not be changed by _format_line
        previous_line = to_list.pop(0)
        collect.append(self.parse_line_option_indent(previous_line))

        for line in to_list:
            indent_match = self.indent_re.match(line)
            this_indent = len(indent_match.groupdict()['indent'])

            if this_indent >= collect[-1]['indent']:
                # A multi line description
                previous_line = line
                continue

            # new option line
            # deal the default for previous option
            collect[-1]['default'] = self.parse_default(previous_line)
            # deal this option
            collect.append(self.parse_line_option_indent(line))
            logger.debug(collect[-1])
            previous_line = line
        else:
            collect[-1]['default'] = self.parse_default(previous_line)

        result[title] = [
            (each['option'], each['default']) for each in collect]

    return result
[ "def", "parse_names_and_default", "(", "self", ")", ":", "result", "=", "{", "}", "for", "title", ",", "text", "in", "self", ".", "formal_content", ".", "items", "(", ")", ":", "if", "not", "text", ":", "result", "[", "title", "]", "=", "[", "]", "continue", "logger", ".", "debug", "(", "'\\n'", "+", "text", ")", "collect", "=", "[", "]", "to_list", "=", "text", ".", "splitlines", "(", ")", "# parse first line. Should NEVER failed.\r", "# this will ensure in `[default: xxx]`,\r", "# the `xxx`(e.g: `\\t`, `,`) will not be changed by _format_line\r", "previous_line", "=", "to_list", ".", "pop", "(", "0", ")", "collect", ".", "append", "(", "self", ".", "parse_line_option_indent", "(", "previous_line", ")", ")", "for", "line", "in", "to_list", ":", "indent_match", "=", "self", ".", "indent_re", ".", "match", "(", "line", ")", "this_indent", "=", "len", "(", "indent_match", ".", "groupdict", "(", ")", "[", "'indent'", "]", ")", "if", "this_indent", ">=", "collect", "[", "-", "1", "]", "[", "'indent'", "]", ":", "# A multi line description\r", "previous_line", "=", "line", "continue", "# new option line\r", "# deal the default for previous option\r", "collect", "[", "-", "1", "]", "[", "'default'", "]", "=", "self", ".", "parse_default", "(", "previous_line", ")", "# deal this option\r", "collect", ".", "append", "(", "self", ".", "parse_line_option_indent", "(", "line", ")", ")", "logger", ".", "debug", "(", "collect", "[", "-", "1", "]", ")", "previous_line", "=", "line", "else", ":", "collect", "[", "-", "1", "]", "[", "'default'", "]", "=", "self", ".", "parse_default", "(", "previous_line", ")", "result", "[", "title", "]", "=", "[", "(", "each", "[", "'option'", "]", ",", "each", "[", "'default'", "]", ")", "for", "each", "in", "collect", "]", "return", "result" ]
parse for `parse_content` {title: [('-a, --all=STH', 'default'), ...]}
[ "parse", "for", "parse_content", "{", "title", ":", "[", "(", "-", "a", "--", "all", "=", "STH", "default", ")", "...", "]", "}" ]
python
train
36.227273
miquelo/resort
packages/resort/component/vagrant.py
https://github.com/miquelo/resort/blob/097a25d3257c91a75c194fd44c2797ab356f85dd/packages/resort/component/vagrant.py#L32-L51
def read(self, cmd_args):

    """
    Execute Vagrant read command.

    :param list cmd_args: Command argument list.
    """

    args = [
        "vagrant",
        "--machine-readable"
    ]
    args.extend(cmd_args)
    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
    for line in proc.stdout.readlines():
        if len(line) == 0:
            break
        yield line.decode("UTF-8").rsplit(",")
    proc.wait()
[ "def", "read", "(", "self", ",", "cmd_args", ")", ":", "args", "=", "[", "\"vagrant\"", ",", "\"--machine-readable\"", "]", "args", ".", "extend", "(", "cmd_args", ")", "proc", "=", "subprocess", ".", "Popen", "(", "args", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "for", "line", "in", "proc", ".", "stdout", ".", "readlines", "(", ")", ":", "if", "len", "(", "line", ")", "==", "0", ":", "break", "yield", "line", ".", "decode", "(", "\"UTF-8\"", ")", ".", "rsplit", "(", "\",\"", ")", "proc", ".", "wait", "(", ")" ]
Execute Vagrant read command. :param list cmd_args: Command argument list.
[ "Execute", "Vagrant", "read", "command", ".", ":", "param", "list", "cmd_args", ":", "Command", "argument", "list", "." ]
python
train
18.5
pycontribs/jira
jira/client.py
https://github.com/pycontribs/jira/blob/397db5d78441ed6a680a9b7db4c62030ade1fd8a/jira/client.py#L1860-L1870
def remove_watcher(self, issue, watcher):
    """Remove a user from an issue's watch list.

    :param issue: ID or key of the issue affected
    :param watcher: username of the user to remove from the watchers list
    :rtype: Response
    """
    url = self._get_url('issue/' + str(issue) + '/watchers')
    params = {'username': watcher}
    result = self._session.delete(url, params=params)
    return result
[ "def", "remove_watcher", "(", "self", ",", "issue", ",", "watcher", ")", ":", "url", "=", "self", ".", "_get_url", "(", "'issue/'", "+", "str", "(", "issue", ")", "+", "'/watchers'", ")", "params", "=", "{", "'username'", ":", "watcher", "}", "result", "=", "self", ".", "_session", ".", "delete", "(", "url", ",", "params", "=", "params", ")", "return", "result" ]
Remove a user from an issue's watch list. :param issue: ID or key of the issue affected :param watcher: username of the user to remove from the watchers list :rtype: Response
[ "Remove", "a", "user", "from", "an", "issue", "s", "watch", "list", "." ]
python
train
39.818182
kamikaze/webdav
src/webdav/client.py
https://github.com/kamikaze/webdav/blob/6facff7224023d3e28c8e1592f3c58401c91a0e6/src/webdav/client.py#L605-L622
def info(self, remote_path):
    """Gets information about resource on WebDAV.
    More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND

    :param remote_path: the path to remote resource.
    :return: a dictionary of information attributes and them values with following keys:
             `created`: date of resource creation,
             `name`: name of resource,
             `size`: size of resource,
             `modified`: date of resource modification.
    """
    urn = Urn(remote_path)
    if not self.check(urn.path()) and not self.check(Urn(remote_path, directory=True).path()):
        raise RemoteResourceNotFound(remote_path)

    response = self.execute_request(action='info', path=urn.quote())
    path = self.get_full_path(urn)
    return WebDavXmlUtils.parse_info_response(content=response.content, path=path, hostname=self.webdav.hostname)
[ "def", "info", "(", "self", ",", "remote_path", ")", ":", "urn", "=", "Urn", "(", "remote_path", ")", "if", "not", "self", ".", "check", "(", "urn", ".", "path", "(", ")", ")", "and", "not", "self", ".", "check", "(", "Urn", "(", "remote_path", ",", "directory", "=", "True", ")", ".", "path", "(", ")", ")", ":", "raise", "RemoteResourceNotFound", "(", "remote_path", ")", "response", "=", "self", ".", "execute_request", "(", "action", "=", "'info'", ",", "path", "=", "urn", ".", "quote", "(", ")", ")", "path", "=", "self", ".", "get_full_path", "(", "urn", ")", "return", "WebDavXmlUtils", ".", "parse_info_response", "(", "content", "=", "response", ".", "content", ",", "path", "=", "path", ",", "hostname", "=", "self", ".", "webdav", ".", "hostname", ")" ]
Gets information about resource on WebDAV. More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND :param remote_path: the path to remote resource. :return: a dictionary of information attributes and them values with following keys: `created`: date of resource creation, `name`: name of resource, `size`: size of resource, `modified`: date of resource modification.
[ "Gets", "information", "about", "resource", "on", "WebDAV", ".", "More", "information", "you", "can", "find", "by", "link", "http", ":", "//", "webdav", ".", "org", "/", "specs", "/", "rfc4918", ".", "html#METHOD_PROPFIND" ]
python
train
52.388889
PythonOptimizers/cygenja
cygenja/treemap/treemap.py
https://github.com/PythonOptimizers/cygenja/blob/a9ef91cdfa8452beeeec4f050f928b830379f91c/cygenja/treemap/treemap.py#L185-L201
def add_unique_element(self, location, element):
    """
    Create an entry located at ``location``.

    Args:
        location: String or :class:`LocationDescriptor` to describe a "separator location"
            (i.e. dir1/dir2/dir3 for instance).
        element: Element to store.

    Returns:
        The created node with the element.

    Notes:
        The different sub locations entries **must** exist and the last one **MUST NOT** already exist.
        Use the more loose :meth:`add_element` method if needed.
    """
    return self._create_entry(location, element, unique=True)
[ "def", "add_unique_element", "(", "self", ",", "location", ",", "element", ")", ":", "return", "self", ".", "_create_entry", "(", "location", ",", "element", ",", "unique", "=", "True", ")" ]
Create an entry located at ``location``. Args: location: String or :class:`LocationDescriptor` to describe a "separator location" (i.e. dir1/dir2/dir3 for instance). element: Element to store. Returns: The created node with the element. Notes: The different sub locations entries **must** exist and the last one **MUST NOT** already exist. Use the more loose :meth:`add_element` method if needed.
[ "Create", "an", "entry", "located", "at", "location", ".", "Args", ":", "location", ":", "String", "or", ":", "class", ":", "LocationDescriptor", "to", "describe", "a", "separator", "location", "(", "i", ".", "e", ".", "dir1", "/", "dir2", "/", "dir3", "for", "instance", ")", ".", "element", ":", "Element", "to", "store", ".", "Returns", ":", "The", "created", "node", "with", "the", "element", "." ]
python
train
38
uber/tchannel-python
tchannel/tornado/stream.py
https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/stream.py#L259-L280
def maybe_stream(s):
    """Ensure that the given argument is a stream."""
    if isinstance(s, Stream):
        return s

    if s is None:
        stream = InMemStream()
        stream.close()  # we don't intend to write anything
        return stream

    if isinstance(s, unicode):
        s = s.encode('utf-8')

    if isinstance(s, bytearray):
        s = bytes(s)

    if isinstance(s, bytes):
        stream = InMemStream(s)
        stream.close()  # we don't intend to write anything
        return stream

    # s may still conform to the Stream interface. Yay duck typing.
    return s
[ "def", "maybe_stream", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "Stream", ")", ":", "return", "s", "if", "s", "is", "None", ":", "stream", "=", "InMemStream", "(", ")", "stream", ".", "close", "(", ")", "# we don't intend to write anything", "return", "stream", "if", "isinstance", "(", "s", ",", "unicode", ")", ":", "s", "=", "s", ".", "encode", "(", "'utf-8'", ")", "if", "isinstance", "(", "s", ",", "bytearray", ")", ":", "s", "=", "bytes", "(", "s", ")", "if", "isinstance", "(", "s", ",", "bytes", ")", ":", "stream", "=", "InMemStream", "(", "s", ")", "stream", ".", "close", "(", ")", "# we don't intend to write anything", "return", "stream", "# s may still conform to the Stream interface. Yay duck typing.", "return", "s" ]
Ensure that the given argument is a stream.
[ "Ensure", "that", "the", "given", "argument", "is", "a", "stream", "." ]
python
train
26.090909
sryza/spark-timeseries
python/sparkts/timeseriesrdd.py
https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/timeseriesrdd.py#L190-L199
def with_index(self, new_index):
    """
    Returns a TimeSeriesRDD rebased on top of a new index.  Any timestamps that exist
    in the new index but not in the existing index will be filled in with NaNs.

    Parameters
    ----------
    new_index : DateTimeIndex
    """
    return TimeSeriesRDD(None, None, self._jtsrdd.withIndex(new_index._jdt_index), self.ctx)
[ "def", "with_index", "(", "self", ",", "new_index", ")", ":", "return", "TimeSeriesRDD", "(", "None", ",", "None", ",", "self", ".", "_jtsrdd", ".", "withIndex", "(", "new_index", ".", "_jdt_index", ")", ",", "self", ".", "ctx", ")" ]
Returns a TimeSeriesRDD rebased on top of a new index. Any timestamps that exist in the new index but not in the existing index will be filled in with NaNs. Parameters ---------- new_index : DateTimeIndex
[ "Returns", "a", "TimeSeriesRDD", "rebased", "on", "top", "of", "a", "new", "index", ".", "Any", "timestamps", "that", "exist", "in", "the", "new", "index", "but", "not", "in", "the", "existing", "index", "will", "be", "filled", "in", "with", "NaNs", ".", "Parameters", "----------", "new_index", ":", "DateTimeIndex" ]
python
train
39.9
nim65s/ndh
ndh/utils.py
https://github.com/nim65s/ndh/blob/3e14644e3f701044acbb7aafbf69b51ad6f86d99/ndh/utils.py#L27-L40
def get_env(env_file='.env'):
    """ Set default environment variables from .env file """
    try:
        with open(env_file) as f:
            for line in f.readlines():
                try:
                    key, val = line.split('=', maxsplit=1)
                    os.environ.setdefault(key.strip(), val.strip())
                except ValueError:
                    pass
    except FileNotFoundError:
        pass
[ "def", "get_env", "(", "env_file", "=", "'.env'", ")", ":", "try", ":", "with", "open", "(", "env_file", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "try", ":", "key", ",", "val", "=", "line", ".", "split", "(", "'='", ",", "maxsplit", "=", "1", ")", "os", ".", "environ", ".", "setdefault", "(", "key", ".", "strip", "(", ")", ",", "val", ".", "strip", "(", ")", ")", "except", "ValueError", ":", "pass", "except", "FileNotFoundError", ":", "pass" ]
Set default environment variables from .env file
[ "Set", "default", "environment", "variables", "from", ".", "env", "file" ]
python
train
29.857143
saltstack/salt
salt/modules/tls.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/tls.py#L566-L609
def validate(cert, ca_name, crl_file):
    '''
    .. versionadded:: Neon

    Validate a certificate against a given CA/CRL.

    cert
        path to the certifiate PEM file or string

    ca_name
        name of the CA

    crl_file
        full path to the CRL file
    '''
    store = OpenSSL.crypto.X509Store()
    cert_obj = _read_cert(cert)
    if cert_obj is None:
        raise CommandExecutionError(
            'Failed to read cert from {0}, see log for details'.format(cert)
        )
    ca_dir = '{0}/{1}'.format(cert_base_path(), ca_name)
    ca_cert = _read_cert('{0}/{1}_ca_cert.crt'.format(ca_dir, ca_name))
    store.add_cert(ca_cert)
    # These flags tell OpenSSL to check the leaf as well as the
    # entire cert chain.
    X509StoreFlags = OpenSSL.crypto.X509StoreFlags
    store.set_flags(X509StoreFlags.CRL_CHECK | X509StoreFlags.CRL_CHECK_ALL)
    if crl_file is None:
        crl = OpenSSL.crypto.CRL()
    else:
        with salt.utils.files.fopen(crl_file) as fhr:
            crl = OpenSSL.crypto.load_crl(OpenSSL.crypto.FILETYPE_PEM, fhr.read())
    store.add_crl(crl)
    context = OpenSSL.crypto.X509StoreContext(store, cert_obj)
    ret = {}
    try:
        context.verify_certificate()
        ret['valid'] = True
    except OpenSSL.crypto.X509StoreContextError as e:
        ret['error'] = str(e)
        ret['error_cert'] = e.certificate
        ret['valid'] = False
    return ret
[ "def", "validate", "(", "cert", ",", "ca_name", ",", "crl_file", ")", ":", "store", "=", "OpenSSL", ".", "crypto", ".", "X509Store", "(", ")", "cert_obj", "=", "_read_cert", "(", "cert", ")", "if", "cert_obj", "is", "None", ":", "raise", "CommandExecutionError", "(", "'Failed to read cert from {0}, see log for details'", ".", "format", "(", "cert", ")", ")", "ca_dir", "=", "'{0}/{1}'", ".", "format", "(", "cert_base_path", "(", ")", ",", "ca_name", ")", "ca_cert", "=", "_read_cert", "(", "'{0}/{1}_ca_cert.crt'", ".", "format", "(", "ca_dir", ",", "ca_name", ")", ")", "store", ".", "add_cert", "(", "ca_cert", ")", "# These flags tell OpenSSL to check the leaf as well as the", "# entire cert chain.", "X509StoreFlags", "=", "OpenSSL", ".", "crypto", ".", "X509StoreFlags", "store", ".", "set_flags", "(", "X509StoreFlags", ".", "CRL_CHECK", "|", "X509StoreFlags", ".", "CRL_CHECK_ALL", ")", "if", "crl_file", "is", "None", ":", "crl", "=", "OpenSSL", ".", "crypto", ".", "CRL", "(", ")", "else", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "crl_file", ")", "as", "fhr", ":", "crl", "=", "OpenSSL", ".", "crypto", ".", "load_crl", "(", "OpenSSL", ".", "crypto", ".", "FILETYPE_PEM", ",", "fhr", ".", "read", "(", ")", ")", "store", ".", "add_crl", "(", "crl", ")", "context", "=", "OpenSSL", ".", "crypto", ".", "X509StoreContext", "(", "store", ",", "cert_obj", ")", "ret", "=", "{", "}", "try", ":", "context", ".", "verify_certificate", "(", ")", "ret", "[", "'valid'", "]", "=", "True", "except", "OpenSSL", ".", "crypto", ".", "X509StoreContextError", "as", "e", ":", "ret", "[", "'error'", "]", "=", "str", "(", "e", ")", "ret", "[", "'error_cert'", "]", "=", "e", ".", "certificate", "ret", "[", "'valid'", "]", "=", "False", "return", "ret" ]
.. versionadded:: Neon Validate a certificate against a given CA/CRL. cert path to the certifiate PEM file or string ca_name name of the CA crl_file full path to the CRL file
[ "..", "versionadded", "::", "Neon" ]
python
train
31.318182
reingart/pyafipws
wslum.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wslum.py#L239-L251
def AgregarUbicacionTambo(self, latitud, longitud, domicilio,
                          cod_localidad, cod_provincia, codigo_postal,
                          nombre_partido_depto, **kwargs):
    "Agrego los datos del productor a la liq."
    ubic_tambo = {'latitud': latitud,
                  'longitud': longitud,
                  'domicilio': domicilio,
                  'codLocalidad': cod_localidad,
                  'codProvincia': cod_provincia,
                  'nombrePartidoDepto': nombre_partido_depto,
                  'codigoPostal': codigo_postal}
    self.solicitud['tambo']['ubicacionTambo'] = ubic_tambo
    return True
[ "def", "AgregarUbicacionTambo", "(", "self", ",", "latitud", ",", "longitud", ",", "domicilio", ",", "cod_localidad", ",", "cod_provincia", ",", "codigo_postal", ",", "nombre_partido_depto", ",", "*", "*", "kwargs", ")", ":", "ubic_tambo", "=", "{", "'latitud'", ":", "latitud", ",", "'longitud'", ":", "longitud", ",", "'domicilio'", ":", "domicilio", ",", "'codLocalidad'", ":", "cod_localidad", ",", "'codProvincia'", ":", "cod_provincia", ",", "'nombrePartidoDepto'", ":", "nombre_partido_depto", ",", "'codigoPostal'", ":", "codigo_postal", "}", "self", ".", "solicitud", "[", "'tambo'", "]", "[", "'ubicacionTambo'", "]", "=", "ubic_tambo", "return", "True" ]
Agrego los datos del productor a la liq.
[ "Agrego", "los", "datos", "del", "productor", "a", "la", "liq", "." ]
python
train
52.230769
openego/ding0
ding0/grid/mv_grid/solvers/local_search.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/grid/mv_grid/solvers/local_search.py#L112-L219
def operator_oropt(self, graph, solution, op_diff_round_digits, anim=None):  # TODO: check docstring
    """Applies Or-Opt intra-route operator to solution

    Takes chains of nodes (length=3..1 consecutive nodes) from a given
    route and calculates savings when inserted into another position on the
    same route (all possible positions). Performes best move (max. saving)
    and starts over again with new route until no improvement is found.

    Args
    ----
    graph: :networkx:`NetworkX Graph Obj< >`
        A NetworkX graaph is used.
    solution: BaseSolution
        BaseSolution instance
    op_diff_round_digits: float
        Precision (floating point digits) for rounding route length differences.

        *Details*: In some cases when an exchange is performed on two routes
        with one node each, the difference between the both solutions (before
        and after the exchange) is not zero. This is due to internal rounding
        errors of float type. So the loop won't break (alternating between
        these two solutions), we need an additional criterion to avoid this
        behaviour: A threshold to handle values very close to zero as if they
        were zero (for a more detailed description of the matter see
        http://floating-point-gui.de or
        https://docs.python.org/3.5/tutorial/floatingpoint.html)
    anim: AnimationDing0
        AnimationDing0 object

    Returns
    -------
    LocalSearchSolution
        A solution (LocalSearchSolution class)

    Notes
    -----
    Since Or-Opt is an intra-route operator, it has not to be checked if
    route can allocate (Route's method can_allocate()) nodes during
    relocation regarding max. peak load/current because the line/cable type
    is the same along the entire route. However, node order within a route
    has an impact on the voltage stability so the check would be actually
    required. Due to large line capacity (load factor of lines/cables ~60 %)
    the voltage stability issues are neglected.

    (Inner) Loop variables:

    * s: length (count of consecutive nodes) of the chain that is moved.
      Values: 3..1
    * i: node that precedes the chain before moving (position in the
      route `tour`, not node name)
    * j: node that precedes the chain after moving (position in the
      route `tour`, not node name)

    Todo
    ----
    * insert literature reference for Or-algorithm here
    * Remove ugly nested loops, convert to more efficient matrix operations
    """
    no_ctr = 100

    # shorter var names for loop
    dm = graph._matrix
    dn = graph._nodes

    for route in solution.routes():
        # exclude routes with single high-demand nodes (Load Areas)
        if len(route._nodes) == 1:
            if solution._problem._is_aggregated[str(route._nodes[0])]:
                continue

        n = len(route._nodes)+1

        # create tour by adding depot at start and end
        tour = [graph._depot] + route._nodes + [graph._depot]

        # Or-Opt: Search better solutions by checking possible chain moves
        while True:
            length = route.length()
            length_best = length

            for s in range(3,0,-1):
                for i in range(1,n-s):
                    length_diff = (length
                                   - dm[dn[tour[i-1].name()]][dn[tour[i].name()]]
                                   - dm[dn[tour[i+s-1].name()]][dn[tour[i+s].name()]]
                                   + dm[dn[tour[i-1].name()]][dn[tour[i+s].name()]])

                    for j in range(i+s+1,n+1):
                        if j == n:
                            j2 = 1
                        else:
                            j2 = j+1
                        length_new = (length_diff
                                      + dm[dn[tour[j-1].name()]][dn[tour[i].name()]]
                                      + dm[dn[tour[i+s-1].name()]][dn[tour[j2-1].name()]]
                                      - dm[dn[tour[j-1].name()]][dn[tour[j2-1].name()]])

                        if length_new < length_best:
                            length_best = length_new
                            s_best, i_best, j_best = s, i, j

            if length_best < length:
                tour = tour[0:i_best] + tour[i_best+s_best:j_best] + tour[i_best:i_best+s_best] + tour[j_best:n+1]
                if anim is not None:
                    solution.draw_network(anim)

            # no improvement found
            if length_best == length:
                # replace old route by new (same arg for allocation and deallocation since node order is considered at allocation)
                solution._routes[solution._routes.index(route)].deallocate(tour[1:-1])
                solution._routes[solution._routes.index(route)].allocate(tour[1:-1])
                break

    #solution = LocalSearchSolution(solution, graph, new_routes)
    return solution
[ "def", "operator_oropt", "(", "self", ",", "graph", ",", "solution", ",", "op_diff_round_digits", ",", "anim", "=", "None", ")", ":", "# TODO: check docstring", "no_ctr", "=", "100", "# shorter var names for loop", "dm", "=", "graph", ".", "_matrix", "dn", "=", "graph", ".", "_nodes", "for", "route", "in", "solution", ".", "routes", "(", ")", ":", "# exclude routes with single high-demand nodes (Load Areas)", "if", "len", "(", "route", ".", "_nodes", ")", "==", "1", ":", "if", "solution", ".", "_problem", ".", "_is_aggregated", "[", "str", "(", "route", ".", "_nodes", "[", "0", "]", ")", "]", ":", "continue", "n", "=", "len", "(", "route", ".", "_nodes", ")", "+", "1", "# create tour by adding depot at start and end", "tour", "=", "[", "graph", ".", "_depot", "]", "+", "route", ".", "_nodes", "+", "[", "graph", ".", "_depot", "]", "# Or-Opt: Search better solutions by checking possible chain moves", "while", "True", ":", "length", "=", "route", ".", "length", "(", ")", "length_best", "=", "length", "for", "s", "in", "range", "(", "3", ",", "0", ",", "-", "1", ")", ":", "for", "i", "in", "range", "(", "1", ",", "n", "-", "s", ")", ":", "length_diff", "=", "(", "length", "-", "dm", "[", "dn", "[", "tour", "[", "i", "-", "1", "]", ".", "name", "(", ")", "]", "]", "[", "dn", "[", "tour", "[", "i", "]", ".", "name", "(", ")", "]", "]", "-", "dm", "[", "dn", "[", "tour", "[", "i", "+", "s", "-", "1", "]", ".", "name", "(", ")", "]", "]", "[", "dn", "[", "tour", "[", "i", "+", "s", "]", ".", "name", "(", ")", "]", "]", "+", "dm", "[", "dn", "[", "tour", "[", "i", "-", "1", "]", ".", "name", "(", ")", "]", "]", "[", "dn", "[", "tour", "[", "i", "+", "s", "]", ".", "name", "(", ")", "]", "]", ")", "for", "j", "in", "range", "(", "i", "+", "s", "+", "1", ",", "n", "+", "1", ")", ":", "if", "j", "==", "n", ":", "j2", "=", "1", "else", ":", "j2", "=", "j", "+", "1", "length_new", "=", "(", "length_diff", "+", "dm", "[", "dn", "[", "tour", "[", "j", "-", "1", "]", ".", "name", "(", ")", "]", "]", "[", "dn", "[", "tour", "[", "i", "]", ".", "name", "(", ")", "]", "]", "+", "dm", "[", "dn", "[", "tour", "[", "i", "+", "s", "-", "1", "]", ".", "name", "(", ")", "]", "]", "[", "dn", "[", "tour", "[", "j2", "-", "1", "]", ".", "name", "(", ")", "]", "]", "-", "dm", "[", "dn", "[", "tour", "[", "j", "-", "1", "]", ".", "name", "(", ")", "]", "]", "[", "dn", "[", "tour", "[", "j2", "-", "1", "]", ".", "name", "(", ")", "]", "]", ")", "if", "length_new", "<", "length_best", ":", "length_best", "=", "length_new", "s_best", ",", "i_best", ",", "j_best", "=", "s", ",", "i", ",", "j", "if", "length_best", "<", "length", ":", "tour", "=", "tour", "[", "0", ":", "i_best", "]", "+", "tour", "[", "i_best", "+", "s_best", ":", "j_best", "]", "+", "tour", "[", "i_best", ":", "i_best", "+", "s_best", "]", "+", "tour", "[", "j_best", ":", "n", "+", "1", "]", "if", "anim", "is", "not", "None", ":", "solution", ".", "draw_network", "(", "anim", ")", "# no improvement found", "if", "length_best", "==", "length", ":", "# replace old route by new (same arg for allocation and deallocation since node order is considered at allocation)", "solution", ".", "_routes", "[", "solution", ".", "_routes", ".", "index", "(", "route", ")", "]", ".", "deallocate", "(", "tour", "[", "1", ":", "-", "1", "]", ")", "solution", ".", "_routes", "[", "solution", ".", "_routes", ".", "index", "(", "route", ")", "]", ".", "allocate", "(", "tour", "[", "1", ":", "-", "1", "]", ")", "break", "#solution = LocalSearchSolution(solution, graph, 
new_routes)", "return", "solution" ]
Applies Or-Opt intra-route operator to solution Takes chains of nodes (length=3..1 consecutive nodes) from a given route and calculates savings when inserted into another position on the same route (all possible positions). Performes best move (max. saving) and starts over again with new route until no improvement is found. Args ---- graph: :networkx:`NetworkX Graph Obj< >` A NetworkX graaph is used. solution: BaseSolution BaseSolution instance op_diff_round_digits: float Precision (floating point digits) for rounding route length differences. *Details*: In some cases when an exchange is performed on two routes with one node each, the difference between the both solutions (before and after the exchange) is not zero. This is due to internal rounding errors of float type. So the loop won't break (alternating between these two solutions), we need an additional criterion to avoid this behaviour: A threshold to handle values very close to zero as if they were zero (for a more detailed description of the matter see http://floating-point-gui.de or https://docs.python.org/3.5/tutorial/floatingpoint.html) anim: AnimationDing0 AnimationDing0 object Returns ------- LocalSearchSolution A solution (LocalSearchSolution class) Notes ----- Since Or-Opt is an intra-route operator, it has not to be checked if route can allocate (Route's method can_allocate()) nodes during relocation regarding max. peak load/current because the line/cable type is the same along the entire route. However, node order within a route has an impact on the voltage stability so the check would be actually required. Due to large line capacity (load factor of lines/cables ~60 %) the voltage stability issues are neglected. (Inner) Loop variables: * s: length (count of consecutive nodes) of the chain that is moved. Values: 3..1 * i: node that precedes the chain before moving (position in the route `tour`, not node name) * j: node that precedes the chain after moving (position in the route `tour`, not node name) Todo ---- * insert literature reference for Or-algorithm here * Remove ugly nested loops, convert to more efficient matrix operations
[ "Applies", "Or", "-", "Opt", "intra", "-", "route", "operator", "to", "solution", "Takes", "chains", "of", "nodes", "(", "length", "=", "3", "..", "1", "consecutive", "nodes", ")", "from", "a", "given", "route", "and", "calculates", "savings", "when", "inserted", "into", "another", "position", "on", "the", "same", "route", "(", "all", "possible", "positions", ")", ".", "Performes", "best", "move", "(", "max", ".", "saving", ")", "and", "starts", "over", "again", "with", "new", "route", "until", "no", "improvement", "is", "found", ".", "Args", "----", "graph", ":", ":", "networkx", ":", "NetworkX", "Graph", "Obj<", ">", "A", "NetworkX", "graaph", "is", "used", ".", "solution", ":", "BaseSolution", "BaseSolution", "instance", "op_diff_round_digits", ":", "float", "Precision", "(", "floating", "point", "digits", ")", "for", "rounding", "route", "length", "differences", ".", "*", "Details", "*", ":", "In", "some", "cases", "when", "an", "exchange", "is", "performed", "on", "two", "routes", "with", "one", "node", "each", "the", "difference", "between", "the", "both", "solutions", "(", "before", "and", "after", "the", "exchange", ")", "is", "not", "zero", ".", "This", "is", "due", "to", "internal", "rounding", "errors", "of", "float", "type", ".", "So", "the", "loop", "won", "t", "break", "(", "alternating", "between", "these", "two", "solutions", ")", "we", "need", "an", "additional", "criterion", "to", "avoid", "this", "behaviour", ":", "A", "threshold", "to", "handle", "values", "very", "close", "to", "zero", "as", "if", "they", "were", "zero", "(", "for", "a", "more", "detailed", "description", "of", "the", "matter", "see", "http", ":", "//", "floating", "-", "point", "-", "gui", ".", "de", "or", "https", ":", "//", "docs", ".", "python", ".", "org", "/", "3", ".", "5", "/", "tutorial", "/", "floatingpoint", ".", "html", ")", "anim", ":", "AnimationDing0", "AnimationDing0", "object", "Returns", "-------", "LocalSearchSolution", "A", "solution", "(", "LocalSearchSolution", "class", ")", "Notes", "-----", "Since", "Or", "-", "Opt", "is", "an", "intra", "-", "route", "operator", "it", "has", "not", "to", "be", "checked", "if", "route", "can", "allocate", "(", "Route", "s", "method", "can_allocate", "()", ")", "nodes", "during", "relocation", "regarding", "max", ".", "peak", "load", "/", "current", "because", "the", "line", "/", "cable", "type", "is", "the", "same", "along", "the", "entire", "route", ".", "However", "node", "order", "within", "a", "route", "has", "an", "impact", "on", "the", "voltage", "stability", "so", "the", "check", "would", "be", "actually", "required", ".", "Due", "to", "large", "line", "capacity", "(", "load", "factor", "of", "lines", "/", "cables", "~60", "%", ")", "the", "voltage", "stability", "issues", "are", "neglected", "." ]
python
train
48.592593
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/model/network.py
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/model/network.py#L192-L199
def get_attribute_from_indices(self, indices: list, attribute_name: str): """Get attribute values for the requested indices. :param indices: Indices of vertices for which the attribute values are requested. :param attribute_name: The name of the attribute. :return: A list of attribute values for the requested indices. """ return list(np.array(self.graph.vs[attribute_name])[indices])
[ "def", "get_attribute_from_indices", "(", "self", ",", "indices", ":", "list", ",", "attribute_name", ":", "str", ")", ":", "return", "list", "(", "np", ".", "array", "(", "self", ".", "graph", ".", "vs", "[", "attribute_name", "]", ")", "[", "indices", "]", ")" ]
Get attribute values for the requested indices. :param indices: Indices of vertices for which the attribute values are requested. :param attribute_name: The name of the attribute. :return: A list of attribute values for the requested indices.
[ "Get", "attribute", "values", "for", "the", "requested", "indices", "." ]
python
train
53.375
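Usage sketch for the record above; `network` is assumed to be an already-built ppi_network_annotation Network wrapping an igraph graph that carries a "name" vertex attribute, and the returned values are illustrative only.
# Hypothetical call -- indices and attribute values are for illustration.
names = network.get_attribute_from_indices([0, 2, 5], attribute_name="name")
print(names)  # e.g. ['TP53', 'BRCA1', 'EGFR']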
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAFetch/QAQuery.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/QAQuery.py#L552-L565
def QA_fetch_risk(message={}, params={"_id": 0, 'assets': 0, 'timeindex': 0, 'totaltimeindex': 0, 'benchmark_assets': 0, 'month_profit': 0}, db=DATABASE): """get the risk message Arguments: query_mes {[type]} -- [description] Keyword Arguments: collection {[type]} -- [description] (default: {DATABASE}) Returns: [type] -- [description] """ collection = DATABASE.risk return [res for res in collection.find(message, params)]
[ "def", "QA_fetch_risk", "(", "message", "=", "{", "}", ",", "params", "=", "{", "\"_id\"", ":", "0", ",", "'assets'", ":", "0", ",", "'timeindex'", ":", "0", ",", "'totaltimeindex'", ":", "0", ",", "'benchmark_assets'", ":", "0", ",", "'month_profit'", ":", "0", "}", ",", "db", "=", "DATABASE", ")", ":", "collection", "=", "DATABASE", ".", "risk", "return", "[", "res", "for", "res", "in", "collection", ".", "find", "(", "message", ",", "params", ")", "]" ]
get the risk message Arguments: query_mes {[type]} -- [description] Keyword Arguments: collection {[type]} -- [description] (default: {DATABASE}) Returns: [type] -- [description]
[ "get", "the", "risk", "message" ]
python
train
33.285714
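Usage sketch for the record above, assuming QUANTAXIS is configured with a reachable MongoDB backend; the filter key is illustrative.
from QUANTAXIS.QAFetch.QAQuery import QA_fetch_risk
risks = QA_fetch_risk(message={'account_cookie': 'demo_account'})
for r in risks:
    print(r)  # each item is a dict with the projected risk fields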
JelleAalbers/multihist
multihist.py
https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L264-L273
def from_histogram(cls, histogram, bin_edges, axis_names=None): """Make a HistdD from numpy histogram + bin edges :param histogram: Initial histogram :param bin_edges: x bin edges of histogram, y bin edges, ... :return: Histnd instance """ bin_edges = np.array(bin_edges) self = cls(bins=bin_edges, axis_names=axis_names) self.histogram = histogram return self
[ "def", "from_histogram", "(", "cls", ",", "histogram", ",", "bin_edges", ",", "axis_names", "=", "None", ")", ":", "bin_edges", "=", "np", ".", "array", "(", "bin_edges", ")", "self", "=", "cls", "(", "bins", "=", "bin_edges", ",", "axis_names", "=", "axis_names", ")", "self", ".", "histogram", "=", "histogram", "return", "self" ]
Make a HistdD from numpy histogram + bin edges :param histogram: Initial histogram :param bin_edges: x bin edges of histogram, y bin edges, ... :return: Histnd instance
[ "Make", "a", "HistdD", "from", "numpy", "histogram", "+", "bin", "edges", ":", "param", "histogram", ":", "Initial", "histogram", ":", "param", "bin_edges", ":", "x", "bin", "edges", "of", "histogram", "y", "bin", "edges", "...", ":", "return", ":", "Histnd", "instance" ]
python
train
42.3
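Usage sketch for the record above; the concrete class name (Histdd) and empty-constructor behaviour are assumptions about the multihist API version.
import numpy as np
from multihist import Histdd
x, y = np.random.normal(size=(2, 1000))
counts, xe, ye = np.histogram2d(x, y, bins=10)
# Wrap an existing 2-D numpy histogram into a multihist object.
h = Histdd.from_histogram(counts, bin_edges=[xe, ye], axis_names=['x', 'y'])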
SteveMcGrath/pySecurityCenter
securitycenter/sc4.py
https://github.com/SteveMcGrath/pySecurityCenter/blob/f0b10b1bcd4fd23a8d4d09ca6774cdf5e1cfd880/securitycenter/sc4.py#L866-L904
def asset_create_combo(self, name, combo, tag='', description=''): '''asset_create_combo name, combination, tag, description Creates a new combination asset list. Operands can be either asset list IDs or be a nested combination asset list. UN-DOCUMENTED CALL: This function is not considered stable. AND = intersection OR = union operand = asset list ID or nested combination. operator = intersection or union. Example: combo = { 'operand1': { 'operand1': '2', 'operand2': '2', 'operation': 'union', }, 'operand2': '3', 'operation': 'intersection' } :param name: Name of the asset list. :type name: string :param combo: dict :param tag: The tag of the asset list. :type tag: string :param description: Description of the asset list. :type description: string ''' return self.raw_query('asset', 'add', data={ 'name': name, 'description': description, 'type': 'combination', 'combinations': combo, })
[ "def", "asset_create_combo", "(", "self", ",", "name", ",", "combo", ",", "tag", "=", "''", ",", "description", "=", "''", ")", ":", "return", "self", ".", "raw_query", "(", "'asset'", ",", "'add'", ",", "data", "=", "{", "'name'", ":", "name", ",", "'description'", ":", "description", ",", "'type'", ":", "'combination'", ",", "'combinations'", ":", "combo", ",", "}", ")" ]
asset_create_combo name, combination, tag, description Creates a new combination asset list. Operands can be either asset list IDs or a nested combination asset list. UN-DOCUMENTED CALL: This function is not considered stable. AND = intersection OR = union operand = asset list ID or nested combination. operator = intersection or union. Example: combo = { 'operand1': { 'operand1': '2', 'operand2': '2', 'operation': 'union', }, 'operand2': '3', 'operation': 'intersection' } :param name: Name of the asset list. :type name: string :param combo: dict :param tag: The tag of the asset list. :type tag: string :param description: Description of the asset list. :type description: string
[ "asset_create_combo", "name", "combination", "tag", "description", "Creates", "a", "new", "combination", "asset", "list", ".", "Operands", "can", "be", "either", "asset", "list", "IDs", "or", "be", "a", "nested", "combination", "asset", "list", "." ]
python
train
30.230769
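Usage sketch for the record above; `sc` is assumed to be an authenticated SecurityCenter 4 API object and the asset list IDs are illustrative.
combo = {'operand1': '2', 'operand2': '3', 'operation': 'union'}
sc.asset_create_combo('Union of lists 2 and 3', combo,
                      description='built via the undocumented combination call')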
dwavesystems/dwavebinarycsp
dwavebinarycsp/core/constraint.py
https://github.com/dwavesystems/dwavebinarycsp/blob/d6b1e70ceaa8f451d7afaa87ea10c7fc948a64e2/dwavebinarycsp/core/constraint.py#L362-L411
def fix_variable(self, v, value): """Fix the value of a variable and remove it from the constraint. Args: v (variable): Variable in the constraint to be set to a constant value. val (int): Value assigned to the variable. Values must match the :class:`.Vartype` of the constraint. Examples: This example creates a constraint that :math:`a \\ne b` on binary variables, fixes variable a to 0, and tests two candidate solutions. >>> import dwavebinarycsp >>> const = dwavebinarycsp.Constraint.from_func(operator.ne, ... ['a', 'b'], dwavebinarycsp.BINARY) >>> const.fix_variable('a', 0) >>> const.check({'b': 1}) True >>> const.check({'b': 0}) False """ variables = self.variables try: idx = variables.index(v) except ValueError: raise ValueError("given variable {} is not part of the constraint".format(v)) if value not in self.vartype.value: raise ValueError("expected value to be in {}, received {} instead".format(self.vartype.value, value)) configurations = frozenset(config[:idx] + config[idx + 1:] # exclude the fixed var for config in self.configurations if config[idx] == value) if not configurations: raise UnsatError("fixing {} to {} makes this constraint unsatisfiable".format(v, value)) variables = variables[:idx] + variables[idx + 1:] self.configurations = configurations self.variables = variables def func(*args): return args in configurations self.func = func self.name = '{} ({} fixed to {})'.format(self.name, v, value)
[ "def", "fix_variable", "(", "self", ",", "v", ",", "value", ")", ":", "variables", "=", "self", ".", "variables", "try", ":", "idx", "=", "variables", ".", "index", "(", "v", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"given variable {} is not part of the constraint\"", ".", "format", "(", "v", ")", ")", "if", "value", "not", "in", "self", ".", "vartype", ".", "value", ":", "raise", "ValueError", "(", "\"expected value to be in {}, received {} instead\"", ".", "format", "(", "self", ".", "vartype", ".", "value", ",", "value", ")", ")", "configurations", "=", "frozenset", "(", "config", "[", ":", "idx", "]", "+", "config", "[", "idx", "+", "1", ":", "]", "# exclude the fixed var", "for", "config", "in", "self", ".", "configurations", "if", "config", "[", "idx", "]", "==", "value", ")", "if", "not", "configurations", ":", "raise", "UnsatError", "(", "\"fixing {} to {} makes this constraint unsatisfiable\"", ".", "format", "(", "v", ",", "value", ")", ")", "variables", "=", "variables", "[", ":", "idx", "]", "+", "variables", "[", "idx", "+", "1", ":", "]", "self", ".", "configurations", "=", "configurations", "self", ".", "variables", "=", "variables", "def", "func", "(", "*", "args", ")", ":", "return", "args", "in", "configurations", "self", ".", "func", "=", "func", "self", ".", "name", "=", "'{} ({} fixed to {})'", ".", "format", "(", "self", ".", "name", ",", "v", ",", "value", ")" ]
Fix the value of a variable and remove it from the constraint. Args: v (variable): Variable in the constraint to be set to a constant value. val (int): Value assigned to the variable. Values must match the :class:`.Vartype` of the constraint. Examples: This example creates a constraint that :math:`a \\ne b` on binary variables, fixes variable a to 0, and tests two candidate solutions. >>> import dwavebinarycsp >>> const = dwavebinarycsp.Constraint.from_func(operator.ne, ... ['a', 'b'], dwavebinarycsp.BINARY) >>> const.fix_variable('a', 0) >>> const.check({'b': 1}) True >>> const.check({'b': 0}) False
[ "Fix", "the", "value", "of", "a", "variable", "and", "remove", "it", "from", "the", "constraint", "." ]
python
valid
36.96
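The docstring example in the record above relies on `operator` without importing it; a self-contained version of the same check:
import operator
import dwavebinarycsp

const = dwavebinarycsp.Constraint.from_func(operator.ne, ['a', 'b'], dwavebinarycsp.BINARY)
const.fix_variable('a', 0)
print(const.check({'b': 1}), const.check({'b': 0}))  # True False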
diamondman/proteusisc
proteusisc/command_queue.py
https://github.com/diamondman/proteusisc/blob/7622b7b04e63f9dc0f5a04429ff78d9a490c9c5c/proteusisc/command_queue.py#L391-L432
def _merge_prims(prims, *, debug=False, stagenames=None, stages=None): """Helper method to greedily combine Frames (of Primitives) or Primitives based on the rules defined in the Primitive's class. Used by a CommandQueue during compilation and optimization of Primitives. Args: prims: A list or FrameSequence of Primitives or Frames (respectively) to try to merge together. debug: A boolean for if debug information should be generated. stages: A list to be edited by this method to store snapshots of the compilation state. Used if debug is True. stagenames: A list of strings describing each debug snapshot of the compiilation process. Used if debug is True. Returns: A list or FrameSequence (the same type as prims) of the compined Primitives or Frames. """ if isinstance(prims, FrameSequence): merged_prims = FrameSequence(prims._chain) else: merged_prims = [] working_prim = prims[0] i = 1 logging_tmp = [] while i < len(prims): tmp = prims[i] res = working_prim.merge(tmp) if res is not None: working_prim = res if debug:#pragma: no cover logging_tmp.append( [p.snapshot() for p in merged_prims+[working_prim]]) else: merged_prims.append(working_prim) working_prim = tmp i += 1 merged_prims.append(working_prim) if debug:#pragma: no cover stages.append(logging_tmp) stagenames.append("Merge intermediate states") return merged_prims
[ "def", "_merge_prims", "(", "prims", ",", "*", ",", "debug", "=", "False", ",", "stagenames", "=", "None", ",", "stages", "=", "None", ")", ":", "if", "isinstance", "(", "prims", ",", "FrameSequence", ")", ":", "merged_prims", "=", "FrameSequence", "(", "prims", ".", "_chain", ")", "else", ":", "merged_prims", "=", "[", "]", "working_prim", "=", "prims", "[", "0", "]", "i", "=", "1", "logging_tmp", "=", "[", "]", "while", "i", "<", "len", "(", "prims", ")", ":", "tmp", "=", "prims", "[", "i", "]", "res", "=", "working_prim", ".", "merge", "(", "tmp", ")", "if", "res", "is", "not", "None", ":", "working_prim", "=", "res", "if", "debug", ":", "#pragma: no cover", "logging_tmp", ".", "append", "(", "[", "p", ".", "snapshot", "(", ")", "for", "p", "in", "merged_prims", "+", "[", "working_prim", "]", "]", ")", "else", ":", "merged_prims", ".", "append", "(", "working_prim", ")", "working_prim", "=", "tmp", "i", "+=", "1", "merged_prims", ".", "append", "(", "working_prim", ")", "if", "debug", ":", "#pragma: no cover", "stages", ".", "append", "(", "logging_tmp", ")", "stagenames", ".", "append", "(", "\"Merge intermediate states\"", ")", "return", "merged_prims" ]
Helper method to greedily combine Frames (of Primitives) or Primitives based on the rules defined in the Primitive's class. Used by a CommandQueue during compilation and optimization of Primitives. Args: prims: A list or FrameSequence of Primitives or Frames (respectively) to try to merge together. debug: A boolean for whether debug information should be generated. stages: A list to be edited by this method to store snapshots of the compilation state. Used if debug is True. stagenames: A list of strings describing each debug snapshot of the compilation process. Used if debug is True. Returns: A list or FrameSequence (the same type as prims) of the combined Primitives or Frames.
[ "Helper", "method", "to", "greedily", "combine", "Frames", "(", "of", "Primitives", ")", "or", "Primitives", "based", "on", "the", "rules", "defined", "in", "the", "Primitive", "s", "class", "." ]
python
train
37.595238
gem/oq-engine
openquake/hazardlib/gsim/utils_swiss_gmpe.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/utils_swiss_gmpe.py#L81-L99
def _get_corr_stddevs(C, tau_ss, stddev_types, num_sites, phi_ss, NL=None, tau_value=None): """ Return standard deviations adjusted for single station sigma as the total standard deviation - as proposed to be used in the Swiss Hazard Model [2014]. """ stddevs = [] temp_stddev = phi_ss * phi_ss if tau_value is not None and NL is not None: temp_stddev = temp_stddev + tau_value * tau_value * ((1 + NL) ** 2) else: temp_stddev = temp_stddev + C[tau_ss] * C[tau_ss] for stddev_type in stddev_types: if stddev_type == const.StdDev.TOTAL: stddevs.append(np.sqrt(temp_stddev) + np.zeros(num_sites)) return stddevs
[ "def", "_get_corr_stddevs", "(", "C", ",", "tau_ss", ",", "stddev_types", ",", "num_sites", ",", "phi_ss", ",", "NL", "=", "None", ",", "tau_value", "=", "None", ")", ":", "stddevs", "=", "[", "]", "temp_stddev", "=", "phi_ss", "*", "phi_ss", "if", "tau_value", "is", "not", "None", "and", "NL", "is", "not", "None", ":", "temp_stddev", "=", "temp_stddev", "+", "tau_value", "*", "tau_value", "*", "(", "(", "1", "+", "NL", ")", "**", "2", ")", "else", ":", "temp_stddev", "=", "temp_stddev", "+", "C", "[", "tau_ss", "]", "*", "C", "[", "tau_ss", "]", "for", "stddev_type", "in", "stddev_types", ":", "if", "stddev_type", "==", "const", ".", "StdDev", ".", "TOTAL", ":", "stddevs", ".", "append", "(", "np", ".", "sqrt", "(", "temp_stddev", ")", "+", "np", ".", "zeros", "(", "num_sites", ")", ")", "return", "stddevs" ]
Return standard deviations adjusted for single station sigma as the total standard deviation - as proposed to be used in the Swiss Hazard Model [2014].
[ "Return", "standard", "deviations", "adjusted", "for", "single", "station", "sigma", "as", "the", "total", "standard", "deviation", "-", "as", "proposed", "to", "be", "used", "in", "the", "Swiss", "Hazard", "Model", "[", "2014", "]", "." ]
python
train
36.526316
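Illustrative call for the record above; the coefficient entry and phi_ss value are made up, real numbers come from the GMPE's coefficient table.
from openquake.hazardlib import const
from openquake.hazardlib.gsim.utils_swiss_gmpe import _get_corr_stddevs

C = {'tau': 0.35}  # assumed coefficient dict keyed by the tau_ss name
stddevs = _get_corr_stddevs(C, 'tau', [const.StdDev.TOTAL], num_sites=3, phi_ss=0.46)
print(stddevs)  # one array of length 3, all entries equal to sqrt(0.46**2 + 0.35**2)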
saltstack/salt
salt/modules/vagrant.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vagrant.py#L156-L176
def _vagrant_ssh_config(vm_): ''' get the information for ssh communication from the new VM :param vm_: the VM's info as we have it now :return: dictionary of ssh stuff ''' machine = vm_['machine'] log.info('requesting vagrant ssh-config for VM %s', machine or '(default)') cmd = 'vagrant ssh-config {}'.format(machine) reply = __salt__['cmd.shell'](cmd, runas=vm_.get('runas'), cwd=vm_.get('cwd'), ignore_retcode=True) ssh_config = {} for line in reply.split('\n'): # build a dictionary of the text reply tokens = line.strip().split() if len(tokens) == 2: # each two-token line becomes a key:value pair ssh_config[tokens[0]] = tokens[1] log.debug('ssh_config=%s', repr(ssh_config)) return ssh_config
[ "def", "_vagrant_ssh_config", "(", "vm_", ")", ":", "machine", "=", "vm_", "[", "'machine'", "]", "log", ".", "info", "(", "'requesting vagrant ssh-config for VM %s'", ",", "machine", "or", "'(default)'", ")", "cmd", "=", "'vagrant ssh-config {}'", ".", "format", "(", "machine", ")", "reply", "=", "__salt__", "[", "'cmd.shell'", "]", "(", "cmd", ",", "runas", "=", "vm_", ".", "get", "(", "'runas'", ")", ",", "cwd", "=", "vm_", ".", "get", "(", "'cwd'", ")", ",", "ignore_retcode", "=", "True", ")", "ssh_config", "=", "{", "}", "for", "line", "in", "reply", ".", "split", "(", "'\\n'", ")", ":", "# build a dictionary of the text reply", "tokens", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "if", "len", "(", "tokens", ")", "==", "2", ":", "# each two-token line becomes a key:value pair", "ssh_config", "[", "tokens", "[", "0", "]", "]", "=", "tokens", "[", "1", "]", "log", ".", "debug", "(", "'ssh_config=%s'", ",", "repr", "(", "ssh_config", ")", ")", "return", "ssh_config" ]
get the information for ssh communication from the new VM :param vm_: the VM's info as we have it now :return: dictionary of ssh stuff
[ "get", "the", "information", "for", "ssh", "communication", "from", "the", "new", "VM" ]
python
train
41.190476
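The two-token parsing step in the record above can be exercised in isolation with a canned `vagrant ssh-config` reply:
reply = "Host default\n  HostName 127.0.0.1\n  User vagrant\n  Port 2222"
ssh_config = {}
for line in reply.split('\n'):
    tokens = line.strip().split()
    if len(tokens) == 2:  # each two-token line becomes a key:value pair
        ssh_config[tokens[0]] = tokens[1]
print(ssh_config)  # {'Host': 'default', 'HostName': '127.0.0.1', 'User': 'vagrant', 'Port': '2222'}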
aws/aws-encryption-sdk-python
doc/conf.py
https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/doc/conf.py#L21-L27
def get_version(): """Reads the version (MAJOR.MINOR) from this module.""" release = get_release() split_version = release.split(".") if len(split_version) == 3: return ".".join(split_version[:2]) return release
[ "def", "get_version", "(", ")", ":", "release", "=", "get_release", "(", ")", "split_version", "=", "release", ".", "split", "(", "\".\"", ")", "if", "len", "(", "split_version", ")", "==", "3", ":", "return", "\".\"", ".", "join", "(", "split_version", "[", ":", "2", "]", ")", "return", "release" ]
Reads the version (MAJOR.MINOR) from this module.
[ "Reads", "the", "version", "(", "MAJOR", ".", "MINOR", ")", "from", "this", "module", "." ]
python
train
33.285714
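A standalone sketch of the MAJOR.MINOR reduction above, assuming get_release() would return a three-part version string:
release = '1.4.1'  # stand-in for get_release()
split_version = release.split('.')
version = '.'.join(split_version[:2]) if len(split_version) == 3 else release
print(version)  # 1.4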
droope/droopescan
dscan/common/output.py
https://github.com/droope/droopescan/blob/424c48a0f9d12b4536dbef5a786f0fbd4ce9519a/dscan/common/output.py#L109-L126
def warn(self, msg, whitespace_strp=True): """ For things that have gone seriously wrong but don't merit a program halt. Outputs to stderr, so JsonOutput does not need to override. @param msg: warning to output. @param whitespace_strp: whether to strip whitespace. """ if self.errors_display: if whitespace_strp: msg = strip_whitespace(msg) if not self.log_to_file: msg = colors['warn'] + "[+] " + msg + colors['endc'] else: msg = "[" + time.strftime("%c") + "] " + msg self.print(msg, file=self.error_log)
[ "def", "warn", "(", "self", ",", "msg", ",", "whitespace_strp", "=", "True", ")", ":", "if", "self", ".", "errors_display", ":", "if", "whitespace_strp", ":", "msg", "=", "strip_whitespace", "(", "msg", ")", "if", "not", "self", ".", "log_to_file", ":", "msg", "=", "colors", "[", "'warn'", "]", "+", "\"[+] \"", "+", "msg", "+", "colors", "[", "'endc'", "]", "else", ":", "msg", "=", "\"[\"", "+", "time", ".", "strftime", "(", "\"%c\"", ")", "+", "\"] \"", "+", "msg", "self", ".", "print", "(", "msg", ",", "file", "=", "self", ".", "error_log", ")" ]
For things that have gone seriously wrong but don't merit a program halt. Outputs to stderr, so JsonOutput does not need to override. @param msg: warning to output. @param whitespace_strp: whether to strip whitespace.
[ "For", "things", "that", "have", "gone", "seriously", "wrong", "but", "don", "t", "merit", "a", "program", "halt", ".", "Outputs", "to", "stderr", "so", "JsonOutput", "does", "not", "need", "to", "override", "." ]
python
train
36.166667
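Hypothetical usage of the method above; `output` is assumed to be a droopescan output object with error display enabled.
output.warn("""Target redirected us to another host;
    results may be incomplete.""")  # multi-line message is normalised before being written to stderr / the error log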
nickmckay/LiPD-utilities
Python/lipd/dataframes.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/dataframes.py#L83-L108
def ts_to_df(metadata): """ Create a data frame from one TimeSeries object :param dict metadata: Time Series dictionary :return dict: One data frame per table, organized in a dictionary by name """ logger_dataframes.info("enter ts_to_df") dfs = {} # Plot the variable + values vs year, age, depth (whichever are available) dfs["paleoData"] = pd.DataFrame(_plot_ts_cols(metadata)) # Plot the chronology variables + values in a data frame dfs["chronData"] = _get_key_data(metadata, "chronData_df") # Take out the chronData pandas data frame object if it exists in the metadata # Otherwise, the data frame renderer gets crazy and errors out. if "chronData_df" in metadata: del metadata["chronData_df"] s = collections.OrderedDict(sorted(metadata.items())) # Put key-vars in a data frame to make it easier to visualize dfs["metadata"] = pd.DataFrame(list(s.items()), columns=['Key', 'Value']) logger_dataframes.info("exit ts_to_df") return dfs
[ "def", "ts_to_df", "(", "metadata", ")", ":", "logger_dataframes", ".", "info", "(", "\"enter ts_to_df\"", ")", "dfs", "=", "{", "}", "# Plot the variable + values vs year, age, depth (whichever are available)", "dfs", "[", "\"paleoData\"", "]", "=", "pd", ".", "DataFrame", "(", "_plot_ts_cols", "(", "metadata", ")", ")", "# Plot the chronology variables + values in a data frame", "dfs", "[", "\"chronData\"", "]", "=", "_get_key_data", "(", "metadata", ",", "\"chronData_df\"", ")", "# Take out the chronData pandas data frame object if it exists in the metadata", "# Otherwise, the data frame renderer gets crazy and errors out.", "if", "\"chronData_df\"", "in", "metadata", ":", "del", "metadata", "[", "\"chronData_df\"", "]", "s", "=", "collections", ".", "OrderedDict", "(", "sorted", "(", "metadata", ".", "items", "(", ")", ")", ")", "# Put key-vars in a data frame to make it easier to visualize", "dfs", "[", "\"metadata\"", "]", "=", "pd", ".", "DataFrame", "(", "list", "(", "s", ".", "items", "(", ")", ")", ",", "columns", "=", "[", "'Key'", ",", "'Value'", "]", ")", "logger_dataframes", ".", "info", "(", "\"exit ts_to_df\"", ")", "return", "dfs" ]
Create a data frame from one TimeSeries object :param dict metadata: Time Series dictionary :return dict: One data frame per table, organized in a dictionary by name
[ "Create", "a", "data", "frame", "from", "one", "TimeSeries", "object", ":", "param", "dict", "metadata", ":", "Time", "Series", "dictionary", ":", "return", "dict", ":", "One", "data", "frame", "per", "table", "organized", "in", "a", "dictionary", "by", "name" ]
python
train
38.576923
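Hypothetical usage for the record above; `one_ts` is assumed to be a single LiPD time-series dictionary obtained elsewhere (e.g. from an extractTs-style call).
dfs = ts_to_df(one_ts)
print(dfs['metadata'].head())    # key/value metadata table
print(dfs['paleoData'].columns)  # variable vs. year/age/depth columns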
Jarn/jarn.viewdoc
jarn/viewdoc/viewdoc.py
https://github.com/Jarn/jarn.viewdoc/blob/59ae82fd1658889c41096c1d8c08dcb1047dc349/jarn/viewdoc/viewdoc.py#L429-L438
def set_defaults(self, config_file): """Set defaults. """ self.defaults = Defaults(config_file) self.python = Python() self.setuptools = Setuptools() self.docutils = Docutils() self.styles = self.defaults.styles self.browser = self.defaults.browser self.list = False
[ "def", "set_defaults", "(", "self", ",", "config_file", ")", ":", "self", ".", "defaults", "=", "Defaults", "(", "config_file", ")", "self", ".", "python", "=", "Python", "(", ")", "self", ".", "setuptools", "=", "Setuptools", "(", ")", "self", ".", "docutils", "=", "Docutils", "(", ")", "self", ".", "styles", "=", "self", ".", "defaults", ".", "styles", "self", ".", "browser", "=", "self", ".", "defaults", ".", "browser", "self", ".", "list", "=", "False" ]
Set defaults.
[ "Set", "defaults", "." ]
python
train
32.9
gwastro/pycbc
pycbc/frame/frame.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/frame/frame.py#L554-L564
def null_advance(self, blocksize): """Advance and insert zeros Parameters ---------- blocksize: int The number of seconds to attempt to read from the channel """ self.raw_buffer.roll(-int(blocksize * self.raw_sample_rate)) self.read_pos += blocksize self.raw_buffer.start_time += blocksize
[ "def", "null_advance", "(", "self", ",", "blocksize", ")", ":", "self", ".", "raw_buffer", ".", "roll", "(", "-", "int", "(", "blocksize", "*", "self", ".", "raw_sample_rate", ")", ")", "self", ".", "read_pos", "+=", "blocksize", "self", ".", "raw_buffer", ".", "start_time", "+=", "blocksize" ]
Advance and insert zeros Parameters ---------- blocksize: int The number of seconds to attempt to read from the channel
[ "Advance", "and", "insert", "zeros" ]
python
train
32.363636
twilio/twilio-python
twilio/rest/studio/v1/flow/execution/execution_context.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/studio/v1/flow/execution/execution_context.py#L35-L46
def get(self): """ Constructs a ExecutionContextContext :returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext :rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext """ return ExecutionContextContext( self._version, flow_sid=self._solution['flow_sid'], execution_sid=self._solution['execution_sid'], )
[ "def", "get", "(", "self", ")", ":", "return", "ExecutionContextContext", "(", "self", ".", "_version", ",", "flow_sid", "=", "self", ".", "_solution", "[", "'flow_sid'", "]", ",", "execution_sid", "=", "self", ".", "_solution", "[", "'execution_sid'", "]", ",", ")" ]
Constructs a ExecutionContextContext :returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext :rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
[ "Constructs", "a", "ExecutionContextContext" ]
python
train
37.5
GNS3/gns3-server
gns3server/compute/dynamips/nodes/router.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/nodes/router.py#L121-L141
def _convert_before_2_0_0_b3(self, dynamips_id): """ Before 2.0.0 beta3 the node didn't have a folder by node when we start we move the file, we can't do it in the topology conversion due to case of remote servers """ dynamips_dir = self.project.module_working_directory(self.manager.module_name.lower()) for path in glob.glob(os.path.join(glob.escape(dynamips_dir), "configs", "i{}_*".format(dynamips_id))): dst = os.path.join(self._working_directory, "configs", os.path.basename(path)) if not os.path.exists(dst): try: shutil.move(path, dst) except OSError as e: raise DynamipsError("Can't move {}: {}".format(path, str(e))) for path in glob.glob(os.path.join(glob.escape(dynamips_dir), "*_i{}_*".format(dynamips_id))): dst = os.path.join(self._working_directory, os.path.basename(path)) if not os.path.exists(dst): try: shutil.move(path, dst) except OSError as e: raise DynamipsError("Can't move {}: {}".format(path, str(e)))
[ "def", "_convert_before_2_0_0_b3", "(", "self", ",", "dynamips_id", ")", ":", "dynamips_dir", "=", "self", ".", "project", ".", "module_working_directory", "(", "self", ".", "manager", ".", "module_name", ".", "lower", "(", ")", ")", "for", "path", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "glob", ".", "escape", "(", "dynamips_dir", ")", ",", "\"configs\"", ",", "\"i{}_*\"", ".", "format", "(", "dynamips_id", ")", ")", ")", ":", "dst", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_working_directory", ",", "\"configs\"", ",", "os", ".", "path", ".", "basename", "(", "path", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dst", ")", ":", "try", ":", "shutil", ".", "move", "(", "path", ",", "dst", ")", "except", "OSError", "as", "e", ":", "raise", "DynamipsError", "(", "\"Can't move {}: {}\"", ".", "format", "(", "path", ",", "str", "(", "e", ")", ")", ")", "for", "path", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "glob", ".", "escape", "(", "dynamips_dir", ")", ",", "\"*_i{}_*\"", ".", "format", "(", "dynamips_id", ")", ")", ")", ":", "dst", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_working_directory", ",", "os", ".", "path", ".", "basename", "(", "path", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dst", ")", ":", "try", ":", "shutil", ".", "move", "(", "path", ",", "dst", ")", "except", "OSError", "as", "e", ":", "raise", "DynamipsError", "(", "\"Can't move {}: {}\"", ".", "format", "(", "path", ",", "str", "(", "e", ")", ")", ")" ]
Before 2.0.0 beta3 the node didn't have a folder by node when we start we move the file, we can't do it in the topology conversion due to case of remote servers
[ "Before", "2", ".", "0", ".", "0", "beta3", "the", "node", "didn", "t", "have", "a", "folder", "by", "node", "when", "we", "start", "we", "move", "the", "file", "we", "can", "t", "do", "it", "in", "the", "topology", "conversion", "due", "to", "case", "of", "remote", "servers" ]
python
train
55.428571
tdsmith/eleven
eleven/eleven.py
https://github.com/tdsmith/eleven/blob/c79b7e784f6d4a76eb4371e69d5ee6f471fe56e1/eleven/eleven.py#L47-L68
def censor_background(sample_frame, ntc_samples=['NTC'], margin=log2(10)): """Selects rows from the sample data frame that fall `margin` or greater cycles earlier than the NTC for that target. NTC wells are recognized by string matching against the Sample column. :param DataFrame sample_frame: A sample data frame. :param iterable ntc_samples: A sequence of strings giving the sample names of your NTC wells, i.e. ['NTC'] :param float margin: The number of cycles earlier than the NTC for a "good" sample, i.e. log2(10) :return: a view of the sample data frame containing only non-background rows :rtype: DataFrame """ ntcs = sample_frame.loc[ sample_frame['Sample'].apply(lambda x: x in ntc_samples), ] if ntcs.empty: return sample_frame g = ntcs.groupby('Target') min_ntcs = g['Cq'].min() # if a target has no NTC, min_ntcs.loc[sample] is NaN # we should retain all values from targets with no NTC # all comparisons with NaN are false # so we test for the "wrong" condition and invert the result censored = sample_frame.loc[ ~(sample_frame['Cq'] > (min_ntcs.loc[sample_frame['Target']] - margin)) ] return censored
[ "def", "censor_background", "(", "sample_frame", ",", "ntc_samples", "=", "[", "'NTC'", "]", ",", "margin", "=", "log2", "(", "10", ")", ")", ":", "ntcs", "=", "sample_frame", ".", "loc", "[", "sample_frame", "[", "'Sample'", "]", ".", "apply", "(", "lambda", "x", ":", "x", "in", "ntc_samples", ")", ",", "]", "if", "ntcs", ".", "empty", ":", "return", "sample_frame", "g", "=", "ntcs", ".", "groupby", "(", "'Target'", ")", "min_ntcs", "=", "g", "[", "'Cq'", "]", ".", "min", "(", ")", "# if a target has no NTC, min_ntcs.loc[sample] is NaN", "# we should retain all values from targets with no NTC", "# all comparisons with NaN are false", "# so we test for the \"wrong\" condition and invert the result", "censored", "=", "sample_frame", ".", "loc", "[", "~", "(", "sample_frame", "[", "'Cq'", "]", ">", "(", "min_ntcs", ".", "loc", "[", "sample_frame", "[", "'Target'", "]", "]", "-", "margin", ")", ")", "]", "return", "censored" ]
Selects rows from the sample data frame that fall `margin` or greater cycles earlier than the NTC for that target. NTC wells are recognized by string matching against the Sample column. :param DataFrame sample_frame: A sample data frame. :param iterable ntc_samples: A sequence of strings giving the sample names of your NTC wells, i.e. ['NTC'] :param float margin: The number of cycles earlier than the NTC for a "good" sample, i.e. log2(10) :return: a view of the sample data frame containing only non-background rows :rtype: DataFrame
[ "Selects", "rows", "from", "the", "sample", "data", "frame", "that", "fall", "margin", "or", "greater", "cycles", "earlier", "than", "the", "NTC", "for", "that", "target", ".", "NTC", "wells", "are", "recognized", "by", "string", "matching", "against", "the", "Sample", "column", "." ]
python
train
53.681818
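A toy frame showing the cutoff behaviour of the record above; the Cq values are illustrative.
import pandas as pd
sf = pd.DataFrame({'Sample': ['NTC', 's1', 's2'],
                   'Target': ['gene1', 'gene1', 'gene1'],
                   'Cq': [35.0, 25.0, 33.0]})
print(censor_background(sf))  # keeps s1 only: s2 falls within log2(10) cycles of the NTC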
aestrivex/bctpy
bct/algorithms/clustering.py
https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/clustering.py#L130-L155
def clustering_coef_bu(G): ''' The clustering coefficient is the fraction of triangles around a node (equiv. the fraction of nodes neighbors that are neighbors of each other). Parameters ---------- A : NxN np.ndarray binary undirected connection matrix Returns ------- C : Nx1 np.ndarray clustering coefficient vector ''' n = len(G) C = np.zeros((n,)) for u in range(n): V, = np.where(G[u, :]) k = len(V) if k >= 2: # degree must be at least 2 S = G[np.ix_(V, V)] C[u] = np.sum(S) / (k * k - k) return C
[ "def", "clustering_coef_bu", "(", "G", ")", ":", "n", "=", "len", "(", "G", ")", "C", "=", "np", ".", "zeros", "(", "(", "n", ",", ")", ")", "for", "u", "in", "range", "(", "n", ")", ":", "V", ",", "=", "np", ".", "where", "(", "G", "[", "u", ",", ":", "]", ")", "k", "=", "len", "(", "V", ")", "if", "k", ">=", "2", ":", "# degree must be at least 2", "S", "=", "G", "[", "np", ".", "ix_", "(", "V", ",", "V", ")", "]", "C", "[", "u", "]", "=", "np", ".", "sum", "(", "S", ")", "/", "(", "k", "*", "k", "-", "k", ")", "return", "C" ]
The clustering coefficient is the fraction of triangles around a node (equiv. the fraction of a node's neighbors that are neighbors of each other). Parameters ---------- A : NxN np.ndarray binary undirected connection matrix Returns ------- C : Nx1 np.ndarray clustering coefficient vector
[ "The", "clustering", "coefficient", "is", "the", "fraction", "of", "triangles", "around", "a", "node", "(", "equiv", ".", "the", "fraction", "of", "nodes", "neighbors", "that", "are", "neighbors", "of", "each", "other", ")", "." ]
python
train
23.192308
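A small worked example for the record above: a triangle (nodes 0-1-2) with one pendant node attached to vertex 0.
import numpy as np
from bct import clustering_coef_bu

G = np.array([[0, 1, 1, 1],
              [1, 0, 1, 0],
              [1, 1, 0, 0],
              [1, 0, 0, 0]])
print(clustering_coef_bu(G))  # approx [0.333, 1.0, 1.0, 0.0]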
Robpol86/libnl
libnl/nl80211/iw_util.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/nl80211/iw_util.py#L76-L128
def get_ht_capability(cap): """http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/util.c?id=v3.17#n541. Positional arguments: cap -- c_uint16 Returns: List. """ answers = list() if cap & 1: answers.append('RX LDPC') if cap & 2: answers.append('HT20/HT40') if not cap & 2: answers.append('HT20') if (cap >> 2) & 0x3 == 0: answers.append('Static SM Power Save') if (cap >> 2) & 0x3 == 1: answers.append('Dynamic SM Power Save') if (cap >> 2) & 0x3 == 3: answers.append('SM Power Save disabled') if cap & 16: answers.append('RX Greenfield') if cap & 32: answers.append('RX HT20 SGI') if cap & 64: answers.append('RX HT40 SGI') if cap & 128: answers.append('TX STBC') if (cap >> 8) & 0x3 == 0: answers.append('No RX STBC') if (cap >> 8) & 0x3 == 1: answers.append('RX STBC 1-stream') if (cap >> 8) & 0x3 == 2: answers.append('RX STBC 2-streams') if (cap >> 8) & 0x3 == 3: answers.append('RX STBC 3-streams') if cap & 1024: answers.append('HT Delayed Block Ack') if not cap & 2048: answers.append('Max AMSDU length: 3839 bytes') if cap & 2048: answers.append('Max AMSDU length: 7935 bytes') if cap & 4096: answers.append('DSSS/CCK HT40') if not cap & 4096: answers.append('No DSSS/CCK HT40') if cap & 16384: answers.append('40 MHz Intolerant') if cap & 32768: answers.append('L-SIG TXOP protection') return answers
[ "def", "get_ht_capability", "(", "cap", ")", ":", "answers", "=", "list", "(", ")", "if", "cap", "&", "1", ":", "answers", ".", "append", "(", "'RX LDPC'", ")", "if", "cap", "&", "2", ":", "answers", ".", "append", "(", "'HT20/HT40'", ")", "if", "not", "cap", "&", "2", ":", "answers", ".", "append", "(", "'HT20'", ")", "if", "(", "cap", ">>", "2", ")", "&", "0x3", "==", "0", ":", "answers", ".", "append", "(", "'Static SM Power Save'", ")", "if", "(", "cap", ">>", "2", ")", "&", "0x3", "==", "1", ":", "answers", ".", "append", "(", "'Dynamic SM Power Save'", ")", "if", "(", "cap", ">>", "2", ")", "&", "0x3", "==", "3", ":", "answers", ".", "append", "(", "'SM Power Save disabled'", ")", "if", "cap", "&", "16", ":", "answers", ".", "append", "(", "'RX Greenfield'", ")", "if", "cap", "&", "32", ":", "answers", ".", "append", "(", "'RX HT20 SGI'", ")", "if", "cap", "&", "64", ":", "answers", ".", "append", "(", "'RX HT40 SGI'", ")", "if", "cap", "&", "128", ":", "answers", ".", "append", "(", "'TX STBC'", ")", "if", "(", "cap", ">>", "8", ")", "&", "0x3", "==", "0", ":", "answers", ".", "append", "(", "'No RX STBC'", ")", "if", "(", "cap", ">>", "8", ")", "&", "0x3", "==", "1", ":", "answers", ".", "append", "(", "'RX STBC 1-stream'", ")", "if", "(", "cap", ">>", "8", ")", "&", "0x3", "==", "2", ":", "answers", ".", "append", "(", "'RX STBC 2-streams'", ")", "if", "(", "cap", ">>", "8", ")", "&", "0x3", "==", "3", ":", "answers", ".", "append", "(", "'RX STBC 3-streams'", ")", "if", "cap", "&", "1024", ":", "answers", ".", "append", "(", "'HT Delayed Block Ack'", ")", "if", "not", "cap", "&", "2048", ":", "answers", ".", "append", "(", "'Max AMSDU length: 3839 bytes'", ")", "if", "cap", "&", "2048", ":", "answers", ".", "append", "(", "'Max AMSDU length: 7935 bytes'", ")", "if", "cap", "&", "4096", ":", "answers", ".", "append", "(", "'DSSS/CCK HT40'", ")", "if", "not", "cap", "&", "4096", ":", "answers", ".", "append", "(", "'No DSSS/CCK HT40'", ")", "if", "cap", "&", "16384", ":", "answers", ".", "append", "(", "'40 MHz Intolerant'", ")", "if", "cap", "&", "32768", ":", "answers", ".", "append", "(", "'L-SIG TXOP protection'", ")", "return", "answers" ]
http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/util.c?id=v3.17#n541. Positional arguments: cap -- c_uint16 Returns: List.
[ "http", ":", "//", "git", ".", "kernel", ".", "org", "/", "cgit", "/", "linux", "/", "kernel", "/", "git", "/", "jberg", "/", "iw", ".", "git", "/", "tree", "/", "util", ".", "c?id", "=", "v3", ".", "17#n541", "." ]
python
train
29.320755
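Decoding an illustrative 16-bit capability value with the function above (the bit pattern is made up):
cap = 0x006E  # HT20/HT40, SM Power Save disabled, RX HT20 SGI and RX HT40 SGI bits set
print(get_ht_capability(cap))
# ['HT20/HT40', 'SM Power Save disabled', 'RX HT20 SGI', 'RX HT40 SGI',
#  'No RX STBC', 'Max AMSDU length: 3839 bytes', 'No DSSS/CCK HT40']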
brentpayne/kennyg
kennyg/sax_handler.py
https://github.com/brentpayne/kennyg/blob/c688dd6d270bb7dcdcce7f08c54eafb1bf3232f2/kennyg/sax_handler.py#L16-L35
def is_valid(self, name=None, debug=False): """ Check to see if the current xml path is to be processed. """ valid_tags = self.action_tree invalid = False for item in self.current_tree: try: if item in valid_tags or self.ALL_TAGS in valid_tags: valid_tags = valid_tags[item if item in valid_tags else self.ALL_TAGS] else: valid_tags = None invalid = True break except (KeyError, TypeError) as e: # object is either missing the key or is not a dictionary type invalid = True break if debug: print name, not invalid and valid_tags is not None return not invalid and valid_tags is not None
[ "def", "is_valid", "(", "self", ",", "name", "=", "None", ",", "debug", "=", "False", ")", ":", "valid_tags", "=", "self", ".", "action_tree", "invalid", "=", "False", "for", "item", "in", "self", ".", "current_tree", ":", "try", ":", "if", "item", "in", "valid_tags", "or", "self", ".", "ALL_TAGS", "in", "valid_tags", ":", "valid_tags", "=", "valid_tags", "[", "item", "if", "item", "in", "valid_tags", "else", "self", ".", "ALL_TAGS", "]", "else", ":", "valid_tags", "=", "None", "invalid", "=", "True", "break", "except", "(", "KeyError", ",", "TypeError", ")", "as", "e", ":", "# object is either missing the key or is not a dictionary type", "invalid", "=", "True", "break", "if", "debug", ":", "print", "name", ",", "not", "invalid", "and", "valid_tags", "is", "not", "None", "return", "not", "invalid", "and", "valid_tags", "is", "not", "None" ]
Check to see if the current xml path is to be processed.
[ "Check", "to", "see", "if", "the", "current", "xml", "path", "is", "to", "be", "processed", "." ]
python
train
40.6
UCSBarchlab/PyRTL
examples/introduction-to-hardware.py
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/examples/introduction-to-hardware.py#L12-L17
def software_fibonacci(n): """ a normal old python function to return the Nth fibonacci number. """ a, b = 0, 1 for i in range(n): a, b = b, a + b return a
[ "def", "software_fibonacci", "(", "n", ")", ":", "a", ",", "b", "=", "0", ",", "1", "for", "i", "in", "range", "(", "n", ")", ":", "a", ",", "b", "=", "b", ",", "a", "+", "b", "return", "a" ]
a normal old python function to return the Nth fibonacci number.
[ "a", "normal", "old", "python", "function", "to", "return", "the", "Nth", "fibonacci", "number", "." ]
python
train
29
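A quick check of the reference function above before comparing it against the hardware design:
print([software_fibonacci(n) for n in range(8)])  # [0, 1, 1, 2, 3, 5, 8, 13]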
apache/airflow
airflow/utils/log/file_processor_handler.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/file_processor_handler.py#L60-L72
def set_context(self, filename): """ Provide filename context to airflow task handler. :param filename: filename in which the dag is located """ local_loc = self._init_file(filename) self.handler = logging.FileHandler(local_loc) self.handler.setFormatter(self.formatter) self.handler.setLevel(self.level) if self._cur_date < datetime.today(): self._symlink_latest_log_directory() self._cur_date = datetime.today()
[ "def", "set_context", "(", "self", ",", "filename", ")", ":", "local_loc", "=", "self", ".", "_init_file", "(", "filename", ")", "self", ".", "handler", "=", "logging", ".", "FileHandler", "(", "local_loc", ")", "self", ".", "handler", ".", "setFormatter", "(", "self", ".", "formatter", ")", "self", ".", "handler", ".", "setLevel", "(", "self", ".", "level", ")", "if", "self", ".", "_cur_date", "<", "datetime", ".", "today", "(", ")", ":", "self", ".", "_symlink_latest_log_directory", "(", ")", "self", ".", "_cur_date", "=", "datetime", ".", "today", "(", ")" ]
Provide filename context to airflow task handler. :param filename: filename in which the dag is located
[ "Provide", "filename", "context", "to", "airflow", "task", "handler", ".", ":", "param", "filename", ":", "filename", "in", "which", "the", "dag", "is", "located" ]
python
test
38.307692
istresearch/scrapy-cluster
utils/scutils/log_factory.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/log_factory.py#L62-L66
def is_subdict(self, a,b): ''' Return True if a is a subdict of b ''' return all((k in b and b[k]==v) for k,v in a.iteritems())
[ "def", "is_subdict", "(", "self", ",", "a", ",", "b", ")", ":", "return", "all", "(", "(", "k", "in", "b", "and", "b", "[", "k", "]", "==", "v", ")", "for", "k", ",", "v", "in", "a", ".", "iteritems", "(", ")", ")" ]
Return True if a is a subdict of b
[ "Return", "True", "if", "a", "is", "a", "subdict", "of", "b" ]
python
train
31
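The method above is Python 2 code (iteritems); the same containment check expressed in Python 3 terms, with illustrative dicts:
a = {'level': 'INFO'}
b = {'level': 'INFO', 'message': 'closed spider'}
print(all(k in b and b[k] == v for k, v in a.items()))  # True -- a is a subdict of b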
scott-griffiths/bitstring
bitstring.py
https://github.com/scott-griffiths/bitstring/blob/ab40ae7f0b43fe223a39b63cbc0529b09f3ef653/bitstring.py#L3365-L3388
def insert(self, bs, pos=None): """Insert bs at bit position pos. bs -- The bitstring to insert. pos -- The bit position to insert at. Raises ValueError if pos < 0 or pos > self.len. """ bs = Bits(bs) if not bs.len: return self if bs is self: bs = self.__copy__() if pos is None: try: pos = self._pos except AttributeError: raise TypeError("insert require a bit position for this type.") if pos < 0: pos += self.len if not 0 <= pos <= self.len: raise ValueError("Invalid insert position.") self._insert(bs, pos)
[ "def", "insert", "(", "self", ",", "bs", ",", "pos", "=", "None", ")", ":", "bs", "=", "Bits", "(", "bs", ")", "if", "not", "bs", ".", "len", ":", "return", "self", "if", "bs", "is", "self", ":", "bs", "=", "self", ".", "__copy__", "(", ")", "if", "pos", "is", "None", ":", "try", ":", "pos", "=", "self", ".", "_pos", "except", "AttributeError", ":", "raise", "TypeError", "(", "\"insert require a bit position for this type.\"", ")", "if", "pos", "<", "0", ":", "pos", "+=", "self", ".", "len", "if", "not", "0", "<=", "pos", "<=", "self", ".", "len", ":", "raise", "ValueError", "(", "\"Invalid insert position.\"", ")", "self", ".", "_insert", "(", "bs", ",", "pos", ")" ]
Insert bs at bit position pos. bs -- The bitstring to insert. pos -- The bit position to insert at. Raises ValueError if pos < 0 or pos > self.len.
[ "Insert", "bs", "at", "bit", "position", "pos", "." ]
python
train
28.791667
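Usage sketch for the record above using the mutable BitStream class:
from bitstring import BitStream
s = BitStream('0b0011')
s.insert('0b11', pos=2)  # splice two set bits into the middle
print(s.bin)             # 001111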
ColtonProvias/sqlalchemy-jsonapi
sqlalchemy_jsonapi/serializer.py
https://github.com/ColtonProvias/sqlalchemy-jsonapi/blob/40f8b5970d44935b27091c2bf3224482d23311bb/sqlalchemy_jsonapi/serializer.py#L627-L690
def get_collection(self, session, query, api_key): """ Fetch a collection of resources of a specified type. :param session: SQLAlchemy session :param query: Dict of query args :param api_type: The type of the model """ model = self._fetch_model(api_key) include = self._parse_include(query.get('include', '').split(',')) fields = self._parse_fields(query) included = {} sorts = query.get('sort', '').split(',') order_by = [] collection = session.query(model) for attr in sorts: if attr == '': break attr_name, is_asc = [attr[1:], False]\ if attr[0] == '-'\ else [attr, True] if attr_name not in model.__mapper__.all_orm_descriptors.keys()\ or not hasattr(model, attr_name)\ or attr_name in model.__mapper__.relationships.keys(): return NotSortableError(model, attr_name) attr = getattr(model, attr_name) if not hasattr(attr, 'asc'): # pragma: no cover return NotSortableError(model, attr_name) check_permission(model, attr_name, Permissions.VIEW) order_by.append(attr.asc() if is_asc else attr.desc()) if len(order_by) > 0: collection = collection.order_by(*order_by) pos = -1 start, end = self._parse_page(query) response = JSONAPIResponse() response.data['data'] = [] for instance in collection: try: check_permission(instance, None, Permissions.VIEW) except PermissionDeniedError: continue pos += 1 if end is not None and (pos < start or pos > end): continue built = self._render_full_resource(instance, include, fields) included.update(built.pop('included')) response.data['data'].append(built) response.data['included'] = list(included.values()) return response
[ "def", "get_collection", "(", "self", ",", "session", ",", "query", ",", "api_key", ")", ":", "model", "=", "self", ".", "_fetch_model", "(", "api_key", ")", "include", "=", "self", ".", "_parse_include", "(", "query", ".", "get", "(", "'include'", ",", "''", ")", ".", "split", "(", "','", ")", ")", "fields", "=", "self", ".", "_parse_fields", "(", "query", ")", "included", "=", "{", "}", "sorts", "=", "query", ".", "get", "(", "'sort'", ",", "''", ")", ".", "split", "(", "','", ")", "order_by", "=", "[", "]", "collection", "=", "session", ".", "query", "(", "model", ")", "for", "attr", "in", "sorts", ":", "if", "attr", "==", "''", ":", "break", "attr_name", ",", "is_asc", "=", "[", "attr", "[", "1", ":", "]", ",", "False", "]", "if", "attr", "[", "0", "]", "==", "'-'", "else", "[", "attr", ",", "True", "]", "if", "attr_name", "not", "in", "model", ".", "__mapper__", ".", "all_orm_descriptors", ".", "keys", "(", ")", "or", "not", "hasattr", "(", "model", ",", "attr_name", ")", "or", "attr_name", "in", "model", ".", "__mapper__", ".", "relationships", ".", "keys", "(", ")", ":", "return", "NotSortableError", "(", "model", ",", "attr_name", ")", "attr", "=", "getattr", "(", "model", ",", "attr_name", ")", "if", "not", "hasattr", "(", "attr", ",", "'asc'", ")", ":", "# pragma: no cover", "return", "NotSortableError", "(", "model", ",", "attr_name", ")", "check_permission", "(", "model", ",", "attr_name", ",", "Permissions", ".", "VIEW", ")", "order_by", ".", "append", "(", "attr", ".", "asc", "(", ")", "if", "is_asc", "else", "attr", ".", "desc", "(", ")", ")", "if", "len", "(", "order_by", ")", ">", "0", ":", "collection", "=", "collection", ".", "order_by", "(", "*", "order_by", ")", "pos", "=", "-", "1", "start", ",", "end", "=", "self", ".", "_parse_page", "(", "query", ")", "response", "=", "JSONAPIResponse", "(", ")", "response", ".", "data", "[", "'data'", "]", "=", "[", "]", "for", "instance", "in", "collection", ":", "try", ":", "check_permission", "(", "instance", ",", "None", ",", "Permissions", ".", "VIEW", ")", "except", "PermissionDeniedError", ":", "continue", "pos", "+=", "1", "if", "end", "is", "not", "None", "and", "(", "pos", "<", "start", "or", "pos", ">", "end", ")", ":", "continue", "built", "=", "self", ".", "_render_full_resource", "(", "instance", ",", "include", ",", "fields", ")", "included", ".", "update", "(", "built", ".", "pop", "(", "'included'", ")", ")", "response", ".", "data", "[", "'data'", "]", ".", "append", "(", "built", ")", "response", ".", "data", "[", "'included'", "]", "=", "list", "(", "included", ".", "values", "(", ")", ")", "return", "response" ]
Fetch a collection of resources of a specified type. :param session: SQLAlchemy session :param query: Dict of query args :param api_type: The type of the model
[ "Fetch", "a", "collection", "of", "resources", "of", "a", "specified", "type", "." ]
python
train
32.15625
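Hypothetical call for the record above; `serializer` is assumed to be a configured sqlalchemy-jsonapi serializer bound to your models, with 'posts' as a registered api type.
response = serializer.get_collection(session, {'sort': 'title'}, 'posts')
print(len(response.data['data']), len(response.data['included']))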
daler/trackhub
trackhub/base.py
https://github.com/daler/trackhub/blob/e4655f79177822529f80b923df117e38e28df702/trackhub/base.py#L59-L65
def add_child(self, child): """ Adds self as parent to child, and then adds child. """ child.parent = self self.children.append(child) return child
[ "def", "add_child", "(", "self", ",", "child", ")", ":", "child", ".", "parent", "=", "self", "self", ".", "children", ".", "append", "(", "child", ")", "return", "child" ]
Adds self as parent to child, and then adds child.
[ "Adds", "self", "as", "parent", "to", "child", "and", "then", "adds", "child", "." ]
python
train
27
serkanyersen/underscore.py
src/underscore.py
https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L314-L319
def pluck(self, key): """ Convenience version of a common use case of `map`: fetching a property. """ return self._wrap([x.get(key) for x in self.obj])
[ "def", "pluck", "(", "self", ",", "key", ")", ":", "return", "self", ".", "_wrap", "(", "[", "x", ".", "get", "(", "key", ")", "for", "x", "in", "self", ".", "obj", "]", ")" ]
Convenience version of a common use case of `map`: fetching a property.
[ "Convenience", "version", "of", "a", "common", "use", "case", "of", "map", ":", "fetching", "a", "property", "." ]
python
train
31
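Usage sketch for the record above, mirroring underscore.js's pluck:
from underscore import _
stooges = [{'name': 'moe', 'age': 40}, {'name': 'larry', 'age': 50}]
print(_(stooges).pluck('name'))  # ['moe', 'larry']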
bitesofcode/projexui
projexui/widgets/xchartwidget/xchartscene.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchartwidget/xchartscene.py#L163-L174
def drawBackground( self, painter, rect ): """ Draws the backgrounds for the different chart types. :param painter | <QPainter> rect | <QRect> """ if ( self._dirty ): self.rebuild() if ( self.showGrid() ): self.drawGrid(painter)
[ "def", "drawBackground", "(", "self", ",", "painter", ",", "rect", ")", ":", "if", "(", "self", ".", "_dirty", ")", ":", "self", ".", "rebuild", "(", ")", "if", "(", "self", ".", "showGrid", "(", ")", ")", ":", "self", ".", "drawGrid", "(", "painter", ")" ]
Draws the backgrounds for the different chart types. :param painter | <QPainter> rect | <QRect>
[ "Draws", "the", "backgrounds", "for", "the", "different", "chart", "types", ".", ":", "param", "painter", "|", "<QPainter", ">", "rect", "|", "<QRect", ">" ]
python
train
28.833333
notanumber/xapian-haystack
xapian_backend.py
https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1471-L1476
def _or_query(self, term_list, field, field_type): """ Joins each item of term_list decorated by _term_query with an OR. """ term_list = [self._term_query(term, field, field_type) for term in term_list] return xapian.Query(xapian.Query.OP_OR, term_list)
[ "def", "_or_query", "(", "self", ",", "term_list", ",", "field", ",", "field_type", ")", ":", "term_list", "=", "[", "self", ".", "_term_query", "(", "term", ",", "field", ",", "field_type", ")", "for", "term", "in", "term_list", "]", "return", "xapian", ".", "Query", "(", "xapian", ".", "Query", ".", "OP_OR", ",", "term_list", ")" ]
Joins each item of term_list decorated by _term_query with an OR.
[ "Joins", "each", "item", "of", "term_list", "decorated", "by", "_term_query", "with", "an", "OR", "." ]
python
train
48
tjvr/kurt
kurt/__init__.py
https://github.com/tjvr/kurt/blob/fcccd80cae11dc233f6dd02b40ec9a388c62f259/kurt/__init__.py#L1526-L1531
def has_insert(self, shape): """Returns True if any of the inserts have the given shape.""" for insert in self.inserts: if insert.shape == shape: return True return False
[ "def", "has_insert", "(", "self", ",", "shape", ")", ":", "for", "insert", "in", "self", ".", "inserts", ":", "if", "insert", ".", "shape", "==", "shape", ":", "return", "True", "return", "False" ]
Returns True if any of the inserts have the given shape.
[ "Returns", "True", "if", "any", "of", "the", "inserts", "have", "the", "given", "shape", "." ]
python
train
36.166667
nitmir/django-cas-server
cas_server/utils.py
https://github.com/nitmir/django-cas-server/blob/d106181b94c444f1946269da5c20f6c904840ad3/cas_server/utils.py#L293-L310
def _gen_ticket(prefix=None, lg=settings.CAS_TICKET_LEN): """ Generate a ticket with prefix ``prefix`` and length ``lg`` :param unicode prefix: An optional prefix (probably ST, PT, PGT or PGTIOU) :param int lg: The length of the generated ticket (with the prefix) :return: A randomlly generated ticket of length ``lg`` :rtype: unicode """ random_part = u''.join( random.choice( string.ascii_letters + string.digits ) for _ in range(lg - len(prefix or "") - 1) ) if prefix is not None: return u'%s-%s' % (prefix, random_part) else: return random_part
[ "def", "_gen_ticket", "(", "prefix", "=", "None", ",", "lg", "=", "settings", ".", "CAS_TICKET_LEN", ")", ":", "random_part", "=", "u''", ".", "join", "(", "random", ".", "choice", "(", "string", ".", "ascii_letters", "+", "string", ".", "digits", ")", "for", "_", "in", "range", "(", "lg", "-", "len", "(", "prefix", "or", "\"\"", ")", "-", "1", ")", ")", "if", "prefix", "is", "not", "None", ":", "return", "u'%s-%s'", "%", "(", "prefix", ",", "random_part", ")", "else", ":", "return", "random_part" ]
Generate a ticket with prefix ``prefix`` and length ``lg`` :param unicode prefix: An optional prefix (probably ST, PT, PGT or PGTIOU) :param int lg: The length of the generated ticket (with the prefix) :return: A randomly generated ticket of length ``lg`` :rtype: unicode
[ "Generate", "a", "ticket", "with", "prefix", "prefix", "and", "length", "lg" ]
python
train
35.611111
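Illustrative call for the record above; it is a module-private helper and importing it needs a configured Django settings module for the default length.
from cas_server.utils import _gen_ticket
print(_gen_ticket('ST', lg=29))  # e.g. 'ST-<26 random chars>' -- 29 characters in total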
aestrivex/bctpy
bct/algorithms/reference.py
https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/reference.py#L689-L718
def makerandCIJ_dir(n, k, seed=None): ''' This function generates a directed random network Parameters ---------- N : int number of vertices K : int number of edges seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- CIJ : NxN np.ndarray directed random connection matrix Notes ----- no connections are placed on the main diagonal. ''' rng = get_rng(seed) ix, = np.where(np.logical_not(np.eye(n)).flat) rp = rng.permutation(np.size(ix)) CIJ = np.zeros((n, n)) CIJ.flat[ix[rp][:k]] = 1 return CIJ
[ "def", "makerandCIJ_dir", "(", "n", ",", "k", ",", "seed", "=", "None", ")", ":", "rng", "=", "get_rng", "(", "seed", ")", "ix", ",", "=", "np", ".", "where", "(", "np", ".", "logical_not", "(", "np", ".", "eye", "(", "n", ")", ")", ".", "flat", ")", "rp", "=", "rng", ".", "permutation", "(", "np", ".", "size", "(", "ix", ")", ")", "CIJ", "=", "np", ".", "zeros", "(", "(", "n", ",", "n", ")", ")", "CIJ", ".", "flat", "[", "ix", "[", "rp", "]", "[", ":", "k", "]", "]", "=", "1", "return", "CIJ" ]
This function generates a directed random network Parameters ---------- N : int number of vertices K : int number of edges seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- CIJ : NxN np.ndarray directed random connection matrix Notes ----- no connections are placed on the main diagonal.
[ "This", "function", "generates", "a", "directed", "random", "network" ]
python
train
24.933333
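A quick check of the record above: 30 directed edges on 10 vertices, none on the main diagonal.
import numpy as np
from bct import makerandCIJ_dir

CIJ = makerandCIJ_dir(10, 30, seed=42)
print(CIJ.shape, int(CIJ.sum()), np.trace(CIJ))  # (10, 10) 30 0.0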
project-ncl/pnc-cli
pnc_cli/swagger_client/apis/buildconfigurations_api.py
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/swagger_client/apis/buildconfigurations_api.py#L2333-L2358
def remove_product_version(self, id, product_version_id, **kwargs): """ Removes a product version from the specified config set This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.remove_product_version(id, product_version_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Build configuration set id (required) :param int product_version_id: Product version id (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.remove_product_version_with_http_info(id, product_version_id, **kwargs) else: (data) = self.remove_product_version_with_http_info(id, product_version_id, **kwargs) return data
[ "def", "remove_product_version", "(", "self", ",", "id", ",", "product_version_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'callback'", ")", ":", "return", "self", ".", "remove_product_version_with_http_info", "(", "id", ",", "product_version_id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "remove_product_version_with_http_info", "(", "id", ",", "product_version_id", ",", "*", "*", "kwargs", ")", "return", "data" ]
Removes a product version from the specified config set This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.remove_product_version(id, product_version_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Build configuration set id (required) :param int product_version_id: Product version id (required) :return: None If the method is called asynchronously, returns the request thread.
[ "Removes", "a", "product", "version", "from", "the", "specified", "config", "set", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "define", "a", "callback", "function", "to", "be", "invoked", "when", "receiving", "the", "response", ".", ">>>", "def", "callback_function", "(", "response", ")", ":", ">>>", "pprint", "(", "response", ")", ">>>", ">>>", "thread", "=", "api", ".", "remove_product_version", "(", "id", "product_version_id", "callback", "=", "callback_function", ")" ]
python
train
46.230769
opencobra/cobrapy
cobra/core/reaction.py
https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/core/reaction.py#L869-L900
def subtract_metabolites(self, metabolites, combine=True, reversibly=True): """Subtract metabolites from a reaction. That means add the metabolites with -1*coefficient. If the final coefficient for a metabolite is 0 then the metabolite is removed from the reaction. Notes ----- * A final coefficient < 0 implies a reactant. * The change is reverted upon exit when using the model as a context. Parameters ---------- metabolites : dict Dictionary where the keys are of class Metabolite and the values are the coefficients. These metabolites will be added to the reaction. combine : bool Describes behavior a metabolite already exists in the reaction. True causes the coefficients to be added. False causes the coefficient to be replaced. reversibly : bool Whether to add the change to the context to make the change reversibly or not (primarily intended for internal use). """ self.add_metabolites({ k: -v for k, v in iteritems(metabolites)}, combine=combine, reversibly=reversibly)
[ "def", "subtract_metabolites", "(", "self", ",", "metabolites", ",", "combine", "=", "True", ",", "reversibly", "=", "True", ")", ":", "self", ".", "add_metabolites", "(", "{", "k", ":", "-", "v", "for", "k", ",", "v", "in", "iteritems", "(", "metabolites", ")", "}", ",", "combine", "=", "combine", ",", "reversibly", "=", "reversibly", ")" ]
Subtract metabolites from a reaction.

That means add the metabolites with -1*coefficient. If the final coefficient for a metabolite is 0 then the metabolite is removed from the reaction.

Notes
-----
* A final coefficient < 0 implies a reactant.

* The change is reverted upon exit when using the model as a context.

Parameters
----------
metabolites : dict
    Dictionary where the keys are of class Metabolite and the values are the coefficients. These metabolites will be added to the reaction.
combine : bool
    Describes behavior when a metabolite already exists in the reaction. True causes the coefficients to be added. False causes the coefficient to be replaced.
reversibly : bool
    Whether to add the change to the context to make the change reversible or not (primarily intended for internal use).
[ "Subtract", "metabolites", "from", "a", "reaction", "." ]
python
valid
37.375
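A short usage sketch of subtract_metabolites, assuming cobra is importable; the reaction and metabolite ids are made up for illustration:

    from cobra import Metabolite, Reaction

    atp = Metabolite("atp_c")
    rxn = Reaction("demo_rxn")
    rxn.add_metabolites({atp: -2})        # atp_c as a reactant, coefficient -2
    rxn.subtract_metabolites({atp: -1})   # same as adding +1
    print(rxn.metabolites[atp])           # -1
    rxn.subtract_metabolites({atp: -1})   # coefficient reaches 0; per the docstring
    print(atp in rxn.metabolites)         # above, the metabolite is then removed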
veltzer/pytconf
pytconf/config.py
https://github.com/veltzer/pytconf/blob/8dee43ace35d0dd2ab1105fb94057f650393360f/pytconf/config.py#L693-L708
def create_str(help_string=NO_HELP, default=NO_DEFAULT): # type: (str, Union[str, NO_DEFAULT_TYPE]) -> str """ Create a string parameter :param help_string: :param default: :return: """ # noinspection PyTypeChecker return ParamFunctions( help_string=help_string, default=default, type_name="str", function_s2t=convert_string_to_string, function_t2s=convert_string_to_string, )
[ "def", "create_str", "(", "help_string", "=", "NO_HELP", ",", "default", "=", "NO_DEFAULT", ")", ":", "# type: (str, Union[str, NO_DEFAULT_TYPE]) -> str", "# noinspection PyTypeChecker", "return", "ParamFunctions", "(", "help_string", "=", "help_string", ",", "default", "=", "default", ",", "type_name", "=", "\"str\"", ",", "function_s2t", "=", "convert_string_to_string", ",", "function_t2s", "=", "convert_string_to_string", ",", ")" ]
Create a string parameter :param help_string: :param default: :return:
[ "Create", "a", "string", "parameter", ":", "param", "help_string", ":", ":", "param", "default", ":", ":", "return", ":" ]
python
train
31.3125
tensorpack/tensorpack
tensorpack/utils/argtools.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/utils/argtools.py#L149-L178
def call_only_once(func): """ Decorate a method or property of a class, so that this method can only be called once for every instance. Calling it more than once will result in exception. """ @functools.wraps(func) def wrapper(*args, **kwargs): self = args[0] # cannot use hasattr here, because hasattr tries to getattr, which # fails if func is a property assert func.__name__ in dir(self), "call_only_once can only be used on method or property!" if not hasattr(self, '_CALL_ONLY_ONCE_CACHE'): cache = self._CALL_ONLY_ONCE_CACHE = set() else: cache = self._CALL_ONLY_ONCE_CACHE cls = type(self) # cannot use ismethod(), because decorated method becomes a function is_method = inspect.isfunction(getattr(cls, func.__name__)) assert func not in cache, \ "{} {}.{} can only be called once per object!".format( 'Method' if is_method else 'Property', cls.__name__, func.__name__) cache.add(func) return func(*args, **kwargs) return wrapper
[ "def", "call_only_once", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", "=", "args", "[", "0", "]", "# cannot use hasattr here, because hasattr tries to getattr, which", "# fails if func is a property", "assert", "func", ".", "__name__", "in", "dir", "(", "self", ")", ",", "\"call_only_once can only be used on method or property!\"", "if", "not", "hasattr", "(", "self", ",", "'_CALL_ONLY_ONCE_CACHE'", ")", ":", "cache", "=", "self", ".", "_CALL_ONLY_ONCE_CACHE", "=", "set", "(", ")", "else", ":", "cache", "=", "self", ".", "_CALL_ONLY_ONCE_CACHE", "cls", "=", "type", "(", "self", ")", "# cannot use ismethod(), because decorated method becomes a function", "is_method", "=", "inspect", ".", "isfunction", "(", "getattr", "(", "cls", ",", "func", ".", "__name__", ")", ")", "assert", "func", "not", "in", "cache", ",", "\"{} {}.{} can only be called once per object!\"", ".", "format", "(", "'Method'", "if", "is_method", "else", "'Property'", ",", "cls", ".", "__name__", ",", "func", ".", "__name__", ")", "cache", ".", "add", "(", "func", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
Decorate a method or property of a class, so that this method can only be called once for every instance. Calling it more than once will result in exception.
[ "Decorate", "a", "method", "or", "property", "of", "a", "class", "so", "that", "this", "method", "can", "only", "be", "called", "once", "for", "every", "instance", ".", "Calling", "it", "more", "than", "once", "will", "result", "in", "exception", "." ]
python
train
36.9
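An illustrative use of the decorator, assuming it is importable from the module shown in the record's path; the Resource class is hypothetical:

    from tensorpack.utils.argtools import call_only_once

    class Resource:
        @call_only_once
        def build(self):
            self.built = True

    r = Resource()
    r.build()        # first call succeeds
    try:
        r.build()    # a second call on the same instance trips the assertion
    except AssertionError as exc:
        print(exc)   # "Method Resource.build can only be called once per object!"

A second instance gets its own _CALL_ONLY_ONCE_CACHE, so calling build() on a fresh object is still allowed.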
Kaggle/kaggle-api
kaggle/api/kaggle_api_extended.py
https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L1634-L1666
def kernels_initialize(self, folder): """ create a new kernel in a specified folder from template, including json metadata that grabs values from the configuration. Parameters ========== folder: the path of the folder """ if not os.path.isdir(folder): raise ValueError('Invalid folder: ' + folder) resources = [] resource = {'path': 'INSERT_SCRIPT_PATH_HERE'} resources.append(resource) username = self.get_config_value(self.CONFIG_NAME_USER) meta_data = { 'id': username + '/INSERT_KERNEL_SLUG_HERE', 'title': 'INSERT_TITLE_HERE', 'code_file': 'INSERT_CODE_FILE_PATH_HERE', 'language': 'INSERT_LANGUAGE_HERE', 'kernel_type': 'INSERT_KERNEL_TYPE_HERE', 'is_private': 'true', 'enable_gpu': 'false', 'enable_internet': 'false', 'dataset_sources': [], 'competition_sources': [], 'kernel_sources': [], } meta_file = os.path.join(folder, self.KERNEL_METADATA_FILE) with open(meta_file, 'w') as f: json.dump(meta_data, f, indent=2) return meta_file
[ "def", "kernels_initialize", "(", "self", ",", "folder", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "folder", ")", ":", "raise", "ValueError", "(", "'Invalid folder: '", "+", "folder", ")", "resources", "=", "[", "]", "resource", "=", "{", "'path'", ":", "'INSERT_SCRIPT_PATH_HERE'", "}", "resources", ".", "append", "(", "resource", ")", "username", "=", "self", ".", "get_config_value", "(", "self", ".", "CONFIG_NAME_USER", ")", "meta_data", "=", "{", "'id'", ":", "username", "+", "'/INSERT_KERNEL_SLUG_HERE'", ",", "'title'", ":", "'INSERT_TITLE_HERE'", ",", "'code_file'", ":", "'INSERT_CODE_FILE_PATH_HERE'", ",", "'language'", ":", "'INSERT_LANGUAGE_HERE'", ",", "'kernel_type'", ":", "'INSERT_KERNEL_TYPE_HERE'", ",", "'is_private'", ":", "'true'", ",", "'enable_gpu'", ":", "'false'", ",", "'enable_internet'", ":", "'false'", ",", "'dataset_sources'", ":", "[", "]", ",", "'competition_sources'", ":", "[", "]", ",", "'kernel_sources'", ":", "[", "]", ",", "}", "meta_file", "=", "os", ".", "path", ".", "join", "(", "folder", ",", "self", ".", "KERNEL_METADATA_FILE", ")", "with", "open", "(", "meta_file", ",", "'w'", ")", "as", "f", ":", "json", ".", "dump", "(", "meta_data", ",", "f", ",", "indent", "=", "2", ")", "return", "meta_file" ]
create a new kernel in a specified folder from template, including json metadata that grabs values from the configuration. Parameters ========== folder: the path of the folder
[ "create", "a", "new", "kernel", "in", "a", "specified", "folder", "from", "template", "including", "json", "metadata", "that", "grabs", "values", "from", "the", "configuration", ".", "Parameters", "==========", "folder", ":", "the", "path", "of", "the", "folder" ]
python
train
36.787879
enricobacis/wos
wos/client.py
https://github.com/enricobacis/wos/blob/a51f4d1a983c2c7529caac3e09606a432223630d/wos/client.py#L114-L121
def close(self): """The close operation loads the session if it is valid and then closes it and releases the session seat. All the session data are deleted and become invalid after the request is processed. The session ID can no longer be used in subsequent requests.""" if self._SID: self._auth.service.closeSession() self._SID = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_SID", ":", "self", ".", "_auth", ".", "service", ".", "closeSession", "(", ")", "self", ".", "_SID", "=", "None" ]
The close operation loads the session if it is valid and then closes it and releases the session seat. All the session data are deleted and become invalid after the request is processed. The session ID can no longer be used in subsequent requests.
[ "The", "close", "operation", "loads", "the", "session", "if", "it", "is", "valid", "and", "then", "closes", "it", "and", "releases", "the", "session", "seat", ".", "All", "the", "session", "data", "are", "deleted", "and", "become", "invalid", "after", "the", "request", "is", "processed", ".", "The", "session", "ID", "can", "no", "longer", "be", "used", "in", "subsequent", "requests", "." ]
python
train
49
ska-sa/purr
Purr/Purrer.py
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Purrer.py#L72-L81
def is_purrlog(path): """Checks if path refers to a valid purrlog. Path must exist, and must contain either at least one directory called entry-YYYYMMDD-HHMMSS, or the file "dirconfig" """ if not os.path.isdir(path): return False if list(filter(os.path.isdir, glob.glob( os.path.join(path, "entry-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9]")))): return True return os.path.exists(os.path.join(path, "dirconfig"))
[ "def", "is_purrlog", "(", "path", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "return", "False", "if", "list", "(", "filter", "(", "os", ".", "path", ".", "isdir", ",", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "path", ",", "\"entry-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9]\"", ")", ")", ")", ")", ":", "return", "True", "return", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "path", ",", "\"dirconfig\"", ")", ")" ]
Checks if path refers to a valid purrlog. Path must exist, and must contain either at least one directory called entry-YYYYMMDD-HHMMSS, or the file "dirconfig"
[ "Checks", "if", "path", "refers", "to", "a", "valid", "purrlog", ".", "Path", "must", "exist", "and", "must", "contain", "either", "at", "least", "one", "directory", "called", "entry", "-", "YYYYMMDD", "-", "HHMMSS", "or", "the", "file", "dirconfig" ]
python
train
52
MycroftAI/mycroft-precise
precise/scripts/train_incremental.py
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/scripts/train_incremental.py#L95-L110
def retrain(self): """Train for a session, pulling in any new data from the filesystem""" folder = TrainData.from_folder(self.args.folder) train_data, test_data = folder.load(True, not self.args.no_validation) train_data = TrainData.merge(train_data, self.sampled_data) test_data = TrainData.merge(test_data, self.test) train_inputs, train_outputs = train_data print() try: self.listener.runner.model.fit( train_inputs, train_outputs, self.args.batch_size, self.epoch + self.args.epochs, validation_data=test_data, callbacks=self.callbacks, initial_epoch=self.epoch ) finally: self.listener.runner.model.save(self.args.model)
[ "def", "retrain", "(", "self", ")", ":", "folder", "=", "TrainData", ".", "from_folder", "(", "self", ".", "args", ".", "folder", ")", "train_data", ",", "test_data", "=", "folder", ".", "load", "(", "True", ",", "not", "self", ".", "args", ".", "no_validation", ")", "train_data", "=", "TrainData", ".", "merge", "(", "train_data", ",", "self", ".", "sampled_data", ")", "test_data", "=", "TrainData", ".", "merge", "(", "test_data", ",", "self", ".", "test", ")", "train_inputs", ",", "train_outputs", "=", "train_data", "print", "(", ")", "try", ":", "self", ".", "listener", ".", "runner", ".", "model", ".", "fit", "(", "train_inputs", ",", "train_outputs", ",", "self", ".", "args", ".", "batch_size", ",", "self", ".", "epoch", "+", "self", ".", "args", ".", "epochs", ",", "validation_data", "=", "test_data", ",", "callbacks", "=", "self", ".", "callbacks", ",", "initial_epoch", "=", "self", ".", "epoch", ")", "finally", ":", "self", ".", "listener", ".", "runner", ".", "model", ".", "save", "(", "self", ".", "args", ".", "model", ")" ]
Train for a session, pulling in any new data from the filesystem
[ "Train", "for", "a", "session", "pulling", "in", "any", "new", "data", "from", "the", "filesystem" ]
python
train
46.9375
tylertreat/BigQuery-Python
bigquery/schema_builder.py
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/schema_builder.py#L42-L98
def describe_field(k, v, timestamp_parser=default_timestamp_parser): """Given a key representing a column name and value representing the value stored in the column, return a representation of the BigQuery schema element describing that field. Raise errors if invalid value types are provided. Parameters ---------- k : Union[str, unicode] Key representing the column v : Union[str, unicode, int, float, datetime, object] Value mapped to by `k` Returns ------- object Describing the field Raises ------ Exception If invalid value types are provided. Examples -------- >>> describe_field("username", "Bob") {"name": "username", "type": "string", "mode": "nullable"} >>> describe_field("users", [{"username": "Bob"}]) {"name": "users", "type": "record", "mode": "repeated", "fields": [{"name":"username","type":"string","mode":"nullable"}]} """ def bq_schema_field(name, bq_type, mode): return {"name": name, "type": bq_type, "mode": mode} if isinstance(v, list): if len(v) == 0: raise Exception( "Can't describe schema because of empty list {0}:[]".format(k)) v = v[0] mode = "repeated" else: mode = "nullable" bq_type = bigquery_type(v, timestamp_parser=timestamp_parser) if not bq_type: raise InvalidTypeException(k, v) field = bq_schema_field(k, bq_type, mode) if bq_type == "record": try: field['fields'] = schema_from_record(v, timestamp_parser) except InvalidTypeException as e: # recursively construct the key causing the error raise InvalidTypeException("%s.%s" % (k, e.key), e.value) return field
[ "def", "describe_field", "(", "k", ",", "v", ",", "timestamp_parser", "=", "default_timestamp_parser", ")", ":", "def", "bq_schema_field", "(", "name", ",", "bq_type", ",", "mode", ")", ":", "return", "{", "\"name\"", ":", "name", ",", "\"type\"", ":", "bq_type", ",", "\"mode\"", ":", "mode", "}", "if", "isinstance", "(", "v", ",", "list", ")", ":", "if", "len", "(", "v", ")", "==", "0", ":", "raise", "Exception", "(", "\"Can't describe schema because of empty list {0}:[]\"", ".", "format", "(", "k", ")", ")", "v", "=", "v", "[", "0", "]", "mode", "=", "\"repeated\"", "else", ":", "mode", "=", "\"nullable\"", "bq_type", "=", "bigquery_type", "(", "v", ",", "timestamp_parser", "=", "timestamp_parser", ")", "if", "not", "bq_type", ":", "raise", "InvalidTypeException", "(", "k", ",", "v", ")", "field", "=", "bq_schema_field", "(", "k", ",", "bq_type", ",", "mode", ")", "if", "bq_type", "==", "\"record\"", ":", "try", ":", "field", "[", "'fields'", "]", "=", "schema_from_record", "(", "v", ",", "timestamp_parser", ")", "except", "InvalidTypeException", "as", "e", ":", "# recursively construct the key causing the error", "raise", "InvalidTypeException", "(", "\"%s.%s\"", "%", "(", "k", ",", "e", ".", "key", ")", ",", "e", ".", "value", ")", "return", "field" ]
Given a key representing a column name and value representing the value stored in the column, return a representation of the BigQuery schema element describing that field. Raise errors if invalid value types are provided. Parameters ---------- k : Union[str, unicode] Key representing the column v : Union[str, unicode, int, float, datetime, object] Value mapped to by `k` Returns ------- object Describing the field Raises ------ Exception If invalid value types are provided. Examples -------- >>> describe_field("username", "Bob") {"name": "username", "type": "string", "mode": "nullable"} >>> describe_field("users", [{"username": "Bob"}]) {"name": "users", "type": "record", "mode": "repeated", "fields": [{"name":"username","type":"string","mode":"nullable"}]}
[ "Given", "a", "key", "representing", "a", "column", "name", "and", "value", "representing", "the", "value", "stored", "in", "the", "column", "return", "a", "representation", "of", "the", "BigQuery", "schema", "element", "describing", "that", "field", ".", "Raise", "errors", "if", "invalid", "value", "types", "are", "provided", "." ]
python
train
30.491228
espressif/esptool
ecdsa/numbertheory.py
https://github.com/espressif/esptool/blob/c583756c118039cfcfe256f7a3285618914d16a5/ecdsa/numbertheory.py#L389-L459
def is_prime( n ): """Return True if x is prime, False otherwise. We use the Miller-Rabin test, as given in Menezes et al. p. 138. This test is not exact: there are composite values n for which it returns True. In testing the odd numbers from 10000001 to 19999999, about 66 composites got past the first test, 5 got past the second test, and none got past the third. Since factors of 2, 3, 5, 7, and 11 were detected during preliminary screening, the number of numbers tested by Miller-Rabin was (19999999 - 10000001)*(2/3)*(4/5)*(6/7) = 4.57 million. """ # (This is used to study the risk of false positives:) global miller_rabin_test_count miller_rabin_test_count = 0 if n <= smallprimes[-1]: if n in smallprimes: return True else: return False if gcd( n, 2*3*5*7*11 ) != 1: return False # Choose a number of iterations sufficient to reduce the # probability of accepting a composite below 2**-80 # (from Menezes et al. Table 4.4): t = 40 n_bits = 1 + int( math.log( n, 2 ) ) for k, tt in ( ( 100, 27 ), ( 150, 18 ), ( 200, 15 ), ( 250, 12 ), ( 300, 9 ), ( 350, 8 ), ( 400, 7 ), ( 450, 6 ), ( 550, 5 ), ( 650, 4 ), ( 850, 3 ), ( 1300, 2 ), ): if n_bits < k: break t = tt # Run the test t times: s = 0 r = n - 1 while ( r % 2 ) == 0: s = s + 1 r = r // 2 for i in range( t ): a = smallprimes[ i ] y = modular_exp( a, r, n ) if y != 1 and y != n-1: j = 1 while j <= s - 1 and y != n - 1: y = modular_exp( y, 2, n ) if y == 1: miller_rabin_test_count = i + 1 return False j = j + 1 if y != n-1: miller_rabin_test_count = i + 1 return False return True
[ "def", "is_prime", "(", "n", ")", ":", "# (This is used to study the risk of false positives:)", "global", "miller_rabin_test_count", "miller_rabin_test_count", "=", "0", "if", "n", "<=", "smallprimes", "[", "-", "1", "]", ":", "if", "n", "in", "smallprimes", ":", "return", "True", "else", ":", "return", "False", "if", "gcd", "(", "n", ",", "2", "*", "3", "*", "5", "*", "7", "*", "11", ")", "!=", "1", ":", "return", "False", "# Choose a number of iterations sufficient to reduce the", "# probability of accepting a composite below 2**-80", "# (from Menezes et al. Table 4.4):", "t", "=", "40", "n_bits", "=", "1", "+", "int", "(", "math", ".", "log", "(", "n", ",", "2", ")", ")", "for", "k", ",", "tt", "in", "(", "(", "100", ",", "27", ")", ",", "(", "150", ",", "18", ")", ",", "(", "200", ",", "15", ")", ",", "(", "250", ",", "12", ")", ",", "(", "300", ",", "9", ")", ",", "(", "350", ",", "8", ")", ",", "(", "400", ",", "7", ")", ",", "(", "450", ",", "6", ")", ",", "(", "550", ",", "5", ")", ",", "(", "650", ",", "4", ")", ",", "(", "850", ",", "3", ")", ",", "(", "1300", ",", "2", ")", ",", ")", ":", "if", "n_bits", "<", "k", ":", "break", "t", "=", "tt", "# Run the test t times:", "s", "=", "0", "r", "=", "n", "-", "1", "while", "(", "r", "%", "2", ")", "==", "0", ":", "s", "=", "s", "+", "1", "r", "=", "r", "//", "2", "for", "i", "in", "range", "(", "t", ")", ":", "a", "=", "smallprimes", "[", "i", "]", "y", "=", "modular_exp", "(", "a", ",", "r", ",", "n", ")", "if", "y", "!=", "1", "and", "y", "!=", "n", "-", "1", ":", "j", "=", "1", "while", "j", "<=", "s", "-", "1", "and", "y", "!=", "n", "-", "1", ":", "y", "=", "modular_exp", "(", "y", ",", "2", ",", "n", ")", "if", "y", "==", "1", ":", "miller_rabin_test_count", "=", "i", "+", "1", "return", "False", "j", "=", "j", "+", "1", "if", "y", "!=", "n", "-", "1", ":", "miller_rabin_test_count", "=", "i", "+", "1", "return", "False", "return", "True" ]
Return True if x is prime, False otherwise. We use the Miller-Rabin test, as given in Menezes et al. p. 138. This test is not exact: there are composite values n for which it returns True. In testing the odd numbers from 10000001 to 19999999, about 66 composites got past the first test, 5 got past the second test, and none got past the third. Since factors of 2, 3, 5, 7, and 11 were detected during preliminary screening, the number of numbers tested by Miller-Rabin was (19999999 - 10000001)*(2/3)*(4/5)*(6/7) = 4.57 million.
[ "Return", "True", "if", "x", "is", "prime", "False", "otherwise", "." ]
python
train
26.380282
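The docstring above sketches the Miller-Rabin test; a compact, self-contained version of the same idea (fixed witness bases, no dependence on the module's smallprimes or modular_exp helpers) looks roughly like this:

    def miller_rabin(n, bases=(2, 3, 5, 7, 11, 13)):
        if n < 2:
            return False
        for p in (2, 3, 5, 7, 11, 13):           # quick trial division, as in the original
            if n % p == 0:
                return n == p
        s, r = 0, n - 1                           # write n - 1 = 2**s * r with r odd
        while r % 2 == 0:
            s, r = s + 1, r // 2
        for a in bases:
            x = pow(a, r, n)
            if x in (1, n - 1):
                continue
            for _ in range(s - 1):
                x = pow(x, 2, n)
                if x == n - 1:
                    break
            else:
                return False                      # a is a witness that n is composite
        return True

    assert miller_rabin(7919) and not miller_rabin(561)   # 1000th prime; a Carmichael number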
bslatkin/dpxdt
dpxdt/server/work_queue_handlers.py
https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue_handlers.py#L126-L169
def view_all_work_queues(): """Page for viewing the index of all active work queues.""" count_list = list( db.session.query( work_queue.WorkQueue.queue_name, work_queue.WorkQueue.status, func.count(work_queue.WorkQueue.task_id)) .group_by(work_queue.WorkQueue.queue_name, work_queue.WorkQueue.status)) queue_dict = {} for name, status, count in count_list: queue_dict[(name, status)] = dict( name=name, status=status, count=count) max_created_list = list( db.session.query( work_queue.WorkQueue.queue_name, work_queue.WorkQueue.status, func.max(work_queue.WorkQueue.created)) .group_by(work_queue.WorkQueue.queue_name, work_queue.WorkQueue.status)) for name, status, newest_created in max_created_list: queue_dict[(name, status)]['newest_created'] = newest_created min_eta_list = list( db.session.query( work_queue.WorkQueue.queue_name, work_queue.WorkQueue.status, func.min(work_queue.WorkQueue.eta)) .group_by(work_queue.WorkQueue.queue_name, work_queue.WorkQueue.status)) for name, status, oldest_eta in min_eta_list: queue_dict[(name, status)]['oldest_eta'] = oldest_eta queue_list = list(queue_dict.values()) queue_list.sort(key=lambda x: (x['name'], x['status'])) context = dict( queue_list=queue_list, ) return render_template('view_work_queue_index.html', **context)
[ "def", "view_all_work_queues", "(", ")", ":", "count_list", "=", "list", "(", "db", ".", "session", ".", "query", "(", "work_queue", ".", "WorkQueue", ".", "queue_name", ",", "work_queue", ".", "WorkQueue", ".", "status", ",", "func", ".", "count", "(", "work_queue", ".", "WorkQueue", ".", "task_id", ")", ")", ".", "group_by", "(", "work_queue", ".", "WorkQueue", ".", "queue_name", ",", "work_queue", ".", "WorkQueue", ".", "status", ")", ")", "queue_dict", "=", "{", "}", "for", "name", ",", "status", ",", "count", "in", "count_list", ":", "queue_dict", "[", "(", "name", ",", "status", ")", "]", "=", "dict", "(", "name", "=", "name", ",", "status", "=", "status", ",", "count", "=", "count", ")", "max_created_list", "=", "list", "(", "db", ".", "session", ".", "query", "(", "work_queue", ".", "WorkQueue", ".", "queue_name", ",", "work_queue", ".", "WorkQueue", ".", "status", ",", "func", ".", "max", "(", "work_queue", ".", "WorkQueue", ".", "created", ")", ")", ".", "group_by", "(", "work_queue", ".", "WorkQueue", ".", "queue_name", ",", "work_queue", ".", "WorkQueue", ".", "status", ")", ")", "for", "name", ",", "status", ",", "newest_created", "in", "max_created_list", ":", "queue_dict", "[", "(", "name", ",", "status", ")", "]", "[", "'newest_created'", "]", "=", "newest_created", "min_eta_list", "=", "list", "(", "db", ".", "session", ".", "query", "(", "work_queue", ".", "WorkQueue", ".", "queue_name", ",", "work_queue", ".", "WorkQueue", ".", "status", ",", "func", ".", "min", "(", "work_queue", ".", "WorkQueue", ".", "eta", ")", ")", ".", "group_by", "(", "work_queue", ".", "WorkQueue", ".", "queue_name", ",", "work_queue", ".", "WorkQueue", ".", "status", ")", ")", "for", "name", ",", "status", ",", "oldest_eta", "in", "min_eta_list", ":", "queue_dict", "[", "(", "name", ",", "status", ")", "]", "[", "'oldest_eta'", "]", "=", "oldest_eta", "queue_list", "=", "list", "(", "queue_dict", ".", "values", "(", ")", ")", "queue_list", ".", "sort", "(", "key", "=", "lambda", "x", ":", "(", "x", "[", "'name'", "]", ",", "x", "[", "'status'", "]", ")", ")", "context", "=", "dict", "(", "queue_list", "=", "queue_list", ",", ")", "return", "render_template", "(", "'view_work_queue_index.html'", ",", "*", "*", "context", ")" ]
Page for viewing the index of all active work queues.
[ "Page", "for", "viewing", "the", "index", "of", "all", "active", "work", "queues", "." ]
python
train
35.090909
brainiak/brainiak
brainiak/reprsimil/brsa.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/reprsimil/brsa.py#L4153-L4160
def _set_rho_grids(self): """ Set the grids and weights for rho used in numerical integration of AR(1) parameters. """ rho_grids = np.arange(self.rho_bins) * 2 / self.rho_bins - 1 \ + 1 / self.rho_bins rho_weights = np.ones(self.rho_bins) / self.rho_bins return rho_grids, rho_weights
[ "def", "_set_rho_grids", "(", "self", ")", ":", "rho_grids", "=", "np", ".", "arange", "(", "self", ".", "rho_bins", ")", "*", "2", "/", "self", ".", "rho_bins", "-", "1", "+", "1", "/", "self", ".", "rho_bins", "rho_weights", "=", "np", ".", "ones", "(", "self", ".", "rho_bins", ")", "/", "self", ".", "rho_bins", "return", "rho_grids", ",", "rho_weights" ]
Set the grids and weights for rho used in numerical integration of AR(1) parameters.
[ "Set", "the", "grids", "and", "weights", "for", "rho", "used", "in", "numerical", "integration", "of", "AR", "(", "1", ")", "parameters", "." ]
python
train
42.625
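The expression above places one grid point at the midpoint of each of rho_bins equal-width bins covering (-1, 1), with uniform weights; a tiny numpy check makes that concrete:

    import numpy as np

    rho_bins = 4
    rho_grids = np.arange(rho_bins) * 2 / rho_bins - 1 + 1 / rho_bins
    rho_weights = np.ones(rho_bins) / rho_bins
    print(rho_grids)     # [-0.75 -0.25  0.25  0.75]
    print(rho_weights)   # [0.25 0.25 0.25 0.25], summing to 1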
tensorflow/tensor2tensor
tensor2tensor/data_generators/multinli.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multinli.py#L42-L59
def _maybe_download_corpora(tmp_dir): """Download corpora for multinli. Args: tmp_dir: a string Returns: a string """ mnli_filename = "MNLI.zip" mnli_finalpath = os.path.join(tmp_dir, "MNLI") if not tf.gfile.Exists(mnli_finalpath): zip_filepath = generator_utils.maybe_download( tmp_dir, mnli_filename, _MNLI_URL) zip_ref = zipfile.ZipFile(zip_filepath, "r") zip_ref.extractall(tmp_dir) zip_ref.close() return mnli_finalpath
[ "def", "_maybe_download_corpora", "(", "tmp_dir", ")", ":", "mnli_filename", "=", "\"MNLI.zip\"", "mnli_finalpath", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "\"MNLI\"", ")", "if", "not", "tf", ".", "gfile", ".", "Exists", "(", "mnli_finalpath", ")", ":", "zip_filepath", "=", "generator_utils", ".", "maybe_download", "(", "tmp_dir", ",", "mnli_filename", ",", "_MNLI_URL", ")", "zip_ref", "=", "zipfile", ".", "ZipFile", "(", "zip_filepath", ",", "\"r\"", ")", "zip_ref", ".", "extractall", "(", "tmp_dir", ")", "zip_ref", ".", "close", "(", ")", "return", "mnli_finalpath" ]
Download corpora for multinli. Args: tmp_dir: a string Returns: a string
[ "Download", "corpora", "for", "multinli", "." ]
python
train
25.388889
mitsei/dlkit
dlkit/json_/grading/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/sessions.py#L2610-L2643
def get_grade_entry_form_for_update(self, grade_entry_id): """Gets the grade entry form for updating an existing entry. A new grade entry form should be requested for each update transaction. arg: grade_entry_id (osid.id.Id): the ``Id`` of the ``GradeEntry`` return: (osid.grading.GradeEntryForm) - the grade entry form raise: NotFound - ``grade_entry_id`` is not found raise: NullArgument - ``grade_entry_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ collection = JSONClientValidated('grading', collection='GradeEntry', runtime=self._runtime) if not isinstance(grade_entry_id, ABCId): raise errors.InvalidArgument('the argument is not a valid OSID Id') if (grade_entry_id.get_identifier_namespace() != 'grading.GradeEntry' or grade_entry_id.get_authority() != self._authority): raise errors.InvalidArgument() result = collection.find_one({'_id': ObjectId(grade_entry_id.get_identifier())}) obj_form = objects.GradeEntryForm( osid_object_map=result, effective_agent_id=str(self.get_effective_agent_id()), runtime=self._runtime, proxy=self._proxy) self._forms[obj_form.get_id().get_identifier()] = not UPDATED return obj_form
[ "def", "get_grade_entry_form_for_update", "(", "self", ",", "grade_entry_id", ")", ":", "collection", "=", "JSONClientValidated", "(", "'grading'", ",", "collection", "=", "'GradeEntry'", ",", "runtime", "=", "self", ".", "_runtime", ")", "if", "not", "isinstance", "(", "grade_entry_id", ",", "ABCId", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'the argument is not a valid OSID Id'", ")", "if", "(", "grade_entry_id", ".", "get_identifier_namespace", "(", ")", "!=", "'grading.GradeEntry'", "or", "grade_entry_id", ".", "get_authority", "(", ")", "!=", "self", ".", "_authority", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", ")", "result", "=", "collection", ".", "find_one", "(", "{", "'_id'", ":", "ObjectId", "(", "grade_entry_id", ".", "get_identifier", "(", ")", ")", "}", ")", "obj_form", "=", "objects", ".", "GradeEntryForm", "(", "osid_object_map", "=", "result", ",", "effective_agent_id", "=", "str", "(", "self", ".", "get_effective_agent_id", "(", ")", ")", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")", "self", ".", "_forms", "[", "obj_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "=", "not", "UPDATED", "return", "obj_form" ]
Gets the grade entry form for updating an existing entry. A new grade entry form should be requested for each update transaction. arg: grade_entry_id (osid.id.Id): the ``Id`` of the ``GradeEntry`` return: (osid.grading.GradeEntryForm) - the grade entry form raise: NotFound - ``grade_entry_id`` is not found raise: NullArgument - ``grade_entry_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "grade", "entry", "form", "for", "updating", "an", "existing", "entry", "." ]
python
train
46.117647
tamasgal/km3pipe
km3pipe/stats.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/stats.py#L143-L146
def perc(arr, p=95, **kwargs): """Create symmetric percentiles, with ``p`` coverage.""" offset = (100 - p) / 2 return np.percentile(arr, (offset, 100 - offset), **kwargs)
[ "def", "perc", "(", "arr", ",", "p", "=", "95", ",", "*", "*", "kwargs", ")", ":", "offset", "=", "(", "100", "-", "p", ")", "/", "2", "return", "np", ".", "percentile", "(", "arr", ",", "(", "offset", ",", "100", "-", "offset", ")", ",", "*", "*", "kwargs", ")" ]
Create symmetric percentiles, with ``p`` coverage.
[ "Create", "symmetric", "percentiles", "with", "p", "coverage", "." ]
python
train
44.75
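For example, perc with p=90 returns the 5th and 95th percentiles, i.e. a symmetric interval holding the central 90% of the sample; the equivalent direct numpy call (the array contents are arbitrary) is:

    import numpy as np

    arr = np.random.normal(size=10_000)
    lo, hi = np.percentile(arr, (5, 95))
    print(lo, hi)   # roughly -1.64 and 1.64 for a standard normal sample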
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L1844-L1868
def close(self): """close(self)""" if self.isClosed: raise ValueError("operation illegal for closed doc") if hasattr(self, '_outline') and self._outline: self._dropOutline(self._outline) self._outline = None self._reset_page_refs() self.metadata = None self.stream = None self.isClosed = True self.openErrCode = 0 self.openErrMsg = '' self.FontInfos = [] for gmap in self.Graftmaps: self.Graftmaps[gmap] = None self.Graftmaps = {} self.ShownPages = {} val = _fitz.Document_close(self) self.thisown = False return val
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "isClosed", ":", "raise", "ValueError", "(", "\"operation illegal for closed doc\"", ")", "if", "hasattr", "(", "self", ",", "'_outline'", ")", "and", "self", ".", "_outline", ":", "self", ".", "_dropOutline", "(", "self", ".", "_outline", ")", "self", ".", "_outline", "=", "None", "self", ".", "_reset_page_refs", "(", ")", "self", ".", "metadata", "=", "None", "self", ".", "stream", "=", "None", "self", ".", "isClosed", "=", "True", "self", ".", "openErrCode", "=", "0", "self", ".", "openErrMsg", "=", "''", "self", ".", "FontInfos", "=", "[", "]", "for", "gmap", "in", "self", ".", "Graftmaps", ":", "self", ".", "Graftmaps", "[", "gmap", "]", "=", "None", "self", ".", "Graftmaps", "=", "{", "}", "self", ".", "ShownPages", "=", "{", "}", "val", "=", "_fitz", ".", "Document_close", "(", "self", ")", "self", ".", "thisown", "=", "False", "return", "val" ]
close(self)
[ "close", "(", "self", ")" ]
python
train
27.44
MacHu-GWU/crawlib-project
crawlib/cache.py
https://github.com/MacHu-GWU/crawlib-project/blob/241516f2a7a0a32c692f7af35a1f44064e8ce1ab/crawlib/cache.py#L75-L91
def create_cache(directory, compress_level=6, value_type_is_binary=False, **kwargs): """ Create a html cache. Html string will be automatically compressed. :param directory: path for the cache directory. :param compress_level: 0 ~ 9, 9 is slowest and smallest. :param kwargs: other arguments. :return: a `diskcache.Cache()` """ cache = diskcache.Cache( directory, disk=CompressedDisk, disk_compress_level=compress_level, disk_value_type_is_binary=value_type_is_binary, **kwargs ) return cache
[ "def", "create_cache", "(", "directory", ",", "compress_level", "=", "6", ",", "value_type_is_binary", "=", "False", ",", "*", "*", "kwargs", ")", ":", "cache", "=", "diskcache", ".", "Cache", "(", "directory", ",", "disk", "=", "CompressedDisk", ",", "disk_compress_level", "=", "compress_level", ",", "disk_value_type_is_binary", "=", "value_type_is_binary", ",", "*", "*", "kwargs", ")", "return", "cache" ]
Create a html cache. Html string will be automatically compressed. :param directory: path for the cache directory. :param compress_level: 0 ~ 9, 9 is slowest and smallest. :param kwargs: other arguments. :return: a `diskcache.Cache()`
[ "Create", "a", "html", "cache", ".", "Html", "string", "will", "be", "automatically", "compressed", "." ]
python
train
32.764706
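A hypothetical use of the factory above (the directory and key are illustrative); diskcache.Cache supports dict-style access, so the compression stays transparent to the caller:

    cache = create_cache("/tmp/html-cache", compress_level=9)
    cache["https://example.com"] = "<html>...</html>"   # compressed on write
    html = cache["https://example.com"]                  # decompressed on read
    cache.close()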
caseyjlaw/rtpipe
rtpipe/interactive.py
https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/interactive.py#L363-L373
def plotnoise(noisepkl, mergepkl, plot_width=950, plot_height=400): """ Make two panel plot to summary noise analysis with estimated flux scale """ d = pickle.load(open(mergepkl)) ndist, imstd, flagfrac = plotnoisedist(noisepkl, plot_width=plot_width/2, plot_height=plot_height) fluxscale = calcfluxscale(d, imstd, flagfrac) logger.info('Median image noise is {0:.3} Jy.'.format(fluxscale*imstd)) ncum, imnoise = plotnoisecum(noisepkl, fluxscale=fluxscale, plot_width=plot_width/2, plot_height=plot_height) hndle = show(Row(ndist, ncum, width=plot_width, height=plot_height)) return imnoise
[ "def", "plotnoise", "(", "noisepkl", ",", "mergepkl", ",", "plot_width", "=", "950", ",", "plot_height", "=", "400", ")", ":", "d", "=", "pickle", ".", "load", "(", "open", "(", "mergepkl", ")", ")", "ndist", ",", "imstd", ",", "flagfrac", "=", "plotnoisedist", "(", "noisepkl", ",", "plot_width", "=", "plot_width", "/", "2", ",", "plot_height", "=", "plot_height", ")", "fluxscale", "=", "calcfluxscale", "(", "d", ",", "imstd", ",", "flagfrac", ")", "logger", ".", "info", "(", "'Median image noise is {0:.3} Jy.'", ".", "format", "(", "fluxscale", "*", "imstd", ")", ")", "ncum", ",", "imnoise", "=", "plotnoisecum", "(", "noisepkl", ",", "fluxscale", "=", "fluxscale", ",", "plot_width", "=", "plot_width", "/", "2", ",", "plot_height", "=", "plot_height", ")", "hndle", "=", "show", "(", "Row", "(", "ndist", ",", "ncum", ",", "width", "=", "plot_width", ",", "height", "=", "plot_height", ")", ")", "return", "imnoise" ]
Make a two-panel plot to summarize noise analysis with estimated flux scale
[ "Make", "two", "panel", "plot", "to", "summary", "noise", "analysis", "with", "estimated", "flux", "scale" ]
python
train
55.818182
ampl/amplpy
amplpy/ampl.py
https://github.com/ampl/amplpy/blob/39df6954049a11a8f666aed26853259b4687099a/amplpy/ampl.py#L657-L672
def readTable(self, tableName): """ Read the table corresponding to the specified name, equivalent to the AMPL statement: .. code-block:: ampl read table tableName; Args: tableName: Name of the table to be read. """ lock_and_call( lambda: self._impl.readTable(tableName), self._lock )
[ "def", "readTable", "(", "self", ",", "tableName", ")", ":", "lock_and_call", "(", "lambda", ":", "self", ".", "_impl", ".", "readTable", "(", "tableName", ")", ",", "self", ".", "_lock", ")" ]
Read the table corresponding to the specified name, equivalent to the AMPL statement: .. code-block:: ampl read table tableName; Args: tableName: Name of the table to be read.
[ "Read", "the", "table", "corresponding", "to", "the", "specified", "name", "equivalent", "to", "the", "AMPL", "statement", ":" ]
python
train
24
tensorflow/tensor2tensor
tensor2tensor/utils/data_reader.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/data_reader.py#L80-L164
def batching_scheme(batch_size, max_length, min_length_bucket, length_bucket_step, drop_long_sequences=False, shard_multiplier=1, length_multiplier=1, min_length=0): """A batching scheme based on model hyperparameters. Every batch contains a number of sequences divisible by `shard_multiplier`. Args: batch_size: int, total number of tokens in a batch. max_length: int, sequences longer than this will be skipped. Defaults to batch_size. min_length_bucket: int length_bucket_step: float greater than 1.0 drop_long_sequences: bool, if True, then sequences longer than `max_length` are dropped. This prevents generating batches with more than the usual number of tokens, which can cause out-of-memory errors. shard_multiplier: an integer increasing the batch_size to suit splitting across datashards. length_multiplier: an integer multiplier that is used to increase the batch sizes and sequence length tolerance. min_length: int, sequences shorter than this will be skipped. Returns: A dictionary with parameters that can be passed to input_pipeline: * boundaries: list of bucket boundaries * batch_sizes: list of batch sizes for each length bucket * max_length: int, maximum length of an example Raises: ValueError: If min_length > max_length """ max_length = max_length or batch_size if max_length < min_length: raise ValueError("max_length must be greater or equal to min_length") boundaries = _bucket_boundaries(max_length, min_length_bucket, length_bucket_step) boundaries = [boundary * length_multiplier for boundary in boundaries] max_length *= length_multiplier batch_sizes = [ max(1, batch_size // length) for length in boundaries + [max_length] ] max_batch_size = max(batch_sizes) # Since the Datasets API only allows a single constant for window_size, # and it needs divide all bucket_batch_sizes, we pick a highly-composite # window size and then round down all batch sizes to divisors of that window # size, so that a window can always be divided evenly into batches. # TODO(noam): remove this when Dataset API improves. highly_composite_numbers = [ 1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240, 360, 720, 840, 1260, 1680, 2520, 5040, 7560, 10080, 15120, 20160, 25200, 27720, 45360, 50400, 55440, 83160, 110880, 166320, 221760, 277200, 332640, 498960, 554400, 665280, 720720, 1081080, 1441440, 2162160, 2882880, 3603600, 4324320, 6486480, 7207200, 8648640, 10810800, 14414400, 17297280, 21621600, 32432400, 36756720, 43243200, 61261200, 73513440, 110270160 ] window_size = max( [i for i in highly_composite_numbers if i <= 3 * max_batch_size]) divisors = [i for i in range(1, window_size + 1) if window_size % i == 0] batch_sizes = [max([d for d in divisors if d <= bs]) for bs in batch_sizes] window_size *= shard_multiplier batch_sizes = [bs * shard_multiplier for bs in batch_sizes] # The Datasets API splits one window into multiple batches, which # produces runs of many consecutive batches of the same size. This # is bad for training. To solve this, we will shuffle the batches # using a queue which must be several times as large as the maximum # number of batches per window. max_batches_per_window = window_size // min(batch_sizes) shuffle_queue_size = max_batches_per_window * 3 ret = { "boundaries": boundaries, "batch_sizes": batch_sizes, "min_length": min_length, "max_length": (max_length if drop_long_sequences else 10**9), "shuffle_queue_size": shuffle_queue_size, } return ret
[ "def", "batching_scheme", "(", "batch_size", ",", "max_length", ",", "min_length_bucket", ",", "length_bucket_step", ",", "drop_long_sequences", "=", "False", ",", "shard_multiplier", "=", "1", ",", "length_multiplier", "=", "1", ",", "min_length", "=", "0", ")", ":", "max_length", "=", "max_length", "or", "batch_size", "if", "max_length", "<", "min_length", ":", "raise", "ValueError", "(", "\"max_length must be greater or equal to min_length\"", ")", "boundaries", "=", "_bucket_boundaries", "(", "max_length", ",", "min_length_bucket", ",", "length_bucket_step", ")", "boundaries", "=", "[", "boundary", "*", "length_multiplier", "for", "boundary", "in", "boundaries", "]", "max_length", "*=", "length_multiplier", "batch_sizes", "=", "[", "max", "(", "1", ",", "batch_size", "//", "length", ")", "for", "length", "in", "boundaries", "+", "[", "max_length", "]", "]", "max_batch_size", "=", "max", "(", "batch_sizes", ")", "# Since the Datasets API only allows a single constant for window_size,", "# and it needs divide all bucket_batch_sizes, we pick a highly-composite", "# window size and then round down all batch sizes to divisors of that window", "# size, so that a window can always be divided evenly into batches.", "# TODO(noam): remove this when Dataset API improves.", "highly_composite_numbers", "=", "[", "1", ",", "2", ",", "4", ",", "6", ",", "12", ",", "24", ",", "36", ",", "48", ",", "60", ",", "120", ",", "180", ",", "240", ",", "360", ",", "720", ",", "840", ",", "1260", ",", "1680", ",", "2520", ",", "5040", ",", "7560", ",", "10080", ",", "15120", ",", "20160", ",", "25200", ",", "27720", ",", "45360", ",", "50400", ",", "55440", ",", "83160", ",", "110880", ",", "166320", ",", "221760", ",", "277200", ",", "332640", ",", "498960", ",", "554400", ",", "665280", ",", "720720", ",", "1081080", ",", "1441440", ",", "2162160", ",", "2882880", ",", "3603600", ",", "4324320", ",", "6486480", ",", "7207200", ",", "8648640", ",", "10810800", ",", "14414400", ",", "17297280", ",", "21621600", ",", "32432400", ",", "36756720", ",", "43243200", ",", "61261200", ",", "73513440", ",", "110270160", "]", "window_size", "=", "max", "(", "[", "i", "for", "i", "in", "highly_composite_numbers", "if", "i", "<=", "3", "*", "max_batch_size", "]", ")", "divisors", "=", "[", "i", "for", "i", "in", "range", "(", "1", ",", "window_size", "+", "1", ")", "if", "window_size", "%", "i", "==", "0", "]", "batch_sizes", "=", "[", "max", "(", "[", "d", "for", "d", "in", "divisors", "if", "d", "<=", "bs", "]", ")", "for", "bs", "in", "batch_sizes", "]", "window_size", "*=", "shard_multiplier", "batch_sizes", "=", "[", "bs", "*", "shard_multiplier", "for", "bs", "in", "batch_sizes", "]", "# The Datasets API splits one window into multiple batches, which", "# produces runs of many consecutive batches of the same size. This", "# is bad for training. To solve this, we will shuffle the batches", "# using a queue which must be several times as large as the maximum", "# number of batches per window.", "max_batches_per_window", "=", "window_size", "//", "min", "(", "batch_sizes", ")", "shuffle_queue_size", "=", "max_batches_per_window", "*", "3", "ret", "=", "{", "\"boundaries\"", ":", "boundaries", ",", "\"batch_sizes\"", ":", "batch_sizes", ",", "\"min_length\"", ":", "min_length", ",", "\"max_length\"", ":", "(", "max_length", "if", "drop_long_sequences", "else", "10", "**", "9", ")", ",", "\"shuffle_queue_size\"", ":", "shuffle_queue_size", ",", "}", "return", "ret" ]
A batching scheme based on model hyperparameters. Every batch contains a number of sequences divisible by `shard_multiplier`. Args: batch_size: int, total number of tokens in a batch. max_length: int, sequences longer than this will be skipped. Defaults to batch_size. min_length_bucket: int length_bucket_step: float greater than 1.0 drop_long_sequences: bool, if True, then sequences longer than `max_length` are dropped. This prevents generating batches with more than the usual number of tokens, which can cause out-of-memory errors. shard_multiplier: an integer increasing the batch_size to suit splitting across datashards. length_multiplier: an integer multiplier that is used to increase the batch sizes and sequence length tolerance. min_length: int, sequences shorter than this will be skipped. Returns: A dictionary with parameters that can be passed to input_pipeline: * boundaries: list of bucket boundaries * batch_sizes: list of batch sizes for each length bucket * max_length: int, maximum length of an example Raises: ValueError: If min_length > max_length
[ "A", "batching", "scheme", "based", "on", "model", "hyperparameters", "." ]
python
train
44.223529
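A toy walk-through of the core arithmetic (the numbers are illustrative, and only an excerpt of the highly composite table is used): per-bucket batch sizes target a roughly constant token count, then each is rounded down to a divisor of one shared window size so every window splits evenly into batches.

    batch_size, max_length = 4096, 128
    boundaries = [8, 16, 32, 64]                     # example bucket upper bounds
    batch_sizes = [max(1, batch_size // length)
                   for length in boundaries + [max_length]]
    print(batch_sizes)                               # [512, 256, 128, 64, 32]

    highly_composite = [720, 840, 1260, 1680, 2520]  # excerpt of the table above
    window_size = max(i for i in highly_composite if i <= 3 * max(batch_sizes))
    print(window_size)                               # 1260
    divisors = [i for i in range(1, window_size + 1) if window_size % i == 0]
    batch_sizes = [max(d for d in divisors if d <= bs) for bs in batch_sizes]
    print(batch_sizes)                               # [420, 252, 126, 63, 30]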
evhub/coconut
coconut/constants.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/constants.py#L33-L35
def fixpath(path): """Uniformly format a path.""" return os.path.normpath(os.path.realpath(os.path.expanduser(path)))
[ "def", "fixpath", "(", "path", ")", ":", "return", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "expanduser", "(", "path", ")", ")", ")" ]
Uniformly format a path.
[ "Uniformly", "format", "a", "path", "." ]
python
train
41
nilp0inter/cpe
cpe/cpe.py
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpe.py#L389-L408
def _create_cpe_parts(self, system, components): """ Create the structure to store the input type of system associated with components of CPE Name (hardware, operating system and software). :param string system: type of system associated with CPE Name :param dict components: CPE Name components to store :returns: None :exception: KeyError - incorrect system """ if system not in CPEComponent.SYSTEM_VALUES: errmsg = "Key '{0}' is not exist".format(system) raise ValueError(errmsg) elements = [] elements.append(components) pk = CPE._system_and_parts[system] self[pk] = elements
[ "def", "_create_cpe_parts", "(", "self", ",", "system", ",", "components", ")", ":", "if", "system", "not", "in", "CPEComponent", ".", "SYSTEM_VALUES", ":", "errmsg", "=", "\"Key '{0}' is not exist\"", ".", "format", "(", "system", ")", "raise", "ValueError", "(", "errmsg", ")", "elements", "=", "[", "]", "elements", ".", "append", "(", "components", ")", "pk", "=", "CPE", ".", "_system_and_parts", "[", "system", "]", "self", "[", "pk", "]", "=", "elements" ]
Create the structure to store the input type of system associated with components of CPE Name (hardware, operating system and software). :param string system: type of system associated with CPE Name :param dict components: CPE Name components to store :returns: None :exception: KeyError - incorrect system
[ "Create", "the", "structure", "to", "store", "the", "input", "type", "of", "system", "associated", "with", "components", "of", "CPE", "Name", "(", "hardware", "operating", "system", "and", "software", ")", "." ]
python
train
34.6
seequent/properties
properties/basic.py
https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/basic.py#L264-L275
def equal(self, value_a, value_b): #pylint: disable=no-self-use """Check if two valid Property values are equal .. note:: This method assumes that :code:`None` and :code:`properties.undefined` are never passed in as values """ equal = value_a == value_b if hasattr(equal, '__iter__'): return all(equal) return equal
[ "def", "equal", "(", "self", ",", "value_a", ",", "value_b", ")", ":", "#pylint: disable=no-self-use", "equal", "=", "value_a", "==", "value_b", "if", "hasattr", "(", "equal", ",", "'__iter__'", ")", ":", "return", "all", "(", "equal", ")", "return", "equal" ]
Check if two valid Property values are equal .. note:: This method assumes that :code:`None` and :code:`properties.undefined` are never passed in as values
[ "Check", "if", "two", "valid", "Property", "values", "are", "equal" ]
python
train
35.833333
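The hasattr check above is what makes the comparison safe for array-valued properties: numpy's == is elementwise, so the result has to be collapsed with all() rather than treated as a single bool. A minimal illustration:

    import numpy as np

    a = np.array([1.0, 2.0, 3.0])
    b = np.array([1.0, 2.0, 3.0])
    equal = a == b
    print(equal)                                                  # [ True  True  True]
    print(all(equal) if hasattr(equal, '__iter__') else equal)    # True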
pandeylab/pythomics
pythomics/genomics/structures.py
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/structures.py#L72-L80
def add_entry(self, row): """This will parse the VCF entry and also store it within the VCFFile. It will also return the VCFEntry as well. """ var_call = VCFEntry(self.individuals) var_call.parse_entry( row ) self.entries[(var_call.chrom, var_call.pos)] = var_call return var_call
[ "def", "add_entry", "(", "self", ",", "row", ")", ":", "var_call", "=", "VCFEntry", "(", "self", ".", "individuals", ")", "var_call", ".", "parse_entry", "(", "row", ")", "self", ".", "entries", "[", "(", "var_call", ".", "chrom", ",", "var_call", ".", "pos", ")", "]", "=", "var_call", "return", "var_call" ]
This will parse the VCF entry and also store it within the VCFFile. It will also return the VCFEntry as well.
[ "This", "will", "parse", "the", "VCF", "entry", "and", "also", "store", "it", "within", "the", "VCFFile", ".", "It", "will", "also", "return", "the", "VCFEntry", "as", "well", "." ]
python
train
36.555556
acorg/dark-matter
dark/sam.py
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/sam.py#L449-L653
def queries(self, rcSuffix='', rcNeeded=False, padChar='-', queryInsertionChar='N', unknownQualityChar='!', allowDuplicateIds=False, addAlignment=False): """ Produce padded (with gaps) queries according to the CIGAR string and reference sequence length for each matching query sequence. @param rcSuffix: A C{str} to add to the end of query names that are reverse complemented. This is added before the /1, /2, etc., that are added for duplicated ids (if there are duplicates and C{allowDuplicateIds} is C{False}. @param rcNeeded: If C{True}, queries that are flagged as matching when reverse complemented should have reverse complementing when preparing the output sequences. This must be used if the program that created the SAM/BAM input flags reversed matches but does not also store the reverse complemented query. @param padChar: A C{str} of length one to use to pad queries with to make them the same length as the reference sequence. @param queryInsertionChar: A C{str} of length one to use to insert into queries when the CIGAR string indicates that the alignment of a query would cause a deletion in the reference. This character is inserted as a 'missing' query character (i.e., a base that can be assumed to have been lost due to an error) whose existence is necessary for the match to continue. @param unknownQualityChar: The character to put into the quality string when unknown bases are inserted in the query or the query is padded on the left/right with gaps. @param allowDuplicateIds: If C{True}, repeated query ids (due to secondary or supplemental matches) will not have /1, /2, etc. appended to their ids. So repeated ids may appear in the yielded FASTA. @param addAlignment: If C{True} the reads yielded by the returned generator will also have an C{alignment} attribute, being the C{pysam.AlignedSegment} for the query. @raises InvalidSAM: If a query has an empty SEQ field and either there is no previous alignment or the alignment is not marked as secondary or supplementary. @return: A generator that yields C{Read} instances that are padded with gap characters to align them to the length of the reference sequence. See C{addAlignment}, above, to yield reads with the corresponding C{pysam.AlignedSegment}. """ referenceLength = self.referenceLength # Hold the count for each id so we can add /1, /2 etc to duplicate # ids (unless --allowDuplicateIds was given). idCount = Counter() MATCH_OPERATIONS = {CMATCH, CEQUAL, CDIFF} for lineNumber, alignment in enumerate( self.samFilter.alignments(), start=1): query = alignment.query_sequence quality = ''.join(chr(q + 33) for q in alignment.query_qualities) if alignment.is_reverse: if rcNeeded: query = DNARead('id', query).reverseComplement().sequence quality = quality[::-1] if rcSuffix: alignment.query_name += rcSuffix # Adjust the query id if it's a duplicate and we're not allowing # duplicates. if allowDuplicateIds: queryId = alignment.query_name else: count = idCount[alignment.query_name] idCount[alignment.query_name] += 1 queryId = alignment.query_name + ( '' if count == 0 else '/%d' % count) referenceStart = alignment.reference_start atStart = True queryIndex = 0 referenceIndex = referenceStart alignedSequence = '' alignedQuality = '' for operation, length in alignment.cigartuples: # The operations are tested in the order they appear in # https://samtools.github.io/hts-specs/SAMv1.pdf It would be # more efficient to test them in order of frequency of # occurrence. 
if operation in MATCH_OPERATIONS: atStart = False alignedSequence += query[queryIndex:queryIndex + length] alignedQuality += quality[queryIndex:queryIndex + length] elif operation == CINS: # Insertion to the reference. This consumes query bases but # we don't output them because the reference cannot be # changed. I.e., these bases in the query would need to be # inserted into the reference. Remove these bases from the # query but record what would have been inserted into the # reference. atStart = False self.referenceInsertions[queryId].append( (referenceIndex, query[queryIndex:queryIndex + length])) elif operation == CDEL: # Delete from the reference. Some bases from the reference # would need to be deleted to continue the match. So we put # an insertion into the query to compensate. atStart = False alignedSequence += queryInsertionChar * length alignedQuality += unknownQualityChar * length elif operation == CREF_SKIP: # Skipped reference. Opens a gap in the query. For # mRNA-to-genome alignment, an N operation represents an # intron. For other types of alignments, the # interpretation of N is not defined. So this is unlikely # to occur. atStart = False alignedSequence += queryInsertionChar * length alignedQuality += unknownQualityChar * length elif operation == CSOFT_CLIP: # Bases in the query that are not part of the match. We # remove these from the query if they protrude before the # start or after the end of the reference. According to the # SAM docs, 'S' operations may only have 'H' operations # between them and the ends of the CIGAR string. if atStart: # Don't set atStart=False, in case there's another 'S' # operation. unwantedLeft = length - referenceStart if unwantedLeft > 0: # The query protrudes left. Copy its right part. alignedSequence += query[ queryIndex + unwantedLeft:queryIndex + length] alignedQuality += quality[ queryIndex + unwantedLeft:queryIndex + length] referenceStart = 0 else: referenceStart -= length alignedSequence += query[ queryIndex:queryIndex + length] alignedQuality += quality[ queryIndex:queryIndex + length] else: unwantedRight = ( (referenceStart + len(alignedSequence) + length) - referenceLength) if unwantedRight > 0: # The query protrudes right. Copy its left part. alignedSequence += query[ queryIndex:queryIndex + length - unwantedRight] alignedQuality += quality[ queryIndex:queryIndex + length - unwantedRight] else: alignedSequence += query[ queryIndex:queryIndex + length] alignedQuality += quality[ queryIndex:queryIndex + length] elif operation == CHARD_CLIP: # Some bases have been completely removed from the query. # This (H) can only be present as the first and/or last # operation. There is nothing to do as the bases are simply # not present in the query string in the SAM/BAM file. pass elif operation == CPAD: # This is "silent deletion from the padded reference", # which consumes neither query nor reference. atStart = False else: raise ValueError('Unknown CIGAR operation:', operation) if operation in _CONSUMES_QUERY: queryIndex += length if operation in _CONSUMES_REFERENCE: referenceIndex += length if queryIndex != len(query): # Oops, we did not consume the entire query. raise ValueError( 'Query %r not fully consumed when parsing CIGAR string. ' 'Query %r (len %d), final query index %d, CIGAR: %r' % (alignment.query_name, query, len(query), queryIndex, alignment.cigartuples)) # We cannot test we consumed the entire reference. 
The CIGAR # string applies to (and exhausts) the query but is silent # about the part of the reference that lies to the right of the # aligned query. # Put gap characters before and after the aligned sequence so that # it is offset properly and matches the length of the reference. padRightLength = (referenceLength - (referenceStart + len(alignedSequence))) paddedSequence = (padChar * referenceStart + alignedSequence + padChar * padRightLength) paddedQuality = (unknownQualityChar * referenceStart + alignedQuality + unknownQualityChar * padRightLength) read = Read(queryId, paddedSequence, paddedQuality) if addAlignment: read.alignment = alignment yield read
[ "def", "queries", "(", "self", ",", "rcSuffix", "=", "''", ",", "rcNeeded", "=", "False", ",", "padChar", "=", "'-'", ",", "queryInsertionChar", "=", "'N'", ",", "unknownQualityChar", "=", "'!'", ",", "allowDuplicateIds", "=", "False", ",", "addAlignment", "=", "False", ")", ":", "referenceLength", "=", "self", ".", "referenceLength", "# Hold the count for each id so we can add /1, /2 etc to duplicate", "# ids (unless --allowDuplicateIds was given).", "idCount", "=", "Counter", "(", ")", "MATCH_OPERATIONS", "=", "{", "CMATCH", ",", "CEQUAL", ",", "CDIFF", "}", "for", "lineNumber", ",", "alignment", "in", "enumerate", "(", "self", ".", "samFilter", ".", "alignments", "(", ")", ",", "start", "=", "1", ")", ":", "query", "=", "alignment", ".", "query_sequence", "quality", "=", "''", ".", "join", "(", "chr", "(", "q", "+", "33", ")", "for", "q", "in", "alignment", ".", "query_qualities", ")", "if", "alignment", ".", "is_reverse", ":", "if", "rcNeeded", ":", "query", "=", "DNARead", "(", "'id'", ",", "query", ")", ".", "reverseComplement", "(", ")", ".", "sequence", "quality", "=", "quality", "[", ":", ":", "-", "1", "]", "if", "rcSuffix", ":", "alignment", ".", "query_name", "+=", "rcSuffix", "# Adjust the query id if it's a duplicate and we're not allowing", "# duplicates.", "if", "allowDuplicateIds", ":", "queryId", "=", "alignment", ".", "query_name", "else", ":", "count", "=", "idCount", "[", "alignment", ".", "query_name", "]", "idCount", "[", "alignment", ".", "query_name", "]", "+=", "1", "queryId", "=", "alignment", ".", "query_name", "+", "(", "''", "if", "count", "==", "0", "else", "'/%d'", "%", "count", ")", "referenceStart", "=", "alignment", ".", "reference_start", "atStart", "=", "True", "queryIndex", "=", "0", "referenceIndex", "=", "referenceStart", "alignedSequence", "=", "''", "alignedQuality", "=", "''", "for", "operation", ",", "length", "in", "alignment", ".", "cigartuples", ":", "# The operations are tested in the order they appear in", "# https://samtools.github.io/hts-specs/SAMv1.pdf It would be", "# more efficient to test them in order of frequency of", "# occurrence.", "if", "operation", "in", "MATCH_OPERATIONS", ":", "atStart", "=", "False", "alignedSequence", "+=", "query", "[", "queryIndex", ":", "queryIndex", "+", "length", "]", "alignedQuality", "+=", "quality", "[", "queryIndex", ":", "queryIndex", "+", "length", "]", "elif", "operation", "==", "CINS", ":", "# Insertion to the reference. This consumes query bases but", "# we don't output them because the reference cannot be", "# changed. I.e., these bases in the query would need to be", "# inserted into the reference. Remove these bases from the", "# query but record what would have been inserted into the", "# reference.", "atStart", "=", "False", "self", ".", "referenceInsertions", "[", "queryId", "]", ".", "append", "(", "(", "referenceIndex", ",", "query", "[", "queryIndex", ":", "queryIndex", "+", "length", "]", ")", ")", "elif", "operation", "==", "CDEL", ":", "# Delete from the reference. Some bases from the reference", "# would need to be deleted to continue the match. So we put", "# an insertion into the query to compensate.", "atStart", "=", "False", "alignedSequence", "+=", "queryInsertionChar", "*", "length", "alignedQuality", "+=", "unknownQualityChar", "*", "length", "elif", "operation", "==", "CREF_SKIP", ":", "# Skipped reference. Opens a gap in the query. For", "# mRNA-to-genome alignment, an N operation represents an", "# intron. 
For other types of alignments, the", "# interpretation of N is not defined. So this is unlikely", "# to occur.", "atStart", "=", "False", "alignedSequence", "+=", "queryInsertionChar", "*", "length", "alignedQuality", "+=", "unknownQualityChar", "*", "length", "elif", "operation", "==", "CSOFT_CLIP", ":", "# Bases in the query that are not part of the match. We", "# remove these from the query if they protrude before the", "# start or after the end of the reference. According to the", "# SAM docs, 'S' operations may only have 'H' operations", "# between them and the ends of the CIGAR string.", "if", "atStart", ":", "# Don't set atStart=False, in case there's another 'S'", "# operation.", "unwantedLeft", "=", "length", "-", "referenceStart", "if", "unwantedLeft", ">", "0", ":", "# The query protrudes left. Copy its right part.", "alignedSequence", "+=", "query", "[", "queryIndex", "+", "unwantedLeft", ":", "queryIndex", "+", "length", "]", "alignedQuality", "+=", "quality", "[", "queryIndex", "+", "unwantedLeft", ":", "queryIndex", "+", "length", "]", "referenceStart", "=", "0", "else", ":", "referenceStart", "-=", "length", "alignedSequence", "+=", "query", "[", "queryIndex", ":", "queryIndex", "+", "length", "]", "alignedQuality", "+=", "quality", "[", "queryIndex", ":", "queryIndex", "+", "length", "]", "else", ":", "unwantedRight", "=", "(", "(", "referenceStart", "+", "len", "(", "alignedSequence", ")", "+", "length", ")", "-", "referenceLength", ")", "if", "unwantedRight", ">", "0", ":", "# The query protrudes right. Copy its left part.", "alignedSequence", "+=", "query", "[", "queryIndex", ":", "queryIndex", "+", "length", "-", "unwantedRight", "]", "alignedQuality", "+=", "quality", "[", "queryIndex", ":", "queryIndex", "+", "length", "-", "unwantedRight", "]", "else", ":", "alignedSequence", "+=", "query", "[", "queryIndex", ":", "queryIndex", "+", "length", "]", "alignedQuality", "+=", "quality", "[", "queryIndex", ":", "queryIndex", "+", "length", "]", "elif", "operation", "==", "CHARD_CLIP", ":", "# Some bases have been completely removed from the query.", "# This (H) can only be present as the first and/or last", "# operation. There is nothing to do as the bases are simply", "# not present in the query string in the SAM/BAM file.", "pass", "elif", "operation", "==", "CPAD", ":", "# This is \"silent deletion from the padded reference\",", "# which consumes neither query nor reference.", "atStart", "=", "False", "else", ":", "raise", "ValueError", "(", "'Unknown CIGAR operation:'", ",", "operation", ")", "if", "operation", "in", "_CONSUMES_QUERY", ":", "queryIndex", "+=", "length", "if", "operation", "in", "_CONSUMES_REFERENCE", ":", "referenceIndex", "+=", "length", "if", "queryIndex", "!=", "len", "(", "query", ")", ":", "# Oops, we did not consume the entire query.", "raise", "ValueError", "(", "'Query %r not fully consumed when parsing CIGAR string. '", "'Query %r (len %d), final query index %d, CIGAR: %r'", "%", "(", "alignment", ".", "query_name", ",", "query", ",", "len", "(", "query", ")", ",", "queryIndex", ",", "alignment", ".", "cigartuples", ")", ")", "# We cannot test we consumed the entire reference. 
The CIGAR", "# string applies to (and exhausts) the query but is silent", "# about the part of the reference that lies to the right of the", "# aligned query.", "# Put gap characters before and after the aligned sequence so that", "# it is offset properly and matches the length of the reference.", "padRightLength", "=", "(", "referenceLength", "-", "(", "referenceStart", "+", "len", "(", "alignedSequence", ")", ")", ")", "paddedSequence", "=", "(", "padChar", "*", "referenceStart", "+", "alignedSequence", "+", "padChar", "*", "padRightLength", ")", "paddedQuality", "=", "(", "unknownQualityChar", "*", "referenceStart", "+", "alignedQuality", "+", "unknownQualityChar", "*", "padRightLength", ")", "read", "=", "Read", "(", "queryId", ",", "paddedSequence", ",", "paddedQuality", ")", "if", "addAlignment", ":", "read", ".", "alignment", "=", "alignment", "yield", "read" ]
Produce padded (with gaps) queries according to the CIGAR string and reference sequence length for each matching query sequence. @param rcSuffix: A C{str} to add to the end of query names that are reverse complemented. This is added before the /1, /2, etc., that are added for duplicated ids (if there are duplicates and C{allowDuplicateIds} is C{False}. @param rcNeeded: If C{True}, queries that are flagged as matching when reverse complemented should have reverse complementing when preparing the output sequences. This must be used if the program that created the SAM/BAM input flags reversed matches but does not also store the reverse complemented query. @param padChar: A C{str} of length one to use to pad queries with to make them the same length as the reference sequence. @param queryInsertionChar: A C{str} of length one to use to insert into queries when the CIGAR string indicates that the alignment of a query would cause a deletion in the reference. This character is inserted as a 'missing' query character (i.e., a base that can be assumed to have been lost due to an error) whose existence is necessary for the match to continue. @param unknownQualityChar: The character to put into the quality string when unknown bases are inserted in the query or the query is padded on the left/right with gaps. @param allowDuplicateIds: If C{True}, repeated query ids (due to secondary or supplemental matches) will not have /1, /2, etc. appended to their ids. So repeated ids may appear in the yielded FASTA. @param addAlignment: If C{True} the reads yielded by the returned generator will also have an C{alignment} attribute, being the C{pysam.AlignedSegment} for the query. @raises InvalidSAM: If a query has an empty SEQ field and either there is no previous alignment or the alignment is not marked as secondary or supplementary. @return: A generator that yields C{Read} instances that are padded with gap characters to align them to the length of the reference sequence. See C{addAlignment}, above, to yield reads with the corresponding C{pysam.AlignedSegment}.
[ "Produce", "padded", "(", "with", "gaps", ")", "queries", "according", "to", "the", "CIGAR", "string", "and", "reference", "sequence", "length", "for", "each", "matching", "query", "sequence", "." ]
python
train
51.814634
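Below is a minimal, self-contained sketch of the padding idea the method above implements, not the dark-matter API itself: matched bases are copied, reference deletions become placeholder characters in the query, and both flanks are filled with the pad character. The function name and the two-operation CIGAR are illustrative assumptions.

def pad_query(query, reference_start, reference_length, cigar,
              pad_char='-', insertion_char='N'):
    # Illustrative only: supports just 'M' (match/mismatch, consumes query)
    # and 'D' (deletion from the reference, inserts placeholder bases).
    aligned = ''
    query_index = 0
    for op, length in cigar:
        if op == 'M':
            aligned += query[query_index:query_index + length]
            query_index += length
        elif op == 'D':
            aligned += insertion_char * length
    right_pad = reference_length - (reference_start + len(aligned))
    return pad_char * reference_start + aligned + pad_char * right_pad

# 'ACGT' aligned at reference position 2 of a length-10 reference, spanning a
# one-base reference deletion:
print(pad_query('ACGT', 2, 10, [('M', 2), ('D', 1), ('M', 2)]))  # --ACNGT---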
dslackw/slpkg
slpkg/pkg/build.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/pkg/build.py#L187-L204
def build_time(start_time): """ Calculate build time per package """ diff_time = round(time.time() - start_time, 2) if diff_time <= 59.99: sum_time = str(diff_time) + " Sec" elif diff_time > 59.99 and diff_time <= 3599.99: sum_time = round(diff_time / 60, 2) sum_time_list = re.findall(r"\d+", str(sum_time)) sum_time = ("{0} Min {1} Sec".format(sum_time_list[0], sum_time_list[1])) elif diff_time > 3599.99: sum_time = round(diff_time / 3600, 2) sum_time_list = re.findall(r"\d+", str(sum_time)) sum_time = ("{0} Hours {1} Min".format(sum_time_list[0], sum_time_list[1])) return sum_time
[ "def", "build_time", "(", "start_time", ")", ":", "diff_time", "=", "round", "(", "time", ".", "time", "(", ")", "-", "start_time", ",", "2", ")", "if", "diff_time", "<=", "59.99", ":", "sum_time", "=", "str", "(", "diff_time", ")", "+", "\" Sec\"", "elif", "diff_time", ">", "59.99", "and", "diff_time", "<=", "3599.99", ":", "sum_time", "=", "round", "(", "diff_time", "/", "60", ",", "2", ")", "sum_time_list", "=", "re", ".", "findall", "(", "r\"\\d+\"", ",", "str", "(", "sum_time", ")", ")", "sum_time", "=", "(", "\"{0} Min {1} Sec\"", ".", "format", "(", "sum_time_list", "[", "0", "]", ",", "sum_time_list", "[", "1", "]", ")", ")", "elif", "diff_time", ">", "3599.99", ":", "sum_time", "=", "round", "(", "diff_time", "/", "3600", ",", "2", ")", "sum_time_list", "=", "re", ".", "findall", "(", "r\"\\d+\"", ",", "str", "(", "sum_time", ")", ")", "sum_time", "=", "(", "\"{0} Hours {1} Min\"", ".", "format", "(", "sum_time_list", "[", "0", "]", ",", "sum_time_list", "[", "1", "]", ")", ")", "return", "sum_time" ]
Calculate build time per package
[ "Calculate", "build", "time", "per", "package" ]
python
train
41.722222
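A hedged usage sketch for the helper above; the import path is assumed from the record's module path, and the printed string depends on how long the build takes.

import time
from slpkg.pkg.build import build_time  # import path assumed from the module path above

start_time = time.time()
# ... build the package here ...
print(build_time(start_time))  # e.g. "0.42 Sec" for a sub-minute build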
NLeSC/noodles
noodles/patterns/functional_patterns.py
https://github.com/NLeSC/noodles/blob/3759e24e6e54a3a1a364431309dbb1061f617c04/noodles/patterns/functional_patterns.py#L26-L39
def any(pred: Callable, xs: Iterable):
    """
    Check if at least one element of the iterable `xs`
    fulfills predicate `pred`.

    :param pred: predicate function.
    :param xs: iterable object.

    :returns: boolean
    """
    b = find_first(pred, xs)

    return True if b is not None else False
[ "def", "any", "(", "pred", ":", "Callable", ",", "xs", ":", "Iterable", ")", ":", "b", "=", "find_first", "(", "pred", ",", "xs", ")", "return", "True", "if", "b", "is", "not", "None", "else", "False" ]
Check if at least one element of the iterable `xs`
fulfills predicate `pred`.

:param pred: predicate function.
:param xs: iterable object.

:returns: boolean
[ "Check", "if", "at", "least", "one", "element", "of", "the", "iterable", "xs", "fullfills", "predicate", "pred", "." ]
python
train
22
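A plain-Python restatement of the check, with a stand-in find_first, since the real function is meant to run inside a Noodles workflow; the inputs are arbitrary.

def find_first(pred, xs):
    # Stand-in for noodles' find_first: first element satisfying pred, else None.
    return next((x for x in xs if pred(x)), None)

def any_(pred, xs):
    b = find_first(pred, xs)
    return True if b is not None else False

print(any_(lambda x: x % 2 == 0, [1, 3, 5, 6]))  # True
print(any_(lambda x: x % 2 == 0, [1, 3, 5]))     # False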
AguaClara/aguaclara
aguaclara/research/environmental_processes_analysis.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/research/environmental_processes_analysis.py#L341-L371
def Solver_CMFR_N(t_data, C_data, theta_guess, C_bar_guess): """Use non-linear least squares to fit the function Tracer_CMFR_N(t_seconds, t_bar, C_bar, N) to reactor data. :param t_data: Array of times with units :type t_data: float list :param C_data: Array of tracer concentration data with units :type C_data: float list :param theta_guess: Estimate of time spent in one CMFR with units. :type theta_guess: float :param C_bar_guess: Estimate of average concentration with units ((mass of tracer)/(volume of one CMFR)) :type C_bar_guess: float :return: tuple of * **theta** (*float*)- Residence time in seconds * **C_bar** (*float*) - Average concentration with same units as C_bar_guess * **N** (*float*)- Number of CMFRS in series that best fit the data """ C_unitless = C_data.magnitude C_units = str(C_bar_guess.units) t_seconds = (t_data.to(u.s)).magnitude # assume that a guess of 1 reactor in series is close enough to get a solution p0 = [theta_guess.to(u.s).magnitude, C_bar_guess.magnitude,1] popt, pcov = curve_fit(Tracer_CMFR_N, t_seconds, C_unitless, p0) Solver_theta = popt[0]*u.s Solver_C_bar = popt[1]*u(C_units) Solver_N = popt[2] Reactor_results = collections.namedtuple('Reactor_results','theta C_bar N') CMFR = Reactor_results(theta=Solver_theta, C_bar=Solver_C_bar, N=Solver_N) return CMFR
[ "def", "Solver_CMFR_N", "(", "t_data", ",", "C_data", ",", "theta_guess", ",", "C_bar_guess", ")", ":", "C_unitless", "=", "C_data", ".", "magnitude", "C_units", "=", "str", "(", "C_bar_guess", ".", "units", ")", "t_seconds", "=", "(", "t_data", ".", "to", "(", "u", ".", "s", ")", ")", ".", "magnitude", "# assume that a guess of 1 reactor in series is close enough to get a solution", "p0", "=", "[", "theta_guess", ".", "to", "(", "u", ".", "s", ")", ".", "magnitude", ",", "C_bar_guess", ".", "magnitude", ",", "1", "]", "popt", ",", "pcov", "=", "curve_fit", "(", "Tracer_CMFR_N", ",", "t_seconds", ",", "C_unitless", ",", "p0", ")", "Solver_theta", "=", "popt", "[", "0", "]", "*", "u", ".", "s", "Solver_C_bar", "=", "popt", "[", "1", "]", "*", "u", "(", "C_units", ")", "Solver_N", "=", "popt", "[", "2", "]", "Reactor_results", "=", "collections", ".", "namedtuple", "(", "'Reactor_results'", ",", "'theta C_bar N'", ")", "CMFR", "=", "Reactor_results", "(", "theta", "=", "Solver_theta", ",", "C_bar", "=", "Solver_C_bar", ",", "N", "=", "Solver_N", ")", "return", "CMFR" ]
Use non-linear least squares to fit the function Tracer_CMFR_N(t_seconds, t_bar, C_bar, N) to reactor data. :param t_data: Array of times with units :type t_data: float list :param C_data: Array of tracer concentration data with units :type C_data: float list :param theta_guess: Estimate of time spent in one CMFR with units. :type theta_guess: float :param C_bar_guess: Estimate of average concentration with units ((mass of tracer)/(volume of one CMFR)) :type C_bar_guess: float :return: tuple of * **theta** (*float*)- Residence time in seconds * **C_bar** (*float*) - Average concentration with same units as C_bar_guess * **N** (*float*)- Number of CMFRS in series that best fit the data
[ "Use", "non", "-", "linear", "least", "squares", "to", "fit", "the", "function", "Tracer_CMFR_N", "(", "t_seconds", "t_bar", "C_bar", "N", ")", "to", "reactor", "data", "." ]
python
train
45.322581
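A hedged sketch of the fitting approach the solver wraps: scipy's curve_fit against an N-tanks-in-series tracer model. The model function and synthetic data below are stand-ins, not the library's Tracer_CMFR_N, and the pint unit handling is omitted.

import numpy as np
from scipy import special
from scipy.optimize import curve_fit

def cmfr_n(t, t_bar, c_bar, n):
    # Stand-in N-CMFRs-in-series response:
    # C_bar * N^N * (t/t_bar)^(N-1) * exp(-N*t/t_bar) / Gamma(N)
    return c_bar * n ** n * (t / t_bar) ** (n - 1) * np.exp(-n * t / t_bar) / special.gamma(n)

t = np.linspace(1, 600, 120)      # seconds, synthetic sampling times
c = cmfr_n(t, 180.0, 5.0, 3.0)    # synthetic tracer concentrations
(theta, c_bar, n), _ = curve_fit(cmfr_n, t, c, p0=[150.0, 4.0, 1.0])
print(theta, c_bar, n)            # parameters recovered close to (180, 5, 3)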
PmagPy/PmagPy
programs/demag_gui.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/demag_gui.py#L1835-L1873
def plot_high_levels_data(self): """ Complicated function that draws the high level mean plot on canvas4, draws all specimen, sample, or site interpretations according to the UPPER_LEVEL_SHOW variable, draws the fisher mean or fisher mean by polarity of all interpretations displayed, draws sample orientation check if on, and if interpretation editor is open it calls the interpretation editor to have it draw the same things. """ # self.toolbar4.home() high_level = self.level_box.GetValue() self.UPPER_LEVEL_NAME = self.level_names.GetValue() self.UPPER_LEVEL_MEAN = self.mean_type_box.GetValue() draw_net(self.high_level_eqarea) what_is_it = self.level_box.GetValue()+": "+self.level_names.GetValue() self.high_level_eqarea.text(-1.2, 1.15, what_is_it, { 'family': self.font_type, 'fontsize': 10*self.GUI_RESOLUTION, 'style': 'normal', 'va': 'center', 'ha': 'left'}) if self.ie_open: self.ie.draw_net() self.ie.write(what_is_it) # plot elements directions self.plot_high_level_elements() # plot elements means self.plot_high_level_means() # update high level stats after plotting in case of change self.update_high_level_stats() # check sample orietation if self.check_orient_on: self.calc_and_plot_sample_orient_check() self.canvas4.draw() if self.ie_open: self.ie.draw()
[ "def", "plot_high_levels_data", "(", "self", ")", ":", "# self.toolbar4.home()", "high_level", "=", "self", ".", "level_box", ".", "GetValue", "(", ")", "self", ".", "UPPER_LEVEL_NAME", "=", "self", ".", "level_names", ".", "GetValue", "(", ")", "self", ".", "UPPER_LEVEL_MEAN", "=", "self", ".", "mean_type_box", ".", "GetValue", "(", ")", "draw_net", "(", "self", ".", "high_level_eqarea", ")", "what_is_it", "=", "self", ".", "level_box", ".", "GetValue", "(", ")", "+", "\": \"", "+", "self", ".", "level_names", ".", "GetValue", "(", ")", "self", ".", "high_level_eqarea", ".", "text", "(", "-", "1.2", ",", "1.15", ",", "what_is_it", ",", "{", "'family'", ":", "self", ".", "font_type", ",", "'fontsize'", ":", "10", "*", "self", ".", "GUI_RESOLUTION", ",", "'style'", ":", "'normal'", ",", "'va'", ":", "'center'", ",", "'ha'", ":", "'left'", "}", ")", "if", "self", ".", "ie_open", ":", "self", ".", "ie", ".", "draw_net", "(", ")", "self", ".", "ie", ".", "write", "(", "what_is_it", ")", "# plot elements directions", "self", ".", "plot_high_level_elements", "(", ")", "# plot elements means", "self", ".", "plot_high_level_means", "(", ")", "# update high level stats after plotting in case of change", "self", ".", "update_high_level_stats", "(", ")", "# check sample orietation", "if", "self", ".", "check_orient_on", ":", "self", ".", "calc_and_plot_sample_orient_check", "(", ")", "self", ".", "canvas4", ".", "draw", "(", ")", "if", "self", ".", "ie_open", ":", "self", ".", "ie", ".", "draw", "(", ")" ]
Complicated function that draws the high level mean plot on canvas4, draws all specimen, sample, or site interpretations according to the UPPER_LEVEL_SHOW variable, draws the fisher mean or fisher mean by polarity of all interpretations displayed, draws sample orientation check if on, and if interpretation editor is open it calls the interpretation editor to have it draw the same things.
[ "Complicated", "function", "that", "draws", "the", "high", "level", "mean", "plot", "on", "canvas4", "draws", "all", "specimen", "sample", "or", "site", "interpretations", "according", "to", "the", "UPPER_LEVEL_SHOW", "variable", "draws", "the", "fisher", "mean", "or", "fisher", "mean", "by", "polarity", "of", "all", "interpretations", "displayed", "draws", "sample", "orientation", "check", "if", "on", "and", "if", "interpretation", "editor", "is", "open", "it", "calls", "the", "interpretation", "editor", "to", "have", "it", "draw", "the", "same", "things", "." ]
python
train
39.384615
sanger-pathogens/circlator
circlator/merge.py
https://github.com/sanger-pathogens/circlator/blob/a4befb8c9dbbcd4b3ad1899a95aa3e689d58b638/circlator/merge.py#L451-L473
def _orientation_ok_to_bridge_contigs(self, start_hit, end_hit): '''Returns True iff the orientation of the hits means that the query contig of both hits can bridge the reference contigs of the hits''' assert start_hit.qry_name == end_hit.qry_name if start_hit.ref_name == end_hit.ref_name: return False if ( (self._is_at_ref_end(start_hit) and start_hit.on_same_strand()) or (self._is_at_ref_start(start_hit) and not start_hit.on_same_strand()) ): start_hit_ok = True else: start_hit_ok = False if ( (self._is_at_ref_start(end_hit) and end_hit.on_same_strand()) or (self._is_at_ref_end(end_hit) and not end_hit.on_same_strand()) ): end_hit_ok = True else: end_hit_ok = False return start_hit_ok and end_hit_ok
[ "def", "_orientation_ok_to_bridge_contigs", "(", "self", ",", "start_hit", ",", "end_hit", ")", ":", "assert", "start_hit", ".", "qry_name", "==", "end_hit", ".", "qry_name", "if", "start_hit", ".", "ref_name", "==", "end_hit", ".", "ref_name", ":", "return", "False", "if", "(", "(", "self", ".", "_is_at_ref_end", "(", "start_hit", ")", "and", "start_hit", ".", "on_same_strand", "(", ")", ")", "or", "(", "self", ".", "_is_at_ref_start", "(", "start_hit", ")", "and", "not", "start_hit", ".", "on_same_strand", "(", ")", ")", ")", ":", "start_hit_ok", "=", "True", "else", ":", "start_hit_ok", "=", "False", "if", "(", "(", "self", ".", "_is_at_ref_start", "(", "end_hit", ")", "and", "end_hit", ".", "on_same_strand", "(", ")", ")", "or", "(", "self", ".", "_is_at_ref_end", "(", "end_hit", ")", "and", "not", "end_hit", ".", "on_same_strand", "(", ")", ")", ")", ":", "end_hit_ok", "=", "True", "else", ":", "end_hit_ok", "=", "False", "return", "start_hit_ok", "and", "end_hit_ok" ]
Returns True iff the orientation of the hits means that the query contig of both hits can bridge the reference contigs of the hits
[ "Returns", "True", "iff", "the", "orientation", "of", "the", "hits", "means", "that", "the", "query", "contig", "of", "both", "hits", "can", "bridge", "the", "reference", "contigs", "of", "the", "hits" ]
python
train
38.217391
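A stand-alone restatement of the orientation logic using plain booleans instead of nucmer hit objects, for illustration only; the helper names are assumptions.

def start_hit_ok(at_ref_start, at_ref_end, same_strand):
    # Mirrors the first test above: the query leaves the end of its reference
    # contig on the same strand, or leaves the start on the opposite strand.
    return (at_ref_end and same_strand) or (at_ref_start and not same_strand)

def end_hit_ok(at_ref_start, at_ref_end, same_strand):
    # Mirrors the second test: the query enters the start of the other contig
    # on the same strand, or enters its end on the opposite strand.
    return (at_ref_start and same_strand) or (at_ref_end and not same_strand)

# A same-strand query spanning contig1's end and contig2's start can bridge them:
print(start_hit_ok(False, True, True) and end_hit_ok(True, False, True))  # True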
ChrisBeaumont/soupy
soupy.py
https://github.com/ChrisBeaumont/soupy/blob/795f2f61f711f574d5218fc8a3375d02bda1104f/soupy.py#L1032-L1037
def find_previous_sibling(self, *args, **kwargs): """ Like :meth:`find`, but searches through :attr:`previous_siblings` """ op = operator.methodcaller('find_previous_sibling', *args, **kwargs) return self._wrap_node(op)
[ "def", "find_previous_sibling", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "op", "=", "operator", ".", "methodcaller", "(", "'find_previous_sibling'", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_wrap_node", "(", "op", ")" ]
Like :meth:`find`, but searches through :attr:`previous_siblings`
[ "Like", ":", "meth", ":", "find", "but", "searches", "through", ":", "attr", ":", "previous_siblings" ]
python
test
42.333333
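A hedged usage sketch; the Soupy entry point, the .val() accessor, and the markup are assumptions based on soupy's usual API rather than anything shown in the record.

from soupy import Soupy  # entry point assumed

doc = Soupy('<div><h2>Title</h2><p>Body</p></div>')
heading = doc.find('p').find_previous_sibling('h2')
print(heading.val())  # the underlying <h2> tag, if the lookup succeeds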
Crunch-io/crunch-cube
src/cr/cube/min_base_size_mask.py
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/min_base_size_mask.py#L72-L91
def table_mask(self): """ndarray, True where table margin <= min_base_size, same shape as slice.""" margin = compress_pruned( self._slice.margin( axis=None, weighted=False, include_transforms_for_dims=self._hs_dims, prune=self._prune, ) ) mask = margin < self._size if margin.shape == self._shape: return mask if self._slice.dim_types[0] == DT.MR: # If the margin is a column vector - broadcast it's mask to the array shape return np.logical_or(np.zeros(self._shape, dtype=bool), mask[:, None]) return np.logical_or(np.zeros(self._shape, dtype=bool), mask)
[ "def", "table_mask", "(", "self", ")", ":", "margin", "=", "compress_pruned", "(", "self", ".", "_slice", ".", "margin", "(", "axis", "=", "None", ",", "weighted", "=", "False", ",", "include_transforms_for_dims", "=", "self", ".", "_hs_dims", ",", "prune", "=", "self", ".", "_prune", ",", ")", ")", "mask", "=", "margin", "<", "self", ".", "_size", "if", "margin", ".", "shape", "==", "self", ".", "_shape", ":", "return", "mask", "if", "self", ".", "_slice", ".", "dim_types", "[", "0", "]", "==", "DT", ".", "MR", ":", "# If the margin is a column vector - broadcast it's mask to the array shape", "return", "np", ".", "logical_or", "(", "np", ".", "zeros", "(", "self", ".", "_shape", ",", "dtype", "=", "bool", ")", ",", "mask", "[", ":", ",", "None", "]", ")", "return", "np", ".", "logical_or", "(", "np", ".", "zeros", "(", "self", ".", "_shape", ",", "dtype", "=", "bool", ")", ",", "mask", ")" ]
ndarray, True where table margin <= min_base_size, same shape as slice.
[ "ndarray", "True", "where", "table", "margin", "<", "=", "min_base_size", "same", "shape", "as", "slice", "." ]
python
train
36
Crunch-io/crunch-cube
src/cr/cube/measures/wishart_pairwise_significance.py
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/measures/wishart_pairwise_significance.py#L132-L140
def _opposite_axis_margin(self): """ndarray representing margin along the axis opposite of self._axis In the process of calculating p-values for the column significance testing we need both the margin along the primary axis and the percentage margin along the opposite axis. """ off_axis = 1 - self._axis return self._slice.margin(axis=off_axis, include_mr_cat=self._include_mr_cat)
[ "def", "_opposite_axis_margin", "(", "self", ")", ":", "off_axis", "=", "1", "-", "self", ".", "_axis", "return", "self", ".", "_slice", ".", "margin", "(", "axis", "=", "off_axis", ",", "include_mr_cat", "=", "self", ".", "_include_mr_cat", ")" ]
ndarray representing margin along the axis opposite of self._axis In the process of calculating p-values for the column significance testing we need both the margin along the primary axis and the percentage margin along the opposite axis.
[ "ndarray", "representing", "margin", "along", "the", "axis", "opposite", "of", "self", ".", "_axis" ]
python
train
47.888889
Microsoft/nni
tools/nni_annotation/code_generator.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/tools/nni_annotation/code_generator.py#L85-L97
def parse_nni_function(code): """Parse `nni.function_choice` expression. Return the AST node of annotated expression and a list of dumped function call expressions. code: annotation string """ name, call = parse_annotation_function(code, 'function_choice') funcs = [ast.dump(func, False) for func in call.args] convert_args_to_dict(call, with_lambda=True) name_str = astor.to_source(name).strip() call.keywords[0].value = ast.Str(s=name_str) return call, funcs
[ "def", "parse_nni_function", "(", "code", ")", ":", "name", ",", "call", "=", "parse_annotation_function", "(", "code", ",", "'function_choice'", ")", "funcs", "=", "[", "ast", ".", "dump", "(", "func", ",", "False", ")", "for", "func", "in", "call", ".", "args", "]", "convert_args_to_dict", "(", "call", ",", "with_lambda", "=", "True", ")", "name_str", "=", "astor", ".", "to_source", "(", "name", ")", ".", "strip", "(", ")", "call", ".", "keywords", "[", "0", "]", ".", "value", "=", "ast", ".", "Str", "(", "s", "=", "name_str", ")", "return", "call", ",", "funcs" ]
Parse `nni.function_choice` expression. Return the AST node of annotated expression and a list of dumped function call expressions. code: annotation string
[ "Parse", "nni", ".", "function_choice", "expression", ".", "Return", "the", "AST", "node", "of", "annotated", "expression", "and", "a", "list", "of", "dumped", "function", "call", "expressions", ".", "code", ":", "annotation", "string" ]
python
train
37.692308
ev3dev/ev3dev-lang-python
ev3dev2/sound.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/sound.py#L346-L359
def set_volume(self, pct, channel=None): """ Sets the sound volume to the given percentage [0-100] by calling ``amixer -q set <channel> <pct>%``. If the channel is not specified, it tries to determine the default one by running ``amixer scontrols``. If that fails as well, it uses the ``Playback`` channel, as that is the only channel on the EV3. """ if channel is None: channel = self._get_channel() cmd_line = '/usr/bin/amixer -q set {0} {1:d}%'.format(channel, pct) Popen(shlex.split(cmd_line)).wait()
[ "def", "set_volume", "(", "self", ",", "pct", ",", "channel", "=", "None", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "_get_channel", "(", ")", "cmd_line", "=", "'/usr/bin/amixer -q set {0} {1:d}%'", ".", "format", "(", "channel", ",", "pct", ")", "Popen", "(", "shlex", ".", "split", "(", "cmd_line", ")", ")", ".", "wait", "(", ")" ]
Sets the sound volume to the given percentage [0-100] by calling ``amixer -q set <channel> <pct>%``. If the channel is not specified, it tries to determine the default one by running ``amixer scontrols``. If that fails as well, it uses the ``Playback`` channel, as that is the only channel on the EV3.
[ "Sets", "the", "sound", "volume", "to", "the", "given", "percentage", "[", "0", "-", "100", "]", "by", "calling", "amixer", "-", "q", "set", "<channel", ">", "<pct", ">", "%", ".", "If", "the", "channel", "is", "not", "specified", "it", "tries", "to", "determine", "the", "default", "one", "by", "running", "amixer", "scontrols", ".", "If", "that", "fails", "as", "well", "it", "uses", "the", "Playback", "channel", "as", "that", "is", "the", "only", "channel", "on", "the", "EV3", "." ]
python
train
41.785714
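A usage sketch assuming the method lives on ev3dev2.sound.Sound (consistent with the module path above) and that amixer is present on the device.

from ev3dev2.sound import Sound  # class name assumed from the module path

spkr = Sound()
spkr.set_volume(50)                      # channel auto-detected, else 'Playback'
spkr.set_volume(80, channel='Playback')  # explicit channel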
sontek/bulby
bulby/client.py
https://github.com/sontek/bulby/blob/a2e741f843ee8e361b50a6079601108bfbe52526/bulby/client.py#L77-L97
def connect(self): ''' Registers a new device + username with the bridge ''' # Don't try to register if we already have if self.validate_registration(): return True body = { 'devicetype': self.device_type, 'username': self.username, } response = self.make_request('POST', '/api', body) if 'error' in response: if response['error']['type'] == 101: msg = 'Please press the link button and try again' else: msg = response['error']['description'] raise Exception(msg)
[ "def", "connect", "(", "self", ")", ":", "# Don't try to register if we already have", "if", "self", ".", "validate_registration", "(", ")", ":", "return", "True", "body", "=", "{", "'devicetype'", ":", "self", ".", "device_type", ",", "'username'", ":", "self", ".", "username", ",", "}", "response", "=", "self", ".", "make_request", "(", "'POST'", ",", "'/api'", ",", "body", ")", "if", "'error'", "in", "response", ":", "if", "response", "[", "'error'", "]", "[", "'type'", "]", "==", "101", ":", "msg", "=", "'Please press the link button and try again'", "else", ":", "msg", "=", "response", "[", "'error'", "]", "[", "'description'", "]", "raise", "Exception", "(", "msg", ")" ]
Registers a new device + username with the bridge
[ "Registers", "a", "new", "device", "+", "username", "with", "the", "bridge" ]
python
train
29.619048
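A hedged sketch; the client class name and constructor arguments below are assumptions, not taken from the record.

from bulby.client import HueBridgeClient  # class name assumed

client = HueBridgeClient('192.168.1.10')  # bridge address; constructor signature assumed
client.connect()  # raises 'Please press the link button and try again' until paired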
librosa/librosa
librosa/util/utils.py
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L1009-L1098
def sparsify_rows(x, quantile=0.01): ''' Return a row-sparse matrix approximating the input `x`. Parameters ---------- x : np.ndarray [ndim <= 2] The input matrix to sparsify. quantile : float in [0, 1.0) Percentage of magnitude to discard in each row of `x` Returns ------- x_sparse : `scipy.sparse.csr_matrix` [shape=x.shape] Row-sparsified approximation of `x` If `x.ndim == 1`, then `x` is interpreted as a row vector, and `x_sparse.shape == (1, len(x))`. Raises ------ ParameterError If `x.ndim > 2` If `quantile` lies outside `[0, 1.0)` Notes ----- This function caches at level 40. Examples -------- >>> # Construct a Hann window to sparsify >>> x = scipy.signal.hann(32) >>> x array([ 0. , 0.01 , 0.041, 0.09 , 0.156, 0.236, 0.326, 0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937, 0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806, 0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156, 0.09 , 0.041, 0.01 , 0. ]) >>> # Discard the bottom percentile >>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.01) >>> x_sparse <1x32 sparse matrix of type '<type 'numpy.float64'>' with 26 stored elements in Compressed Sparse Row format> >>> x_sparse.todense() matrix([[ 0. , 0. , 0. , 0.09 , 0.156, 0.236, 0.326, 0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937, 0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806, 0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156, 0.09 , 0. , 0. , 0. ]]) >>> # Discard up to the bottom 10th percentile >>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.1) >>> x_sparse <1x32 sparse matrix of type '<type 'numpy.float64'>' with 20 stored elements in Compressed Sparse Row format> >>> x_sparse.todense() matrix([[ 0. , 0. , 0. , 0. , 0. , 0. , 0.326, 0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937, 0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806, 0.72 , 0.625, 0.525, 0.424, 0.326, 0. , 0. , 0. , 0. , 0. , 0. ]]) ''' if x.ndim == 1: x = x.reshape((1, -1)) elif x.ndim > 2: raise ParameterError('Input must have 2 or fewer dimensions. ' 'Provided x.shape={}.'.format(x.shape)) if not 0.0 <= quantile < 1: raise ParameterError('Invalid quantile {:.2f}'.format(quantile)) x_sparse = scipy.sparse.lil_matrix(x.shape, dtype=x.dtype) mags = np.abs(x) norms = np.sum(mags, axis=1, keepdims=True) mag_sort = np.sort(mags, axis=1) cumulative_mag = np.cumsum(mag_sort / norms, axis=1) threshold_idx = np.argmin(cumulative_mag < quantile, axis=1) for i, j in enumerate(threshold_idx): idx = np.where(mags[i] >= mag_sort[i, j]) x_sparse[i, idx] = x[i, idx] return x_sparse.tocsr()
[ "def", "sparsify_rows", "(", "x", ",", "quantile", "=", "0.01", ")", ":", "if", "x", ".", "ndim", "==", "1", ":", "x", "=", "x", ".", "reshape", "(", "(", "1", ",", "-", "1", ")", ")", "elif", "x", ".", "ndim", ">", "2", ":", "raise", "ParameterError", "(", "'Input must have 2 or fewer dimensions. '", "'Provided x.shape={}.'", ".", "format", "(", "x", ".", "shape", ")", ")", "if", "not", "0.0", "<=", "quantile", "<", "1", ":", "raise", "ParameterError", "(", "'Invalid quantile {:.2f}'", ".", "format", "(", "quantile", ")", ")", "x_sparse", "=", "scipy", ".", "sparse", ".", "lil_matrix", "(", "x", ".", "shape", ",", "dtype", "=", "x", ".", "dtype", ")", "mags", "=", "np", ".", "abs", "(", "x", ")", "norms", "=", "np", ".", "sum", "(", "mags", ",", "axis", "=", "1", ",", "keepdims", "=", "True", ")", "mag_sort", "=", "np", ".", "sort", "(", "mags", ",", "axis", "=", "1", ")", "cumulative_mag", "=", "np", ".", "cumsum", "(", "mag_sort", "/", "norms", ",", "axis", "=", "1", ")", "threshold_idx", "=", "np", ".", "argmin", "(", "cumulative_mag", "<", "quantile", ",", "axis", "=", "1", ")", "for", "i", ",", "j", "in", "enumerate", "(", "threshold_idx", ")", ":", "idx", "=", "np", ".", "where", "(", "mags", "[", "i", "]", ">=", "mag_sort", "[", "i", ",", "j", "]", ")", "x_sparse", "[", "i", ",", "idx", "]", "=", "x", "[", "i", ",", "idx", "]", "return", "x_sparse", ".", "tocsr", "(", ")" ]
Return a row-sparse matrix approximating the input `x`. Parameters ---------- x : np.ndarray [ndim <= 2] The input matrix to sparsify. quantile : float in [0, 1.0) Percentage of magnitude to discard in each row of `x` Returns ------- x_sparse : `scipy.sparse.csr_matrix` [shape=x.shape] Row-sparsified approximation of `x` If `x.ndim == 1`, then `x` is interpreted as a row vector, and `x_sparse.shape == (1, len(x))`. Raises ------ ParameterError If `x.ndim > 2` If `quantile` lies outside `[0, 1.0)` Notes ----- This function caches at level 40. Examples -------- >>> # Construct a Hann window to sparsify >>> x = scipy.signal.hann(32) >>> x array([ 0. , 0.01 , 0.041, 0.09 , 0.156, 0.236, 0.326, 0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937, 0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806, 0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156, 0.09 , 0.041, 0.01 , 0. ]) >>> # Discard the bottom percentile >>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.01) >>> x_sparse <1x32 sparse matrix of type '<type 'numpy.float64'>' with 26 stored elements in Compressed Sparse Row format> >>> x_sparse.todense() matrix([[ 0. , 0. , 0. , 0.09 , 0.156, 0.236, 0.326, 0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937, 0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806, 0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156, 0.09 , 0. , 0. , 0. ]]) >>> # Discard up to the bottom 10th percentile >>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.1) >>> x_sparse <1x32 sparse matrix of type '<type 'numpy.float64'>' with 20 stored elements in Compressed Sparse Row format> >>> x_sparse.todense() matrix([[ 0. , 0. , 0. , 0. , 0. , 0. , 0.326, 0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937, 0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806, 0.72 , 0.625, 0.525, 0.424, 0.326, 0. , 0. , 0. , 0. , 0. , 0. ]])
[ "Return", "a", "row", "-", "sparse", "matrix", "approximating", "the", "input", "x", "." ]
python
test
33.655556
mobolic/facebook-sdk
facebook/__init__.py
https://github.com/mobolic/facebook-sdk/blob/65ff582e77f7ed68b6e9643a7490e5dee2a1031b/facebook/__init__.py#L346-L364
def get_access_token_from_code( self, code, redirect_uri, app_id, app_secret ): """Get an access token from the "code" returned from an OAuth dialog. Returns a dict containing the user-specific access token and its expiration date (if applicable). """ args = { "code": code, "redirect_uri": redirect_uri, "client_id": app_id, "client_secret": app_secret, } return self.request( "{0}/oauth/access_token".format(self.version), args )
[ "def", "get_access_token_from_code", "(", "self", ",", "code", ",", "redirect_uri", ",", "app_id", ",", "app_secret", ")", ":", "args", "=", "{", "\"code\"", ":", "code", ",", "\"redirect_uri\"", ":", "redirect_uri", ",", "\"client_id\"", ":", "app_id", ",", "\"client_secret\"", ":", "app_secret", ",", "}", "return", "self", ".", "request", "(", "\"{0}/oauth/access_token\"", ".", "format", "(", "self", ".", "version", ")", ",", "args", ")" ]
Get an access token from the "code" returned from an OAuth dialog. Returns a dict containing the user-specific access token and its expiration date (if applicable).
[ "Get", "an", "access", "token", "from", "the", "code", "returned", "from", "an", "OAuth", "dialog", "." ]
python
train
29.052632
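A usage sketch assuming the standard facebook.GraphAPI entry point; the code value comes from the OAuth redirect and the remaining values are placeholders.

import facebook

graph = facebook.GraphAPI(version='3.1')
token_info = graph.get_access_token_from_code(
    code='CODE_FROM_OAUTH_REDIRECT',
    redirect_uri='https://example.com/callback',
    app_id='APP_ID',
    app_secret='APP_SECRET',
)
access_token = token_info['access_token']  # key name per the Graph API response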
JNRowe/jnrbase
jnrbase/entry.py
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/entry.py#L24-L37
def entry_point(__func: Callable) -> Callable: """Execute function when module is run directly. Note: This allows fall through for importing modules that use it. Args: __func: Function to run """ if __func.__module__ == '__main__': import sys sys.exit(__func()) else: return __func
[ "def", "entry_point", "(", "__func", ":", "Callable", ")", "->", "Callable", ":", "if", "__func", ".", "__module__", "==", "'__main__'", ":", "import", "sys", "sys", ".", "exit", "(", "__func", "(", ")", ")", "else", ":", "return", "__func" ]
Execute function when module is run directly. Note: This allows fall through for importing modules that use it. Args: __func: Function to run
[ "Execute", "function", "when", "module", "is", "run", "directly", "." ]
python
train
23.857143
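A usage sketch grounded in the docstring: the decorated function runs (and provides the exit status) only when its module is executed directly; when imported it is returned unchanged.

from jnrbase.entry import entry_point  # import path taken from the record above

@entry_point
def main() -> int:
    print('doing work')
    return 0  # becomes the process exit status when run directly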
CivicSpleen/ambry
ambry/orm/config.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/config.py#L228-L231
def build_duration(self): """Return the difference between build and build_done states""" return int(self.state.build_done) - int(self.state.build)
[ "def", "build_duration", "(", "self", ")", ":", "return", "int", "(", "self", ".", "state", ".", "build_done", ")", "-", "int", "(", "self", ".", "state", ".", "build", ")" ]
Return the difference between build and build_done states
[ "Return", "the", "difference", "between", "build", "and", "build_done", "states" ]
python
train
40.25
mwouts/jupytext
jupytext/contentsmanager.py
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/contentsmanager.py#L191-L204
def set_default_format_options(self, format_options, read=False): """Set default format option""" if self.default_notebook_metadata_filter: format_options.setdefault('notebook_metadata_filter', self.default_notebook_metadata_filter) if self.default_cell_metadata_filter: format_options.setdefault('cell_metadata_filter', self.default_cell_metadata_filter) if self.comment_magics is not None: format_options.setdefault('comment_magics', self.comment_magics) if self.split_at_heading: format_options.setdefault('split_at_heading', self.split_at_heading) if not read and self.default_cell_markers: format_options.setdefault('cell_markers', self.default_cell_markers) if read and self.sphinx_convert_rst2md: format_options.setdefault('rst2md', self.sphinx_convert_rst2md)
[ "def", "set_default_format_options", "(", "self", ",", "format_options", ",", "read", "=", "False", ")", ":", "if", "self", ".", "default_notebook_metadata_filter", ":", "format_options", ".", "setdefault", "(", "'notebook_metadata_filter'", ",", "self", ".", "default_notebook_metadata_filter", ")", "if", "self", ".", "default_cell_metadata_filter", ":", "format_options", ".", "setdefault", "(", "'cell_metadata_filter'", ",", "self", ".", "default_cell_metadata_filter", ")", "if", "self", ".", "comment_magics", "is", "not", "None", ":", "format_options", ".", "setdefault", "(", "'comment_magics'", ",", "self", ".", "comment_magics", ")", "if", "self", ".", "split_at_heading", ":", "format_options", ".", "setdefault", "(", "'split_at_heading'", ",", "self", ".", "split_at_heading", ")", "if", "not", "read", "and", "self", ".", "default_cell_markers", ":", "format_options", ".", "setdefault", "(", "'cell_markers'", ",", "self", ".", "default_cell_markers", ")", "if", "read", "and", "self", ".", "sphinx_convert_rst2md", ":", "format_options", ".", "setdefault", "(", "'rst2md'", ",", "self", ".", "sphinx_convert_rst2md", ")" ]
Set default format option
[ "Set", "default", "format", "option" ]
python
train
63
echonest/pyechonest
pyechonest/artist.py
https://github.com/echonest/pyechonest/blob/d8c7af6c1da699b50b2f4b1bd3c0febe72e7f1ee/pyechonest/artist.py#L1062-L1119
def suggest(q='', results=15, buckets=None, limit=False, max_familiarity=None, min_familiarity=None, max_hotttnesss=None, min_hotttnesss=None): """Suggest artists based upon partial names. Args: Kwargs: q (str): The text to suggest artists from results (int): An integer number of results to return buckets (list): A list of strings specifying which buckets to retrieve limit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets max_familiarity (float): A float specifying the max familiarity of artists to search for min_familiarity (float): A float specifying the min familiarity of artists to search for max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for min_hotttnesss (float): A float specifying the max hotttnesss of artists to search for Returns: A list of Artist objects Example: >>> results = artist.suggest(text='rad') >>> results >>> """ buckets = buckets or [] kwargs = {} kwargs['q'] = q if max_familiarity is not None: kwargs['max_familiarity'] = max_familiarity if min_familiarity is not None: kwargs['min_familiarity'] = min_familiarity if max_hotttnesss is not None: kwargs['max_hotttnesss'] = max_hotttnesss if min_hotttnesss is not None: kwargs['min_hotttnesss'] = min_hotttnesss if results: kwargs['results'] = results if buckets: kwargs['bucket'] = buckets if limit: kwargs['limit'] = 'true' result = util.callm("%s/%s" % ('artist', 'suggest'), kwargs) return [Artist(**util.fix(a_dict)) for a_dict in result['response']['artists']]
[ "def", "suggest", "(", "q", "=", "''", ",", "results", "=", "15", ",", "buckets", "=", "None", ",", "limit", "=", "False", ",", "max_familiarity", "=", "None", ",", "min_familiarity", "=", "None", ",", "max_hotttnesss", "=", "None", ",", "min_hotttnesss", "=", "None", ")", ":", "buckets", "=", "buckets", "or", "[", "]", "kwargs", "=", "{", "}", "kwargs", "[", "'q'", "]", "=", "q", "if", "max_familiarity", "is", "not", "None", ":", "kwargs", "[", "'max_familiarity'", "]", "=", "max_familiarity", "if", "min_familiarity", "is", "not", "None", ":", "kwargs", "[", "'min_familiarity'", "]", "=", "min_familiarity", "if", "max_hotttnesss", "is", "not", "None", ":", "kwargs", "[", "'max_hotttnesss'", "]", "=", "max_hotttnesss", "if", "min_hotttnesss", "is", "not", "None", ":", "kwargs", "[", "'min_hotttnesss'", "]", "=", "min_hotttnesss", "if", "results", ":", "kwargs", "[", "'results'", "]", "=", "results", "if", "buckets", ":", "kwargs", "[", "'bucket'", "]", "=", "buckets", "if", "limit", ":", "kwargs", "[", "'limit'", "]", "=", "'true'", "result", "=", "util", ".", "callm", "(", "\"%s/%s\"", "%", "(", "'artist'", ",", "'suggest'", ")", ",", "kwargs", ")", "return", "[", "Artist", "(", "*", "*", "util", ".", "fix", "(", "a_dict", ")", ")", "for", "a_dict", "in", "result", "[", "'response'", "]", "[", "'artists'", "]", "]" ]
Suggest artists based upon partial names. Args: Kwargs: q (str): The text to suggest artists from results (int): An integer number of results to return buckets (list): A list of strings specifying which buckets to retrieve limit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets max_familiarity (float): A float specifying the max familiarity of artists to search for min_familiarity (float): A float specifying the min familiarity of artists to search for max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for min_hotttnesss (float): A float specifying the max hotttnesss of artists to search for Returns: A list of Artist objects Example: >>> results = artist.suggest(text='rad') >>> results >>>
[ "Suggest", "artists", "based", "upon", "partial", "names", "." ]
python
train
29.948276
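A usage sketch that follows the signature's q= keyword (the docstring example's text= keyword does not appear in the signature); a configured Echo Nest API key is assumed.

from pyechonest import artist

for a in artist.suggest(q='rad', results=5):
    print(a.name)  # Artist objects expose a name attribute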
cox-labs/perseuspy
perseuspy/io/perseus/matrix.py
https://github.com/cox-labs/perseuspy/blob/3809c1bd46512605f9e7ca7f97e026e4940ed604/perseuspy/io/perseus/matrix.py#L52-L58
def annotation_rows(prefix, annotations): """ Helper function to extract N: and C: rows from annotations and pad their values """ ncol = len(annotations['Column Name']) return {name.replace(prefix, '', 1) : values + [''] * (ncol - len(values)) for name, values in annotations.items() if name.startswith(prefix)}
[ "def", "annotation_rows", "(", "prefix", ",", "annotations", ")", ":", "ncol", "=", "len", "(", "annotations", "[", "'Column Name'", "]", ")", "return", "{", "name", ".", "replace", "(", "prefix", ",", "''", ",", "1", ")", ":", "values", "+", "[", "''", "]", "*", "(", "ncol", "-", "len", "(", "values", ")", ")", "for", "name", ",", "values", "in", "annotations", ".", "items", "(", ")", "if", "name", ".", "startswith", "(", "prefix", ")", "}" ]
Helper function to extract N: and C: rows from annotations and pad their values
[ "Helper", "function", "to", "extract", "N", ":", "and", "C", ":", "rows", "from", "annotations", "and", "pad", "their", "values" ]
python
train
48.142857
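A small worked example of the helper, assuming it is in scope; the annotation keys are made up but follow the 'C: ' prefix convention mentioned in the docstring.

annotations = {
    'Column Name': ['Intensity A', 'Intensity B', 'Score'],
    'C: Group': ['Control', 'Treatment'],   # one value short of the column count
}
print(annotation_rows('C: ', annotations))
# {'Group': ['Control', 'Treatment', '']}  -- values padded to the column count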
ska-sa/katcp-python
katcp/core.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/core.py#L380-L383
def reply_ok(self): """Return True if this is a reply and its first argument is 'ok'.""" return (self.mtype == self.REPLY and self.arguments and self.arguments[0] == self.OK)
[ "def", "reply_ok", "(", "self", ")", ":", "return", "(", "self", ".", "mtype", "==", "self", ".", "REPLY", "and", "self", ".", "arguments", "and", "self", ".", "arguments", "[", "0", "]", "==", "self", ".", "OK", ")" ]
Return True if this is a reply and its first argument is 'ok'.
[ "Return", "True", "if", "this", "is", "a", "reply", "and", "its", "first", "argument", "is", "ok", "." ]
python
train
50.75
sighingnow/parsec.py
src/parsec/__init__.py
https://github.com/sighingnow/parsec.py/blob/ed50e1e259142757470b925f8d20dfe5ad223af0/src/parsec/__init__.py#L231-L243
def mark(self): '''Mark the line and column information of the result of this parser.''' def pos(text, index): return ParseError.loc_info(text, index) @Parser def mark_parser(text, index): res = self(text, index) if res.status: return Value.success(res.index, (pos(text, index), res.value, pos(text, res.index))) else: return res # failed. return mark_parser
[ "def", "mark", "(", "self", ")", ":", "def", "pos", "(", "text", ",", "index", ")", ":", "return", "ParseError", ".", "loc_info", "(", "text", ",", "index", ")", "@", "Parser", "def", "mark_parser", "(", "text", ",", "index", ")", ":", "res", "=", "self", "(", "text", ",", "index", ")", "if", "res", ".", "status", ":", "return", "Value", ".", "success", "(", "res", ".", "index", ",", "(", "pos", "(", "text", ",", "index", ")", ",", "res", ".", "value", ",", "pos", "(", "text", ",", "res", ".", "index", ")", ")", ")", "else", ":", "return", "res", "# failed.", "return", "mark_parser" ]
Mark the line and column information of the result of this parser.
[ "Mark", "the", "line", "and", "column", "information", "of", "the", "result", "of", "this", "parser", "." ]
python
train
36
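A hedged usage sketch assuming parsec's string combinator and Parser.parse; mark() wraps the parsed value with its (line, column) start and end positions.

from parsec import string  # combinator assumed to be exported at package level

p = string('ab').mark()
print(p.parse('ab'))  # roughly ((0, 0), 'ab', (0, 2)) -- (line, column) pairs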
apache/spark
python/pyspark/sql/dataframe.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L784-L846
def sample(self, withReplacement=None, fraction=None, seed=None): """Returns a sampled subset of this :class:`DataFrame`. :param withReplacement: Sample with replacement or not (default False). :param fraction: Fraction of rows to generate, range [0.0, 1.0]. :param seed: Seed for sampling (default a random seed). .. note:: This is not guaranteed to provide exactly the fraction specified of the total count of the given :class:`DataFrame`. .. note:: `fraction` is required and, `withReplacement` and `seed` are optional. >>> df = spark.range(10) >>> df.sample(0.5, 3).count() 7 >>> df.sample(fraction=0.5, seed=3).count() 7 >>> df.sample(withReplacement=True, fraction=0.5, seed=3).count() 1 >>> df.sample(1.0).count() 10 >>> df.sample(fraction=1.0).count() 10 >>> df.sample(False, fraction=1.0).count() 10 """ # For the cases below: # sample(True, 0.5 [, seed]) # sample(True, fraction=0.5 [, seed]) # sample(withReplacement=False, fraction=0.5 [, seed]) is_withReplacement_set = \ type(withReplacement) == bool and isinstance(fraction, float) # For the case below: # sample(faction=0.5 [, seed]) is_withReplacement_omitted_kwargs = \ withReplacement is None and isinstance(fraction, float) # For the case below: # sample(0.5 [, seed]) is_withReplacement_omitted_args = isinstance(withReplacement, float) if not (is_withReplacement_set or is_withReplacement_omitted_kwargs or is_withReplacement_omitted_args): argtypes = [ str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None] raise TypeError( "withReplacement (optional), fraction (required) and seed (optional)" " should be a bool, float and number; however, " "got [%s]." % ", ".join(argtypes)) if is_withReplacement_omitted_args: if fraction is not None: seed = fraction fraction = withReplacement withReplacement = None seed = long(seed) if seed is not None else None args = [arg for arg in [withReplacement, fraction, seed] if arg is not None] jdf = self._jdf.sample(*args) return DataFrame(jdf, self.sql_ctx)
[ "def", "sample", "(", "self", ",", "withReplacement", "=", "None", ",", "fraction", "=", "None", ",", "seed", "=", "None", ")", ":", "# For the cases below:", "# sample(True, 0.5 [, seed])", "# sample(True, fraction=0.5 [, seed])", "# sample(withReplacement=False, fraction=0.5 [, seed])", "is_withReplacement_set", "=", "type", "(", "withReplacement", ")", "==", "bool", "and", "isinstance", "(", "fraction", ",", "float", ")", "# For the case below:", "# sample(faction=0.5 [, seed])", "is_withReplacement_omitted_kwargs", "=", "withReplacement", "is", "None", "and", "isinstance", "(", "fraction", ",", "float", ")", "# For the case below:", "# sample(0.5 [, seed])", "is_withReplacement_omitted_args", "=", "isinstance", "(", "withReplacement", ",", "float", ")", "if", "not", "(", "is_withReplacement_set", "or", "is_withReplacement_omitted_kwargs", "or", "is_withReplacement_omitted_args", ")", ":", "argtypes", "=", "[", "str", "(", "type", "(", "arg", ")", ")", "for", "arg", "in", "[", "withReplacement", ",", "fraction", ",", "seed", "]", "if", "arg", "is", "not", "None", "]", "raise", "TypeError", "(", "\"withReplacement (optional), fraction (required) and seed (optional)\"", "\" should be a bool, float and number; however, \"", "\"got [%s].\"", "%", "\", \"", ".", "join", "(", "argtypes", ")", ")", "if", "is_withReplacement_omitted_args", ":", "if", "fraction", "is", "not", "None", ":", "seed", "=", "fraction", "fraction", "=", "withReplacement", "withReplacement", "=", "None", "seed", "=", "long", "(", "seed", ")", "if", "seed", "is", "not", "None", "else", "None", "args", "=", "[", "arg", "for", "arg", "in", "[", "withReplacement", ",", "fraction", ",", "seed", "]", "if", "arg", "is", "not", "None", "]", "jdf", "=", "self", ".", "_jdf", ".", "sample", "(", "*", "args", ")", "return", "DataFrame", "(", "jdf", ",", "self", ".", "sql_ctx", ")" ]
Returns a sampled subset of this :class:`DataFrame`. :param withReplacement: Sample with replacement or not (default False). :param fraction: Fraction of rows to generate, range [0.0, 1.0]. :param seed: Seed for sampling (default a random seed). .. note:: This is not guaranteed to provide exactly the fraction specified of the total count of the given :class:`DataFrame`. .. note:: `fraction` is required and, `withReplacement` and `seed` are optional. >>> df = spark.range(10) >>> df.sample(0.5, 3).count() 7 >>> df.sample(fraction=0.5, seed=3).count() 7 >>> df.sample(withReplacement=True, fraction=0.5, seed=3).count() 1 >>> df.sample(1.0).count() 10 >>> df.sample(fraction=1.0).count() 10 >>> df.sample(False, fraction=1.0).count() 10
[ "Returns", "a", "sampled", "subset", "of", "this", ":", "class", ":", "DataFrame", "." ]
python
train
38.984127