Dataset schema (field, type, value range):

repo               stringlengths    7–54
path               stringlengths    4–192
url                stringlengths    87–284
code               stringlengths    78–104k
code_tokens        sequence
docstring          stringlengths    1–46.9k
docstring_tokens   sequence
language           stringclasses    1 value
partition          stringclasses    3 values
quantopian/zipline
zipline/algorithm.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/algorithm.py#L1811-L1870
def order_target_percent(self, asset, target, limit_price=None, stop_price=None, style=None): """Place an order to adjust a position to a target percent of the current portfolio value. If the position doesn't already exist, this is equivalent to placing a new order. If the position does exist, this is equivalent to placing an order for the difference between the target percent and the current percent. Parameters ---------- asset : Asset The asset that this order is for. target : float The desired percentage of the portfolio value to allocate to ``asset``. This is specified as a decimal, for example: 0.50 means 50%. limit_price : float, optional The limit price for the order. stop_price : float, optional The stop price for the order. style : ExecutionStyle The execution style for the order. Returns ------- order_id : str The unique identifier for this order. Notes ----- ``order_target_percent`` does not take into account any open orders. For example: .. code-block:: python order_target_percent(sid(0), 0.1) order_target_percent(sid(0), 0.1) This code will result in 20% of the portfolio being allocated to sid(0) because the first call to ``order_target_percent`` will not have been filled when the second ``order_target_percent`` call is made. See :func:`zipline.api.order` for more information about ``limit_price``, ``stop_price``, and ``style``. See Also -------- :class:`zipline.finance.execution.ExecutionStyle` :func:`zipline.api.order` :func:`zipline.api.order_target` :func:`zipline.api.order_target_value` """ if not self._can_order_asset(asset): return None amount = self._calculate_order_target_percent_amount(asset, target) return self.order(asset, amount, limit_price=limit_price, stop_price=stop_price, style=style)
[ "def", "order_target_percent", "(", "self", ",", "asset", ",", "target", ",", "limit_price", "=", "None", ",", "stop_price", "=", "None", ",", "style", "=", "None", ")", ":", "if", "not", "self", ".", "_can_order_asset", "(", "asset", ")", ":", "return", "None", "amount", "=", "self", ".", "_calculate_order_target_percent_amount", "(", "asset", ",", "target", ")", "return", "self", ".", "order", "(", "asset", ",", "amount", ",", "limit_price", "=", "limit_price", ",", "stop_price", "=", "stop_price", ",", "style", "=", "style", ")" ]
Place an order to adjust a position to a target percent of the current portfolio value. If the position doesn't already exist, this is equivalent to placing a new order. If the position does exist, this is equivalent to placing an order for the difference between the target percent and the current percent. Parameters ---------- asset : Asset The asset that this order is for. target : float The desired percentage of the portfolio value to allocate to ``asset``. This is specified as a decimal, for example: 0.50 means 50%. limit_price : float, optional The limit price for the order. stop_price : float, optional The stop price for the order. style : ExecutionStyle The execution style for the order. Returns ------- order_id : str The unique identifier for this order. Notes ----- ``order_target_percent`` does not take into account any open orders. For example: .. code-block:: python order_target_percent(sid(0), 0.1) order_target_percent(sid(0), 0.1) This code will result in 20% of the portfolio being allocated to sid(0) because the first call to ``order_target_percent`` will not have been filled when the second ``order_target_percent`` call is made. See :func:`zipline.api.order` for more information about ``limit_price``, ``stop_price``, and ``style``. See Also -------- :class:`zipline.finance.execution.ExecutionStyle` :func:`zipline.api.order` :func:`zipline.api.order_target` :func:`zipline.api.order_target_value`
[ "Place", "an", "order", "to", "adjust", "a", "position", "to", "a", "target", "percent", "of", "the", "current", "portfolio", "value", ".", "If", "the", "position", "doesn", "t", "already", "exist", "this", "is", "equivalent", "to", "placing", "a", "new", "order", ".", "If", "the", "position", "does", "exist", "this", "is", "equivalent", "to", "placing", "an", "order", "for", "the", "difference", "between", "the", "target", "percent", "and", "the", "current", "percent", "." ]
python
train
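A minimal usage sketch for the function above, assuming a standard zipline algorithm where `order_target_percent` and `symbol` come from `zipline.api` (the ticker is illustrative):

from zipline.api import order_target_percent, symbol

def handle_data(context, data):
    # Target 10% of portfolio value in one asset; targets are decimals,
    # so 0.10 means 10%. Calling this twice before the first order fills
    # would stack to roughly 20%, as the Notes section warns.
    order_target_percent(symbol('AAPL'), 0.10)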
alantygel/ckanext-semantictags
ckanext/semantictags/db.py
https://github.com/alantygel/ckanext-semantictags/blob/10bb31d29f34b2b5a6feae693961842f93007ce1/ckanext/semantictags/db.py#L99-L117
def get(cls, tag_id_or_URI, label=None): '''Return the tag with the given id or URI, or None. :param tag_id_or_URI: the id or URI of the tag to return :type tag_id_or_URI: string :returns: the tag object with the given id or URI, or None if there is no tag with that id or URI :rtype: ckanext.semantictags.db.SemanticTag ''' # First try to get the tag by ID. semantictag = SemanticTag.by_id(tag_id_or_URI) if semantictag: return semantictag else: semantictag = SemanticTag.by_URI(tag_id_or_URI) return semantictag
[ "def", "get", "(", "cls", ",", "tag_id_or_URI", ",", "label", "=", "None", ")", ":", "# First try to get the tag by ID.", "semantictag", "=", "SemanticTag", ".", "by_id", "(", "tag_id_or_URI", ")", "if", "semantictag", ":", "return", "semantictag", "else", ":", "semantictag", "=", "SemanticTag", ".", "by_URI", "(", "tag_id_or_URI", ")", "return", "semantictag" ]
Return the tag with the given id or URI, or None. :param tag_id_or_URI: the id or URI of the tag to return :type tag_id_or_URI: string :returns: the tag object with the given id or URI, or None if there is no tag with that id or URI :rtype: ckanext.semantictags.db.SemanticTag
[ "Return", "the", "tag", "with", "the", "given", "id", "or", "URI", "or", "None", "." ]
python
train
biosignalsnotebooks/biosignalsnotebooks
biosignalsnotebooks/build/lib/biosignalsnotebooks/__notebook_support__.py
https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/build/lib/biosignalsnotebooks/__notebook_support__.py#L796-L877
def plot_median_freq_evol(time_signal, signal, time_median_freq, median_freq, activations_begin, activations_end, sample_rate, file_name=None): """ ----- Brief ----- Graphical representation of the EMG median power frequency evolution time series. ----------- Description ----------- Function intended to generate a Bokeh figure with 2x1 format, where each muscular activation period is identified through a colored box and the plot that shows the median frequency evolution is also presented. In the first cell is presented the EMG signal, highlighting each muscular activation. The second cell has the same time scale as the first one (the two plots are synchronized), being plotted the evolution time series of EMG median frequency. Per muscular activation period is extracted a Median Power Frequency value (sample), so, our window is a muscular activation period. Median power frequency is a commonly used parameter for evaluating muscular fatigue. It is widely accepted that this parameter decreases as fatigue sets in. Applied in the Notebook "Fatigue Evaluation - Evolution of Median Power Frequency". ---------- Parameters ---------- time_signal : list List with the time axis samples of EMG signal. signal : list List with EMG signal to present. time_median_freq : list List with the time axis samples of the median frequency evolution time-series. median_freq : list List with the Median Frequency samples. activations_begin : list List with the samples where each muscular activation period starts. activations_end : list List with the samples where each muscular activation period ends. sample_rate : int Sampling rate of acquisition. file_name : str Path containing the destination folder where the Bokeh figure will be stored. """ # Generation of the HTML file where the plot will be stored. #file_name = _generate_bokeh_file(file_name) list_figures_1 = plot([list(time_signal), list(time_median_freq)], [list(signal), list(median_freq)], title=["EMG Acquisition highlighting muscular activations", "Median Frequency Evolution"], grid_plot=True, grid_lines=2, grid_columns=1, open_signals_style=True, x_axis_label="Time (s)", yAxisLabel=["Raw Data", "Median Frequency (Hz)"], x_range=[0, 125], get_fig_list=True, show_plot=False) # Highlighting of each processing window for activation in range(0, len(activations_begin)): color = opensignals_color_pallet() box_annotation = BoxAnnotation(left=activations_begin[activation] / sample_rate, right=activations_end[activation] / sample_rate, fill_color=color, fill_alpha=0.1) box_annotation_copy = BoxAnnotation(left=activations_begin[activation] / sample_rate, right=activations_end[activation] / sample_rate, fill_color=color, fill_alpha=0.1) list_figures_1[0].add_layout(box_annotation) list_figures_1[1].add_layout(box_annotation_copy) gridplot_1 = gridplot([[list_figures_1[0]], [list_figures_1[1]]], **opensignals_kwargs("gridplot")) show(gridplot_1)
[ "def", "plot_median_freq_evol", "(", "time_signal", ",", "signal", ",", "time_median_freq", ",", "median_freq", ",", "activations_begin", ",", "activations_end", ",", "sample_rate", ",", "file_name", "=", "None", ")", ":", "# Generation of the HTML file where the plot will be stored.", "#file_name = _generate_bokeh_file(file_name)", "list_figures_1", "=", "plot", "(", "[", "list", "(", "time_signal", ")", ",", "list", "(", "time_median_freq", ")", "]", ",", "[", "list", "(", "signal", ")", ",", "list", "(", "median_freq", ")", "]", ",", "title", "=", "[", "\"EMG Acquisition highlighting muscular activations\"", ",", "\"Median Frequency Evolution\"", "]", ",", "grid_plot", "=", "True", ",", "grid_lines", "=", "2", ",", "grid_columns", "=", "1", ",", "open_signals_style", "=", "True", ",", "x_axis_label", "=", "\"Time (s)\"", ",", "yAxisLabel", "=", "[", "\"Raw Data\"", ",", "\"Median Frequency (Hz)\"", "]", ",", "x_range", "=", "[", "0", ",", "125", "]", ",", "get_fig_list", "=", "True", ",", "show_plot", "=", "False", ")", "# Highlighting of each processing window", "for", "activation", "in", "range", "(", "0", ",", "len", "(", "activations_begin", ")", ")", ":", "color", "=", "opensignals_color_pallet", "(", ")", "box_annotation", "=", "BoxAnnotation", "(", "left", "=", "activations_begin", "[", "activation", "]", "/", "sample_rate", ",", "right", "=", "activations_end", "[", "activation", "]", "/", "sample_rate", ",", "fill_color", "=", "color", ",", "fill_alpha", "=", "0.1", ")", "box_annotation_copy", "=", "BoxAnnotation", "(", "left", "=", "activations_begin", "[", "activation", "]", "/", "sample_rate", ",", "right", "=", "activations_end", "[", "activation", "]", "/", "sample_rate", ",", "fill_color", "=", "color", ",", "fill_alpha", "=", "0.1", ")", "list_figures_1", "[", "0", "]", ".", "add_layout", "(", "box_annotation", ")", "list_figures_1", "[", "1", "]", ".", "add_layout", "(", "box_annotation_copy", ")", "gridplot_1", "=", "gridplot", "(", "[", "[", "list_figures_1", "[", "0", "]", "]", ",", "[", "list_figures_1", "[", "1", "]", "]", "]", ",", "*", "*", "opensignals_kwargs", "(", "\"gridplot\"", ")", ")", "show", "(", "gridplot_1", ")" ]
----- Brief ----- Graphical representation of the EMG median power frequency evolution time series. ----------- Description ----------- Function intended to generate a Bokeh figure in a 2x1 format, where each muscular activation period is identified by a colored box and the median frequency evolution plot is also presented. The first cell presents the EMG signal, highlighting each muscular activation. The second cell shares the time scale of the first (the two plots are synchronized) and plots the evolution time series of the EMG median frequency. One median power frequency value (sample) is extracted per muscular activation period, so the analysis window is a muscular activation period. Median power frequency is a commonly used parameter for evaluating muscular fatigue. It is widely accepted that this parameter decreases as fatigue sets in. Applied in the Notebook "Fatigue Evaluation - Evolution of Median Power Frequency". ---------- Parameters ---------- time_signal : list List with the time axis samples of the EMG signal. signal : list List with the EMG signal to present. time_median_freq : list List with the time axis samples of the median frequency evolution time series. median_freq : list List with the median frequency samples. activations_begin : list List with the samples where each muscular activation period starts. activations_end : list List with the samples where each muscular activation period ends. sample_rate : int Sampling rate of the acquisition. file_name : str Path containing the destination folder where the Bokeh figure will be stored.
[ "-----", "Brief", "-----", "Graphical", "representation", "of", "the", "EMG", "median", "power", "frequency", "evolution", "time", "series", "." ]
python
train
tanghaibao/jcvi
jcvi/formats/posmap.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/posmap.py#L155-L180
def index(args): """ %prog index frgscf.sorted Compress frgscf.sorted and index it using `tabix`. """ p = OptionParser(index.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(p.print_help()) frgscffile, = args gzfile = frgscffile + ".gz" cmd = "bgzip -c {0}".format(frgscffile) if not op.exists(gzfile): sh(cmd, outfile=gzfile) tbifile = gzfile + ".tbi" # Sequence, begin, end in 2, 3, 4-th column, respectively cmd = "tabix -s 2 -b 3 -e 4 {0}".format(gzfile) if not op.exists(tbifile): sh(cmd)
[ "def", "index", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "index", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "p", ".", "print_help", "(", ")", ")", "frgscffile", ",", "=", "args", "gzfile", "=", "frgscffile", "+", "\".gz\"", "cmd", "=", "\"bgzip -c {0}\"", ".", "format", "(", "frgscffile", ")", "if", "not", "op", ".", "exists", "(", "gzfile", ")", ":", "sh", "(", "cmd", ",", "outfile", "=", "gzfile", ")", "tbifile", "=", "gzfile", "+", "\".tbi\"", "# Sequence, begin, end in 2, 3, 4-th column, respectively", "cmd", "=", "\"tabix -s 2 -b 3 -e 4 {0}\"", ".", "format", "(", "gzfile", ")", "if", "not", "op", ".", "exists", "(", "tbifile", ")", ":", "sh", "(", "cmd", ")" ]
%prog index frgscf.sorted Compress frgscf.sorted and index it using `tabix`.
[ "%prog", "index", "frgscf", ".", "sorted" ]
python
train
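The function shells out to the two htslib tools named in the command strings; a standalone sketch of the same steps using only the standard library, assuming `bgzip` and `tabix` are on the PATH:

import os.path as op
import subprocess

def index_posmap(frgscffile):
    gzfile = frgscffile + ".gz"
    if not op.exists(gzfile):
        # bgzip -c writes a block-gzipped copy that tabix can index
        with open(gzfile, "wb") as out:
            subprocess.run(["bgzip", "-c", frgscffile], stdout=out, check=True)
    tbifile = gzfile + ".tbi"
    if not op.exists(tbifile):
        # sequence name, begin and end are in columns 2, 3 and 4
        subprocess.run(["tabix", "-s", "2", "-b", "3", "-e", "4", gzfile],
                       check=True)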
galaxy-genome-annotation/python-apollo
apollo/client.py
https://github.com/galaxy-genome-annotation/python-apollo/blob/2bc9991302abe4402ec2885dcaac35915475b387/apollo/client.py#L29-L65
def post(self, client_method, data, post_params=None, is_json=True): """Make a POST request""" url = self._wa.apollo_url + self.CLIENT_BASE + client_method if post_params is None: post_params = {} headers = { 'Content-Type': 'application/json' } data.update({ 'username': self._wa.username, 'password': self._wa.password, }) curl_command = ['curl', url] for (k, v) in headers.items(): curl_command += ['-H', quote('%s: %s' % (k, v))] curl_command += ['-d', quote(json.dumps(data))] log.info(' '.join(curl_command)) resp = requests.post(url, data=json.dumps(data), headers=headers, verify=self.__verify, params=post_params, allow_redirects=False, **self._request_args) if resp.status_code == 200 or resp.status_code == 302: if is_json: data = resp.json() return self._scrub_data(data) else: return resp.text # @see self.body for HTTP response body raise Exception("Unexpected response from apollo %s: %s" % (resp.status_code, resp.text))
[ "def", "post", "(", "self", ",", "client_method", ",", "data", ",", "post_params", "=", "None", ",", "is_json", "=", "True", ")", ":", "url", "=", "self", ".", "_wa", ".", "apollo_url", "+", "self", ".", "CLIENT_BASE", "+", "client_method", "if", "post_params", "is", "None", ":", "post_params", "=", "{", "}", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", "data", ".", "update", "(", "{", "'username'", ":", "self", ".", "_wa", ".", "username", ",", "'password'", ":", "self", ".", "_wa", ".", "password", ",", "}", ")", "curl_command", "=", "[", "'curl'", ",", "url", "]", "for", "(", "k", ",", "v", ")", "in", "headers", ".", "items", "(", ")", ":", "curl_command", "+=", "[", "'-H'", ",", "quote", "(", "'%s: %s'", "%", "(", "k", ",", "v", ")", ")", "]", "curl_command", "+=", "[", "'-d'", ",", "quote", "(", "json", ".", "dumps", "(", "data", ")", ")", "]", "log", ".", "info", "(", "' '", ".", "join", "(", "curl_command", ")", ")", "resp", "=", "requests", ".", "post", "(", "url", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ",", "headers", "=", "headers", ",", "verify", "=", "self", ".", "__verify", ",", "params", "=", "post_params", ",", "allow_redirects", "=", "False", ",", "*", "*", "self", ".", "_request_args", ")", "if", "resp", ".", "status_code", "==", "200", "or", "resp", ".", "status_code", "==", "302", ":", "if", "is_json", ":", "data", "=", "resp", ".", "json", "(", ")", "return", "self", ".", "_scrub_data", "(", "data", ")", "else", ":", "return", "resp", ".", "text", "# @see self.body for HTTP response body", "raise", "Exception", "(", "\"Unexpected response from apollo %s: %s\"", "%", "(", "resp", ".", "status_code", ",", "resp", ".", "text", ")", ")" ]
Make a POST request
[ "Make", "a", "POST", "request" ]
python
train
72squared/redpipe
redpipe/keyspaces.py
https://github.com/72squared/redpipe/blob/e6ee518bc9f3e2fee323c8c53d08997799bd9b1b/redpipe/keyspaces.py#L629-L638
def getbit(self, name, offset): """ Returns a boolean indicating the value of ``offset`` in key :param name: str the name of the redis key :param offset: int :return: Future() """ with self.pipe as pipe: return pipe.getbit(self.redis_key(name), offset)
[ "def", "getbit", "(", "self", ",", "name", ",", "offset", ")", ":", "with", "self", ".", "pipe", "as", "pipe", ":", "return", "pipe", ".", "getbit", "(", "self", ".", "redis_key", "(", "name", ")", ",", "offset", ")" ]
Returns a boolean indicating the value of ``offset`` in key :param name: str the name of the redis key :param offset: int :return: Future()
[ "Returns", "a", "boolean", "indicating", "the", "value", "of", "offset", "in", "key" ]
python
train
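The wrapper proxies the Redis GETBIT command; a plain redis-py sketch of the underlying bit semantics, assuming a Redis server on localhost:

import redis

r = redis.Redis()
r.setbit("flags", 7, 1)        # set the bit at offset 7
print(r.getbit("flags", 7))    # -> 1
print(r.getbit("flags", 6))    # -> 0 (unset bits read as zero)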
saltstack/salt
salt/utils/decorators/state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/decorators/state.py#L75-L95
def unify(self, result): ''' While comments as a list are allowed, comments need to be strings for backward compatibility. See such claim here: https://github.com/saltstack/salt/pull/43070 Rules applied: - 'comment' is joined into a multi-line string, in case the value is a list. - 'result' should always be either True, False or None. :param result: :return: ''' if isinstance(result.get('comment'), list): result['comment'] = u'\n'.join([ salt.utils.stringutils.to_unicode(elm) for elm in result['comment'] ]) if result.get('result') is not None: result['result'] = bool(result['result']) return result
[ "def", "unify", "(", "self", ",", "result", ")", ":", "if", "isinstance", "(", "result", ".", "get", "(", "'comment'", ")", ",", "list", ")", ":", "result", "[", "'comment'", "]", "=", "u'\\n'", ".", "join", "(", "[", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "elm", ")", "for", "elm", "in", "result", "[", "'comment'", "]", "]", ")", "if", "result", ".", "get", "(", "'result'", ")", "is", "not", "None", ":", "result", "[", "'result'", "]", "=", "bool", "(", "result", "[", "'result'", "]", ")", "return", "result" ]
While comments as a list are allowed, comments need to be strings for backward compatibility. See such claim here: https://github.com/saltstack/salt/pull/43070 Rules applied: - 'comment' is joined into a multi-line string, in case the value is a list. - 'result' should always be either True, False or None. :param result: :return:
[ "While", "comments", "as", "a", "list", "are", "allowed", "comments", "needs", "to", "be", "strings", "for", "backward", "compatibility", ".", "See", "such", "claim", "here", ":", "https", ":", "//", "github", ".", "com", "/", "saltstack", "/", "salt", "/", "pull", "/", "43070" ]
python
train
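Both normalization rules are easy to see on a toy result dict; a dependency-free sketch that mirrors the logic without the salt unicode helper:

def unify(result):
    # Rule 1: a list comment is joined into one multi-line string.
    if isinstance(result.get('comment'), list):
        result['comment'] = '\n'.join(str(elm) for elm in result['comment'])
    # Rule 2: a non-None result is coerced to a strict bool.
    if result.get('result') is not None:
        result['result'] = bool(result['result'])
    return result

print(unify({'comment': ['step one', 'step two'], 'result': 1}))
# {'comment': 'step one\nstep two', 'result': True}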
keon/algorithms
algorithms/strings/decode_string.py
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/strings/decode_string.py#L20-L38
def decode_string(s): """ :type s: str :rtype: str """ stack = []; cur_num = 0; cur_string = '' for c in s: if c == '[': stack.append((cur_string, cur_num)) cur_string = '' cur_num = 0 elif c == ']': prev_string, num = stack.pop() cur_string = prev_string + num * cur_string elif c.isdigit(): cur_num = cur_num*10 + int(c) else: cur_string += c return cur_string
[ "def", "decode_string", "(", "s", ")", ":", "stack", "=", "[", "]", "cur_num", "=", "0", "cur_string", "=", "''", "for", "c", "in", "s", ":", "if", "c", "==", "'['", ":", "stack", ".", "append", "(", "(", "cur_string", ",", "cur_num", ")", ")", "cur_string", "=", "''", "cur_num", "=", "0", "elif", "c", "==", "']'", ":", "prev_string", ",", "num", "=", "stack", ".", "pop", "(", ")", "cur_string", "=", "prev_string", "+", "num", "*", "cur_string", "elif", "c", ".", "isdigit", "(", ")", ":", "cur_num", "=", "cur_num", "*", "10", "+", "int", "(", "c", ")", "else", ":", "cur_string", "+=", "c", "return", "cur_string" ]
:type s: str :rtype: str
[ ":", "type", "s", ":", "str", ":", "rtype", ":", "str" ]
python
train
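A quick sanity check of the stack-based decoder; the import path follows the repository layout shown above, and the function is pure, so it can also be pasted directly:

from algorithms.strings.decode_string import decode_string

assert decode_string("3[a]2[bc]") == "aaabcbc"
# Nested brackets are resolved innermost-first via the stack:
assert decode_string("3[a2[c]]") == "accaccacc"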
TeamHG-Memex/eli5
eli5/sklearn/utils.py
https://github.com/TeamHG-Memex/eli5/blob/371b402a0676295c05e582a2dd591f7af476b86b/eli5/sklearn/utils.py#L196-L213
def get_num_features(estimator): """ Return size of a feature vector estimator expects as an input. """ if hasattr(estimator, 'coef_'): # linear models if len(estimator.coef_.shape) == 0: return 1 return estimator.coef_.shape[-1] elif hasattr(estimator, 'feature_importances_'): # ensembles return estimator.feature_importances_.shape[-1] elif hasattr(estimator, 'feature_count_'): # naive bayes return estimator.feature_count_.shape[-1] elif hasattr(estimator, 'theta_'): return estimator.theta_.shape[-1] elif hasattr(estimator, 'estimators_') and len(estimator.estimators_): # OvR return get_num_features(estimator.estimators_[0]) else: raise ValueError("Can't figure out feature vector size for %s" % estimator)
[ "def", "get_num_features", "(", "estimator", ")", ":", "if", "hasattr", "(", "estimator", ",", "'coef_'", ")", ":", "# linear models", "if", "len", "(", "estimator", ".", "coef_", ".", "shape", ")", "==", "0", ":", "return", "1", "return", "estimator", ".", "coef_", ".", "shape", "[", "-", "1", "]", "elif", "hasattr", "(", "estimator", ",", "'feature_importances_'", ")", ":", "# ensembles", "return", "estimator", ".", "feature_importances_", ".", "shape", "[", "-", "1", "]", "elif", "hasattr", "(", "estimator", ",", "'feature_count_'", ")", ":", "# naive bayes", "return", "estimator", ".", "feature_count_", ".", "shape", "[", "-", "1", "]", "elif", "hasattr", "(", "estimator", ",", "'theta_'", ")", ":", "return", "estimator", ".", "theta_", ".", "shape", "[", "-", "1", "]", "elif", "hasattr", "(", "estimator", ",", "'estimators_'", ")", "and", "len", "(", "estimator", ".", "estimators_", ")", ":", "# OvR", "return", "get_num_features", "(", "estimator", ".", "estimators_", "[", "0", "]", ")", "else", ":", "raise", "ValueError", "(", "\"Can't figure out feature vector size for %s\"", "%", "estimator", ")" ]
Return size of a feature vector estimator expects as an input.
[ "Return", "size", "of", "a", "feature", "vector", "estimator", "expects", "as", "an", "input", "." ]
python
train
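A small check of the `coef_` branch against a fitted scikit-learn model, assuming scikit-learn is installed (`get_num_features` is importable from `eli5.sklearn.utils` per the path above):

import numpy as np
from sklearn.linear_model import LogisticRegression
from eli5.sklearn.utils import get_num_features

X = np.random.RandomState(0).rand(20, 5)
y = (X[:, 0] > 0.5).astype(int)
clf = LogisticRegression().fit(X, y)

# For binary problems coef_ has shape (1, 5), so shape[-1] is 5
print(clf.coef_.shape[-1])      # -> 5
print(get_num_features(clf))    # -> 5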
qacafe/cdrouter.py
cdrouter/system.py
https://github.com/qacafe/cdrouter.py/blob/aacf2c6ab0b987250f7b1892f4bba14bb2b7dbe5/cdrouter/system.py#L425-L433
def space(self): """Get system disk space usage. :return: :class:`system.Space <system.Space>` object :rtype: system.Space """ schema = SpaceSchema() resp = self.service.get(self.base+'space/') return self.service.decode(schema, resp)
[ "def", "space", "(", "self", ")", ":", "schema", "=", "SpaceSchema", "(", ")", "resp", "=", "self", ".", "service", ".", "get", "(", "self", ".", "base", "+", "'space/'", ")", "return", "self", ".", "service", ".", "decode", "(", "schema", ",", "resp", ")" ]
Get system disk space usage. :return: :class:`system.Space <system.Space>` object :rtype: system.Space
[ "Get", "system", "disk", "space", "usage", "." ]
python
train
cggh/scikit-allel
allel/model/ndarray.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L3429-L3470
def locate_key(self, key): """Get index location for the requested key. Parameters ---------- key : object Value to locate. Returns ------- loc : int or slice Location of `key` (will be slice if there are duplicate entries). Examples -------- >>> import allel >>> idx = allel.SortedIndex([3, 6, 6, 11]) >>> idx.locate_key(3) 0 >>> idx.locate_key(11) 3 >>> idx.locate_key(6) slice(1, 3, None) >>> try: ... idx.locate_key(2) ... except KeyError as e: ... print(e) ... 2 """ left = bisect.bisect_left(self, key) right = bisect.bisect_right(self, key) diff = right - left if diff == 0: raise KeyError(key) elif diff == 1: return left else: return slice(left, right)
[ "def", "locate_key", "(", "self", ",", "key", ")", ":", "left", "=", "bisect", ".", "bisect_left", "(", "self", ",", "key", ")", "right", "=", "bisect", ".", "bisect_right", "(", "self", ",", "key", ")", "diff", "=", "right", "-", "left", "if", "diff", "==", "0", ":", "raise", "KeyError", "(", "key", ")", "elif", "diff", "==", "1", ":", "return", "left", "else", ":", "return", "slice", "(", "left", ",", "right", ")" ]
Get index location for the requested key. Parameters ---------- key : object Value to locate. Returns ------- loc : int or slice Location of `key` (will be slice if there are duplicate entries). Examples -------- >>> import allel >>> idx = allel.SortedIndex([3, 6, 6, 11]) >>> idx.locate_key(3) 0 >>> idx.locate_key(11) 3 >>> idx.locate_key(6) slice(1, 3, None) >>> try: ... idx.locate_key(2) ... except KeyError as e: ... print(e) ... 2
[ "Get", "index", "location", "for", "the", "requested", "key", "." ]
python
train
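The duplicate handling hinges on `bisect_left`/`bisect_right` bracketing runs of equal values; a standard-library sketch of the same lookup on a plain sorted list:

import bisect

def locate_key(sorted_values, key):
    left = bisect.bisect_left(sorted_values, key)
    right = bisect.bisect_right(sorted_values, key)
    if right == left:
        raise KeyError(key)  # key is absent
    # A width-1 bracket is a unique hit; wider brackets cover duplicates.
    return left if right - left == 1 else slice(left, right)

idx = [3, 6, 6, 11]
print(locate_key(idx, 6))    # -> slice(1, 3, None), both copies of 6
print(locate_key(idx, 11))   # -> 3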
Qiskit/qiskit-terra
qiskit/transpiler/passes/mapping/dense_layout.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/transpiler/passes/mapping/dense_layout.py#L45-L66
def run(self, dag): """ Pick a convenient layout depending on the best matching qubit connectivity, and set the property `layout`. Args: dag (DAGCircuit): DAG to find layout for. Raises: TranspilerError: if dag wider than self.coupling_map """ num_dag_qubits = sum([qreg.size for qreg in dag.qregs.values()]) if num_dag_qubits > self.coupling_map.size(): raise TranspilerError('Number of qubits greater than device.') best_sub = self._best_subset(num_dag_qubits) layout = Layout() map_iter = 0 for qreg in dag.qregs.values(): for i in range(qreg.size): layout[(qreg, i)] = int(best_sub[map_iter]) map_iter += 1 self.property_set['layout'] = layout
[ "def", "run", "(", "self", ",", "dag", ")", ":", "num_dag_qubits", "=", "sum", "(", "[", "qreg", ".", "size", "for", "qreg", "in", "dag", ".", "qregs", ".", "values", "(", ")", "]", ")", "if", "num_dag_qubits", ">", "self", ".", "coupling_map", ".", "size", "(", ")", ":", "raise", "TranspilerError", "(", "'Number of qubits greater than device.'", ")", "best_sub", "=", "self", ".", "_best_subset", "(", "num_dag_qubits", ")", "layout", "=", "Layout", "(", ")", "map_iter", "=", "0", "for", "qreg", "in", "dag", ".", "qregs", ".", "values", "(", ")", ":", "for", "i", "in", "range", "(", "qreg", ".", "size", ")", ":", "layout", "[", "(", "qreg", ",", "i", ")", "]", "=", "int", "(", "best_sub", "[", "map_iter", "]", ")", "map_iter", "+=", "1", "self", ".", "property_set", "[", "'layout'", "]", "=", "layout" ]
Pick a convenient layout depending on the best matching qubit connectivity, and set the property `layout`. Args: dag (DAGCircuit): DAG to find layout for. Raises: TranspilerError: if dag wider than self.coupling_map
[ "Pick", "a", "convenient", "layout", "depending", "on", "the", "best", "matching", "qubit", "connectivity", "and", "set", "the", "property", "layout", "." ]
python
test
chrisrink10/basilisp
src/basilisp/lang/runtime.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/runtime.py#L567-L570
def get(cls, name: sym.Symbol) -> "Optional[Namespace]": """Get the namespace bound to the symbol `name` in the global namespace cache. Return the namespace if it exists or None otherwise.""" return cls._NAMESPACES.deref().entry(name, None)
[ "def", "get", "(", "cls", ",", "name", ":", "sym", ".", "Symbol", ")", "->", "\"Optional[Namespace]\"", ":", "return", "cls", ".", "_NAMESPACES", ".", "deref", "(", ")", ".", "entry", "(", "name", ",", "None", ")" ]
Get the namespace bound to the symbol `name` in the global namespace cache. Return the namespace if it exists or None otherwise.
[ "Get", "the", "namespace", "bound", "to", "the", "symbol", "name", "in", "the", "global", "namespace", "cache", ".", "Return", "the", "namespace", "if", "it", "exists", "or", "None", "otherwise", ".." ]
python
test
ttm/participationLegacy
participation/triplification/participaTriplification.py
https://github.com/ttm/participationLegacy/blob/d78975038a64ea018120889d019a559409dae631/participation/triplification/participaTriplification.py#L157-L170
def triplifyOverallStructures(self): """Insert into RDF graph the textual and network structures. Ideally, one should be able to make bags of words related to each item (communities, users, posts, comments, tags, etc). Interaction and friendship networks should be made. Human networks mediated by co-occurrence (time of posts, geographical locations, vocabulary, etc) should be addressed as well. """ if self.compute_networks: self.computeNetworks() if self.compute_bows: self.computeBows()
[ "def", "triplifyOverallStructures", "(", "self", ")", ":", "if", "self", ".", "compute_networks", ":", "self", ".", "computeNetworks", "(", ")", "if", "self", ".", "compute_bows", ":", "self", ".", "computeBows", "(", ")" ]
Insert into RDF graph the textual and network structures. Ideally, one should be able to make bags of words related to each item (communities, users, posts, comments, tags, etc). Interaction and friendship networks should be made. Human networks mediated by co-occurrence (time of posts, geographical locations, vocabulary, etc) should be addressed as well.
[ "Insert", "into", "RDF", "graph", "the", "textual", "and", "network", "structures", "." ]
python
train
idlesign/django-sitecats
sitecats/utils.py
https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/utils.py#L57-L85
def _cache_init(self): """Initializes local cache from Django cache if required.""" cache_ = cache.get(self.CACHE_ENTRY_NAME) if cache_ is None: categories = get_category_model().objects.order_by('sort_order') ids = {category.id: category for category in categories} aliases = {category.alias: category for category in categories if category.alias} parent_to_children = OrderedDict() # Preserve aliases order. for category in categories: parent_category = ids.get(category.parent_id, False) parent_alias = None if parent_category: parent_alias = parent_category.alias if parent_alias not in parent_to_children: parent_to_children[parent_alias] = [] parent_to_children[parent_alias].append(category.id) cache_ = { self.CACHE_NAME_IDS: ids, self.CACHE_NAME_PARENTS: parent_to_children, self.CACHE_NAME_ALIASES: aliases } cache.set(self.CACHE_ENTRY_NAME, cache_, self.CACHE_TIMEOUT) self._cache = cache_
[ "def", "_cache_init", "(", "self", ")", ":", "cache_", "=", "cache", ".", "get", "(", "self", ".", "CACHE_ENTRY_NAME", ")", "if", "cache_", "is", "None", ":", "categories", "=", "get_category_model", "(", ")", ".", "objects", ".", "order_by", "(", "'sort_order'", ")", "ids", "=", "{", "category", ".", "id", ":", "category", "for", "category", "in", "categories", "}", "aliases", "=", "{", "category", ".", "alias", ":", "category", "for", "category", "in", "categories", "if", "category", ".", "alias", "}", "parent_to_children", "=", "OrderedDict", "(", ")", "# Preserve aliases order.", "for", "category", "in", "categories", ":", "parent_category", "=", "ids", ".", "get", "(", "category", ".", "parent_id", ",", "False", ")", "parent_alias", "=", "None", "if", "parent_category", ":", "parent_alias", "=", "parent_category", ".", "alias", "if", "parent_alias", "not", "in", "parent_to_children", ":", "parent_to_children", "[", "parent_alias", "]", "=", "[", "]", "parent_to_children", "[", "parent_alias", "]", ".", "append", "(", "category", ".", "id", ")", "cache_", "=", "{", "self", ".", "CACHE_NAME_IDS", ":", "ids", ",", "self", ".", "CACHE_NAME_PARENTS", ":", "parent_to_children", ",", "self", ".", "CACHE_NAME_ALIASES", ":", "aliases", "}", "cache", ".", "set", "(", "self", ".", "CACHE_ENTRY_NAME", ",", "cache_", ",", "self", ".", "CACHE_TIMEOUT", ")", "self", ".", "_cache", "=", "cache_" ]
Initializes local cache from Django cache if required.
[ "Initializes", "local", "cache", "from", "Django", "cache", "if", "required", "." ]
python
train
waqasbhatti/astrobase
astrobase/checkplot/pkl_io.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/checkplot/pkl_io.py#L130-L179
def _read_checkplot_picklefile(checkplotpickle): '''This reads a checkplot gzipped pickle file back into a dict. NOTE: the try-except is for Python 2 pickles that have numpy arrays in them. Apparently, these aren't compatible with Python 3. See here: http://stackoverflow.com/q/11305790 The workaround is noted in this answer: http://stackoverflow.com/a/41366785 Parameters ---------- checkplotpickle : str The path to a checkplot pickle file. This can be a gzipped file (in which case the file extension should end in '.gz') Returns ------- dict This returns a checkplotdict. ''' if checkplotpickle.endswith('.gz'): try: with gzip.open(checkplotpickle,'rb') as infd: cpdict = pickle.load(infd) except UnicodeDecodeError: with gzip.open(checkplotpickle,'rb') as infd: cpdict = pickle.load(infd, encoding='latin1') else: try: with open(checkplotpickle,'rb') as infd: cpdict = pickle.load(infd) except UnicodeDecodeError: with open(checkplotpickle,'rb') as infd: cpdict = pickle.load(infd, encoding='latin1') return cpdict
[ "def", "_read_checkplot_picklefile", "(", "checkplotpickle", ")", ":", "if", "checkplotpickle", ".", "endswith", "(", "'.gz'", ")", ":", "try", ":", "with", "gzip", ".", "open", "(", "checkplotpickle", ",", "'rb'", ")", "as", "infd", ":", "cpdict", "=", "pickle", ".", "load", "(", "infd", ")", "except", "UnicodeDecodeError", ":", "with", "gzip", ".", "open", "(", "checkplotpickle", ",", "'rb'", ")", "as", "infd", ":", "cpdict", "=", "pickle", ".", "load", "(", "infd", ",", "encoding", "=", "'latin1'", ")", "else", ":", "try", ":", "with", "open", "(", "checkplotpickle", ",", "'rb'", ")", "as", "infd", ":", "cpdict", "=", "pickle", ".", "load", "(", "infd", ")", "except", "UnicodeDecodeError", ":", "with", "open", "(", "checkplotpickle", ",", "'rb'", ")", "as", "infd", ":", "cpdict", "=", "pickle", ".", "load", "(", "infd", ",", "encoding", "=", "'latin1'", ")", "return", "cpdict" ]
This reads a checkplot gzipped pickle file back into a dict. NOTE: the try-except is for Python 2 pickles that have numpy arrays in them. Apparently, these aren't compatible with Python 3. See here: http://stackoverflow.com/q/11305790 The workaround is noted in this answer: http://stackoverflow.com/a/41366785 Parameters ---------- checkplotpickle : str The path to a checkplot pickle file. This can be a gzipped file (in which case the file extension should end in '.gz') Returns ------- dict This returns a checkplotdict.
[ "This", "reads", "a", "checkplot", "gzipped", "pickle", "file", "back", "into", "a", "dict", "." ]
python
valid
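The latin1 fallback is the standard workaround for Python 2 pickles containing numpy arrays; a minimal round-trip sketch using only the standard library (the file name is illustrative):

import gzip
import pickle

# Write a gzipped pickle the same way the checkplot writers do.
with gzip.open('checkplot-demo.pkl.gz', 'wb') as outfd:
    pickle.dump({'objectid': 'demo-001', 'period': 1.234}, outfd)

# Read it back; encoding='latin1' is only needed for legacy pickles.
with gzip.open('checkplot-demo.pkl.gz', 'rb') as infd:
    try:
        cpdict = pickle.load(infd)
    except UnicodeDecodeError:
        infd.seek(0)
        cpdict = pickle.load(infd, encoding='latin1')

print(cpdict['objectid'])  # -> 'demo-001'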
acutesoftware/AIKIF
aikif/toolbox/cls_grid.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/cls_grid.py#L52-L58
def save(self, fname): """ saves a grid to file as ASCII text """ try: with open(fname, "w") as f: f.write(str(self)) except Exception as ex: print('ERROR = cannot save grid results to ' + fname + str(ex))
[ "def", "save", "(", "self", ",", "fname", ")", ":", "try", ":", "with", "open", "(", "fname", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "str", "(", "self", ")", ")", "except", "Exception", "as", "ex", ":", "print", "(", "'ERROR = cant save grid results to '", "+", "fname", "+", "str", "(", "ex", ")", ")" ]
saves a grid to file as ASCII text
[ "saves", "a", "grid", "to", "file", "as", "ASCII", "text" ]
python
train
craffel/mir_eval
mir_eval/segment.py
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/segment.py#L663-L701
def _mutual_info_score(reference_indices, estimated_indices, contingency=None): """Compute the mutual information between two sequence labelings. Parameters ---------- reference_indices : np.ndarray Array of reference indices estimated_indices : np.ndarray Array of estimated indices contingency : np.ndarray Pre-computed contingency matrix. If None, one will be computed. (Default value = None) Returns ------- mi : float Mutual information .. note:: Based on sklearn.metrics.cluster.mutual_info_score """ if contingency is None: contingency = _contingency_matrix(reference_indices, estimated_indices).astype(float) contingency_sum = np.sum(contingency) pi = np.sum(contingency, axis=1) pj = np.sum(contingency, axis=0) outer = np.outer(pi, pj) nnz = contingency != 0.0 # normalized contingency contingency_nm = contingency[nnz] log_contingency_nm = np.log(contingency_nm) contingency_nm /= contingency_sum # log(a / b) should be calculated as log(a) - log(b) for # possible loss of precision log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum()) mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum)) + contingency_nm * log_outer) return mi.sum()
[ "def", "_mutual_info_score", "(", "reference_indices", ",", "estimated_indices", ",", "contingency", "=", "None", ")", ":", "if", "contingency", "is", "None", ":", "contingency", "=", "_contingency_matrix", "(", "reference_indices", ",", "estimated_indices", ")", ".", "astype", "(", "float", ")", "contingency_sum", "=", "np", ".", "sum", "(", "contingency", ")", "pi", "=", "np", ".", "sum", "(", "contingency", ",", "axis", "=", "1", ")", "pj", "=", "np", ".", "sum", "(", "contingency", ",", "axis", "=", "0", ")", "outer", "=", "np", ".", "outer", "(", "pi", ",", "pj", ")", "nnz", "=", "contingency", "!=", "0.0", "# normalized contingency", "contingency_nm", "=", "contingency", "[", "nnz", "]", "log_contingency_nm", "=", "np", ".", "log", "(", "contingency_nm", ")", "contingency_nm", "/=", "contingency_sum", "# log(a / b) should be calculated as log(a) - log(b) for", "# possible loss of precision", "log_outer", "=", "-", "np", ".", "log", "(", "outer", "[", "nnz", "]", ")", "+", "np", ".", "log", "(", "pi", ".", "sum", "(", ")", ")", "+", "np", ".", "log", "(", "pj", ".", "sum", "(", ")", ")", "mi", "=", "(", "contingency_nm", "*", "(", "log_contingency_nm", "-", "np", ".", "log", "(", "contingency_sum", ")", ")", "+", "contingency_nm", "*", "log_outer", ")", "return", "mi", ".", "sum", "(", ")" ]
Compute the mutual information between two sequence labelings. Parameters ---------- reference_indices : np.ndarray Array of reference indices estimated_indices : np.ndarray Array of estimated indices contingency : np.ndarray Pre-computed contingency matrix. If None, one will be computed. (Default value = None) Returns ------- mi : float Mutual information .. note:: Based on sklearn.metrics.cluster.mutual_info_score
[ "Compute", "the", "mutual", "information", "between", "two", "sequence", "labelings", "." ]
python
train
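Since the helper is noted as based on `sklearn.metrics.cluster.mutual_info_score`, the public scikit-learn function computes the same quantity (in nats) on integer labelings, assuming scikit-learn is installed:

import numpy as np
from sklearn.metrics import mutual_info_score

reference = np.array([0, 0, 1, 1, 2, 2])
estimated = np.array([0, 0, 1, 1, 1, 2])

# Mutual information between the two segment labelings, in nats.
print(mutual_info_score(reference, estimated))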
KyleJamesWalker/yamlsettings
yamlsettings/extensions/registry.py
https://github.com/KyleJamesWalker/yamlsettings/blob/ddd7df2ca995ddf191b24c4d35e9dd28186e4535/yamlsettings/extensions/registry.py#L39-L45
def _discover(self): """Find and install all extensions""" for ep in pkg_resources.iter_entry_points('yamlsettings10'): ext = ep.load() if callable(ext): ext = ext() self.add(ext)
[ "def", "_discover", "(", "self", ")", ":", "for", "ep", "in", "pkg_resources", ".", "iter_entry_points", "(", "'yamlsettings10'", ")", ":", "ext", "=", "ep", ".", "load", "(", ")", "if", "callable", "(", "ext", ")", ":", "ext", "=", "ext", "(", ")", "self", ".", "add", "(", "ext", ")" ]
Find and install all extensions
[ "Find", "and", "install", "all", "extensions" ]
python
train
saltstack/salt
salt/states/selinux.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/selinux.py#L292-L315
def module_remove(name): ''' Removes SELinux module name The name of the module to remove .. versionadded:: 2016.11.6 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} modules = __salt__['selinux.list_semod']() if name not in modules: ret['comment'] = 'Module {0} is not available'.format(name) ret['result'] = False return ret if __salt__['selinux.remove_semod'](name): ret['comment'] = 'Module {0} has been removed'.format(name) return ret ret['result'] = False ret['comment'] = 'Failed to remove module {0}'.format(name) return ret
[ "def", "module_remove", "(", "name", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "modules", "=", "__salt__", "[", "'selinux.list_semod'", "]", "(", ")", "if", "name", "not", "in", "modules", ":", "ret", "[", "'comment'", "]", "=", "'Module {0} is not available'", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "False", "return", "ret", "if", "__salt__", "[", "'selinux.remove_semod'", "]", "(", "name", ")", ":", "ret", "[", "'comment'", "]", "=", "'Module {0} has been removed'", ".", "format", "(", "name", ")", "return", "ret", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to remove module {0}'", ".", "format", "(", "name", ")", "return", "ret" ]
Removes SELinux module name The name of the module to remove .. versionadded:: 2016.11.6
[ "Removes", "SELinux", "module" ]
python
train
odlgroup/odl
odl/discr/lp_discr.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/lp_discr.py#L697-L714
def imag(self, newimag): """Set the imaginary part of this element to ``newimag``. This method is invoked by ``x.imag = other``. Parameters ---------- newimag : array-like or scalar Values to be assigned to the imaginary part of this element. Raises ------ ValueError If the space is real, i.e., no imaginary part can be set. """ if self.space.is_real: raise ValueError('cannot set imaginary part in real spaces') self.tensor.imag = newimag
[ "def", "imag", "(", "self", ",", "newimag", ")", ":", "if", "self", ".", "space", ".", "is_real", ":", "raise", "ValueError", "(", "'cannot set imaginary part in real spaces'", ")", "self", ".", "tensor", ".", "imag", "=", "newimag" ]
Set the imaginary part of this element to ``newimag``. This method is invoked by ``x.imag = other``. Parameters ---------- newimag : array-like or scalar Values to be assigned to the imaginary part of this element. Raises ------ ValueError If the space is real, i.e., no imaginary part can be set.
[ "Set", "the", "imaginary", "part", "of", "this", "element", "to", "newimag", "." ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_bin_run.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_run.py#L1453-L1466
def stage_tc_create_tag(self, tag, resource): """Add a tag to a resource. Args: tag (str): The tag to be added to the resource. resource (obj): An instance of tcex resource class. """ tag_resource = resource.tags(self.tcex.safetag(tag)) tag_resource.http_method = 'POST' t_response = tag_resource.request() if t_response.get('status') != 'Success': self.log.warning( '[tcex] Failed adding tag "{}" ({}).'.format(tag, t_response.get('response').text) )
[ "def", "stage_tc_create_tag", "(", "self", ",", "tag", ",", "resource", ")", ":", "tag_resource", "=", "resource", ".", "tags", "(", "self", ".", "tcex", ".", "safetag", "(", "tag", ")", ")", "tag_resource", ".", "http_method", "=", "'POST'", "t_response", "=", "tag_resource", ".", "request", "(", ")", "if", "t_response", ".", "get", "(", "'status'", ")", "!=", "'Success'", ":", "self", ".", "log", ".", "warning", "(", "'[tcex] Failed adding tag \"{}\" ({}).'", ".", "format", "(", "tag", ",", "t_response", ".", "get", "(", "'response'", ")", ".", "text", ")", ")" ]
Add a tag to a resource. Args: tag (str): The tag to be added to the resource. resource (obj): An instance of tcex resource class.
[ "Add", "a", "tag", "to", "a", "resource", "." ]
python
train
Julius2342/pyvlx
pyvlx/frames/frame_get_version.py
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_get_version.py#L56-L61
def from_payload(self, payload): """Init frame from binary data.""" self._software_version = payload[0:6] self.hardware_version = payload[6] self.product_group = payload[7] self.product_type = payload[8]
[ "def", "from_payload", "(", "self", ",", "payload", ")", ":", "self", ".", "_software_version", "=", "payload", "[", "0", ":", "6", "]", "self", ".", "hardware_version", "=", "payload", "[", "6", "]", "self", ".", "product_group", "=", "payload", "[", "7", "]", "self", ".", "product_type", "=", "payload", "[", "8", "]" ]
Init frame from binary data.
[ "Init", "frame", "from", "binary", "data", "." ]
python
train
Kozea/cairocffi
cairocffi/fonts.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/fonts.py#L465-L472
def set_hint_style(self, hint_style): """Changes the :ref:`HINT_STYLE` for the font options object. This controls whether to fit font outlines to the pixel grid, and if so, whether to optimize for fidelity or contrast. """ cairo.cairo_font_options_set_hint_style(self._pointer, hint_style) self._check_status()
[ "def", "set_hint_style", "(", "self", ",", "hint_style", ")", ":", "cairo", ".", "cairo_font_options_set_hint_style", "(", "self", ".", "_pointer", ",", "hint_style", ")", "self", ".", "_check_status", "(", ")" ]
Changes the :ref:`HINT_STYLE` for the font options object. This controls whether to fit font outlines to the pixel grid, and if so, whether to optimize for fidelity or contrast.
[ "Changes", "the", ":", "ref", ":", "HINT_STYLE", "for", "the", "font", "options", "object", ".", "This", "controls", "whether", "to", "fit", "font", "outlines", "to", "the", "pixel", "grid", "and", "if", "so", "whether", "to", "optimize", "for", "fidelity", "or", "contrast", "." ]
python
train
titusjan/argos
argos/config/abstractcti.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/config/abstractcti.py#L590-L606
def addSubEditor(self, subEditor, isFocusProxy=False): """ Adds a sub editor to the layout (at the right but before the reset button) Will add the necessary event filter to handle tabs and sets the strong focus so that events will not propagate to the tree view. If isFocusProxy is True the sub editor will be the focus proxy of the CTI. """ self.hBoxLayout.insertWidget(len(self._subEditors), subEditor) self._subEditors.append(subEditor) subEditor.installEventFilter(self) subEditor.setFocusPolicy(Qt.StrongFocus) if isFocusProxy: self.setFocusProxy(subEditor) return subEditor
[ "def", "addSubEditor", "(", "self", ",", "subEditor", ",", "isFocusProxy", "=", "False", ")", ":", "self", ".", "hBoxLayout", ".", "insertWidget", "(", "len", "(", "self", ".", "_subEditors", ")", ",", "subEditor", ")", "self", ".", "_subEditors", ".", "append", "(", "subEditor", ")", "subEditor", ".", "installEventFilter", "(", "self", ")", "subEditor", ".", "setFocusPolicy", "(", "Qt", ".", "StrongFocus", ")", "if", "isFocusProxy", ":", "self", ".", "setFocusProxy", "(", "subEditor", ")", "return", "subEditor" ]
Adds a sub editor to the layout (at the right but before the reset button) Will add the necessary event filter to handle tabs and sets the strong focus so that events will not propagate to the tree view. If isFocusProxy is True the sub editor will be the focus proxy of the CTI.
[ "Adds", "a", "sub", "editor", "to", "the", "layout", "(", "at", "the", "right", "but", "before", "the", "reset", "button", ")", "Will", "add", "the", "necessary", "event", "filter", "to", "handle", "tabs", "and", "sets", "the", "strong", "focus", "so", "that", "events", "will", "not", "propagate", "to", "the", "tree", "view", "." ]
python
train
krukas/Trionyx
trionyx/trionyx/views/core.py
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L304-L320
def items(self): """Get all list items""" query = self.get_queryset() fields = self.get_model_config().get_list_fields() for item in query.iterator(): row = OrderedDict() for field_name in self.get_current_fields(): field = fields.get(field_name) if not field: row[field_name] = '' continue if hasattr(item, field['field']): row[field_name] = getattr(item, field['field']) else: row[field_name] = '' # TODO Maybe render field and strip html? yield row
[ "def", "items", "(", "self", ")", ":", "query", "=", "self", ".", "get_queryset", "(", ")", "fields", "=", "self", ".", "get_model_config", "(", ")", ".", "get_list_fields", "(", ")", "for", "item", "in", "query", ".", "iterator", "(", ")", ":", "row", "=", "OrderedDict", "(", ")", "for", "field_name", "in", "self", ".", "get_current_fields", "(", ")", ":", "field", "=", "fields", ".", "get", "(", "field_name", ")", "if", "not", "field_name", ":", "row", "[", "field_name", "]", "=", "''", "if", "hasattr", "(", "item", ",", "field", "[", "'field'", "]", ")", ":", "row", "[", "field_name", "]", "=", "getattr", "(", "item", ",", "field", "[", "'field'", "]", ")", "else", ":", "row", "[", "field_name", "]", "=", "''", "# TODO Maybe render field ans strip html?", "yield", "row" ]
Get all list items
[ "Get", "all", "list", "items" ]
python
train
eddieantonio/perfection
perfection/forest.py
https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/forest.py#L104-L113
def edges(self): """ Edges of this graph, in canonical order. """ canonical_edges = set() for v1, neighbours in self._vertices.items(): for v2 in neighbours: edge = self.canonical_order((v1, v2)) canonical_edges.add(edge) return canonical_edges
[ "def", "edges", "(", "self", ")", ":", "canonical_edges", "=", "set", "(", ")", "for", "v1", ",", "neighbours", "in", "self", ".", "_vertices", ".", "items", "(", ")", ":", "for", "v2", "in", "neighbours", ":", "edge", "=", "self", ".", "canonical_order", "(", "(", "v1", ",", "v2", ")", ")", "canonical_edges", ".", "add", "(", "edge", ")", "return", "canonical_edges" ]
Edges of this graph, in canonical order.
[ "Edges", "of", "this", "graph", "in", "canonical", "order", "." ]
python
train
vnmabus/dcor
dcor/_utils.py
https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_utils.py#L66-L78
def _can_be_double(x): """ Return if the array can be safely converted to double. That happens when the dtype is a float with the same size as a double or narrower, or when it is an integer that can be safely converted to double (if the roundtrip conversion works). """ return ((np.issubdtype(x.dtype, np.floating) and x.dtype.itemsize <= np.dtype(float).itemsize) or (np.issubdtype(x.dtype, np.signedinteger) and np.can_cast(x, float)))
[ "def", "_can_be_double", "(", "x", ")", ":", "return", "(", "(", "np", ".", "issubdtype", "(", "x", ".", "dtype", ",", "np", ".", "floating", ")", "and", "x", ".", "dtype", ".", "itemsize", "<=", "np", ".", "dtype", "(", "float", ")", ".", "itemsize", ")", "or", "(", "np", ".", "issubdtype", "(", "x", ".", "dtype", ",", "np", ".", "signedinteger", ")", "and", "np", ".", "can_cast", "(", "x", ",", "float", ")", ")", ")" ]
Return if the array can be safely converted to double. That happens when the dtype is a float with the same size as a double or narrower, or when it is an integer that can be safely converted to double (if the roundtrip conversion works).
[ "Return", "if", "the", "array", "can", "be", "safely", "converted", "to", "double", "." ]
python
train
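A quick illustration of the checks the predicate combines, assuming NumPy; the dtype-based form of `can_cast` is used here for clarity:

import numpy as np

# float32 is safe: a floating dtype no wider than a double (4 <= 8 bytes).
x = np.zeros(3, dtype=np.float32)
print(np.issubdtype(x.dtype, np.floating),
      x.dtype.itemsize <= np.dtype(float).itemsize)    # True True

# int32 is safe: every value is exactly representable as a double.
print(np.can_cast(np.dtype(np.int32), float))          # True
# int64 is not: large 64-bit integers lose precision in a double.
print(np.can_cast(np.dtype(np.int64), float))          # False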
KelSolaar/Umbra
umbra/ui/widgets/basic_QPlainTextEdit.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/widgets/basic_QPlainTextEdit.py#L566-L583
def set_content(self, content): """ Sets document with given content while providing undo capability. :param content: Content to set. :type content: list :return: Method success. :rtype: bool """ cursor = self.textCursor() cursor.movePosition(QTextCursor.Start, QTextCursor.MoveAnchor) cursor.movePosition(QTextCursor.End, QTextCursor.KeepAnchor) cursor.removeSelectedText() for line in content: self.moveCursor(QTextCursor.End) self.insertPlainText(line) return True
[ "def", "set_content", "(", "self", ",", "content", ")", ":", "cursor", "=", "self", ".", "textCursor", "(", ")", "cursor", ".", "movePosition", "(", "QTextCursor", ".", "Start", ",", "QTextCursor", ".", "MoveAnchor", ")", "cursor", ".", "movePosition", "(", "QTextCursor", ".", "End", ",", "QTextCursor", ".", "KeepAnchor", ")", "cursor", ".", "removeSelectedText", "(", ")", "for", "line", "in", "content", ":", "self", ".", "moveCursor", "(", "QTextCursor", ".", "End", ")", "self", ".", "insertPlainText", "(", "line", ")", "return", "True" ]
Sets document with given content while providing undo capability. :param content: Content to set. :type content: list :return: Method success. :rtype: bool
[ "Sets", "document", "with", "given", "content", "while", "providing", "undo", "capability", "." ]
python
train
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L3540-L3545
def setContentLen(self, content, len): """Replace the content of a node. NOTE: @content is supposed to be a piece of XML CDATA, so it allows entity references, but XML special chars need to be escaped first by using xmlEncodeEntitiesReentrant() resp. xmlEncodeSpecialChars(). """ libxml2mod.xmlNodeSetContentLen(self._o, content, len)
[ "def", "setContentLen", "(", "self", ",", "content", ",", "len", ")", ":", "libxml2mod", ".", "xmlNodeSetContentLen", "(", "self", ".", "_o", ",", "content", ",", "len", ")" ]
Replace the content of a node. NOTE: @content is supposed to be a piece of XML CDATA, so it allows entity references, but XML special chars need to be escaped first by using xmlEncodeEntitiesReentrant() resp. xmlEncodeSpecialChars().
[ "Replace", "the", "content", "of", "a", "node", ".", "NOTE", ":" ]
python
train
Becksteinlab/GromacsWrapper
gromacs/fileformats/xvg.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/fileformats/xvg.py#L997-L1015
def decimate_rms(self, a, maxpoints, **kwargs): """Return data *a* rms-decimated on *maxpoints*. Histograms each column into *maxpoints* bins and calculates the root mean square sum in each bin as the decimated data, using :func:`numkit.timeseries.rms_histogrammed_function`. The coarse grained time in the first column contains the centers of the histogram time. If *a* contains <= *maxpoints* then *a* is simply returned; otherwise a new array of the same dimensions but with a reduced number of *maxpoints* points is returned. .. Note:: Assumes that the first column is time. """ return self._decimate(numkit.timeseries.rms_histogrammed_function, a, maxpoints, **kwargs)
[ "def", "decimate_rms", "(", "self", ",", "a", ",", "maxpoints", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_decimate", "(", "numkit", ".", "timeseries", ".", "rms_histogrammed_function", ",", "a", ",", "maxpoints", ",", "*", "*", "kwargs", ")" ]
Return data *a* rms-decimated on *maxpoints*. Histograms each column into *maxpoints* bins and calculates the root mean square sum in each bin as the decimated data, using :func:`numkit.timeseries.rms_histogrammed_function`. The coarse grained time in the first column contains the centers of the histogram time. If *a* contains <= *maxpoints* then *a* is simply returned; otherwise a new array of the same dimensions but with a reduced number of *maxpoints* points is returned. .. Note:: Assumes that the first column is time.
[ "Return", "data", "*", "a", "*", "rms", "-", "decimated", "on", "*", "maxpoints", "*", "." ]
python
valid
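numkit.timeseries.rms_histogrammed_function does the real work here; a standalone numpy sketch of the same idea (bin the time axis into maxpoints bins, take the RMS of each bin, return the bin centers) might look like the hypothetical helper below. This is an illustration, not the GromacsWrapper implementation:

import numpy as np

def rms_decimate(t, y, maxpoints):
    """Bin (t, y) into maxpoints time bins; return bin centers and per-bin RMS."""
    if len(t) <= maxpoints:
        return t, y
    edges = np.linspace(t[0], t[-1], maxpoints + 1)
    centers = 0.5 * (edges[:-1] + edges[1:])
    # Assign each sample to a bin; clip so t[-1] lands in the last bin.
    idx = np.clip(np.digitize(t, edges) - 1, 0, maxpoints - 1)
    rms = np.array([np.sqrt(np.mean(y[idx == i] ** 2)) if np.any(idx == i)
                    else np.nan for i in range(maxpoints)])
    return centers, rms

t = np.linspace(0.0, 10.0, 1000)
y = np.sin(t) + 0.1 * np.random.randn(1000)
tc, yc = rms_decimate(t, y, 100)
print(tc.shape, yc.shape)   # (100,) (100,)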
wummel/linkchecker
linkcheck/url.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/url.py#L558-L572
def is_duplicate_content_url(url1, url2): """Check if both URLs are allowed to point to the same content.""" if url1 == url2: return True if url2 in url1: url1 = shorten_duplicate_content_url(url1) if not url2.endswith('/') and url1.endswith('/'): url2 += '/' return url1 == url2 if url1 in url2: url2 = shorten_duplicate_content_url(url2) if not url1.endswith('/') and url2.endswith('/'): url1 += '/' return url1 == url2 return False
[ "def", "is_duplicate_content_url", "(", "url1", ",", "url2", ")", ":", "if", "url1", "==", "url2", ":", "return", "True", "if", "url2", "in", "url1", ":", "url1", "=", "shorten_duplicate_content_url", "(", "url1", ")", "if", "not", "url2", ".", "endswith", "(", "'/'", ")", "and", "url1", ".", "endswith", "(", "'/'", ")", ":", "url2", "+=", "'/'", "return", "url1", "==", "url2", "if", "url1", "in", "url2", ":", "url2", "=", "shorten_duplicate_content_url", "(", "url2", ")", "if", "not", "url1", ".", "endswith", "(", "'/'", ")", "and", "url2", ".", "endswith", "(", "'/'", ")", ":", "url1", "+=", "'/'", "return", "url1", "==", "url2", "return", "False" ]
Check if both URLs are allowed to point to the same content.
[ "Check", "if", "both", "URLs", "are", "allowed", "to", "point", "to", "the", "same", "content", "." ]
python
train
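The function relies on shorten_duplicate_content_url(), which strips known duplicate-content suffixes; the stub below is a hypothetical stand-in, not linkchecker's version. With that stub, the substring-plus-shorten logic can be exercised directly:

def shorten_duplicate_content_url(url):
    # Hypothetical stand-in: drop a trailing index.html, the kind of
    # duplicate-content suffix the real helper handles.
    if url.endswith("index.html"):
        return url[:-len("index.html")]
    return url

def is_duplicate_content_url(url1, url2):
    """Check if both URLs are allowed to point to the same content."""
    if url1 == url2:
        return True
    if url2 in url1:
        url1 = shorten_duplicate_content_url(url1)
        if not url2.endswith('/') and url1.endswith('/'):
            url2 += '/'
        return url1 == url2
    if url1 in url2:
        url2 = shorten_duplicate_content_url(url2)
        if not url1.endswith('/') and url2.endswith('/'):
            url1 += '/'
        return url1 == url2
    return False

print(is_duplicate_content_url("http://example.com/index.html",
                               "http://example.com/"))   # True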
CartoDB/carto-python
carto/maps.py
https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/maps.py#L39-L106
def get_tile_url(self, x, y, z, layer_id=None, feature_id=None, filter=None, extension="png"): """ Prepares a URL to get data (raster or vector) from a NamedMap or AnonymousMap :param x: The x tile :param y: The y tile :param z: The zoom level :param layer_id: Can be a number (referring to the # layer of your \ map), all layers of your map, or a list of layers. To show just the basemap layer, enter the value 0 To show the first layer, enter the value 1 To show all layers, enter the value 'all' To show a list of layers, enter the comma separated \ layer value as '0,1,2' :param feature_id: The id of the feature :param filter: The filter to be applied to the layer :param extension: The format of the data to be retrieved: png, mvt, ... :type x: int :type y: int :type z: int :type layer_id: str :type feature_id: str :type filter: str :type extension: str :return: A URL to download data :rtype: str :raise: CartoException """ base_url = self.client.base_url + self.Meta.collection_endpoint template_id = self.template_id if hasattr(self, 'template_id') \ else self.layergroupid if layer_id is not None and feature_id is not None: url = urljoin(base_url, "{template_id}/{layer}/attributes/{feature_id}"). \ format(template_id=template_id, layer=layer_id, feature_id=feature_id) elif layer_id is not None and filter is not None: url = urljoin(base_url, "{template_id}/{filter}/{z}/{x}/{y}.{extension}"). \ format(template_id=template_id, filter=filter, z=z, x=x, y=y, extension=extension) elif layer_id is not None: url = urljoin(base_url, "{template_id}/{layer}/{z}/{x}/{y}.{extension}"). \ format(template_id=template_id, layer=layer_id, z=z, x=x, y=y, extension=extension) else: url = urljoin(base_url, "{template_id}/{z}/{x}/{y}.{extension}"). \ format( template_id=template_id, z=z, x=x, y=y, extension=extension) if hasattr(self, 'auth') and self.auth is not None \ and len(self.auth['valid_tokens']) > 0: url = urljoin(url, "?auth_token={auth_token}"). \ format(auth_token=self.auth['valid_tokens'][0]) return url
[ "def", "get_tile_url", "(", "self", ",", "x", ",", "y", ",", "z", ",", "layer_id", "=", "None", ",", "feature_id", "=", "None", ",", "filter", "=", "None", ",", "extension", "=", "\"png\"", ")", ":", "base_url", "=", "self", ".", "client", ".", "base_url", "+", "self", ".", "Meta", ".", "collection_endpoint", "template_id", "=", "self", ".", "template_id", "if", "hasattr", "(", "self", ",", "'template_id'", ")", "else", "self", ".", "layergroupid", "if", "layer_id", "is", "not", "None", "and", "feature_id", "is", "not", "None", ":", "url", "=", "urljoin", "(", "base_url", ",", "\"{template_id}/{layer}/attributes/{feature_id}\"", ")", ".", "format", "(", "template_id", "=", "template_id", ",", "layer", "=", "layer_id", ",", "feature_id", "=", "feature_id", ")", "elif", "layer_id", "is", "not", "None", "and", "filter", "is", "not", "None", ":", "url", "=", "urljoin", "(", "base_url", ",", "\"{template_id}/{filter}/{z}/{x}/{y}.{extension}\"", ")", ".", "format", "(", "template_id", "=", "template_id", ",", "filter", "=", "filter", ",", "z", "=", "z", ",", "x", "=", "x", ",", "y", "=", "y", ",", "extension", "=", "extension", ")", "elif", "layer_id", "is", "not", "None", ":", "url", "=", "urljoin", "(", "base_url", ",", "\"{template_id}/{layer}/{z}/{x}/{y}.{extension}\"", ")", ".", "format", "(", "template_id", "=", "template_id", ",", "layer", "=", "layer_id", ",", "z", "=", "z", ",", "x", "=", "x", ",", "y", "=", "y", ",", "extension", "=", "extension", ")", "else", ":", "url", "=", "urljoin", "(", "base_url", ",", "\"{template_id}/{z}/{x}/{y}.{extension}\"", ")", ".", "format", "(", "template_id", "=", "template_id", ",", "z", "=", "z", ",", "x", "=", "x", ",", "y", "=", "y", ",", "extension", "=", "extension", ")", "if", "hasattr", "(", "self", ",", "'auth'", ")", "and", "self", ".", "auth", "is", "not", "None", "and", "len", "(", "self", ".", "auth", "[", "'valid_tokens'", "]", ")", ">", "0", ":", "url", "=", "urljoin", "(", "url", ",", "\"?auth_token={auth_token}\"", ")", ".", "format", "(", "auth_token", "=", "self", ".", "auth", "[", "'valid_tokens'", "]", "[", "0", "]", ")", "return", "url" ]
Prepares a URL to get data (raster or vector) from a NamedMap or AnonymousMap :param x: The x tile :param y: The y tile :param z: The zoom level :param layer_id: Can be a number (referring to the # layer of your \ map), all layers of your map, or a list of layers. To show just the basemap layer, enter the value 0 To show the first layer, enter the value 1 To show all layers, enter the value 'all' To show a list of layers, enter the comma separated \ layer value as '0,1,2' :param feature_id: The id of the feature :param filter: The filter to be applied to the layer :param extension: The format of the data to be retrieved: png, mvt, ... :type x: int :type y: int :type z: int :type layer_id: str :type feature_id: str :type filter: str :type extension: str :return: A URL to download data :rtype: str :raise: CartoException
[ "Prepares", "a", "URL", "to", "get", "data", "(", "raster", "or", "vector", ")", "from", "a", "NamedMap", "or", "AnonymousMap" ]
python
train
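Stripped of the Maps API client, the method reduces to picking one of four URL templates based on which arguments were given. A self-contained sketch of that branching (the base URL and template id below are fakes, not real CARTO endpoints):

from urllib.parse import urljoin

def tile_url(base_url, template_id, x, y, z, layer_id=None,
             feature_id=None, filter=None, extension="png"):
    if layer_id is not None and feature_id is not None:
        path = "{template_id}/{layer}/attributes/{feature_id}".format(
            template_id=template_id, layer=layer_id, feature_id=feature_id)
    elif layer_id is not None and filter is not None:
        path = "{template_id}/{filter}/{z}/{x}/{y}.{extension}".format(
            template_id=template_id, filter=filter, z=z, x=x, y=y,
            extension=extension)
    elif layer_id is not None:
        path = "{template_id}/{layer}/{z}/{x}/{y}.{extension}".format(
            template_id=template_id, layer=layer_id, z=z, x=x, y=y,
            extension=extension)
    else:
        path = "{template_id}/{z}/{x}/{y}.{extension}".format(
            template_id=template_id, z=z, x=x, y=y, extension=extension)
    return urljoin(base_url, path)

base = "https://example.carto.test/api/v1/map/"          # hypothetical
print(tile_url(base, "tpl123", 301, 384, 10))            # basemap tile
print(tile_url(base, "tpl123", 301, 384, 10, layer_id="all"))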
StanfordBioinformatics/loom
server/loomengine_server/api/models/base.py
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/models/base.py#L126-L147
def filter_by_name_or_id_or_tag(self, query_string, queryset = None): """Find objects that match the identifier of form {name}@{ID}, {name}, or @{ID}, where ID may be truncated """ assert self.Model.NAME_FIELD, \ 'NAME_FIELD is missing on model %s' % self.Model.__name__ assert self.Model.ID_FIELD, \ 'ID_FIELD is missing on model %s' % self.Model.__name__ assert self.Model.TAG_FIELD, \ 'TAG_FIELD is missing on model %s' % self.Model.__name__ filter_args = {} name, uuid, tag = self._parse_as_name_or_id_or_tag(query_string) if name is not None: filter_args[self.Model.NAME_FIELD] = name if uuid is not None: filter_args[self.Model.ID_FIELD+'__startswith'] = uuid if tag is not None: filter_args[self.Model.TAG_FIELD] = tag if queryset is None: queryset = self.Model.objects.all() return queryset.filter(**filter_args)
[ "def", "filter_by_name_or_id_or_tag", "(", "self", ",", "query_string", ",", "queryset", "=", "None", ")", ":", "assert", "self", ".", "Model", ".", "NAME_FIELD", ",", "'NAME_FIELD is missing on model %s'", "%", "self", ".", "Model", ".", "__name__", "assert", "self", ".", "Model", ".", "ID_FIELD", ",", "'ID_FIELD is missing on model %s'", "%", "self", ".", "Model", ".", "__name__", "assert", "self", ".", "Model", ".", "TAG_FIELD", ",", "'TAG_FIELD is missing on model %s'", "%", "self", ".", "Model", ".", "__name__", "filter_args", "=", "{", "}", "name", ",", "uuid", ",", "tag", "=", "self", ".", "_parse_as_name_or_id_or_tag", "(", "query_string", ")", "if", "name", "is", "not", "None", ":", "filter_args", "[", "self", ".", "Model", ".", "NAME_FIELD", "]", "=", "name", "if", "uuid", "is", "not", "None", ":", "filter_args", "[", "self", ".", "Model", ".", "ID_FIELD", "+", "'__startswith'", "]", "=", "uuid", "if", "tag", "is", "not", "None", ":", "filter_args", "[", "self", ".", "Model", ".", "TAG_FIELD", "]", "=", "tag", "if", "queryset", "is", "None", ":", "queryset", "=", "self", ".", "Model", ".", "objects", ".", "all", "(", ")", "return", "queryset", ".", "filter", "(", "*", "*", "filter_args", ")" ]
Find objects that match the identifier of form {name}@{ID}, {name}, or @{ID}, where ID may be truncated
[ "Find", "objects", "that", "match", "the", "identifier", "of", "form", "{", "name", "}" ]
python
train
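The core trick is building the ORM filter kwargs dynamically and splatting them into .filter(**filter_args); the same pattern works anywhere keyword names are data. A plain-Python sketch with hypothetical field names, no Django required:

def build_filter_args(name=None, uuid=None, tag=None,
                      name_field="name", id_field="uuid",
                      tag_field="tags__tag"):
    # Map whichever identifier parts were parsed onto model field lookups;
    # a truncated ID becomes a __startswith lookup, exactly as above.
    filter_args = {}
    if name is not None:
        filter_args[name_field] = name
    if uuid is not None:
        filter_args[id_field + "__startswith"] = uuid
    if tag is not None:
        filter_args[tag_field] = tag
    return filter_args

print(build_filter_args(name="myrun", uuid="a1b2"))
# {'name': 'myrun', 'uuid__startswith': 'a1b2'}
# queryset.filter(**build_filter_args(...)) then ANDs the lookups together.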
proycon/pynlpl
pynlpl/algorithms.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/algorithms.py#L19-L31
def sum_to_n(n, size, limit=None): #from http://stackoverflow.com/questions/2065553/python-get-all-numbers-that-add-up-to-a-number """Produce all lists of `size` positive integers in decreasing order that add up to `n`.""" if size == 1: yield [n] return if limit is None: limit = n start = (n + size - 1) // size stop = min(limit, n - size + 1) + 1 for i in range(start, stop): for tail in sum_to_n(n - i, size - 1, i): yield [i] + tail
[ "def", "sum_to_n", "(", "n", ",", "size", ",", "limit", "=", "None", ")", ":", "#from http://stackoverflow.com/questions/2065553/python-get-all-numbers-that-add-up-to-a-number", "if", "size", "==", "1", ":", "yield", "[", "n", "]", "return", "if", "limit", "is", "None", ":", "limit", "=", "n", "start", "=", "(", "n", "+", "size", "-", "1", ")", "//", "size", "stop", "=", "min", "(", "limit", ",", "n", "-", "size", "+", "1", ")", "+", "1", "for", "i", "in", "range", "(", "start", ",", "stop", ")", ":", "for", "tail", "in", "sum_to_n", "(", "n", "-", "i", ",", "size", "-", "1", ",", "i", ")", ":", "yield", "[", "i", "]", "+", "tail" ]
Produce all lists of `size` positive integers in decreasing order that add up to `n`.
[ "Produce", "all", "lists", "of", "size", "positive", "integers", "in", "decreasing", "order", "that", "add", "up", "to", "n", "." ]
python
train
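The generator is self-contained, so its contract is easy to check: every emitted list is in decreasing order and sums to n. Copying the function verbatim and running it:

def sum_to_n(n, size, limit=None):
    """Produce all lists of `size` positive integers in decreasing order
    that add up to `n`."""
    if size == 1:
        yield [n]
        return
    if limit is None:
        limit = n
    start = (n + size - 1) // size
    stop = min(limit, n - size + 1) + 1
    for i in range(start, stop):
        for tail in sum_to_n(n - i, size - 1, i):
            yield [i] + tail

print(list(sum_to_n(5, 3)))   # [[2, 2, 1], [3, 1, 1]]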
pdkit/pdkit
pdkit/utils.py
https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/utils.py#L753-L802
def separate_walks_turns(data, window=[1, 1, 1]): """ Will separate peaks into the clusters by following the trend in the clusters array. This is useful because scipy's k-means clustering will give us a continuous clusters array. :param clusters array: A continuous array representing different classes. :param peaks array: The peaks that we want to separate into the classes from the clusters. :return walks arrays: An array of arrays that will have all the peaks corresponding to every individual walk. :return turns arrays: Array of arrays which has all the indices of the peaks that correspond to turning. """ clusters, peaks, promi = cluster_walk_turn(data, window=window) group_one = [] group_two = [] start = 0 for i in range(1, len(clusters)): if clusters[i-1] != clusters[i]: assert np.all(clusters[start: i] == clusters[start]), 'Some values are mixed up, please check!' add = group_one if clusters[start] == 0 else group_two add.append(peaks[start: i]) start = i # hacky fix for the last part of the signal ... # I need to change this ... if i == len(clusters)-1: if not peaks[start] in add[-1]: add = group_one if clusters[start] == 0 else group_two add.append(peaks[start: ]) maxes_one = [np.max(data[c]) for c in group_one] maxes_two = [np.max(data[c]) for c in group_two] walks, turns = group_two, group_one if np.max(maxes_one) > np.max(maxes_two): walks, turns = group_one, group_two # let's drop any turns at the end of the signal # if len(turns[-1]) > len(walks[-1]): # turns.pop() return walks, turns
[ "def", "separate_walks_turns", "(", "data", ",", "window", "=", "[", "1", ",", "1", ",", "1", "]", ")", ":", "clusters", ",", "peaks", ",", "promi", "=", "cluster_walk_turn", "(", "data", ",", "window", "=", "window", ")", "group_one", "=", "[", "]", "group_two", "=", "[", "]", "start", "=", "0", "for", "i", "in", "range", "(", "1", ",", "len", "(", "clusters", ")", ")", ":", "if", "clusters", "[", "i", "-", "1", "]", "!=", "clusters", "[", "i", "]", ":", "assert", "np", ".", "all", "(", "clusters", "[", "start", ":", "i", "]", "==", "clusters", "[", "start", "]", ")", ",", "'Some values are mixed up, please check!'", "add", "=", "group_one", "if", "clusters", "[", "start", "]", "==", "0", "else", "group_two", "add", ".", "append", "(", "peaks", "[", "start", ":", "i", "]", ")", "start", "=", "i", "# hacky fix for the last part of the signal ...", "# I need to change this ...", "if", "i", "==", "len", "(", "clusters", ")", "-", "1", ":", "if", "not", "peaks", "[", "start", "]", "in", "add", "[", "-", "1", "]", ":", "add", "=", "group_one", "if", "clusters", "[", "start", "]", "==", "0", "else", "group_two", "add", ".", "append", "(", "peaks", "[", "start", ":", "]", ")", "maxes_one", "=", "[", "np", ".", "max", "(", "data", "[", "c", "]", ")", "for", "c", "in", "group_one", "]", "maxes_two", "=", "[", "np", ".", "max", "(", "data", "[", "c", "]", ")", "for", "c", "in", "group_two", "]", "walks", ",", "turns", "=", "group_two", ",", "group_one", "if", "np", ".", "max", "(", "maxes_one", ")", ">", "np", ".", "max", "(", "maxes_two", ")", ":", "walks", ",", "turns", "=", "group_one", ",", "group_two", "# let's drop any turns at the end of the signal", "# if len(turns[-1]) > len(walks[-1]):", "# turns.pop()", "return", "walks", ",", "turns" ]
Will separate peaks into the clusters by following the trend in the clusters array. This is useful because scipy's k-means clustering will give us a continuous clusters array. :param clusters array: A continuous array representing different classes. :param peaks array: The peaks that we want to separate into the classes from the clusters. :return walks arrays: An array of arrays that will have all the peaks corresponding to every individual walk. :return turns arrays: Array of arrays which has all the indices of the peaks that correspond to turning.
[ "Will", "separate", "peaks", "into", "the", "clusters", "by", "following", "the", "trend", "in", "the", "clusters", "array", ".", "This", "is", "usedful", "because", "scipy", "s", "k", "-", "mean", "clustering", "will", "give", "us", "a", "continous", "clusters", "array", ".", ":", "param", "clusters", "array", ":", "A", "continous", "array", "representing", "different", "classes", ".", ":", "param", "peaks", "array", ":", "The", "peaks", "that", "we", "want", "to", "separate", "into", "the", "classes", "from", "the", "custers", ".", ":", "return", "walks", "arrays", ":", "An", "array", "of", "arrays", "that", "will", "have", "all", "the", "peaks", "corresponding", "to", "every", "individual", "walk", ".", ":", "return", "turns", "arraays", ":", "Array", "of", "array", "which", "has", "all", "the", "indices", "of", "the", "peaks", "that", "correspond", "to", "turning", "." ]
python
train
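The grouping step is essentially "split a label array into contiguous runs and route each run's peaks by its label". A standalone numpy sketch of just that step (cluster_walk_turn and the signal itself are out of scope here):

import numpy as np

def split_runs(clusters, peaks):
    """Split peaks into per-run groups wherever the cluster label changes."""
    group_zero, group_one = [], []
    start = 0
    for i in range(1, len(clusters) + 1):
        # A run ends at the array boundary or where the label flips.
        if i == len(clusters) or clusters[i] != clusters[start]:
            dest = group_zero if clusters[start] == 0 else group_one
            dest.append(peaks[start:i])
            start = i
    return group_zero, group_one

clusters = np.array([0, 0, 1, 1, 1, 0])
peaks = np.array([12, 40, 77, 103, 150, 188])
zeros, ones = split_runs(clusters, peaks)
print(zeros)   # [array([12, 40]), array([188])]
print(ones)    # [array([ 77, 103, 150])]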
gwastro/pycbc
pycbc/workflow/minifollowups.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/minifollowups.py#L591-L682
def make_qscan_plot(workflow, ifo, trig_time, out_dir, injection_file=None, data_segments=None, time_window=100, tags=None): """ Generate a make_qscan node and add it to workflow. This function generates a single node of the singles_timefreq executable and adds it to the current workflow. Parent/child relationships are set by the input/output files automatically. Parameters ----------- workflow: pycbc.workflow.core.Workflow The workflow class that stores the jobs that will be run. ifo: str Which interferometer are we using? trig_time: int The time of the trigger being followed up. out_dir: str Location of directory to output to injection_file: pycbc.workflow.File (optional, default=None) If given, add the injections in the file to strain before making the plot. data_segments: ligo.segments.segmentlist (optional, default=None) The list of segments for which data exists and can be read in. If given the start/end times given to singles_timefreq will be adjusted if [trig_time - time_window, trig_time + time_window] does not completely lie within a valid data segment. A ValueError will be raised if the trig_time is not within a valid segment, or if it is not possible to find 2*time_window (plus the padding) of continuous data around the trigger. This **must** be coalesced. time_window: int (optional, default=None) The amount of data (not including padding) that will be read in by the singles_timefreq job. The default value of 100s should be fine for most cases. tags: list (optional, default=None) List of tags to add to the created nodes, which determine file naming. """ tags = [] if tags is None else tags makedir(out_dir) name = 'plot_qscan' curr_exe = PlotQScanExecutable(workflow.cp, name, ifos=[ifo], out_dir=out_dir, tags=tags) node = curr_exe.create_node() # Determine start/end times, using data segments if needed. # Begin by choosing "optimal" times start = trig_time - time_window end = trig_time + time_window # Then if data_segments is available, check against that, and move if # needed if data_segments is not None: # Assumes coalesced, so trig_time can only be within one segment for seg in data_segments: if trig_time in seg: data_seg = seg break elif trig_time == -1.0: node.add_opt('--gps-start-time', int(trig_time)) node.add_opt('--gps-end-time', int(trig_time)) node.add_opt('--center-time', trig_time) caption_string = "'No trigger in %s'" % ifo node.add_opt('--plot-caption', caption_string) node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') workflow += node return node.output_files else: err_msg = "Trig time {} ".format(trig_time) err_msg += "does not seem to lie within any data segments. " err_msg += "This shouldn't be possible, please ask for help!" raise ValueError(err_msg) # Check for pad-data if curr_exe.has_opt('pad-data'): pad_data = int(curr_exe.get_opt('pad-data')) else: pad_data = 0 # We only read data that's available. The code must handle the case # of not much data being available. if end > (data_seg[1] - pad_data): end = data_seg[1] - pad_data if start < (data_seg[0] + pad_data): start = data_seg[0] + pad_data node.add_opt('--gps-start-time', int(start)) node.add_opt('--gps-end-time', int(end)) node.add_opt('--center-time', trig_time) if injection_file is not None: node.add_input_opt('--injection-file', injection_file) node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file') workflow += node return node.output_files
[ "def", "make_qscan_plot", "(", "workflow", ",", "ifo", ",", "trig_time", ",", "out_dir", ",", "injection_file", "=", "None", ",", "data_segments", "=", "None", ",", "time_window", "=", "100", ",", "tags", "=", "None", ")", ":", "tags", "=", "[", "]", "if", "tags", "is", "None", "else", "tags", "makedir", "(", "out_dir", ")", "name", "=", "'plot_qscan'", "curr_exe", "=", "PlotQScanExecutable", "(", "workflow", ".", "cp", ",", "name", ",", "ifos", "=", "[", "ifo", "]", ",", "out_dir", "=", "out_dir", ",", "tags", "=", "tags", ")", "node", "=", "curr_exe", ".", "create_node", "(", ")", "# Determine start/end times, using data segments if needed.", "# Begin by choosing \"optimal\" times", "start", "=", "trig_time", "-", "time_window", "end", "=", "trig_time", "+", "time_window", "# Then if data_segments is available, check against that, and move if", "# needed", "if", "data_segments", "is", "not", "None", ":", "# Assumes coalesced, so trig_time can only be within one segment", "for", "seg", "in", "data_segments", ":", "if", "trig_time", "in", "seg", ":", "data_seg", "=", "seg", "break", "elif", "trig_time", "==", "-", "1.0", ":", "node", ".", "add_opt", "(", "'--gps-start-time'", ",", "int", "(", "trig_time", ")", ")", "node", ".", "add_opt", "(", "'--gps-end-time'", ",", "int", "(", "trig_time", ")", ")", "node", ".", "add_opt", "(", "'--center-time'", ",", "trig_time", ")", "caption_string", "=", "\"'No trigger in %s'\"", "%", "ifo", "node", ".", "add_opt", "(", "'--plot-caption'", ",", "caption_string", ")", "node", ".", "new_output_file_opt", "(", "workflow", ".", "analysis_time", ",", "'.png'", ",", "'--output-file'", ")", "workflow", "+=", "node", "return", "node", ".", "output_files", "else", ":", "err_msg", "=", "\"Trig time {} \"", ".", "format", "(", "trig_time", ")", "err_msg", "+=", "\"does not seem to lie within any data segments. \"", "err_msg", "+=", "\"This shouldn't be possible, please ask for help!\"", "raise", "ValueError", "(", "err_msg", ")", "# Check for pad-data", "if", "curr_exe", ".", "has_opt", "(", "'pad-data'", ")", ":", "pad_data", "=", "int", "(", "curr_exe", ".", "get_opt", "(", "'pad-data'", ")", ")", "else", ":", "pad_data", "=", "0", "# We only read data that's available. The code must handle the case", "# of not much data being available.", "if", "end", ">", "(", "data_seg", "[", "1", "]", "-", "pad_data", ")", ":", "end", "=", "data_seg", "[", "1", "]", "-", "pad_data", "if", "start", "<", "(", "data_seg", "[", "0", "]", "+", "pad_data", ")", ":", "start", "=", "data_seg", "[", "0", "]", "+", "pad_data", "node", ".", "add_opt", "(", "'--gps-start-time'", ",", "int", "(", "start", ")", ")", "node", ".", "add_opt", "(", "'--gps-end-time'", ",", "int", "(", "end", ")", ")", "node", ".", "add_opt", "(", "'--center-time'", ",", "trig_time", ")", "if", "injection_file", "is", "not", "None", ":", "node", ".", "add_input_opt", "(", "'--injection-file'", ",", "injection_file", ")", "node", ".", "new_output_file_opt", "(", "workflow", ".", "analysis_time", ",", "'.png'", ",", "'--output-file'", ")", "workflow", "+=", "node", "return", "node", ".", "output_files" ]
Generate a make_qscan node and add it to workflow. This function generates a single node of the singles_timefreq executable and adds it to the current workflow. Parent/child relationships are set by the input/output files automatically. Parameters ----------- workflow: pycbc.workflow.core.Workflow The workflow class that stores the jobs that will be run. ifo: str Which interferometer are we using? trig_time: int The time of the trigger being followed up. out_dir: str Location of directory to output to injection_file: pycbc.workflow.File (optional, default=None) If given, add the injections in the file to strain before making the plot. data_segments: ligo.segments.segmentlist (optional, default=None) The list of segments for which data exists and can be read in. If given the start/end times given to singles_timefreq will be adjusted if [trig_time - time_window, trig_time + time_window] does not completely lie within a valid data segment. A ValueError will be raised if the trig_time is not within a valid segment, or if it is not possible to find 2*time_window (plus the padding) of continuous data around the trigger. This **must** be coalesced. time_window: int (optional, default=None) The amount of data (not including padding) that will be read in by the singles_timefreq job. The default value of 100s should be fine for most cases. tags: list (optional, default=None) List of tags to add to the created nodes, which determine file naming.
[ "Generate", "a", "make_qscan", "node", "and", "add", "it", "to", "workflow", "." ]
python
train
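The segment handling boils down to clamping [trig_time - window, trig_time + window] into the containing data segment minus the padding. A small standalone sketch of just that clamp, with illustrative numbers:

def clamp_to_segment(trig_time, window, segment, pad_data=0):
    """Clamp [trig_time - window, trig_time + window] into segment minus padding."""
    start = trig_time - window
    end = trig_time + window
    seg_start, seg_end = segment
    # Only read data that is actually available inside the segment.
    if end > seg_end - pad_data:
        end = seg_end - pad_data
    if start < seg_start + pad_data:
        start = seg_start + pad_data
    return int(start), int(end)

# Trigger near the end of a segment: the window is truncated on the right.
print(clamp_to_segment(trig_time=1000.5, window=100,
                       segment=(0, 1024), pad_data=8))   # (900, 1016)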
wishtack/pysynthetic
synthetic/synthetic_meta_data.py
https://github.com/wishtack/pysynthetic/blob/f37a4a2f1e0313b8c544f60d37c93726bc806ec6/synthetic/synthetic_meta_data.py#L64-L73
def insertSyntheticMemberAtBegin(self, synthesizedMember): """ :type synthesizedMember: SyntheticMember :raises DuplicateMemberNameError """ memberName = synthesizedMember.memberName() if memberName in [m.memberName() for m in self._syntheticMemberList]: raise DuplicateMemberNameError(memberName, self._class.__name__) self._syntheticMemberList.insert(0, synthesizedMember)
[ "def", "insertSyntheticMemberAtBegin", "(", "self", ",", "synthesizedMember", ")", ":", "memberName", "=", "synthesizedMember", ".", "memberName", "(", ")", "if", "memberName", "in", "[", "m", ".", "memberName", "(", ")", "for", "m", "in", "self", ".", "_syntheticMemberList", "]", ":", "raise", "DuplicateMemberNameError", "(", "memberName", ",", "self", ".", "_class", ".", "__name__", ")", "self", ".", "_syntheticMemberList", ".", "insert", "(", "0", ",", "synthesizedMember", ")" ]
:type synthesizedMember: SyntheticMember :raises DuplicateMemberNameError
[ ":", "type", "synthesizedMember", ":", "SyntheticMember", ":", "raises", "DuplicateMemberNameError" ]
python
train
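The guard is a plain membership check against the names already registered, followed by a prepend. A minimal standalone sketch with a hypothetical registry class in place of the synthetic-member machinery:

class DuplicateMemberNameError(Exception):
    pass

class MemberRegistry:
    """Hypothetical stand-in for the synthetic member list above."""
    def __init__(self):
        self._member_names = []

    def insert_at_begin(self, member_name):
        # Reject any name that is already registered, then prepend,
        # mirroring the memberName() check and insert(0, ...) above.
        if member_name in self._member_names:
            raise DuplicateMemberNameError(member_name)
        self._member_names.insert(0, member_name)

registry = MemberRegistry()
registry.insert_at_begin("radius")
registry.insert_at_begin("diameter")
print(registry._member_names)        # ['diameter', 'radius']
try:
    registry.insert_at_begin("radius")
except DuplicateMemberNameError as exc:
    print("duplicate:", exc)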
stephenmcd/django-socketio
django_socketio/example_project/chat/events.py
https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/example_project/chat/events.py#L10-L37
def message(request, socket, context, message): """ Event handler for a room receiving a message. First validates a joining user's name and sends them the list of users. """ room = get_object_or_404(ChatRoom, id=message["room"]) if message["action"] == "start": name = strip_tags(message["name"]) user, created = room.users.get_or_create(name=name) if not created: socket.send({"action": "in-use"}) else: context["user"] = user users = [u.name for u in room.users.exclude(id=user.id)] socket.send({"action": "started", "users": users}) user.session = socket.session.session_id user.save() joined = {"action": "join", "name": user.name, "id": user.id} socket.send_and_broadcast_channel(joined) else: try: user = context["user"] except KeyError: return if message["action"] == "message": message["message"] = strip_tags(message["message"]) message["name"] = user.name socket.send_and_broadcast_channel(message)
[ "def", "message", "(", "request", ",", "socket", ",", "context", ",", "message", ")", ":", "room", "=", "get_object_or_404", "(", "ChatRoom", ",", "id", "=", "message", "[", "\"room\"", "]", ")", "if", "message", "[", "\"action\"", "]", "==", "\"start\"", ":", "name", "=", "strip_tags", "(", "message", "[", "\"name\"", "]", ")", "user", ",", "created", "=", "room", ".", "users", ".", "get_or_create", "(", "name", "=", "name", ")", "if", "not", "created", ":", "socket", ".", "send", "(", "{", "\"action\"", ":", "\"in-use\"", "}", ")", "else", ":", "context", "[", "\"user\"", "]", "=", "user", "users", "=", "[", "u", ".", "name", "for", "u", "in", "room", ".", "users", ".", "exclude", "(", "id", "=", "user", ".", "id", ")", "]", "socket", ".", "send", "(", "{", "\"action\"", ":", "\"started\"", ",", "\"users\"", ":", "users", "}", ")", "user", ".", "session", "=", "socket", ".", "session", ".", "session_id", "user", ".", "save", "(", ")", "joined", "=", "{", "\"action\"", ":", "\"join\"", ",", "\"name\"", ":", "user", ".", "name", ",", "\"id\"", ":", "user", ".", "id", "}", "socket", ".", "send_and_broadcast_channel", "(", "joined", ")", "else", ":", "try", ":", "user", "=", "context", "[", "\"user\"", "]", "except", "KeyError", ":", "return", "if", "message", "[", "\"action\"", "]", "==", "\"message\"", ":", "message", "[", "\"message\"", "]", "=", "strip_tags", "(", "message", "[", "\"message\"", "]", ")", "message", "[", "\"name\"", "]", "=", "user", ".", "name", "socket", ".", "send_and_broadcast_channel", "(", "message", ")" ]
Event handler for a room receiving a message. First validates a joining user's name and sends them the list of users.
[ "Event", "handler", "for", "a", "room", "receiving", "a", "message", ".", "First", "validates", "a", "joining", "user", "s", "name", "and", "sends", "them", "the", "list", "of", "users", "." ]
python
train
OCR-D/core
ocrd/ocrd/processor/base.py
https://github.com/OCR-D/core/blob/57e68c578526cb955fd2e368207f5386c459d91d/ocrd/ocrd/processor/base.py#L76-L105
def run_cli( executable, mets_url=None, resolver=None, workspace=None, page_id=None, log_level=None, input_file_grp=None, output_file_grp=None, parameter=None, working_dir=None, ): """ Create a workspace for mets_url and run MP CLI through it """ workspace = _get_workspace(workspace, resolver, mets_url, working_dir) args = [executable, '--working-dir', workspace.directory] args += ['--mets', mets_url] if log_level: args += ['--log-level', log_level] if page_id: args += ['--page-id', page_id] if input_file_grp: args += ['--input-file-grp', input_file_grp] if output_file_grp: args += ['--output-file-grp', output_file_grp] if parameter: args += ['--parameter', parameter] log.debug("Running subprocess '%s'", ' '.join(args)) return subprocess.call(args)
[ "def", "run_cli", "(", "executable", ",", "mets_url", "=", "None", ",", "resolver", "=", "None", ",", "workspace", "=", "None", ",", "page_id", "=", "None", ",", "log_level", "=", "None", ",", "input_file_grp", "=", "None", ",", "output_file_grp", "=", "None", ",", "parameter", "=", "None", ",", "working_dir", "=", "None", ",", ")", ":", "workspace", "=", "_get_workspace", "(", "workspace", ",", "resolver", ",", "mets_url", ",", "working_dir", ")", "args", "=", "[", "executable", ",", "'--working-dir'", ",", "workspace", ".", "directory", "]", "args", "+=", "[", "'--mets'", ",", "mets_url", "]", "if", "log_level", ":", "args", "+=", "[", "'--log-level'", ",", "log_level", "]", "if", "page_id", ":", "args", "+=", "[", "'--page-id'", ",", "page_id", "]", "if", "input_file_grp", ":", "args", "+=", "[", "'--input-file-grp'", ",", "input_file_grp", "]", "if", "output_file_grp", ":", "args", "+=", "[", "'--output-file-grp'", ",", "output_file_grp", "]", "if", "parameter", ":", "args", "+=", "[", "'--parameter'", ",", "parameter", "]", "log", ".", "debug", "(", "\"Running subprocess '%s'\"", ",", "' '", ".", "join", "(", "args", ")", ")", "return", "subprocess", ".", "call", "(", "args", ")" ]
Create a workspace for mets_url and run MP CLI through it
[ "Create", "a", "workspace", "for", "mets_url", "and", "run", "MP", "CLI", "through", "it" ]
python
train
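The argv assembly pattern, appending flag/value pairs only when the option was actually given, is worth isolating. A sketch that builds and prints the command instead of spawning it (the executable name is illustrative):

def build_cli_args(executable, working_dir, mets_url, log_level=None,
                   page_id=None, input_file_grp=None, output_file_grp=None,
                   parameter=None):
    # Optional flags are appended only when a value was supplied, so the
    # subprocess never sees a literal '--flag None'.
    args = [executable, '--working-dir', working_dir, '--mets', mets_url]
    if log_level:
        args += ['--log-level', log_level]
    if page_id:
        args += ['--page-id', page_id]
    if input_file_grp:
        args += ['--input-file-grp', input_file_grp]
    if output_file_grp:
        args += ['--output-file-grp', output_file_grp]
    if parameter:
        args += ['--parameter', parameter]
    return args

print(' '.join(build_cli_args('ocrd-dummy', '/tmp/ws', 'mets.xml',
                              log_level='DEBUG')))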
GNS3/gns3-server
gns3server/controller/project.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/project.py#L902-L909
def start_all(self): """ Start all nodes """ pool = Pool(concurrency=3) for node in self.nodes.values(): pool.append(node.start) yield from pool.join()
[ "def", "start_all", "(", "self", ")", ":", "pool", "=", "Pool", "(", "concurrency", "=", "3", ")", "for", "node", "in", "self", ".", "nodes", ".", "values", "(", ")", ":", "pool", ".", "append", "(", "node", ".", "start", ")", "yield", "from", "pool", ".", "join", "(", ")" ]
Start all nodes
[ "Start", "all", "nodes" ]
python
train
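The Pool here bounds how many node.start coroutines run at once. The same effect can be sketched with a plain asyncio.Semaphore; this is a stand-in for illustration, not GNS3's Pool class:

import asyncio

async def start_all(node_starters, concurrency=3):
    sem = asyncio.Semaphore(concurrency)

    async def run_one(start):
        async with sem:          # at most `concurrency` starts in flight
            await start()

    await asyncio.gather(*(run_one(s) for s in node_starters))

async def fake_start(name):
    await asyncio.sleep(0.01)
    print("started", name)

asyncio.run(start_all([lambda n=n: fake_start(n) for n in range(5)]))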
spyder-ide/spyder
spyder/utils/encoding.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/encoding.py#L63-L77
def to_unicode_from_fs(string): """ Return a unicode version of string decoded using the file system encoding. """ if not is_string(string): # string is a QString string = to_text_string(string.toUtf8(), 'utf-8') else: if is_binary_string(string): try: unic = string.decode(FS_ENCODING) except (UnicodeError, TypeError): pass else: return unic return string
[ "def", "to_unicode_from_fs", "(", "string", ")", ":", "if", "not", "is_string", "(", "string", ")", ":", "# string is a QString\r", "string", "=", "to_text_string", "(", "string", ".", "toUtf8", "(", ")", ",", "'utf-8'", ")", "else", ":", "if", "is_binary_string", "(", "string", ")", ":", "try", ":", "unic", "=", "string", ".", "decode", "(", "FS_ENCODING", ")", "except", "(", "UnicodeError", ",", "TypeError", ")", ":", "pass", "else", ":", "return", "unic", "return", "string" ]
Return a unicode version of string decoded using the file system encoding.
[ "Return", "a", "unicode", "version", "of", "string", "decoded", "using", "the", "file", "system", "encoding", "." ]
python
train
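Setting aside the QString branch, the bytes path is just "try the filesystem encoding, fall through on failure". A standalone demo of that decode path using sys.getfilesystemencoding():

import sys

FS_ENCODING = sys.getfilesystemencoding()

def to_unicode_from_fs(string):
    """Decode bytes with the filesystem encoding; pass everything else through."""
    if isinstance(string, bytes):
        try:
            return string.decode(FS_ENCODING)
        except (UnicodeError, TypeError):
            pass
    return string

print(to_unicode_from_fs(b'caf\xc3\xa9'))   # 'café' when FS encoding is UTF-8
print(to_unicode_from_fs('already text'))   # unchanged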
openstates/billy
billy/importers/names.py
https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/importers/names.py#L120-L131
def _normalize(self, name): """ Normalizes a legislator name by stripping titles from the front, converting to lowercase and removing punctuation. """ name = re.sub( r'^(Senator|Representative|Sen\.?|Rep\.?|' 'Hon\.?|Right Hon\.?|Mr\.?|Mrs\.?|Ms\.?|L\'hon\.?|' 'Assembly(member|man|woman)) ', '', name) return name.strip().lower().replace('.', '')
[ "def", "_normalize", "(", "self", ",", "name", ")", ":", "name", "=", "re", ".", "sub", "(", "r'^(Senator|Representative|Sen\\.?|Rep\\.?|'", "'Hon\\.?|Right Hon\\.?|Mr\\.?|Mrs\\.?|Ms\\.?|L\\'hon\\.?|'", "'Assembly(member|man|woman)) '", ",", "''", ",", "name", ")", "return", "name", ".", "strip", "(", ")", ".", "lower", "(", ")", ".", "replace", "(", "'.'", ",", "''", ")" ]
Normalizes a legislator name by stripping titles from the front, converting to lowercase and removing punctuation.
[ "Normalizes", "a", "legislator", "name", "by", "stripping", "titles", "from", "the", "front", "converting", "to", "lowercase", "and", "removing", "punctuation", "." ]
python
train
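The normalizer is pure string work, so it runs standalone. The regex below is copied from above, made fully raw to avoid escape warnings, with the same match behavior:

import re

def normalize(name):
    """Strip a leading title, lowercase, and drop periods."""
    name = re.sub(
        r'^(Senator|Representative|Sen\.?|Rep\.?|'
        r'Hon\.?|Right Hon\.?|Mr\.?|Mrs\.?|Ms\.?|L\'hon\.?|'
        r'Assembly(member|man|woman)) ',
        '', name)
    return name.strip().lower().replace('.', '')

print(normalize("Sen. Jane Q. Doe"))            # 'jane q doe'
print(normalize("Assemblywoman Ada Lovelace"))  # 'ada lovelace'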
sorgerlab/indra
indra/sources/geneways/symbols_parser.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/symbols_parser.py#L50-L57
def symbol_to_id(self, symbol): """Returns the list of Entrez IDs for a given Geneways symbol (there may be more than one)""" if symbol not in self.symbols_to_ids: m = 'Could not look up Entrez ID for Geneways symbol ' + symbol raise Exception(m) return self.symbols_to_ids[symbol]
[ "def", "symbol_to_id", "(", "self", ",", "symbol", ")", ":", "if", "symbol", "not", "in", "self", ".", "symbols_to_ids", ":", "m", "=", "'Could not look up Entrez ID for Geneways symbol '", "+", "symbol", "raise", "Exception", "(", "m", ")", "return", "self", ".", "symbols_to_ids", "[", "symbol", "]" ]
Returns the list of Entrez IDs for a given Geneways symbol (there may be more than one)
[ "Returns", "the", "list", "of", "Entrez", "IDs", "for", "a", "given", "Geneways", "symbol", "(", "there", "may", "be", "more", "than", "one", ")" ]
python
train
skyfielders/python-skyfield
skyfield/positionlib.py
https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/positionlib.py#L277-L304
def from_altaz(self, alt=None, az=None, alt_degrees=None, az_degrees=None, distance=Distance(au=0.1)): """Generate an Apparent position from an altitude and azimuth. The altitude and azimuth can each be provided as an `Angle` object, or else as a number of degrees provided as either a float or a tuple of degrees, arcminutes, and arcseconds:: alt=Angle(...), az=Angle(...) alt_degrees=23.2289, az_degrees=142.1161 alt_degrees=(23, 13, 44.1), az_degrees=(142, 6, 58.1) The distance should be a :class:`~skyfield.units.Distance` object, if provided; otherwise a default of 0.1 au is used. """ # TODO: should this method live on another class? R = self.observer_data.altaz_rotation if self.observer_data else None if R is None: raise ValueError('only a position generated by a topos() call' ' knows the orientation of the horizon' ' and can understand altitude and azimuth') alt = _interpret_angle('alt', alt, alt_degrees) az = _interpret_angle('az', az, az_degrees) r = distance.au p = from_polar(r, alt, az) p = einsum('ji...,j...->i...', R, p) return Apparent(p)
[ "def", "from_altaz", "(", "self", ",", "alt", "=", "None", ",", "az", "=", "None", ",", "alt_degrees", "=", "None", ",", "az_degrees", "=", "None", ",", "distance", "=", "Distance", "(", "au", "=", "0.1", ")", ")", ":", "# TODO: should this method live on another class?", "R", "=", "self", ".", "observer_data", ".", "altaz_rotation", "if", "self", ".", "observer_data", "else", "None", "if", "R", "is", "None", ":", "raise", "ValueError", "(", "'only a position generated by a topos() call'", "' knows the orientation of the horizon'", "' and can understand altitude and azimuth'", ")", "alt", "=", "_interpret_angle", "(", "'alt'", ",", "alt", ",", "alt_degrees", ")", "az", "=", "_interpret_angle", "(", "'az'", ",", "az", ",", "az_degrees", ")", "r", "=", "distance", ".", "au", "p", "=", "from_polar", "(", "r", ",", "alt", ",", "az", ")", "p", "=", "einsum", "(", "'ji...,j...->i...'", ",", "R", ",", "p", ")", "return", "Apparent", "(", "p", ")" ]
Generate an Apparent position from an altitude and azimuth. The altitude and azimuth can each be provided as an `Angle` object, or else as a number of degrees provided as either a float or a tuple of degrees, arcminutes, and arcseconds:: alt=Angle(...), az=Angle(...) alt_degrees=23.2289, az_degrees=142.1161 alt_degrees=(23, 13, 44.1), az_degrees=(142, 6, 58.1) The distance should be a :class:`~skyfield.units.Distance` object, if provided; otherwise a default of 0.1 au is used.
[ "Generate", "an", "Apparent", "position", "from", "an", "altitude", "and", "azimuth", "." ]
python
train
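A hedged usage sketch for the method above. It assumes the skyfield data files (which download on first use), and that a topos-generated position of this era carries the horizon orientation the method checks for; the coordinates are illustrative:

from skyfield.api import Topos, load

ts = load.timescale()
t = ts.utc(2019, 6, 1, 4, 30)
planets = load('de421.bsp')                      # downloads on first run
earth = planets['earth']
boston = earth + Topos('42.3583 N', '71.0603 W')

# Only a topos-generated position knows where the horizon is, so
# from_altaz() works here but raises ValueError on a bare planet position.
position = boston.at(t).from_altaz(alt_degrees=30.0, az_degrees=90.0)
ra, dec, distance = position.radec()
print(ra, dec)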
chrislim2888/IP2Location-Python
IP2Location.py
https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L177-L180
def get_area_code(self, ip): ''' Get area_code ''' rec = self.get_all(ip) return rec and rec.area_code
[ "def", "get_area_code", "(", "self", ",", "ip", ")", ":", "rec", "=", "self", ".", "get_all", "(", "ip", ")", "return", "rec", "and", "rec", ".", "area_code" ]
Get area_code
[ "Get", "area_code" ]
python
train
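The `rec and rec.area_code` return is the short-circuit idiom for "None if the lookup failed, else the attribute". The same shape in isolation, with a hypothetical lookup in place of the database:

class Record:
    def __init__(self, area_code):
        self.area_code = area_code

def get_all(ip):
    # Hypothetical lookup: only one IP resolves in this sketch.
    return Record("0225") if ip == "19.5.10.1" else None

def get_area_code(ip):
    rec = get_all(ip)
    return rec and rec.area_code   # None when rec is None, else the field

print(get_area_code("19.5.10.1"))   # 0225
print(get_area_code("10.0.0.1"))    # None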
horazont/aiosasl
aiosasl/__init__.py
https://github.com/horazont/aiosasl/blob/af58bf30f688757e58af6e87892d35a8ce798482/aiosasl/__init__.py#L464-L509
def response(self, payload): """ Send a response to the previously received challenge, with the given `payload`. The payload is encoded using base64 and transmitted to the server. Return the next state of the state machine as tuple (see :class:`SASLStateMachine` for details). """ if self._state == SASLState.SUCCESS_SIMULATE_CHALLENGE: if payload != b"": # XXX: either our mechanism is buggy or the server # sent SASLState.SUCCESS before all challenge-response # messages defined by the mechanism were sent self._state = SASLState.FAILURE raise SASLFailure( None, "protocol violation: mechanism did not" " respond with an empty response to a" " challenge with final data – this suggests" " a protocol-violating early success from the server." ) self._state = SASLState.SUCCESS return SASLState.SUCCESS, None if self._state != SASLState.CHALLENGE: raise RuntimeError( "no challenge has been made or negotiation failed") try: next_state, payload = yield from self.interface.respond(payload) except SASLFailure: self._state = SASLState.FAILURE raise next_state = SASLState.from_reply(next_state) # unfold the (SASLState.SUCCESS, payload) to a sequence of # (SASLState.CHALLENGE, payload), (SASLState.SUCCESS, None) for the SASLMethod # to allow uniform treatment of both cases if next_state == SASLState.SUCCESS and payload is not None: self._state = SASLState.SUCCESS_SIMULATE_CHALLENGE return SASLState.CHALLENGE, payload self._state = next_state return next_state, payload
[ "def", "response", "(", "self", ",", "payload", ")", ":", "if", "self", ".", "_state", "==", "SASLState", ".", "SUCCESS_SIMULATE_CHALLENGE", ":", "if", "payload", "!=", "b\"\"", ":", "# XXX: either our mechanism is buggy or the server", "# sent SASLState.SUCCESS before all challenge-response", "# messages defined by the mechanism were sent", "self", ".", "_state", "=", "SASLState", ".", "FAILURE", "raise", "SASLFailure", "(", "None", ",", "\"protocol violation: mechanism did not\"", "\" respond with an empty response to a\"", "\" challenge with final data – this suggests\"", "\" a protocol-violating early success from the server.\"", ")", "self", ".", "_state", "=", "SASLState", ".", "SUCCESS", "return", "SASLState", ".", "SUCCESS", ",", "None", "if", "self", ".", "_state", "!=", "SASLState", ".", "CHALLENGE", ":", "raise", "RuntimeError", "(", "\"no challenge has been made or negotiation failed\"", ")", "try", ":", "next_state", ",", "payload", "=", "yield", "from", "self", ".", "interface", ".", "respond", "(", "payload", ")", "except", "SASLFailure", ":", "self", ".", "_state", "=", "SASLState", ".", "FAILURE", "raise", "next_state", "=", "SASLState", ".", "from_reply", "(", "next_state", ")", "# unfold the (SASLState.SUCCESS, payload) to a sequence of", "# (SASLState.CHALLENGE, payload), (SASLState.SUCCESS, None) for the SASLMethod", "# to allow uniform treatment of both cases", "if", "next_state", "==", "SASLState", ".", "SUCCESS", "and", "payload", "is", "not", "None", ":", "self", ".", "_state", "=", "SASLState", ".", "SUCCESS_SIMULATE_CHALLENGE", "return", "SASLState", ".", "CHALLENGE", ",", "payload", "self", ".", "_state", "=", "next_state", "return", "next_state", ",", "payload" ]
Send a response to the previously received challenge, with the given `payload`. The payload is encoded using base64 and transmitted to the server. Return the next state of the state machine as tuple (see :class:`SASLStateMachine` for details).
[ "Send", "a", "response", "to", "the", "previously", "received", "challenge", "with", "the", "given", "payload", ".", "The", "payload", "is", "encoded", "using", "base64", "and", "transmitted", "to", "the", "server", "." ]
python
test
wavycloud/pyboto3
pyboto3/ssm.py
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/ssm.py#L3774-L3883
def register_task_with_maintenance_window(WindowId=None, Targets=None, TaskArn=None, ServiceRoleArn=None, TaskType=None, TaskParameters=None, Priority=None, MaxConcurrency=None, MaxErrors=None, LoggingInfo=None, ClientToken=None): """ Adds a new task to a Maintenance Window. See also: AWS API Documentation :example: response = client.register_task_with_maintenance_window( WindowId='string', Targets=[ { 'Key': 'string', 'Values': [ 'string', ] }, ], TaskArn='string', ServiceRoleArn='string', TaskType='RUN_COMMAND', TaskParameters={ 'string': { 'Values': [ 'string', ] } }, Priority=123, MaxConcurrency='string', MaxErrors='string', LoggingInfo={ 'S3BucketName': 'string', 'S3KeyPrefix': 'string', 'S3Region': 'string' }, ClientToken='string' ) :type WindowId: string :param WindowId: [REQUIRED] The id of the Maintenance Window the task should be added to. :type Targets: list :param Targets: [REQUIRED] The targets (either instances or tags). Instances are specified using Key=instanceids,Values=instanceid1,instanceid2. Tags are specified using Key=tag name,Values=tag value. (dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call. Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:Amazon EC2 tagor InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Executing a Command Using Systems Manager Run Command . Values (list) --User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Executing a Command Using Systems Manager Run Command . (string) -- :type TaskArn: string :param TaskArn: [REQUIRED] The ARN of the task to execute :type ServiceRoleArn: string :param ServiceRoleArn: [REQUIRED] The role that should be assumed when executing the task. :type TaskType: string :param TaskType: [REQUIRED] The type of task being registered. :type TaskParameters: dict :param TaskParameters: The parameters that should be passed to the task when it is executed. (string) -- (dict) --Defines the values for a task parameter. Values (list) --This field contains an array of 0 or more strings, each 1 to 255 characters in length. (string) -- :type Priority: integer :param Priority: The priority of the task in the Maintenance Window, the lower the number the higher the priority. Tasks in a Maintenance Window are scheduled in priority order with tasks that have the same priority scheduled in parallel. :type MaxConcurrency: string :param MaxConcurrency: [REQUIRED] The maximum number of targets this task can be run for in parallel. :type MaxErrors: string :param MaxErrors: [REQUIRED] The maximum number of errors allowed before this task stops being scheduled. :type LoggingInfo: dict :param LoggingInfo: A structure containing information about an Amazon S3 bucket to write instance-level logs to. S3BucketName (string) -- [REQUIRED]The name of an Amazon S3 bucket where execution logs are stored . S3KeyPrefix (string) --(Optional) The Amazon S3 bucket subfolder. S3Region (string) -- [REQUIRED]The region where the Amazon S3 bucket is located. :type ClientToken: string :param ClientToken: User-provided idempotency token. 
This field is autopopulated if not provided. :rtype: dict :return: { 'WindowTaskId': 'string' } """ pass
[ "def", "register_task_with_maintenance_window", "(", "WindowId", "=", "None", ",", "Targets", "=", "None", ",", "TaskArn", "=", "None", ",", "ServiceRoleArn", "=", "None", ",", "TaskType", "=", "None", ",", "TaskParameters", "=", "None", ",", "Priority", "=", "None", ",", "MaxConcurrency", "=", "None", ",", "MaxErrors", "=", "None", ",", "LoggingInfo", "=", "None", ",", "ClientToken", "=", "None", ")", ":", "pass" ]
Adds a new task to a Maintenance Window. See also: AWS API Documentation :example: response = client.register_task_with_maintenance_window( WindowId='string', Targets=[ { 'Key': 'string', 'Values': [ 'string', ] }, ], TaskArn='string', ServiceRoleArn='string', TaskType='RUN_COMMAND', TaskParameters={ 'string': { 'Values': [ 'string', ] } }, Priority=123, MaxConcurrency='string', MaxErrors='string', LoggingInfo={ 'S3BucketName': 'string', 'S3KeyPrefix': 'string', 'S3Region': 'string' }, ClientToken='string' ) :type WindowId: string :param WindowId: [REQUIRED] The id of the Maintenance Window the task should be added to. :type Targets: list :param Targets: [REQUIRED] The targets (either instances or tags). Instances are specified using Key=instanceids,Values=instanceid1,instanceid2. Tags are specified using Key=tag name,Values=tag value. (dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call. Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:Amazon EC2 tagor InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Executing a Command Using Systems Manager Run Command . Values (list) --User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Executing a Command Using Systems Manager Run Command . (string) -- :type TaskArn: string :param TaskArn: [REQUIRED] The ARN of the task to execute :type ServiceRoleArn: string :param ServiceRoleArn: [REQUIRED] The role that should be assumed when executing the task. :type TaskType: string :param TaskType: [REQUIRED] The type of task being registered. :type TaskParameters: dict :param TaskParameters: The parameters that should be passed to the task when it is executed. (string) -- (dict) --Defines the values for a task parameter. Values (list) --This field contains an array of 0 or more strings, each 1 to 255 characters in length. (string) -- :type Priority: integer :param Priority: The priority of the task in the Maintenance Window, the lower the number the higher the priority. Tasks in a Maintenance Window are scheduled in priority order with tasks that have the same priority scheduled in parallel. :type MaxConcurrency: string :param MaxConcurrency: [REQUIRED] The maximum number of targets this task can be run for in parallel. :type MaxErrors: string :param MaxErrors: [REQUIRED] The maximum number of errors allowed before this task stops being scheduled. :type LoggingInfo: dict :param LoggingInfo: A structure containing information about an Amazon S3 bucket to write instance-level logs to. S3BucketName (string) -- [REQUIRED]The name of an Amazon S3 bucket where execution logs are stored . S3KeyPrefix (string) --(Optional) The Amazon S3 bucket subfolder. S3Region (string) -- [REQUIRED]The region where the Amazon S3 bucket is located. :type ClientToken: string :param ClientToken: User-provided idempotency token. This field is autopopulated if not provided. :rtype: dict :return: { 'WindowTaskId': 'string' }
[ "Adds", "a", "new", "task", "to", "a", "Maintenance", "Window", ".", "See", "also", ":", "AWS", "API", "Documentation", ":", "example", ":", "response", "=", "client", ".", "register_task_with_maintenance_window", "(", "WindowId", "=", "string", "Targets", "=", "[", "{", "Key", ":", "string", "Values", ":", "[", "string", "]", "}", "]", "TaskArn", "=", "string", "ServiceRoleArn", "=", "string", "TaskType", "=", "RUN_COMMAND", "TaskParameters", "=", "{", "string", ":", "{", "Values", ":", "[", "string", "]", "}", "}", "Priority", "=", "123", "MaxConcurrency", "=", "string", "MaxErrors", "=", "string", "LoggingInfo", "=", "{", "S3BucketName", ":", "string", "S3KeyPrefix", ":", "string", "S3Region", ":", "string", "}", "ClientToken", "=", "string", ")", ":", "type", "WindowId", ":", "string", ":", "param", "WindowId", ":", "[", "REQUIRED", "]", "The", "id", "of", "the", "Maintenance", "Window", "the", "task", "should", "be", "added", "to", "." ]
python
train
Kozea/cairocffi
cairocffi/context.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/context.py#L1754-L1764
def set_font_face(self, font_face): """Replaces the current font face with :obj:`font_face`. :param font_face: A :class:`FontFace` object, or :obj:`None` to restore the default font. """ font_face = font_face._pointer if font_face is not None else ffi.NULL cairo.cairo_set_font_face(self._pointer, font_face) self._check_status()
[ "def", "set_font_face", "(", "self", ",", "font_face", ")", ":", "font_face", "=", "font_face", ".", "_pointer", "if", "font_face", "is", "not", "None", "else", "ffi", ".", "NULL", "cairo", ".", "cairo_set_font_face", "(", "self", ".", "_pointer", ",", "font_face", ")", "self", ".", "_check_status", "(", ")" ]
Replaces the current font face with :obj:`font_face`. :param font_face: A :class:`FontFace` object, or :obj:`None` to restore the default font.
[ "Replaces", "the", "current", "font", "face", "with", ":", "obj", ":", "font_face", "." ]
python
train
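A hedged usage sketch with cairocffi's toy font API on an in-memory surface, showing both setting a face and passing None to restore the default:

import cairocffi as cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 200, 50)
context = cairo.Context(surface)

# Install a toy font face, draw some text, then restore the default font.
context.set_font_face(cairo.ToyFontFace('monospace'))
context.set_font_size(16)
context.move_to(10, 30)
context.show_text('hello')

context.set_font_face(None)   # back to the default font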
thriftrw/thriftrw-python
thriftrw/idl/parser.py
https://github.com/thriftrw/thriftrw-python/blob/4f2f71acd7a0ac716c9ea5cdcea2162aa561304a/thriftrw/idl/parser.py#L206-L210
def p_struct(self, p): '''struct : STRUCT IDENTIFIER '{' field_seq '}' annotations''' p[0] = ast.Struct( name=p[2], fields=p[4], annotations=p[6], lineno=p.lineno(2) )
[ "def", "p_struct", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "ast", ".", "Struct", "(", "name", "=", "p", "[", "2", "]", ",", "fields", "=", "p", "[", "4", "]", ",", "annotations", "=", "p", "[", "6", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "2", ")", ")" ]
struct : STRUCT IDENTIFIER '{' field_seq '}' annotations
[ "struct", ":", "STRUCT", "IDENTIFIER", "{", "field_seq", "}", "annotations" ]
python
train
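In PLY, the rule's docstring is the grammar production and p[i] indexes its symbols: p[0] is the result, p[2] here is the IDENTIFIER, and p.lineno(2) is that token's line. A minimal self-contained PLY sketch of the same convention, using a toy expression grammar rather than the Thrift one (assumes ply is installed):

import ply.lex as lex
import ply.yacc as yacc

tokens = ('NUMBER', 'PLUS')
t_PLUS = r'\+'
t_ignore = ' '

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    t.lexer.skip(1)

def p_expr_plus(p):
    '''expr : expr PLUS NUMBER'''
    p[0] = p[1] + p[3]   # p[i] indexes the symbols of the production

def p_expr_number(p):
    '''expr : NUMBER'''
    p[0] = p[1]

def p_error(p):
    pass

lexer = lex.lex()
parser = yacc.yacc()
print(parser.parse('1 + 2 + 3', lexer=lexer))   # 6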
wavycloud/pyboto3
pyboto3/cloudformation.py
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/cloudformation.py#L123-L253
def create_change_set(StackName=None, TemplateBody=None, TemplateURL=None, UsePreviousTemplate=None, Parameters=None, Capabilities=None, ResourceTypes=None, RoleARN=None, NotificationARNs=None, Tags=None, ChangeSetName=None, ClientToken=None, Description=None, ChangeSetType=None): """ Creates a list of changes that will be applied to a stack so that you can review the changes before executing them. You can create a change set for a stack that doesn't exist or an existing stack. If you create a change set for a stack that doesn't exist, the change set shows all of the resources that AWS CloudFormation will create. If you create a change set for an existing stack, AWS CloudFormation compares the stack's information with the information that you submit in the change set and lists the differences. Use change sets to understand which resources AWS CloudFormation will create or change, and how it will change resources in an existing stack, before you create or update a stack. To create a change set for a stack that doesn't exist, for the ChangeSetType parameter, specify CREATE . To create a change set for an existing stack, specify UPDATE for the ChangeSetType parameter. After the CreateChangeSet call successfully completes, AWS CloudFormation starts creating the change set. To check the status of the change set or to review it, use the DescribeChangeSet action. When you are satisfied with the changes the change set will make, execute the change set by using the ExecuteChangeSet action. AWS CloudFormation doesn't make changes until you execute the change set. See also: AWS API Documentation :example: response = client.create_change_set( StackName='string', TemplateBody='string', TemplateURL='string', UsePreviousTemplate=True|False, Parameters=[ { 'ParameterKey': 'string', 'ParameterValue': 'string', 'UsePreviousValue': True|False }, ], Capabilities=[ 'CAPABILITY_IAM'|'CAPABILITY_NAMED_IAM', ], ResourceTypes=[ 'string', ], RoleARN='string', NotificationARNs=[ 'string', ], Tags=[ { 'Key': 'string', 'Value': 'string' }, ], ChangeSetName='string', ClientToken='string', Description='string', ChangeSetType='CREATE'|'UPDATE' ) :type StackName: string :param StackName: [REQUIRED] The name or the unique ID of the stack for which you are creating a change set. AWS CloudFormation generates the change set by comparing this stack's information with the information that you submit, such as a modified template or different parameter input values. :type TemplateBody: string :param TemplateBody: A structure that contains the body of the revised template, with a minimum length of 1 byte and a maximum length of 51,200 bytes. AWS CloudFormation generates the change set by comparing this template with the template of the stack that you specified. Conditional: You must specify only TemplateBody or TemplateURL . :type TemplateURL: string :param TemplateURL: The location of the file that contains the revised template. The URL must point to a template (max size: 460,800 bytes) that is located in an S3 bucket. AWS CloudFormation generates the change set by comparing this template with the stack that you specified. Conditional: You must specify only TemplateBody or TemplateURL . :type UsePreviousTemplate: boolean :param UsePreviousTemplate: Whether to reuse the template that is associated with the stack to create the change set. :type Parameters: list :param Parameters: A list of Parameter structures that specify input parameters for the change set. For more information, see the Parameter data type. 
(dict) --The Parameter data type. ParameterKey (string) --The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that is specified in your template. ParameterValue (string) --The value associated with the parameter. UsePreviousValue (boolean) --During a stack update, use the existing parameter value that the stack is using for a given parameter key. If you specify true , do not specify a parameter value. :type Capabilities: list :param Capabilities: A list of values that you must specify before AWS CloudFormation can update certain stacks. Some stack templates might include resources that can affect permissions in your AWS account, for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge their capabilities by specifying this parameter. The only valid values are CAPABILITY_IAM and CAPABILITY_NAMED_IAM . The following resources require you to specify this parameter: AWS::IAM::AccessKey , AWS::IAM::Group , AWS::IAM::InstanceProfile , AWS::IAM::Policy , AWS::IAM::Role , AWS::IAM::User , and AWS::IAM::UserToGroupAddition . If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM . If you don't specify this parameter, this action returns an InsufficientCapabilities error. For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates . (string) -- :type ResourceTypes: list :param ResourceTypes: The template resource types that you have permissions to work with if you execute this change set, such as AWS::EC2::Instance , AWS::EC2::* , or Custom::MyCustomInstance . If the list of resource types doesn't include a resource type that you're updating, the stack update fails. By default, AWS CloudFormation grants permissions to all resource types. AWS Identity and Access Management (IAM) uses this parameter for condition keys in IAM policies for AWS CloudFormation. For more information, see Controlling Access with AWS Identity and Access Management in the AWS CloudFormation User Guide. (string) -- :type RoleARN: string :param RoleARN: The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that AWS CloudFormation assumes when executing the change set. AWS CloudFormation uses the role's credentials to make calls on your behalf. AWS CloudFormation uses this role for all future operations on the stack. As long as users have permission to operate on the stack, AWS CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege. If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials. :type NotificationARNs: list :param NotificationARNs: The Amazon Resource Names (ARNs) of Amazon Simple Notification Service (Amazon SNS) topics that AWS CloudFormation associates with the stack. To remove all associated notification topics, specify an empty list. (string) -- :type Tags: list :param Tags: Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to resources in the stack. 
You can specify a maximum of 10 tags. (dict) --The Tag type enables you to specify a key-value pair that can be used to store information about an AWS CloudFormation stack. Key (string) -- Required . A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by Amazon Web Services (AWS) have the reserved prefix: aws: . Value (string) -- Required . A string containing the value for this tag. You can specify a maximum of 256 characters for a tag value. :type ChangeSetName: string :param ChangeSetName: [REQUIRED] The name of the change set. The name must be unique among all change sets that are associated with the specified stack. A change set name can contain only alphanumeric, case-sensitive characters and hyphens. It must start with an alphabetic character and cannot exceed 128 characters. :type ClientToken: string :param ClientToken: A unique identifier for this CreateChangeSet request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create another change set with the same name. You might retry CreateChangeSet requests to ensure that AWS CloudFormation successfully received them. :type Description: string :param Description: A description to help you identify this change set. :type ChangeSetType: string :param ChangeSetType: The type of change set operation. To create a change set for a new stack, specify CREATE . To create a change set for an existing stack, specify UPDATE . If you create a change set for a new stack, AWS CloudFormation creates a stack with a unique stack ID, but no template or resources. The stack will be in the `REVIEW_IN_PROGRESS <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#d0e11995>`_ state until you execute the change set. By default, AWS CloudFormation specifies UPDATE . You can't use the UPDATE type to create a change set for a new stack or the CREATE type to create a change set for an existing stack. :rtype: dict :return: { 'Id': 'string', 'StackId': 'string' } """ pass
[ "def", "create_change_set", "(", "StackName", "=", "None", ",", "TemplateBody", "=", "None", ",", "TemplateURL", "=", "None", ",", "UsePreviousTemplate", "=", "None", ",", "Parameters", "=", "None", ",", "Capabilities", "=", "None", ",", "ResourceTypes", "=", "None", ",", "RoleARN", "=", "None", ",", "NotificationARNs", "=", "None", ",", "Tags", "=", "None", ",", "ChangeSetName", "=", "None", ",", "ClientToken", "=", "None", ",", "Description", "=", "None", ",", "ChangeSetType", "=", "None", ")", ":", "pass" ]
Creates a list of changes that will be applied to a stack so that you can review the changes before executing them. You can create a change set for a stack that doesn't exist or an existing stack. If you create a change set for a stack that doesn't exist, the change set shows all of the resources that AWS CloudFormation will create. If you create a change set for an existing stack, AWS CloudFormation compares the stack's information with the information that you submit in the change set and lists the differences. Use change sets to understand which resources AWS CloudFormation will create or change, and how it will change resources in an existing stack, before you create or update a stack. To create a change set for a stack that doesn't exist, for the ChangeSetType parameter, specify CREATE . To create a change set for an existing stack, specify UPDATE for the ChangeSetType parameter. After the CreateChangeSet call successfully completes, AWS CloudFormation starts creating the change set. To check the status of the change set or to review it, use the DescribeChangeSet action. When you are satisfied with the changes the change set will make, execute the change set by using the ExecuteChangeSet action. AWS CloudFormation doesn't make changes until you execute the change set. See also: AWS API Documentation :example: response = client.create_change_set( StackName='string', TemplateBody='string', TemplateURL='string', UsePreviousTemplate=True|False, Parameters=[ { 'ParameterKey': 'string', 'ParameterValue': 'string', 'UsePreviousValue': True|False }, ], Capabilities=[ 'CAPABILITY_IAM'|'CAPABILITY_NAMED_IAM', ], ResourceTypes=[ 'string', ], RoleARN='string', NotificationARNs=[ 'string', ], Tags=[ { 'Key': 'string', 'Value': 'string' }, ], ChangeSetName='string', ClientToken='string', Description='string', ChangeSetType='CREATE'|'UPDATE' ) :type StackName: string :param StackName: [REQUIRED] The name or the unique ID of the stack for which you are creating a change set. AWS CloudFormation generates the change set by comparing this stack's information with the information that you submit, such as a modified template or different parameter input values. :type TemplateBody: string :param TemplateBody: A structure that contains the body of the revised template, with a minimum length of 1 byte and a maximum length of 51,200 bytes. AWS CloudFormation generates the change set by comparing this template with the template of the stack that you specified. Conditional: You must specify only TemplateBody or TemplateURL . :type TemplateURL: string :param TemplateURL: The location of the file that contains the revised template. The URL must point to a template (max size: 460,800 bytes) that is located in an S3 bucket. AWS CloudFormation generates the change set by comparing this template with the stack that you specified. Conditional: You must specify only TemplateBody or TemplateURL . :type UsePreviousTemplate: boolean :param UsePreviousTemplate: Whether to reuse the template that is associated with the stack to create the change set. :type Parameters: list :param Parameters: A list of Parameter structures that specify input parameters for the change set. For more information, see the Parameter data type. (dict) --The Parameter data type. ParameterKey (string) --The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that is specified in your template. ParameterValue (string) --The value associated with the parameter. 
UsePreviousValue (boolean) --During a stack update, use the existing parameter value that the stack is using for a given parameter key. If you specify true , do not specify a parameter value. :type Capabilities: list :param Capabilities: A list of values that you must specify before AWS CloudFormation can update certain stacks. Some stack templates might include resources that can affect permissions in your AWS account, for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge their capabilities by specifying this parameter. The only valid values are CAPABILITY_IAM and CAPABILITY_NAMED_IAM . The following resources require you to specify this parameter: AWS::IAM::AccessKey , AWS::IAM::Group , AWS::IAM::InstanceProfile , AWS::IAM::Policy , AWS::IAM::Role , AWS::IAM::User , and AWS::IAM::UserToGroupAddition . If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM . If you don't specify this parameter, this action returns an InsufficientCapabilities error. For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates . (string) -- :type ResourceTypes: list :param ResourceTypes: The template resource types that you have permissions to work with if you execute this change set, such as AWS::EC2::Instance , AWS::EC2::* , or Custom::MyCustomInstance . If the list of resource types doesn't include a resource type that you're updating, the stack update fails. By default, AWS CloudFormation grants permissions to all resource types. AWS Identity and Access Management (IAM) uses this parameter for condition keys in IAM policies for AWS CloudFormation. For more information, see Controlling Access with AWS Identity and Access Management in the AWS CloudFormation User Guide. (string) -- :type RoleARN: string :param RoleARN: The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that AWS CloudFormation assumes when executing the change set. AWS CloudFormation uses the role's credentials to make calls on your behalf. AWS CloudFormation uses this role for all future operations on the stack. As long as users have permission to operate on the stack, AWS CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege. If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials. :type NotificationARNs: list :param NotificationARNs: The Amazon Resource Names (ARNs) of Amazon Simple Notification Service (Amazon SNS) topics that AWS CloudFormation associates with the stack. To remove all associated notification topics, specify an empty list. (string) -- :type Tags: list :param Tags: Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to resources in the stack. You can specify a maximum of 10 tags. (dict) --The Tag type enables you to specify a key-value pair that can be used to store information about an AWS CloudFormation stack. Key (string) -- Required . A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. 
Tags owned by Amazon Web Services (AWS) have the reserved prefix: aws: . Value (string) -- Required . A string containing the value for this tag. You can specify a maximum of 256 characters for a tag value. :type ChangeSetName: string :param ChangeSetName: [REQUIRED] The name of the change set. The name must be unique among all change sets that are associated with the specified stack. A change set name can contain only alphanumeric, case-sensitive characters and hyphens. It must start with an alphabetic character and cannot exceed 128 characters. :type ClientToken: string :param ClientToken: A unique identifier for this CreateChangeSet request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create another change set with the same name. You might retry CreateChangeSet requests to ensure that AWS CloudFormation successfully received them. :type Description: string :param Description: A description to help you identify this change set. :type ChangeSetType: string :param ChangeSetType: The type of change set operation. To create a change set for a new stack, specify CREATE . To create a change set for an existing stack, specify UPDATE . If you create a change set for a new stack, AWS CloudFormation creates a stack with a unique stack ID, but no template or resources. The stack will be in the `REVIEW_IN_PROGRESS <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#d0e11995>`_ state until you execute the change set. By default, AWS CloudFormation specifies UPDATE . You can't use the UPDATE type to create a change set for a new stack or the CREATE type to create a change set for an existing stack. :rtype: dict :return: { 'Id': 'string', 'StackId': 'string' }
[ "Creates", "a", "list", "of", "changes", "that", "will", "be", "applied", "to", "a", "stack", "so", "that", "you", "can", "review", "the", "changes", "before", "executing", "them", ".", "You", "can", "create", "a", "change", "set", "for", "a", "stack", "that", "doesn", "t", "exist", "or", "an", "existing", "stack", ".", "If", "you", "create", "a", "change", "set", "for", "a", "stack", "that", "doesn", "t", "exist", "the", "change", "set", "shows", "all", "of", "the", "resources", "that", "AWS", "CloudFormation", "will", "create", ".", "If", "you", "create", "a", "change", "set", "for", "an", "existing", "stack", "AWS", "CloudFormation", "compares", "the", "stack", "s", "information", "with", "the", "information", "that", "you", "submit", "in", "the", "change", "set", "and", "lists", "the", "differences", ".", "Use", "change", "sets", "to", "understand", "which", "resources", "AWS", "CloudFormation", "will", "create", "or", "change", "and", "how", "it", "will", "change", "resources", "in", "an", "existing", "stack", "before", "you", "create", "or", "update", "a", "stack", ".", "To", "create", "a", "change", "set", "for", "a", "stack", "that", "doesn", "t", "exist", "for", "the", "ChangeSetType", "parameter", "specify", "CREATE", ".", "To", "create", "a", "change", "set", "for", "an", "existing", "stack", "specify", "UPDATE", "for", "the", "ChangeSetType", "parameter", ".", "After", "the", "CreateChangeSet", "call", "successfully", "completes", "AWS", "CloudFormation", "starts", "creating", "the", "change", "set", ".", "To", "check", "the", "status", "of", "the", "change", "set", "or", "to", "review", "it", "use", "the", "DescribeChangeSet", "action", ".", "When", "you", "are", "satisfied", "with", "the", "changes", "the", "change", "set", "will", "make", "execute", "the", "change", "set", "by", "using", "the", "ExecuteChangeSet", "action", ".", "AWS", "CloudFormation", "doesn", "t", "make", "changes", "until", "you", "execute", "the", "change", "set", ".", "See", "also", ":", "AWS", "API", "Documentation", ":", "example", ":", "response", "=", "client", ".", "create_change_set", "(", "StackName", "=", "string", "TemplateBody", "=", "string", "TemplateURL", "=", "string", "UsePreviousTemplate", "=", "True|False", "Parameters", "=", "[", "{", "ParameterKey", ":", "string", "ParameterValue", ":", "string", "UsePreviousValue", ":", "True|False", "}", "]", "Capabilities", "=", "[", "CAPABILITY_IAM", "|", "CAPABILITY_NAMED_IAM", "]", "ResourceTypes", "=", "[", "string", "]", "RoleARN", "=", "string", "NotificationARNs", "=", "[", "string", "]", "Tags", "=", "[", "{", "Key", ":", "string", "Value", ":", "string", "}", "]", "ChangeSetName", "=", "string", "ClientToken", "=", "string", "Description", "=", "string", "ChangeSetType", "=", "CREATE", "|", "UPDATE", ")", ":", "type", "StackName", ":", "string", ":", "param", "StackName", ":", "[", "REQUIRED", "]", "The", "name", "or", "the", "unique", "ID", "of", "the", "stack", "for", "which", "you", "are", "creating", "a", "change", "set", ".", "AWS", "CloudFormation", "generates", "the", "change", "set", "by", "comparing", "this", "stack", "s", "information", "with", "the", "information", "that", "you", "submit", "such", "as", "a", "modified", "template", "or", "different", "parameter", "input", "values", "." ]
python
train
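A minimal usage sketch for the stub above, calling the real boto3 client whose signature it mirrors. The stack name, template URL, and change set name are hypothetical placeholders, and configured AWS credentials are assumed.

import boto3

# create a CloudFormation client (assumes credentials/region are configured)
client = boto3.client('cloudformation')

# request a change set against an existing stack; all names are placeholders
response = client.create_change_set(
    StackName='my-stack',
    TemplateURL='https://s3.amazonaws.com/my-bucket/template.yaml',
    ChangeSetName='my-change-set',
    ChangeSetType='UPDATE',
    Capabilities=['CAPABILITY_IAM'],
)

# the response carries the change set id and the stack id, as documented above
print(response['Id'], response['StackId'])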
waleedka/hiddenlayer
hiddenlayer/canvas.py
https://github.com/waleedka/hiddenlayer/blob/294f8732b271cbdd6310c55bdf5ce855cbf61c75/hiddenlayer/canvas.py#L191-L204
def draw_image(self, metric, limit=5): """Display a series of images at different time steps.""" rows = 1 cols = limit self.ax.axis("off") # Take the Axes gridspec and divide it into a grid gs = matplotlib.gridspec.GridSpecFromSubplotSpec( rows, cols, subplot_spec=self.gs) # Loop through images in last few steps for i, image in enumerate(metric.data[-cols:]): ax = self.figure.add_subplot(gs[0, i]) ax.axis('off') ax.set_title(metric.formatted_steps[-cols:][i]) ax.imshow(norm(image))
[ "def", "draw_image", "(", "self", ",", "metric", ",", "limit", "=", "5", ")", ":", "rows", "=", "1", "cols", "=", "limit", "self", ".", "ax", ".", "axis", "(", "\"off\"", ")", "# Take the Axes gridspec and divide it into a grid", "gs", "=", "matplotlib", ".", "gridspec", ".", "GridSpecFromSubplotSpec", "(", "rows", ",", "cols", ",", "subplot_spec", "=", "self", ".", "gs", ")", "# Loop through images in last few steps", "for", "i", ",", "image", "in", "enumerate", "(", "metric", ".", "data", "[", "-", "cols", ":", "]", ")", ":", "ax", "=", "self", ".", "figure", ".", "add_subplot", "(", "gs", "[", "0", ",", "i", "]", ")", "ax", ".", "axis", "(", "'off'", ")", "ax", ".", "set_title", "(", "metric", ".", "formatted_steps", "[", "-", "cols", ":", "]", "[", "i", "]", ")", "ax", ".", "imshow", "(", "norm", "(", "image", ")", ")" ]
Display a series of images at different time steps.
[ "Display", "a", "series", "of", "images", "at", "different", "time", "steps", "." ]
python
train
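The gridspec trick in draw_image is easy to miss; the standalone sketch below reproduces it with plain matplotlib, using random arrays in place of metric.data and index strings in place of metric.formatted_steps.

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec

cols = 5
figure = plt.figure()
# take one subplot slot and subdivide it into a 1 x cols strip of panels
outer = matplotlib.gridspec.GridSpec(1, 1)[0]
gs = matplotlib.gridspec.GridSpecFromSubplotSpec(1, cols, subplot_spec=outer)

images = [np.random.rand(8, 8) for _ in range(cols)]
for i, image in enumerate(images):
    ax = figure.add_subplot(gs[0, i])
    ax.axis('off')
    ax.set_title(str(i))
    ax.imshow(image)
plt.show()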
googledatalab/pydatalab
google/datalab/bigquery/_query.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_query.py#L324-L339
def execute(self, output_options=None, sampling=None, context=None, query_params=None): """ Initiate the query and return a QueryJob. Args: output_options: a QueryOutput object describing how to execute the query sampling: sampling function to use. No sampling is done if None. See bigquery.Sampling context: an optional Context object providing project_id and credentials. If a specific project id or credentials are unspecified, the default ones configured at the global level are used. Returns: A Job object that can be used to get the query results, or export to a file or dataframe Raises: Exception if query could not be executed. """ return self.execute_async(output_options, sampling=sampling, context=context, query_params=query_params).wait()
[ "def", "execute", "(", "self", ",", "output_options", "=", "None", ",", "sampling", "=", "None", ",", "context", "=", "None", ",", "query_params", "=", "None", ")", ":", "return", "self", ".", "execute_async", "(", "output_options", ",", "sampling", "=", "sampling", ",", "context", "=", "context", ",", "query_params", "=", "query_params", ")", ".", "wait", "(", ")" ]
Initiate the query and return a QueryJob. Args: output_options: a QueryOutput object describing how to execute the query sampling: sampling function to use. No sampling is done if None. See bigquery.Sampling context: an optional Context object providing project_id and credentials. If a specific project id or credentials are unspecified, the default ones configured at the global level are used. Returns: A Job object that can be used to get the query results, or export to a file or dataframe Raises: Exception if query could not be executed.
[ "Initiate", "the", "query", "and", "return", "a", "QueryJob", "." ]
python
train
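A hedged sketch of calling execute(); it assumes a Datalab environment with default project credentials, and that the job's result() exposes to_dataframe() as in the google.datalab.bigquery API.

import google.datalab.bigquery as bq

# build and run a query; execute() blocks until the job completes
query = bq.Query(
    'SELECT word FROM `bigquery-public-data.samples.shakespeare` LIMIT 5')
job = query.execute()

# fetch the results into a pandas DataFrame
df = job.result().to_dataframe()
print(df)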
limix/glimix-core
glimix_core/cov/_free.py
https://github.com/limix/glimix-core/blob/cddd0994591d100499cc41c1f480ddd575e7a980/glimix_core/cov/_free.py#L170-L189
def logdet(self): """ Log of |K|. Returns ------- float Log-determinant of K. """ from numpy.linalg import slogdet K = self.value() sign, logdet = slogdet(K) if sign != 1.0: msg = "The estimated determinant of K is not positive: " msg += f" ({sign}, {logdet})." raise RuntimeError(msg) return logdet
[ "def", "logdet", "(", "self", ")", ":", "from", "numpy", ".", "linalg", "import", "slogdet", "K", "=", "self", ".", "value", "(", ")", "sign", ",", "logdet", "=", "slogdet", "(", "K", ")", "if", "sign", "!=", "1.0", ":", "msg", "=", "\"The estimated determinant of K is not positive: \"", "msg", "+=", "f\" ({sign}, {logdet}).\"", "raise", "RuntimeError", "(", "msg", ")", "return", "logdet" ]
Log of |K|. Returns ------- float Log-determinant of K.
[ "Log", "of", "|K|", "." ]
python
valid
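The slogdet pattern above is worth isolating: for a positive-definite K the sign comes back as 1.0 and logdet is log|K|, computed without the overflow risk of taking det() first. A self-contained check:

import numpy as np
from numpy.linalg import slogdet

K = np.array([[2.0, 0.5],
              [0.5, 1.0]])
sign, logdet = slogdet(K)
assert sign == 1.0
# agrees with the naive computation for small, well-conditioned matrices
print(logdet, np.log(np.linalg.det(K)))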
oz123/blogit
blogit/blogit.py
https://github.com/oz123/blogit/blob/15b94969fa43aaf8dc677a8184b144ae8c0f7700/blogit/blogit.py#L467-L480
def preview(): # pragma: no coverage """launch an HTTP server to preview the website""" Handler = http.server.SimpleHTTPRequestHandler socketserver.TCPServer.allow_reuse_address = True port = CONFIG['http_port'] httpd = socketserver.TCPServer(("", port), Handler) os.chdir(CONFIG['output_to']) try: logger.info("and ready to test at " "http://127.0.0.1:%d" % CONFIG['http_port']) logger.info("Hit Ctrl+C to exit") httpd.serve_forever() except KeyboardInterrupt: httpd.shutdown()
[ "def", "preview", "(", ")", ":", "# pragma: no coverage", "Handler", "=", "http", ".", "server", ".", "SimpleHTTPRequestHandler", "socketserver", ".", "TCPServer", ".", "allow_reuse_address", "=", "True", "port", "=", "CONFIG", "[", "'http_port'", "]", "httpd", "=", "socketserver", ".", "TCPServer", "(", "(", "\"\"", ",", "port", ")", ",", "Handler", ")", "os", ".", "chdir", "(", "CONFIG", "[", "'output_to'", "]", ")", "try", ":", "logger", ".", "info", "(", "\"and ready to test at \"", "\"http://127.0.0.1:%d\"", "%", "CONFIG", "[", "'http_port'", "]", ")", "logger", ".", "info", "(", "\"Hit Ctrl+C to exit\"", ")", "httpd", ".", "serve_forever", "(", ")", "except", "KeyboardInterrupt", ":", "httpd", ".", "shutdown", "(", ")" ]
launch an HTTP server to preview the website
[ "launch", "an", "HTTP", "to", "preview", "the", "website" ]
python
train
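A self-contained version of the same preview pattern, with the CONFIG lookups replaced by a hard-coded port (8000 is an arbitrary choice) and plain print instead of the module's logger:

import http.server
import socketserver

Handler = http.server.SimpleHTTPRequestHandler
socketserver.TCPServer.allow_reuse_address = True
with socketserver.TCPServer(("", 8000), Handler) as httpd:
    try:
        print("ready to test at http://127.0.0.1:8000 - hit Ctrl+C to exit")
        httpd.serve_forever()
    except KeyboardInterrupt:
        httpd.shutdown()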
pypa/pipenv
pipenv/vendor/click/termui.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/termui.py#L232-L258
def echo_via_pager(text_or_generator, color=None): """This function takes a text and shows it via an environment specific pager on stdout. .. versionchanged:: 3.0 Added the `color` flag. :param text_or_generator: the text to page, or alternatively, a generator emitting the text to page. :param color: controls if the pager supports ANSI colors or not. The default is autodetection. """ color = resolve_color_default(color) if inspect.isgeneratorfunction(text_or_generator): i = text_or_generator() elif isinstance(text_or_generator, string_types): i = [text_or_generator] else: i = iter(text_or_generator) # convert every element of i to a text type if necessary text_generator = (el if isinstance(el, string_types) else text_type(el) for el in i) from ._termui_impl import pager return pager(itertools.chain(text_generator, "\n"), color)
[ "def", "echo_via_pager", "(", "text_or_generator", ",", "color", "=", "None", ")", ":", "color", "=", "resolve_color_default", "(", "color", ")", "if", "inspect", ".", "isgeneratorfunction", "(", "text_or_generator", ")", ":", "i", "=", "text_or_generator", "(", ")", "elif", "isinstance", "(", "text_or_generator", ",", "string_types", ")", ":", "i", "=", "[", "text_or_generator", "]", "else", ":", "i", "=", "iter", "(", "text_or_generator", ")", "# convert every element of i to a text type if necessary", "text_generator", "=", "(", "el", "if", "isinstance", "(", "el", ",", "string_types", ")", "else", "text_type", "(", "el", ")", "for", "el", "in", "i", ")", "from", ".", "_termui_impl", "import", "pager", "return", "pager", "(", "itertools", ".", "chain", "(", "text_generator", ",", "\"\\n\"", ")", ",", "color", ")" ]
This function takes a text and shows it via an environment specific pager on stdout. .. versionchanged:: 3.0 Added the `color` flag. :param text_or_generator: the text to page, or alternatively, a generator emitting the text to page. :param color: controls if the pager supports ANSI colors or not. The default is autodetection.
[ "This", "function", "takes", "a", "text", "and", "shows", "it", "via", "an", "environment", "specific", "pager", "on", "stdout", "." ]
python
train
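Usage sketch for echo_via_pager: passing a generator function keeps memory flat, because the pager pulls text lazily as the user scrolls.

import click

def generate_output():
    for idx in range(50000):
        yield 'Line %d\n' % idx

# a generator function is invoked lazily, per the isgeneratorfunction branch
click.echo_via_pager(generate_output)

# a plain string works too and is wrapped in a single-element list
click.echo_via_pager('short text\n')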
studionow/pybrightcove
pybrightcove/playlist.py
https://github.com/studionow/pybrightcove/blob/19c946b689a80156e070fe9bc35589c4b768e614/pybrightcove/playlist.py#L171-L177
def find_all(connection=None, page_size=100, page_number=0, sort_by=DEFAULT_SORT_BY, sort_order=DEFAULT_SORT_ORDER): """ List all playlists. """ return pybrightcove.connection.ItemResultSet("find_all_playlists", Playlist, connection, page_size, page_number, sort_by, sort_order)
[ "def", "find_all", "(", "connection", "=", "None", ",", "page_size", "=", "100", ",", "page_number", "=", "0", ",", "sort_by", "=", "DEFAULT_SORT_BY", ",", "sort_order", "=", "DEFAULT_SORT_ORDER", ")", ":", "return", "pybrightcove", ".", "connection", ".", "ItemResultSet", "(", "\"find_all_playlists\"", ",", "Playlist", ",", "connection", ",", "page_size", ",", "page_number", ",", "sort_by", ",", "sort_order", ")" ]
List all playlists.
[ "List", "all", "playlists", "." ]
python
train
saltstack/salt
salt/states/boto_vpc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_vpc.py#L674-L691
def _verify_subnet_association(route_table_desc, subnet_id): ''' Helper function to verify a subnet's route table association route_table_desc the description of a route table, as returned from boto_vpc.describe_route_table subnet_id the subnet id to verify .. versionadded:: 2016.11.0 ''' if route_table_desc: if 'associations' in route_table_desc: for association in route_table_desc['associations']: if association['subnet_id'] == subnet_id: return True return False
[ "def", "_verify_subnet_association", "(", "route_table_desc", ",", "subnet_id", ")", ":", "if", "route_table_desc", ":", "if", "'associations'", "in", "route_table_desc", ":", "for", "association", "in", "route_table_desc", "[", "'associations'", "]", ":", "if", "association", "[", "'subnet_id'", "]", "==", "subnet_id", ":", "return", "True", "return", "False" ]
Helper function to verify a subnet's route table association route_table_desc the description of a route table, as returned from boto_vpc.describe_route_table subnet_id the subnet id to verify .. versionadded:: 2016.11.0
[ "Helper", "function", "verify", "a", "subnet", "s", "route", "table", "association" ]
python
train
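Because the helper is pure Python, its behavior is easy to pin down with a sample description shaped like boto_vpc.describe_route_table output (the ids below are made up):

route_table_desc = {
    'id': 'rtb-12345678',
    'associations': [
        {'id': 'rtbassoc-1', 'subnet_id': 'subnet-aaaa1111'},
    ],
}

assert _verify_subnet_association(route_table_desc, 'subnet-aaaa1111') is True
assert _verify_subnet_association(route_table_desc, 'subnet-bbbb2222') is False
# a missing description short-circuits to False
assert _verify_subnet_association(None, 'subnet-aaaa1111') is False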
observermedia/django-wordpress-rest
wordpress/loading.py
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L500-L519
def set_posts_param_modified_after(self, params, post_type, status): """ Set modified_after date to "continue where we left off" if appropriate :param params: the GET params dict, which may be updated to include the "modified_after" key :param post_type: post, page, attachment, or any custom post type set up in the WP API :param status: publish, private, draft, etc. :return: None """ if not self.purge_first and not self.full and not self.modified_after: if status == "any": latest = Post.objects.filter(post_type=post_type).order_by("-modified").first() else: latest = Post.objects.filter(post_type=post_type, status=status).order_by("-modified").first() if latest: self.modified_after = latest.modified if self.modified_after: params["modified_after"] = self.modified_after.isoformat() logger.info("getting posts after: %s", params["modified_after"])
[ "def", "set_posts_param_modified_after", "(", "self", ",", "params", ",", "post_type", ",", "status", ")", ":", "if", "not", "self", ".", "purge_first", "and", "not", "self", ".", "full", "and", "not", "self", ".", "modified_after", ":", "if", "status", "==", "\"any\"", ":", "latest", "=", "Post", ".", "objects", ".", "filter", "(", "post_type", "=", "post_type", ")", ".", "order_by", "(", "\"-modified\"", ")", ".", "first", "(", ")", "else", ":", "latest", "=", "Post", ".", "objects", ".", "filter", "(", "post_type", "=", "post_type", ",", "status", "=", "status", ")", ".", "order_by", "(", "\"-modified\"", ")", ".", "first", "(", ")", "if", "latest", ":", "self", ".", "modified_after", "=", "latest", ".", "modified", "if", "self", ".", "modified_after", ":", "params", "[", "\"modified_after\"", "]", "=", "self", ".", "modified_after", ".", "isoformat", "(", ")", "logger", ".", "info", "(", "\"getting posts after: %s\"", ",", "params", "[", "\"modified_after\"", "]", ")" ]
Set modified_after date to "continue where we left off" if appropriate :param params: the GET params dict, which may be updated to include the "modified_after" key :param post_type: post, page, attachment, or any custom post type set up in the WP API :param status: publish, private, draft, etc. :return: None
[ "Set", "modified_after", "date", "to", "continue", "where", "we", "left", "off", "if", "appropriate" ]
python
train
mdsol/rwslib
rwslib/builders/admindata.py
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/builders/admindata.py#L236-L255
def build(self, builder): """Build XML by appending to builder""" params = dict(OID=self.oid) if self.user_type: params.update(dict(UserType=self.user_type.value)) builder.start(self.__class__.__name__, params) # build the children for child in ('login_name', 'display_name', 'full_name', 'first_name', 'last_name', 'organisation'): if getattr(self, child) is not None: getattr(self, child).build(builder) for address in self.addresses: address.build(builder) for email in self.emails: email.build(builder) for phone in self.phones: phone.build(builder) for location in self.locations: location.build(builder) builder.end(self.__class__.__name__)
[ "def", "build", "(", "self", ",", "builder", ")", ":", "params", "=", "dict", "(", "OID", "=", "self", ".", "oid", ")", "if", "self", ".", "user_type", ":", "params", ".", "update", "(", "dict", "(", "UserType", "=", "self", ".", "user_type", ".", "value", ")", ")", "builder", ".", "start", "(", "self", ".", "__class__", ".", "__name__", ",", "params", ")", "# build the children", "for", "child", "in", "(", "'login_name'", ",", "'display_name'", ",", "'full_name'", ",", "'first_name'", ",", "'last_name'", ",", "'organisation'", ")", ":", "if", "getattr", "(", "self", ",", "child", ")", "is", "not", "None", ":", "getattr", "(", "self", ",", "child", ")", ".", "build", "(", "builder", ")", "for", "address", "in", "self", ".", "addresses", ":", "address", ".", "build", "(", "builder", ")", "for", "email", "in", "self", ".", "emails", ":", "email", ".", "build", "(", "builder", ")", "for", "phone", "in", "self", ".", "phones", ":", "phone", ".", "build", "(", "builder", ")", "for", "location", "in", "self", ".", "locations", ":", "location", ".", "build", "(", "builder", ")", "builder", ".", "end", "(", "self", ".", "__class__", ".", "__name__", ")" ]
Build XML by appending to builder
[ "Build", "XML", "by", "appending", "to", "builder" ]
python
train
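build() targets a SAX-style builder with start()/data()/end() calls; the stdlib TreeBuilder below speaks the same protocol, which makes the method easy to exercise in isolation. The OID and user type are hypothetical sample values, and rwslib's actual builder class is not shown in this record.

from xml.etree.ElementTree import TreeBuilder, tostring

builder = TreeBuilder()
builder.start('User', {'OID': 'USR.001', 'UserType': 'Sponsor'})
builder.start('LoginName', {})
builder.data('jdoe')
builder.end('LoginName')
builder.end('User')

# b'<User OID="USR.001" UserType="Sponsor"><LoginName>jdoe</LoginName></User>'
print(tostring(builder.close()))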
odlgroup/odl
odl/solvers/nonsmooth/proximal_operators.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/nonsmooth/proximal_operators.py#L1352-L1448
def proximal_l1_l2(space, lam=1, g=None): r"""Proximal operator factory of the group-L1-L2 norm/distance. Implements the proximal operator of the functional :: F(x) = lam || |x - g|_2 ||_1 with ``x`` and ``g`` elements in ``space``, and scaling factor ``lam``. Here, ``|.|_2`` is the pointwise Euclidean norm of a vector-valued function. Parameters ---------- space : `LinearSpace` or `ProductSpace` Domain of the functional. lam : positive float, optional Scaling factor or regularization parameter. g : ``space`` element, optional Element to which the L1-L2 distance is taken. Default: ``space.zero``. Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- For the functional .. math:: F(x) = \lambda \| |x - g|_2 \|_1, and a step size :math:`\sigma`, the proximal operator of :math:`\sigma F` is given as the "soft-shrinkage" operator .. math:: \mathrm{prox}_{\sigma F}(x) = \begin{cases} g, & \text{where } |x - g|_2 \leq \sigma\lambda, \\ x - \sigma\lambda \frac{x - g}{|x - g|_2}, & \text{elsewhere.} \end{cases} Here, all operations are to be read pointwise. See Also -------- proximal_l1 : Scalar or non-isotropic vectorial variant """ lam = float(lam) if g is not None and g not in space: raise TypeError('{!r} is not an element of {!r}'.format(g, space)) class ProximalL1L2(Operator): """Proximal operator of the group-L1-L2 norm/distance.""" def __init__(self, sigma): """Initialize a new instance. Parameters ---------- sigma : positive float Step size parameter. """ super(ProximalL1L2, self).__init__( domain=space, range=space, linear=False) self.sigma = float(sigma) def _call(self, x, out): """Return ``self(x, out=out)``.""" # diff = x - g if g is not None: diff = x - g else: if x is out: # Handle aliased `x` and `out` (original `x` needed later) diff = x.copy() else: diff = x # We write the operator as # x - (x - g) / max(|x - g|_2 / sig*lam, 1) pwnorm = PointwiseNorm(self.domain, exponent=2) denom = pwnorm(diff) denom /= self.sigma * lam denom.ufuncs.maximum(1, out=denom) # out = (x - g) / denom for out_i, diff_i in zip(out, diff): diff_i.divide(denom, out=out_i) # out = x - ... out.lincomb(1, x, -1, out) return ProximalL1L2
[ "def", "proximal_l1_l2", "(", "space", ",", "lam", "=", "1", ",", "g", "=", "None", ")", ":", "lam", "=", "float", "(", "lam", ")", "if", "g", "is", "not", "None", "and", "g", "not", "in", "space", ":", "raise", "TypeError", "(", "'{!r} is not an element of {!r}'", ".", "format", "(", "g", ",", "space", ")", ")", "class", "ProximalL1L2", "(", "Operator", ")", ":", "\"\"\"Proximal operator of the group-L1-L2 norm/distance.\"\"\"", "def", "__init__", "(", "self", ",", "sigma", ")", ":", "\"\"\"Initialize a new instance.\n\n Parameters\n ----------\n sigma : positive float\n Step size parameter.\n \"\"\"", "super", "(", "ProximalL1L2", ",", "self", ")", ".", "__init__", "(", "domain", "=", "space", ",", "range", "=", "space", ",", "linear", "=", "False", ")", "self", ".", "sigma", "=", "float", "(", "sigma", ")", "def", "_call", "(", "self", ",", "x", ",", "out", ")", ":", "\"\"\"Return ``self(x, out=out)``.\"\"\"", "# diff = x - g", "if", "g", "is", "not", "None", ":", "diff", "=", "x", "-", "g", "else", ":", "if", "x", "is", "out", ":", "# Handle aliased `x` and `out` (original `x` needed later)", "diff", "=", "x", ".", "copy", "(", ")", "else", ":", "diff", "=", "x", "# We write the operator as", "# x - (x - g) / max(|x - g|_2 / sig*lam, 1)", "pwnorm", "=", "PointwiseNorm", "(", "self", ".", "domain", ",", "exponent", "=", "2", ")", "denom", "=", "pwnorm", "(", "diff", ")", "denom", "/=", "self", ".", "sigma", "*", "lam", "denom", ".", "ufuncs", ".", "maximum", "(", "1", ",", "out", "=", "denom", ")", "# out = (x - g) / denom", "for", "out_i", ",", "diff_i", "in", "zip", "(", "out", ",", "diff", ")", ":", "diff_i", ".", "divide", "(", "denom", ",", "out", "=", "out_i", ")", "# out = x - ...", "out", ".", "lincomb", "(", "1", ",", "x", ",", "-", "1", ",", "out", ")", "return", "ProximalL1L2" ]
r"""Proximal operator factory of the group-L1-L2 norm/distance. Implements the proximal operator of the functional :: F(x) = lam || |x - g|_2 ||_1 with ``x`` and ``g`` elements in ``space``, and scaling factor ``lam``. Here, ``|.|_2`` is the pointwise Euclidean norm of a vector-valued function. Parameters ---------- space : `LinearSpace` or `ProductSpace` Domain of the functional. lam : positive float, optional Scaling factor or regularization parameter. g : ``space`` element, optional Element to which the L1-L2 distance is taken. Default: ``space.zero``. Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- For the functional .. math:: F(x) = \lambda \| |x - g|_2 \|_1, and a step size :math:`\sigma`, the proximal operator of :math:`\sigma F` is given as the "soft-shrinkage" operator .. math:: \mathrm{prox}_{\sigma F}(x) = \begin{cases} g, & \text{where } |x - g|_2 \leq \sigma\lambda, \\ x - \sigma\lambda \frac{x - g}{|x - g|_2}, & \text{elsewhere.} \end{cases} Here, all operations are to be read pointwise. See Also -------- proximal_l1 : Scalar or non-isotropic vectorial variant
[ "r", "Proximal", "operator", "factory", "of", "the", "group", "-", "L1", "-", "L2", "norm", "/", "distance", "." ]
python
train
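The group soft-shrinkage formula the factory implements can be checked directly in numpy for g = 0: points whose pointwise norm is at most sigma*lambda collapse to zero, the rest shrink by sigma*lambda along x/|x|_2.

import numpy as np

sigma, lam = 0.5, 2.0
x = np.array([[3.0, 0.1, -2.0],    # first vector component at three points
              [4.0, 0.2,  0.0]])   # second vector component
norm = np.sqrt((x ** 2).sum(axis=0))               # pointwise |x|_2
out = x - x / np.maximum(norm / (sigma * lam), 1)  # x - (x - g)/max(|x|_2/(sig*lam), 1)
print(out)  # first point shrinks to 0.8*x, second collapses to 0, third halves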
NyashniyVladya/MarkovTextGenerator
MarkovTextGenerator/markov_text_generator.py
https://github.com/NyashniyVladya/MarkovTextGenerator/blob/3d90e02a507939709773ef01c7ff3ec68b2b8d4b/MarkovTextGenerator/markov_text_generator.py#L382-L395
def _parse_from_file(self, file_path): """ See the description of _parse_from_text. The only difference is that the input is a file path rather than a text. """ file_path = abspath(file_path) if not isfile(file_path): raise MarkovTextExcept("Передан не файл.") with open(file_path, "rb") as txt_file: for line in txt_file: text = line.decode("utf-8", "ignore").strip() if not text: continue yield from self._parse_from_text(text)
[ "def", "_parse_from_file", "(", "self", ",", "file_path", ")", ":", "file_path", "=", "abspath", "(", "file_path", ")", "if", "not", "isfile", "(", "file_path", ")", ":", "raise", "MarkovTextExcept", "(", "\"Передан не файл.\")", "", "with", "open", "(", "file_path", ",", "\"rb\"", ")", "as", "txt_file", ":", "for", "line", "in", "txt_file", ":", "text", "=", "line", ".", "decode", "(", "\"utf-8\"", ",", "\"ignore\"", ")", ".", "strip", "(", ")", "if", "not", "text", ":", "continue", "yield", "from", "self", ".", "_parse_from_text", "(", "text", ")" ]
See the description of _parse_from_text. The only difference is that the input is a file path rather than a text.
[ "см", ".", "описание", "_parse_from_text", ".", "Только", "на", "вход", "подаётся", "не", "текст", "а", "путь", "к", "файлу", "." ]
python
valid
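The decode/strip/skip-empty loop, isolated from the class so it can be run directly (io.BytesIO stands in for the opened binary file):

import io

raw = io.BytesIO('first line\n\n  second line \n'.encode('utf-8'))
lines = []
for line in raw:
    text = line.decode('utf-8', 'ignore').strip()
    if not text:
        continue  # blank lines are dropped before parsing
    lines.append(text)

assert lines == ['first line', 'second line']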
LogicalDash/LiSE
ELiDE/ELiDE/board/board.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/board/board.py#L846-L850
def arrows(self): """Iterate over all my arrows.""" for o in self.arrow.values(): for arro in o.values(): yield arro
[ "def", "arrows", "(", "self", ")", ":", "for", "o", "in", "self", ".", "arrow", ".", "values", "(", ")", ":", "for", "arro", "in", "o", ".", "values", "(", ")", ":", "yield", "arro" ]
Iterate over all my arrows.
[ "Iterate", "over", "all", "my", "arrows", "." ]
python
train
fossasia/AYABInterface
AYABInterface/communication/__init__.py
https://github.com/fossasia/AYABInterface/blob/e2065eed8daf17b2936f6ca5e488c9bfb850914e/AYABInterface/communication/__init__.py#L79-L84
def _message_received(self, message): """Notify the observers about the received message.""" with self.lock: self._state.receive_message(message) for callable in chain(self._on_message_received, self._on_message): callable(message)
[ "def", "_message_received", "(", "self", ",", "message", ")", ":", "with", "self", ".", "lock", ":", "self", ".", "_state", ".", "receive_message", "(", "message", ")", "for", "callable", "in", "chain", "(", "self", ".", "_on_message_received", ",", "self", ".", "_on_message", ")", ":", "callable", "(", "message", ")" ]
Notify the observers about the received message.
[ "Notify", "the", "observers", "about", "the", "received", "message", "." ]
python
train
jsommers/switchyard
switchyard/lib/packet/common.py
https://github.com/jsommers/switchyard/blob/fdcb3869c937dcedbd6ea7a7822ebd412bf1e2b0/switchyard/lib/packet/common.py#L317-L349
def checksum (data, start = 0, skip_word = None): """ Calculate standard internet checksum over data starting at start'th byte skip_word: If specified, it's the word offset of a word in data to "skip" (as if it were zero). The purpose is when data is received data which contains a computed checksum that you are trying to verify -- you want to skip that word since it was zero when the checksum was initially calculated. """ if len(data) % 2 != 0: arr = array.array('H', data[:-1]) else: arr = array.array('H', data) if skip_word is not None: for i in range(0, len(arr)): if i == skip_word: continue start += arr[i] else: for i in range(0, len(arr)): start += arr[i] if len(data) % 2 != 0: start += struct.unpack('H', data[-1:]+b'\x00')[0] # Specify order? start = (start >> 16) + (start & 0xffff) start += (start >> 16) #while start >> 16: # start = (start >> 16) + (start & 0xffff) return ntohs(~start & 0xffff)
[ "def", "checksum", "(", "data", ",", "start", "=", "0", ",", "skip_word", "=", "None", ")", ":", "if", "len", "(", "data", ")", "%", "2", "!=", "0", ":", "arr", "=", "array", ".", "array", "(", "'H'", ",", "data", "[", ":", "-", "1", "]", ")", "else", ":", "arr", "=", "array", ".", "array", "(", "'H'", ",", "data", ")", "if", "skip_word", "is", "not", "None", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "arr", ")", ")", ":", "if", "i", "==", "skip_word", ":", "continue", "start", "+=", "arr", "[", "i", "]", "else", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "arr", ")", ")", ":", "start", "+=", "arr", "[", "i", "]", "if", "len", "(", "data", ")", "%", "2", "!=", "0", ":", "start", "+=", "struct", ".", "unpack", "(", "'H'", ",", "data", "[", "-", "1", ":", "]", "+", "b'\\x00'", ")", "[", "0", "]", "# Specify order?", "start", "=", "(", "start", ">>", "16", ")", "+", "(", "start", "&", "0xffff", ")", "start", "+=", "(", "start", ">>", "16", ")", "#while start >> 16:", "# start = (start >> 16) + (start & 0xffff)", "return", "ntohs", "(", "~", "start", "&", "0xffff", ")" ]
Calculate standard internet checksum over data starting at start'th byte skip_word: If specified, it's the word offset of a word in data to "skip" (as if it were zero). The purpose is when data is received data which contains a computed checksum that you are trying to verify -- you want to skip that word since it was zero when the checksum was initially calculated.
[ "Calculate", "standard", "internet", "checksum", "over", "data", "starting", "at", "start", "th", "byte" ]
python
train
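Two quick checks of the checksum routine above, using the well-known IPv4 header example (total length 0x73, TTL 64, UDP, 192.168.0.1 -> 192.168.0.199, stored checksum 0xb861):

import struct

header = struct.pack('!BBHHHBBH4s4s',
                     0x45, 0x00, 0x0073, 0x0000, 0x4000, 0x40, 0x11,
                     0xB861,  # the stored header checksum
                     bytes([192, 168, 0, 1]), bytes([192, 168, 0, 199]))

# skipping word offset 5 (the checksum field) should reproduce the stored value
print(hex(checksum(header, skip_word=5)))  # expected 0xb861

# summing over the whole header, a valid checksum verifies to zero
print(hex(checksum(header)))               # expected 0x0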
JarryShaw/PyPCAPKit
src/const/hip/parameter.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/const/hip/parameter.py#L71-L77
def get(key, default=-1): """Backport support for original codes.""" if isinstance(key, int): return Parameter(key) if key not in Parameter._member_map_: extend_enum(Parameter, key, default) return Parameter[key]
[ "def", "get", "(", "key", ",", "default", "=", "-", "1", ")", ":", "if", "isinstance", "(", "key", ",", "int", ")", ":", "return", "Parameter", "(", "key", ")", "if", "key", "not", "in", "Parameter", ".", "_member_map_", ":", "extend_enum", "(", "Parameter", ",", "key", ",", "default", ")", "return", "Parameter", "[", "key", "]" ]
Backport support for original codes.
[ "Backport", "support", "for", "original", "codes", "." ]
python
train
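The fall-back pattern in get() relies on aenum's extend_enum; a self-contained sketch, assuming aenum is installed (ESP_INFO = 65 is a real HIP parameter type, everything else here is illustrative):

from enum import IntEnum
from aenum import extend_enum

class Parameter(IntEnum):
    ESP_INFO = 65

def get(key, default=-1):
    if isinstance(key, int):
        return Parameter(key)
    if key not in Parameter._member_map_:
        extend_enum(Parameter, key, default)
    return Parameter[key]

print(get(65))         # Parameter.ESP_INFO
print(get('UNKNOWN'))  # member added on the fly with value -1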
paramiko/paramiko
paramiko/server.py
https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/server.py#L239-L267
def check_auth_gssapi_with_mic( self, username, gss_authenticated=AUTH_FAILED, cc_file=None ): """ Authenticate the given user to the server if he is a valid krb5 principal. :param str username: The username of the authenticating client :param int gss_authenticated: The result of the krb5 authentication :param str cc_file: The krb5 client credentials cache filename :return: ``AUTH_FAILED`` if the user is not authenticated otherwise ``AUTH_SUCCESSFUL`` :rtype: int :note: Kerberos credential delegation is not supported. :see: `.ssh_gss` :note: We are just checking in L{AuthHandler} that the given user is a valid krb5 principal! We don't check if the krb5 principal is allowed to log in on the server, because there is no way to do that in python. So if you develop your own SSH server with paramiko for a certain platform like Linux, you should call C{krb5_kuserok()} in your local kerberos library to make sure that the krb5_principal has an account on the server and is allowed to log in as a user. :see: http://www.unix.com/man-page/all/3/krb5_kuserok/ """ if gss_authenticated == AUTH_SUCCESSFUL: return AUTH_SUCCESSFUL return AUTH_FAILED
[ "def", "check_auth_gssapi_with_mic", "(", "self", ",", "username", ",", "gss_authenticated", "=", "AUTH_FAILED", ",", "cc_file", "=", "None", ")", ":", "if", "gss_authenticated", "==", "AUTH_SUCCESSFUL", ":", "return", "AUTH_SUCCESSFUL", "return", "AUTH_FAILED" ]
Authenticate the given user to the server if he is a valid krb5 principal. :param str username: The username of the authenticating client :param int gss_authenticated: The result of the krb5 authentication :param str cc_file: The krb5 client credentials cache filename :return: ``AUTH_FAILED`` if the user is not authenticated otherwise ``AUTH_SUCCESSFUL`` :rtype: int :note: Kerberos credential delegation is not supported. :see: `.ssh_gss` :note: We are just checking in L{AuthHandler} that the given user is a valid krb5 principal! We don't check if the krb5 principal is allowed to log in on the server, because there is no way to do that in python. So if you develop your own SSH server with paramiko for a certain platform like Linux, you should call C{krb5_kuserok()} in your local kerberos library to make sure that the krb5_principal has an account on the server and is allowed to log in as a user. :see: http://www.unix.com/man-page/all/3/krb5_kuserok/
[ "Authenticate", "the", "given", "user", "to", "the", "server", "if", "he", "is", "a", "valid", "krb5", "principal", "." ]
python
train
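Overriding the hook in a custom server looks like the hedged sketch below; the principal name is hypothetical, and the transport/channel plumbing that normally surrounds a ServerInterface is omitted.

import paramiko

class Server(paramiko.ServerInterface):
    def check_auth_gssapi_with_mic(self, username,
                                   gss_authenticated=paramiko.AUTH_FAILED,
                                   cc_file=None):
        # accept only an already-authenticated, whitelisted principal
        if (gss_authenticated == paramiko.AUTH_SUCCESSFUL
                and username == 'alice@EXAMPLE.COM'):
            return paramiko.AUTH_SUCCESSFUL
        return paramiko.AUTH_FAILED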
schneiderfelipe/pyrrole
pyrrole/drawing.py
https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/drawing.py#L257-L371
def draw_diagram_nodes(graph, pos=None, nodelist=None, node_size=.7, node_color='k', style='solid', alpha=1.0, cmap=None, vmin=None, vmax=None, ax=None, label=None): """ Draw nodes of graph. This draws only the nodes of graph as horizontal lines at each ``y = pos[1]`` from ``x - node_size/2`` to ``x + node_size/2``, where ``x = pos[0]``. Parameters ---------- graph : `networkx.Graph` A NetworkX graph. pos : mapping, optional A mapping with nodes as keys and positions as values. Positions should be sequences of length 2. If not specified (default), a diagram layout positioning will be computed. See `networkx.layout` and `pyrrole.drawing` for functions that compute node positions. nodelist : `list`, optional Draw only specified nodes (default is ``graph.nodes()``). node_size : scalar or array Size of nodes (default is ``.7``). If an array is specified it must be the same length as nodelist. node_color : color `str`, or array of `float` Node color. Can be a single color format `str` (default is ``'k'``), or a sequence of colors with the same length as nodelist. If numeric values are specified they will be mapped to colors using the `cmap` and `vmin`, `vmax` parameters. See `matplotlib.hlines` for more details. style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``) Edge line style (default is ``'solid'``). See `matplotlib.hlines` for more details. alpha : `float` or array of `float`, optional The node transparency. This can be a single alpha value (default is ``'1.0'``), in which case it will be applied to all the nodes of color. Otherwise, if it is an array, the elements of alpha will be applied to the colors in order (cycling through alpha multiple times if necessary). cmap : Matplotlib colormap, optional Colormap name or Colormap instance for mapping intensities of nodes. vmin : `float`, optional Minimum for node colormap scaling. vmax : `float`, optional Maximum for node colormap scaling. ax : `matplotlib.axes.Axes`, optional Draw the graph in the specified Matplotlib axes. label : `str`, optional Label for legend. Returns ------- `matplotlib.collections.LineCollection` `LineCollection` of the nodes. Raises ------ networkx.NetworkXError Raised if a node has no position or one with bad value. Examples -------- >>> import pandas as pd >>> from pyrrole import ChemicalSystem >>> from pyrrole.drawing import draw_diagram_nodes >>> data = pd.DataFrame( ... [{"name": "Separated_Reactants", "freeenergy": 0.}, ... {"name": "mlC1", "freeenergy": -5.4}, ... {"name": "mlC2", "freeenergy": -15.6}, ... {"name": "mTS1", "freeenergy": 28.5, "color": "g"}, ... {"name": "mCARB1", "freeenergy": -9.7}, ... {"name": "mCARB2", "freeenergy": -19.8}, ... {"name": "mCARBX", "freeenergy": 20}]).set_index("name") >>> system = ChemicalSystem( ... ["Separated_Reactants -> mlC1 -> mTS1", ... "Separated_Reactants -> mlC2 -> mTS1", ... "mCARB2 <- mTS1 -> mCARB1", ... 
"Separated_Reactants -> mCARBX"], data) >>> digraph = system.to_digraph() >>> nodes = draw_diagram_nodes(digraph) """ if ax is None: ax = _plt.gca() if nodelist is None: nodelist = list(graph.nodes()) if not nodelist or len(nodelist) == 0: # empty nodelist, no drawing return None if pos is None: pos = diagram_layout(graph) try: xy = _np.asarray([pos[v] for v in nodelist]) except KeyError as e: raise _nx.NetworkXError('Node {} has no position.'.format(e)) except ValueError: raise _nx.NetworkXError('Bad value in node positions.') if isinstance(alpha, _collections.Iterable): node_color = _nx.drawing.apply_alpha(node_color, alpha, nodelist, cmap, vmin, vmax) alpha = None node_collection = ax.hlines(xy[:, 1], xy[:, 0] - node_size/2., xy[:, 0] + node_size/2., colors=node_color, linestyles=style, label=label, cmap=cmap) node_collection.set_zorder(2) return node_collection
[ "def", "draw_diagram_nodes", "(", "graph", ",", "pos", "=", "None", ",", "nodelist", "=", "None", ",", "node_size", "=", ".7", ",", "node_color", "=", "'k'", ",", "style", "=", "'solid'", ",", "alpha", "=", "1.0", ",", "cmap", "=", "None", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ",", "ax", "=", "None", ",", "label", "=", "None", ")", ":", "if", "ax", "is", "None", ":", "ax", "=", "_plt", ".", "gca", "(", ")", "if", "nodelist", "is", "None", ":", "nodelist", "=", "list", "(", "graph", ".", "nodes", "(", ")", ")", "if", "not", "nodelist", "or", "len", "(", "nodelist", ")", "==", "0", ":", "# empty nodelist, no drawing", "return", "None", "if", "pos", "is", "None", ":", "pos", "=", "diagram_layout", "(", "graph", ")", "try", ":", "xy", "=", "_np", ".", "asarray", "(", "[", "pos", "[", "v", "]", "for", "v", "in", "nodelist", "]", ")", "except", "KeyError", "as", "e", ":", "raise", "_nx", ".", "NetworkXError", "(", "'Node {} has no position.'", ".", "format", "(", "e", ")", ")", "except", "ValueError", ":", "raise", "_nx", ".", "NetworkXError", "(", "'Bad value in node positions.'", ")", "if", "isinstance", "(", "alpha", ",", "_collections", ".", "Iterable", ")", ":", "node_color", "=", "_nx", ".", "drawing", ".", "apply_alpha", "(", "node_color", ",", "alpha", ",", "nodelist", ",", "cmap", ",", "vmin", ",", "vmax", ")", "alpha", "=", "None", "node_collection", "=", "ax", ".", "hlines", "(", "xy", "[", ":", ",", "1", "]", ",", "xy", "[", ":", ",", "0", "]", "-", "node_size", "/", "2.", ",", "xy", "[", ":", ",", "0", "]", "+", "node_size", "/", "2.", ",", "colors", "=", "node_color", ",", "linestyles", "=", "style", ",", "label", "=", "label", ",", "cmap", "=", "cmap", ")", "node_collection", ".", "set_zorder", "(", "2", ")", "return", "node_collection" ]
Draw nodes of graph. This draws only the nodes of graph as horizontal lines at each ``y = pos[1]`` from ``x - node_size/2`` to ``x + node_size/2``, where ``x = pos[0]``. Parameters ---------- graph : `networkx.Graph` A NetworkX graph. pos : mapping, optional A mapping with nodes as keys and positions as values. Positions should be sequences of length 2. If not specified (default), a diagram layout positioning will be computed. See `networkx.layout` and `pyrrole.drawing` for functions that compute node positions. nodelist : `list`, optional Draw only specified nodes (default is ``graph.nodes()``). node_size : scalar or array Size of nodes (default is ``.7``). If an array is specified it must be the same length as nodelist. node_color : color `str`, or array of `float` Node color. Can be a single color format `str` (default is ``'k'``), or a sequence of colors with the same length as nodelist. If numeric values are specified they will be mapped to colors using the `cmap` and `vmin`, `vmax` parameters. See `matplotlib.hlines` for more details. style : `str` (``'solid'``, ``'dashed'``, ``'dotted'``, ``'dashdot'``) Edge line style (default is ``'solid'``). See `matplotlib.hlines` for more details. alpha : `float` or array of `float`, optional The node transparency. This can be a single alpha value (default is ``'1.0'``), in which case it will be applied to all the nodes of color. Otherwise, if it is an array, the elements of alpha will be applied to the colors in order (cycling through alpha multiple times if necessary). cmap : Matplotlib colormap, optional Colormap name or Colormap instance for mapping intensities of nodes. vmin : `float`, optional Minimum for node colormap scaling. vmax : `float`, optional Maximum for node colormap scaling. ax : `matplotlib.axes.Axes`, optional Draw the graph in the specified Matplotlib axes. label : `str`, optional Label for legend. Returns ------- `matplotlib.collections.LineCollection` `LineCollection` of the nodes. Raises ------ networkx.NetworkXError Raised if a node has no position or one with bad value. Examples -------- >>> import pandas as pd >>> from pyrrole import ChemicalSystem >>> from pyrrole.drawing import draw_diagram_nodes >>> data = pd.DataFrame( ... [{"name": "Separated_Reactants", "freeenergy": 0.}, ... {"name": "mlC1", "freeenergy": -5.4}, ... {"name": "mlC2", "freeenergy": -15.6}, ... {"name": "mTS1", "freeenergy": 28.5, "color": "g"}, ... {"name": "mCARB1", "freeenergy": -9.7}, ... {"name": "mCARB2", "freeenergy": -19.8}, ... {"name": "mCARBX", "freeenergy": 20}]).set_index("name") >>> system = ChemicalSystem( ... ["Separated_Reactants -> mlC1 -> mTS1", ... "Separated_Reactants -> mlC2 -> mTS1", ... "mCARB2 <- mTS1 -> mCARB1", ... "Separated_Reactants -> mCARBX"], data) >>> digraph = system.to_digraph() >>> nodes = draw_diagram_nodes(digraph)
[ "Draw", "nodes", "of", "graph", "." ]
python
train
rigetti/quantumflow
quantumflow/backend/numpybk.py
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/backend/numpybk.py#L98-L102
def astensor(array: TensorLike) -> BKTensor: """Converts a numpy array to the backend's tensor object """ array = np.asarray(array, dtype=CTYPE) return array
[ "def", "astensor", "(", "array", ":", "TensorLike", ")", "->", "BKTensor", ":", "array", "=", "np", ".", "asarray", "(", "array", ",", "dtype", "=", "CTYPE", ")", "return", "array" ]
Converts a numpy array to the backend's tensor object
[ "Converts", "a", "numpy", "array", "to", "the", "backend", "s", "tensor", "object" ]
python
train
tobgu/pyrsistent
pyrsistent/_plist.py
https://github.com/tobgu/pyrsistent/blob/c84dab0daaa44973cbe83830d14888827b307632/pyrsistent/_plist.py#L73-L86
def mcons(self, iterable): """ Return a new list with all elements of iterable repeatedly cons:ed to the current list. NB! The elements will be inserted in the reverse order of the iterable. Runs in O(len(iterable)). >>> plist([1, 2]).mcons([3, 4]) plist([4, 3, 1, 2]) """ head = self for elem in iterable: head = head.cons(elem) return head
[ "def", "mcons", "(", "self", ",", "iterable", ")", ":", "head", "=", "self", "for", "elem", "in", "iterable", ":", "head", "=", "head", ".", "cons", "(", "elem", ")", "return", "head" ]
Return a new list with all elements of iterable repeatedly cons:ed to the current list. NB! The elements will be inserted in the reverse order of the iterable. Runs in O(len(iterable)). >>> plist([1, 2]).mcons([3, 4]) plist([4, 3, 1, 2])
[ "Return", "a", "new", "list", "with", "all", "elements", "of", "iterable", "repeatedly", "cons", ":", "ed", "to", "the", "current", "list", ".", "NB!", "The", "elements", "will", "be", "inserted", "in", "the", "reverse", "order", "of", "the", "iterable", ".", "Runs", "in", "O", "(", "len", "(", "iterable", "))", "." ]
python
train
wbond/oscrypto
oscrypto/_openssl/asymmetric.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_openssl/asymmetric.py#L208-L218
def public_key(self): """ :return: The PublicKey object for the public key this certificate contains """ if not self._public_key and self.x509: evp_pkey = libcrypto.X509_get_pubkey(self.x509) self._public_key = PublicKey(evp_pkey, self.asn1['tbs_certificate']['subject_public_key_info']) return self._public_key
[ "def", "public_key", "(", "self", ")", ":", "if", "not", "self", ".", "_public_key", "and", "self", ".", "x509", ":", "evp_pkey", "=", "libcrypto", ".", "X509_get_pubkey", "(", "self", ".", "x509", ")", "self", ".", "_public_key", "=", "PublicKey", "(", "evp_pkey", ",", "self", ".", "asn1", "[", "'tbs_certificate'", "]", "[", "'subject_public_key_info'", "]", ")", "return", "self", ".", "_public_key" ]
:return: The PublicKey object for the public key this certificate contains
[ ":", "return", ":", "The", "PublicKey", "object", "for", "the", "public", "key", "this", "certificate", "contains" ]
python
valid
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/XMLSchema.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/XMLSchema.py#L594-L646
def getSchemaItem(self, collection, namespace, name): """returns object instance representing namespace, name, or if does not exist return None if built-in, else raise SchemaError. namespace -- namespace item defined in. name -- name of item. collection -- collection in parent Schema instance to search. """ parent = GetSchema(self) if parent.targetNamespace == namespace: try: obj = getattr(parent, collection)[name] except KeyError, ex: raise KeyError, 'targetNamespace(%s) collection(%s) has no item(%s)'\ %(namespace, collection, name) return obj if not parent.imports.has_key(namespace): if namespace in BUILT_IN_NAMESPACES: # built-in just return # WARNING: expecting import if "redefine" or add to built-in namespace. return raise SchemaError, 'schema "%s" does not import namespace "%s"' %( parent.targetNamespace, namespace) # Lazy Eval schema = parent.imports[namespace] if not isinstance(schema, XMLSchema): schema = schema.getSchema() if schema is not None: parent.imports[namespace] = schema if schema is None: if namespace in BUILT_IN_NAMESPACES: # built-in just return return raise SchemaError, 'no schema instance for imported namespace (%s).'\ %(namespace) if not isinstance(schema, XMLSchema): raise TypeError, 'expecting XMLSchema instance not "%r"' %schema try: obj = getattr(schema, collection)[name] except KeyError, ex: raise KeyError, 'targetNamespace(%s) collection(%s) has no item(%s)'\ %(namespace, collection, name) return obj
[ "def", "getSchemaItem", "(", "self", ",", "collection", ",", "namespace", ",", "name", ")", ":", "parent", "=", "GetSchema", "(", "self", ")", "if", "parent", ".", "targetNamespace", "==", "namespace", ":", "try", ":", "obj", "=", "getattr", "(", "parent", ",", "collection", ")", "[", "name", "]", "except", "KeyError", ",", "ex", ":", "raise", "KeyError", ",", "'targetNamespace(%s) collection(%s) has no item(%s)'", "%", "(", "namespace", ",", "collection", ",", "name", ")", "return", "obj", "if", "not", "parent", ".", "imports", ".", "has_key", "(", "namespace", ")", ":", "if", "namespace", "in", "BUILT_IN_NAMESPACES", ":", "# built-in just return", "# WARNING: expecting import if \"redefine\" or add to built-in namespace.", "return", "raise", "SchemaError", ",", "'schema \"%s\" does not import namespace \"%s\"'", "%", "(", "parent", ".", "targetNamespace", ",", "namespace", ")", "# Lazy Eval", "schema", "=", "parent", ".", "imports", "[", "namespace", "]", "if", "not", "isinstance", "(", "schema", ",", "XMLSchema", ")", ":", "schema", "=", "schema", ".", "getSchema", "(", ")", "if", "schema", "is", "not", "None", ":", "parent", ".", "imports", "[", "namespace", "]", "=", "schema", "if", "schema", "is", "None", ":", "if", "namespace", "in", "BUILT_IN_NAMESPACES", ":", "# built-in just return", "return", "raise", "SchemaError", ",", "'no schema instance for imported namespace (%s).'", "%", "(", "namespace", ")", "if", "not", "isinstance", "(", "schema", ",", "XMLSchema", ")", ":", "raise", "TypeError", ",", "'expecting XMLSchema instance not \"%r\"'", "%", "schema", "try", ":", "obj", "=", "getattr", "(", "schema", ",", "collection", ")", "[", "name", "]", "except", "KeyError", ",", "ex", ":", "raise", "KeyError", ",", "'targetNamespace(%s) collection(%s) has no item(%s)'", "%", "(", "namespace", ",", "collection", ",", "name", ")", "return", "obj" ]
returns object instance representing namespace, name, or if does not exist return None if built-in, else raise SchemaError. namespace -- namespace item defined in. name -- name of item. collection -- collection in parent Schema instance to search.
[ "returns", "object", "instance", "representing", "namespace", "name", "or", "if", "does", "not", "exist", "return", "None", "if", "built", "-", "in", "else", "raise", "SchemaError", ".", "namespace", "--", "namespace", "item", "defined", "in", ".", "name", "--", "name", "of", "item", ".", "collection", "--", "collection", "in", "parent", "Schema", "instance", "to", "search", "." ]
python
train
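getSchemaItem above is Python 2 only: "except KeyError, ex" and the statement form "raise KeyError, '...'" are syntax errors on Python 3. For reference, the modern spelling of that try/except/re-raise shape looks like this (names are illustrative):

    collection = {"item": object()}
    namespace, name = "urn:example", "item"
    try:
        obj = collection[name]
    except KeyError as ex:  # Py2: except KeyError, ex:
        # Py2: raise KeyError, 'targetNamespace(%s) ... has no item(%s)' % (...)
        raise KeyError(
            "targetNamespace(%s) has no item(%s)" % (namespace, name)
        ) from ex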
foxx/peewee-extras
peewee_extras.py
https://github.com/foxx/peewee-extras/blob/327e7e63465b3f6e1afc0e6a651f4cb5c8c60889/peewee_extras.py#L322-L357
def list(self, filters, cursor, count): """ List items from query """ assert isinstance(filters, dict), "expected filters type 'dict'" assert isinstance(cursor, dict), "expected cursor type 'dict'" # start with our base query query = self.get_query() assert isinstance(query, peewee.Query) # XXX: convert and apply user specified filters #filters = {field.name: cursor[field.name] for field in fields} #query.where( paginator = self.get_paginator() assert isinstance(paginator, Pagination) # always include an extra row for next cursor position count += 1 # apply pagination to query pquery = paginator.filter_query(query, cursor, count) items = [ item for item in pquery ] # determine next cursor position next_item = items.pop(1) next_cursor = next_item.to_cursor_ref() ''' # is this field allowed for sort? if field not in self.sort_fields: raise ValueError("Cannot sort on field '{}'".format(field)) ''' return items, next_cursor
[ "def", "list", "(", "self", ",", "filters", ",", "cursor", ",", "count", ")", ":", "assert", "isinstance", "(", "filters", ",", "dict", ")", ",", "\"expected filters type 'dict'\"", "assert", "isinstance", "(", "cursor", ",", "dict", ")", ",", "\"expected cursor type 'dict'\"", "# start with our base query", "query", "=", "self", ".", "get_query", "(", ")", "assert", "isinstance", "(", "query", ",", "peewee", ".", "Query", ")", "# XXX: convert and apply user specified filters", "#filters = {field.name: cursor[field.name] for field in fields}", "#query.where(", "paginator", "=", "self", ".", "get_paginator", "(", ")", "assert", "isinstance", "(", "paginator", ",", "Pagination", ")", "# always include an extra row for next cursor position", "count", "+=", "1", "# apply pagination to query", "pquery", "=", "paginator", ".", "filter_query", "(", "query", ",", "cursor", ",", "count", ")", "items", "=", "[", "item", "for", "item", "in", "pquery", "]", "# determine next cursor position", "next_item", "=", "items", ".", "pop", "(", "1", ")", "next_cursor", "=", "next_item", ".", "to_cursor_ref", "(", ")", "'''\n # is this field allowed for sort?\n if field not in self.sort_fields:\n raise ValueError(\"Cannot sort on field '{}'\".format(field))\n '''", "return", "items", ",", "next_cursor" ]
List items from query
[ "List", "items", "from", "query" ]
python
valid
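The count += 1 above is the standard keyset-pagination trick: fetch one row past the page size, and if that extra row exists it becomes the cursor for the next page. (Note the recorded code pops index 1; the usual form of the pattern pops the trailing extra row.) A database-free sketch of the idea:

    def paginate(rows, start, count):
        # 'rows' stands in for the filtered, ordered query result.
        window = rows[start:start + count + 1]  # request one extra row
        next_cursor = window.pop() if len(window) > count else None
        return window, next_cursor

    items, nxt = paginate(list("abcdefg"), 0, 3)
    assert items == ["a", "b", "c"] and nxt == "d"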
floydhub/floyd-cli
floyd/client/files.py
https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/client/files.py#L77-L88
def matches_glob_list(path, glob_list): """ Given a list of glob patterns, returns a boolean indicating if a path matches any glob in the list """ for glob in glob_list: try: if PurePath(path).match(glob): return True except TypeError: pass return False
[ "def", "matches_glob_list", "(", "path", ",", "glob_list", ")", ":", "for", "glob", "in", "glob_list", ":", "try", ":", "if", "PurePath", "(", "path", ")", ".", "match", "(", "glob", ")", ":", "return", "True", "except", "TypeError", ":", "pass", "return", "False" ]
Given a list of glob patterns, returns a boolean indicating if a path matches any glob in the list
[ "Given", "a", "list", "of", "glob", "patterns", "returns", "a", "boolean", "indicating", "if", "a", "path", "matches", "any", "glob", "in", "the", "list" ]
python
train
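A quick usage check of the helper above: PurePath.match anchors relative patterns at the right end of the path, and the TypeError guard silently skips malformed (non-string) patterns:

    from pathlib import PurePath

    def matches_glob_list(path, glob_list):
        for glob in glob_list:
            try:
                if PurePath(path).match(glob):
                    return True
            except TypeError:
                pass
        return False

    assert matches_glob_list("src/app/main.py", ["*.py"])    # right-anchored match
    assert not matches_glob_list("src/app/main.py", ["*.txt"])
    assert not matches_glob_list("main.py", [None])          # bad pattern skipped, not fatal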
inveniosoftware/invenio-records-files
invenio_records_files/utils.py
https://github.com/inveniosoftware/invenio-records-files/blob/c410eba986ea43be7e97082d5dcbbdc19ccec39c/invenio_records_files/utils.py#L54-L101
def file_download_ui(pid, record, _record_file_factory=None, **kwargs): """File download view for a given record. Plug this method into your ``RECORDS_UI_ENDPOINTS`` configuration: .. code-block:: python RECORDS_UI_ENDPOINTS = dict( recid=dict( # ... route='/records/<pid_value/files/<filename>', view_imp='invenio_records_files.utils:file_download_ui', record_class='invenio_records_files.api:Record', ) ) If ``download`` is passed as a querystring argument, the file is sent as an attachment. :param pid: The :class:`invenio_pidstore.models.PersistentIdentifier` instance. :param record: The record metadata. """ _record_file_factory = _record_file_factory or record_file_factory # Extract file from record. fileobj = _record_file_factory( pid, record, kwargs.get('filename') ) if not fileobj: abort(404) obj = fileobj.obj # Check permissions ObjectResource.check_object_permission(obj) # Send file. return ObjectResource.send_object( obj.bucket, obj, expected_chksum=fileobj.get('checksum'), logger_data={ 'bucket_id': obj.bucket_id, 'pid_type': pid.pid_type, 'pid_value': pid.pid_value, }, as_attachment=('download' in request.args) )
[ "def", "file_download_ui", "(", "pid", ",", "record", ",", "_record_file_factory", "=", "None", ",", "*", "*", "kwargs", ")", ":", "_record_file_factory", "=", "_record_file_factory", "or", "record_file_factory", "# Extract file from record.", "fileobj", "=", "_record_file_factory", "(", "pid", ",", "record", ",", "kwargs", ".", "get", "(", "'filename'", ")", ")", "if", "not", "fileobj", ":", "abort", "(", "404", ")", "obj", "=", "fileobj", ".", "obj", "# Check permissions", "ObjectResource", ".", "check_object_permission", "(", "obj", ")", "# Send file.", "return", "ObjectResource", ".", "send_object", "(", "obj", ".", "bucket", ",", "obj", ",", "expected_chksum", "=", "fileobj", ".", "get", "(", "'checksum'", ")", ",", "logger_data", "=", "{", "'bucket_id'", ":", "obj", ".", "bucket_id", ",", "'pid_type'", ":", "pid", ".", "pid_type", ",", "'pid_value'", ":", "pid", ".", "pid_value", ",", "}", ",", "as_attachment", "=", "(", "'download'", "in", "request", ".", "args", ")", ")" ]
File download view for a given record. Plug this method into your ``RECORDS_UI_ENDPOINTS`` configuration: .. code-block:: python RECORDS_UI_ENDPOINTS = dict( recid=dict( # ... route='/records/<pid_value/files/<filename>', view_imp='invenio_records_files.utils:file_download_ui', record_class='invenio_records_files.api:Record', ) ) If ``download`` is passed as a querystring argument, the file is sent as an attachment. :param pid: The :class:`invenio_pidstore.models.PersistentIdentifier` instance. :param record: The record metadata.
[ "File", "download", "view", "for", "a", "given", "record", "." ]
python
train
tensorflow/datasets
tensorflow_datasets/core/features/image_feature.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/image_feature.py#L135-L144
def encode_example(self, image_or_path_or_fobj): """Convert the given image into a dict convertible to tf example.""" if isinstance(image_or_path_or_fobj, np.ndarray): encoded_image = self._encode_image(image_or_path_or_fobj) elif isinstance(image_or_path_or_fobj, six.string_types): with tf.io.gfile.GFile(image_or_path_or_fobj, 'rb') as image_f: encoded_image = image_f.read() else: encoded_image = image_or_path_or_fobj.read() return encoded_image
[ "def", "encode_example", "(", "self", ",", "image_or_path_or_fobj", ")", ":", "if", "isinstance", "(", "image_or_path_or_fobj", ",", "np", ".", "ndarray", ")", ":", "encoded_image", "=", "self", ".", "_encode_image", "(", "image_or_path_or_fobj", ")", "elif", "isinstance", "(", "image_or_path_or_fobj", ",", "six", ".", "string_types", ")", ":", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "image_or_path_or_fobj", ",", "'rb'", ")", "as", "image_f", ":", "encoded_image", "=", "image_f", ".", "read", "(", ")", "else", ":", "encoded_image", "=", "image_or_path_or_fobj", ".", "read", "(", ")", "return", "encoded_image" ]
Convert the given image into a dict convertible to tf example.
[ "Convert", "the", "given", "image", "into", "a", "dict", "convertible", "to", "tf", "example", "." ]
python
train
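encode_example above is a three-way dispatch on the argument's type: ndarray (encode in-process), path string (open and read), anything else (assume file-like and read). A toy version with stand-ins for the NumPy/TensorFlow pieces, so the dispatch itself can be run anywhere:

    import io

    def encode_example(image_or_path_or_fobj):
        if isinstance(image_or_path_or_fobj, (bytes, bytearray)):
            return bytes(image_or_path_or_fobj)           # stand-in for the np.ndarray branch
        elif isinstance(image_or_path_or_fobj, str):
            with open(image_or_path_or_fobj, "rb") as f:  # stand-in for tf.io.gfile.GFile
                return f.read()
        else:
            return image_or_path_or_fobj.read()           # already file-like

    assert encode_example(io.BytesIO(b"png-bytes")) == b"png-bytes"
    assert encode_example(b"raw") == b"raw"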
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_qos.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_qos.py#L135-L151
def qos_map_dscp_cos_mark_to(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos") map = ET.SubElement(qos, "map") dscp_cos = ET.SubElement(map, "dscp-cos") dscp_cos_map_name_key = ET.SubElement(dscp_cos, "dscp-cos-map-name") dscp_cos_map_name_key.text = kwargs.pop('dscp_cos_map_name') mark = ET.SubElement(dscp_cos, "mark") dscp_in_values_key = ET.SubElement(mark, "dscp-in-values") dscp_in_values_key.text = kwargs.pop('dscp_in_values') to = ET.SubElement(mark, "to") to.text = kwargs.pop('to') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "qos_map_dscp_cos_mark_to", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "qos", "=", "ET", ".", "SubElement", "(", "config", ",", "\"qos\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-qos\"", ")", "map", "=", "ET", ".", "SubElement", "(", "qos", ",", "\"map\"", ")", "dscp_cos", "=", "ET", ".", "SubElement", "(", "map", ",", "\"dscp-cos\"", ")", "dscp_cos_map_name_key", "=", "ET", ".", "SubElement", "(", "dscp_cos", ",", "\"dscp-cos-map-name\"", ")", "dscp_cos_map_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'dscp_cos_map_name'", ")", "mark", "=", "ET", ".", "SubElement", "(", "dscp_cos", ",", "\"mark\"", ")", "dscp_in_values_key", "=", "ET", ".", "SubElement", "(", "mark", ",", "\"dscp-in-values\"", ")", "dscp_in_values_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'dscp_in_values'", ")", "to", "=", "ET", ".", "SubElement", "(", "mark", ",", "\"to\"", ")", "to", ".", "text", "=", "kwargs", ".", "pop", "(", "'to'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
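The auto-generated method above only assembles a nested ElementTree payload and hands it to a callback. Rebuilding the same structure stand-alone (with made-up values for the three kwargs) shows the XML it emits:

    import xml.etree.ElementTree as ET

    config = ET.Element("config")
    qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
    dscp_cos = ET.SubElement(ET.SubElement(qos, "map"), "dscp-cos")
    ET.SubElement(dscp_cos, "dscp-cos-map-name").text = "map1"  # dscp_cos_map_name
    mark = ET.SubElement(dscp_cos, "mark")
    ET.SubElement(mark, "dscp-in-values").text = "10"           # dscp_in_values
    ET.SubElement(mark, "to").text = "4"                        # to

    print(ET.tostring(config, encoding="unicode"))
    # -> <config><qos xmlns="urn:brocade.com:mgmt:brocade-qos"><map><dscp-cos>
    #    <dscp-cos-map-name>map1</dscp-cos-map-name><mark><dscp-in-values>10
    #    </dscp-in-values><to>4</to></mark></dscp-cos></map></qos></config>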
pylast/pylast
src/pylast/__init__.py
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1855-L1861
def get_top_tracks(self, limit=None, cacheable=True): """Returns a list of the most played Tracks by this artist.""" params = self._get_params() if limit: params["limit"] = limit return self._get_things("getTopTracks", "track", Track, params, cacheable)
[ "def", "get_top_tracks", "(", "self", ",", "limit", "=", "None", ",", "cacheable", "=", "True", ")", ":", "params", "=", "self", ".", "_get_params", "(", ")", "if", "limit", ":", "params", "[", "\"limit\"", "]", "=", "limit", "return", "self", ".", "_get_things", "(", "\"getTopTracks\"", ",", "\"track\"", ",", "Track", ",", "params", ",", "cacheable", ")" ]
Returns a list of the most played Tracks by this artist.
[ "Returns", "a", "list", "of", "the", "most", "played", "Tracks", "by", "this", "artist", "." ]
python
train
Yubico/yubikey-manager
ykman/cli/piv.py
https://github.com/Yubico/yubikey-manager/blob/3ac27bc59ae76a59db9d09a530494add2edbbabf/ykman/cli/piv.py#L869-L894
def write_object(ctx, pin, management_key, object_id, data): """ Write an arbitrary PIV object. Write a PIV object by providing the object id. Yubico writable PIV objects are available in the range 5f0000 - 5fffff. \b OBJECT-ID Id of PIV object in HEX. DATA File containing the data to be written. Use '-' to use stdin. """ controller = ctx.obj['controller'] _ensure_authenticated(ctx, controller, pin, management_key) def do_write_object(retry=True): try: controller.put_data(object_id, data.read()) except APDUError as e: logger.debug('Failed writing object', exc_info=e) if e.sw == SW.INCORRECT_PARAMETERS: ctx.fail('Something went wrong, is the object id valid?') raise do_write_object()
[ "def", "write_object", "(", "ctx", ",", "pin", ",", "management_key", ",", "object_id", ",", "data", ")", ":", "controller", "=", "ctx", ".", "obj", "[", "'controller'", "]", "_ensure_authenticated", "(", "ctx", ",", "controller", ",", "pin", ",", "management_key", ")", "def", "do_write_object", "(", "retry", "=", "True", ")", ":", "try", ":", "controller", ".", "put_data", "(", "object_id", ",", "data", ".", "read", "(", ")", ")", "except", "APDUError", "as", "e", ":", "logger", ".", "debug", "(", "'Failed writing object'", ",", "exc_info", "=", "e", ")", "if", "e", ".", "sw", "==", "SW", ".", "INCORRECT_PARAMETERS", ":", "ctx", ".", "fail", "(", "'Something went wrong, is the object id valid?'", ")", "raise", "do_write_object", "(", ")" ]
Write an arbitrary PIV object. Write a PIV object by providing the object id. Yubico writable PIV objects are available in the range 5f0000 - 5fffff. \b OBJECT-ID Id of PIV object in HEX. DATA File containing the data to be written. Use '-' to use stdin.
[ "Write", "an", "arbitrary", "PIV", "object", "." ]
python
train
UDST/urbansim
urbansim/models/util.py
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/util.py#L307-L347
def columns_in_formula(formula): """ Returns the names of all the columns used in a patsy formula. Parameters ---------- formula : str, iterable, or dict Any formula construction supported by ``str_model_expression``. Returns ------- columns : list of str """ if formula is None: return [] formula = str_model_expression(formula, add_constant=False) columns = [] tokens = map( lambda x: x.extra, tz.remove( lambda x: x.extra is None, _tokens_from_patsy(patsy.parse_formula.parse_formula(formula)))) for tok in tokens: # if there are parentheses in the expression we # want to drop them and everything outside # and start again from the top if '(' in tok: start = tok.find('(') + 1 fin = tok.rfind(')') columns.extend(columns_in_formula(tok[start:fin])) else: for toknum, tokval, _, _, _ in generate_tokens( StringIO(tok).readline): if toknum == NAME: columns.append(tokval) return list(tz.unique(columns))
[ "def", "columns_in_formula", "(", "formula", ")", ":", "if", "formula", "is", "None", ":", "return", "[", "]", "formula", "=", "str_model_expression", "(", "formula", ",", "add_constant", "=", "False", ")", "columns", "=", "[", "]", "tokens", "=", "map", "(", "lambda", "x", ":", "x", ".", "extra", ",", "tz", ".", "remove", "(", "lambda", "x", ":", "x", ".", "extra", "is", "None", ",", "_tokens_from_patsy", "(", "patsy", ".", "parse_formula", ".", "parse_formula", "(", "formula", ")", ")", ")", ")", "for", "tok", "in", "tokens", ":", "# if there are parentheses in the expression we", "# want to drop them and everything outside", "# and start again from the top", "if", "'('", "in", "tok", ":", "start", "=", "tok", ".", "find", "(", "'('", ")", "+", "1", "fin", "=", "tok", ".", "rfind", "(", "')'", ")", "columns", ".", "extend", "(", "columns_in_formula", "(", "tok", "[", "start", ":", "fin", "]", ")", ")", "else", ":", "for", "toknum", ",", "tokval", ",", "_", ",", "_", ",", "_", "in", "generate_tokens", "(", "StringIO", "(", "tok", ")", ".", "readline", ")", ":", "if", "toknum", "==", "NAME", ":", "columns", ".", "append", "(", "tokval", ")", "return", "list", "(", "tz", ".", "unique", "(", "columns", ")", ")" ]
Returns the names of all the columns used in a patsy formula. Parameters ---------- formula : str, iterable, or dict Any formula construction supported by ``str_model_expression``. Returns ------- columns : list of str
[ "Returns", "the", "names", "of", "all", "the", "columns", "used", "in", "a", "patsy", "formula", "." ]
python
train
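The parenthesis branch above is the interesting part: for a token like np.log(price) the function recurses on the text inside the outermost parentheses, so the call name is dropped and only the column survives. A stripped-down illustration of that recursion using only the stdlib tokenizer (the patsy/toolz plumbing is omitted):

    from io import StringIO
    from tokenize import generate_tokens, NAME

    def names_in(expr):
        # Dive inside the outermost parentheses, else collect NAME tokens.
        if "(" in expr:
            return names_in(expr[expr.find("(") + 1:expr.rfind(")")])
        return [tokval for toknum, tokval, _, _, _ in
                generate_tokens(StringIO(expr).readline) if toknum == NAME]

    assert names_in("np.log(price)") == ["price"]   # np and log are dropped
    assert names_in("sqft + rooms") == ["sqft", "rooms"]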
timstaley/voeventdb
voeventdb/server/database/models.py
https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/database/models.py#L62-L70
def to_odict(self, exclude=None): """ Returns an OrderedDict representation of the SQLalchemy table row. """ if exclude is None: exclude = tuple() colnames = [c.name for c in self.__table__.columns if c.name not in exclude] return OrderedDict(((col, getattr(self, col)) for col in colnames))
[ "def", "to_odict", "(", "self", ",", "exclude", "=", "None", ")", ":", "if", "exclude", "is", "None", ":", "exclude", "=", "tuple", "(", ")", "colnames", "=", "[", "c", ".", "name", "for", "c", "in", "self", ".", "__table__", ".", "columns", "if", "c", ".", "name", "not", "in", "exclude", "]", "return", "OrderedDict", "(", "(", "(", "col", ",", "getattr", "(", "self", ",", "col", ")", ")", "for", "col", "in", "colnames", ")", ")" ]
Returns an OrderedDict representation of the SQLalchemy table row.
[ "Returns", "an", "OrderedDict", "representation", "of", "the", "SQLalchemy", "table", "row", "." ]
python
train
Qiskit/qiskit-terra
qiskit/quantum_info/operators/channel/kraus.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/quantum_info/operators/channel/kraus.py#L361-L397
def _evolve(self, state, qargs=None): """Evolve a quantum state by the QuantumChannel. Args: state (QuantumState): The input statevector or density matrix. qargs (list): a list of QuantumState subsystem positions to apply the operator on. Returns: QuantumState: the output quantum state. Raises: QiskitError: if the operator dimension does not match the specified QuantumState subsystem dimensions. """ # If subsystem evolution we use the SuperOp representation if qargs is not None: return SuperOp(self)._evolve(state, qargs) # Otherwise we compute full evolution directly state = self._format_state(state) if state.shape[0] != self._input_dim: raise QiskitError( "QuantumChannel input dimension is not equal to state dimension." ) if state.ndim == 1 and self._data[1] is None and len( self._data[0]) == 1: # If we only have a single Kraus operator we can implement unitary-type # evolution of a state vector psi -> K[0].psi return np.dot(self._data[0][0], state) # Otherwise we always return a density matrix state = self._format_state(state, density_matrix=True) kraus_l, kraus_r = self._data if kraus_r is None: kraus_r = kraus_l return np.einsum('AiB,BC,AjC->ij', kraus_l, state, np.conjugate(kraus_r))
[ "def", "_evolve", "(", "self", ",", "state", ",", "qargs", "=", "None", ")", ":", "# If subsystem evolution we use the SuperOp representation", "if", "qargs", "is", "not", "None", ":", "return", "SuperOp", "(", "self", ")", ".", "_evolve", "(", "state", ",", "qargs", ")", "# Otherwise we compute full evolution directly", "state", "=", "self", ".", "_format_state", "(", "state", ")", "if", "state", ".", "shape", "[", "0", "]", "!=", "self", ".", "_input_dim", ":", "raise", "QiskitError", "(", "\"QuantumChannel input dimension is not equal to state dimension.\"", ")", "if", "state", ".", "ndim", "==", "1", "and", "self", ".", "_data", "[", "1", "]", "is", "None", "and", "len", "(", "self", ".", "_data", "[", "0", "]", ")", "==", "1", ":", "# If we only have a single Kraus operator we can implement unitary-type", "# evolution of a state vector psi -> K[0].psi", "return", "np", ".", "dot", "(", "self", ".", "_data", "[", "0", "]", "[", "0", "]", ",", "state", ")", "# Otherwise we always return a density matrix", "state", "=", "self", ".", "_format_state", "(", "state", ",", "density_matrix", "=", "True", ")", "kraus_l", ",", "kraus_r", "=", "self", ".", "_data", "if", "kraus_r", "is", "None", ":", "kraus_r", "=", "kraus_l", "return", "np", ".", "einsum", "(", "'AiB,BC,AjC->ij'", ",", "kraus_l", ",", "state", ",", "np", ".", "conjugate", "(", "kraus_r", ")", ")" ]
Evolve a quantum state by the QuantumChannel. Args: state (QuantumState): The input statevector or density matrix. qargs (list): a list of QuantumState subsystem positions to apply the operator on. Returns: QuantumState: the output quantum state. Raises: QiskitError: if the operator dimension does not match the specified QuantumState subsystem dimensions.
[ "Evolve", "a", "quantum", "state", "by", "the", "QuantumChannel", "." ]
python
test
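The closing einsum is the Kraus form of channel evolution, rho' = sum_A K_A rho K_A^dagger, written in index notation: 'AiB,BC,AjC->ij' contracts K[A,i,B] rho[B,C] conj(K)[A,j,C] over A, B and C. A small NumPy check that the einsum and the explicit operator sum agree:

    import numpy as np

    rng = np.random.default_rng(0)
    kraus = rng.normal(size=(3, 2, 2)) + 1j * rng.normal(size=(3, 2, 2))
    rho = np.array([[0.7, 0.1], [0.1, 0.3]], dtype=complex)

    via_einsum = np.einsum('AiB,BC,AjC->ij', kraus, rho, np.conjugate(kraus))
    via_sum = sum(k @ rho @ k.conj().T for k in kraus)

    assert np.allclose(via_einsum, via_sum)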
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/external/path/_path.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/external/path/_path.py#L646-L670
def lines(self, encoding=None, errors='strict', retain=True): r""" Open this file, read all lines, return them in a list. Optional arguments: encoding - The Unicode encoding (or character set) of the file. The default is None, meaning the content of the file is read as 8-bit characters and returned as a list of (non-Unicode) str objects. errors - How to handle Unicode errors; see help(str.decode) for the options. Default is 'strict' retain - If true, retain newline characters; but all newline character combinations ('\r', '\n', '\r\n') are translated to '\n'. If false, newline characters are stripped off. Default is True. This uses 'U' mode in Python 2.3 and later. """ if encoding is None and retain: f = self.open('U') try: return f.readlines() finally: f.close() else: return self.text(encoding, errors).splitlines(retain)
[ "def", "lines", "(", "self", ",", "encoding", "=", "None", ",", "errors", "=", "'strict'", ",", "retain", "=", "True", ")", ":", "if", "encoding", "is", "None", "and", "retain", ":", "f", "=", "self", ".", "open", "(", "'U'", ")", "try", ":", "return", "f", ".", "readlines", "(", ")", "finally", ":", "f", ".", "close", "(", ")", "else", ":", "return", "self", ".", "text", "(", "encoding", ",", "errors", ")", ".", "splitlines", "(", "retain", ")" ]
r""" Open this file, read all lines, return them in a list. Optional arguments: encoding - The Unicode encoding (or character set) of the file. The default is None, meaning the content of the file is read as 8-bit characters and returned as a list of (non-Unicode) str objects. errors - How to handle Unicode errors; see help(str.decode) for the options. Default is 'strict' retain - If true, retain newline characters; but all newline character combinations ('\r', '\n', '\r\n') are translated to '\n'. If false, newline characters are stripped off. Default is True. This uses 'U' mode in Python 2.3 and later.
[ "r", "Open", "this", "file", "read", "all", "lines", "return", "them", "in", "a", "list", "." ]
python
test
fossasia/knittingpattern
knittingpattern/Parser.py
https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/Parser.py#L184-L192
def new_pattern(self, id_, name, rows=None): """Create a new knitting pattern. If rows is :obj:`None` it is replaced with the :meth:`new_row_collection`. """ if rows is None: rows = self.new_row_collection() return self._spec.new_pattern(id_, name, rows, self)
[ "def", "new_pattern", "(", "self", ",", "id_", ",", "name", ",", "rows", "=", "None", ")", ":", "if", "rows", "is", "None", ":", "rows", "=", "self", ".", "new_row_collection", "(", ")", "return", "self", ".", "_spec", ".", "new_pattern", "(", "id_", ",", "name", ",", "rows", ",", "self", ")" ]
Create a new knitting pattern. If rows is :obj:`None` it is replaced with the :meth:`new_row_collection`.
[ "Create", "a", "new", "knitting", "pattern", "." ]
python
valid
lepture/flask-oauthlib
flask_oauthlib/utils.py
https://github.com/lepture/flask-oauthlib/blob/9e6f152a5bb360e7496210da21561c3e6d41b0e1/flask_oauthlib/utils.py#L40-L46
def to_bytes(text, encoding='utf-8'): """Make sure text is bytes type.""" if not text: return text if not isinstance(text, bytes_type): text = text.encode(encoding) return text
[ "def", "to_bytes", "(", "text", ",", "encoding", "=", "'utf-8'", ")", ":", "if", "not", "text", ":", "return", "text", "if", "not", "isinstance", "(", "text", ",", "bytes_type", ")", ":", "text", "=", "text", ".", "encode", "(", "encoding", ")", "return", "text" ]
Make sure text is bytes type.
[ "Make", "sure", "text", "is", "bytes", "type", "." ]
python
test
senaite/senaite.core
bika/lims/browser/analysisrequest/add2.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analysisrequest/add2.py#L1443-L1520
def ajax_recalculate_prices(self): """Recalculate prices for all ARs """ # When the option "Include and display pricing information" in # Bika Setup Accounting tab is not selected if not self.show_recalculate_prices(): return {} # The sorted records from the request records = self.get_records() client = self.get_client() bika_setup = api.get_bika_setup() member_discount = float(bika_setup.getMemberDiscount()) member_discount_applies = False if client: member_discount_applies = client.getMemberDiscountApplies() prices = {} for n, record in enumerate(records): ardiscount_amount = 0.00 arservices_price = 0.00 arprofiles_price = 0.00 arprofiles_vat_amount = 0.00 arservice_vat_amount = 0.00 services_from_priced_profile = [] profile_uids = record.get("Profiles_uid", "").split(",") profile_uids = filter(lambda x: x, profile_uids) profiles = map(self.get_object_by_uid, profile_uids) services = map(self.get_object_by_uid, record.get("Analyses", [])) # ANALYSIS PROFILES PRICE for profile in profiles: use_profile_price = profile.getUseAnalysisProfilePrice() if not use_profile_price: continue profile_price = float(profile.getAnalysisProfilePrice()) arprofiles_price += profile_price arprofiles_vat_amount += profile.getVATAmount() profile_services = profile.getService() services_from_priced_profile.extend(profile_services) # ANALYSIS SERVICES PRICE for service in services: if service in services_from_priced_profile: continue service_price = float(service.getPrice()) # service_vat = float(service.getVAT()) service_vat_amount = float(service.getVATAmount()) arservice_vat_amount += service_vat_amount arservices_price += service_price base_price = arservices_price + arprofiles_price # Calculate the member discount if it applies if member_discount and member_discount_applies: logger.info("Member discount applies with {}%".format( member_discount)) ardiscount_amount = base_price * member_discount / 100 subtotal = base_price - ardiscount_amount vat_amount = arprofiles_vat_amount + arservice_vat_amount total = subtotal + vat_amount prices[n] = { "discount": "{0:.2f}".format(ardiscount_amount), "subtotal": "{0:.2f}".format(subtotal), "vat": "{0:.2f}".format(vat_amount), "total": "{0:.2f}".format(total), } logger.info("Prices for AR {}: Discount={discount} " "VAT={vat} Subtotal={subtotal} total={total}" .format(n, **prices[n])) return prices
[ "def", "ajax_recalculate_prices", "(", "self", ")", ":", "# When the option \"Include and display pricing information\" in", "# Bika Setup Accounting tab is not selected", "if", "not", "self", ".", "show_recalculate_prices", "(", ")", ":", "return", "{", "}", "# The sorted records from the request", "records", "=", "self", ".", "get_records", "(", ")", "client", "=", "self", ".", "get_client", "(", ")", "bika_setup", "=", "api", ".", "get_bika_setup", "(", ")", "member_discount", "=", "float", "(", "bika_setup", ".", "getMemberDiscount", "(", ")", ")", "member_discount_applies", "=", "False", "if", "client", ":", "member_discount_applies", "=", "client", ".", "getMemberDiscountApplies", "(", ")", "prices", "=", "{", "}", "for", "n", ",", "record", "in", "enumerate", "(", "records", ")", ":", "ardiscount_amount", "=", "0.00", "arservices_price", "=", "0.00", "arprofiles_price", "=", "0.00", "arprofiles_vat_amount", "=", "0.00", "arservice_vat_amount", "=", "0.00", "services_from_priced_profile", "=", "[", "]", "profile_uids", "=", "record", ".", "get", "(", "\"Profiles_uid\"", ",", "\"\"", ")", ".", "split", "(", "\",\"", ")", "profile_uids", "=", "filter", "(", "lambda", "x", ":", "x", ",", "profile_uids", ")", "profiles", "=", "map", "(", "self", ".", "get_object_by_uid", ",", "profile_uids", ")", "services", "=", "map", "(", "self", ".", "get_object_by_uid", ",", "record", ".", "get", "(", "\"Analyses\"", ",", "[", "]", ")", ")", "# ANALYSIS PROFILES PRICE", "for", "profile", "in", "profiles", ":", "use_profile_price", "=", "profile", ".", "getUseAnalysisProfilePrice", "(", ")", "if", "not", "use_profile_price", ":", "continue", "profile_price", "=", "float", "(", "profile", ".", "getAnalysisProfilePrice", "(", ")", ")", "arprofiles_price", "+=", "profile_price", "arprofiles_vat_amount", "+=", "profile", ".", "getVATAmount", "(", ")", "profile_services", "=", "profile", ".", "getService", "(", ")", "services_from_priced_profile", ".", "extend", "(", "profile_services", ")", "# ANALYSIS SERVICES PRICE", "for", "service", "in", "services", ":", "if", "service", "in", "services_from_priced_profile", ":", "continue", "service_price", "=", "float", "(", "service", ".", "getPrice", "(", ")", ")", "# service_vat = float(service.getVAT())", "service_vat_amount", "=", "float", "(", "service", ".", "getVATAmount", "(", ")", ")", "arservice_vat_amount", "+=", "service_vat_amount", "arservices_price", "+=", "service_price", "base_price", "=", "arservices_price", "+", "arprofiles_price", "# Calculate the member discount if it applies", "if", "member_discount", "and", "member_discount_applies", ":", "logger", ".", "info", "(", "\"Member discount applies with {}%\"", ".", "format", "(", "member_discount", ")", ")", "ardiscount_amount", "=", "base_price", "*", "member_discount", "/", "100", "subtotal", "=", "base_price", "-", "ardiscount_amount", "vat_amount", "=", "arprofiles_vat_amount", "+", "arservice_vat_amount", "total", "=", "subtotal", "+", "vat_amount", "prices", "[", "n", "]", "=", "{", "\"discount\"", ":", "\"{0:.2f}\"", ".", "format", "(", "ardiscount_amount", ")", ",", "\"subtotal\"", ":", "\"{0:.2f}\"", ".", "format", "(", "subtotal", ")", ",", "\"vat\"", ":", "\"{0:.2f}\"", ".", "format", "(", "vat_amount", ")", ",", "\"total\"", ":", "\"{0:.2f}\"", ".", "format", "(", "total", ")", ",", "}", "logger", ".", "info", "(", "\"Prices for AR {}: Discount={discount} \"", "\"VAT={vat} Subtotal={subtotal} total={total}\"", ".", "format", "(", "n", ",", "*", "*", "prices", "[", "n", "]", ")", 
")", "return", "prices" ]
Recalculate prices for all ARs
[ "Recalculate", "prices", "for", "all", "ARs" ]
python
train
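Per record, the arithmetic above boils down to four lines. With illustrative figures (services 100.00, profiles 50.00, a 10% member discount, VAT amounts 21.00 and 10.50):

    base_price = 100.00 + 50.00          # arservices_price + arprofiles_price
    discount = base_price * 10 / 100     # member discount applies -> 15.00
    subtotal = base_price - discount     # -> 135.00
    total = subtotal + (21.00 + 10.50)   # add VAT amounts -> 166.50

    assert "{0:.2f}".format(total) == "166.50"  # same formatting as the prices dict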
RedFantom/ttkwidgets
ttkwidgets/table.py
https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/table.py#L616-L627
def set_children(self, item, *newchildren): """ Replaces item’s children with newchildren. Children present in item that are not present in newchildren are detached from tree. No items in newchildren may be an ancestor of item. :param newchildren: new item's children (list of item identifiers) :type newchildren: sequence[str] """ self._visual_drag.set_children(item, *newchildren) ttk.Treeview.set_children(self, item, *newchildren)
[ "def", "set_children", "(", "self", ",", "item", ",", "*", "newchildren", ")", ":", "self", ".", "_visual_drag", ".", "set_children", "(", "item", ",", "*", "newchildren", ")", "ttk", ".", "Treeview", ".", "set_children", "(", "self", ",", "item", ",", "*", "newchildren", ")" ]
Replaces item’s children with newchildren. Children present in item that are not present in newchildren are detached from tree. No items in newchildren may be an ancestor of item. :param newchildren: new item's children (list of item identifiers) :type newchildren: sequence[str]
[ "Replaces", "item’s", "children", "with", "newchildren", "." ]
python
train
erget/StereoVision
stereovision/calibration.py
https://github.com/erget/StereoVision/blob/1adff45e291362f52188e0fd0211265845a4461a/stereovision/calibration.py#L124-L128
def export(self, output_folder): """Export matrices as ``*.npy`` files to an output folder.""" if not os.path.exists(output_folder): os.makedirs(output_folder) self._interact_with_folder(output_folder, 'w')
[ "def", "export", "(", "self", ",", "output_folder", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "output_folder", ")", ":", "os", ".", "makedirs", "(", "output_folder", ")", "self", ".", "_interact_with_folder", "(", "output_folder", ",", "'w'", ")" ]
Export matrices as ``*.npy`` files to an output folder.
[ "Export", "matrices", "as", "*", ".", "npy", "files", "to", "an", "output", "folder", "." ]
python
train
pypa/pipenv
pipenv/vendor/pyparsing.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pyparsing.py#L1300-L1327
def copy( self ): """ Make a copy of this :class:`ParserElement`. Useful for defining different parse actions for the same parsing pattern, using copies of the original parse element. Example:: integer = Word(nums).setParseAction(lambda toks: int(toks[0])) integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) prints:: [5120, 100, 655360, 268435456] Equivalent form of ``expr.copy()`` is just ``expr()``:: integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") """ cpy = copy.copy( self ) cpy.parseAction = self.parseAction[:] cpy.ignoreExprs = self.ignoreExprs[:] if self.copyDefaultWhiteChars: cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS return cpy
[ "def", "copy", "(", "self", ")", ":", "cpy", "=", "copy", ".", "copy", "(", "self", ")", "cpy", ".", "parseAction", "=", "self", ".", "parseAction", "[", ":", "]", "cpy", ".", "ignoreExprs", "=", "self", ".", "ignoreExprs", "[", ":", "]", "if", "self", ".", "copyDefaultWhiteChars", ":", "cpy", ".", "whiteChars", "=", "ParserElement", ".", "DEFAULT_WHITE_CHARS", "return", "cpy" ]
Make a copy of this :class:`ParserElement`. Useful for defining different parse actions for the same parsing pattern, using copies of the original parse element. Example:: integer = Word(nums).setParseAction(lambda toks: int(toks[0])) integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) prints:: [5120, 100, 655360, 268435456] Equivalent form of ``expr.copy()`` is just ``expr()``:: integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
[ "Make", "a", "copy", "of", "this", ":", "class", ":", "ParserElement", ".", "Useful", "for", "defining", "different", "parse", "actions", "for", "the", "same", "parsing", "pattern", "using", "copies", "of", "the", "original", "parse", "element", "." ]
python
train
PyCQA/pylint
pylint/checkers/raw_metrics.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/raw_metrics.py#L77-L84
def process_tokens(self, tokens): """update stats""" i = 0 tokens = list(tokens) while i < len(tokens): i, lines_number, line_type = get_type(tokens, i) self.stats["total_lines"] += lines_number self.stats[line_type] += lines_number
[ "def", "process_tokens", "(", "self", ",", "tokens", ")", ":", "i", "=", "0", "tokens", "=", "list", "(", "tokens", ")", "while", "i", "<", "len", "(", "tokens", ")", ":", "i", ",", "lines_number", ",", "line_type", "=", "get_type", "(", "tokens", ",", "i", ")", "self", ".", "stats", "[", "\"total_lines\"", "]", "+=", "lines_number", "self", ".", "stats", "[", "line_type", "]", "+=", "lines_number" ]
update stats
[ "update", "stats" ]
python
test
aiogram/aiogram
aiogram/dispatcher/filters/filters.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/dispatcher/filters/filters.py#L214-L225
def validate(cls, full_config: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]: """ If ``cls.key`` is not :obj:`None` and that is in config returns config with that argument. :param full_config: :return: """ if cls.key is not None: if cls.key in full_config: return {cls.key: full_config[cls.key]} elif cls.required: return {cls.key: cls.default}
[ "def", "validate", "(", "cls", ",", "full_config", ":", "typing", ".", "Dict", "[", "str", ",", "typing", ".", "Any", "]", ")", "->", "typing", ".", "Dict", "[", "str", ",", "typing", ".", "Any", "]", ":", "if", "cls", ".", "key", "is", "not", "None", ":", "if", "cls", ".", "key", "in", "full_config", ":", "return", "{", "cls", ".", "key", ":", "full_config", "[", "cls", ".", "key", "]", "}", "elif", "cls", ".", "required", ":", "return", "{", "cls", ".", "key", ":", "cls", ".", "default", "}" ]
If ``cls.key`` is not :obj:`None` and that is in config returns config with that argument. :param full_config: :return:
[ "If", "cls", ".", "key", "is", "not", ":", "obj", ":", "None", "and", "that", "is", "in", "config", "returns", "config", "with", "that", "argument", "." ]
python
train
marshallward/f90nml
f90nml/parser.py
https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/parser.py#L127-L131
def sparse_arrays(self, value): """Validate and enable spare arrays.""" if not isinstance(value, bool): raise TypeError('sparse_arrays attribute must be a logical type.') self._sparse_arrays = value
[ "def", "sparse_arrays", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "bool", ")", ":", "raise", "TypeError", "(", "'sparse_arrays attribute must be a logical type.'", ")", "self", ".", "_sparse_arrays", "=", "value" ]
Validate and enable spare arrays.
[ "Validate", "and", "enable", "spare", "arrays", "." ]
python
train
IdentityPython/fedoidcmsg
src/fedoidcmsg/entity.py
https://github.com/IdentityPython/fedoidcmsg/blob/d30107be02521fa6cdfe285da3b6b0cdd153c8cc/src/fedoidcmsg/entity.py#L209-L232
def update_metadata_statement(self, metadata_statement, receiver='', federation=None, context=''): """ Update a metadata statement by: * adding signed metadata statements or uris pointing to signed metadata statements. * adding the entities signing keys * create metadata statements one per signed metadata statement or uri sign these and add them to the metadata statement :param metadata_statement: A :py:class:`fedoidcmsg.MetadataStatement` instance :param receiver: The intended receiver of the metadata statement :param federation: :param context: :return: An augmented metadata statement """ self.add_sms_spec_to_request(metadata_statement, federation=federation, context=context) self.add_signing_keys(metadata_statement) metadata_statement = self.self_sign(metadata_statement, receiver) # These are unprotected here so can as well be removed del metadata_statement['signing_keys'] return metadata_statement
[ "def", "update_metadata_statement", "(", "self", ",", "metadata_statement", ",", "receiver", "=", "''", ",", "federation", "=", "None", ",", "context", "=", "''", ")", ":", "self", ".", "add_sms_spec_to_request", "(", "metadata_statement", ",", "federation", "=", "federation", ",", "context", "=", "context", ")", "self", ".", "add_signing_keys", "(", "metadata_statement", ")", "metadata_statement", "=", "self", ".", "self_sign", "(", "metadata_statement", ",", "receiver", ")", "# These are unprotected here so can as well be removed", "del", "metadata_statement", "[", "'signing_keys'", "]", "return", "metadata_statement" ]
Update a metadata statement by: * adding signed metadata statements or uris pointing to signed metadata statements. * adding the entities signing keys * create metadata statements one per signed metadata statement or uri sign these and add them to the metadata statement :param metadata_statement: A :py:class:`fedoidcmsg.MetadataStatement` instance :param receiver: The intended receiver of the metadata statement :param federation: :param context: :return: An augmented metadata statement
[ "Update", "a", "metadata", "statement", "by", ":", "*", "adding", "signed", "metadata", "statements", "or", "uris", "pointing", "to", "signed", "metadata", "statements", ".", "*", "adding", "the", "entities", "signing", "keys", "*", "create", "metadata", "statements", "one", "per", "signed", "metadata", "statement", "or", "uri", "sign", "these", "and", "add", "them", "to", "the", "metadata", "statement" ]
python
test
architv/harvey
harvey/harvey.py
https://github.com/architv/harvey/blob/2b96d57b7a1e0dd706f1f00aba3d92a7ae702960/harvey/harvey.py#L53-L58
def _get_config_name(): '''Get git config user name''' p = subprocess.Popen('git config --get user.name', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output = p.stdout.readlines() return _stripslashes(output[0])
[ "def", "_get_config_name", "(", ")", ":", "p", "=", "subprocess", ".", "Popen", "(", "'git config --get user.name'", ",", "shell", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "output", "=", "p", ".", "stdout", ".", "readlines", "(", ")", "return", "_stripslashes", "(", "output", "[", "0", "]", ")" ]
Get git config user name
[ "Get", "git", "config", "user", "name" ]
python
train
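The Popen/readlines dance above predates subprocess.run; on Python 3.7+ the same lookup is usually written as below (git must be on PATH, and _stripslashes is assumed to do little more than strip the trailing newline):

    import subprocess

    def get_config_name():
        # Equivalent of: git config --get user.name
        result = subprocess.run(
            ["git", "config", "--get", "user.name"],
            capture_output=True, text=True,
        )
        return result.stdout.strip()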
cthoyt/onto2nx
src/onto2nx/ontospy/core/loader.py
https://github.com/cthoyt/onto2nx/blob/94c86e5e187cca67534afe0260097177b66e02c8/src/onto2nx/ontospy/core/loader.py#L116-L141
def load_uri(self, uri, verbose): """ :param uri: :param rdf_format_opts: :param verbose: :return: """ if verbose: printDebug("----------") if verbose: printDebug("Reading: <%s>" % uri) success = False for f in self.rdf_format_opts: if verbose: printDebug(".. trying rdf serialization: <%s>" % f) try: self.rdfgraph.parse(uri, format=f) if verbose: printDebug("..... success!", bold=True) success = True self.sources_valid += [uri] break except: if verbose: printDebug("..... failed") if not success == True: self.loading_failed(self.rdf_format_opts) self.sources_invalid += [uri]
[ "def", "load_uri", "(", "self", ",", "uri", ",", "verbose", ")", ":", "if", "verbose", ":", "printDebug", "(", "\"----------\"", ")", "if", "verbose", ":", "printDebug", "(", "\"Reading: <%s>\"", "%", "uri", ")", "success", "=", "False", "for", "f", "in", "self", ".", "rdf_format_opts", ":", "if", "verbose", ":", "printDebug", "(", "\".. trying rdf serialization: <%s>\"", "%", "f", ")", "try", ":", "self", ".", "rdfgraph", ".", "parse", "(", "uri", ",", "format", "=", "f", ")", "if", "verbose", ":", "printDebug", "(", "\"..... success!\"", ",", "bold", "=", "True", ")", "success", "=", "True", "self", ".", "sources_valid", "+=", "[", "uri", "]", "break", "except", ":", "if", "verbose", ":", "printDebug", "(", "\"..... failed\"", ")", "if", "not", "success", "==", "True", ":", "self", ".", "loading_failed", "(", "self", ".", "rdf_format_opts", ")", "self", ".", "sources_invalid", "+=", "[", "uri", "]" ]
:param uri: :param rdf_format_opts: :param verbose: :return:
[ ":", "param", "uri", ":", ":", "param", "rdf_format_opts", ":", ":", "param", "verbose", ":", ":", "return", ":" ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/srtm.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/srtm.py#L305-L313
def _avg(value1, value2, weight): """Returns the weighted average of two values and handles the case where one value is None. If both values are None, None is returned. """ if value1 is None: return value2 if value2 is None: return value1 return value2 * weight + value1 * (1 - weight)
[ "def", "_avg", "(", "value1", ",", "value2", ",", "weight", ")", ":", "if", "value1", "is", "None", ":", "return", "value2", "if", "value2", "is", "None", ":", "return", "value1", "return", "value2", "*", "weight", "+", "value1", "*", "(", "1", "-", "weight", ")" ]
Returns the weighted average of two values and handles the case where one value is None. If both values are None, None is returned.
[ "Returns", "the", "weighted", "average", "of", "two", "values", "and", "handles", "the", "case", "where", "one", "value", "is", "None", ".", "If", "both", "values", "are", "None", "None", "is", "returned", "." ]
python
train
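The final expression above is plain linear interpolation: value2 * w + value1 * (1 - w) equals value1 + w * (value2 - value1), so w = 0 returns value1 and w = 1 returns value2, with the None checks handling missing elevation samples:

    def _avg(value1, value2, weight):
        if value1 is None:
            return value2
        if value2 is None:
            return value1
        return value2 * weight + value1 * (1 - weight)

    assert _avg(100.0, 200.0, 0.25) == 125.0  # a quarter of the way toward value2
    assert _avg(None, 200.0, 0.25) == 200.0   # missing sample falls back to the other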
pazz/urwidtrees
urwidtrees/lru_cache.py
https://github.com/pazz/urwidtrees/blob/d1fa38ce4f37db00bdfc574b856023b5db4c7ead/urwidtrees/lru_cache.py#L10-L141
def lru_cache(maxsize=100, typed=False): """Least-recently-used cache decorator. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. If *typed* is True, arguments of different types will be cached separately. For example, f(3.0) and f(3) will be treated as distinct calls with distinct results. Arguments to the cached function must be hashable. View the cache statistics named tuple (hits, misses, maxsize, currsize) with f.cache_info(). Clear the cache and statistics with f.cache_clear(). Access the underlying function with f.__wrapped__. See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used """ # Users should only access the lru_cache through its public API: # cache_info, cache_clear, and f.__wrapped__ # The internals of the lru_cache are encapsulated for thread safety and # to allow the implementation to change (including a possible C version). def decorating_function(user_function): cache = dict() stats = [0, 0] # make statistics updateable non-locally HITS, MISSES = 0, 1 # names for the stats fields kwd_mark = (object(),) # separate positional and keyword args cache_get = cache.get # bound method to lookup key or return None _len = len # localize the global len() function lock = Lock() # because linkedlist updates aren't threadsafe root = [] # root of the circular doubly linked list nonlocal_root = [root] # make updateable non-locally root[:] = [root, root, None, None] # initialize by pointing to self PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields def make_key(args, kwds, typed, tuple=tuple, sorted=sorted, type=type): # helper function to build a cache key from positional and keyword args key = args if kwds: sorted_items = tuple(sorted(kwds.items())) key += kwd_mark + sorted_items if typed: key += tuple(type(v) for v in args) if kwds: key += tuple(type(v) for k, v in sorted_items) return key if maxsize == 0: def wrapper(*args, **kwds): # no caching, just do a statistics update after a successful call result = user_function(*args, **kwds) stats[MISSES] += 1 return result elif maxsize is None: def wrapper(*args, **kwds): # simple caching without ordering or size limit key = make_key(args, kwds, typed) if kwds or typed else args result = cache_get(key, root) # root used here as a unique not-found sentinel if result is not root: stats[HITS] += 1 return result result = user_function(*args, **kwds) cache[key] = result stats[MISSES] += 1 return result else: def wrapper(*args, **kwds): # size limited caching that tracks accesses by recency key = make_key(args, kwds, typed) if kwds or typed else args with lock: link = cache_get(key) if link is not None: # record recent use of the key by moving it to the front of the list root, = nonlocal_root link_prev, link_next, key, result = link link_prev[NEXT] = link_next link_next[PREV] = link_prev last = root[PREV] last[NEXT] = root[PREV] = link link[PREV] = last link[NEXT] = root stats[HITS] += 1 return result result = user_function(*args, **kwds) with lock: root = nonlocal_root[0] if _len(cache) < maxsize: # put result in a new link at the front of the list last = root[PREV] link = [last, root, key, result] cache[key] = last[NEXT] = root[PREV] = link else: # use root to store the new key and result root[KEY] = key root[RESULT] = result cache[key] = root # empty the oldest link and make it the new root root = nonlocal_root[0] = root[NEXT] del cache[root[KEY]] root[KEY] = None root[RESULT] = None stats[MISSES] += 1 return result def cache_info(): """Report cache statistics""" with lock: return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache)) def cache_clear(): """Clear the cache and cache statistics""" with lock: cache.clear() root = nonlocal_root[0] root[:] = [root, root, None, None] stats[:] = [0, 0] wrapper.__wrapped__ = user_function wrapper.cache_info = cache_info wrapper.cache_clear = cache_clear return update_wrapper(wrapper, user_function) return decorating_function
[ "def", "lru_cache", "(", "maxsize", "=", "100", ",", "typed", "=", "False", ")", ":", "# Users should only access the lru_cache through its public API:", "# cache_info, cache_clear, and f.__wrapped__", "# The internals of the lru_cache are encapsulated for thread safety and", "# to allow the implementation to change (including a possible C version).", "def", "decorating_function", "(", "user_function", ")", ":", "cache", "=", "dict", "(", ")", "stats", "=", "[", "0", ",", "0", "]", "# make statistics updateable non-locally", "HITS", ",", "MISSES", "=", "0", ",", "1", "# names for the stats fields", "kwd_mark", "=", "(", "object", "(", ")", ",", ")", "# separate positional and keyword args", "cache_get", "=", "cache", ".", "get", "# bound method to lookup key or return None", "_len", "=", "len", "# localize the global len() function", "lock", "=", "Lock", "(", ")", "# because linkedlist updates aren't threadsafe", "root", "=", "[", "]", "# root of the circular doubly linked list", "nonlocal_root", "=", "[", "root", "]", "# make updateable non-locally", "root", "[", ":", "]", "=", "[", "root", ",", "root", ",", "None", ",", "None", "]", "# initialize by pointing to self", "PREV", ",", "NEXT", ",", "KEY", ",", "RESULT", "=", "0", ",", "1", ",", "2", ",", "3", "# names for the link fields", "def", "make_key", "(", "args", ",", "kwds", ",", "typed", ",", "tuple", "=", "tuple", ",", "sorted", "=", "sorted", ",", "type", "=", "type", ")", ":", "# helper function to build a cache key from positional and keyword args", "key", "=", "args", "if", "kwds", ":", "sorted_items", "=", "tuple", "(", "sorted", "(", "kwds", ".", "items", "(", ")", ")", ")", "key", "+=", "kwd_mark", "+", "sorted_items", "if", "typed", ":", "key", "+=", "tuple", "(", "type", "(", "v", ")", "for", "v", "in", "args", ")", "if", "kwds", ":", "key", "+=", "tuple", "(", "type", "(", "v", ")", "for", "k", ",", "v", "in", "sorted_items", ")", "return", "key", "if", "maxsize", "==", "0", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "# no caching, just do a statistics update after a successful call", "result", "=", "user_function", "(", "*", "args", ",", "*", "*", "kwds", ")", "stats", "[", "MISSES", "]", "+=", "1", "return", "result", "elif", "maxsize", "is", "None", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "# simple caching without ordering or size limit", "key", "=", "make_key", "(", "args", ",", "kwds", ",", "typed", ")", "if", "kwds", "or", "typed", "else", "args", "result", "=", "cache_get", "(", "key", ",", "root", ")", "# root used here as a unique not-found sentinel", "if", "result", "is", "not", "root", ":", "stats", "[", "HITS", "]", "+=", "1", "return", "result", "result", "=", "user_function", "(", "*", "args", ",", "*", "*", "kwds", ")", "cache", "[", "key", "]", "=", "result", "stats", "[", "MISSES", "]", "+=", "1", "return", "result", "else", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "# size limited caching that tracks accesses by recency", "key", "=", "make_key", "(", "args", ",", "kwds", ",", "typed", ")", "if", "kwds", "or", "typed", "else", "args", "with", "lock", ":", "link", "=", "cache_get", "(", "key", ")", "if", "link", "is", "not", "None", ":", "# record recent use of the key by moving it to the front of the list", "root", ",", "=", "nonlocal_root", "link_prev", ",", "link_next", ",", "key", ",", "result", "=", "link", "link_prev", "[", "NEXT", "]", "=", "link_next", "link_next", "[", "PREV", "]", "=", "link_prev", 
"last", "=", "root", "[", "PREV", "]", "last", "[", "NEXT", "]", "=", "root", "[", "PREV", "]", "=", "link", "link", "[", "PREV", "]", "=", "last", "link", "[", "NEXT", "]", "=", "root", "stats", "[", "HITS", "]", "+=", "1", "return", "result", "result", "=", "user_function", "(", "*", "args", ",", "*", "*", "kwds", ")", "with", "lock", ":", "root", "=", "nonlocal_root", "[", "0", "]", "if", "_len", "(", "cache", ")", "<", "maxsize", ":", "# put result in a new link at the front of the list", "last", "=", "root", "[", "PREV", "]", "link", "=", "[", "last", ",", "root", ",", "key", ",", "result", "]", "cache", "[", "key", "]", "=", "last", "[", "NEXT", "]", "=", "root", "[", "PREV", "]", "=", "link", "else", ":", "# use root to store the new key and result", "root", "[", "KEY", "]", "=", "key", "root", "[", "RESULT", "]", "=", "result", "cache", "[", "key", "]", "=", "root", "# empty the oldest link and make it the new root", "root", "=", "nonlocal_root", "[", "0", "]", "=", "root", "[", "NEXT", "]", "del", "cache", "[", "root", "[", "KEY", "]", "]", "root", "[", "KEY", "]", "=", "None", "root", "[", "RESULT", "]", "=", "None", "stats", "[", "MISSES", "]", "+=", "1", "return", "result", "def", "cache_info", "(", ")", ":", "\"\"\"Report cache statistics\"\"\"", "with", "lock", ":", "return", "_CacheInfo", "(", "stats", "[", "HITS", "]", ",", "stats", "[", "MISSES", "]", ",", "maxsize", ",", "len", "(", "cache", ")", ")", "def", "cache_clear", "(", ")", ":", "\"\"\"Clear the cache and cache statistics\"\"\"", "with", "lock", ":", "cache", ".", "clear", "(", ")", "root", "=", "nonlocal_root", "[", "0", "]", "root", "[", ":", "]", "=", "[", "root", ",", "root", ",", "None", ",", "None", "]", "stats", "[", ":", "]", "=", "[", "0", ",", "0", "]", "wrapper", ".", "__wrapped__", "=", "user_function", "wrapper", ".", "cache_info", "=", "cache_info", "wrapper", ".", "cache_clear", "=", "cache_clear", "return", "update_wrapper", "(", "wrapper", ",", "user_function", ")", "return", "decorating_function" ]
Least-recently-used cache decorator. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. If *typed* is True, arguments of different types will be cached separately. For example, f(3.0) and f(3) will be treated as distinct calls with distinct results. Arguments to the cached function must be hashable. View the cache statistics named tuple (hits, misses, maxsize, currsize) with f.cache_info(). Clear the cache and statistics with f.cache_clear(). Access the underlying function with f.__wrapped__. See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
[ "Least", "-", "recently", "-", "used", "cache", "decorator", "." ]
python
train
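Usage of the decorator above mirrors the stdlib functools.lru_cache (this vendored copy exists for Pythons that lacked it), and the hit/miss bookkeeping is observable through cache_info. The stdlib twin behaves identically for this example:

    from functools import lru_cache  # same interface as the vendored version above

    @lru_cache(maxsize=2)
    def square(x):
        return x * x

    square(2); square(2); square(3)
    info = square.cache_info()
    assert (info.hits, info.misses) == (1, 2)  # one repeat hit, two fresh misses
    square.cache_clear()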
osrg/ryu
ryu/lib/packet/bgp.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/packet/bgp.py#L3731-L3740
def has_matching_leftmost(self, remote_as): """Check if leftmost AS matches *remote_as*.""" if not self.value or not remote_as: return False leftmost_seg = self.path_seg_list[0] if leftmost_seg and leftmost_seg[0] == remote_as: return True return False
[ "def", "has_matching_leftmost", "(", "self", ",", "remote_as", ")", ":", "if", "not", "self", ".", "value", "or", "not", "remote_as", ":", "return", "False", "leftmost_seg", "=", "self", ".", "path_seg_list", "[", "0", "]", "if", "leftmost_seg", "and", "leftmost_seg", "[", "0", "]", "==", "remote_as", ":", "return", "True", "return", "False" ]
Check if leftmost AS matches *remote_as*.
[ "Check", "if", "leftmost", "AS", "matches", "*", "remote_as", "*", "." ]
python
train
ToucanToco/toucan-data-sdk
toucan_data_sdk/utils/generic/clean.py
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/generic/clean.py#L14-L33
def clean_dataframe(df, is_slugify=True, threshold=50, rename_cols=None): """ This method is used to: - slugify the column names (if slugify is set to True) - convert columns to 'category' (if len(unique) < threshold) or 'int' - clean the dataframe and rename if necessary """ if is_slugify: df = df.rename(columns=slugify) df = df.dropna(axis=1, how='all') for column in get_category_cols(df, threshold=threshold): df[column] = df[column].astype('category') for column in get_int_cols(df): df[column] = df[column].astype(int) if rename_cols is not None: df = df.rename(columns=rename_cols) return df
[ "def", "clean_dataframe", "(", "df", ",", "is_slugify", "=", "True", ",", "threshold", "=", "50", ",", "rename_cols", "=", "None", ")", ":", "if", "is_slugify", ":", "df", "=", "df", ".", "rename", "(", "columns", "=", "slugify", ")", "df", "=", "df", ".", "dropna", "(", "axis", "=", "1", ",", "how", "=", "'all'", ")", "for", "column", "in", "get_category_cols", "(", "df", ",", "threshold", "=", "threshold", ")", ":", "df", "[", "column", "]", "=", "df", "[", "column", "]", ".", "astype", "(", "'category'", ")", "for", "column", "in", "get_int_cols", "(", "df", ")", ":", "df", "[", "column", "]", "=", "df", "[", "column", "]", ".", "astype", "(", "int", ")", "if", "rename_cols", "is", "not", "None", ":", "df", "=", "df", ".", "rename", "(", "columns", "=", "rename_cols", ")", "return", "df" ]
This method is used to: - slugify the column names (if slugify is set to True) - convert columns to 'category' (if len(unique) < threshold) or 'int' - clean the dataframe and rename if necessary
[ "This", "method", "is", "used", "to", ":", "-", "slugify", "the", "column", "names", "(", "if", "slugify", "is", "set", "to", "True", ")", "-", "convert", "columns", "to", "category", "(", "if", "len", "(", "unique", ")", "<", "threshold", ")", "or", "int", "-", "clean", "the", "dataframe", "and", "rename", "if", "necessary" ]
python
test
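A small pandas check of the conversion rules above, with inline stand-ins for slugify and the get_*_cols helpers (their exact behaviour is assumed from the names):

    import pandas as pd

    df = pd.DataFrame({"City Name": ["a", "b", "a"], "count": [1.0, 2.0, 3.0]})
    df = df.rename(columns=lambda c: c.lower().replace(" ", "-"))  # stand-in for slugify
    df["city-name"] = df["city-name"].astype("category")          # low-cardinality -> category
    df["count"] = df["count"].astype(int)                         # whole-number floats -> int

    assert str(df["city-name"].dtype) == "category"
    assert df["count"].dtype.kind == "i"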
bsolomon1124/pyfinance
pyfinance/general.py
https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/general.py#L986-L988
def opt_weights(self): """Optimal weights (period-end).""" return pd.DataFrame(self._xs, index=self.newidx, columns=self.cols)
[ "def", "opt_weights", "(", "self", ")", ":", "return", "pd", ".", "DataFrame", "(", "self", ".", "_xs", ",", "index", "=", "self", ".", "newidx", ",", "columns", "=", "self", ".", "cols", ")" ]
Optimal weights (period-end).
[ "Optimal", "weights", "(", "period", "-", "end", ")", "." ]
python
train