Dataset schema: repo (string, 7 to 54 chars), path (string, 4 to 192 chars), url (string, 87 to 284 chars), code (string, 78 to 104k chars), code_tokens (list), docstring (string, 1 to 46.9k chars), docstring_tokens (list), language (string, 1 class), partition (string, 3 values).
nfcpy/nfcpy
src/nfc/tag/tt2_nxp.py
https://github.com/nfcpy/nfcpy/blob/6649146d1afdd5e82b2b6b1ea00aa58d50785117/src/nfc/tag/tt2_nxp.py#L346-L374
def protect(self, password=None, read_protect=False, protect_from=0): """Set password protection or permanent lock bits. If the *password* argument is None, all memory pages will be protected by setting the relevant lock bits (note that lock bits can not be reset). If valid NDEF management data is found, protect() also sets the NDEF write flag to read-only. All Tags of the NTAG21x family can alternatively be protected by password. If a *password* argument is provided, the protect() method writes the first 4 byte of the *password* string into the Tag's password (PWD) memory bytes and the following 2 byte of the *password* string into the password acknowledge (PACK) memory bytes. Factory default values are used if the *password* argument is an empty string. Lock bits are not set for password protection. The *read_protect* and *protect_from* arguments are only evaluated if *password* is not None. If *read_protect* is True, the memory protection bit (PROT) is set to require password verification also for reading of protected memory pages. The value of *protect_from* determines the first password protected memory page (one page is 4 byte) with the exception that the smallest set value is page 3 even if *protect_from* is smaller. """ args = (password, read_protect, protect_from) return super(NTAG21x, self).protect(*args)
[ "def", "protect", "(", "self", ",", "password", "=", "None", ",", "read_protect", "=", "False", ",", "protect_from", "=", "0", ")", ":", "args", "=", "(", "password", ",", "read_protect", ",", "protect_from", ")", "return", "super", "(", "NTAG21x", ",", "self", ")", ".", "protect", "(", "*", "args", ")" ]
Set password protection or permanent lock bits. If the *password* argument is None, all memory pages will be protected by setting the relevant lock bits (note that lock bits can not be reset). If valid NDEF management data is found, protect() also sets the NDEF write flag to read-only. All Tags of the NTAG21x family can alternatively be protected by password. If a *password* argument is provided, the protect() method writes the first 4 byte of the *password* string into the Tag's password (PWD) memory bytes and the following 2 byte of the *password* string into the password acknowledge (PACK) memory bytes. Factory default values are used if the *password* argument is an empty string. Lock bits are not set for password protection. The *read_protect* and *protect_from* arguments are only evaluated if *password* is not None. If *read_protect* is True, the memory protection bit (PROT) is set to require password verification also for reading of protected memory pages. The value of *protect_from* determines the first password protected memory page (one page is 4 byte) with the exception that the smallest set value is page 3 even if *protect_from* is smaller.
[ "Set", "password", "protection", "or", "permanent", "lock", "bits", "." ]
python
train
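The protect() docstring above describes a 6-byte password layout (4 bytes PWD followed by 2 bytes PACK). A minimal usage sketch, assuming a tag object already obtained from nfcpy tag discovery, which is not part of this record:

def password_protect(tag):
    # Protect pages from page 4 onward: the first 4 characters of the
    # password become PWD, the next 2 become PACK, per the docstring above.
    return tag.protect(password="abcd12", read_protect=False, protect_from=4)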
wdbm/shijian
shijian.py
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1578-L1611
def add_time_variables(df, reindex = True): """ Return a DataFrame with variables for weekday index, weekday name, timedelta through day, fraction through day, hours through day and days through week added, optionally with the index set to datetime and the variable `datetime` removed. It is assumed that the variable `datetime` exists. """ if not "datetime" in df.columns: log.error("field datetime not found in DataFrame") return False df["datetime"] = pd.to_datetime(df["datetime"]) df["month"] = df["datetime"].dt.month df["month_name"] = df["datetime"].dt.strftime("%B") df["weekday"] = df["datetime"].dt.weekday df["weekday_name"] = df["datetime"].dt.weekday_name df["time_through_day"] = df["datetime"].map( lambda x: x - datetime.datetime.combine( x.date(), datetime.time() ) ) df["fraction_through_day"] = df["time_through_day"].map( lambda x: x / datetime.timedelta(hours = 24) ) df["hour"] = df["datetime"].dt.hour df["hours_through_day"] = df["fraction_through_day"] * 24 df["days_through_week"] = df.apply( lambda row: row["weekday"] + row["fraction_through_day"], axis = 1 ) df["days_through_year"] = df["datetime"].dt.dayofyear df.index = df["datetime"] #del df["datetime"] return df
[ "def", "add_time_variables", "(", "df", ",", "reindex", "=", "True", ")", ":", "if", "not", "\"datetime\"", "in", "df", ".", "columns", ":", "log", ".", "error", "(", "\"field datetime not found in DataFrame\"", ")", "return", "False", "df", "[", "\"datetime\"", "]", "=", "pd", ".", "to_datetime", "(", "df", "[", "\"datetime\"", "]", ")", "df", "[", "\"month\"", "]", "=", "df", "[", "\"datetime\"", "]", ".", "dt", ".", "month", "df", "[", "\"month_name\"", "]", "=", "df", "[", "\"datetime\"", "]", ".", "dt", ".", "strftime", "(", "\"%B\"", ")", "df", "[", "\"weekday\"", "]", "=", "df", "[", "\"datetime\"", "]", ".", "dt", ".", "weekday", "df", "[", "\"weekday_name\"", "]", "=", "df", "[", "\"datetime\"", "]", ".", "dt", ".", "weekday_name", "df", "[", "\"time_through_day\"", "]", "=", "df", "[", "\"datetime\"", "]", ".", "map", "(", "lambda", "x", ":", "x", "-", "datetime", ".", "datetime", ".", "combine", "(", "x", ".", "date", "(", ")", ",", "datetime", ".", "time", "(", ")", ")", ")", "df", "[", "\"fraction_through_day\"", "]", "=", "df", "[", "\"time_through_day\"", "]", ".", "map", "(", "lambda", "x", ":", "x", "/", "datetime", ".", "timedelta", "(", "hours", "=", "24", ")", ")", "df", "[", "\"hour\"", "]", "=", "df", "[", "\"datetime\"", "]", ".", "dt", ".", "hour", "df", "[", "\"hours_through_day\"", "]", "=", "df", "[", "\"fraction_through_day\"", "]", "*", "24", "df", "[", "\"days_through_week\"", "]", "=", "df", ".", "apply", "(", "lambda", "row", ":", "row", "[", "\"weekday\"", "]", "+", "row", "[", "\"fraction_through_day\"", "]", ",", "axis", "=", "1", ")", "df", "[", "\"days_through_year\"", "]", "=", "df", "[", "\"datetime\"", "]", ".", "dt", ".", "dayofyear", "df", ".", "index", "=", "df", "[", "\"datetime\"", "]", "#del df[\"datetime\"]", "return", "df" ]
Return a DataFrame with variables for weekday index, weekday name, timedelta through day, fraction through day, hours through day and days through week added, optionally with the index set to datetime and the variable `datetime` removed. It is assumed that the variable `datetime` exists.
[ "Return", "a", "DataFrame", "with", "variables", "for", "weekday", "index", "weekday", "name", "timedelta", "through", "day", "fraction", "through", "day", "hours", "through", "day", "and", "days", "through", "week", "added", "optionally", "with", "the", "index", "set", "to", "datetime", "and", "the", "variable", "datetime", "removed", ".", "It", "is", "assumed", "that", "the", "variable", "datetime", "exists", "." ]
python
train
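Two points about the function above: the reindex parameter is accepted but never used in the body, and dt.weekday_name has been removed from recent pandas releases in favour of dt.day_name(), so the code as written needs an older pandas. A minimal usage sketch under those assumptions:

import pandas as pd

df = pd.DataFrame({"datetime": ["2024-01-01 06:00", "2024-01-02 18:30"],
                   "value": [1, 2]})
# df = add_time_variables(df)
# df["fraction_through_day"] would then hold 0.25 and roughly 0.77,
# i.e. 06:00 and 18:30 expressed as fractions of a 24-hour day.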
twisted/mantissa
xmantissa/cachejs.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/cachejs.py#L45-L57
def wasModified(self): """ Check to see if this module has been modified on disk since the last time it was cached. @return: True if it has been modified, False if not. """ self.filePath.restat() mtime = self.filePath.getmtime() if mtime >= self.lastModified: return True else: return False
[ "def", "wasModified", "(", "self", ")", ":", "self", ".", "filePath", ".", "restat", "(", ")", "mtime", "=", "self", ".", "filePath", ".", "getmtime", "(", ")", "if", "mtime", ">=", "self", ".", "lastModified", ":", "return", "True", "else", ":", "return", "False" ]
Check to see if this module has been modified on disk since the last time it was cached. @return: True if it has been modified, False if not.
[ "Check", "to", "see", "if", "this", "module", "has", "been", "modified", "on", "disk", "since", "the", "last", "time", "it", "was", "cached", "." ]
python
train
summa-tx/riemann
riemann/simple.py
https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/simple.py#L223-L258
def unsigned_legacy_tx(tx_ins, tx_outs, **kwargs): '''Create an unsigned transaction Use this to generate sighashes for unsigned TxIns Gotcha: it requires you to know the timelock and version it will _not_ guess them becuase it may not have acess to all scripts Hint: set version to 2 if using sequence number relative time locks Args: tx_ins list(TxIn instances): list of transaction inputs tx_outs list(TxOut instances): list of transaction outputs **kwargs: version (int): transaction version number locktime (hex): transaction locktime expiry (int): overwinter expiry time tx_joinsplits (list): list of joinsplits transactions joinsplit_pubkey (bytes): joinsplit public key joinsplit_sig (bytes): joinsplit signature Returns: (Tx instance): unsigned transaction ''' return tb.make_tx( version=kwargs['version'] if 'version' in kwargs else 1, tx_ins=tx_ins, tx_outs=tx_outs, lock_time=kwargs['lock_time'] if 'lock_time' in kwargs else 0, expiry=kwargs['expiry'] if 'expiry' in kwargs else 0, tx_joinsplits=(kwargs['tx_joinsplits'] if 'tx_joinsplits' in kwargs else None), joinsplit_pubkey=(kwargs['joinsplit_pubkey'] if 'joinsplit_pubkey' in kwargs else None), joinsplit_sig=(kwargs['joinsplit_sig'] if 'joinsplit_sig' in kwargs else None))
[ "def", "unsigned_legacy_tx", "(", "tx_ins", ",", "tx_outs", ",", "*", "*", "kwargs", ")", ":", "return", "tb", ".", "make_tx", "(", "version", "=", "kwargs", "[", "'version'", "]", "if", "'version'", "in", "kwargs", "else", "1", ",", "tx_ins", "=", "tx_ins", ",", "tx_outs", "=", "tx_outs", ",", "lock_time", "=", "kwargs", "[", "'lock_time'", "]", "if", "'lock_time'", "in", "kwargs", "else", "0", ",", "expiry", "=", "kwargs", "[", "'expiry'", "]", "if", "'expiry'", "in", "kwargs", "else", "0", ",", "tx_joinsplits", "=", "(", "kwargs", "[", "'tx_joinsplits'", "]", "if", "'tx_joinsplits'", "in", "kwargs", "else", "None", ")", ",", "joinsplit_pubkey", "=", "(", "kwargs", "[", "'joinsplit_pubkey'", "]", "if", "'joinsplit_pubkey'", "in", "kwargs", "else", "None", ")", ",", "joinsplit_sig", "=", "(", "kwargs", "[", "'joinsplit_sig'", "]", "if", "'joinsplit_sig'", "in", "kwargs", "else", "None", ")", ")" ]
Create an unsigned transaction Use this to generate sighashes for unsigned TxIns Gotcha: it requires you to know the timelock and version it will _not_ guess them because it may not have access to all scripts Hint: set version to 2 if using sequence number relative time locks Args: tx_ins list(TxIn instances): list of transaction inputs tx_outs list(TxOut instances): list of transaction outputs **kwargs: version (int): transaction version number locktime (hex): transaction locktime expiry (int): overwinter expiry time tx_joinsplits (list): list of joinsplits transactions joinsplit_pubkey (bytes): joinsplit public key joinsplit_sig (bytes): joinsplit signature Returns: (Tx instance): unsigned transaction
[ "Create", "an", "unsigned", "transaction", "Use", "this", "to", "generate", "sighashes", "for", "unsigned", "TxIns", "Gotcha", ":", "it", "requires", "you", "to", "know", "the", "timelock", "and", "version", "it", "will", "_not_", "guess", "them", "becuase", "it", "may", "not", "have", "acess", "to", "all", "scripts", "Hint", ":", "set", "version", "to", "2", "if", "using", "sequence", "number", "relative", "time", "locks" ]
python
train
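The kwargs handling above is equivalent to dict.get with a default; note also that the docstring lists the key as locktime while the code reads lock_time, so callers must pass the underscored name. A sketch of the same function written with dict.get (tb.make_tx and the TxIn/TxOut types are the riemann objects referenced above):

def unsigned_legacy_tx_sketch(tx_ins, tx_outs, **kwargs):
    # Same defaults as the original: version 1, lock_time 0, expiry 0,
    # and None for the joinsplit fields.
    return tb.make_tx(
        version=kwargs.get('version', 1),
        tx_ins=tx_ins,
        tx_outs=tx_outs,
        lock_time=kwargs.get('lock_time', 0),
        expiry=kwargs.get('expiry', 0),
        tx_joinsplits=kwargs.get('tx_joinsplits'),
        joinsplit_pubkey=kwargs.get('joinsplit_pubkey'),
        joinsplit_sig=kwargs.get('joinsplit_sig'))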
mrcagney/gtfstk
gtfstk/miscellany.py
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/miscellany.py#L620-L652
def compute_center( feed: "Feed", num_busiest_stops: Optional[int] = None ) -> Tuple: """ Return the centroid (WGS84 longitude-latitude pair) of the convex hull of the stops of the given Feed. If ``num_busiest_stops`` (integer) is given, then compute the ``num_busiest_stops`` busiest stops in the feed on the first Monday of the feed and return the mean of the longitudes and the mean of the latitudes of these stops, respectively. """ s = feed.stops.copy() if num_busiest_stops is None: hull = compute_convex_hull(feed) lon, lat = list(hull.centroid.coords)[0] else: date = feed.get_first_week()[0] ss = feed.compute_stop_stats([date]).sort_values( "num_trips", ascending=False ) if ss.stop_id.isnull().all(): # No stats, which could happen with a crappy feed. # Fall back to all stops. hull = compute_convex_hull(feed) lon, lat = list(hull.centroid.coords)[0] else: f = ss.head(num_busiest_stops) f = s.merge(f) lon = f["stop_lon"].mean() lat = f["stop_lat"].mean() return lon, lat
[ "def", "compute_center", "(", "feed", ":", "\"Feed\"", ",", "num_busiest_stops", ":", "Optional", "[", "int", "]", "=", "None", ")", "->", "Tuple", ":", "s", "=", "feed", ".", "stops", ".", "copy", "(", ")", "if", "num_busiest_stops", "is", "None", ":", "hull", "=", "compute_convex_hull", "(", "feed", ")", "lon", ",", "lat", "=", "list", "(", "hull", ".", "centroid", ".", "coords", ")", "[", "0", "]", "else", ":", "date", "=", "feed", ".", "get_first_week", "(", ")", "[", "0", "]", "ss", "=", "feed", ".", "compute_stop_stats", "(", "[", "date", "]", ")", ".", "sort_values", "(", "\"num_trips\"", ",", "ascending", "=", "False", ")", "if", "ss", ".", "stop_id", ".", "isnull", "(", ")", ".", "all", "(", ")", ":", "# No stats, which could happen with a crappy feed.", "# Fall back to all stops.", "hull", "=", "compute_convex_hull", "(", "feed", ")", "lon", ",", "lat", "=", "list", "(", "hull", ".", "centroid", ".", "coords", ")", "[", "0", "]", "else", ":", "f", "=", "ss", ".", "head", "(", "num_busiest_stops", ")", "f", "=", "s", ".", "merge", "(", "f", ")", "lon", "=", "f", "[", "\"stop_lon\"", "]", ".", "mean", "(", ")", "lat", "=", "f", "[", "\"stop_lat\"", "]", ".", "mean", "(", ")", "return", "lon", ",", "lat" ]
Return the centroid (WGS84 longitude-latitude pair) of the convex hull of the stops of the given Feed. If ``num_busiest_stops`` (integer) is given, then compute the ``num_busiest_stops`` busiest stops in the feed on the first Monday of the feed and return the mean of the longitudes and the mean of the latitudes of these stops, respectively.
[ "Return", "the", "centroid", "(", "WGS84", "longitude", "-", "latitude", "pair", ")", "of", "the", "convex", "hull", "of", "the", "stops", "of", "the", "given", "Feed", ".", "If", "num_busiest_stops", "(", "integer", ")", "is", "given", "then", "compute", "the", "num_busiest_stops", "busiest", "stops", "in", "the", "feed", "on", "the", "first", "Monday", "of", "the", "feed", "and", "return", "the", "mean", "of", "the", "longitudes", "and", "the", "mean", "of", "the", "latitudes", "of", "these", "stops", "respectively", "." ]
python
train
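A short usage sketch for compute_center; feed is assumed to be a gtfstk Feed object (for example one returned by gtfstk.read_gtfs), which this record does not show:

lon, lat = compute_center(feed)                        # centroid of the stop convex hull
lon, lat = compute_center(feed, num_busiest_stops=10)  # mean position of the 10 busiest stops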
rueckstiess/mtools
mtools/mplotqueries/plottypes/histogram_type.py
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/histogram_type.py#L80-L151
def plot(self, axis, ith_plot, total_plots, limits): """ Plot the histogram as a whole over all groups. Do not plot as individual groups like other plot types. """ print(self.plot_type_str.upper() + " plot") print("%5s %9s %s" % ("id", " #points", "group")) for idx, group in enumerate(self.groups): print("%5s %9s %s" % (idx + 1, len(self.groups[group]), group)) print('') datasets = [] colors = [] minx = np.inf maxx = -np.inf for idx, group in enumerate(self.groups): x = date2num([logevent.datetime for logevent in self.groups[group]]) minx = min(minx, min(x)) maxx = max(maxx, max(x)) datasets.append(x) color, marker = self.color_map(group) colors.append(color) if total_plots > 1: # if more than one plot, move histogram to twin axis on the right twin_axis = axis.twinx() twin_axis.set_ylabel(self.ylabel) axis.set_zorder(twin_axis.get_zorder() + 1) # put ax ahead of ax2 axis.patch.set_visible(False) # hide the 'canvas' axis = twin_axis n_bins = max(1, int((maxx - minx) * 24. * 60. * 60. / self.bucketsize)) if n_bins > 1000: # warning for too many buckets print("warning: %i buckets, will take a while to render. " "consider increasing --bucketsize." % n_bins) n, bins, artists = axis.hist(datasets, bins=n_bins, align='mid', log=self.logscale, histtype="barstacked" if self.barstacked else "bar", color=colors, edgecolor="none", linewidth=0, alpha=0.8, picker=True, label=map(str, self.groups.keys())) # scale current y-axis to match min and max values axis.set_ylim(np.min(n), np.max(n)) # add meta-data for picking if len(self.groups) > 1: for g, group in enumerate(self.groups.keys()): for i in range(len(artists[g])): artists[g][i]._mt_plot_type = self artists[g][i]._mt_group = group artists[g][i]._mt_n = n[g][i] if self.barstacked: artists[g][i]._mt_n -= (n[g - 1][i] if g > 0 else 0) artists[g][i]._mt_bin = bins[i] else: for i in range(len(artists)): artists[i]._mt_plot_type = self artists[i]._mt_group = group artists[i]._mt_n = n[i] artists[i]._mt_bin = bins[i] return artists
[ "def", "plot", "(", "self", ",", "axis", ",", "ith_plot", ",", "total_plots", ",", "limits", ")", ":", "print", "(", "self", ".", "plot_type_str", ".", "upper", "(", ")", "+", "\" plot\"", ")", "print", "(", "\"%5s %9s %s\"", "%", "(", "\"id\"", ",", "\" #points\"", ",", "\"group\"", ")", ")", "for", "idx", ",", "group", "in", "enumerate", "(", "self", ".", "groups", ")", ":", "print", "(", "\"%5s %9s %s\"", "%", "(", "idx", "+", "1", ",", "len", "(", "self", ".", "groups", "[", "group", "]", ")", ",", "group", ")", ")", "print", "(", "''", ")", "datasets", "=", "[", "]", "colors", "=", "[", "]", "minx", "=", "np", ".", "inf", "maxx", "=", "-", "np", ".", "inf", "for", "idx", ",", "group", "in", "enumerate", "(", "self", ".", "groups", ")", ":", "x", "=", "date2num", "(", "[", "logevent", ".", "datetime", "for", "logevent", "in", "self", ".", "groups", "[", "group", "]", "]", ")", "minx", "=", "min", "(", "minx", ",", "min", "(", "x", ")", ")", "maxx", "=", "max", "(", "maxx", ",", "max", "(", "x", ")", ")", "datasets", ".", "append", "(", "x", ")", "color", ",", "marker", "=", "self", ".", "color_map", "(", "group", ")", "colors", ".", "append", "(", "color", ")", "if", "total_plots", ">", "1", ":", "# if more than one plot, move histogram to twin axis on the right", "twin_axis", "=", "axis", ".", "twinx", "(", ")", "twin_axis", ".", "set_ylabel", "(", "self", ".", "ylabel", ")", "axis", ".", "set_zorder", "(", "twin_axis", ".", "get_zorder", "(", ")", "+", "1", ")", "# put ax ahead of ax2", "axis", ".", "patch", ".", "set_visible", "(", "False", ")", "# hide the 'canvas'", "axis", "=", "twin_axis", "n_bins", "=", "max", "(", "1", ",", "int", "(", "(", "maxx", "-", "minx", ")", "*", "24.", "*", "60.", "*", "60.", "/", "self", ".", "bucketsize", ")", ")", "if", "n_bins", ">", "1000", ":", "# warning for too many buckets", "print", "(", "\"warning: %i buckets, will take a while to render. 
\"", "\"consider increasing --bucketsize.\"", "%", "n_bins", ")", "n", ",", "bins", ",", "artists", "=", "axis", ".", "hist", "(", "datasets", ",", "bins", "=", "n_bins", ",", "align", "=", "'mid'", ",", "log", "=", "self", ".", "logscale", ",", "histtype", "=", "\"barstacked\"", "if", "self", ".", "barstacked", "else", "\"bar\"", ",", "color", "=", "colors", ",", "edgecolor", "=", "\"none\"", ",", "linewidth", "=", "0", ",", "alpha", "=", "0.8", ",", "picker", "=", "True", ",", "label", "=", "map", "(", "str", ",", "self", ".", "groups", ".", "keys", "(", ")", ")", ")", "# scale current y-axis to match min and max values", "axis", ".", "set_ylim", "(", "np", ".", "min", "(", "n", ")", ",", "np", ".", "max", "(", "n", ")", ")", "# add meta-data for picking", "if", "len", "(", "self", ".", "groups", ")", ">", "1", ":", "for", "g", ",", "group", "in", "enumerate", "(", "self", ".", "groups", ".", "keys", "(", ")", ")", ":", "for", "i", "in", "range", "(", "len", "(", "artists", "[", "g", "]", ")", ")", ":", "artists", "[", "g", "]", "[", "i", "]", ".", "_mt_plot_type", "=", "self", "artists", "[", "g", "]", "[", "i", "]", ".", "_mt_group", "=", "group", "artists", "[", "g", "]", "[", "i", "]", ".", "_mt_n", "=", "n", "[", "g", "]", "[", "i", "]", "if", "self", ".", "barstacked", ":", "artists", "[", "g", "]", "[", "i", "]", ".", "_mt_n", "-=", "(", "n", "[", "g", "-", "1", "]", "[", "i", "]", "if", "g", ">", "0", "else", "0", ")", "artists", "[", "g", "]", "[", "i", "]", ".", "_mt_bin", "=", "bins", "[", "i", "]", "else", ":", "for", "i", "in", "range", "(", "len", "(", "artists", ")", ")", ":", "artists", "[", "i", "]", ".", "_mt_plot_type", "=", "self", "artists", "[", "i", "]", ".", "_mt_group", "=", "group", "artists", "[", "i", "]", ".", "_mt_n", "=", "n", "[", "i", "]", "artists", "[", "i", "]", ".", "_mt_bin", "=", "bins", "[", "i", "]", "return", "artists" ]
Plot the histogram as a whole over all groups. Do not plot as individual groups like other plot types.
[ "Plot", "the", "histogram", "as", "a", "whole", "over", "all", "groups", "." ]
python
train
Azure/azure-uamqp-python
uamqp/client.py
https://github.com/Azure/azure-uamqp-python/blob/b67e4fcaf2e8a337636947523570239c10a58ae2/uamqp/client.py#L249-L281
def close(self): """Close the client. This includes closing the Session and CBS authentication layer as well as the Connection. If the client was opened using an external Connection, this will be left intact. No further messages can be sent or received and the client cannot be re-opened. All pending, unsent messages will remain uncleared to allow them to be inspected and queued to a new client. """ if self.message_handler: self.message_handler.destroy() self.message_handler = None self._shutdown = True if self._keep_alive_thread: self._keep_alive_thread.join() self._keep_alive_thread = None if not self._session: return # already closed. if not self._connection.cbs: _logger.debug("Closing non-CBS session.") self._session.destroy() else: _logger.debug("CBS session pending.") self._session = None if not self._ext_connection: _logger.debug("Closing exclusive connection.") self._connection.destroy() else: _logger.debug("Shared connection remaining open.") self._connection = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "message_handler", ":", "self", ".", "message_handler", ".", "destroy", "(", ")", "self", ".", "message_handler", "=", "None", "self", ".", "_shutdown", "=", "True", "if", "self", ".", "_keep_alive_thread", ":", "self", ".", "_keep_alive_thread", ".", "join", "(", ")", "self", ".", "_keep_alive_thread", "=", "None", "if", "not", "self", ".", "_session", ":", "return", "# already closed.", "if", "not", "self", ".", "_connection", ".", "cbs", ":", "_logger", ".", "debug", "(", "\"Closing non-CBS session.\"", ")", "self", ".", "_session", ".", "destroy", "(", ")", "else", ":", "_logger", ".", "debug", "(", "\"CBS session pending.\"", ")", "self", ".", "_session", "=", "None", "if", "not", "self", ".", "_ext_connection", ":", "_logger", ".", "debug", "(", "\"Closing exclusive connection.\"", ")", "self", ".", "_connection", ".", "destroy", "(", ")", "else", ":", "_logger", ".", "debug", "(", "\"Shared connection remaining open.\"", ")", "self", ".", "_connection", "=", "None" ]
Close the client. This includes closing the Session and CBS authentication layer as well as the Connection. If the client was opened using an external Connection, this will be left intact. No further messages can be sent or received and the client cannot be re-opened. All pending, unsent messages will remain uncleared to allow them to be inspected and queued to a new client.
[ "Close", "the", "client", ".", "This", "includes", "closing", "the", "Session", "and", "CBS", "authentication", "layer", "as", "well", "as", "the", "Connection", ".", "If", "the", "client", "was", "opened", "using", "an", "external", "Connection", "this", "will", "be", "left", "intact", "." ]
python
train
kislyuk/aegea
aegea/packages/github3/pulls.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/pulls.py#L254-L264
def iter_comments(self, number=-1, etag=None): """Iterate over the comments on this pull request. :param int number: (optional), number of comments to return. Default: -1 returns all available comments. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`ReviewComment <ReviewComment>`\ s """ url = self._build_url('comments', base_url=self._api) return self._iter(int(number), url, ReviewComment, etag=etag)
[ "def", "iter_comments", "(", "self", ",", "number", "=", "-", "1", ",", "etag", "=", "None", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'comments'", ",", "base_url", "=", "self", ".", "_api", ")", "return", "self", ".", "_iter", "(", "int", "(", "number", ")", ",", "url", ",", "ReviewComment", ",", "etag", "=", "etag", ")" ]
Iterate over the comments on this pull request. :param int number: (optional), number of comments to return. Default: -1 returns all available comments. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`ReviewComment <ReviewComment>`\ s
[ "Iterate", "over", "the", "comments", "on", "this", "pull", "request", "." ]
python
train
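A hedged usage sketch; pull is assumed to be a github3 pull request object obtained elsewhere (for example from a repository lookup), and body is the usual comment text attribute:

for comment in pull.iter_comments(number=20):
    # Stops after 20 review comments; number=-1 (the default) yields all of them.
    print(comment.body)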
ioos/compliance-checker
compliance_checker/cf/cf.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/cf/cf.py#L3129-L3194
def check_compression_gathering(self, ds): """ At the current time the netCDF interface does not provide for packing data. However a simple packing may be achieved through the use of the optional NUG defined attributes scale_factor and add_offset . After the data values of a variable have been read, they are to be multiplied by the scale_factor , and have add_offset added to them. If both attributes are present, the data are scaled before the offset is added. When scaled data are written, the application should first subtract the offset and then divide by the scale factor. The units of a variable should be representative of the unpacked data. This standard is more restrictive than the NUG with respect to the use of the scale_factor and add_offset attributes; ambiguities and precision problems related to data type conversions are resolved by these restrictions. If the scale_factor and add_offset attributes are of the same data type as the associated variable, the unpacked data is assumed to be of the same data type as the packed data. However, if the scale_factor and add_offset attributes are of a different data type from the variable (containing the packed data) then the unpacked data should match the type of these attributes, which must both be of type float or both be of type double . An additional restriction in this case is that the variable containing the packed data must be of type byte , short or int . It is not advised to unpack an int into a float as there is a potential precision loss. When data to be packed contains missing values the attributes that indicate missing values ( _FillValue , valid_min , valid_max , valid_range ) must be of the same data type as the packed data. See Section 2.5.1, “Missing Data” for a discussion of how applications should treat variables that have attributes indicating both missing values and transformations defined by a scale and/or offset. :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results """ ret_val = [] for compress_var in ds.get_variables_by_attributes(compress=lambda s: s is not None): valid = True reasoning = [] # puts the referenced variable being compressed into a set compress_set = set(compress_var.compress.split(' ')) if compress_var.ndim != 1: valid = False reasoning.append("Compression variable {} may only have one dimension".format(compress_var.name)) # ensure compression variable is a proper index, and thus is an # signed or unsigned integer type of some sort if compress_var.dtype.kind not in {'i', 'u'}: valid = False reasoning.append("Compression variable {} must be an integer type to form a proper array index".format(compress_var.name)) # make sure all the variables referred to are contained by the # variables. if not compress_set.issubset(ds.dimensions): not_in_dims = sorted(compress_set.difference(ds.dimensions)) valid = False reasoning.append("The following dimensions referenced by the compress attribute of variable {} do not exist: {}".format(compress_var.name, not_in_dims)) result = Result(BaseCheck.MEDIUM, valid, self.section_titles['8.2'], reasoning) ret_val.append(result) return ret_val
[ "def", "check_compression_gathering", "(", "self", ",", "ds", ")", ":", "ret_val", "=", "[", "]", "for", "compress_var", "in", "ds", ".", "get_variables_by_attributes", "(", "compress", "=", "lambda", "s", ":", "s", "is", "not", "None", ")", ":", "valid", "=", "True", "reasoning", "=", "[", "]", "# puts the referenced variable being compressed into a set", "compress_set", "=", "set", "(", "compress_var", ".", "compress", ".", "split", "(", "' '", ")", ")", "if", "compress_var", ".", "ndim", "!=", "1", ":", "valid", "=", "False", "reasoning", ".", "append", "(", "\"Compression variable {} may only have one dimension\"", ".", "format", "(", "compress_var", ".", "name", ")", ")", "# ensure compression variable is a proper index, and thus is an", "# signed or unsigned integer type of some sort", "if", "compress_var", ".", "dtype", ".", "kind", "not", "in", "{", "'i'", ",", "'u'", "}", ":", "valid", "=", "False", "reasoning", ".", "append", "(", "\"Compression variable {} must be an integer type to form a proper array index\"", ".", "format", "(", "compress_var", ".", "name", ")", ")", "# make sure all the variables referred to are contained by the", "# variables.", "if", "not", "compress_set", ".", "issubset", "(", "ds", ".", "dimensions", ")", ":", "not_in_dims", "=", "sorted", "(", "compress_set", ".", "difference", "(", "ds", ".", "dimensions", ")", ")", "valid", "=", "False", "reasoning", ".", "append", "(", "\"The following dimensions referenced by the compress attribute of variable {} do not exist: {}\"", ".", "format", "(", "compress_var", ".", "name", ",", "not_in_dims", ")", ")", "result", "=", "Result", "(", "BaseCheck", ".", "MEDIUM", ",", "valid", ",", "self", ".", "section_titles", "[", "'8.2'", "]", ",", "reasoning", ")", "ret_val", ".", "append", "(", "result", ")", "return", "ret_val" ]
At the current time the netCDF interface does not provide for packing data. However a simple packing may be achieved through the use of the optional NUG defined attributes scale_factor and add_offset . After the data values of a variable have been read, they are to be multiplied by the scale_factor , and have add_offset added to them. If both attributes are present, the data are scaled before the offset is added. When scaled data are written, the application should first subtract the offset and then divide by the scale factor. The units of a variable should be representative of the unpacked data. This standard is more restrictive than the NUG with respect to the use of the scale_factor and add_offset attributes; ambiguities and precision problems related to data type conversions are resolved by these restrictions. If the scale_factor and add_offset attributes are of the same data type as the associated variable, the unpacked data is assumed to be of the same data type as the packed data. However, if the scale_factor and add_offset attributes are of a different data type from the variable (containing the packed data) then the unpacked data should match the type of these attributes, which must both be of type float or both be of type double . An additional restriction in this case is that the variable containing the packed data must be of type byte , short or int . It is not advised to unpack an int into a float as there is a potential precision loss. When data to be packed contains missing values the attributes that indicate missing values ( _FillValue , valid_min , valid_max , valid_range ) must be of the same data type as the packed data. See Section 2.5.1, “Missing Data” for a discussion of how applications should treat variables that have attributes indicating both missing values and transformations defined by a scale and/or offset. :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results
[ "At", "the", "current", "time", "the", "netCDF", "interface", "does", "not", "provide", "for", "packing", "data", ".", "However", "a", "simple", "packing", "may", "be", "achieved", "through", "the", "use", "of", "the", "optional", "NUG", "defined", "attributes", "scale_factor", "and", "add_offset", ".", "After", "the", "data", "values", "of", "a", "variable", "have", "been", "read", "they", "are", "to", "be", "multiplied", "by", "the", "scale_factor", "and", "have", "add_offset", "added", "to", "them", ".", "If", "both", "attributes", "are", "present", "the", "data", "are", "scaled", "before", "the", "offset", "is", "added", ".", "When", "scaled", "data", "are", "written", "the", "application", "should", "first", "subtract", "the", "offset", "and", "then", "divide", "by", "the", "scale", "factor", ".", "The", "units", "of", "a", "variable", "should", "be", "representative", "of", "the", "unpacked", "data", "." ]
python
train
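For reference, a small sketch of the "compression by gathering" layout this check validates, using the netCDF4 package: a one-dimensional integer index variable whose compress attribute names existing dimensions. The file name and dimension sizes are made up for illustration:

from netCDF4 import Dataset

ds = Dataset("gathered.nc", "w")
ds.createDimension("lat", 4)
ds.createDimension("lon", 4)
ds.createDimension("landpoint", 5)
# 1-D integer variable; its "compress" attribute lists the gathered dimensions.
landpoint = ds.createVariable("landpoint", "i4", ("landpoint",))
landpoint.compress = "lat lon"
ds.close()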
crocs-muni/roca
roca/detect.py
https://github.com/crocs-muni/roca/blob/74ad6ce63c428d83dcffce9c5e26ef7b9e30faa5/roca/detect.py#L237-L276
def try_get_dn_string(subject, shorten=False): """ Returns DN as a string :param subject: :param shorten: :return: """ try: from cryptography.x509.oid import NameOID from cryptography.x509 import ObjectIdentifier oid_names = { getattr(NameOID, 'COMMON_NAME', ObjectIdentifier("2.5.4.3")): "CN", getattr(NameOID, 'COUNTRY_NAME', ObjectIdentifier("2.5.4.6")): "C", getattr(NameOID, 'LOCALITY_NAME', ObjectIdentifier("2.5.4.7")): "L", getattr(NameOID, 'STATE_OR_PROVINCE_NAME', ObjectIdentifier("2.5.4.8")): "ST", getattr(NameOID, 'STREET_ADDRESS', ObjectIdentifier("2.5.4.9")): "St", getattr(NameOID, 'ORGANIZATION_NAME', ObjectIdentifier("2.5.4.10")): "O", getattr(NameOID, 'ORGANIZATIONAL_UNIT_NAME', ObjectIdentifier("2.5.4.11")): "OU", getattr(NameOID, 'SERIAL_NUMBER', ObjectIdentifier("2.5.4.5")): "SN", getattr(NameOID, 'USER_ID', ObjectIdentifier("0.9.2342.19200300.100.1.1")): "userID", getattr(NameOID, 'DOMAIN_COMPONENT', ObjectIdentifier("0.9.2342.19200300.100.1.25")): "domainComponent", getattr(NameOID, 'EMAIL_ADDRESS', ObjectIdentifier("1.2.840.113549.1.9.1")): "emailAddress", getattr(NameOID, 'POSTAL_CODE', ObjectIdentifier("2.5.4.17")): "ZIP", } ret = [] try: for attribute in subject: oid = attribute.oid dot = oid.dotted_string oid_name = oid_names[oid] if shorten and oid in oid_names else oid._name val = attribute.value ret.append('%s: %s' % (oid_name, val)) except: pass return ', '.join(ret) except Exception as e: logger.warning('Unexpected error: %s' % e) return 'N/A'
[ "def", "try_get_dn_string", "(", "subject", ",", "shorten", "=", "False", ")", ":", "try", ":", "from", "cryptography", ".", "x509", ".", "oid", "import", "NameOID", "from", "cryptography", ".", "x509", "import", "ObjectIdentifier", "oid_names", "=", "{", "getattr", "(", "NameOID", ",", "'COMMON_NAME'", ",", "ObjectIdentifier", "(", "\"2.5.4.3\"", ")", ")", ":", "\"CN\"", ",", "getattr", "(", "NameOID", ",", "'COUNTRY_NAME'", ",", "ObjectIdentifier", "(", "\"2.5.4.6\"", ")", ")", ":", "\"C\"", ",", "getattr", "(", "NameOID", ",", "'LOCALITY_NAME'", ",", "ObjectIdentifier", "(", "\"2.5.4.7\"", ")", ")", ":", "\"L\"", ",", "getattr", "(", "NameOID", ",", "'STATE_OR_PROVINCE_NAME'", ",", "ObjectIdentifier", "(", "\"2.5.4.8\"", ")", ")", ":", "\"ST\"", ",", "getattr", "(", "NameOID", ",", "'STREET_ADDRESS'", ",", "ObjectIdentifier", "(", "\"2.5.4.9\"", ")", ")", ":", "\"St\"", ",", "getattr", "(", "NameOID", ",", "'ORGANIZATION_NAME'", ",", "ObjectIdentifier", "(", "\"2.5.4.10\"", ")", ")", ":", "\"O\"", ",", "getattr", "(", "NameOID", ",", "'ORGANIZATIONAL_UNIT_NAME'", ",", "ObjectIdentifier", "(", "\"2.5.4.11\"", ")", ")", ":", "\"OU\"", ",", "getattr", "(", "NameOID", ",", "'SERIAL_NUMBER'", ",", "ObjectIdentifier", "(", "\"2.5.4.5\"", ")", ")", ":", "\"SN\"", ",", "getattr", "(", "NameOID", ",", "'USER_ID'", ",", "ObjectIdentifier", "(", "\"0.9.2342.19200300.100.1.1\"", ")", ")", ":", "\"userID\"", ",", "getattr", "(", "NameOID", ",", "'DOMAIN_COMPONENT'", ",", "ObjectIdentifier", "(", "\"0.9.2342.19200300.100.1.25\"", ")", ")", ":", "\"domainComponent\"", ",", "getattr", "(", "NameOID", ",", "'EMAIL_ADDRESS'", ",", "ObjectIdentifier", "(", "\"1.2.840.113549.1.9.1\"", ")", ")", ":", "\"emailAddress\"", ",", "getattr", "(", "NameOID", ",", "'POSTAL_CODE'", ",", "ObjectIdentifier", "(", "\"2.5.4.17\"", ")", ")", ":", "\"ZIP\"", ",", "}", "ret", "=", "[", "]", "try", ":", "for", "attribute", "in", "subject", ":", "oid", "=", "attribute", ".", "oid", "dot", "=", "oid", ".", "dotted_string", "oid_name", "=", "oid_names", "[", "oid", "]", "if", "shorten", "and", "oid", "in", "oid_names", "else", "oid", ".", "_name", "val", "=", "attribute", ".", "value", "ret", ".", "append", "(", "'%s: %s'", "%", "(", "oid_name", ",", "val", ")", ")", "except", ":", "pass", "return", "', '", ".", "join", "(", "ret", ")", "except", "Exception", "as", "e", ":", "logger", ".", "warning", "(", "'Unexpected error: %s'", "%", "e", ")", "return", "'N/A'" ]
Returns DN as a string :param subject: :param shorten: :return:
[ "Returns", "DN", "as", "a", "string", ":", "param", "subject", ":", ":", "param", "shorten", ":", ":", "return", ":" ]
python
train
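A usage sketch, assuming the cryptography package and a PEM certificate on disk (the file name is made up); cert.subject is the iterable of name attributes that try_get_dn_string expects:

from cryptography import x509

with open("cert.pem", "rb") as f:
    # Older cryptography releases also require default_backend() as a second argument.
    cert = x509.load_pem_x509_certificate(f.read())
print(try_get_dn_string(cert.subject, shorten=True))  # e.g. "CN: example, O: Example Org"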
nakagami/pyfirebirdsql
firebirdsql/decfloat.py
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/decfloat.py#L47-L100
def dpd_to_int(dpd): """ Convert DPD encodined value to int (0-999) dpd: DPD encoded value. 10bit unsigned int """ b = [None] * 10 b[9] = 1 if dpd & 0b1000000000 else 0 b[8] = 1 if dpd & 0b0100000000 else 0 b[7] = 1 if dpd & 0b0010000000 else 0 b[6] = 1 if dpd & 0b0001000000 else 0 b[5] = 1 if dpd & 0b0000100000 else 0 b[4] = 1 if dpd & 0b0000010000 else 0 b[3] = 1 if dpd & 0b0000001000 else 0 b[2] = 1 if dpd & 0b0000000100 else 0 b[1] = 1 if dpd & 0b0000000010 else 0 b[0] = 1 if dpd & 0b0000000001 else 0 d = [None] * 3 if b[3] == 0: d[2] = b[9] * 4 + b[8] * 2 + b[7] d[1] = b[6] * 4 + b[5] * 2 + b[4] d[0] = b[2] * 4 + b[1] * 2 + b[0] elif (b[3], b[2], b[1]) == (1, 0, 0): d[2] = b[9] * 4 + b[8] * 2 + b[7] d[1] = b[6] * 4 + b[5] * 2 + b[4] d[0] = 8 + b[0] elif (b[3], b[2], b[1]) == (1, 0, 1): d[2] = b[9] * 4 + b[8] * 2 + b[7] d[1] = 8 + b[4] d[0] = b[6] * 4 + b[5] * 2 + b[0] elif (b[3], b[2], b[1]) == (1, 1, 0): d[2] = 8 + b[7] d[1] = b[6] * 4 + b[5] * 2 + b[4] d[0] = b[9] * 4 + b[8] * 2 + b[0] elif (b[6], b[5], b[3], b[2], b[1]) == (0, 0, 1, 1, 1): d[2] = 8 + b[7] d[1] = 8 + b[4] d[0] = b[9] * 4 + b[8] * 2 + b[0] elif (b[6], b[5], b[3], b[2], b[1]) == (0, 1, 1, 1, 1): d[2] = 8 + b[7] d[1] = b[9] * 4 + b[8] * 2 + b[4] d[0] = 8 + b[0] elif (b[6], b[5], b[3], b[2], b[1]) == (1, 0, 1, 1, 1): d[2] = b[9] * 4 + b[8] * 2 + b[7] d[1] = 8 + b[4] d[0] = 8 + b[0] elif (b[6], b[5], b[3], b[2], b[1]) == (1, 1, 1, 1, 1): d[2] = 8 + b[7] d[1] = 8 + b[4] d[0] = 8 + b[0] else: raise ValueError('Invalid DPD encoding') return d[2] * 100 + d[1] * 10 + d[0]
[ "def", "dpd_to_int", "(", "dpd", ")", ":", "b", "=", "[", "None", "]", "*", "10", "b", "[", "9", "]", "=", "1", "if", "dpd", "&", "0b1000000000", "else", "0", "b", "[", "8", "]", "=", "1", "if", "dpd", "&", "0b0100000000", "else", "0", "b", "[", "7", "]", "=", "1", "if", "dpd", "&", "0b0010000000", "else", "0", "b", "[", "6", "]", "=", "1", "if", "dpd", "&", "0b0001000000", "else", "0", "b", "[", "5", "]", "=", "1", "if", "dpd", "&", "0b0000100000", "else", "0", "b", "[", "4", "]", "=", "1", "if", "dpd", "&", "0b0000010000", "else", "0", "b", "[", "3", "]", "=", "1", "if", "dpd", "&", "0b0000001000", "else", "0", "b", "[", "2", "]", "=", "1", "if", "dpd", "&", "0b0000000100", "else", "0", "b", "[", "1", "]", "=", "1", "if", "dpd", "&", "0b0000000010", "else", "0", "b", "[", "0", "]", "=", "1", "if", "dpd", "&", "0b0000000001", "else", "0", "d", "=", "[", "None", "]", "*", "3", "if", "b", "[", "3", "]", "==", "0", ":", "d", "[", "2", "]", "=", "b", "[", "9", "]", "*", "4", "+", "b", "[", "8", "]", "*", "2", "+", "b", "[", "7", "]", "d", "[", "1", "]", "=", "b", "[", "6", "]", "*", "4", "+", "b", "[", "5", "]", "*", "2", "+", "b", "[", "4", "]", "d", "[", "0", "]", "=", "b", "[", "2", "]", "*", "4", "+", "b", "[", "1", "]", "*", "2", "+", "b", "[", "0", "]", "elif", "(", "b", "[", "3", "]", ",", "b", "[", "2", "]", ",", "b", "[", "1", "]", ")", "==", "(", "1", ",", "0", ",", "0", ")", ":", "d", "[", "2", "]", "=", "b", "[", "9", "]", "*", "4", "+", "b", "[", "8", "]", "*", "2", "+", "b", "[", "7", "]", "d", "[", "1", "]", "=", "b", "[", "6", "]", "*", "4", "+", "b", "[", "5", "]", "*", "2", "+", "b", "[", "4", "]", "d", "[", "0", "]", "=", "8", "+", "b", "[", "0", "]", "elif", "(", "b", "[", "3", "]", ",", "b", "[", "2", "]", ",", "b", "[", "1", "]", ")", "==", "(", "1", ",", "0", ",", "1", ")", ":", "d", "[", "2", "]", "=", "b", "[", "9", "]", "*", "4", "+", "b", "[", "8", "]", "*", "2", "+", "b", "[", "7", "]", "d", "[", "1", "]", "=", "8", "+", "b", "[", "4", "]", "d", "[", "0", "]", "=", "b", "[", "6", "]", "*", "4", "+", "b", "[", "5", "]", "*", "2", "+", "b", "[", "0", "]", "elif", "(", "b", "[", "3", "]", ",", "b", "[", "2", "]", ",", "b", "[", "1", "]", ")", "==", "(", "1", ",", "1", ",", "0", ")", ":", "d", "[", "2", "]", "=", "8", "+", "b", "[", "7", "]", "d", "[", "1", "]", "=", "b", "[", "6", "]", "*", "4", "+", "b", "[", "5", "]", "*", "2", "+", "b", "[", "4", "]", "d", "[", "0", "]", "=", "b", "[", "9", "]", "*", "4", "+", "b", "[", "8", "]", "*", "2", "+", "b", "[", "0", "]", "elif", "(", "b", "[", "6", "]", ",", "b", "[", "5", "]", ",", "b", "[", "3", "]", ",", "b", "[", "2", "]", ",", "b", "[", "1", "]", ")", "==", "(", "0", ",", "0", ",", "1", ",", "1", ",", "1", ")", ":", "d", "[", "2", "]", "=", "8", "+", "b", "[", "7", "]", "d", "[", "1", "]", "=", "8", "+", "b", "[", "4", "]", "d", "[", "0", "]", "=", "b", "[", "9", "]", "*", "4", "+", "b", "[", "8", "]", "*", "2", "+", "b", "[", "0", "]", "elif", "(", "b", "[", "6", "]", ",", "b", "[", "5", "]", ",", "b", "[", "3", "]", ",", "b", "[", "2", "]", ",", "b", "[", "1", "]", ")", "==", "(", "0", ",", "1", ",", "1", ",", "1", ",", "1", ")", ":", "d", "[", "2", "]", "=", "8", "+", "b", "[", "7", "]", "d", "[", "1", "]", "=", "b", "[", "9", "]", "*", "4", "+", "b", "[", "8", "]", "*", "2", "+", "b", "[", "4", "]", "d", "[", "0", "]", "=", "8", "+", "b", "[", "0", "]", "elif", "(", "b", "[", "6", "]", ",", "b", "[", "5", "]", ",", "b", "[", "3", "]", ",", "b", "[", "2", "]", ",", "b", "[", "1", "]", ")", "==", "(", "1", ",", 
"0", ",", "1", ",", "1", ",", "1", ")", ":", "d", "[", "2", "]", "=", "b", "[", "9", "]", "*", "4", "+", "b", "[", "8", "]", "*", "2", "+", "b", "[", "7", "]", "d", "[", "1", "]", "=", "8", "+", "b", "[", "4", "]", "d", "[", "0", "]", "=", "8", "+", "b", "[", "0", "]", "elif", "(", "b", "[", "6", "]", ",", "b", "[", "5", "]", ",", "b", "[", "3", "]", ",", "b", "[", "2", "]", ",", "b", "[", "1", "]", ")", "==", "(", "1", ",", "1", ",", "1", ",", "1", ",", "1", ")", ":", "d", "[", "2", "]", "=", "8", "+", "b", "[", "7", "]", "d", "[", "1", "]", "=", "8", "+", "b", "[", "4", "]", "d", "[", "0", "]", "=", "8", "+", "b", "[", "0", "]", "else", ":", "raise", "ValueError", "(", "'Invalid DPD encoding'", ")", "return", "d", "[", "2", "]", "*", "100", "+", "d", "[", "1", "]", "*", "10", "+", "d", "[", "0", "]" ]
Convert DPD encoded value to int (0-999) dpd: DPD encoded value. 10bit unsigned int
[ "Convert", "DPD", "encodined", "value", "to", "int", "(", "0", "-", "999", ")", "dpd", ":", "DPD", "encoded", "value", ".", "10bit", "unsigned", "int" ]
python
train
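A short worked example of the decoding above: ten DPD bits yield three decimal digits, with the b3/b2/b1 (and b6/b5) indicator bits selecting how digits of 8 or 9 are packed:

# b3 == 0 selects the "all three digits are 0-7" branch, so each digit is a
# plain 3-bit field: b9..b7 = 001 -> 1, b6..b4 = 010 -> 2, b2..b0 = 011 -> 3.
assert dpd_to_int(0b0010100011) == 123
# With (b3, b2, b1) == (1, 0, 0) the low digit is 8 + b0, so this decodes to 009.
assert dpd_to_int(0b0000001001) == 9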
line/line-bot-sdk-python
linebot/webhook.py
https://github.com/line/line-bot-sdk-python/blob/1b38bfc2497ff3e3c75be4b50e0f1b7425a07ce0/linebot/webhook.py#L165-L185
def add(self, event, message=None): """[Decorator] Add handler method. :param event: Specify a kind of Event which you want to handle :type event: T <= :py:class:`linebot.models.events.Event` class :param message: (optional) If event is MessageEvent, specify kind of Messages which you want to handle :type: message: T <= :py:class:`linebot.models.messages.Message` class :rtype: func :return: decorator """ def decorator(func): if isinstance(message, (list, tuple)): for it in message: self.__add_handler(func, event, message=it) else: self.__add_handler(func, event, message=message) return func return decorator
[ "def", "add", "(", "self", ",", "event", ",", "message", "=", "None", ")", ":", "def", "decorator", "(", "func", ")", ":", "if", "isinstance", "(", "message", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "it", "in", "message", ":", "self", ".", "__add_handler", "(", "func", ",", "event", ",", "message", "=", "it", ")", "else", ":", "self", ".", "__add_handler", "(", "func", ",", "event", ",", "message", "=", "message", ")", "return", "func", "return", "decorator" ]
[Decorator] Add handler method. :param event: Specify a kind of Event which you want to handle :type event: T <= :py:class:`linebot.models.events.Event` class :param message: (optional) If event is MessageEvent, specify kind of Messages which you want to handle :type: message: T <= :py:class:`linebot.models.messages.Message` class :rtype: func :return: decorator
[ "[", "Decorator", "]", "Add", "handler", "method", "." ]
python
train
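Typical usage of the decorator, following the pattern in the line-bot-sdk documentation; the channel secret value is a placeholder:

from linebot import WebhookHandler
from linebot.models import MessageEvent, TextMessage

handler = WebhookHandler('YOUR_CHANNEL_SECRET')

@handler.add(MessageEvent, message=TextMessage)
def handle_text_message(event):
    # Called for MessageEvents whose message is a TextMessage.
    print(event.message.text)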
saltstack/salt
salt/states/reg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/reg.py#L461-L539
def absent(name, vname=None, use_32bit_registry=False): r''' Ensure a registry value is removed. To remove a key use key_absent. Args: name (str): A string value representing the full path of the key to include the HIVE, Key, and all Subkeys. For example: ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` Valid hive values include: - HKEY_CURRENT_USER or HKCU - HKEY_LOCAL_MACHINE or HKLM - HKEY_USERS or HKU vname (str): The name of the value you'd like to create beneath the Key. If this parameter is not passed it will assume you want to set the ``(Default)`` value use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. Returns: dict: A dictionary showing the results of the registry operation. CLI Example: .. code-block:: yaml 'HKEY_CURRENT_USER\\SOFTWARE\\Salt': reg.absent - vname: version In the above example the value named ``version`` will be removed from the SOFTWARE\\Salt key in the HKEY_CURRENT_USER hive. If ``vname`` was not passed, the ``(Default)`` value would be deleted. ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} hive, key = _parse_key(name) # Determine what to do reg_check = __utils__['reg.read_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry) if not reg_check['success'] or reg_check['vdata'] == '(value not set)': ret['comment'] = '{0} is already absent'.format(name) return ret remove_change = {'Key': r'{0}\{1}'.format(hive, key), 'Entry': '{0}'.format(vname if vname else '(Default)')} # Check for test option if __opts__['test']: ret['result'] = None ret['changes'] = {'reg': {'Will remove': remove_change}} return ret # Delete the value ret['result'] = __utils__['reg.delete_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry) if not ret['result']: ret['changes'] = {} ret['comment'] = r'Failed to remove {0} from {1}'.format(key, hive) else: ret['changes'] = {'reg': {'Removed': remove_change}} ret['comment'] = r'Removed {0} from {1}'.format(key, hive) return ret
[ "def", "absent", "(", "name", ",", "vname", "=", "None", ",", "use_32bit_registry", "=", "False", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "hive", ",", "key", "=", "_parse_key", "(", "name", ")", "# Determine what to do", "reg_check", "=", "__utils__", "[", "'reg.read_value'", "]", "(", "hive", "=", "hive", ",", "key", "=", "key", ",", "vname", "=", "vname", ",", "use_32bit_registry", "=", "use_32bit_registry", ")", "if", "not", "reg_check", "[", "'success'", "]", "or", "reg_check", "[", "'vdata'", "]", "==", "'(value not set)'", ":", "ret", "[", "'comment'", "]", "=", "'{0} is already absent'", ".", "format", "(", "name", ")", "return", "ret", "remove_change", "=", "{", "'Key'", ":", "r'{0}\\{1}'", ".", "format", "(", "hive", ",", "key", ")", ",", "'Entry'", ":", "'{0}'", ".", "format", "(", "vname", "if", "vname", "else", "'(Default)'", ")", "}", "# Check for test option", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'changes'", "]", "=", "{", "'reg'", ":", "{", "'Will remove'", ":", "remove_change", "}", "}", "return", "ret", "# Delete the value", "ret", "[", "'result'", "]", "=", "__utils__", "[", "'reg.delete_value'", "]", "(", "hive", "=", "hive", ",", "key", "=", "key", ",", "vname", "=", "vname", ",", "use_32bit_registry", "=", "use_32bit_registry", ")", "if", "not", "ret", "[", "'result'", "]", ":", "ret", "[", "'changes'", "]", "=", "{", "}", "ret", "[", "'comment'", "]", "=", "r'Failed to remove {0} from {1}'", ".", "format", "(", "key", ",", "hive", ")", "else", ":", "ret", "[", "'changes'", "]", "=", "{", "'reg'", ":", "{", "'Removed'", ":", "remove_change", "}", "}", "ret", "[", "'comment'", "]", "=", "r'Removed {0} from {1}'", ".", "format", "(", "key", ",", "hive", ")", "return", "ret" ]
r''' Ensure a registry value is removed. To remove a key use key_absent. Args: name (str): A string value representing the full path of the key to include the HIVE, Key, and all Subkeys. For example: ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` Valid hive values include: - HKEY_CURRENT_USER or HKCU - HKEY_LOCAL_MACHINE or HKLM - HKEY_USERS or HKU vname (str): The name of the value you'd like to create beneath the Key. If this parameter is not passed it will assume you want to set the ``(Default)`` value use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. Returns: dict: A dictionary showing the results of the registry operation. CLI Example: .. code-block:: yaml 'HKEY_CURRENT_USER\\SOFTWARE\\Salt': reg.absent - vname: version In the above example the value named ``version`` will be removed from the SOFTWARE\\Salt key in the HKEY_CURRENT_USER hive. If ``vname`` was not passed, the ``(Default)`` value would be deleted.
[ "r", "Ensure", "a", "registry", "value", "is", "removed", ".", "To", "remove", "a", "key", "use", "key_absent", "." ]
python
train
sfischer13/python-prompt
prompt/__init__.py
https://github.com/sfischer13/python-prompt/blob/d2acf5db64a9e45247c7abf1d67c2eb7db87bb48/prompt/__init__.py#L108-L132
def integer(prompt=None, empty=False): """Prompt an integer. Parameters ---------- prompt : str, optional Use an alternative prompt. empty : bool, optional Allow an empty response. Returns ------- int or None An int if the user entered a valid integer. None if the user pressed only Enter and ``empty`` was True. """ s = _prompt_input(prompt) if empty and not s: return None else: try: return int(s) except ValueError: return integer(prompt=prompt, empty=empty)
[ "def", "integer", "(", "prompt", "=", "None", ",", "empty", "=", "False", ")", ":", "s", "=", "_prompt_input", "(", "prompt", ")", "if", "empty", "and", "not", "s", ":", "return", "None", "else", ":", "try", ":", "return", "int", "(", "s", ")", "except", "ValueError", ":", "return", "integer", "(", "prompt", "=", "prompt", ",", "empty", "=", "empty", ")" ]
Prompt an integer. Parameters ---------- prompt : str, optional Use an alternative prompt. empty : bool, optional Allow an empty response. Returns ------- int or None An int if the user entered a valid integer. None if the user pressed only Enter and ``empty`` was True.
[ "Prompt", "an", "integer", "." ]
python
train
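A small usage sketch; note that on invalid input the function re-prompts by calling itself recursively rather than raising:

age = integer(prompt="Age: ", empty=True)
if age is None:
    print("no age given")   # user pressed only Enter and empty=True
else:
    print(age + 1)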
taddeus/wspy
extension.py
https://github.com/taddeus/wspy/blob/13f054a72442bb8dcc37b0ac011cab6025830d66/extension.py#L31-L42
def conflicts(self, ext): """ Check if the extension conflicts with an already accepted extension. This may be the case when the two extensions use the same reserved bits, or have the same name (when the same extension is negotiated multiple times with different parameters). """ return ext.rsv1 and self.rsv1 \ or ext.rsv2 and self.rsv2 \ or ext.rsv3 and self.rsv3 \ or set(ext.names) & set(self.names) \ or set(ext.opcodes) & set(self.opcodes)
[ "def", "conflicts", "(", "self", ",", "ext", ")", ":", "return", "ext", ".", "rsv1", "and", "self", ".", "rsv1", "or", "ext", ".", "rsv2", "and", "self", ".", "rsv2", "or", "ext", ".", "rsv3", "and", "self", ".", "rsv3", "or", "set", "(", "ext", ".", "names", ")", "&", "set", "(", "self", ".", "names", ")", "or", "set", "(", "ext", ".", "opcodes", ")", "&", "set", "(", "self", ".", "opcodes", ")" ]
Check if the extension conflicts with an already accepted extension. This may be the case when the two extensions use the same reserved bits, or have the same name (when the same extension is negotiated multiple times with different parameters).
[ "Check", "if", "the", "extension", "conflicts", "with", "an", "already", "accepted", "extension", ".", "This", "may", "be", "the", "case", "when", "the", "two", "extensions", "use", "the", "same", "reserved", "bits", "or", "have", "the", "same", "name", "(", "when", "the", "same", "extension", "is", "negotiated", "multiple", "times", "with", "different", "parameters", ")", "." ]
python
train
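Because and binds tighter than or in Python, the expression above groups as three rsv-bit pairs followed by the two set intersections; the same logic with explicit parentheses:

def conflicts_explicit(self, ext):
    # bool() is added only to make the return value strictly True/False;
    # the original returns the (truthy or falsy) intersection sets themselves.
    return ((ext.rsv1 and self.rsv1)
            or (ext.rsv2 and self.rsv2)
            or (ext.rsv3 and self.rsv3)
            or bool(set(ext.names) & set(self.names))
            or bool(set(ext.opcodes) & set(self.opcodes)))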
iotile/coretools
iotilecore/iotile/core/hw/update/records/reflash_controller.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/update/records/reflash_controller.py#L86-L115
def FromBinary(cls, record_data, record_count=1): """Create an UpdateRecord subclass from binary record data. This should be called with a binary record blob (NOT including the record type header) and it will decode it into a ReflashControllerRecord. Args: record_data (bytearray): The raw record data that we wish to parse into an UpdateRecord subclass NOT including its 8 byte record header. record_count (int): The number of records included in record_data. Raises: ArgumentError: If the record_data is malformed and cannot be parsed. Returns: ReflashControllerRecord: The decoded reflash tile record. """ if len(record_data) < ReflashControllerRecord.RecordHeaderLength: raise ArgumentError("Record was too short to contain a full reflash record header", length=len(record_data), header_length=ReflashControllerRecord.RecordHeaderLength) offset, data_length = struct.unpack_from("<LL", record_data) bindata = record_data[ReflashControllerRecord.RecordHeaderLength:] if len(bindata) != data_length: raise ArgumentError("Embedded firmware length did not agree with actual length of embeded data", length=len(bindata), embedded_length=data_length) return ReflashControllerRecord(bindata, offset)
[ "def", "FromBinary", "(", "cls", ",", "record_data", ",", "record_count", "=", "1", ")", ":", "if", "len", "(", "record_data", ")", "<", "ReflashControllerRecord", ".", "RecordHeaderLength", ":", "raise", "ArgumentError", "(", "\"Record was too short to contain a full reflash record header\"", ",", "length", "=", "len", "(", "record_data", ")", ",", "header_length", "=", "ReflashControllerRecord", ".", "RecordHeaderLength", ")", "offset", ",", "data_length", "=", "struct", ".", "unpack_from", "(", "\"<LL\"", ",", "record_data", ")", "bindata", "=", "record_data", "[", "ReflashControllerRecord", ".", "RecordHeaderLength", ":", "]", "if", "len", "(", "bindata", ")", "!=", "data_length", ":", "raise", "ArgumentError", "(", "\"Embedded firmware length did not agree with actual length of embeded data\"", ",", "length", "=", "len", "(", "bindata", ")", ",", "embedded_length", "=", "data_length", ")", "return", "ReflashControllerRecord", "(", "bindata", ",", "offset", ")" ]
Create an UpdateRecord subclass from binary record data. This should be called with a binary record blob (NOT including the record type header) and it will decode it into a ReflashControllerRecord. Args: record_data (bytearray): The raw record data that we wish to parse into an UpdateRecord subclass NOT including its 8 byte record header. record_count (int): The number of records included in record_data. Raises: ArgumentError: If the record_data is malformed and cannot be parsed. Returns: ReflashControllerRecord: The decoded reflash tile record.
[ "Create", "an", "UpdateRecord", "subclass", "from", "binary", "record", "data", "." ]
python
train
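A minimal standalone sketch of the same wire layout, assuming nothing beyond the struct module: an 8-byte little-endian (offset, data_length) header followed by the raw firmware blob. The build_record helper is illustrative, not part of coretools.

import struct

RECORD_HEADER_LENGTH = 8  # struct.calcsize("<LL")

def build_record(offset, firmware):
    # Header first, firmware blob immediately after it.
    return struct.pack("<LL", offset, len(firmware)) + firmware

def parse_record(record_data):
    if len(record_data) < RECORD_HEADER_LENGTH:
        raise ValueError("record too short for a reflash header")
    offset, data_length = struct.unpack_from("<LL", record_data)
    bindata = record_data[RECORD_HEADER_LENGTH:]
    if len(bindata) != data_length:
        raise ValueError("declared firmware length does not match payload")
    return bindata, offset

blob = build_record(0x1000, b"\xde\xad\xbe\xef")
assert parse_record(blob) == (b"\xde\xad\xbe\xef", 0x1000)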
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_firmware_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_firmware_ext.py#L81-L93
def show_firmware_version_output_show_firmware_version_build_time(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_firmware_version = ET.Element("show_firmware_version") config = show_firmware_version output = ET.SubElement(show_firmware_version, "output") show_firmware_version = ET.SubElement(output, "show-firmware-version") build_time = ET.SubElement(show_firmware_version, "build-time") build_time.text = kwargs.pop('build_time') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "show_firmware_version_output_show_firmware_version_build_time", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_firmware_version", "=", "ET", ".", "Element", "(", "\"show_firmware_version\"", ")", "config", "=", "show_firmware_version", "output", "=", "ET", ".", "SubElement", "(", "show_firmware_version", ",", "\"output\"", ")", "show_firmware_version", "=", "ET", ".", "SubElement", "(", "output", ",", "\"show-firmware-version\"", ")", "build_time", "=", "ET", ".", "SubElement", "(", "show_firmware_version", ",", "\"build-time\"", ")", "build_time", ".", "text", "=", "kwargs", ".", "pop", "(", "'build_time'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
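For orientation, a hedged sketch of the XML subtree this generated method assembles, built with the standard-library ElementTree alone; the build-time value is a placeholder, not real device output.

import xml.etree.ElementTree as ET

show_firmware_version = ET.Element("show_firmware_version")
output = ET.SubElement(show_firmware_version, "output")
inner = ET.SubElement(output, "show-firmware-version")
build_time = ET.SubElement(inner, "build-time")
build_time.text = "2024-01-01T00:00:00"  # placeholder, normally kwargs['build_time']

print(ET.tostring(show_firmware_version, encoding="unicode"))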
allenai/allennlp
allennlp/modules/conditional_random_field.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/conditional_random_field.py#L207-L251
def _input_likelihood(self, logits: torch.Tensor, mask: torch.Tensor) -> torch.Tensor: """ Computes the (batch_size,) denominator term for the log-likelihood, which is the sum of the likelihoods across all possible state sequences. """ batch_size, sequence_length, num_tags = logits.size() # Transpose batch size and sequence dimensions mask = mask.float().transpose(0, 1).contiguous() logits = logits.transpose(0, 1).contiguous() # Initial alpha is the (batch_size, num_tags) tensor of likelihoods combining the # transitions to the initial states and the logits for the first timestep. if self.include_start_end_transitions: alpha = self.start_transitions.view(1, num_tags) + logits[0] else: alpha = logits[0] # For each i we compute logits for the transitions from timestep i-1 to timestep i. # We do so in a (batch_size, num_tags, num_tags) tensor where the axes are # (instance, current_tag, next_tag) for i in range(1, sequence_length): # The emit scores are for time i ("next_tag") so we broadcast along the current_tag axis. emit_scores = logits[i].view(batch_size, 1, num_tags) # Transition scores are (current_tag, next_tag) so we broadcast along the instance axis. transition_scores = self.transitions.view(1, num_tags, num_tags) # Alpha is for the current_tag, so we broadcast along the next_tag axis. broadcast_alpha = alpha.view(batch_size, num_tags, 1) # Add all the scores together and logexp over the current_tag axis inner = broadcast_alpha + emit_scores + transition_scores # In valid positions (mask == 1) we want to take the logsumexp over the current_tag dimension # of ``inner``. Otherwise (mask == 0) we want to retain the previous alpha. alpha = (util.logsumexp(inner, 1) * mask[i].view(batch_size, 1) + alpha * (1 - mask[i]).view(batch_size, 1)) # Every sequence needs to end with a transition to the stop_tag. if self.include_start_end_transitions: stops = alpha + self.end_transitions.view(1, num_tags) else: stops = alpha # Finally we log_sum_exp along the num_tags dim, result is (batch_size,) return util.logsumexp(stops)
[ "def", "_input_likelihood", "(", "self", ",", "logits", ":", "torch", ".", "Tensor", ",", "mask", ":", "torch", ".", "Tensor", ")", "->", "torch", ".", "Tensor", ":", "batch_size", ",", "sequence_length", ",", "num_tags", "=", "logits", ".", "size", "(", ")", "# Transpose batch size and sequence dimensions", "mask", "=", "mask", ".", "float", "(", ")", ".", "transpose", "(", "0", ",", "1", ")", ".", "contiguous", "(", ")", "logits", "=", "logits", ".", "transpose", "(", "0", ",", "1", ")", ".", "contiguous", "(", ")", "# Initial alpha is the (batch_size, num_tags) tensor of likelihoods combining the", "# transitions to the initial states and the logits for the first timestep.", "if", "self", ".", "include_start_end_transitions", ":", "alpha", "=", "self", ".", "start_transitions", ".", "view", "(", "1", ",", "num_tags", ")", "+", "logits", "[", "0", "]", "else", ":", "alpha", "=", "logits", "[", "0", "]", "# For each i we compute logits for the transitions from timestep i-1 to timestep i.", "# We do so in a (batch_size, num_tags, num_tags) tensor where the axes are", "# (instance, current_tag, next_tag)", "for", "i", "in", "range", "(", "1", ",", "sequence_length", ")", ":", "# The emit scores are for time i (\"next_tag\") so we broadcast along the current_tag axis.", "emit_scores", "=", "logits", "[", "i", "]", ".", "view", "(", "batch_size", ",", "1", ",", "num_tags", ")", "# Transition scores are (current_tag, next_tag) so we broadcast along the instance axis.", "transition_scores", "=", "self", ".", "transitions", ".", "view", "(", "1", ",", "num_tags", ",", "num_tags", ")", "# Alpha is for the current_tag, so we broadcast along the next_tag axis.", "broadcast_alpha", "=", "alpha", ".", "view", "(", "batch_size", ",", "num_tags", ",", "1", ")", "# Add all the scores together and logexp over the current_tag axis", "inner", "=", "broadcast_alpha", "+", "emit_scores", "+", "transition_scores", "# In valid positions (mask == 1) we want to take the logsumexp over the current_tag dimension", "# of ``inner``. Otherwise (mask == 0) we want to retain the previous alpha.", "alpha", "=", "(", "util", ".", "logsumexp", "(", "inner", ",", "1", ")", "*", "mask", "[", "i", "]", ".", "view", "(", "batch_size", ",", "1", ")", "+", "alpha", "*", "(", "1", "-", "mask", "[", "i", "]", ")", ".", "view", "(", "batch_size", ",", "1", ")", ")", "# Every sequence needs to end with a transition to the stop_tag.", "if", "self", ".", "include_start_end_transitions", ":", "stops", "=", "alpha", "+", "self", ".", "end_transitions", ".", "view", "(", "1", ",", "num_tags", ")", "else", ":", "stops", "=", "alpha", "# Finally we log_sum_exp along the num_tags dim, result is (batch_size,)", "return", "util", ".", "logsumexp", "(", "stops", ")" ]
Computes the (batch_size,) denominator term for the log-likelihood, which is the sum of the likelihoods across all possible state sequences.
[ "Computes", "the", "(", "batch_size", ")", "denominator", "term", "for", "the", "log", "-", "likelihood", "which", "is", "the", "sum", "of", "the", "likelihoods", "across", "all", "possible", "state", "sequences", "." ]
python
train
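The recursion is easier to see without batching or masking. Below is a hedged NumPy-only sketch of the same forward algorithm for a single fully observed sequence, ignoring start/end transitions; the names mirror the method above but this is not the AllenNLP implementation.

import numpy as np

def log_partition(logits, transitions):
    # logits: (sequence_length, num_tags); transitions[c, n]: score of moving c -> n.
    alpha = logits[0]
    for emit in logits[1:]:
        # inner[c, n] = alpha[c] + transitions[c, n] + emit[n]
        inner = alpha[:, None] + transitions + emit[None, :]
        alpha = np.logaddexp.reduce(inner, axis=0)  # logsumexp over current_tag
    return np.logaddexp.reduce(alpha)               # logsumexp over the final tags

rng = np.random.default_rng(0)
print(log_partition(rng.normal(size=(4, 3)), rng.normal(size=(3, 3))))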
seleniumbase/SeleniumBase
seleniumbase/fixtures/email_manager.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/email_manager.py#L54-L102
def __imap_search(self, ** criteria_dict): """ Searches for query in the given IMAP criteria and returns the message numbers that match as a list of strings. Criteria without values (eg DELETED) should be keyword args with KEY=True, or else not passed. Criteria with values should be keyword args of the form KEY="VALUE" where KEY is a valid IMAP key. IMAP default is to AND all criteria together. We don't support other logic quite yet. All valid keys: ALL, ANSWERED, BCC <string>, BEFORE <string>, BODY <string>, CC <string>, DELETED, DRAFT, FLAGGED, FROM <string>, HEADER <field-name> <string> (UNTESTED), KEYWORD <flag>, LARGER <n>, NEW, NOT <search-key>, OLD, ON <date>, OR <search-key1> <search-key2> (UNTESTED), RECENT, SEEN, SENTBEFORE <date>, SENTON <date>, SENTSINCE <date>, SINCE <date>, SMALLER <n>, SUBJECT <string>, TEXT <string>, TO <string>, UID <sequence set>, UNANSWERED, UNDELETED, UNDRAFT, UNFLAGGED, UNKEYWORD <flag>, UNSEEN. For details on keys and their values, see http://tools.ietf.org/html/rfc3501#section-6.4.4 :param criteria_dict: dictionary of search criteria keywords :raises: EmailException if something in IMAP breaks :returns: List of message numbers as strings matched by given criteria """ self.imap_connect() criteria = [] for key in criteria_dict: if criteria_dict[key] is True: criteria.append('(%s)' % key) else: criteria.append('(%s "%s")' % (key, criteria_dict[key])) # If any of these criteria are not valid IMAP keys, IMAP will tell us. status, msg_nums = self.mailbox.search('UTF-8', * criteria) self.imap_disconnect() if 0 == len(msg_nums): msg_nums = [] if 'OK' in status: return self.__parse_imap_search_result(msg_nums) else: raise EmailException("IMAP status is " + str(status))
[ "def", "__imap_search", "(", "self", ",", "*", "*", "criteria_dict", ")", ":", "self", ".", "imap_connect", "(", ")", "criteria", "=", "[", "]", "for", "key", "in", "criteria_dict", ":", "if", "criteria_dict", "[", "key", "]", "is", "True", ":", "criteria", ".", "append", "(", "'(%s)'", "%", "key", ")", "else", ":", "criteria", ".", "append", "(", "'(%s \"%s\")'", "%", "(", "key", ",", "criteria_dict", "[", "key", "]", ")", ")", "# If any of these criteria are not valid IMAP keys, IMAP will tell us.", "status", ",", "msg_nums", "=", "self", ".", "mailbox", ".", "search", "(", "'UTF-8'", ",", "*", "criteria", ")", "self", ".", "imap_disconnect", "(", ")", "if", "0", "==", "len", "(", "msg_nums", ")", ":", "msg_nums", "=", "[", "]", "if", "'OK'", "in", "status", ":", "return", "self", ".", "__parse_imap_search_result", "(", "msg_nums", ")", "else", ":", "raise", "EmailException", "(", "\"IMAP status is \"", "+", "str", "(", "status", ")", ")" ]
Searches for query in the given IMAP criteria and returns the message numbers that match as a list of strings. Criteria without values (eg DELETED) should be keyword args with KEY=True, or else not passed. Criteria with values should be keyword args of the form KEY="VALUE" where KEY is a valid IMAP key. IMAP default is to AND all criteria together. We don't support other logic quite yet. All valid keys: ALL, ANSWERED, BCC <string>, BEFORE <string>, BODY <string>, CC <string>, DELETED, DRAFT, FLAGGED, FROM <string>, HEADER <field-name> <string> (UNTESTED), KEYWORD <flag>, LARGER <n>, NEW, NOT <search-key>, OLD, ON <date>, OR <search-key1> <search-key2> (UNTESTED), RECENT, SEEN, SENTBEFORE <date>, SENTON <date>, SENTSINCE <date>, SINCE <date>, SMALLER <n>, SUBJECT <string>, TEXT <string>, TO <string>, UID <sequence set>, UNANSWERED, UNDELETED, UNDRAFT, UNFLAGGED, UNKEYWORD <flag>, UNSEEN. For details on keys and their values, see http://tools.ietf.org/html/rfc3501#section-6.4.4 :param criteria_dict: dictionary of search criteria keywords :raises: EmailException if something in IMAP breaks :returns: List of message numbers as strings matched by given criteria
[ "Searches", "for", "query", "in", "the", "given", "IMAP", "criteria", "and", "returns", "the", "message", "numbers", "that", "match", "as", "a", "list", "of", "strings", "." ]
python
train
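The criteria-building step can be exercised on its own. A small sketch, with no mailbox connection, of how the keyword arguments above are flattened into IMAP search atoms; the example criteria are illustrative.

def build_imap_criteria(**criteria_dict):
    criteria = []
    for key, value in criteria_dict.items():
        if value is True:
            criteria.append('(%s)' % key)            # valueless key, e.g. UNSEEN
        else:
            criteria.append('(%s "%s")' % (key, value))
    return criteria

print(build_imap_criteria(UNSEEN=True, SUBJECT="password reset"))
# ['(UNSEEN)', '(SUBJECT "password reset")']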
oanda/v20-python
src/v20/pricing.py
https://github.com/oanda/v20-python/blob/f28192f4a31bce038cf6dfa302f5878bec192fe5/src/v20/pricing.py#L196-L216
def from_dict(data, ctx): """ Instantiate a new QuoteHomeConversionFactors from a dict (generally from loading a JSON response). The data used to instantiate the QuoteHomeConversionFactors is a shallow copy of the dict passed in, with any complex child types instantiated appropriately. """ data = data.copy() if data.get('positiveUnits') is not None: data['positiveUnits'] = ctx.convert_decimal_number( data.get('positiveUnits') ) if data.get('negativeUnits') is not None: data['negativeUnits'] = ctx.convert_decimal_number( data.get('negativeUnits') ) return QuoteHomeConversionFactors(**data)
[ "def", "from_dict", "(", "data", ",", "ctx", ")", ":", "data", "=", "data", ".", "copy", "(", ")", "if", "data", ".", "get", "(", "'positiveUnits'", ")", "is", "not", "None", ":", "data", "[", "'positiveUnits'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'positiveUnits'", ")", ")", "if", "data", ".", "get", "(", "'negativeUnits'", ")", "is", "not", "None", ":", "data", "[", "'negativeUnits'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'negativeUnits'", ")", ")", "return", "QuoteHomeConversionFactors", "(", "*", "*", "data", ")" ]
Instantiate a new QuoteHomeConversionFactors from a dict (generally from loading a JSON response). The data used to instantiate the QuoteHomeConversionFactors is a shallow copy of the dict passed in, with any complex child types instantiated appropriately.
[ "Instantiate", "a", "new", "QuoteHomeConversionFactors", "from", "a", "dict", "(", "generally", "from", "loading", "a", "JSON", "response", ")", ".", "The", "data", "used", "to", "instantiate", "the", "QuoteHomeConversionFactors", "is", "a", "shallow", "copy", "of", "the", "dict", "passed", "in", "with", "any", "complex", "child", "types", "instantiated", "appropriately", "." ]
python
train
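A generic sketch of the same "shallow copy, then convert selected fields" pattern; DummyCtx and the plain-dict return are stand-ins for the v20 context and class constructor, not the real API.

from decimal import Decimal

class DummyCtx:
    def convert_decimal_number(self, value):
        return Decimal(str(value))

def from_dict(data, ctx):
    data = data.copy()  # never mutate the caller's dict
    for field in ('positiveUnits', 'negativeUnits'):
        if data.get(field) is not None:
            data[field] = ctx.convert_decimal_number(data[field])
    return data

print(from_dict({'positiveUnits': '1.00045', 'negativeUnits': '0.99955'}, DummyCtx()))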
fermiPy/fermipy
fermipy/jobs/link.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/link.py#L475-L480
def _update_file_args(self, file_mapping): """Adjust the arguments to deal with staging files to the scratch area""" for key, value in self.args.items(): new_value = file_mapping.get(value, value) if new_value != value: self.args[key] = new_value
[ "def", "_update_file_args", "(", "self", ",", "file_mapping", ")", ":", "for", "key", ",", "value", "in", "self", ".", "args", ".", "items", "(", ")", ":", "new_value", "=", "file_mapping", ".", "get", "(", "value", ",", "value", ")", "if", "new_value", "!=", "value", ":", "self", ".", "args", "[", "key", "]", "=", "new_value" ]
Adjust the arguments to deal with staging files to the scratch area
[ "Adjust", "the", "arguments", "to", "deal", "with", "staging", "files", "to", "the", "scratch", "area" ]
python
train
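A self-contained sketch of the same remapping on a plain dict: values that appear in the staging map are swapped for their scratch-area paths, everything else is left untouched. The paths are illustrative.

def update_file_args(args, file_mapping):
    for key, value in args.items():
        new_value = file_mapping.get(value, value)
        if new_value != value:
            args[key] = new_value
    return args

args = {'infile': '/data/run1.fits', 'nevents': 1000}
mapping = {'/data/run1.fits': '/scratch/job42/run1.fits'}
print(update_file_args(args, mapping))
# {'infile': '/scratch/job42/run1.fits', 'nevents': 1000}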
sentinel-hub/sentinelhub-py
sentinelhub/aws.py
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/aws.py#L337-L358
def get_requests(self): """ Creates product structure and returns list of files for download. :return: List of download requests and list of empty folders that need to be created :rtype: (list(download.DownloadRequest), list(str)) """ self.download_list = [DownloadRequest(url=self.get_url(metafile), filename=self.get_filepath(metafile), data_type=AwsConstants.AWS_FILES[metafile], data_name=metafile) for metafile in self.metafiles if metafile in AwsConstants.PRODUCT_FILES] tile_parent_folder = os.path.join(self.parent_folder, self.product_id) for tile_info in self.product_info['tiles']: tile_name, date, aws_index = self.url_to_tile(self.get_tile_url(tile_info)) if self.tile_list is None or AwsTile.parse_tile_name(tile_name) in self.tile_list: tile_downloads, tile_folders = AwsTile(tile_name, date, aws_index, parent_folder=tile_parent_folder, bands=self.bands, metafiles=self.metafiles, data_source=self.data_source).get_requests() self.download_list.extend(tile_downloads) self.folder_list.extend(tile_folders) self.sort_download_list() return self.download_list, self.folder_list
[ "def", "get_requests", "(", "self", ")", ":", "self", ".", "download_list", "=", "[", "DownloadRequest", "(", "url", "=", "self", ".", "get_url", "(", "metafile", ")", ",", "filename", "=", "self", ".", "get_filepath", "(", "metafile", ")", ",", "data_type", "=", "AwsConstants", ".", "AWS_FILES", "[", "metafile", "]", ",", "data_name", "=", "metafile", ")", "for", "metafile", "in", "self", ".", "metafiles", "if", "metafile", "in", "AwsConstants", ".", "PRODUCT_FILES", "]", "tile_parent_folder", "=", "os", ".", "path", ".", "join", "(", "self", ".", "parent_folder", ",", "self", ".", "product_id", ")", "for", "tile_info", "in", "self", ".", "product_info", "[", "'tiles'", "]", ":", "tile_name", ",", "date", ",", "aws_index", "=", "self", ".", "url_to_tile", "(", "self", ".", "get_tile_url", "(", "tile_info", ")", ")", "if", "self", ".", "tile_list", "is", "None", "or", "AwsTile", ".", "parse_tile_name", "(", "tile_name", ")", "in", "self", ".", "tile_list", ":", "tile_downloads", ",", "tile_folders", "=", "AwsTile", "(", "tile_name", ",", "date", ",", "aws_index", ",", "parent_folder", "=", "tile_parent_folder", ",", "bands", "=", "self", ".", "bands", ",", "metafiles", "=", "self", ".", "metafiles", ",", "data_source", "=", "self", ".", "data_source", ")", ".", "get_requests", "(", ")", "self", ".", "download_list", ".", "extend", "(", "tile_downloads", ")", "self", ".", "folder_list", ".", "extend", "(", "tile_folders", ")", "self", ".", "sort_download_list", "(", ")", "return", "self", ".", "download_list", ",", "self", ".", "folder_list" ]
Creates product structure and returns list of files for download. :return: List of download requests and list of empty folders that need to be created :rtype: (list(download.DownloadRequest), list(str))
[ "Creates", "product", "structure", "and", "returns", "list", "of", "files", "for", "download", "." ]
python
train
saltstack/salt
salt/thorium/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/thorium/__init__.py#L163-L190
def call_runtime(self): ''' Execute the runtime ''' cache = self.gather_cache() chunks = self.get_chunks() interval = self.opts['thorium_interval'] recompile = self.opts.get('thorium_recompile', 300) r_start = time.time() while True: events = self.get_events() if not events: time.sleep(interval) continue start = time.time() self.state.inject_globals['__events__'] = events self.state.call_chunks(chunks) elapsed = time.time() - start left = interval - elapsed if left > 0: time.sleep(left) self.state.reset_run_num() if (start - r_start) > recompile: cache = self.gather_cache() chunks = self.get_chunks() if self.reg_ret is not None: self.returners['{0}.save_reg'.format(self.reg_ret)](chunks) r_start = time.time()
[ "def", "call_runtime", "(", "self", ")", ":", "cache", "=", "self", ".", "gather_cache", "(", ")", "chunks", "=", "self", ".", "get_chunks", "(", ")", "interval", "=", "self", ".", "opts", "[", "'thorium_interval'", "]", "recompile", "=", "self", ".", "opts", ".", "get", "(", "'thorium_recompile'", ",", "300", ")", "r_start", "=", "time", ".", "time", "(", ")", "while", "True", ":", "events", "=", "self", ".", "get_events", "(", ")", "if", "not", "events", ":", "time", ".", "sleep", "(", "interval", ")", "continue", "start", "=", "time", ".", "time", "(", ")", "self", ".", "state", ".", "inject_globals", "[", "'__events__'", "]", "=", "events", "self", ".", "state", ".", "call_chunks", "(", "chunks", ")", "elapsed", "=", "time", ".", "time", "(", ")", "-", "start", "left", "=", "interval", "-", "elapsed", "if", "left", ">", "0", ":", "time", ".", "sleep", "(", "left", ")", "self", ".", "state", ".", "reset_run_num", "(", ")", "if", "(", "start", "-", "r_start", ")", ">", "recompile", ":", "cache", "=", "self", ".", "gather_cache", "(", ")", "chunks", "=", "self", ".", "get_chunks", "(", ")", "if", "self", ".", "reg_ret", "is", "not", "None", ":", "self", ".", "returners", "[", "'{0}.save_reg'", ".", "format", "(", "self", ".", "reg_ret", ")", "]", "(", "chunks", ")", "r_start", "=", "time", ".", "time", "(", ")" ]
Execute the runtime
[ "Execute", "the", "runtime" ]
python
train
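The pacing logic stands alone: run the body, sleep away whatever is left of the interval, and periodically rebuild expensive cached state. A bounded, hedged sketch under assumed names, not Salt code:

import time

def paced_loop(body, refresh, interval=0.5, recompile=300, iterations=3):
    state = refresh()
    r_start = time.time()
    for _ in range(iterations):            # bounded here; the real loop runs forever
        start = time.time()
        body(state)
        left = interval - (time.time() - start)
        if left > 0:
            time.sleep(left)               # smooth the cadence to one pass per interval
        if (start - r_start) > recompile:  # time to rebuild the cached state
            state = refresh()
            r_start = time.time()

paced_loop(lambda s: print("tick", s), lambda: "chunks", interval=0.1, recompile=1)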
l04m33/pyx
pyx/http.py
https://github.com/l04m33/pyx/blob/b70efec605832ba3c7079e991584db3f5d1da8cb/pyx/http.py#L273-L287
def respond(self, code): """Starts a response. ``code`` is an integer standing for standard HTTP status code. This method will automatically adjust the response to adapt to request parameters, such as "Accept-Encoding" and "TE". """ # TODO: respect encodings etc. in the request resp = HttpResponse(code, self.connection) resp.request = self if hasattr(self, 'version'): resp.version = self.version return resp
[ "def", "respond", "(", "self", ",", "code", ")", ":", "# TODO: respect encodings etc. in the request", "resp", "=", "HttpResponse", "(", "code", ",", "self", ".", "connection", ")", "resp", ".", "request", "=", "self", "if", "hasattr", "(", "self", ",", "'version'", ")", ":", "resp", ".", "version", "=", "self", ".", "version", "return", "resp" ]
Starts a response. ``code`` is an integer standing for standard HTTP status code. This method will automatically adjust the response to adapt to request parameters, such as "Accept-Encoding" and "TE".
[ "Starts", "a", "response", "." ]
python
train
adafruit/Adafruit_Python_LED_Backpack
Adafruit_LED_Backpack/SevenSegment.py
https://github.com/adafruit/Adafruit_Python_LED_Backpack/blob/7356b4dd8b4bb162d60987878c2cb752fdd017d5/Adafruit_LED_Backpack/SevenSegment.py#L167-L188
def print_number_str(self, value, justify_right=True): """Print a 4 character long string of numeric values to the display. Characters in the string should be any supported character by set_digit, or a decimal point. Decimal point characters will be associated with the previous character. """ # Calculate length of value without decimals. length = sum(map(lambda x: 1 if x != '.' else 0, value)) # Error if value without decimals is longer than 4 characters. if length > 4: self.print_number_str('----') return # Calculcate starting position of digits based on justification. pos = (4-length) if justify_right else 0 # Go through each character and print it on the display. for i, ch in enumerate(value): if ch == '.': # Print decimal points on the previous digit. self.set_decimal(pos-1, True) else: self.set_digit(pos, ch) pos += 1
[ "def", "print_number_str", "(", "self", ",", "value", ",", "justify_right", "=", "True", ")", ":", "# Calculate length of value without decimals.", "length", "=", "sum", "(", "map", "(", "lambda", "x", ":", "1", "if", "x", "!=", "'.'", "else", "0", ",", "value", ")", ")", "# Error if value without decimals is longer than 4 characters.", "if", "length", ">", "4", ":", "self", ".", "print_number_str", "(", "'----'", ")", "return", "# Calculcate starting position of digits based on justification.", "pos", "=", "(", "4", "-", "length", ")", "if", "justify_right", "else", "0", "# Go through each character and print it on the display.", "for", "i", ",", "ch", "in", "enumerate", "(", "value", ")", ":", "if", "ch", "==", "'.'", ":", "# Print decimal points on the previous digit.", "self", ".", "set_decimal", "(", "pos", "-", "1", ",", "True", ")", "else", ":", "self", ".", "set_digit", "(", "pos", ",", "ch", ")", "pos", "+=", "1" ]
Print a 4 character long string of numeric values to the display. Characters in the string should be any supported character by set_digit, or a decimal point. Decimal point characters will be associated with the previous character.
[ "Print", "a", "4", "character", "long", "string", "of", "numeric", "values", "to", "the", "display", ".", "Characters", "in", "the", "string", "should", "be", "any", "supported", "character", "by", "set_digit", "or", "a", "decimal", "point", ".", "Decimal", "point", "characters", "will", "be", "associated", "with", "the", "previous", "character", "." ]
python
train
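A display-free sketch of the placement rules above: count digits ignoring decimal points, right-justify within four positions, and attach each '.' to the digit before it. It returns (position, character) tuples instead of driving the LED backpack.

def layout_number_str(value, justify_right=True):
    length = sum(1 for ch in value if ch != '.')
    if length > 4:
        return layout_number_str('----')       # overflow shown as dashes
    pos = (4 - length) if justify_right else 0
    out = []
    for ch in value:
        if ch == '.':
            out.append((pos - 1, '.'))          # decimal point rides on the previous digit
        else:
            out.append((pos, ch))
            pos += 1
    return out

print(layout_number_str('3.14'))
# [(1, '3'), (1, '.'), (2, '1'), (3, '4')]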
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_port_profile.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_port_profile.py#L698-L711
def port_profile_global_port_profile_static_mac_address(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") port_profile_global = ET.SubElement(config, "port-profile-global", xmlns="urn:brocade.com:mgmt:brocade-port-profile") port_profile = ET.SubElement(port_profile_global, "port-profile") name_key = ET.SubElement(port_profile, "name") name_key.text = kwargs.pop('name') static = ET.SubElement(port_profile, "static") mac_address = ET.SubElement(static, "mac-address") mac_address.text = kwargs.pop('mac_address') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "port_profile_global_port_profile_static_mac_address", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "port_profile_global", "=", "ET", ".", "SubElement", "(", "config", ",", "\"port-profile-global\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-port-profile\"", ")", "port_profile", "=", "ET", ".", "SubElement", "(", "port_profile_global", ",", "\"port-profile\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "port_profile", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "static", "=", "ET", ".", "SubElement", "(", "port_profile", ",", "\"static\"", ")", "mac_address", "=", "ET", ".", "SubElement", "(", "static", ",", "\"mac-address\"", ")", "mac_address", ".", "text", "=", "kwargs", ".", "pop", "(", "'mac_address'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
yougov/FogBugzPy
fogbugz.py
https://github.com/yougov/FogBugzPy/blob/593aba31dff69b5d42f864e544f8a9c1872e3a16/fogbugz.py#L125-L172
def __encode_multipart_formdata(self, fields, files): """ fields is a sequence of (key, value) elements for regular form fields. files is a sequence of (filename, filehandle) files to be uploaded returns (content_type, body) """ BOUNDARY = _make_boundary() if len(files) > 0: fields['nFileCount'] = str(len(files)) crlf = '\r\n' buf = BytesIO() for k, v in fields.items(): if DEBUG: print("field: %s: %s" % (repr(k), repr(v))) lines = [ '--' + BOUNDARY, 'Content-disposition: form-data; name="%s"' % k, '', str(v), '', ] buf.write(crlf.join(lines).encode('utf-8')) n = 0 for f, h in files.items(): n += 1 lines = [ '--' + BOUNDARY, 'Content-disposition: form-data; name="File%d"; ' 'filename="%s"' % (n, f), '', ] buf.write(crlf.join(lines).encode('utf-8')) lines = [ 'Content-type: application/octet-stream', '', '', ] buf.write(crlf.join(lines).encode('utf-8')) buf.write(h.read()) buf.write(crlf.encode('utf-8')) buf.write(('--' + BOUNDARY + '--' + crlf).encode('utf-8')) content_type = "multipart/form-data; boundary=%s" % BOUNDARY return content_type, buf.getvalue()
[ "def", "__encode_multipart_formdata", "(", "self", ",", "fields", ",", "files", ")", ":", "BOUNDARY", "=", "_make_boundary", "(", ")", "if", "len", "(", "files", ")", ">", "0", ":", "fields", "[", "'nFileCount'", "]", "=", "str", "(", "len", "(", "files", ")", ")", "crlf", "=", "'\\r\\n'", "buf", "=", "BytesIO", "(", ")", "for", "k", ",", "v", "in", "fields", ".", "items", "(", ")", ":", "if", "DEBUG", ":", "print", "(", "\"field: %s: %s\"", "%", "(", "repr", "(", "k", ")", ",", "repr", "(", "v", ")", ")", ")", "lines", "=", "[", "'--'", "+", "BOUNDARY", ",", "'Content-disposition: form-data; name=\"%s\"'", "%", "k", ",", "''", ",", "str", "(", "v", ")", ",", "''", ",", "]", "buf", ".", "write", "(", "crlf", ".", "join", "(", "lines", ")", ".", "encode", "(", "'utf-8'", ")", ")", "n", "=", "0", "for", "f", ",", "h", "in", "files", ".", "items", "(", ")", ":", "n", "+=", "1", "lines", "=", "[", "'--'", "+", "BOUNDARY", ",", "'Content-disposition: form-data; name=\"File%d\"; '", "'filename=\"%s\"'", "%", "(", "n", ",", "f", ")", ",", "''", ",", "]", "buf", ".", "write", "(", "crlf", ".", "join", "(", "lines", ")", ".", "encode", "(", "'utf-8'", ")", ")", "lines", "=", "[", "'Content-type: application/octet-stream'", ",", "''", ",", "''", ",", "]", "buf", ".", "write", "(", "crlf", ".", "join", "(", "lines", ")", ".", "encode", "(", "'utf-8'", ")", ")", "buf", ".", "write", "(", "h", ".", "read", "(", ")", ")", "buf", ".", "write", "(", "crlf", ".", "encode", "(", "'utf-8'", ")", ")", "buf", ".", "write", "(", "(", "'--'", "+", "BOUNDARY", "+", "'--'", "+", "crlf", ")", ".", "encode", "(", "'utf-8'", ")", ")", "content_type", "=", "\"multipart/form-data; boundary=%s\"", "%", "BOUNDARY", "return", "content_type", ",", "buf", ".", "getvalue", "(", ")" ]
fields is a sequence of (key, value) elements for regular form fields. files is a sequence of (filename, filehandle) files to be uploaded returns (content_type, body)
[ "fields", "is", "a", "sequence", "of", "(", "key", "value", ")", "elements", "for", "regular", "form", "fields", ".", "files", "is", "a", "sequence", "of", "(", "filename", "filehandle", ")", "files", "to", "be", "uploaded", "returns", "(", "content_type", "body", ")" ]
python
valid
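The wire format itself is plain string assembly. A trimmed, standalone sketch of one text field and one file part with a hard-coded boundary; the field names are illustrative, not the FogBugz API.

from io import BytesIO

BOUNDARY = '----sketch-boundary'
crlf = '\r\n'
buf = BytesIO()

# Ordinary form field: headers, blank line, value, trailing CRLF.
buf.write(crlf.join([
    '--' + BOUNDARY,
    'Content-disposition: form-data; name="sTitle"',
    '',
    'Crash on startup',
    '',
]).encode('utf-8'))

# One uploaded file: headers, blank line, raw bytes, trailing CRLF.
buf.write(crlf.join([
    '--' + BOUNDARY,
    'Content-disposition: form-data; name="File1"; filename="log.txt"',
    'Content-type: application/octet-stream',
    '',
    '',
]).encode('utf-8'))
buf.write(b'traceback goes here' + crlf.encode('utf-8'))

buf.write(('--' + BOUNDARY + '--' + crlf).encode('utf-8'))
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
print(content_type)
print(buf.getvalue().decode('utf-8'))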
jart/fabulous
fabulous/gotham.py
https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/gotham.py#L95-L105
def lorem_gotham_title(): """Names your poem """ w = lambda l: l[random.randrange(len(l))] sentence = lambda *l: lambda: " ".join(l) pick = lambda *l: (l[random.randrange(len(l))])() return pick( sentence('why i',w(me_verb)), sentence(w(place)), sentence('a',w(adj),w(adj),w(place)), sentence('the',w(them)))
[ "def", "lorem_gotham_title", "(", ")", ":", "w", "=", "lambda", "l", ":", "l", "[", "random", ".", "randrange", "(", "len", "(", "l", ")", ")", "]", "sentence", "=", "lambda", "*", "l", ":", "lambda", ":", "\" \"", ".", "join", "(", "l", ")", "pick", "=", "lambda", "*", "l", ":", "(", "l", "[", "random", ".", "randrange", "(", "len", "(", "l", ")", ")", "]", ")", "(", ")", "return", "pick", "(", "sentence", "(", "'why i'", ",", "w", "(", "me_verb", ")", ")", ",", "sentence", "(", "w", "(", "place", ")", ")", ",", "sentence", "(", "'a'", ",", "w", "(", "adj", ")", ",", "w", "(", "adj", ")", ",", "w", "(", "place", ")", ")", ",", "sentence", "(", "'the'", ",", "w", "(", "them", ")", ")", ")" ]
Names your poem
[ "Names", "your", "poem" ]
python
train
jbasko/configmanager
configmanager/managers.py
https://github.com/jbasko/configmanager/blob/1d7229ce367143c7210d8e5f0782de03945a1721/configmanager/managers.py#L152-L167
def configparser(self): """ Adapter to dump/load INI format strings and files using standard library's ``ConfigParser`` (or the backported configparser module in Python 2). Returns: ConfigPersistenceAdapter """ if self._configparser_adapter is None: self._configparser_adapter = ConfigPersistenceAdapter( config=self, reader_writer=ConfigParserReaderWriter( config_parser_factory=self.settings.configparser_factory, ), ) return self._configparser_adapter
[ "def", "configparser", "(", "self", ")", ":", "if", "self", ".", "_configparser_adapter", "is", "None", ":", "self", ".", "_configparser_adapter", "=", "ConfigPersistenceAdapter", "(", "config", "=", "self", ",", "reader_writer", "=", "ConfigParserReaderWriter", "(", "config_parser_factory", "=", "self", ".", "settings", ".", "configparser_factory", ",", ")", ",", ")", "return", "self", ".", "_configparser_adapter" ]
Adapter to dump/load INI format strings and files using standard library's ``ConfigParser`` (or the backported configparser module in Python 2). Returns: ConfigPersistenceAdapter
[ "Adapter", "to", "dump", "/", "load", "INI", "format", "strings", "and", "files", "using", "standard", "library", "s", "ConfigParser", "(", "or", "the", "backported", "configparser", "module", "in", "Python", "2", ")", ".", "Returns", ":", "ConfigPersistenceAdapter" ]
python
train
tcalmant/ipopo
pelix/ipopo/instance.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/instance.py#L927-L950
def __unset_binding(self, dependency, service, reference): # type: (Any, Any, ServiceReference) -> None """ Removes a service from the component :param dependency: The dependency handler :param service: The injected service :param reference: The reference of the injected service """ # Call the component back self.__safe_field_callback( dependency.get_field(), constants.IPOPO_CALLBACK_UNBIND_FIELD, service, reference, ) self.safe_callback(constants.IPOPO_CALLBACK_UNBIND, service, reference) # Update the injected field setattr(self.instance, dependency.get_field(), dependency.get_value()) # Unget the service self.bundle_context.unget_service(reference)
[ "def", "__unset_binding", "(", "self", ",", "dependency", ",", "service", ",", "reference", ")", ":", "# type: (Any, Any, ServiceReference) -> None", "# Call the component back", "self", ".", "__safe_field_callback", "(", "dependency", ".", "get_field", "(", ")", ",", "constants", ".", "IPOPO_CALLBACK_UNBIND_FIELD", ",", "service", ",", "reference", ",", ")", "self", ".", "safe_callback", "(", "constants", ".", "IPOPO_CALLBACK_UNBIND", ",", "service", ",", "reference", ")", "# Update the injected field", "setattr", "(", "self", ".", "instance", ",", "dependency", ".", "get_field", "(", ")", ",", "dependency", ".", "get_value", "(", ")", ")", "# Unget the service", "self", ".", "bundle_context", ".", "unget_service", "(", "reference", ")" ]
Removes a service from the component :param dependency: The dependency handler :param service: The injected service :param reference: The reference of the injected service
[ "Removes", "a", "service", "from", "the", "component" ]
python
train
prezi/django-zipkin
django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py
https://github.com/prezi/django-zipkin/blob/158d04cf9c2fe0adcb4cda66a250d9e41eae33f3/django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py#L286-L302
def getTraceIdsBySpanName(self, service_name, span_name, end_ts, limit, order): """ Fetch trace ids by service and span name. Gets "limit" number of entries from before the "end_ts". Span name is optional. Timestamps are in microseconds. Parameters: - service_name - span_name - end_ts - limit - order """ self.send_getTraceIdsBySpanName(service_name, span_name, end_ts, limit, order) return self.recv_getTraceIdsBySpanName()
[ "def", "getTraceIdsBySpanName", "(", "self", ",", "service_name", ",", "span_name", ",", "end_ts", ",", "limit", ",", "order", ")", ":", "self", ".", "send_getTraceIdsBySpanName", "(", "service_name", ",", "span_name", ",", "end_ts", ",", "limit", ",", "order", ")", "return", "self", ".", "recv_getTraceIdsBySpanName", "(", ")" ]
Fetch trace ids by service and span name. Gets "limit" number of entries from before the "end_ts". Span name is optional. Timestamps are in microseconds. Parameters: - service_name - span_name - end_ts - limit - order
[ "Fetch", "trace", "ids", "by", "service", "and", "span", "name", ".", "Gets", "limit", "number", "of", "entries", "from", "before", "the", "end_ts", "." ]
python
train
Metatab/metapack
metapack/terms.py
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L242-L254
def headers(self): """Return the headers for the resource. Returns the AltName, if specified; if not, then the Name, and if that is empty, a name based on the column position. These headers are specifically applicable to the output table, and may not apply to the resource source. FOr those headers, use source_headers""" t = self.schema_term if t: return [self._name_for_col_term(c, i) for i, c in enumerate(t.children, 1) if c.term_is("Table.Column")] else: return None
[ "def", "headers", "(", "self", ")", ":", "t", "=", "self", ".", "schema_term", "if", "t", ":", "return", "[", "self", ".", "_name_for_col_term", "(", "c", ",", "i", ")", "for", "i", ",", "c", "in", "enumerate", "(", "t", ".", "children", ",", "1", ")", "if", "c", ".", "term_is", "(", "\"Table.Column\"", ")", "]", "else", ":", "return", "None" ]
Return the headers for the resource. Returns the AltName, if specified; if not, then the Name, and if that is empty, a name based on the column position. These headers are specifically applicable to the output table, and may not apply to the resource source. For those headers, use source_headers
[ "Return", "the", "headers", "for", "the", "resource", ".", "Returns", "the", "AltName", "if", "specified", ";", "if", "not", "then", "the", "Name", "and", "if", "that", "is", "empty", "a", "name", "based", "on", "the", "column", "position", ".", "These", "headers", "are", "specifically", "applicable", "to", "the", "output", "table", "and", "may", "not", "apply", "to", "the", "resource", "source", ".", "FOr", "those", "headers", "use", "source_headers" ]
python
train
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L573-L584
def get_url_endpoint(self): """ Returns the Hypermap endpoint for a layer. This endpoint will be the WMTS MapProxy endpoint, only for WM we use the original endpoint. """ endpoint = self.url if self.type not in ('Hypermap:WorldMap',): endpoint = 'registry/%s/layer/%s/map/wmts/1.0.0/WMTSCapabilities.xml' % ( self.catalog.slug, self.id ) return endpoint
[ "def", "get_url_endpoint", "(", "self", ")", ":", "endpoint", "=", "self", ".", "url", "if", "self", ".", "type", "not", "in", "(", "'Hypermap:WorldMap'", ",", ")", ":", "endpoint", "=", "'registry/%s/layer/%s/map/wmts/1.0.0/WMTSCapabilities.xml'", "%", "(", "self", ".", "catalog", ".", "slug", ",", "self", ".", "id", ")", "return", "endpoint" ]
Returns the Hypermap endpoint for a layer. This endpoint will be the WMTS MapProxy endpoint, only for WM we use the original endpoint.
[ "Returns", "the", "Hypermap", "endpoint", "for", "a", "layer", ".", "This", "endpoint", "will", "be", "the", "WMTS", "MapProxy", "endpoint", "only", "for", "WM", "we", "use", "the", "original", "endpoint", "." ]
python
train
mesbahamin/chronophore
chronophore/controller.py
https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/controller.py#L202-L228
def undo_sign_in(entry, session=None): """Delete a signed in entry. :param entry: `models.Entry` object. The entry to delete. :param session: (optional) SQLAlchemy session through which to access the database. """ # noqa if session is None: session = Session() else: session = session entry_to_delete = ( session .query(Entry) .filter(Entry.uuid == entry.uuid) .one_or_none() ) if entry_to_delete: logger.info('Undo sign in: {}'.format(entry_to_delete.user_id)) logger.debug('Undo sign in: {}'.format(entry_to_delete)) session.delete(entry_to_delete) session.commit() else: error_message = 'Entry not found: {}'.format(entry) logger.error(error_message) raise ValueError(error_message)
[ "def", "undo_sign_in", "(", "entry", ",", "session", "=", "None", ")", ":", "# noqa", "if", "session", "is", "None", ":", "session", "=", "Session", "(", ")", "else", ":", "session", "=", "session", "entry_to_delete", "=", "(", "session", ".", "query", "(", "Entry", ")", ".", "filter", "(", "Entry", ".", "uuid", "==", "entry", ".", "uuid", ")", ".", "one_or_none", "(", ")", ")", "if", "entry_to_delete", ":", "logger", ".", "info", "(", "'Undo sign in: {}'", ".", "format", "(", "entry_to_delete", ".", "user_id", ")", ")", "logger", ".", "debug", "(", "'Undo sign in: {}'", ".", "format", "(", "entry_to_delete", ")", ")", "session", ".", "delete", "(", "entry_to_delete", ")", "session", ".", "commit", "(", ")", "else", ":", "error_message", "=", "'Entry not found: {}'", ".", "format", "(", "entry", ")", "logger", ".", "error", "(", "error_message", ")", "raise", "ValueError", "(", "error_message", ")" ]
Delete a signed in entry. :param entry: `models.Entry` object. The entry to delete. :param session: (optional) SQLAlchemy session through which to access the database.
[ "Delete", "a", "signed", "in", "entry", "." ]
python
train
Alignak-monitoring/alignak
alignak/objects/satellitelink.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L703-L740
def update_infos(self, forced=False, test=False): """Update satellite info each self.polling_interval seconds so we smooth arbiter actions for just useful actions. Raise a satellite update status Brok If forced is True, then ignore the ping period. This is used when the configuration has not yet been dispatched to the Arbiter satellites. If test is True, do not really ping the daemon (useful for the unit tests only) :param forced: ignore the ping smoothing :type forced: bool :param test: :type test: bool :return: None if the last request is too recent, False if a timeout was raised during the request, else the managed configurations dictionary """ logger.debug("Update informations, forced: %s", forced) # First look if it's not too early to ping now = time.time() if not forced and self.last_check and self.last_check + self.polling_interval > now: logger.debug("Too early to ping %s, ping period is %ds!, last check: %d, now: %d", self.name, self.polling_interval, self.last_check, now) return None self.get_conf(test=test) # Update the daemon last check timestamp self.last_check = time.time() # Update the state of this element self.broks.append(self.get_update_status_brok()) return self.cfg_managed
[ "def", "update_infos", "(", "self", ",", "forced", "=", "False", ",", "test", "=", "False", ")", ":", "logger", ".", "debug", "(", "\"Update informations, forced: %s\"", ",", "forced", ")", "# First look if it's not too early to ping", "now", "=", "time", ".", "time", "(", ")", "if", "not", "forced", "and", "self", ".", "last_check", "and", "self", ".", "last_check", "+", "self", ".", "polling_interval", ">", "now", ":", "logger", ".", "debug", "(", "\"Too early to ping %s, ping period is %ds!, last check: %d, now: %d\"", ",", "self", ".", "name", ",", "self", ".", "polling_interval", ",", "self", ".", "last_check", ",", "now", ")", "return", "None", "self", ".", "get_conf", "(", "test", "=", "test", ")", "# Update the daemon last check timestamp", "self", ".", "last_check", "=", "time", ".", "time", "(", ")", "# Update the state of this element", "self", ".", "broks", ".", "append", "(", "self", ".", "get_update_status_brok", "(", ")", ")", "return", "self", ".", "cfg_managed" ]
Update satellite info each self.polling_interval seconds so we smooth arbiter actions for just useful actions. Raise a satellite update status Brok If forced is True, then ignore the ping period. This is used when the configuration has not yet been dispatched to the Arbiter satellites. If test is True, do not really ping the daemon (useful for the unit tests only) :param forced: ignore the ping smoothing :type forced: bool :param test: :type test: bool :return: None if the last request is too recent, False if a timeout was raised during the request, else the managed configurations dictionary
[ "Update", "satellite", "info", "each", "self", ".", "polling_interval", "seconds", "so", "we", "smooth", "arbiter", "actions", "for", "just", "useful", "actions", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/interactive.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/interactive.py#L1301-L1312
def do_continue(self, arg): """ continue - continue execution g - continue execution go - continue execution """ if self.cmdprefix: raise CmdError("prefix not allowed") if arg: raise CmdError("too many arguments") if self.debug.get_debugee_count() > 0: return True
[ "def", "do_continue", "(", "self", ",", "arg", ")", ":", "if", "self", ".", "cmdprefix", ":", "raise", "CmdError", "(", "\"prefix not allowed\"", ")", "if", "arg", ":", "raise", "CmdError", "(", "\"too many arguments\"", ")", "if", "self", ".", "debug", ".", "get_debugee_count", "(", ")", ">", "0", ":", "return", "True" ]
continue - continue execution g - continue execution go - continue execution
[ "continue", "-", "continue", "execution", "g", "-", "continue", "execution", "go", "-", "continue", "execution" ]
python
train
gwastro/pycbc
pycbc/workflow/psd.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/psd.py#L31-L37
def chunks(l, n): """ Yield n successive chunks from l. """ newn = int(len(l) / n) for i in xrange(0, n-1): yield l[i*newn:i*newn+newn] yield l[n*newn-newn:]
[ "def", "chunks", "(", "l", ",", "n", ")", ":", "newn", "=", "int", "(", "len", "(", "l", ")", "/", "n", ")", "for", "i", "in", "xrange", "(", "0", ",", "n", "-", "1", ")", ":", "yield", "l", "[", "i", "*", "newn", ":", "i", "*", "newn", "+", "newn", "]", "yield", "l", "[", "n", "*", "newn", "-", "newn", ":", "]" ]
Yield n successive chunks from l.
[ "Yield", "n", "successive", "chunks", "from", "l", "." ]
python
train
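A Python 3 rendering of the same splitter (the original relies on Python 2's xrange): each of the first n-1 chunks holds len(l)//n items and the final chunk absorbs any remainder.

def chunks(l, n):
    newn = len(l) // n
    for i in range(n - 1):
        yield l[i * newn:i * newn + newn]
    yield l[n * newn - newn:]

print(list(chunks(list(range(10)), 3)))
# [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]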
gmr/tredis
tredis/hashes.py
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/hashes.py#L139-L160
def hdel(self, key, *fields): """ Remove the specified fields from the hash stored at `key`. Specified fields that do not exist within this hash are ignored. If `key` does not exist, it is treated as an empty hash and this command returns zero. :param key: The key of the hash :type key: :class:`str`, :class:`bytes` :param fields: iterable of field names to retrieve :returns: the number of fields that were removed from the hash, not including specified by non-existing fields. :rtype: int """ if not fields: future = concurrent.TracebackFuture() future.set_result(0) else: future = self._execute([b'HDEL', key] + list(fields)) return future
[ "def", "hdel", "(", "self", ",", "key", ",", "*", "fields", ")", ":", "if", "not", "fields", ":", "future", "=", "concurrent", ".", "TracebackFuture", "(", ")", "future", ".", "set_result", "(", "0", ")", "else", ":", "future", "=", "self", ".", "_execute", "(", "[", "b'HDEL'", ",", "key", "]", "+", "list", "(", "fields", ")", ")", "return", "future" ]
Remove the specified fields from the hash stored at `key`. Specified fields that do not exist within this hash are ignored. If `key` does not exist, it is treated as an empty hash and this command returns zero. :param key: The key of the hash :type key: :class:`str`, :class:`bytes` :param fields: iterable of field names to retrieve :returns: the number of fields that were removed from the hash, not including specified by non-existing fields. :rtype: int
[ "Remove", "the", "specified", "fields", "from", "the", "hash", "stored", "at", "key", "." ]
python
train
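A plain-dict sketch of HDEL's semantics without the Tornado future plumbing: remove the named fields, report how many actually existed, and short-circuit to zero when no fields are given. The key and field names are illustrative.

def hdel(hashes, key, *fields):
    if not fields:
        return 0                      # mirrors the early-resolved future above
    stored = hashes.get(key, {})
    return sum(1 for f in fields if stored.pop(f, None) is not None)

hashes = {'user:42': {'name': 'Ada', 'token': 'abc'}}
print(hdel(hashes, 'user:42', 'token', 'missing'))  # 1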
google/apitools
apitools/base/py/batch.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/batch.py#L294-L317
def _ConvertHeaderToId(header): """Convert a Content-ID header value to an id. Presumes the Content-ID header conforms to the format that _ConvertIdToHeader() returns. Args: header: A string indicating the Content-ID header value. Returns: The extracted id value. Raises: BatchError if the header is not in the expected format. """ if not (header.startswith('<') or header.endswith('>')): raise exceptions.BatchError( 'Invalid value for Content-ID: %s' % header) if '+' not in header: raise exceptions.BatchError( 'Invalid value for Content-ID: %s' % header) _, request_id = header[1:-1].rsplit('+', 1) return urllib_parse.unquote(request_id)
[ "def", "_ConvertHeaderToId", "(", "header", ")", ":", "if", "not", "(", "header", ".", "startswith", "(", "'<'", ")", "or", "header", ".", "endswith", "(", "'>'", ")", ")", ":", "raise", "exceptions", ".", "BatchError", "(", "'Invalid value for Content-ID: %s'", "%", "header", ")", "if", "'+'", "not", "in", "header", ":", "raise", "exceptions", ".", "BatchError", "(", "'Invalid value for Content-ID: %s'", "%", "header", ")", "_", ",", "request_id", "=", "header", "[", "1", ":", "-", "1", "]", ".", "rsplit", "(", "'+'", ",", "1", ")", "return", "urllib_parse", ".", "unquote", "(", "request_id", ")" ]
Convert a Content-ID header value to an id. Presumes the Content-ID header conforms to the format that _ConvertIdToHeader() returns. Args: header: A string indicating the Content-ID header value. Returns: The extracted id value. Raises: BatchError if the header is not in the expected format.
[ "Convert", "a", "Content", "-", "ID", "header", "value", "to", "an", "id", "." ]
python
train
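A round-trip sketch of the Content-ID convention the parser expects, "<uuid+quoted-id>", where only the text after the last "+" is the request id; the encoder shown here is a guess at the counterpart _ConvertIdToHeader, not its actual source.

from urllib.parse import quote, unquote

def convert_id_to_header(batch_uuid, request_id):
    return '<%s+%s>' % (batch_uuid, quote(request_id))

def convert_header_to_id(header):
    if not (header.startswith('<') and header.endswith('>')) or '+' not in header:
        raise ValueError('Invalid value for Content-ID: %s' % header)
    _, request_id = header[1:-1].rsplit('+', 1)
    return unquote(request_id)

header = convert_id_to_header('barrier', 'item/42')
print(header, '->', convert_header_to_id(header))
# <barrier+item%2F42> -> item/42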
optimizely/python-sdk
optimizely/helpers/condition.py
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/condition.py#L47-L64
def _get_condition_json(self, index): """ Method to generate json for logging audience condition. Args: index: Index of the condition. Returns: String: Audience condition JSON. """ condition = self.condition_data[index] condition_log = { 'name': condition[0], 'value': condition[1], 'type': condition[2], 'match': condition[3] } return json.dumps(condition_log)
[ "def", "_get_condition_json", "(", "self", ",", "index", ")", ":", "condition", "=", "self", ".", "condition_data", "[", "index", "]", "condition_log", "=", "{", "'name'", ":", "condition", "[", "0", "]", ",", "'value'", ":", "condition", "[", "1", "]", ",", "'type'", ":", "condition", "[", "2", "]", ",", "'match'", ":", "condition", "[", "3", "]", "}", "return", "json", ".", "dumps", "(", "condition_log", ")" ]
Method to generate json for logging audience condition. Args: index: Index of the condition. Returns: String: Audience condition JSON.
[ "Method", "to", "generate", "json", "for", "logging", "audience", "condition", "." ]
python
train
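Each audience condition is just a (name, value, type, match) tuple; a standalone sketch of the logging serialisation with made-up condition data:

import json

condition_data = [('device_type', 'iphone', 'custom_attribute', 'exact')]

def condition_json(index):
    name, value, ctype, match = condition_data[index]
    return json.dumps({'name': name, 'value': value, 'type': ctype, 'match': match})

print(condition_json(0))
# {"name": "device_type", "value": "iphone", "type": "custom_attribute", "match": "exact"}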
numenta/nupic
src/nupic/algorithms/spatial_pooler.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/spatial_pooler.py#L937-L951
def stripUnlearnedColumns(self, activeArray): """ Removes the set of columns who have never been active from the set of active columns selected in the inhibition round. Such columns cannot represent learned pattern and are therefore meaningless if only inference is required. This should not be done when using a random, unlearned SP since you would end up with no active columns. :param activeArray: An array whose size is equal to the number of columns. Any columns marked as active with an activeDutyCycle of 0 have never been activated before and therefore are not active due to learning. Any of these (unlearned) columns will be disabled (set to 0). """ neverLearned = numpy.where(self._activeDutyCycles == 0)[0] activeArray[neverLearned] = 0
[ "def", "stripUnlearnedColumns", "(", "self", ",", "activeArray", ")", ":", "neverLearned", "=", "numpy", ".", "where", "(", "self", ".", "_activeDutyCycles", "==", "0", ")", "[", "0", "]", "activeArray", "[", "neverLearned", "]", "=", "0" ]
Removes the set of columns who have never been active from the set of active columns selected in the inhibition round. Such columns cannot represent learned pattern and are therefore meaningless if only inference is required. This should not be done when using a random, unlearned SP since you would end up with no active columns. :param activeArray: An array whose size is equal to the number of columns. Any columns marked as active with an activeDutyCycle of 0 have never been activated before and therefore are not active due to learning. Any of these (unlearned) columns will be disabled (set to 0).
[ "Removes", "the", "set", "of", "columns", "who", "have", "never", "been", "active", "from", "the", "set", "of", "active", "columns", "selected", "in", "the", "inhibition", "round", ".", "Such", "columns", "cannot", "represent", "learned", "pattern", "and", "are", "therefore", "meaningless", "if", "only", "inference", "is", "required", ".", "This", "should", "not", "be", "done", "when", "using", "a", "random", "unlearned", "SP", "since", "you", "would", "end", "up", "with", "no", "active", "columns", "." ]
python
valid
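The masking step in isolation, as a small NumPy sketch: columns whose duty cycle is still exactly zero have never been active, so they are cleared from the active array in place. The arrays are toy values.

import numpy as np

active_duty_cycles = np.array([0.0, 0.2, 0.0, 0.5])
active_array = np.array([1, 1, 1, 0])

never_learned = np.where(active_duty_cycles == 0)[0]   # column indices never active
active_array[never_learned] = 0
print(active_array)   # [0 1 0 0]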
incuna/django-user-management
user_management/api/views.py
https://github.com/incuna/django-user-management/blob/6784e33191d4eff624d2cf2df9ca01db4f23c9c6/user_management/api/views.py#L49-L75
def delete(self, request, *args, **kwargs): """Delete auth token when `delete` request was issued.""" # Logic repeated from DRF because one cannot easily reuse it auth = get_authorization_header(request).split() if not auth or auth[0].lower() != b'token': return response.Response(status=status.HTTP_400_BAD_REQUEST) if len(auth) == 1: msg = 'Invalid token header. No credentials provided.' return response.Response(msg, status=status.HTTP_400_BAD_REQUEST) elif len(auth) > 2: msg = 'Invalid token header. Token string should not contain spaces.' return response.Response(msg, status=status.HTTP_400_BAD_REQUEST) try: token = self.model.objects.get(key=auth[1]) except self.model.DoesNotExist: pass else: token.delete() signals.user_logged_out.send( type(self), user=token.user, request=request, ) return response.Response(status=status.HTTP_204_NO_CONTENT)
[ "def", "delete", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Logic repeated from DRF because one cannot easily reuse it", "auth", "=", "get_authorization_header", "(", "request", ")", ".", "split", "(", ")", "if", "not", "auth", "or", "auth", "[", "0", "]", ".", "lower", "(", ")", "!=", "b'token'", ":", "return", "response", ".", "Response", "(", "status", "=", "status", ".", "HTTP_400_BAD_REQUEST", ")", "if", "len", "(", "auth", ")", "==", "1", ":", "msg", "=", "'Invalid token header. No credentials provided.'", "return", "response", ".", "Response", "(", "msg", ",", "status", "=", "status", ".", "HTTP_400_BAD_REQUEST", ")", "elif", "len", "(", "auth", ")", ">", "2", ":", "msg", "=", "'Invalid token header. Token string should not contain spaces.'", "return", "response", ".", "Response", "(", "msg", ",", "status", "=", "status", ".", "HTTP_400_BAD_REQUEST", ")", "try", ":", "token", "=", "self", ".", "model", ".", "objects", ".", "get", "(", "key", "=", "auth", "[", "1", "]", ")", "except", "self", ".", "model", ".", "DoesNotExist", ":", "pass", "else", ":", "token", ".", "delete", "(", ")", "signals", ".", "user_logged_out", ".", "send", "(", "type", "(", "self", ")", ",", "user", "=", "token", ".", "user", ",", "request", "=", "request", ",", ")", "return", "response", ".", "Response", "(", "status", "=", "status", ".", "HTTP_204_NO_CONTENT", ")" ]
Delete auth token when `delete` request was issued.
[ "Delete", "auth", "token", "when", "delete", "request", "was", "issued", "." ]
python
test
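The header validation can be followed without Django or DRF. A sketch of the same checks on a raw Authorization header value (bytes in, token key or error message out); the sample key is made up.

def parse_token_header(header_value):
    auth = header_value.split()
    if not auth or auth[0].lower() != b'token':
        return None, 'missing or unrecognised auth scheme'
    if len(auth) == 1:
        return None, 'Invalid token header. No credentials provided.'
    if len(auth) > 2:
        return None, 'Invalid token header. Token string should not contain spaces.'
    return auth[1], None

print(parse_token_header(b'Token 9944b09199c62bcf9418ad846dd0e4bb'))
# (b'9944b09199c62bcf9418ad846dd0e4bb', None)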
gem/oq-engine
openquake/hazardlib/gsim/gmpe_table.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/gmpe_table.py#L306-L342
def init(self, fle=None): """ Executes the preprocessing steps at the instantiation stage to read in the tables from hdf5 and hold them in memory. """ if fle is None: fname = self.kwargs.get('gmpe_table', self.GMPE_TABLE) if fname is None: raise ValueError('You forgot to set GMPETable.GMPE_TABLE!') elif os.path.isabs(fname): self.GMPE_TABLE = fname else: # NB: (hackish) GMPE_DIR must be set externally self.GMPE_TABLE = os.path.abspath( os.path.join(self.GMPE_DIR, fname)) fle = h5py.File(self.GMPE_TABLE, "r") try: # this is the format inside the datastore self.distance_type = fle["distance_type"].value except KeyError: # this is the original format outside the datastore self.distance_type = decode(fle["Distances"].attrs["metric"]) self.REQUIRES_DISTANCES = set([self.distance_type]) # Load in magnitude self.m_w = fle["Mw"][:] # Load in distances self.distances = fle["Distances"][:] # Load intensity measure types and levels self.imls = hdf_arrays_to_dict(fle["IMLs"]) self.DEFINED_FOR_INTENSITY_MEASURE_TYPES = set(self._supported_imts()) if "SA" in self.imls and "T" not in self.imls: raise ValueError("Spectral Acceleration must be accompanied by " "periods") # Get the standard deviations self._setup_standard_deviations(fle) if "Amplification" in fle: self._setup_amplification(fle)
[ "def", "init", "(", "self", ",", "fle", "=", "None", ")", ":", "if", "fle", "is", "None", ":", "fname", "=", "self", ".", "kwargs", ".", "get", "(", "'gmpe_table'", ",", "self", ".", "GMPE_TABLE", ")", "if", "fname", "is", "None", ":", "raise", "ValueError", "(", "'You forgot to set GMPETable.GMPE_TABLE!'", ")", "elif", "os", ".", "path", ".", "isabs", "(", "fname", ")", ":", "self", ".", "GMPE_TABLE", "=", "fname", "else", ":", "# NB: (hackish) GMPE_DIR must be set externally", "self", ".", "GMPE_TABLE", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "self", ".", "GMPE_DIR", ",", "fname", ")", ")", "fle", "=", "h5py", ".", "File", "(", "self", ".", "GMPE_TABLE", ",", "\"r\"", ")", "try", ":", "# this is the format inside the datastore", "self", ".", "distance_type", "=", "fle", "[", "\"distance_type\"", "]", ".", "value", "except", "KeyError", ":", "# this is the original format outside the datastore", "self", ".", "distance_type", "=", "decode", "(", "fle", "[", "\"Distances\"", "]", ".", "attrs", "[", "\"metric\"", "]", ")", "self", ".", "REQUIRES_DISTANCES", "=", "set", "(", "[", "self", ".", "distance_type", "]", ")", "# Load in magnitude", "self", ".", "m_w", "=", "fle", "[", "\"Mw\"", "]", "[", ":", "]", "# Load in distances", "self", ".", "distances", "=", "fle", "[", "\"Distances\"", "]", "[", ":", "]", "# Load intensity measure types and levels", "self", ".", "imls", "=", "hdf_arrays_to_dict", "(", "fle", "[", "\"IMLs\"", "]", ")", "self", ".", "DEFINED_FOR_INTENSITY_MEASURE_TYPES", "=", "set", "(", "self", ".", "_supported_imts", "(", ")", ")", "if", "\"SA\"", "in", "self", ".", "imls", "and", "\"T\"", "not", "in", "self", ".", "imls", ":", "raise", "ValueError", "(", "\"Spectral Acceleration must be accompanied by \"", "\"periods\"", ")", "# Get the standard deviations", "self", ".", "_setup_standard_deviations", "(", "fle", ")", "if", "\"Amplification\"", "in", "fle", ":", "self", ".", "_setup_amplification", "(", "fle", ")" ]
Executes the preprocessing steps at the instantiation stage to read in the tables from hdf5 and hold them in memory.
[ "Executes", "the", "preprocessing", "steps", "at", "the", "instantiation", "stage", "to", "read", "in", "the", "tables", "from", "hdf5", "and", "hold", "them", "in", "memory", "." ]
python
train
wbond/oscrypto
oscrypto/_win/tls.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_win/tls.py#L327-L387
def wrap(cls, socket, hostname, session=None): """ Takes an existing socket and adds TLS :param socket: A socket.socket object to wrap with TLS :param hostname: A unicode string of the hostname or IP the socket is connected to :param session: An existing TLSSession object to allow for session reuse, specific protocol or manual certificate validation :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library """ if not isinstance(socket, socket_.socket): raise TypeError(pretty_message( ''' socket must be an instance of socket.socket, not %s ''', type_name(socket) )) if not isinstance(hostname, str_cls): raise TypeError(pretty_message( ''' hostname must be a unicode string, not %s ''', type_name(hostname) )) if session is not None and not isinstance(session, TLSSession): raise TypeError(pretty_message( ''' session must be an instance of oscrypto.tls.TLSSession, not %s ''', type_name(session) )) new_socket = cls(None, None, session=session) new_socket._socket = socket new_socket._hostname = hostname # Since we don't create the socket connection here, we can't try to # reconnect with a lower version of the TLS protocol, so we just # move the data to public exception type TLSVerificationError() try: new_socket._handshake() except (_TLSDowngradeError) as e: new_e = TLSVerificationError(e.message, e.certificate) raise new_e except (_TLSRetryError) as e: new_e = TLSError(e.message) raise new_e return new_socket
[ "def", "wrap", "(", "cls", ",", "socket", ",", "hostname", ",", "session", "=", "None", ")", ":", "if", "not", "isinstance", "(", "socket", ",", "socket_", ".", "socket", ")", ":", "raise", "TypeError", "(", "pretty_message", "(", "'''\n socket must be an instance of socket.socket, not %s\n '''", ",", "type_name", "(", "socket", ")", ")", ")", "if", "not", "isinstance", "(", "hostname", ",", "str_cls", ")", ":", "raise", "TypeError", "(", "pretty_message", "(", "'''\n hostname must be a unicode string, not %s\n '''", ",", "type_name", "(", "hostname", ")", ")", ")", "if", "session", "is", "not", "None", "and", "not", "isinstance", "(", "session", ",", "TLSSession", ")", ":", "raise", "TypeError", "(", "pretty_message", "(", "'''\n session must be an instance of oscrypto.tls.TLSSession, not %s\n '''", ",", "type_name", "(", "session", ")", ")", ")", "new_socket", "=", "cls", "(", "None", ",", "None", ",", "session", "=", "session", ")", "new_socket", ".", "_socket", "=", "socket", "new_socket", ".", "_hostname", "=", "hostname", "# Since we don't create the socket connection here, we can't try to", "# reconnect with a lower version of the TLS protocol, so we just", "# move the data to public exception type TLSVerificationError()", "try", ":", "new_socket", ".", "_handshake", "(", ")", "except", "(", "_TLSDowngradeError", ")", "as", "e", ":", "new_e", "=", "TLSVerificationError", "(", "e", ".", "message", ",", "e", ".", "certificate", ")", "raise", "new_e", "except", "(", "_TLSRetryError", ")", "as", "e", ":", "new_e", "=", "TLSError", "(", "e", ".", "message", ")", "raise", "new_e", "return", "new_socket" ]
Takes an existing socket and adds TLS :param socket: A socket.socket object to wrap with TLS :param hostname: A unicode string of the hostname or IP the socket is connected to :param session: An existing TLSSession object to allow for session reuse, specific protocol or manual certificate validation :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library
[ "Takes", "an", "existing", "socket", "and", "adds", "TLS" ]
python
valid
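A brief usage sketch for the TLSSocket.wrap classmethod shown in the record above. It relies only on the signature visible in the snippet; the module path oscrypto.tls and the read/write/close calls are assumptions about the library, not something this record confirms.
import socket
from oscrypto.tls import TLSSocket

# Open a plain TCP connection, then hand it to wrap() which performs the TLS handshake.
plain = socket.create_connection(('example.com', 443))
tls = TLSSocket.wrap(plain, 'example.com')
tls.write(b'HEAD / HTTP/1.1\r\nHost: example.com\r\nConnection: close\r\n\r\n')
print(tls.read(1024))  # first chunk of the HTTP response
tls.close()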
openstack/networking-hyperv
networking_hyperv/neutron/qos/qos_driver.py
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/qos/qos_driver.py#L59-L68
def delete(self, port, qos_policy=None): """Remove QoS rules from port. :param port: port object. :param qos_policy: the QoS policy to be removed from port. """ LOG.info("Deleting QoS policy %(qos_policy)s on port %(port)s", dict(qos_policy=qos_policy, port=port)) self._utils.remove_port_qos_rule(port["port_id"])
[ "def", "delete", "(", "self", ",", "port", ",", "qos_policy", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"Deleting QoS policy %(qos_policy)s on port %(port)s\"", ",", "dict", "(", "qos_policy", "=", "qos_policy", ",", "port", "=", "port", ")", ")", "self", ".", "_utils", ".", "remove_port_qos_rule", "(", "port", "[", "\"port_id\"", "]", ")" ]
Remove QoS rules from port. :param port: port object. :param qos_policy: the QoS policy to be removed from port.
[ "Remove", "QoS", "rules", "from", "port", "." ]
python
train
spotify/luigi
luigi/setup_logging.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/setup_logging.py#L52-L93
def setup(cls, opts=type('opts', (), { 'background': None, 'logdir': None, 'logging_conf_file': None, 'log_level': 'DEBUG' })): """Setup logging via CLI params and config.""" logger = logging.getLogger('luigi') if cls._configured: logger.info('logging already configured') return False cls._configured = True if cls.config.getboolean('core', 'no_configure_logging', False): logger.info('logging disabled in settings') return False configured = cls._cli(opts) if configured: logger = logging.getLogger('luigi') logger.info('logging configured via special settings') return True configured = cls._conf(opts) if configured: logger = logging.getLogger('luigi') logger.info('logging configured via *.conf file') return True configured = cls._section(opts) if configured: logger = logging.getLogger('luigi') logger.info('logging configured via config section') return True configured = cls._default(opts) if configured: logger = logging.getLogger('luigi') logger.info('logging configured by default settings') return configured
[ "def", "setup", "(", "cls", ",", "opts", "=", "type", "(", "'opts'", ",", "(", ")", ",", "{", "'background'", ":", "None", ",", "'logdir'", ":", "None", ",", "'logging_conf_file'", ":", "None", ",", "'log_level'", ":", "'DEBUG'", "}", ")", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "'luigi'", ")", "if", "cls", ".", "_configured", ":", "logger", ".", "info", "(", "'logging already configured'", ")", "return", "False", "cls", ".", "_configured", "=", "True", "if", "cls", ".", "config", ".", "getboolean", "(", "'core'", ",", "'no_configure_logging'", ",", "False", ")", ":", "logger", ".", "info", "(", "'logging disabled in settings'", ")", "return", "False", "configured", "=", "cls", ".", "_cli", "(", "opts", ")", "if", "configured", ":", "logger", "=", "logging", ".", "getLogger", "(", "'luigi'", ")", "logger", ".", "info", "(", "'logging configured via special settings'", ")", "return", "True", "configured", "=", "cls", ".", "_conf", "(", "opts", ")", "if", "configured", ":", "logger", "=", "logging", ".", "getLogger", "(", "'luigi'", ")", "logger", ".", "info", "(", "'logging configured via *.conf file'", ")", "return", "True", "configured", "=", "cls", ".", "_section", "(", "opts", ")", "if", "configured", ":", "logger", "=", "logging", ".", "getLogger", "(", "'luigi'", ")", "logger", ".", "info", "(", "'logging configured via config section'", ")", "return", "True", "configured", "=", "cls", ".", "_default", "(", "opts", ")", "if", "configured", ":", "logger", "=", "logging", ".", "getLogger", "(", "'luigi'", ")", "logger", ".", "info", "(", "'logging configured by default settings'", ")", "return", "configured" ]
Setup logging via CLI params and config.
[ "Setup", "logging", "via", "CLI", "params", "and", "config", "." ]
python
train
pyvisa/pyvisa
pyvisa/rname.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/rname.py#L222-L344
def build_rn_class(interface_type, resource_parts, resource_class, is_rc_optional=True): """Builds a resource name class by mixing a named tuple and ResourceName. It also registers the class. The field names are changed to lower case and the spaces replaced by underscores ('_'). :param interface_type: the interface type :type: interface_type: str :param resource_parts: each of the parts of the resource name indicating name and default value. Use None for mandatory fields. :type resource_parts: tuple[(str, str)] :param resource_class: the resource class :type resource_class: str :param is_rc_optional: indicates if the resource class part is optional :type is_rc_optional: boolean. """ interface_type = interface_type.upper() resource_class = resource_class.upper() syntax = interface_type fmt = interface_type fields = [] # Contains the resource parts but using python friendly names # (all lower case and replacing spaces by underscores) p_resource_parts = [] kwdoc = [] # Assemble the syntax and format string based on the resource parts for ndx, (name, default_value) in enumerate(resource_parts): pname = name.lower().replace(' ', '_') fields.append(pname) p_resource_parts.append((pname, default_value)) sep = '::' if ndx else '' fmt += sep + '{0.%s}' % pname if default_value is None: syntax += sep + name else: syntax += '[' + sep + name + ']' kwdoc.append('- %s (%s)' % (pname, 'required' if default_value is None else default_value)) fmt += '::' + resource_class if not is_rc_optional: syntax += '::' + resource_class else: syntax += '[' + '::' + resource_class + ']' class _C(namedtuple('Internal', ' '.join(fields)), ResourceName): """%s %s" Can be created with the following keyword only arguments: %s Format : %s """ % (resource_class, interface_type, ' \n'.join(kwdoc), syntax) def __new__(cls, **kwargs): new_kwargs = dict(p_resource_parts, **kwargs) for key, value in new_kwargs.items(): if value is None: raise ValueError(key + ' is a required parameter') return super(_C, cls).__new__(cls, **new_kwargs) @classmethod def from_parts(cls, *parts): if len(parts) < sum(1 for _, v in p_resource_parts if v is not None): raise ValueError('not enough parts') elif len(parts) > len(p_resource_parts): raise ValueError('too many parts') (k, default), rp = p_resource_parts[0], p_resource_parts[1:] # The first part (just after the interface_type) is the only # optional part which can be and empty and therefore the # default value should be used. p, pending = parts[0], parts[1:] kwargs = {k: default if p == '' else p} # The rest of the parts are consumed when mandatory elements are required. while len(pending) < len(rp): (k, default), rp = rp[0], rp[1:] if default is None: if not parts: raise ValueError(k + ' part is mandatory') p, pending = pending[0], pending[1:] if not p: raise ValueError(k + ' part is mandatory') kwargs[k] = p else: kwargs[k] = default # When the length of the pending provided and resource parts # are equal, we just consume everything. kwargs.update((k, p) for (k, v), p in zip(rp, pending)) return cls(**kwargs) _C.interface_type = interface_type _C.resource_class = resource_class _C.is_rc_optional = is_rc_optional _C._canonical_fmt = fmt _C._visa_syntax = syntax _C.__name__ = str(interface_type + resource_class.title()) return register_subclass(_C)
[ "def", "build_rn_class", "(", "interface_type", ",", "resource_parts", ",", "resource_class", ",", "is_rc_optional", "=", "True", ")", ":", "interface_type", "=", "interface_type", ".", "upper", "(", ")", "resource_class", "=", "resource_class", ".", "upper", "(", ")", "syntax", "=", "interface_type", "fmt", "=", "interface_type", "fields", "=", "[", "]", "# Contains the resource parts but using python friendly names", "# (all lower case and replacing spaces by underscores)", "p_resource_parts", "=", "[", "]", "kwdoc", "=", "[", "]", "# Assemble the syntax and format string based on the resource parts", "for", "ndx", ",", "(", "name", ",", "default_value", ")", "in", "enumerate", "(", "resource_parts", ")", ":", "pname", "=", "name", ".", "lower", "(", ")", ".", "replace", "(", "' '", ",", "'_'", ")", "fields", ".", "append", "(", "pname", ")", "p_resource_parts", ".", "append", "(", "(", "pname", ",", "default_value", ")", ")", "sep", "=", "'::'", "if", "ndx", "else", "''", "fmt", "+=", "sep", "+", "'{0.%s}'", "%", "pname", "if", "default_value", "is", "None", ":", "syntax", "+=", "sep", "+", "name", "else", ":", "syntax", "+=", "'['", "+", "sep", "+", "name", "+", "']'", "kwdoc", ".", "append", "(", "'- %s (%s)'", "%", "(", "pname", ",", "'required'", "if", "default_value", "is", "None", "else", "default_value", ")", ")", "fmt", "+=", "'::'", "+", "resource_class", "if", "not", "is_rc_optional", ":", "syntax", "+=", "'::'", "+", "resource_class", "else", ":", "syntax", "+=", "'['", "+", "'::'", "+", "resource_class", "+", "']'", "class", "_C", "(", "namedtuple", "(", "'Internal'", ",", "' '", ".", "join", "(", "fields", ")", ")", ",", "ResourceName", ")", ":", "\"\"\"%s %s\"\n\n Can be created with the following keyword only arguments:\n %s\n\n Format :\n %s\n \"\"\"", "%", "(", "resource_class", ",", "interface_type", ",", "' \\n'", ".", "join", "(", "kwdoc", ")", ",", "syntax", ")", "def", "__new__", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "new_kwargs", "=", "dict", "(", "p_resource_parts", ",", "*", "*", "kwargs", ")", "for", "key", ",", "value", "in", "new_kwargs", ".", "items", "(", ")", ":", "if", "value", "is", "None", ":", "raise", "ValueError", "(", "key", "+", "' is a required parameter'", ")", "return", "super", "(", "_C", ",", "cls", ")", ".", "__new__", "(", "cls", ",", "*", "*", "new_kwargs", ")", "@", "classmethod", "def", "from_parts", "(", "cls", ",", "*", "parts", ")", ":", "if", "len", "(", "parts", ")", "<", "sum", "(", "1", "for", "_", ",", "v", "in", "p_resource_parts", "if", "v", "is", "not", "None", ")", ":", "raise", "ValueError", "(", "'not enough parts'", ")", "elif", "len", "(", "parts", ")", ">", "len", "(", "p_resource_parts", ")", ":", "raise", "ValueError", "(", "'too many parts'", ")", "(", "k", ",", "default", ")", ",", "rp", "=", "p_resource_parts", "[", "0", "]", ",", "p_resource_parts", "[", "1", ":", "]", "# The first part (just after the interface_type) is the only", "# optional part which can be and empty and therefore the", "# default value should be used.", "p", ",", "pending", "=", "parts", "[", "0", "]", ",", "parts", "[", "1", ":", "]", "kwargs", "=", "{", "k", ":", "default", "if", "p", "==", "''", "else", "p", "}", "# The rest of the parts are consumed when mandatory elements are required.", "while", "len", "(", "pending", ")", "<", "len", "(", "rp", ")", ":", "(", "k", ",", "default", ")", ",", "rp", "=", "rp", "[", "0", "]", ",", "rp", "[", "1", ":", "]", "if", "default", "is", "None", ":", "if", "not", "parts", ":", "raise", 
"ValueError", "(", "k", "+", "' part is mandatory'", ")", "p", ",", "pending", "=", "pending", "[", "0", "]", ",", "pending", "[", "1", ":", "]", "if", "not", "p", ":", "raise", "ValueError", "(", "k", "+", "' part is mandatory'", ")", "kwargs", "[", "k", "]", "=", "p", "else", ":", "kwargs", "[", "k", "]", "=", "default", "# When the length of the pending provided and resource parts", "# are equal, we just consume everything.", "kwargs", ".", "update", "(", "(", "k", ",", "p", ")", "for", "(", "k", ",", "v", ")", ",", "p", "in", "zip", "(", "rp", ",", "pending", ")", ")", "return", "cls", "(", "*", "*", "kwargs", ")", "_C", ".", "interface_type", "=", "interface_type", "_C", ".", "resource_class", "=", "resource_class", "_C", ".", "is_rc_optional", "=", "is_rc_optional", "_C", ".", "_canonical_fmt", "=", "fmt", "_C", ".", "_visa_syntax", "=", "syntax", "_C", ".", "__name__", "=", "str", "(", "interface_type", "+", "resource_class", ".", "title", "(", ")", ")", "return", "register_subclass", "(", "_C", ")" ]
Builds a resource name class by mixing a named tuple and ResourceName. It also registers the class. The field names are changed to lower case and the spaces replaced by underscores ('_'). :param interface_type: the interface type :type: interface_type: str :param resource_parts: each of the parts of the resource name indicating name and default value. Use None for mandatory fields. :type resource_parts: tuple[(str, str)] :param resource_class: the resource class :type resource_class: str :param is_rc_optional: indicates if the resource class part is optional :type is_rc_optional: boolean.
[ "Builds", "a", "resource", "name", "class", "by", "mixing", "a", "named", "tuple", "and", "ResourceName", "." ]
python
train
davenquinn/Attitude
attitude/geom/util.py
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/geom/util.py#L55-L64
def angle(v1,v2, cos=False): """ Find the angle between two vectors. :param cos: If True, the cosine of the angle will be returned. False by default. """ n = (norm(v1)*norm(v2)) _ = dot(v1,v2)/n return _ if cos else N.arccos(_)
[ "def", "angle", "(", "v1", ",", "v2", ",", "cos", "=", "False", ")", ":", "n", "=", "(", "norm", "(", "v1", ")", "*", "norm", "(", "v2", ")", ")", "_", "=", "dot", "(", "v1", ",", "v2", ")", "/", "n", "return", "_", "if", "cos", "else", "N", ".", "arccos", "(", "_", ")" ]
Find the angle between two vectors. :param cos: If True, the cosine of the angle will be returned. False by default.
[ "Find", "the", "angle", "between", "two", "vectors", "." ]
python
train
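Because the angle() helper above is only a few lines, here is a self-contained sketch of the same computation; the numpy alias N mirrors the snippet, and the sample vectors are purely illustrative.
import numpy as N
from numpy import dot
from numpy.linalg import norm

def angle(v1, v2, cos=False):
    # Normalized dot product; arccos turns it into the angle in radians.
    n = norm(v1) * norm(v2)
    c = dot(v1, v2) / n
    return c if cos else N.arccos(c)

print(angle([1, 0, 0], [0, 1, 0]))             # ~1.5708 rad (90 degrees)
print(angle([1, 0, 0], [1, 1, 0], cos=True))   # ~0.7071 (cosine of 45 degrees)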
neurosynth/neurosynth
neurosynth/base/dataset.py
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/base/dataset.py#L758-L765
def _sdf_to_csr(self): """ Convert FeatureTable to SciPy CSR matrix. """ data = self.data.to_dense() self.data = { 'columns': list(data.columns), 'index': list(data.index), 'values': sparse.csr_matrix(data.values) }
[ "def", "_sdf_to_csr", "(", "self", ")", ":", "data", "=", "self", ".", "data", ".", "to_dense", "(", ")", "self", ".", "data", "=", "{", "'columns'", ":", "list", "(", "data", ".", "columns", ")", ",", "'index'", ":", "list", "(", "data", ".", "index", ")", ",", "'values'", ":", "sparse", ".", "csr_matrix", "(", "data", ".", "values", ")", "}" ]
Convert FeatureTable to SciPy CSR matrix.
[ "Convert", "FeatureTable", "to", "SciPy", "CSR", "matrix", "." ]
python
test
frascoweb/easywebassets
easywebassets/package.py
https://github.com/frascoweb/easywebassets/blob/02f84376067c827c84fc1773895bb2784e033949/easywebassets/package.py#L194-L198
def urls_for(self, asset_type, *args, **kwargs): """Returns urls needed to include all assets of asset_type """ return self.urls_for_depends(asset_type, *args, **kwargs) + \ self.urls_for_self(asset_type, *args, **kwargs)
[ "def", "urls_for", "(", "self", ",", "asset_type", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "urls_for_depends", "(", "asset_type", ",", "*", "args", ",", "*", "*", "kwargs", ")", "+", "self", ".", "urls_for_self", "(", "asset_type", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Returns urls needed to include all assets of asset_type
[ "Returns", "urls", "needed", "to", "include", "all", "assets", "of", "asset_type" ]
python
test
Falkonry/falkonry-python-client
falkonryclient/service/falkonry.py
https://github.com/Falkonry/falkonry-python-client/blob/0aeb2b00293ee94944f1634e9667401b03da29c1/falkonryclient/service/falkonry.py#L321-L334
def get_historical_output(self, assessment, options): """ To get output of a historical Assessment :param assessment: string :param options: dict """ responseFormat=None if options and 'format' in options and options['format'] is not None: responseFormat = options['format'] options['format'] = None url = '/assessment/' + str(assessment) + '/output?' + urllib.parse.urlencode(options) response = self.http.downstream(url, responseFormat) return response
[ "def", "get_historical_output", "(", "self", ",", "assessment", ",", "options", ")", ":", "responseFormat", "=", "None", "if", "options", "and", "'format'", "in", "options", "and", "options", "[", "'format'", "]", "is", "not", "None", ":", "responseFormat", "=", "options", "[", "'format'", "]", "options", "[", "'format'", "]", "=", "None", "url", "=", "'/assessment/'", "+", "str", "(", "assessment", ")", "+", "'/output?'", "+", "urllib", ".", "parse", ".", "urlencode", "(", "options", ")", "response", "=", "self", ".", "http", ".", "downstream", "(", "url", ",", "responseFormat", ")", "return", "response" ]
To get output of a historical Assessment :param assessment: string :param options: dict
[ "To", "get", "output", "of", "a", "historical", "Assessment", ":", "param", "assessment", ":", "string", ":", "param", "options", ":", "dict" ]
python
train
google/grr
grr/server/grr_response_server/artifact_registry.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/artifact_registry.py#L360-L411
def GetArtifacts(self, os_name=None, name_list=None, source_type=None, exclude_dependents=False, provides=None, reload_datastore_artifacts=False): """Retrieve artifact classes with optional filtering. All filters must match for the artifact to be returned. Args: os_name: string to match against supported_os name_list: list of strings to match against artifact names source_type: rdf_artifacts.ArtifactSource.SourceType to match against source_type exclude_dependents: if true only artifacts with no dependencies will be returned provides: return the artifacts that provide these dependencies reload_datastore_artifacts: If true, the data store sources are queried for new artifacts. Returns: set of artifacts matching filter criteria """ self._CheckDirty(reload_datastore_artifacts=reload_datastore_artifacts) results = set() for artifact in itervalues(self._artifacts): # artifact.supported_os = [] matches all OSes if os_name and artifact.supported_os and ( os_name not in artifact.supported_os): continue if name_list and artifact.name not in name_list: continue if source_type: source_types = [c.type for c in artifact.sources] if source_type not in source_types: continue if exclude_dependents and GetArtifactPathDependencies(artifact): continue if not provides: results.add(artifact) else: # This needs to remain the last test, if it matches the result is added for provide_string in artifact.provides: if provide_string in provides: results.add(artifact) break return results
[ "def", "GetArtifacts", "(", "self", ",", "os_name", "=", "None", ",", "name_list", "=", "None", ",", "source_type", "=", "None", ",", "exclude_dependents", "=", "False", ",", "provides", "=", "None", ",", "reload_datastore_artifacts", "=", "False", ")", ":", "self", ".", "_CheckDirty", "(", "reload_datastore_artifacts", "=", "reload_datastore_artifacts", ")", "results", "=", "set", "(", ")", "for", "artifact", "in", "itervalues", "(", "self", ".", "_artifacts", ")", ":", "# artifact.supported_os = [] matches all OSes", "if", "os_name", "and", "artifact", ".", "supported_os", "and", "(", "os_name", "not", "in", "artifact", ".", "supported_os", ")", ":", "continue", "if", "name_list", "and", "artifact", ".", "name", "not", "in", "name_list", ":", "continue", "if", "source_type", ":", "source_types", "=", "[", "c", ".", "type", "for", "c", "in", "artifact", ".", "sources", "]", "if", "source_type", "not", "in", "source_types", ":", "continue", "if", "exclude_dependents", "and", "GetArtifactPathDependencies", "(", "artifact", ")", ":", "continue", "if", "not", "provides", ":", "results", ".", "add", "(", "artifact", ")", "else", ":", "# This needs to remain the last test, if it matches the result is added", "for", "provide_string", "in", "artifact", ".", "provides", ":", "if", "provide_string", "in", "provides", ":", "results", ".", "add", "(", "artifact", ")", "break", "return", "results" ]
Retrieve artifact classes with optional filtering. All filters must match for the artifact to be returned. Args: os_name: string to match against supported_os name_list: list of strings to match against artifact names source_type: rdf_artifacts.ArtifactSource.SourceType to match against source_type exclude_dependents: if true only artifacts with no dependencies will be returned provides: return the artifacts that provide these dependencies reload_datastore_artifacts: If true, the data store sources are queried for new artifacts. Returns: set of artifacts matching filter criteria
[ "Retrieve", "artifact", "classes", "with", "optional", "filtering", "." ]
python
train
adamziel/python_translate
python_translate/translations.py
https://github.com/adamziel/python_translate/blob/0aee83f434bd2d1b95767bcd63adb7ac7036c7df/python_translate/translations.py#L177-L207
def add_fallback_catalogue(self, catalogue): """ Merges translations from the given Catalogue into the current one only when the translation does not exist. This is used to provide default translations when they do not exist for the current locale. @type id: The @param id: message id """ assert isinstance(catalogue, MessageCatalogue) # detect circular references c = self while True: if c.locale == catalogue.locale: raise ValueError( 'Circular reference detected when adding a ' 'fallback catalogue for locale "%s".' % catalogue.locale) c = c.parent if c is None: break catalogue.parent = self self.fallback_catalogue = catalogue for resource in catalogue.resources: self.add_resource(resource)
[ "def", "add_fallback_catalogue", "(", "self", ",", "catalogue", ")", ":", "assert", "isinstance", "(", "catalogue", ",", "MessageCatalogue", ")", "# detect circular references", "c", "=", "self", "while", "True", ":", "if", "c", ".", "locale", "==", "catalogue", ".", "locale", ":", "raise", "ValueError", "(", "'Circular reference detected when adding a '", "'fallback catalogue for locale \"%s\".'", "%", "catalogue", ".", "locale", ")", "c", "=", "c", ".", "parent", "if", "c", "is", "None", ":", "break", "catalogue", ".", "parent", "=", "self", "self", ".", "fallback_catalogue", "=", "catalogue", "for", "resource", "in", "catalogue", ".", "resources", ":", "self", ".", "add_resource", "(", "resource", ")" ]
Merges translations from the given Catalogue into the current one only when the translation does not exist. This is used to provide default translations when they do not exist for the current locale. @type id: The @param id: message id
[ "Merges", "translations", "from", "the", "given", "Catalogue", "into", "the", "current", "one", "only", "when", "the", "translation", "does", "not", "exist", "." ]
python
train
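A sketch of how fallback chaining like add_fallback_catalogue above is typically used. It assumes MessageCatalogue can be constructed from just a locale string, which this record does not confirm; treat the constructor call as a placeholder.
from python_translate.translations import MessageCatalogue

# 'fr' falls back to 'en' for any message it does not define itself.
en = MessageCatalogue('en')
fr = MessageCatalogue('fr')
fr.add_fallback_catalogue(en)

# Adding 'fr' back as a fallback of 'en' afterwards would raise ValueError,
# because the circular-reference check walks the parent chain.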
MagicStack/asyncpg
asyncpg/connection.py
https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connection.py#L460-L530
async def copy_from_table(self, table_name, *, output, columns=None, schema_name=None, timeout=None, format=None, oids=None, delimiter=None, null=None, header=None, quote=None, escape=None, force_quote=None, encoding=None): """Copy table contents to a file or file-like object. :param str table_name: The name of the table to copy data from. :param output: A :term:`path-like object <python:path-like object>`, or a :term:`file-like object <python:file-like object>`, or a :term:`coroutine function <python:coroutine function>` that takes a ``bytes`` instance as a sole argument. :param list columns: An optional list of column names to copy. :param str schema_name: An optional schema name to qualify the table. :param float timeout: Optional timeout value in seconds. The remaining keyword arguments are ``COPY`` statement options, see `COPY statement documentation`_ for details. :return: The status string of the COPY command. Example: .. code-block:: pycon >>> import asyncpg >>> import asyncio >>> async def run(): ... con = await asyncpg.connect(user='postgres') ... result = await con.copy_from_table( ... 'mytable', columns=('foo', 'bar'), ... output='file.csv', format='csv') ... print(result) ... >>> asyncio.get_event_loop().run_until_complete(run()) 'COPY 100' .. _`COPY statement documentation`: https://www.postgresql.org/docs/current/static/sql-copy.html .. versionadded:: 0.11.0 """ tabname = utils._quote_ident(table_name) if schema_name: tabname = utils._quote_ident(schema_name) + '.' + tabname if columns: cols = '({})'.format( ', '.join(utils._quote_ident(c) for c in columns)) else: cols = '' opts = self._format_copy_opts( format=format, oids=oids, delimiter=delimiter, null=null, header=header, quote=quote, escape=escape, force_quote=force_quote, encoding=encoding ) copy_stmt = 'COPY {tab}{cols} TO STDOUT {opts}'.format( tab=tabname, cols=cols, opts=opts) return await self._copy_out(copy_stmt, output, timeout)
[ "async", "def", "copy_from_table", "(", "self", ",", "table_name", ",", "*", ",", "output", ",", "columns", "=", "None", ",", "schema_name", "=", "None", ",", "timeout", "=", "None", ",", "format", "=", "None", ",", "oids", "=", "None", ",", "delimiter", "=", "None", ",", "null", "=", "None", ",", "header", "=", "None", ",", "quote", "=", "None", ",", "escape", "=", "None", ",", "force_quote", "=", "None", ",", "encoding", "=", "None", ")", ":", "tabname", "=", "utils", ".", "_quote_ident", "(", "table_name", ")", "if", "schema_name", ":", "tabname", "=", "utils", ".", "_quote_ident", "(", "schema_name", ")", "+", "'.'", "+", "tabname", "if", "columns", ":", "cols", "=", "'({})'", ".", "format", "(", "', '", ".", "join", "(", "utils", ".", "_quote_ident", "(", "c", ")", "for", "c", "in", "columns", ")", ")", "else", ":", "cols", "=", "''", "opts", "=", "self", ".", "_format_copy_opts", "(", "format", "=", "format", ",", "oids", "=", "oids", ",", "delimiter", "=", "delimiter", ",", "null", "=", "null", ",", "header", "=", "header", ",", "quote", "=", "quote", ",", "escape", "=", "escape", ",", "force_quote", "=", "force_quote", ",", "encoding", "=", "encoding", ")", "copy_stmt", "=", "'COPY {tab}{cols} TO STDOUT {opts}'", ".", "format", "(", "tab", "=", "tabname", ",", "cols", "=", "cols", ",", "opts", "=", "opts", ")", "return", "await", "self", ".", "_copy_out", "(", "copy_stmt", ",", "output", ",", "timeout", ")" ]
Copy table contents to a file or file-like object. :param str table_name: The name of the table to copy data from. :param output: A :term:`path-like object <python:path-like object>`, or a :term:`file-like object <python:file-like object>`, or a :term:`coroutine function <python:coroutine function>` that takes a ``bytes`` instance as a sole argument. :param list columns: An optional list of column names to copy. :param str schema_name: An optional schema name to qualify the table. :param float timeout: Optional timeout value in seconds. The remaining keyword arguments are ``COPY`` statement options, see `COPY statement documentation`_ for details. :return: The status string of the COPY command. Example: .. code-block:: pycon >>> import asyncpg >>> import asyncio >>> async def run(): ... con = await asyncpg.connect(user='postgres') ... result = await con.copy_from_table( ... 'mytable', columns=('foo', 'bar'), ... output='file.csv', format='csv') ... print(result) ... >>> asyncio.get_event_loop().run_until_complete(run()) 'COPY 100' .. _`COPY statement documentation`: https://www.postgresql.org/docs/current/static/sql-copy.html .. versionadded:: 0.11.0
[ "Copy", "table", "contents", "to", "a", "file", "or", "file", "-", "like", "object", "." ]
python
train
HazyResearch/metal
metal/contrib/visualization/analysis.py
https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/contrib/visualization/analysis.py#L79-L97
def plot_probabilities_histogram(Y_p, title=None): """Plot a histogram from a numpy array of probabilities Args: Y_p: An [n] or [n, 1] np.ndarray of probabilities (floats in [0,1]) """ if Y_p.ndim > 1: msg = ( f"Arg Y_p should be a 1-dimensional np.ndarray, not of shape " f"{Y_p.shape}." ) raise ValueError(msg) plt.hist(Y_p, bins=20) plt.xlim((0, 1.025)) plt.xlabel("Probability") plt.ylabel("# Predictions") if isinstance(title, str): plt.title(title) plt.show()
[ "def", "plot_probabilities_histogram", "(", "Y_p", ",", "title", "=", "None", ")", ":", "if", "Y_p", ".", "ndim", ">", "1", ":", "msg", "=", "(", "f\"Arg Y_p should be a 1-dimensional np.ndarray, not of shape \"", "f\"{Y_p.shape}.\"", ")", "raise", "ValueError", "(", "msg", ")", "plt", ".", "hist", "(", "Y_p", ",", "bins", "=", "20", ")", "plt", ".", "xlim", "(", "(", "0", ",", "1.025", ")", ")", "plt", ".", "xlabel", "(", "\"Probability\"", ")", "plt", ".", "ylabel", "(", "\"# Predictions\"", ")", "if", "isinstance", "(", "title", ",", "str", ")", ":", "plt", ".", "title", "(", "title", ")", "plt", ".", "show", "(", ")" ]
Plot a histogram from a numpy array of probabilities Args: Y_p: An [n] or [n, 1] np.ndarray of probabilities (floats in [0,1])
[ "Plot", "a", "histogram", "from", "a", "numpy", "array", "of", "probabilities" ]
python
train
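The plotting helper above is easy to exercise with synthetic data; this sketch inlines the same matplotlib calls so it runs without the metal package (bin count and axis labels copied from the snippet, the random data is a stand-in for real predictions).
import numpy as np
import matplotlib.pyplot as plt

Y_p = np.random.rand(1000)   # stand-in for predicted probabilities in [0, 1]
assert Y_p.ndim == 1         # the helper rejects higher-rank arrays

plt.hist(Y_p, bins=20)
plt.xlim((0, 1.025))
plt.xlabel("Probability")
plt.ylabel("# Predictions")
plt.title("Predicted probability histogram")
plt.show()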
bjmorgan/lattice_mc
lattice_mc/lattice_site.py
https://github.com/bjmorgan/lattice_mc/blob/7fa7be85f2f23a2d8dfd0830ecdb89d0dbf2bfd5/lattice_mc/lattice_site.py#L94-L110
def cn_occupation_energy( self, delta_occupation=None ): """ The coordination-number dependent energy for this site. Args: delta_occupation (:obj:Dict(Str:Int), optional): A dictionary of a change in (site-type specific) coordination number, e.g. { 'A' : 1, 'B' : -1 }. If this is not None, the coordination-number dependent energy is calculated including these changes in neighbour-site occupations. Defaults to None Returns: (Float): The coordination-number dependent energy for this site. """ nn_occupations = self.site_specific_nn_occupation() if delta_occupation: for site in delta_occupation: assert( site in nn_occupations ) nn_occupations[ site ] += delta_occupation[ site ] return sum( [ self.cn_occupation_energies[ s ][ n ] for s, n in nn_occupations.items() ] )
[ "def", "cn_occupation_energy", "(", "self", ",", "delta_occupation", "=", "None", ")", ":", "nn_occupations", "=", "self", ".", "site_specific_nn_occupation", "(", ")", "if", "delta_occupation", ":", "for", "site", "in", "delta_occupation", ":", "assert", "(", "site", "in", "nn_occupations", ")", "nn_occupations", "[", "site", "]", "+=", "delta_occupation", "[", "site", "]", "return", "sum", "(", "[", "self", ".", "cn_occupation_energies", "[", "s", "]", "[", "n", "]", "for", "s", ",", "n", "in", "nn_occupations", ".", "items", "(", ")", "]", ")" ]
The coordination-number dependent energy for this site. Args: delta_occupation (:obj:Dict(Str:Int), optional): A dictionary of a change in (site-type specific) coordination number, e.g. { 'A' : 1, 'B' : -1 }. If this is not None, the coordination-number dependent energy is calculated including these changes in neighbour-site occupations. Defaults to None Returns: (Float): The coordination-number dependent energy for this site.
[ "The", "coordination", "-", "number", "dependent", "energy", "for", "this", "site", "." ]
python
train
raiden-network/raiden
raiden/blockchain_events_handler.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/blockchain_events_handler.py#L45-L75
def handle_tokennetwork_new(raiden: 'RaidenService', event: Event): """ Handles a `TokenNetworkCreated` event. """ data = event.event_data args = data['args'] block_number = data['block_number'] token_network_address = args['token_network_address'] token_address = typing.TokenAddress(args['token_address']) block_hash = data['block_hash'] token_network_proxy = raiden.chain.token_network(token_network_address) raiden.blockchain_events.add_token_network_listener( token_network_proxy=token_network_proxy, contract_manager=raiden.contract_manager, from_block=block_number, ) token_network_state = TokenNetworkState( token_network_address, token_address, ) transaction_hash = event.event_data['transaction_hash'] new_token_network = ContractReceiveNewTokenNetwork( transaction_hash=transaction_hash, payment_network_identifier=event.originating_contract, token_network=token_network_state, block_number=block_number, block_hash=block_hash, ) raiden.handle_and_track_state_change(new_token_network)
[ "def", "handle_tokennetwork_new", "(", "raiden", ":", "'RaidenService'", ",", "event", ":", "Event", ")", ":", "data", "=", "event", ".", "event_data", "args", "=", "data", "[", "'args'", "]", "block_number", "=", "data", "[", "'block_number'", "]", "token_network_address", "=", "args", "[", "'token_network_address'", "]", "token_address", "=", "typing", ".", "TokenAddress", "(", "args", "[", "'token_address'", "]", ")", "block_hash", "=", "data", "[", "'block_hash'", "]", "token_network_proxy", "=", "raiden", ".", "chain", ".", "token_network", "(", "token_network_address", ")", "raiden", ".", "blockchain_events", ".", "add_token_network_listener", "(", "token_network_proxy", "=", "token_network_proxy", ",", "contract_manager", "=", "raiden", ".", "contract_manager", ",", "from_block", "=", "block_number", ",", ")", "token_network_state", "=", "TokenNetworkState", "(", "token_network_address", ",", "token_address", ",", ")", "transaction_hash", "=", "event", ".", "event_data", "[", "'transaction_hash'", "]", "new_token_network", "=", "ContractReceiveNewTokenNetwork", "(", "transaction_hash", "=", "transaction_hash", ",", "payment_network_identifier", "=", "event", ".", "originating_contract", ",", "token_network", "=", "token_network_state", ",", "block_number", "=", "block_number", ",", "block_hash", "=", "block_hash", ",", ")", "raiden", ".", "handle_and_track_state_change", "(", "new_token_network", ")" ]
Handles a `TokenNetworkCreated` event.
[ "Handles", "a", "TokenNetworkCreated", "event", "." ]
python
train
openearth/mmi-python
mmi/tracker.py
https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/tracker.py#L145-L163
def get(self, key=None, view=None): """Register a new model (models)""" self.set_header("Access-Control-Allow-Origin", "*") self.set_header("Content-Type", "application/json") if key is not None: value = {} value.update(self.database[key]) if view is not None: # generate a context with the relevant variables context = {} context["value"] = value context["ctx"] = self.ctx result = json.dumps(getattr(views, view)(context)) else: result = json.dumps(value) else: result = json.dumps(self.database.values()) self.write(result)
[ "def", "get", "(", "self", ",", "key", "=", "None", ",", "view", "=", "None", ")", ":", "self", ".", "set_header", "(", "\"Access-Control-Allow-Origin\"", ",", "\"*\"", ")", "self", ".", "set_header", "(", "\"Content-Type\"", ",", "\"application/json\"", ")", "if", "key", "is", "not", "None", ":", "value", "=", "{", "}", "value", ".", "update", "(", "self", ".", "database", "[", "key", "]", ")", "if", "view", "is", "not", "None", ":", "# generate a context with the relevant variables", "context", "=", "{", "}", "context", "[", "\"value\"", "]", "=", "value", "context", "[", "\"ctx\"", "]", "=", "self", ".", "ctx", "result", "=", "json", ".", "dumps", "(", "getattr", "(", "views", ",", "view", ")", "(", "context", ")", ")", "else", ":", "result", "=", "json", ".", "dumps", "(", "value", ")", "else", ":", "result", "=", "json", ".", "dumps", "(", "self", ".", "database", ".", "values", "(", ")", ")", "self", ".", "write", "(", "result", ")" ]
Register a new model (models)
[ "Register", "a", "new", "model", "(", "models", ")" ]
python
train
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L750-L828
def _validate_resolution_output_length(path, entity_name, results, allow_mult=False, all_mult=False, ask_to_resolve=True): """ :param path: Path to the object that required resolution; propagated from command-line :type path: string :param entity_name: Name of the object :type entity_name: string :param results: Result of resolution; non-empty list of object specifications (each specification is a dictionary with keys "project" and "id") :type results: list of dictionaries :param allow_mult: If True, it is okay to choose from multiple results of a single resolved object, or return all results found; if False, raise an error if multiple results are found :type allow_mult: boolean :param all_mult: If True, return all results if multiple results are found for a single resolved object; if False, user needs to choose a single result if multiple are found; the value of all_mult only has an effect if allow_mult is True) :type all_mult: boolean :param ask_to_resolve: Whether picking may be necessary (if True, a list is returned; if False, only one result is returned); if specified as True, then all results will be returned, regardless of the values of allow_mult and all_mult :type ask_to_resolve: boolean :returns: The results of resolving entity_name, expected to be of the following form: <resolved_object> # If only one result is present or the user # is able to select from multiple OR [<resolved_object>, ...] # If multiple results are present and # it is allowed where <resolved_object> is of the following form: {"project": <project_id>, "id": <object_id>} :rtype: dict or list of dicts :raises: ValueError if results is empty :raises: ResolutionError if too many results are found and the user is not in interactive mode and cannot select one Precondition: results must be a nonempty list Validates length of results. If there are multiple results found and the user is in interactive mode, then the user will be prompted to select a single result to be returned. """ if len(results) == 0: raise ValueError("'results' must be nonempty.") # Caller wants ALL results, so return the entire results list # At this point, do not care about the values of allow_mult or all_mult if not ask_to_resolve: return results if len(results) > 1: # The other way the caller can specify it wants all results is by setting # allow_mult to be True and allowing all_mult to be True (or if the object name is a glob pattern) if allow_mult and (all_mult or is_glob_pattern(entity_name)): return results if INTERACTIVE_CLI: print('The given path "' + path + '" resolves to the following data objects:') if any(['describe' not in result for result in results]): # findDataObject API call must be made to get 'describe' mappings project, folderpath, entity_name = resolve_path(path, expected='entity') results = _resolve_global_entity(project, folderpath, entity_name) choice = pick([get_ls_l_desc(result['describe']) for result in results], allow_mult=allow_mult) if allow_mult and choice == '*': return results else: return [results[choice]] if allow_mult else results[choice] else: raise ResolutionError('The given path "' + path + '" resolves to ' + str(len(results)) + ' data objects') else: return [results[0]] if allow_mult else results[0]
[ "def", "_validate_resolution_output_length", "(", "path", ",", "entity_name", ",", "results", ",", "allow_mult", "=", "False", ",", "all_mult", "=", "False", ",", "ask_to_resolve", "=", "True", ")", ":", "if", "len", "(", "results", ")", "==", "0", ":", "raise", "ValueError", "(", "\"'results' must be nonempty.\"", ")", "# Caller wants ALL results, so return the entire results list", "# At this point, do not care about the values of allow_mult or all_mult", "if", "not", "ask_to_resolve", ":", "return", "results", "if", "len", "(", "results", ")", ">", "1", ":", "# The other way the caller can specify it wants all results is by setting", "# allow_mult to be True and allowing all_mult to be True (or if the object name is a glob pattern)", "if", "allow_mult", "and", "(", "all_mult", "or", "is_glob_pattern", "(", "entity_name", ")", ")", ":", "return", "results", "if", "INTERACTIVE_CLI", ":", "print", "(", "'The given path \"'", "+", "path", "+", "'\" resolves to the following data objects:'", ")", "if", "any", "(", "[", "'describe'", "not", "in", "result", "for", "result", "in", "results", "]", ")", ":", "# findDataObject API call must be made to get 'describe' mappings", "project", ",", "folderpath", ",", "entity_name", "=", "resolve_path", "(", "path", ",", "expected", "=", "'entity'", ")", "results", "=", "_resolve_global_entity", "(", "project", ",", "folderpath", ",", "entity_name", ")", "choice", "=", "pick", "(", "[", "get_ls_l_desc", "(", "result", "[", "'describe'", "]", ")", "for", "result", "in", "results", "]", ",", "allow_mult", "=", "allow_mult", ")", "if", "allow_mult", "and", "choice", "==", "'*'", ":", "return", "results", "else", ":", "return", "[", "results", "[", "choice", "]", "]", "if", "allow_mult", "else", "results", "[", "choice", "]", "else", ":", "raise", "ResolutionError", "(", "'The given path \"'", "+", "path", "+", "'\" resolves to '", "+", "str", "(", "len", "(", "results", ")", ")", "+", "' data objects'", ")", "else", ":", "return", "[", "results", "[", "0", "]", "]", "if", "allow_mult", "else", "results", "[", "0", "]" ]
:param path: Path to the object that required resolution; propagated from command-line :type path: string :param entity_name: Name of the object :type entity_name: string :param results: Result of resolution; non-empty list of object specifications (each specification is a dictionary with keys "project" and "id") :type results: list of dictionaries :param allow_mult: If True, it is okay to choose from multiple results of a single resolved object, or return all results found; if False, raise an error if multiple results are found :type allow_mult: boolean :param all_mult: If True, return all results if multiple results are found for a single resolved object; if False, user needs to choose a single result if multiple are found; the value of all_mult only has an effect if allow_mult is True) :type all_mult: boolean :param ask_to_resolve: Whether picking may be necessary (if True, a list is returned; if False, only one result is returned); if specified as True, then all results will be returned, regardless of the values of allow_mult and all_mult :type ask_to_resolve: boolean :returns: The results of resolving entity_name, expected to be of the following form: <resolved_object> # If only one result is present or the user # is able to select from multiple OR [<resolved_object>, ...] # If multiple results are present and # it is allowed where <resolved_object> is of the following form: {"project": <project_id>, "id": <object_id>} :rtype: dict or list of dicts :raises: ValueError if results is empty :raises: ResolutionError if too many results are found and the user is not in interactive mode and cannot select one Precondition: results must be a nonempty list Validates length of results. If there are multiple results found and the user is in interactive mode, then the user will be prompted to select a single result to be returned.
[ ":", "param", "path", ":", "Path", "to", "the", "object", "that", "required", "resolution", ";", "propagated", "from", "command", "-", "line", ":", "type", "path", ":", "string", ":", "param", "entity_name", ":", "Name", "of", "the", "object", ":", "type", "entity_name", ":", "string", ":", "param", "results", ":", "Result", "of", "resolution", ";", "non", "-", "empty", "list", "of", "object", "specifications", "(", "each", "specification", "is", "a", "dictionary", "with", "keys", "project", "and", "id", ")", ":", "type", "results", ":", "list", "of", "dictionaries", ":", "param", "allow_mult", ":", "If", "True", "it", "is", "okay", "to", "choose", "from", "multiple", "results", "of", "a", "single", "resolved", "object", "or", "return", "all", "results", "found", ";", "if", "False", "raise", "an", "error", "if", "multiple", "results", "are", "found", ":", "type", "allow_mult", ":", "boolean", ":", "param", "all_mult", ":", "If", "True", "return", "all", "results", "if", "multiple", "results", "are", "found", "for", "a", "single", "resolved", "object", ";", "if", "False", "user", "needs", "to", "choose", "a", "single", "result", "if", "multiple", "are", "found", ";", "the", "value", "of", "all_mult", "only", "has", "an", "effect", "if", "allow_mult", "is", "True", ")", ":", "type", "all_mult", ":", "boolean", ":", "param", "ask_to_resolve", ":", "Whether", "picking", "may", "be", "necessary", "(", "if", "True", "a", "list", "is", "returned", ";", "if", "False", "only", "one", "result", "is", "returned", ")", ";", "if", "specified", "as", "True", "then", "all", "results", "will", "be", "returned", "regardless", "of", "the", "values", "of", "allow_mult", "and", "all_mult", ":", "type", "ask_to_resolve", ":", "boolean", ":", "returns", ":", "The", "results", "of", "resolving", "entity_name", "expected", "to", "be", "of", "the", "following", "form", ":", "<resolved_object", ">", "#", "If", "only", "one", "result", "is", "present", "or", "the", "user", "#", "is", "able", "to", "select", "from", "multiple", "OR", "[", "<resolved_object", ">", "...", "]", "#", "If", "multiple", "results", "are", "present", "and", "#", "it", "is", "allowed", "where", "<resolved_object", ">", "is", "of", "the", "following", "form", ":", "{", "project", ":", "<project_id", ">", "id", ":", "<object_id", ">", "}", ":", "rtype", ":", "dict", "or", "list", "of", "dicts", ":", "raises", ":", "ValueError", "if", "results", "is", "empty", ":", "raises", ":", "ResolutionError", "if", "too", "many", "results", "are", "found", "and", "the", "user", "is", "not", "in", "interactive", "mode", "and", "cannot", "select", "one" ]
python
train
portfors-lab/sparkle
sparkle/gui/plotting/calibration_explore_display.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/plotting/calibration_explore_display.py#L136-L143
def setXlimits(self, lims): """Sets the X axis limits of the signal plots :param lims: (min, max) of x axis, in same units as data :type lims: (float, float) """ self.responseSignalPlot.setXlim(lims) self.stimSignalPlot.setXlim(lims)
[ "def", "setXlimits", "(", "self", ",", "lims", ")", ":", "self", ".", "responseSignalPlot", ".", "setXlim", "(", "lims", ")", "self", ".", "stimSignalPlot", ".", "setXlim", "(", "lims", ")" ]
Sets the X axis limits of the signal plots :param lims: (min, max) of x axis, in same units as data :type lims: (float, float)
[ "Sets", "the", "X", "axis", "limits", "of", "the", "signal", "plots" ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/qos/cpu/slot/port_group/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/qos/cpu/slot/port_group/__init__.py#L127-L148
def _set_group(self, v, load=False): """ Setter method for group, mapped from YANG variable /qos/cpu/slot/port_group/group (list) If this variable is read-only (config: false) in the source YANG file, then _set_group is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_group() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("group_id",group.group, yang_name="group", rest_name="group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='group-id', extensions={u'tailf-common': {u'info': u'Configure CPU QoS group parameters', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'callpoint': u'QosCpuGroupConfig', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'group'}}), is_container='list', yang_name="group", rest_name="group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CPU QoS group parameters', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'callpoint': u'QosCpuGroupConfig', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'group'}}, namespace='urn:brocade.com:mgmt:brocade-qos-cpu', defining_module='brocade-qos-cpu', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """group must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("group_id",group.group, yang_name="group", rest_name="group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='group-id', extensions={u'tailf-common': {u'info': u'Configure CPU QoS group parameters', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'callpoint': u'QosCpuGroupConfig', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'group'}}), is_container='list', yang_name="group", rest_name="group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CPU QoS group parameters', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'callpoint': u'QosCpuGroupConfig', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'group'}}, namespace='urn:brocade.com:mgmt:brocade-qos-cpu', defining_module='brocade-qos-cpu', yang_type='list', is_config=True)""", }) self.__group = t if hasattr(self, '_set'): self._set()
[ "def", "_set_group", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"group_id\"", ",", "group", ".", "group", ",", "yang_name", "=", "\"group\"", ",", "rest_name", "=", "\"group\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'group-id'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Configure CPU QoS group parameters'", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-incomplete-no'", ":", "None", ",", "u'callpoint'", ":", "u'QosCpuGroupConfig'", ",", "u'cli-sequence-commands'", ":", "None", ",", "u'cli-incomplete-command'", ":", "None", ",", "u'alt-name'", ":", "u'group'", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"group\"", ",", "rest_name", "=", "\"group\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Configure CPU QoS group parameters'", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-incomplete-no'", ":", "None", ",", "u'callpoint'", ":", "u'QosCpuGroupConfig'", ",", "u'cli-sequence-commands'", ":", "None", ",", "u'cli-incomplete-command'", ":", "None", ",", "u'alt-name'", ":", "u'group'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-qos-cpu'", ",", "defining_module", "=", "'brocade-qos-cpu'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"group must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"group_id\",group.group, yang_name=\"group\", rest_name=\"group\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='group-id', extensions={u'tailf-common': {u'info': u'Configure CPU QoS group parameters', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'callpoint': u'QosCpuGroupConfig', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'group'}}), is_container='list', yang_name=\"group\", rest_name=\"group\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CPU QoS group parameters', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'callpoint': u'QosCpuGroupConfig', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'group'}}, namespace='urn:brocade.com:mgmt:brocade-qos-cpu', defining_module='brocade-qos-cpu', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__group", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for group, mapped from YANG variable /qos/cpu/slot/port_group/group (list) If this variable is read-only (config: false) in the source YANG file, then _set_group is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_group() directly.
[ "Setter", "method", "for", "group", "mapped", "from", "YANG", "variable", "/", "qos", "/", "cpu", "/", "slot", "/", "port_group", "/", "group", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_group", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_group", "()", "directly", "." ]
python
train
ponty/PyVirtualDisplay
pyvirtualdisplay/abstractdisplay.py
https://github.com/ponty/PyVirtualDisplay/blob/903841f5ef13bf162be6fdd22daa5c349af45d67/pyvirtualdisplay/abstractdisplay.py#L84-L98
def redirect_display(self, on):
    '''
    on:
     * True  -> set $DISPLAY to virtual screen
     * False -> set $DISPLAY to original screen

    :param on: bool
    '''
    d = self.new_display_var if on else self.old_display_var
    if d is None:
        log.debug('unset DISPLAY')
        del os.environ['DISPLAY']
    else:
        log.debug('DISPLAY=%s', d)
        os.environ['DISPLAY'] = d
[ "def", "redirect_display", "(", "self", ",", "on", ")", ":", "d", "=", "self", ".", "new_display_var", "if", "on", "else", "self", ".", "old_display_var", "if", "d", "is", "None", ":", "log", ".", "debug", "(", "'unset DISPLAY'", ")", "del", "os", ".", "environ", "[", "'DISPLAY'", "]", "else", ":", "log", ".", "debug", "(", "'DISPLAY=%s'", ",", "d", ")", "os", ".", "environ", "[", "'DISPLAY'", "]", "=", "d" ]
on: * True -> set $DISPLAY to virtual screen * False -> set $DISPLAY to original screen :param on: bool
[ "on", ":", "*", "True", "-", ">", "set", "$DISPLAY", "to", "virtual", "screen", "*", "False", "-", ">", "set", "$DISPLAY", "to", "original", "screen" ]
python
train
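The redirect_display record above boils down to toggling the DISPLAY environment variable; a minimal standalone sketch of that idea (the display numbers are invented, and unsetting is done with pop rather than del):

import os

def set_display(new_display, old_display, on):
    # Point $DISPLAY at the virtual screen when on, otherwise restore the original.
    target = new_display if on else old_display
    if target is None:
        os.environ.pop('DISPLAY', None)   # unset DISPLAY entirely
    else:
        os.environ['DISPLAY'] = target

set_display(':1001', ':0', on=True)    # route X clients to the virtual display
assert os.environ['DISPLAY'] == ':1001'
set_display(':1001', ':0', on=False)   # restore the original display
assert os.environ['DISPLAY'] == ':0'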
CityOfZion/neo-python-core
neocore/Cryptography/ECCurve.py
https://github.com/CityOfZion/neo-python-core/blob/786c02cc2f41712d70b1f064ae3d67f86167107f/neocore/Cryptography/ECCurve.py#L817-L834
def decode_secp256r1(str, unhex=True, check_on_curve=True):
    """
    decode a public key on the secp256r1 curve
    """
    GFp = FiniteField(int("FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF", 16))
    ec = EllipticCurve(GFp,
                       115792089210356248762697446949407573530086143415290314195533631308867097853948,
                       41058363725152142129326129780047268409114441015993725554835256314039467401291)

    point = ec.decode_from_hex(str, unhex=unhex)

    if check_on_curve:
        if point.isoncurve():
            return ECDSA(GFp, point, int("FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC", 16))
        else:
            raise Exception("Could not decode string")

    return ECDSA(GFp, point, int("FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC", 16))
[ "def", "decode_secp256r1", "(", "str", ",", "unhex", "=", "True", ",", "check_on_curve", "=", "True", ")", ":", "GFp", "=", "FiniteField", "(", "int", "(", "\"FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF\"", ",", "16", ")", ")", "ec", "=", "EllipticCurve", "(", "GFp", ",", "115792089210356248762697446949407573530086143415290314195533631308867097853948", ",", "41058363725152142129326129780047268409114441015993725554835256314039467401291", ")", "point", "=", "ec", ".", "decode_from_hex", "(", "str", ",", "unhex", "=", "unhex", ")", "if", "check_on_curve", ":", "if", "point", ".", "isoncurve", "(", ")", ":", "return", "ECDSA", "(", "GFp", ",", "point", ",", "int", "(", "\"FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC\"", ",", "16", ")", ")", "else", ":", "raise", "Exception", "(", "\"Could not decode string\"", ")", "return", "ECDSA", "(", "GFp", ",", "point", ",", "int", "(", "\"FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC\"", ",", "16", ")", ")" ]
decode a public key on the secp256r1 curve
[ "decode", "a", "public", "key", "on", "the", "secp256r1", "curve" ]
python
train
mongodb/mongo-python-driver
pymongo/common.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/common.py#L162-L173
def validate_integer(option, value):
    """Validates that 'value' is an integer (or basestring representation).
    """
    if isinstance(value, integer_types):
        return value
    elif isinstance(value, string_type):
        try:
            return int(value)
        except ValueError:
            raise ValueError("The value of %s must be "
                             "an integer" % (option,))
    raise TypeError("Wrong type for %s, value must be an integer" % (option,))
[ "def", "validate_integer", "(", "option", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "integer_types", ")", ":", "return", "value", "elif", "isinstance", "(", "value", ",", "string_type", ")", ":", "try", ":", "return", "int", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"The value of %s must be \"", "\"an integer\"", "%", "(", "option", ",", ")", ")", "raise", "TypeError", "(", "\"Wrong type for %s, value must be an integer\"", "%", "(", "option", ",", ")", ")" ]
Validates that 'value' is an integer (or basestring representation).
[ "Validates", "that", "value", "is", "an", "integer", "(", "or", "basestring", "representation", ")", "." ]
python
train
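A quick illustration of the validation behaviour in the pymongo record above; this standalone variant uses plain int/str in place of pymongo's py2/py3 compatibility aliases:

def validate_integer(option, value):
    # Accept ints directly, coerce numeric strings, reject everything else.
    if isinstance(value, int):
        return value
    if isinstance(value, str):
        try:
            return int(value)
        except ValueError:
            raise ValueError("The value of %s must be an integer" % (option,))
    raise TypeError("Wrong type for %s, value must be an integer" % (option,))

assert validate_integer("maxPoolSize", 100) == 100
assert validate_integer("maxPoolSize", "100") == 100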
bristosoft/financial
finance.py
https://github.com/bristosoft/financial/blob/382c4fef610d67777d7109d9d0ae230ab67ca20f/finance.py#L90-L100
def syd(c, s, l):
    """ This accountancy function computes sum of the years digits
    depreciation for an asset purchased for cash with a known life
    span and salvage value. The depreciation is returned as a list
    in python.
    c = historical cost or price paid
    s = the expected salvage proceeds
    l = expected useful life of the fixed asset
    Example: syd(1000, 100, 5)
    """
    return [(c-s) * (x/(l*(l+1)/2)) for x in range(l,0,-1)]
[ "def", "syd", "(", "c", ",", "s", ",", "l", ")", ":", "return", "[", "(", "c", "-", "s", ")", "*", "(", "x", "/", "(", "l", "*", "(", "l", "+", "1", ")", "/", "2", ")", ")", "for", "x", "in", "range", "(", "l", ",", "0", ",", "-", "1", ")", "]" ]
This accountancy function computes sum of the years digits depreciation for an asset purchased for cash with a known life span and salvage value. The depreciation is returned as a list in python. c = historical cost or price paid s = the expected salvage proceeds l = expected useful life of the fixed asset Example: syd(1000, 100, 5)
[ "This", "accountancy", "function", "computes", "sum", "of", "the", "years", "digits", "depreciation", "for", "an", "asset", "purchased", "for", "cash", "with", "a", "known", "life", "span", "and", "salvage", "value", ".", "The", "depreciation", "is", "returned", "as", "a", "list", "in", "python", ".", "c", "=", "historcal", "cost", "or", "price", "paid", "s", "=", "the", "expected", "salvage", "proceeds", "l", "=", "expected", "useful", "life", "of", "the", "fixed", "asset", "Example", ":", "syd", "(", "1000", "100", "5", ")" ]
python
train
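A worked example of the sum-of-the-years-digits schedule produced by syd; the figures are illustrative only:

def syd(c, s, l):
    return [(c - s) * (x / (l * (l + 1) / 2)) for x in range(l, 0, -1)]

schedule = syd(1000, 100, 5)
# The years' digits sum to 15 (5+4+3+2+1), so year one takes 5/15 of the 900 depreciable base.
print(schedule)        # [300.0, 240.0, 180.0, 120.0, 60.0]
print(sum(schedule))   # 900.0, i.e. cost minus salvage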
soravux/scoop
scoop/bootstrap/__main__.py
https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/scoop/bootstrap/__main__.py#L240-L293
def run(self, globs=None): """Import user module and start __main__ passing globals() is required when subclassing in another module """ # Without this, the underneath import clashes with the top-level one global scoop if globs is None: globs = globals() # import the user module if scoop.MAIN_MODULE: globs.update(self.setupEnvironment(self)) # Start the user program from scoop import futures def futures_startup(): """Execute the user code. Wraps futures._startup (SCOOP initialisation) over the user module. Needs """ return futures._startup( functools.partial( runpy.run_path, scoop.MAIN_MODULE, init_globals=globs, run_name="__main__" ) ) if self.args.profile: import cProfile # runctx instead of run is required for local function try: os.makedirs("profile") except: pass cProfile.runctx( "futures_startup()", globs, locals(), "./profile/{0}.prof".format(os.getpid()) ) else: try: futures_startup() finally: # Must reimport (potentially not there after bootstrap) import scoop # Ensure a communication queue exists (may happend when a # connection wasn't established such as cloud-mode wait). if scoop._control.execQueue: scoop._control.execQueue.shutdown()
[ "def", "run", "(", "self", ",", "globs", "=", "None", ")", ":", "# Without this, the underneath import clashes with the top-level one", "global", "scoop", "if", "globs", "is", "None", ":", "globs", "=", "globals", "(", ")", "# import the user module", "if", "scoop", ".", "MAIN_MODULE", ":", "globs", ".", "update", "(", "self", ".", "setupEnvironment", "(", "self", ")", ")", "# Start the user program", "from", "scoop", "import", "futures", "def", "futures_startup", "(", ")", ":", "\"\"\"Execute the user code.\n Wraps futures._startup (SCOOP initialisation) over the user module.\n Needs \"\"\"", "return", "futures", ".", "_startup", "(", "functools", ".", "partial", "(", "runpy", ".", "run_path", ",", "scoop", ".", "MAIN_MODULE", ",", "init_globals", "=", "globs", ",", "run_name", "=", "\"__main__\"", ")", ")", "if", "self", ".", "args", ".", "profile", ":", "import", "cProfile", "# runctx instead of run is required for local function", "try", ":", "os", ".", "makedirs", "(", "\"profile\"", ")", "except", ":", "pass", "cProfile", ".", "runctx", "(", "\"futures_startup()\"", ",", "globs", ",", "locals", "(", ")", ",", "\"./profile/{0}.prof\"", ".", "format", "(", "os", ".", "getpid", "(", ")", ")", ")", "else", ":", "try", ":", "futures_startup", "(", ")", "finally", ":", "# Must reimport (potentially not there after bootstrap)", "import", "scoop", "# Ensure a communication queue exists (may happend when a", "# connection wasn't established such as cloud-mode wait).", "if", "scoop", ".", "_control", ".", "execQueue", ":", "scoop", ".", "_control", ".", "execQueue", ".", "shutdown", "(", ")" ]
Import user module and start __main__ passing globals() is required when subclassing in another module
[ "Import", "user", "module", "and", "start", "__main__", "passing", "globals", "()", "is", "required", "when", "subclassing", "in", "another", "module" ]
python
train
saltstack/salt
salt/pillar/nsot.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/nsot.py#L150-L169
def _proxy_info(minion_id, api_url, email, secret_key, fqdn_separator):
    '''
    retrieve a dict of a device that exists in nsot

    :param minion_id: str
    :param api_url: str
    :param email: str
    :param secret_key: str
    :param fqdn_separator: str
    :return: dict
    '''
    device_info = {}
    if fqdn_separator:
        minion_id = minion_id.replace('.', fqdn_separator)
    token = _get_token(api_url, email, secret_key)
    if token:
        headers = {'Authorization': 'AuthToken {}:{}'.format(email, token)}
        device_info = _query_nsot(api_url, headers, device=minion_id)

    return device_info
[ "def", "_proxy_info", "(", "minion_id", ",", "api_url", ",", "email", ",", "secret_key", ",", "fqdn_separator", ")", ":", "device_info", "=", "{", "}", "if", "fqdn_separator", ":", "minion_id", "=", "minion_id", ".", "replace", "(", "'.'", ",", "fqdn_separator", ")", "token", "=", "_get_token", "(", "api_url", ",", "email", ",", "secret_key", ")", "if", "token", ":", "headers", "=", "{", "'Authorization'", ":", "'AuthToken {}:{}'", ".", "format", "(", "email", ",", "token", ")", "}", "device_info", "=", "_query_nsot", "(", "api_url", ",", "headers", ",", "device", "=", "minion_id", ")", "return", "device_info" ]
retrieve a dict of a device that exists in nsot :param minion_id: str :param api_url: str :param email: str :param secret_key: str :param fqdn_separator: str :return: dict
[ "retrieve", "a", "dict", "of", "a", "device", "that", "exists", "in", "nsot" ]
python
train
Tanganelli/CoAPthon3
coapthon/messages/option.py
https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/messages/option.py#L38-L52
def value(self):
    """
    Return the option value.

    :return: the option value in the correct format depending on the option
    """
    if type(self._value) is None:
        self._value = bytearray()
    opt_type = defines.OptionRegistry.LIST[self._number].value_type
    if opt_type == defines.INTEGER:
        if byte_len(self._value) > 0:
            return int(self._value)
        else:
            return defines.OptionRegistry.LIST[self._number].default
    return self._value
[ "def", "value", "(", "self", ")", ":", "if", "type", "(", "self", ".", "_value", ")", "is", "None", ":", "self", ".", "_value", "=", "bytearray", "(", ")", "opt_type", "=", "defines", ".", "OptionRegistry", ".", "LIST", "[", "self", ".", "_number", "]", ".", "value_type", "if", "opt_type", "==", "defines", ".", "INTEGER", ":", "if", "byte_len", "(", "self", ".", "_value", ")", ">", "0", ":", "return", "int", "(", "self", ".", "_value", ")", "else", ":", "return", "defines", ".", "OptionRegistry", ".", "LIST", "[", "self", ".", "_number", "]", ".", "default", "return", "self", ".", "_value" ]
Return the option value. :return: the option value in the correct format depending on the option
[ "Return", "the", "option", "value", "." ]
python
train
lyft/python-kmsauth
kmsauth/services.py
https://github.com/lyft/python-kmsauth/blob/aa2dd957a5d3e58c89fe51a55c6053ff81d9191e/kmsauth/services.py#L80-L92
def get_boto_session( region, aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None ): """Get a boto3 session.""" return boto3.session.Session( region_name=region, aws_secret_access_key=aws_secret_access_key, aws_access_key_id=aws_access_key_id, aws_session_token=aws_session_token )
[ "def", "get_boto_session", "(", "region", ",", "aws_access_key_id", "=", "None", ",", "aws_secret_access_key", "=", "None", ",", "aws_session_token", "=", "None", ")", ":", "return", "boto3", ".", "session", ".", "Session", "(", "region_name", "=", "region", ",", "aws_secret_access_key", "=", "aws_secret_access_key", ",", "aws_access_key_id", "=", "aws_access_key_id", ",", "aws_session_token", "=", "aws_session_token", ")" ]
Get a boto3 session.
[ "Get", "a", "boto3", "session", "." ]
python
train
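For context, the helper above simply forwards its arguments to boto3; the snippet below (assuming boto3 is installed) builds a purely local session object and does not contact AWS:

import boto3

session = boto3.session.Session(region_name='us-east-1')
print(session.region_name)   # 'us-east-1'; credentials fall back to the default chain when omitted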
mikedh/trimesh
trimesh/sample.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/sample.py#L117-L140
def sample_surface_even(mesh, count):
    """
    Sample the surface of a mesh, returning samples which are
    approximately evenly spaced.

    Parameters
    ---------
    mesh: Trimesh object
    count: number of points to return

    Returns
    ---------
    samples: (count,3) points in space on the surface of mesh
    face_index: (count,) indices of faces for each sampled point
    """
    from .points import remove_close

    radius = np.sqrt(mesh.area / (2 * count))
    samples, ids = sample_surface(mesh, count * 5)

    result, mask = remove_close(samples, radius)

    return result, ids[mask]
[ "def", "sample_surface_even", "(", "mesh", ",", "count", ")", ":", "from", ".", "points", "import", "remove_close", "radius", "=", "np", ".", "sqrt", "(", "mesh", ".", "area", "/", "(", "2", "*", "count", ")", ")", "samples", ",", "ids", "=", "sample_surface", "(", "mesh", ",", "count", "*", "5", ")", "result", ",", "mask", "=", "remove_close", "(", "samples", ",", "radius", ")", "return", "result", ",", "ids", "[", "mask", "]" ]
Sample the surface of a mesh, returning samples which are approximately evenly spaced. Parameters --------- mesh: Trimesh object count: number of points to return Returns --------- samples: (count,3) points in space on the surface of mesh face_index: (count,) indices of faces for each sampled point
[ "Sample", "the", "surface", "of", "a", "mesh", "returning", "samples", "which", "are", "approximately", "evenly", "spaced", "." ]
python
train
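A short usage sketch for the sampler above (assuming trimesh is installed); note that fewer than count points may come back, since oversampled points that land too close together are culled:

from trimesh import creation
from trimesh.sample import sample_surface_even

mesh = creation.box(extents=(1.0, 1.0, 1.0))          # simple unit cube as a test shape
points, face_index = sample_surface_even(mesh, 500)
print(points.shape, face_index.shape)                 # about (500, 3) and (500,), possibly fewer rows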
spyder-ide/spyder
spyder/app/mainwindow.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/app/mainwindow.py#L1265-L1349
def post_visible_setup(self): """Actions to be performed only after the main window's `show` method was triggered""" self.restore_scrollbar_position.emit() # [Workaround for Issue 880] # QDockWidget objects are not painted if restored as floating # windows, so we must dock them before showing the mainwindow, # then set them again as floating windows here. for widget in self.floating_dockwidgets: widget.setFloating(True) # In MacOS X 10.7 our app is not displayed after initialized (I don't # know why because this doesn't happen when started from the terminal), # so we need to resort to this hack to make it appear. if running_in_mac_app(): idx = __file__.index(MAC_APP_NAME) app_path = __file__[:idx] subprocess.call(['open', app_path + MAC_APP_NAME]) # Server to maintain just one Spyder instance and open files in it if # the user tries to start other instances with # $ spyder foo.py if (CONF.get('main', 'single_instance') and not self.new_instance and self.open_files_server): t = threading.Thread(target=self.start_open_files_server) t.setDaemon(True) t.start() # Connect the window to the signal emmited by the previous server # when it gets a client connected to it self.sig_open_external_file.connect(self.open_external_file) # Create Plugins and toolbars submenus self.create_plugins_menu() self.create_toolbars_menu() # Update toolbar visibility status self.toolbars_visible = CONF.get('main', 'toolbars_visible') self.load_last_visible_toolbars() # Update lock status self.lock_interface_action.setChecked(self.interface_locked) # Hide Internal Console so that people don't use it instead of # the External or IPython ones if self.console.dockwidget.isVisible() and DEV is None: self.console.toggle_view_action.setChecked(False) self.console.dockwidget.hide() # Show Help and Consoles by default plugins_to_show = [self.ipyconsole] if self.help is not None: plugins_to_show.append(self.help) for plugin in plugins_to_show: if plugin.dockwidget.isVisible(): plugin.dockwidget.raise_() # Show history file if no console is visible if not self.ipyconsole.isvisible: self.historylog.add_history(get_conf_path('history.py')) if self.open_project: self.projects.open_project(self.open_project) else: # Load last project if a project was active when Spyder # was closed self.projects.reopen_last_project() # If no project is active, load last session if self.projects.get_active_project() is None: self.editor.setup_open_files() # Check for spyder updates if DEV is None and CONF.get('main', 'check_updates_on_startup'): self.give_updates_feedback = False self.check_updates(startup=True) # Show dialog with missing dependencies self.report_missing_dependencies() # Raise the menuBar to the top of the main window widget's stack # (Fixes issue 3887) self.menuBar().raise_() self.is_setting_up = False
[ "def", "post_visible_setup", "(", "self", ")", ":", "self", ".", "restore_scrollbar_position", ".", "emit", "(", ")", "# [Workaround for Issue 880]\r", "# QDockWidget objects are not painted if restored as floating\r", "# windows, so we must dock them before showing the mainwindow,\r", "# then set them again as floating windows here.\r", "for", "widget", "in", "self", ".", "floating_dockwidgets", ":", "widget", ".", "setFloating", "(", "True", ")", "# In MacOS X 10.7 our app is not displayed after initialized (I don't\r", "# know why because this doesn't happen when started from the terminal),\r", "# so we need to resort to this hack to make it appear.\r", "if", "running_in_mac_app", "(", ")", ":", "idx", "=", "__file__", ".", "index", "(", "MAC_APP_NAME", ")", "app_path", "=", "__file__", "[", ":", "idx", "]", "subprocess", ".", "call", "(", "[", "'open'", ",", "app_path", "+", "MAC_APP_NAME", "]", ")", "# Server to maintain just one Spyder instance and open files in it if\r", "# the user tries to start other instances with\r", "# $ spyder foo.py\r", "if", "(", "CONF", ".", "get", "(", "'main'", ",", "'single_instance'", ")", "and", "not", "self", ".", "new_instance", "and", "self", ".", "open_files_server", ")", ":", "t", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "start_open_files_server", ")", "t", ".", "setDaemon", "(", "True", ")", "t", ".", "start", "(", ")", "# Connect the window to the signal emmited by the previous server\r", "# when it gets a client connected to it\r", "self", ".", "sig_open_external_file", ".", "connect", "(", "self", ".", "open_external_file", ")", "# Create Plugins and toolbars submenus\r", "self", ".", "create_plugins_menu", "(", ")", "self", ".", "create_toolbars_menu", "(", ")", "# Update toolbar visibility status\r", "self", ".", "toolbars_visible", "=", "CONF", ".", "get", "(", "'main'", ",", "'toolbars_visible'", ")", "self", ".", "load_last_visible_toolbars", "(", ")", "# Update lock status\r", "self", ".", "lock_interface_action", ".", "setChecked", "(", "self", ".", "interface_locked", ")", "# Hide Internal Console so that people don't use it instead of\r", "# the External or IPython ones\r", "if", "self", ".", "console", ".", "dockwidget", ".", "isVisible", "(", ")", "and", "DEV", "is", "None", ":", "self", ".", "console", ".", "toggle_view_action", ".", "setChecked", "(", "False", ")", "self", ".", "console", ".", "dockwidget", ".", "hide", "(", ")", "# Show Help and Consoles by default\r", "plugins_to_show", "=", "[", "self", ".", "ipyconsole", "]", "if", "self", ".", "help", "is", "not", "None", ":", "plugins_to_show", ".", "append", "(", "self", ".", "help", ")", "for", "plugin", "in", "plugins_to_show", ":", "if", "plugin", ".", "dockwidget", ".", "isVisible", "(", ")", ":", "plugin", ".", "dockwidget", ".", "raise_", "(", ")", "# Show history file if no console is visible\r", "if", "not", "self", ".", "ipyconsole", ".", "isvisible", ":", "self", ".", "historylog", ".", "add_history", "(", "get_conf_path", "(", "'history.py'", ")", ")", "if", "self", ".", "open_project", ":", "self", ".", "projects", ".", "open_project", "(", "self", ".", "open_project", ")", "else", ":", "# Load last project if a project was active when Spyder\r", "# was closed\r", "self", ".", "projects", ".", "reopen_last_project", "(", ")", "# If no project is active, load last session\r", "if", "self", ".", "projects", ".", "get_active_project", "(", ")", "is", "None", ":", "self", ".", "editor", ".", "setup_open_files", "(", ")", "# Check for 
spyder updates\r", "if", "DEV", "is", "None", "and", "CONF", ".", "get", "(", "'main'", ",", "'check_updates_on_startup'", ")", ":", "self", ".", "give_updates_feedback", "=", "False", "self", ".", "check_updates", "(", "startup", "=", "True", ")", "# Show dialog with missing dependencies\r", "self", ".", "report_missing_dependencies", "(", ")", "# Raise the menuBar to the top of the main window widget's stack\r", "# (Fixes issue 3887)\r", "self", ".", "menuBar", "(", ")", ".", "raise_", "(", ")", "self", ".", "is_setting_up", "=", "False" ]
Actions to be performed only after the main window's `show` method was triggered
[ "Actions", "to", "be", "performed", "only", "after", "the", "main", "window", "s", "show", "method", "was", "triggered" ]
python
train
liip/taxi
taxi/plugins.py
https://github.com/liip/taxi/blob/269423c1f1ab571bd01a522819afe3e325bfbff6/taxi/plugins.py#L116-L121
def register_commands(self):
    """
    Load entry points for custom commands.
    """
    for command in self._entry_points[self.COMMANDS_ENTRY_POINT].values():
        command.load()
[ "def", "register_commands", "(", "self", ")", ":", "for", "command", "in", "self", ".", "_entry_points", "[", "self", ".", "COMMANDS_ENTRY_POINT", "]", ".", "values", "(", ")", ":", "command", ".", "load", "(", ")" ]
Load entry points for custom commands.
[ "Load", "entry", "points", "for", "custom", "commands", "." ]
python
train
i3visio/osrframework
osrframework/thirdparties/pipl_com/lib/fields.py
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/thirdparties/pipl_com/lib/fields.py#L233-L237
def is_searchable(self):
    """A bool value that indicates whether the address is a valid address
    to search by."""
    return self.raw or (self.is_valid_country and
                        (not self.state or self.is_valid_state))
[ "def", "is_searchable", "(", "self", ")", ":", "return", "self", ".", "raw", "or", "(", "self", ".", "is_valid_country", "and", "(", "not", "self", ".", "state", "or", "self", ".", "is_valid_state", ")", ")" ]
A bool value that indicates whether the address is a valid address to search by.
[ "A", "bool", "value", "that", "indicates", "whether", "the", "address", "is", "a", "valid", "address", "to", "search", "by", "." ]
python
train
mkoura/dump2polarion
dump2polarion/configuration.py
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/configuration.py#L163-L192
def get_config(config_file=None, config_values=None, load_project_conf=True):
    """Loads config file and returns its content."""
    config_values = config_values or {}
    config_settings = {}

    default_conf = _get_default_conf()
    user_conf = _get_user_conf(config_file) if config_file else {}
    # load project configuration only when user configuration was not specified
    project_conf = {} if user_conf or not load_project_conf else _get_project_conf()

    if not (user_conf or project_conf or config_values):
        if load_project_conf:
            raise Dump2PolarionException(
                "Failed to find configuration file for the project "
                "and no configuration file or values passed."
            )
        raise Dump2PolarionException("No configuration file or values passed.")

    # merge configuration
    config_settings.update(default_conf)
    config_settings.update(user_conf)
    config_settings.update(project_conf)
    config_settings.update(config_values)

    _populate_urls(config_settings)
    _set_legacy_project_id(config_settings)
    _set_legacy_custom_fields(config_settings)
    _check_config(config_settings)

    return config_settings
[ "def", "get_config", "(", "config_file", "=", "None", ",", "config_values", "=", "None", ",", "load_project_conf", "=", "True", ")", ":", "config_values", "=", "config_values", "or", "{", "}", "config_settings", "=", "{", "}", "default_conf", "=", "_get_default_conf", "(", ")", "user_conf", "=", "_get_user_conf", "(", "config_file", ")", "if", "config_file", "else", "{", "}", "# load project configuration only when user configuration was not specified", "project_conf", "=", "{", "}", "if", "user_conf", "or", "not", "load_project_conf", "else", "_get_project_conf", "(", ")", "if", "not", "(", "user_conf", "or", "project_conf", "or", "config_values", ")", ":", "if", "load_project_conf", ":", "raise", "Dump2PolarionException", "(", "\"Failed to find configuration file for the project \"", "\"and no configuration file or values passed.\"", ")", "raise", "Dump2PolarionException", "(", "\"No configuration file or values passed.\"", ")", "# merge configuration", "config_settings", ".", "update", "(", "default_conf", ")", "config_settings", ".", "update", "(", "user_conf", ")", "config_settings", ".", "update", "(", "project_conf", ")", "config_settings", ".", "update", "(", "config_values", ")", "_populate_urls", "(", "config_settings", ")", "_set_legacy_project_id", "(", "config_settings", ")", "_set_legacy_custom_fields", "(", "config_settings", ")", "_check_config", "(", "config_settings", ")", "return", "config_settings" ]
Loads config file and returns its content.
[ "Loads", "config", "file", "and", "returns", "its", "content", "." ]
python
train
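The merge order in get_config means later sources override earlier ones; a tiny standalone illustration of that precedence, with made-up keys and values:

default_conf = {'polarion-url': 'https://polarion.example.com', 'verify': True}
user_conf = {'verify': False}
config_values = {'polarion-url': 'https://other.example.com'}

config_settings = {}
for source in (default_conf, user_conf, config_values):
    config_settings.update(source)   # later update() calls override earlier keys

print(config_settings)  # {'polarion-url': 'https://other.example.com', 'verify': False}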
summa-tx/riemann
riemann/tx/sprout.py
https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/sprout.py#L188-L197
def calculate_fee(self, input_values):
    '''
    Tx, list(int) -> int
    '''
    total_in = sum(input_values)
    total_out = sum([utils.le2i(tx_out.value) for tx_out in self.tx_outs])
    for js in self.tx_joinsplits:
        total_in += utils.le2i(js.vpub_new)
        total_out += utils.le2i(js.vpub_old)
    return total_in - total_out
[ "def", "calculate_fee", "(", "self", ",", "input_values", ")", ":", "total_in", "=", "sum", "(", "input_values", ")", "total_out", "=", "sum", "(", "[", "utils", ".", "le2i", "(", "tx_out", ".", "value", ")", "for", "tx_out", "in", "self", ".", "tx_outs", "]", ")", "for", "js", "in", "self", ".", "tx_joinsplits", ":", "total_in", "+=", "utils", ".", "le2i", "(", "js", ".", "vpub_new", ")", "total_out", "+=", "utils", ".", "le2i", "(", "js", ".", "vpub_old", ")", "return", "total_in", "-", "total_out" ]
Tx, list(int) -> int
[ "Tx", "list", "(", "int", ")", "-", ">", "int" ]
python
train
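The fee is total inputs minus total outputs, with each JoinSplit's vpub_new counted on the input side and vpub_old on the output side; a plain-number sketch of that arithmetic (amounts are arbitrary):

input_values = [50000, 25000]            # integer amounts, e.g. zatoshi
output_values = [60000]
joinsplits = [{'vpub_new': 10000, 'vpub_old': 20000}]

total_in = sum(input_values) + sum(js['vpub_new'] for js in joinsplits)
total_out = sum(output_values) + sum(js['vpub_old'] for js in joinsplits)
print(total_in - total_out)              # 5000 left over as the fee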
jim-easterbrook/pywws
src/pywws/timezone.py
https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/timezone.py#L81-L85
def to_utc(self, dt):
    """Convert any timestamp to UTC (with tzinfo)."""
    if dt.tzinfo is None:
        return dt.replace(tzinfo=self.utc)
    return dt.astimezone(self.utc)
[ "def", "to_utc", "(", "self", ",", "dt", ")", ":", "if", "dt", ".", "tzinfo", "is", "None", ":", "return", "dt", ".", "replace", "(", "tzinfo", "=", "self", ".", "utc", ")", "return", "dt", ".", "astimezone", "(", "self", ".", "utc", ")" ]
Convert any timestamp to UTC (with tzinfo).
[ "Convert", "any", "timestamp", "to", "UTC", "(", "with", "tzinfo", ")", "." ]
python
train
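The same naive-versus-aware handling can be reproduced with the standard library alone; datetime.timezone.utc stands in here for pywws's own utc object:

from datetime import datetime, timezone, timedelta

def to_utc(dt, utc=timezone.utc):
    # Naive timestamps are assumed to already be UTC; aware ones are converted.
    if dt.tzinfo is None:
        return dt.replace(tzinfo=utc)
    return dt.astimezone(utc)

naive = datetime(2024, 1, 1, 12, 0)
aware = datetime(2024, 1, 1, 12, 0, tzinfo=timezone(timedelta(hours=2)))
print(to_utc(naive))   # 2024-01-01 12:00:00+00:00
print(to_utc(aware))   # 2024-01-01 10:00:00+00:00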
delfick/harpoon
harpoon/ship/runner.py
https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/ship/runner.py#L226-L328
def create_container(self, conf, detach, tty): """Create a single container""" name = conf.name image_name = conf.image_name if conf.tag is not NotSpecified: image_name = conf.image_name_with_tag container_name = conf.container_name with conf.assumed_role(): env = dict(e.pair for e in conf.env) binds = conf.volumes.binds command = conf.formatted_command volume_names = conf.volumes.volume_names volumes_from = list(conf.volumes.share_with_names) no_tty_option = conf.no_tty_option ports = [p.container_port.port_pair for p in conf.ports] port_bindings = self.exposed(conf.ports) uncreated = [] for name in binds: if not os.path.exists(name): log.info("Making volume for mounting\tvolume=%s", name) try: os.makedirs(name) except OSError as error: uncreated.append((name, error)) if uncreated: raise BadOption("Failed to create some volumes on the host", uncreated=uncreated) log.info("Creating container from %s\timage=%s\tcontainer_name=%s\ttty=%s", image_name, name, container_name, tty) if binds: log.info("\tUsing volumes\tvolumes=%s", volume_names) if env: log.info("\tUsing environment\tenv=%s", sorted(env.keys())) if ports: log.info("\tUsing ports\tports=%s", ports) if port_bindings: log.info("\tPort bindings: %s", port_bindings) if volumes_from: log.info("\tVolumes from: %s", volumes_from) host_config = conf.harpoon.docker_api.create_host_config( binds = binds , volumes_from = volumes_from , port_bindings = port_bindings , devices = conf.devices , lxc_conf = conf.lxc_conf , privileged = conf.privileged , restart_policy = conf.restart_policy , dns = conf.network.dns , dns_search = conf.network.dns_search , extra_hosts = conf.network.extra_hosts , network_mode = conf.network.network_mode , publish_all_ports = conf.network.publish_all_ports , cap_add = conf.cpu.cap_add , cap_drop = conf.cpu.cap_drop , mem_limit = conf.cpu.mem_limit , cpu_shares = conf.cpu.cpu_shares , cpuset_cpus = conf.cpu.cpuset_cpus , cpuset_mems = conf.cpu.cpuset_mems , memswap_limit = conf.cpu.memswap_limit , ulimits = conf.ulimits , read_only = conf.read_only_rootfs , log_config = conf.log_config , security_opt = conf.security_opt , **conf.other_options.host_config ) container_id = conf.harpoon.docker_api.create_container(image_name , name=container_name , detach=detach , command=command , volumes=volume_names , environment=env , tty = False if no_tty_option else tty , user = conf.user , ports = ports , stdin_open = tty , hostname = conf.network.hostname , domainname = conf.network.domainname , network_disabled = conf.network.disabled , host_config = host_config , **conf.other_options.create ) if isinstance(container_id, dict): if "errorDetail" in container_id: raise BadImage("Failed to create container", image=name, error=container_id["errorDetail"]) container_id = container_id["Id"] return container_id
[ "def", "create_container", "(", "self", ",", "conf", ",", "detach", ",", "tty", ")", ":", "name", "=", "conf", ".", "name", "image_name", "=", "conf", ".", "image_name", "if", "conf", ".", "tag", "is", "not", "NotSpecified", ":", "image_name", "=", "conf", ".", "image_name_with_tag", "container_name", "=", "conf", ".", "container_name", "with", "conf", ".", "assumed_role", "(", ")", ":", "env", "=", "dict", "(", "e", ".", "pair", "for", "e", "in", "conf", ".", "env", ")", "binds", "=", "conf", ".", "volumes", ".", "binds", "command", "=", "conf", ".", "formatted_command", "volume_names", "=", "conf", ".", "volumes", ".", "volume_names", "volumes_from", "=", "list", "(", "conf", ".", "volumes", ".", "share_with_names", ")", "no_tty_option", "=", "conf", ".", "no_tty_option", "ports", "=", "[", "p", ".", "container_port", ".", "port_pair", "for", "p", "in", "conf", ".", "ports", "]", "port_bindings", "=", "self", ".", "exposed", "(", "conf", ".", "ports", ")", "uncreated", "=", "[", "]", "for", "name", "in", "binds", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "name", ")", ":", "log", ".", "info", "(", "\"Making volume for mounting\\tvolume=%s\"", ",", "name", ")", "try", ":", "os", ".", "makedirs", "(", "name", ")", "except", "OSError", "as", "error", ":", "uncreated", ".", "append", "(", "(", "name", ",", "error", ")", ")", "if", "uncreated", ":", "raise", "BadOption", "(", "\"Failed to create some volumes on the host\"", ",", "uncreated", "=", "uncreated", ")", "log", ".", "info", "(", "\"Creating container from %s\\timage=%s\\tcontainer_name=%s\\ttty=%s\"", ",", "image_name", ",", "name", ",", "container_name", ",", "tty", ")", "if", "binds", ":", "log", ".", "info", "(", "\"\\tUsing volumes\\tvolumes=%s\"", ",", "volume_names", ")", "if", "env", ":", "log", ".", "info", "(", "\"\\tUsing environment\\tenv=%s\"", ",", "sorted", "(", "env", ".", "keys", "(", ")", ")", ")", "if", "ports", ":", "log", ".", "info", "(", "\"\\tUsing ports\\tports=%s\"", ",", "ports", ")", "if", "port_bindings", ":", "log", ".", "info", "(", "\"\\tPort bindings: %s\"", ",", "port_bindings", ")", "if", "volumes_from", ":", "log", ".", "info", "(", "\"\\tVolumes from: %s\"", ",", "volumes_from", ")", "host_config", "=", "conf", ".", "harpoon", ".", "docker_api", ".", "create_host_config", "(", "binds", "=", "binds", ",", "volumes_from", "=", "volumes_from", ",", "port_bindings", "=", "port_bindings", ",", "devices", "=", "conf", ".", "devices", ",", "lxc_conf", "=", "conf", ".", "lxc_conf", ",", "privileged", "=", "conf", ".", "privileged", ",", "restart_policy", "=", "conf", ".", "restart_policy", ",", "dns", "=", "conf", ".", "network", ".", "dns", ",", "dns_search", "=", "conf", ".", "network", ".", "dns_search", ",", "extra_hosts", "=", "conf", ".", "network", ".", "extra_hosts", ",", "network_mode", "=", "conf", ".", "network", ".", "network_mode", ",", "publish_all_ports", "=", "conf", ".", "network", ".", "publish_all_ports", ",", "cap_add", "=", "conf", ".", "cpu", ".", "cap_add", ",", "cap_drop", "=", "conf", ".", "cpu", ".", "cap_drop", ",", "mem_limit", "=", "conf", ".", "cpu", ".", "mem_limit", ",", "cpu_shares", "=", "conf", ".", "cpu", ".", "cpu_shares", ",", "cpuset_cpus", "=", "conf", ".", "cpu", ".", "cpuset_cpus", ",", "cpuset_mems", "=", "conf", ".", "cpu", ".", "cpuset_mems", ",", "memswap_limit", "=", "conf", ".", "cpu", ".", "memswap_limit", ",", "ulimits", "=", "conf", ".", "ulimits", ",", "read_only", "=", "conf", ".", "read_only_rootfs", ",", "log_config", "=", "conf", ".", 
"log_config", ",", "security_opt", "=", "conf", ".", "security_opt", ",", "*", "*", "conf", ".", "other_options", ".", "host_config", ")", "container_id", "=", "conf", ".", "harpoon", ".", "docker_api", ".", "create_container", "(", "image_name", ",", "name", "=", "container_name", ",", "detach", "=", "detach", ",", "command", "=", "command", ",", "volumes", "=", "volume_names", ",", "environment", "=", "env", ",", "tty", "=", "False", "if", "no_tty_option", "else", "tty", ",", "user", "=", "conf", ".", "user", ",", "ports", "=", "ports", ",", "stdin_open", "=", "tty", ",", "hostname", "=", "conf", ".", "network", ".", "hostname", ",", "domainname", "=", "conf", ".", "network", ".", "domainname", ",", "network_disabled", "=", "conf", ".", "network", ".", "disabled", ",", "host_config", "=", "host_config", ",", "*", "*", "conf", ".", "other_options", ".", "create", ")", "if", "isinstance", "(", "container_id", ",", "dict", ")", ":", "if", "\"errorDetail\"", "in", "container_id", ":", "raise", "BadImage", "(", "\"Failed to create container\"", ",", "image", "=", "name", ",", "error", "=", "container_id", "[", "\"errorDetail\"", "]", ")", "container_id", "=", "container_id", "[", "\"Id\"", "]", "return", "container_id" ]
Create a single container
[ "Create", "a", "single", "container" ]
python
train
python-diamond/Diamond
src/collectors/memcached/memcached.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/memcached/memcached.py#L61-L78
def get_default_config(self):
    """
    Returns the default collector settings
    """
    config = super(MemcachedCollector, self).get_default_config()
    config.update({
        'path': 'memcached',

        # Which rows of 'status' you would like to publish.
        # 'telnet host port' and type stats and hit enter to see the list of
        # possibilities.
        # Leave unset to publish all
        # 'publish': ''

        # Connection settings
        'hosts': ['localhost:11211']
    })
    return config
[ "def", "get_default_config", "(", "self", ")", ":", "config", "=", "super", "(", "MemcachedCollector", ",", "self", ")", ".", "get_default_config", "(", ")", "config", ".", "update", "(", "{", "'path'", ":", "'memcached'", ",", "# Which rows of 'status' you would like to publish.", "# 'telnet host port' and type stats and hit enter to see the list of", "# possibilities.", "# Leave unset to publish all", "# 'publish': ''", "# Connection settings", "'hosts'", ":", "[", "'localhost:11211'", "]", "}", ")", "return", "config" ]
Returns the default collector settings
[ "Returns", "the", "default", "collector", "settings" ]
python
train
googledatalab/pydatalab
google/datalab/kernel/__init__.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/kernel/__init__.py#L44-L122
def load_ipython_extension(shell): """ Called when the extension is loaded. Args: shell - (NotebookWebApplication): handle to the Notebook interactive shell instance. """ # Inject our user agent on all requests by monkey-patching a wrapper around httplib2.Http.request. def _request(self, uri, method="GET", body=None, headers=None, redirections=_httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None): if headers is None: headers = {} headers['user-agent'] = 'GoogleCloudDataLab/1.0' return _orig_request(self, uri, method=method, body=body, headers=headers, redirections=redirections, connection_type=connection_type) _httplib2.Http.request = _request # Similarly for the requests library. def _init_session(self): _orig_init(self) self.headers['User-Agent'] = 'GoogleCloudDataLab/1.0' _requests.Session.__init__ = _init_session # Be more tolerant with magics. If the user specified a cell magic that doesn't # exist and an empty cell body but a line magic with that name exists, run that # instead. Conversely, if the user specified a line magic that doesn't exist but # a cell magic exists with that name, run the cell magic with an empty body. def _run_line_magic(self, magic_name, line): fn = self.find_line_magic(magic_name) if fn is None: cm = self.find_cell_magic(magic_name) if cm: return _run_cell_magic(self, magic_name, line, None) return _orig_run_line_magic(self, magic_name, line) def _run_cell_magic(self, magic_name, line, cell): if cell is None or len(cell) == 0 or cell.isspace(): fn = self.find_line_magic(magic_name) if fn: return _orig_run_line_magic(self, magic_name, line) # IPython will complain if cell is empty string but not if it is None cell = None return _orig_run_cell_magic(self, magic_name, line, cell) _shell.InteractiveShell.run_cell_magic = _run_cell_magic _shell.InteractiveShell.run_line_magic = _run_line_magic # Define global 'project_id' and 'set_project_id' functions to manage the default project ID. We # do this conditionally in a try/catch # to avoid the call to Context.default() when running tests # which mock IPython.get_ipython(). def _get_project_id(): try: return google.datalab.Context.default().project_id except Exception: return None def _set_project_id(project_id): context = google.datalab.Context.default() context.set_project_id(project_id) try: from datalab.context import Context as _old_context _old_context.default().set_project_id(project_id) except ImportError: # If the old library is not loaded, then we don't have to do anything pass try: if 'datalab_project_id' not in _IPython.get_ipython().user_ns: _IPython.get_ipython().user_ns['datalab_project_id'] = _get_project_id _IPython.get_ipython().user_ns['set_datalab_project_id'] = _set_project_id except TypeError: pass
[ "def", "load_ipython_extension", "(", "shell", ")", ":", "# Inject our user agent on all requests by monkey-patching a wrapper around httplib2.Http.request.", "def", "_request", "(", "self", ",", "uri", ",", "method", "=", "\"GET\"", ",", "body", "=", "None", ",", "headers", "=", "None", ",", "redirections", "=", "_httplib2", ".", "DEFAULT_MAX_REDIRECTS", ",", "connection_type", "=", "None", ")", ":", "if", "headers", "is", "None", ":", "headers", "=", "{", "}", "headers", "[", "'user-agent'", "]", "=", "'GoogleCloudDataLab/1.0'", "return", "_orig_request", "(", "self", ",", "uri", ",", "method", "=", "method", ",", "body", "=", "body", ",", "headers", "=", "headers", ",", "redirections", "=", "redirections", ",", "connection_type", "=", "connection_type", ")", "_httplib2", ".", "Http", ".", "request", "=", "_request", "# Similarly for the requests library.", "def", "_init_session", "(", "self", ")", ":", "_orig_init", "(", "self", ")", "self", ".", "headers", "[", "'User-Agent'", "]", "=", "'GoogleCloudDataLab/1.0'", "_requests", ".", "Session", ".", "__init__", "=", "_init_session", "# Be more tolerant with magics. If the user specified a cell magic that doesn't", "# exist and an empty cell body but a line magic with that name exists, run that", "# instead. Conversely, if the user specified a line magic that doesn't exist but", "# a cell magic exists with that name, run the cell magic with an empty body.", "def", "_run_line_magic", "(", "self", ",", "magic_name", ",", "line", ")", ":", "fn", "=", "self", ".", "find_line_magic", "(", "magic_name", ")", "if", "fn", "is", "None", ":", "cm", "=", "self", ".", "find_cell_magic", "(", "magic_name", ")", "if", "cm", ":", "return", "_run_cell_magic", "(", "self", ",", "magic_name", ",", "line", ",", "None", ")", "return", "_orig_run_line_magic", "(", "self", ",", "magic_name", ",", "line", ")", "def", "_run_cell_magic", "(", "self", ",", "magic_name", ",", "line", ",", "cell", ")", ":", "if", "cell", "is", "None", "or", "len", "(", "cell", ")", "==", "0", "or", "cell", ".", "isspace", "(", ")", ":", "fn", "=", "self", ".", "find_line_magic", "(", "magic_name", ")", "if", "fn", ":", "return", "_orig_run_line_magic", "(", "self", ",", "magic_name", ",", "line", ")", "# IPython will complain if cell is empty string but not if it is None", "cell", "=", "None", "return", "_orig_run_cell_magic", "(", "self", ",", "magic_name", ",", "line", ",", "cell", ")", "_shell", ".", "InteractiveShell", ".", "run_cell_magic", "=", "_run_cell_magic", "_shell", ".", "InteractiveShell", ".", "run_line_magic", "=", "_run_line_magic", "# Define global 'project_id' and 'set_project_id' functions to manage the default project ID. 
We", "# do this conditionally in a try/catch # to avoid the call to Context.default() when running tests", "# which mock IPython.get_ipython().", "def", "_get_project_id", "(", ")", ":", "try", ":", "return", "google", ".", "datalab", ".", "Context", ".", "default", "(", ")", ".", "project_id", "except", "Exception", ":", "return", "None", "def", "_set_project_id", "(", "project_id", ")", ":", "context", "=", "google", ".", "datalab", ".", "Context", ".", "default", "(", ")", "context", ".", "set_project_id", "(", "project_id", ")", "try", ":", "from", "datalab", ".", "context", "import", "Context", "as", "_old_context", "_old_context", ".", "default", "(", ")", ".", "set_project_id", "(", "project_id", ")", "except", "ImportError", ":", "# If the old library is not loaded, then we don't have to do anything", "pass", "try", ":", "if", "'datalab_project_id'", "not", "in", "_IPython", ".", "get_ipython", "(", ")", ".", "user_ns", ":", "_IPython", ".", "get_ipython", "(", ")", ".", "user_ns", "[", "'datalab_project_id'", "]", "=", "_get_project_id", "_IPython", ".", "get_ipython", "(", ")", ".", "user_ns", "[", "'set_datalab_project_id'", "]", "=", "_set_project_id", "except", "TypeError", ":", "pass" ]
Called when the extension is loaded. Args: shell - (NotebookWebApplication): handle to the Notebook interactive shell instance.
[ "Called", "when", "the", "extension", "is", "loaded", "." ]
python
train
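One piece of the extension above is the User-Agent monkey-patch on requests.Session; a stripped-down sketch of that mechanism (assuming requests is installed, and noting the patch is process-wide once applied):

import requests

_orig_init = requests.Session.__init__

def _init_session(self):
    _orig_init(self)
    self.headers['User-Agent'] = 'GoogleCloudDataLab/1.0'

requests.Session.__init__ = _init_session
print(requests.Session().headers['User-Agent'])   # GoogleCloudDataLab/1.0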
albert12132/templar
templar/markdown.py
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L81-L101
def sub_retab(match):
    r"""Remove all tabs and convert them into spaces.

    PARAMETERS:
    match -- regex match; uses re_retab pattern: \1 is text before tab,
             \2 is a consecutive string of tabs.

    A simple substitution of 4 spaces would result in the following:

    to\tlive    # original
    to    live  # simple substitution

    Instead, we convert tabs like the following:

    to\tlive    # original
    to  live    # the tab *looks* like two spaces, so we convert
                # it to two spaces
    """
    before = match.group(1)
    tabs = len(match.group(2))
    return before + (' ' * (TAB_SIZE * tabs - len(before) % TAB_SIZE))
[ "def", "sub_retab", "(", "match", ")", ":", "before", "=", "match", ".", "group", "(", "1", ")", "tabs", "=", "len", "(", "match", ".", "group", "(", "2", ")", ")", "return", "before", "+", "(", "' '", "*", "(", "TAB_SIZE", "*", "tabs", "-", "len", "(", "before", ")", "%", "TAB_SIZE", ")", ")" ]
r"""Remove all tabs and convert them into spaces. PARAMETERS: match -- regex match; uses re_retab pattern: \1 is text before tab, \2 is a consecutive string of tabs. A simple substitution of 4 spaces would result in the following: to\tlive # original to live # simple substitution Instead, we convert tabs like the following: to\tlive # original to live # the tab *looks* like two spaces, so we convert # it to two spaces
[ "r", "Remove", "all", "tabs", "and", "convert", "them", "into", "spaces", "." ]
python
train
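A self-contained way to exercise sub_retab; the re_retab pattern is not part of the record, so the regex below is an assumed equivalent:

import re

TAB_SIZE = 4
re_retab = re.compile(r'([^\t]*)(\t+)')   # assumed pattern: text before the tabs, then the tab run

def sub_retab(match):
    before = match.group(1)
    tabs = len(match.group(2))
    return before + (' ' * (TAB_SIZE * tabs - len(before) % TAB_SIZE))

print(re_retab.sub(sub_retab, 'to\tlive'))   # 'to  live' -> two spaces reach the next tab stop
print(re_retab.sub(sub_retab, 'a\tb'))       # 'a   b'    -> three spaces reach column 4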
Esri/ArcREST
src/arcrest/manageorg/_parameters.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_parameters.py#L1775-L1782
def value(self):
    """returns the values as a dictionary"""
    val = {}
    for k in self.__allowed_keys:
        value = getattr(self, "_" + k)
        if value is not None:
            val[k] = value
    return val
[ "def", "value", "(", "self", ")", ":", "val", "=", "{", "}", "for", "k", "in", "self", ".", "__allowed_keys", ":", "value", "=", "getattr", "(", "self", ",", "\"_\"", "+", "k", ")", "if", "value", "is", "not", "None", ":", "val", "[", "k", "]", "=", "value", "return", "val" ]
returns the values as a dictionary
[ "returns", "the", "values", "as", "a", "dictionary" ]
python
train
reingart/pyafipws
wslpg.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wslpg.py#L1746-L1760
def AgregarCalidad(self, analisis_muestra=None, nro_boletin=None,
                   cod_grado=None, valor_grado=None,
                   valor_contenido_proteico=None, valor_factor=None,
                   **kwargs):
    "Agrega la información sobre la calidad, al autorizar o posteriormente"
    self.certificacion['primaria']['calidad'] = dict(
        analisisMuestra=analis_muestra if False else analisis_muestra,
        nroBoletin=nro_boletin,
        codGrado=cod_grado,                     # G1 G2 G3 F1 F2 F3
        valorGrado=valor_grado or None,         # opcional
        valorContProteico=valor_contenido_proteico,
        valorFactor=valor_factor,
        detalleMuestraAnalisis=[],              # <!--1 or more repetitions:-->
    )
    return True
[ "def", "AgregarCalidad", "(", "self", ",", "analisis_muestra", "=", "None", ",", "nro_boletin", "=", "None", ",", "cod_grado", "=", "None", ",", "valor_grado", "=", "None", ",", "valor_contenido_proteico", "=", "None", ",", "valor_factor", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "certificacion", "[", "'primaria'", "]", "[", "'calidad'", "]", "=", "dict", "(", "analisisMuestra", "=", "analisis_muestra", ",", "nroBoletin", "=", "nro_boletin", ",", "codGrado", "=", "cod_grado", ",", "# G1 G2 G3 F1 F2 F3", "valorGrado", "=", "valor_grado", "or", "None", ",", "# opcional", "valorContProteico", "=", "valor_contenido_proteico", ",", "valorFactor", "=", "valor_factor", ",", "detalleMuestraAnalisis", "=", "[", "]", ",", "# <!--1 or more repetitions:-->", ")", "return", "True" ]
Agrega la información sobre la calidad, al autorizar o posteriormente
[ "Agrega", "la", "información", "sobre", "la", "calidad", "al", "autorizar", "o", "posteriormente" ]
python
train
studionow/pybrightcove
pybrightcove/http_core.py
https://github.com/studionow/pybrightcove/blob/19c946b689a80156e070fe9bc35589c4b768e614/pybrightcove/http_core.py#L226-L244
def modify_request(self, http_request=None):
    """Sets HTTP request components based on the URI."""
    if http_request is None:
        http_request = HttpRequest()
    if http_request.uri is None:
        http_request.uri = Uri()
    # Determine the correct scheme.
    if self.scheme:
        http_request.uri.scheme = self.scheme
    if self.port:
        http_request.uri.port = self.port
    if self.host:
        http_request.uri.host = self.host
    # Set the relative uri path
    if self.path:
        http_request.uri.path = self.path
    if self.query:
        http_request.uri.query = self.query.copy()
    return http_request
[ "def", "modify_request", "(", "self", ",", "http_request", "=", "None", ")", ":", "if", "http_request", "is", "None", ":", "http_request", "=", "HttpRequest", "(", ")", "if", "http_request", ".", "uri", "is", "None", ":", "http_request", ".", "uri", "=", "Uri", "(", ")", "# Determine the correct scheme.", "if", "self", ".", "scheme", ":", "http_request", ".", "uri", ".", "scheme", "=", "self", ".", "scheme", "if", "self", ".", "port", ":", "http_request", ".", "uri", ".", "port", "=", "self", ".", "port", "if", "self", ".", "host", ":", "http_request", ".", "uri", ".", "host", "=", "self", ".", "host", "# Set the relative uri path", "if", "self", ".", "path", ":", "http_request", ".", "uri", ".", "path", "=", "self", ".", "path", "if", "self", ".", "query", ":", "http_request", ".", "uri", ".", "query", "=", "self", ".", "query", ".", "copy", "(", ")", "return", "http_request" ]
Sets HTTP request components based on the URI.
[ "Sets", "HTTP", "request", "components", "based", "on", "the", "URI", "." ]
python
train
log2timeline/plaso
plaso/engine/processing_status.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/processing_status.py#L342-L403
def _UpdateProcessStatus( self, process_status, identifier, status, pid, used_memory, display_name, number_of_consumed_sources, number_of_produced_sources, number_of_consumed_events, number_of_produced_events, number_of_consumed_event_tags, number_of_produced_event_tags, number_of_consumed_reports, number_of_produced_reports, number_of_consumed_warnings, number_of_produced_warnings): """Updates a process status. Args: process_status (ProcessStatus): process status. identifier (str): process identifier. status (str): human readable status of the process e.g. 'Idle'. pid (int): process identifier (PID). used_memory (int): size of used memory in bytes. display_name (str): human readable of the file entry currently being processed by the process. number_of_consumed_sources (int): total number of event sources consumed by the process. number_of_produced_sources (int): total number of event sources produced by the process. number_of_consumed_events (int): total number of events consumed by the process. number_of_produced_events (int): total number of events produced by the process. number_of_consumed_event_tags (int): total number of event tags consumed by the process. number_of_produced_event_tags (int): total number of event tags produced by the process. number_of_consumed_reports (int): total number of event reports consumed by the process. number_of_produced_reports (int): total number of event reports produced by the process. number_of_consumed_warnings (int): total number of warnings consumed by the process. number_of_produced_warnings (int): total number of warnings produced by the process. """ new_sources = process_status.UpdateNumberOfEventSources( number_of_consumed_sources, number_of_produced_sources) new_events = process_status.UpdateNumberOfEvents( number_of_consumed_events, number_of_produced_events) new_event_tags = process_status.UpdateNumberOfEventTags( number_of_consumed_event_tags, number_of_produced_event_tags) new_warnings = process_status.UpdateNumberOfWarnings( number_of_consumed_warnings, number_of_produced_warnings) new_reports = process_status.UpdateNumberOfEventReports( number_of_consumed_reports, number_of_produced_reports) process_status.display_name = display_name process_status.identifier = identifier process_status.pid = pid process_status.status = status process_status.used_memory = used_memory if (new_sources or new_events or new_event_tags or new_warnings or new_reports): process_status.last_running_time = time.time()
[ "def", "_UpdateProcessStatus", "(", "self", ",", "process_status", ",", "identifier", ",", "status", ",", "pid", ",", "used_memory", ",", "display_name", ",", "number_of_consumed_sources", ",", "number_of_produced_sources", ",", "number_of_consumed_events", ",", "number_of_produced_events", ",", "number_of_consumed_event_tags", ",", "number_of_produced_event_tags", ",", "number_of_consumed_reports", ",", "number_of_produced_reports", ",", "number_of_consumed_warnings", ",", "number_of_produced_warnings", ")", ":", "new_sources", "=", "process_status", ".", "UpdateNumberOfEventSources", "(", "number_of_consumed_sources", ",", "number_of_produced_sources", ")", "new_events", "=", "process_status", ".", "UpdateNumberOfEvents", "(", "number_of_consumed_events", ",", "number_of_produced_events", ")", "new_event_tags", "=", "process_status", ".", "UpdateNumberOfEventTags", "(", "number_of_consumed_event_tags", ",", "number_of_produced_event_tags", ")", "new_warnings", "=", "process_status", ".", "UpdateNumberOfWarnings", "(", "number_of_consumed_warnings", ",", "number_of_produced_warnings", ")", "new_reports", "=", "process_status", ".", "UpdateNumberOfEventReports", "(", "number_of_consumed_reports", ",", "number_of_produced_reports", ")", "process_status", ".", "display_name", "=", "display_name", "process_status", ".", "identifier", "=", "identifier", "process_status", ".", "pid", "=", "pid", "process_status", ".", "status", "=", "status", "process_status", ".", "used_memory", "=", "used_memory", "if", "(", "new_sources", "or", "new_events", "or", "new_event_tags", "or", "new_warnings", "or", "new_reports", ")", ":", "process_status", ".", "last_running_time", "=", "time", ".", "time", "(", ")" ]
Updates a process status. Args: process_status (ProcessStatus): process status. identifier (str): process identifier. status (str): human readable status of the process e.g. 'Idle'. pid (int): process identifier (PID). used_memory (int): size of used memory in bytes. display_name (str): human readable of the file entry currently being processed by the process. number_of_consumed_sources (int): total number of event sources consumed by the process. number_of_produced_sources (int): total number of event sources produced by the process. number_of_consumed_events (int): total number of events consumed by the process. number_of_produced_events (int): total number of events produced by the process. number_of_consumed_event_tags (int): total number of event tags consumed by the process. number_of_produced_event_tags (int): total number of event tags produced by the process. number_of_consumed_reports (int): total number of event reports consumed by the process. number_of_produced_reports (int): total number of event reports produced by the process. number_of_consumed_warnings (int): total number of warnings consumed by the process. number_of_produced_warnings (int): total number of warnings produced by the process.
[ "Updates", "a", "process", "status", "." ]
python
train
inspirehep/inspire-schemas
inspire_schemas/builders/signatures.py
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/signatures.py#L69-L85
def add_affiliation(self, value, curated_relation=None, record=None):
    """Add an affiliation.

    Args:
        value (string): affiliation value
        curated_relation (bool): is relation curated
        record (dict): affiliation JSON reference
    """
    if value:
        affiliation = {
            'value': value
        }
        if record:
            affiliation['record'] = record
        if curated_relation is not None:
            affiliation['curated_relation'] = curated_relation
        self._ensure_list_field('affiliations', affiliation)
[ "def", "add_affiliation", "(", "self", ",", "value", ",", "curated_relation", "=", "None", ",", "record", "=", "None", ")", ":", "if", "value", ":", "affiliation", "=", "{", "'value'", ":", "value", "}", "if", "record", ":", "affiliation", "[", "'record'", "]", "=", "record", "if", "curated_relation", "is", "not", "None", ":", "affiliation", "[", "'curated_relation'", "]", "=", "curated_relation", "self", ".", "_ensure_list_field", "(", "'affiliations'", ",", "affiliation", ")" ]
Add an affiliation. Args: value (string): affiliation value curated_relation (bool): is relation curated record (dict): affiliation JSON reference
[ "Add", "an", "affiliation", "." ]
python
train
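The add_affiliation record above relies on an internal helper to grow a list-valued field. The sketch below is a hypothetical, simplified stand-in for that pattern rather than the real SignatureBuilder API; the helper name, field name and example values are invented for illustration.

    # Hypothetical stand-in for the builder's list-field idiom; the real
    # _ensure_list_field method on the builder may differ in details.
    def ensure_list_field(obj, field, value):
        # Create the list on first use, then append the new entry.
        obj.setdefault(field, []).append(value)

    signature = {}
    affiliation = {'value': 'CERN'}          # illustrative value only
    affiliation['curated_relation'] = True
    ensure_list_field(signature, 'affiliations', affiliation)
    print(signature)
    # {'affiliations': [{'value': 'CERN', 'curated_relation': True}]}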
HEPData/hepdata-validator
hepdata_validator/__init__.py
https://github.com/HEPData/hepdata-validator/blob/d0b0cab742a009c8f0e8aac9f8c8e434a524d43c/hepdata_validator/__init__.py#L65-L79
def get_messages(self, file_name=None): """ Return messages for a file (if file_name provided). If file_name is none, returns all messages as a dict. :param file_name: :return: array if file_name is provided, dict otherwise. """ if file_name is None: return self.messages elif file_name in self.messages: return self.messages[file_name] else: return []
[ "def", "get_messages", "(", "self", ",", "file_name", "=", "None", ")", ":", "if", "file_name", "is", "None", ":", "return", "self", ".", "messages", "elif", "file_name", "in", "self", ".", "messages", ":", "return", "self", ".", "messages", "[", "file_name", "]", "else", ":", "return", "[", "]" ]
Return messages for a file (if file_name provided). If file_name is none, returns all messages as a dict. :param file_name: :return: array if file_name is provided, dict otherwise.
[ "Return", "messages", "for", "a", "file", "(", "if", "file_name", "provided", ")", ".", "If", "file_name", "is", "none", "returns", "all", "messages", "as", "a", "dict", ".", ":", "param", "file_name", ":", ":", "return", ":", "array", "if", "file_name", "is", "provided", "dict", "otherwise", "." ]
python
train
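A self-contained sketch of the same lookup-with-fallback behaviour as get_messages above, using a plain dict in place of the validator's internal messages store; the file names and messages are invented for the example.

    # Illustrative stand-in for the validator's messages store.
    messages = {'data1.yaml': ['missing qualifier'], 'data2.yaml': []}

    def get_messages(file_name=None):
        # No name: return everything; unknown name: return an empty list.
        if file_name is None:
            return messages
        return messages.get(file_name, [])

    print(get_messages())              # the whole dict
    print(get_messages('data1.yaml'))  # ['missing qualifier']
    print(get_messages('other.yaml'))  # []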
City-of-Helsinki/django-helusers
helusers/utils.py
https://github.com/City-of-Helsinki/django-helusers/blob/9064979f6f990987358e2bca3c24a80fad201bdb/helusers/utils.py#L5-L17
def uuid_to_username(uuid): """ Convert UUID to username. >>> uuid_to_username('00fbac99-0bab-5e66-8e84-2e567ea4d1f6') 'u-ad52zgilvnpgnduefzlh5jgr6y' >>> uuid_to_username(UUID('00fbac99-0bab-5e66-8e84-2e567ea4d1f6')) 'u-ad52zgilvnpgnduefzlh5jgr6y' """ uuid_data = getattr(uuid, 'bytes', None) or UUID(uuid).bytes b32coded = base64.b32encode(uuid_data) return 'u-' + b32coded.decode('ascii').replace('=', '').lower()
[ "def", "uuid_to_username", "(", "uuid", ")", ":", "uuid_data", "=", "getattr", "(", "uuid", ",", "'bytes'", ",", "None", ")", "or", "UUID", "(", "uuid", ")", ".", "bytes", "b32coded", "=", "base64", ".", "b32encode", "(", "uuid_data", ")", "return", "'u-'", "+", "b32coded", ".", "decode", "(", "'ascii'", ")", ".", "replace", "(", "'='", ",", "''", ")", ".", "lower", "(", ")" ]
Convert UUID to username. >>> uuid_to_username('00fbac99-0bab-5e66-8e84-2e567ea4d1f6') 'u-ad52zgilvnpgnduefzlh5jgr6y' >>> uuid_to_username(UUID('00fbac99-0bab-5e66-8e84-2e567ea4d1f6')) 'u-ad52zgilvnpgnduefzlh5jgr6y'
[ "Convert", "UUID", "to", "username", "." ]
python
train
esheldon/fitsio
fitsio/hdu/table.py
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L985-L1002
def _fix_tbit_dtype(self, array, colnums): """ If necessary, patch up the TBIT to convert to bool array parameters ---------- array: record array colnums: column numbers for lookup """ descr = array.dtype.descr for i, colnum in enumerate(colnums): npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum) if (istbit): coldescr = list(descr[i]) coldescr[1] = '?' descr[i] = tuple(coldescr) return array.view(descr)
[ "def", "_fix_tbit_dtype", "(", "self", ",", "array", ",", "colnums", ")", ":", "descr", "=", "array", ".", "dtype", ".", "descr", "for", "i", ",", "colnum", "in", "enumerate", "(", "colnums", ")", ":", "npy_type", ",", "isvar", ",", "istbit", "=", "self", ".", "_get_tbl_numpy_dtype", "(", "colnum", ")", "if", "(", "istbit", ")", ":", "coldescr", "=", "list", "(", "descr", "[", "i", "]", ")", "coldescr", "[", "1", "]", "=", "'?'", "descr", "[", "i", "]", "=", "tuple", "(", "coldescr", ")", "return", "array", ".", "view", "(", "descr", ")" ]
If necessary, patch up the TBIT to convert to bool array parameters ---------- array: record array colnums: column numbers for lookup
[ "If", "necessary", "patch", "up", "the", "TBIT", "to", "convert", "to", "bool", "array" ]
python
train
metapensiero/metapensiero.signal
src/metapensiero/signal/core.py
https://github.com/metapensiero/metapensiero.signal/blob/1cbbb2e4bff00bf4887163b08b70d278e472bfe3/src/metapensiero/signal/core.py#L264-L290
def disconnect(self, cback, subscribers=None, instance=None): """Remove a previously added function or method from the set of the signal's handlers. :param cback: the callback (or *handler*) to be removed from the set :returns: ``None`` or the value returned by the corresponding wrapper """ if subscribers is None: subscribers = self.subscribers # wrapper if self._fdisconnect is not None: def _disconnect(cback): self._disconnect(subscribers, cback) notify = partial(self._notify_one, instance) if instance is not None: result = self._fdisconnect(instance, cback, subscribers, _disconnect, notify) else: result = self._fdisconnect(cback, subscribers, _disconnect, notify) if inspect.isawaitable(result): result = pull_result(result) else: self._disconnect(subscribers, cback) result = None return result
[ "def", "disconnect", "(", "self", ",", "cback", ",", "subscribers", "=", "None", ",", "instance", "=", "None", ")", ":", "if", "subscribers", "is", "None", ":", "subscribers", "=", "self", ".", "subscribers", "# wrapper", "if", "self", ".", "_fdisconnect", "is", "not", "None", ":", "def", "_disconnect", "(", "cback", ")", ":", "self", ".", "_disconnect", "(", "subscribers", ",", "cback", ")", "notify", "=", "partial", "(", "self", ".", "_notify_one", ",", "instance", ")", "if", "instance", "is", "not", "None", ":", "result", "=", "self", ".", "_fdisconnect", "(", "instance", ",", "cback", ",", "subscribers", ",", "_disconnect", ",", "notify", ")", "else", ":", "result", "=", "self", ".", "_fdisconnect", "(", "cback", ",", "subscribers", ",", "_disconnect", ",", "notify", ")", "if", "inspect", ".", "isawaitable", "(", "result", ")", ":", "result", "=", "pull_result", "(", "result", ")", "else", ":", "self", ".", "_disconnect", "(", "subscribers", ",", "cback", ")", "result", "=", "None", "return", "result" ]
Remove a previously added function or method from the set of the signal's handlers. :param cback: the callback (or *handler*) to be removed from the set :returns: ``None`` or the value returned by the corresponding wrapper
[ "Remove", "a", "previously", "added", "function", "or", "method", "from", "the", "set", "of", "the", "signal", "s", "handlers", "." ]
python
train
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L1448-L1455
def simxUnpackInts(intsPackedInString): ''' Please have a look at the function description/documentation in the V-REP user manual ''' b=[] for i in range(int(len(intsPackedInString)/4)): b.append(struct.unpack('<i',intsPackedInString[4*i:4*(i+1)])[0]) return b
[ "def", "simxUnpackInts", "(", "intsPackedInString", ")", ":", "b", "=", "[", "]", "for", "i", "in", "range", "(", "int", "(", "len", "(", "intsPackedInString", ")", "/", "4", ")", ")", ":", "b", ".", "append", "(", "struct", ".", "unpack", "(", "'<i'", ",", "intsPackedInString", "[", "4", "*", "i", ":", "4", "*", "(", "i", "+", "1", ")", "]", ")", "[", "0", "]", ")", "return", "b" ]
Please have a look at the function description/documentation in the V-REP user manual
[ "Please", "have", "a", "look", "at", "the", "function", "description", "/", "documentation", "in", "the", "V", "-", "REP", "user", "manual" ]
python
train
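A quick round-trip check of the little-endian 4-byte layout that simxUnpackInts above assumes; only the standard library struct module is used and the sample values are arbitrary.

    import struct

    # Pack three integers as the remote API does (little-endian, 4 bytes each),
    # then unpack them the same way simxUnpackInts does.
    values = [1, -2, 300000]
    packed = b''.join(struct.pack('<i', v) for v in values)

    unpacked = [struct.unpack('<i', packed[4 * i:4 * (i + 1)])[0]
                for i in range(len(packed) // 4)]
    print(unpacked)  # [1, -2, 300000]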
Azure/azure-event-hubs-python
azure/eventhub/async_ops/__init__.py
https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventhub/async_ops/__init__.py#L237-L270
def add_async_sender( self, partition=None, operation=None, send_timeout=60, keep_alive=30, auto_reconnect=True, loop=None): """ Add an async sender to the client to send ~azure.eventhub.common.EventData object to an EventHub. :param partition: Optionally specify a particular partition to send to. If omitted, the events will be distributed to available partitions via round-robin. :type partition: str :operation: An optional operation to be appended to the hostname in the target URL. The value must start with `/` character. :type operation: str :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is queued. Default value is 60 seconds. If set to 0, there will be no timeout. :type send_timeout: int :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during periods of inactivity. The default value is 30 seconds. If set to `None`, the connection will not be pinged. :type keep_alive: int :param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs. Default value is `True`. :type auto_reconnect: bool :rtype: ~azure.eventhub.async_ops.sender_async.SenderAsync """ target = "amqps://{}{}".format(self.address.hostname, self.address.path) if operation: target = target + operation handler = AsyncSender( self, target, partition=partition, send_timeout=send_timeout, keep_alive=keep_alive, auto_reconnect=auto_reconnect, loop=loop) self.clients.append(handler) return handler
[ "def", "add_async_sender", "(", "self", ",", "partition", "=", "None", ",", "operation", "=", "None", ",", "send_timeout", "=", "60", ",", "keep_alive", "=", "30", ",", "auto_reconnect", "=", "True", ",", "loop", "=", "None", ")", ":", "target", "=", "\"amqps://{}{}\"", ".", "format", "(", "self", ".", "address", ".", "hostname", ",", "self", ".", "address", ".", "path", ")", "if", "operation", ":", "target", "=", "target", "+", "operation", "handler", "=", "AsyncSender", "(", "self", ",", "target", ",", "partition", "=", "partition", ",", "send_timeout", "=", "send_timeout", ",", "keep_alive", "=", "keep_alive", ",", "auto_reconnect", "=", "auto_reconnect", ",", "loop", "=", "loop", ")", "self", ".", "clients", ".", "append", "(", "handler", ")", "return", "handler" ]
Add an async sender to the client to send ~azure.eventhub.common.EventData object to an EventHub. :param partition: Optionally specify a particular partition to send to. If omitted, the events will be distributed to available partitions via round-robin. :type partition: str :operation: An optional operation to be appended to the hostname in the target URL. The value must start with `/` character. :type operation: str :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is queued. Default value is 60 seconds. If set to 0, there will be no timeout. :type send_timeout: int :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during periods of inactivity. The default value is 30 seconds. If set to `None`, the connection will not be pinged. :type keep_alive: int :param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs. Default value is `True`. :type auto_reconnect: bool :rtype: ~azure.eventhub.async_ops.sender_async.SenderAsync
[ "Add", "an", "async", "sender", "to", "the", "client", "to", "send", "~azure", ".", "eventhub", ".", "common", ".", "EventData", "object", "to", "an", "EventHub", "." ]
python
train
gwpy/gwpy
gwpy/timeseries/statevector.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/statevector.py#L778-L827
def get(cls, channel, start, end, bits=None, **kwargs): """Get data for this channel from frames or NDS Parameters ---------- channel : `str`, `~gwpy.detector.Channel` the name of the channel to read, or a `Channel` object. start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` GPS start time of required data, any input parseable by `~gwpy.time.to_gps` is fine end : `~gwpy.time.LIGOTimeGPS`, `float`, `str` GPS end time of required data, any input parseable by `~gwpy.time.to_gps` is fine bits : `Bits`, `list`, optional definition of bits for this `StateVector` pad : `float`, optional value with which to fill gaps in the source data, only used if gap is not given, or ``gap='pad'`` is given dtype : `numpy.dtype`, `str`, `type`, or `dict` numeric data type for returned data, e.g. `numpy.float`, or `dict` of (`channel`, `dtype`) pairs nproc : `int`, optional, default: `1` number of parallel processes to use, serial process by default. verbose : `bool`, optional print verbose output about NDS progress. **kwargs other keyword arguments to pass to either :meth:`.find` (for direct GWF file access) or :meth:`.fetch` for remote NDS2 access See Also -------- StateVector.fetch for grabbing data from a remote NDS2 server StateVector.find for discovering and reading data from local GWF files """ new = cls.DictClass.get([channel], start, end, **kwargs)[channel] if bits: new.bits = bits return new
[ "def", "get", "(", "cls", ",", "channel", ",", "start", ",", "end", ",", "bits", "=", "None", ",", "*", "*", "kwargs", ")", ":", "new", "=", "cls", ".", "DictClass", ".", "get", "(", "[", "channel", "]", ",", "start", ",", "end", ",", "*", "*", "kwargs", ")", "[", "channel", "]", "if", "bits", ":", "new", ".", "bits", "=", "bits", "return", "new" ]
Get data for this channel from frames or NDS Parameters ---------- channel : `str`, `~gwpy.detector.Channel` the name of the channel to read, or a `Channel` object. start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` GPS start time of required data, any input parseable by `~gwpy.time.to_gps` is fine end : `~gwpy.time.LIGOTimeGPS`, `float`, `str` GPS end time of required data, any input parseable by `~gwpy.time.to_gps` is fine bits : `Bits`, `list`, optional definition of bits for this `StateVector` pad : `float`, optional value with which to fill gaps in the source data, only used if gap is not given, or ``gap='pad'`` is given dtype : `numpy.dtype`, `str`, `type`, or `dict` numeric data type for returned data, e.g. `numpy.float`, or `dict` of (`channel`, `dtype`) pairs nproc : `int`, optional, default: `1` number of parallel processes to use, serial process by default. verbose : `bool`, optional print verbose output about NDS progress. **kwargs other keyword arguments to pass to either :meth:`.find` (for direct GWF file access) or :meth:`.fetch` for remote NDS2 access See Also -------- StateVector.fetch for grabbing data from a remote NDS2 server StateVector.find for discovering and reading data from local GWF files
[ "Get", "data", "for", "this", "channel", "from", "frames", "or", "NDS" ]
python
train
dourvaris/nano-python
src/nano/rpc.py
https://github.com/dourvaris/nano-python/blob/f26b8bc895b997067780f925049a70e82c0c2479/src/nano/rpc.py#L945-L1117
def block_create( self, type, account, wallet=None, representative=None, key=None, destination=None, amount=None, balance=None, previous=None, source=None, work=None, ): """ Creates a json representations of new block based on input data & signed with private key or account in **wallet** for offline signing .. enable_control required .. version 8.1 required :param type: Type of block to create one of **open**, **receive**, **change**, **send** :type type: str :param account: Account for the signed block :type account: str :param wallet: Wallet to use :type wallet: str :param representative: Representative account for **open** and **change** blocks :type representative: str :param key: Private key to use to open account for **open** blocks :type key: str :param destination: Destination account for **send** blocks :type destination: str :param amount: Amount in raw for **send** blocks :type amount: int :param balance: Balance in raw of account for **send** blocks :type balance: int :param previous: Previous block hash for **receive**, **send** and **change** blocks :type previous: str :param source: Source block for **open** and **receive** blocks :type source: str :param work: Work value to use for block from external source :type work: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.block_create( ... type="open", ... account="xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951", ... source="19D3D919475DEED4696B5D13018151D1AF88B2BD3BCFF048B45031C1F36D1858", ... representative="xrb_1hza3f7wiiqa7ig3jczyxj5yo86yegcmqk3criaz838j91sxcckpfhbhhra1", ... key="0000000000000000000000000000000000000000000000000000000000000001" ... ) { "block": { "account": "xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951", "representative": "xrb_1hza3f7wiiqa7ig3jczyxj5yo86yegcmqk3criaz838j91sxcckpfhbhhra1", "signature": "5974324F8CC42DA56F62FC212A17886BDCB18DE363D04DA84EEDC99CB4A33919D14A2CF9DE9D534FAA6D0B91D01F0622205D898293525E692586C84F2DCF9208", "source": "19D3D919475DEED4696B5D13018151D1AF88B2BD3BCFF048B45031C1F36D1858", "type": "open", "work": "4ec76c9bda2325ed" }, "hash": "F47B23107E5F34B2CE06F562B5C435DF72A533251CB414C51B2B62A8F63A00E4" } >>> rpc.block_create( ... type="receive", ... account="xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951", ... previous="F47B23107E5F34B2CE06F562B5C435DF72A533251CB414C51B2B62A8F63A00E4", ... source="19D3D919475DEED4696B5D13018151D1AF88B2BD3BCFF048B45031C1F36D1858", ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F", ... ) { "block": { "previous": "F47B23107E5F34B2CE06F562B5C435DF72A533251CB414C51B2B62A8F63A00E4", "signature": "A13FD22527771667D5DFF33D69787D734836A3561D8A490C1F4917A05D77EA09860461D5FBFC99246A4EAB5627F119AD477598E22EE021C4711FACF4F3C80D0E", "source": "19D3D919475DEED4696B5D13018151D1AF88B2BD3BCFF048B45031C1F36D1858", "type": "receive", "work": "6acb5dd43a38d76a" }, "hash": "314BA8D9057678C1F53371C2DB3026C1FAC01EC8E7802FD9A2E8130FC523429E" } >>> rpc.block_create( ... type="send", ... account="xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951", ... amount=10000000000000000000000000000000, ... balance=20000000000000000000000000000000, ... destination="xrb_18gmu6engqhgtjnppqam181o5nfhj4sdtgyhy36dan3jr9spt84rzwmktafc", ... previous="314BA8D9057678C1F53371C2DB3026C1FAC01EC8E7802FD9A2E8130FC523429E", ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F", ... work="478563b2d9facfd4", ... 
) { "block": { "balance": "0000007E37BE2022C0914B2680000000", "destination": "xrb_18gmu6engqhgtjnppqam181o5nfhj4sdtgyhy36dan3jr9spt84rzwmktafc", "previous": "314BA8D9057678C1F53371C2DB3026C1FAC01EC8E7802FD9A2E8130FC523429E", "signature": "F19CA177EFA8692C8CBF7478CE3213F56E4A85DF760DA7A9E69141849831F8FD79BA9ED89CEC807B690FB4AA42D5008F9DBA7115E63C935401F1F0EFA547BC00", "type": "send", "work": "478563b2d9facfd4" }, "hash": "F958305C0FF0551421D4ABEDCCF302079D020A0A3833E33F185E2B0415D4567A" } >>> rpc.block_create( ... type="change", ... account="xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951", ... representative="xrb_18gmu6engqhgtjnppqam181o5nfhj4sdtgyhy36dan3jr9spt84rzwmktafc", ... previous="F958305C0FF0551421D4ABEDCCF302079D020A0A3833E33F185E2B0415D4567A", ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F", ... ) { "block": { "previous": "F958305C0FF0551421D4ABEDCCF302079D020A0A3833E33F185E2B0415D4567A", "representative": "xrb_18gmu6engqhgtjnppqam181o5nfhj4sdtgyhy36dan3jr9spt84rzwmktafc", "signature": "98B4D56881D9A88B170A6B2976AE21900C26A27F0E2C338D93FDED56183B73D19AA5BEB48E43FCBB8FF8293FDD368CEF50600FECEFD490A0855ED702ED209E04", "type": "change", "work": "55e5b7a83edc3f4f" }, "hash": "654FA425CEBFC9E7726089E4EDE7A105462D93DBC915FFB70B50909920A7D286" } """ payload = { "type": self._process_value(type, 'blocktype'), "account": self._process_value(account, 'account'), } if representative is not None: payload['representative'] = self._process_value(representative, 'account') if key is not None: payload['key'] = self._process_value(key, 'privatekey') if source is not None: payload['source'] = self._process_value(source, 'block') if destination is not None: payload['destination'] = self._process_value(destination, 'account') if amount is not None: payload['amount'] = self._process_value(amount, 'int') if balance is not None: payload['balance'] = self._process_value(balance, 'int') if previous is not None: payload['previous'] = self._process_value(previous, 'block') if wallet is not None: payload['wallet'] = self._process_value(wallet, 'wallet') if work is not None: payload['work'] = self._process_value(work, 'work') resp = self.call('block_create', payload) resp['block'] = json.loads(resp['block']) return resp
[ "def", "block_create", "(", "self", ",", "type", ",", "account", ",", "wallet", "=", "None", ",", "representative", "=", "None", ",", "key", "=", "None", ",", "destination", "=", "None", ",", "amount", "=", "None", ",", "balance", "=", "None", ",", "previous", "=", "None", ",", "source", "=", "None", ",", "work", "=", "None", ",", ")", ":", "payload", "=", "{", "\"type\"", ":", "self", ".", "_process_value", "(", "type", ",", "'blocktype'", ")", ",", "\"account\"", ":", "self", ".", "_process_value", "(", "account", ",", "'account'", ")", ",", "}", "if", "representative", "is", "not", "None", ":", "payload", "[", "'representative'", "]", "=", "self", ".", "_process_value", "(", "representative", ",", "'account'", ")", "if", "key", "is", "not", "None", ":", "payload", "[", "'key'", "]", "=", "self", ".", "_process_value", "(", "key", ",", "'privatekey'", ")", "if", "source", "is", "not", "None", ":", "payload", "[", "'source'", "]", "=", "self", ".", "_process_value", "(", "source", ",", "'block'", ")", "if", "destination", "is", "not", "None", ":", "payload", "[", "'destination'", "]", "=", "self", ".", "_process_value", "(", "destination", ",", "'account'", ")", "if", "amount", "is", "not", "None", ":", "payload", "[", "'amount'", "]", "=", "self", ".", "_process_value", "(", "amount", ",", "'int'", ")", "if", "balance", "is", "not", "None", ":", "payload", "[", "'balance'", "]", "=", "self", ".", "_process_value", "(", "balance", ",", "'int'", ")", "if", "previous", "is", "not", "None", ":", "payload", "[", "'previous'", "]", "=", "self", ".", "_process_value", "(", "previous", ",", "'block'", ")", "if", "wallet", "is", "not", "None", ":", "payload", "[", "'wallet'", "]", "=", "self", ".", "_process_value", "(", "wallet", ",", "'wallet'", ")", "if", "work", "is", "not", "None", ":", "payload", "[", "'work'", "]", "=", "self", ".", "_process_value", "(", "work", ",", "'work'", ")", "resp", "=", "self", ".", "call", "(", "'block_create'", ",", "payload", ")", "resp", "[", "'block'", "]", "=", "json", ".", "loads", "(", "resp", "[", "'block'", "]", ")", "return", "resp" ]
Creates a json representations of new block based on input data & signed with private key or account in **wallet** for offline signing .. enable_control required .. version 8.1 required :param type: Type of block to create one of **open**, **receive**, **change**, **send** :type type: str :param account: Account for the signed block :type account: str :param wallet: Wallet to use :type wallet: str :param representative: Representative account for **open** and **change** blocks :type representative: str :param key: Private key to use to open account for **open** blocks :type key: str :param destination: Destination account for **send** blocks :type destination: str :param amount: Amount in raw for **send** blocks :type amount: int :param balance: Balance in raw of account for **send** blocks :type balance: int :param previous: Previous block hash for **receive**, **send** and **change** blocks :type previous: str :param source: Source block for **open** and **receive** blocks :type source: str :param work: Work value to use for block from external source :type work: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.block_create( ... type="open", ... account="xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951", ... source="19D3D919475DEED4696B5D13018151D1AF88B2BD3BCFF048B45031C1F36D1858", ... representative="xrb_1hza3f7wiiqa7ig3jczyxj5yo86yegcmqk3criaz838j91sxcckpfhbhhra1", ... key="0000000000000000000000000000000000000000000000000000000000000001" ... ) { "block": { "account": "xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951", "representative": "xrb_1hza3f7wiiqa7ig3jczyxj5yo86yegcmqk3criaz838j91sxcckpfhbhhra1", "signature": "5974324F8CC42DA56F62FC212A17886BDCB18DE363D04DA84EEDC99CB4A33919D14A2CF9DE9D534FAA6D0B91D01F0622205D898293525E692586C84F2DCF9208", "source": "19D3D919475DEED4696B5D13018151D1AF88B2BD3BCFF048B45031C1F36D1858", "type": "open", "work": "4ec76c9bda2325ed" }, "hash": "F47B23107E5F34B2CE06F562B5C435DF72A533251CB414C51B2B62A8F63A00E4" } >>> rpc.block_create( ... type="receive", ... account="xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951", ... previous="F47B23107E5F34B2CE06F562B5C435DF72A533251CB414C51B2B62A8F63A00E4", ... source="19D3D919475DEED4696B5D13018151D1AF88B2BD3BCFF048B45031C1F36D1858", ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F", ... ) { "block": { "previous": "F47B23107E5F34B2CE06F562B5C435DF72A533251CB414C51B2B62A8F63A00E4", "signature": "A13FD22527771667D5DFF33D69787D734836A3561D8A490C1F4917A05D77EA09860461D5FBFC99246A4EAB5627F119AD477598E22EE021C4711FACF4F3C80D0E", "source": "19D3D919475DEED4696B5D13018151D1AF88B2BD3BCFF048B45031C1F36D1858", "type": "receive", "work": "6acb5dd43a38d76a" }, "hash": "314BA8D9057678C1F53371C2DB3026C1FAC01EC8E7802FD9A2E8130FC523429E" } >>> rpc.block_create( ... type="send", ... account="xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951", ... amount=10000000000000000000000000000000, ... balance=20000000000000000000000000000000, ... destination="xrb_18gmu6engqhgtjnppqam181o5nfhj4sdtgyhy36dan3jr9spt84rzwmktafc", ... previous="314BA8D9057678C1F53371C2DB3026C1FAC01EC8E7802FD9A2E8130FC523429E", ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F", ... work="478563b2d9facfd4", ... 
) { "block": { "balance": "0000007E37BE2022C0914B2680000000", "destination": "xrb_18gmu6engqhgtjnppqam181o5nfhj4sdtgyhy36dan3jr9spt84rzwmktafc", "previous": "314BA8D9057678C1F53371C2DB3026C1FAC01EC8E7802FD9A2E8130FC523429E", "signature": "F19CA177EFA8692C8CBF7478CE3213F56E4A85DF760DA7A9E69141849831F8FD79BA9ED89CEC807B690FB4AA42D5008F9DBA7115E63C935401F1F0EFA547BC00", "type": "send", "work": "478563b2d9facfd4" }, "hash": "F958305C0FF0551421D4ABEDCCF302079D020A0A3833E33F185E2B0415D4567A" } >>> rpc.block_create( ... type="change", ... account="xrb_3kdbxitaj7f6mrir6miiwtw4muhcc58e6tn5st6rfaxsdnb7gr4roudwn951", ... representative="xrb_18gmu6engqhgtjnppqam181o5nfhj4sdtgyhy36dan3jr9spt84rzwmktafc", ... previous="F958305C0FF0551421D4ABEDCCF302079D020A0A3833E33F185E2B0415D4567A", ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F", ... ) { "block": { "previous": "F958305C0FF0551421D4ABEDCCF302079D020A0A3833E33F185E2B0415D4567A", "representative": "xrb_18gmu6engqhgtjnppqam181o5nfhj4sdtgyhy36dan3jr9spt84rzwmktafc", "signature": "98B4D56881D9A88B170A6B2976AE21900C26A27F0E2C338D93FDED56183B73D19AA5BEB48E43FCBB8FF8293FDD368CEF50600FECEFD490A0855ED702ED209E04", "type": "change", "work": "55e5b7a83edc3f4f" }, "hash": "654FA425CEBFC9E7726089E4EDE7A105462D93DBC915FFB70B50909920A7D286" }
[ "Creates", "a", "json", "representations", "of", "new", "block", "based", "on", "input", "data", "&", "signed", "with", "private", "key", "or", "account", "in", "**", "wallet", "**", "for", "offline", "signing" ]
python
train
PMEAL/OpenPNM
openpnm/core/Base.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/core/Base.py#L1329-L1380
def num_throats(self, labels='all', mode='union'): r""" Return the number of throats of the specified labels Parameters ---------- labels : list of strings, optional The throat labels that should be included in the count. If not supplied, all throats are counted. mode : string, optional Specifies how the count should be performed. The options are: **'or', 'union', 'any'** : (default) Throats with *one or more* of the given labels are counted. **'and', 'intersection', 'all'** : Throats with *all* of the given labels are counted. **'xor', 'exclusive_or'** : Throats with *only one* of the given labels are counted. **'nor', 'none', 'not'** : Throats with *none* of the given labels are counted. **'nand'** : Throats with *some but not all* of the given labels are counted. **'xnor'** : Throats with *more than one* of the given labels are counted. Returns ------- Nt : int Number of throats with the specified labels See Also -------- num_pores count Notes ----- Technically, *'nand'* and *'xnor'* should also count throats with *none* of the labels, however, to make the count more useful these are not included. """ # Count number of pores of specified type Ts = self._get_indices(labels=labels, mode=mode, element='throat') Nt = sp.shape(Ts)[0] return Nt
[ "def", "num_throats", "(", "self", ",", "labels", "=", "'all'", ",", "mode", "=", "'union'", ")", ":", "# Count number of pores of specified type", "Ts", "=", "self", ".", "_get_indices", "(", "labels", "=", "labels", ",", "mode", "=", "mode", ",", "element", "=", "'throat'", ")", "Nt", "=", "sp", ".", "shape", "(", "Ts", ")", "[", "0", "]", "return", "Nt" ]
r""" Return the number of throats of the specified labels Parameters ---------- labels : list of strings, optional The throat labels that should be included in the count. If not supplied, all throats are counted. mode : string, optional Specifies how the count should be performed. The options are: **'or', 'union', 'any'** : (default) Throats with *one or more* of the given labels are counted. **'and', 'intersection', 'all'** : Throats with *all* of the given labels are counted. **'xor', 'exclusive_or'** : Throats with *only one* of the given labels are counted. **'nor', 'none', 'not'** : Throats with *none* of the given labels are counted. **'nand'** : Throats with *some but not all* of the given labels are counted. **'xnor'** : Throats with *more than one* of the given labels are counted. Returns ------- Nt : int Number of throats with the specified labels See Also -------- num_pores count Notes ----- Technically, *'nand'* and *'xnor'* should also count throats with *none* of the labels, however, to make the count more useful these are not included.
[ "r", "Return", "the", "number", "of", "throats", "of", "the", "specified", "labels" ]
python
train
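A usage sketch for num_throats above. It assumes an OpenPNM 2.x-style import and the Cubic network class; treat the constructor call and its arguments as an assumption rather than a verified API reference.

    import openpnm as op

    # Assumed: a 3x3x3 cubic network with the default 6-connectivity.
    pn = op.network.Cubic(shape=[3, 3, 3])

    print(pn.num_throats())              # 54 = 3 * 3**2 * (3 - 1) for 6-connectivity
    print(pn.num_throats(labels='all'))  # same count; 'all' is the default label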
NarrativeScience/lsi
src/lsi/utils/table.py
https://github.com/NarrativeScience/lsi/blob/7d901b03fdb1a34ef795e5412bfe9685d948e32d/src/lsi/utils/table.py#L128-L144
def prepare_rows(table): """ Prepare the rows so they're all strings, and all the same length. :param table: A 2D grid of anything. :type table: [[``object``]] :return: A table of strings, where every row is the same length. :rtype: [[``str``]] """ num_columns = max(len(row) for row in table) for row in table: while len(row) < num_columns: row.append('') for i in range(num_columns): row[i] = str(row[i]) if row[i] is not None else '' return table
[ "def", "prepare_rows", "(", "table", ")", ":", "num_columns", "=", "max", "(", "len", "(", "row", ")", "for", "row", "in", "table", ")", "for", "row", "in", "table", ":", "while", "len", "(", "row", ")", "<", "num_columns", ":", "row", ".", "append", "(", "''", ")", "for", "i", "in", "range", "(", "num_columns", ")", ":", "row", "[", "i", "]", "=", "str", "(", "row", "[", "i", "]", ")", "if", "row", "[", "i", "]", "is", "not", "None", "else", "''", "return", "table" ]
Prepare the rows so they're all strings, and all the same length. :param table: A 2D grid of anything. :type table: [[``object``]] :return: A table of strings, where every row is the same length. :rtype: [[``str``]]
[ "Prepare", "the", "rows", "so", "they", "re", "all", "strings", "and", "all", "the", "same", "length", "." ]
python
test
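A short usage sketch for prepare_rows above, assuming the function from this record is in scope; the table contents are invented.

    # Ragged rows with mixed types and a None entry.
    table = [['host', 'state'],
             ['web-1', 'running', 'us-east'],
             [None]]

    for row in prepare_rows(table):
        print(row)
    # ['host', 'state', '']
    # ['web-1', 'running', 'us-east']
    # ['', '', '']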
senaite/senaite.core
bika/lims/upgrade/v01_02_008.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/upgrade/v01_02_008.py#L103-L117
def fix_client_permissions(portal): """Fix client permissions """ wfs = get_workflows() start = time.time() clients = portal.clients.objectValues() total = len(clients) for num, client in enumerate(clients): logger.info("Fixing permission for client {}/{} ({})" .format(num, total, client.getName())) update_role_mappings(client, wfs=wfs) end = time.time() logger.info("Fixing client permissions took %.2fs" % float(end-start)) transaction.commit()
[ "def", "fix_client_permissions", "(", "portal", ")", ":", "wfs", "=", "get_workflows", "(", ")", "start", "=", "time", ".", "time", "(", ")", "clients", "=", "portal", ".", "clients", ".", "objectValues", "(", ")", "total", "=", "len", "(", "clients", ")", "for", "num", ",", "client", "in", "enumerate", "(", "clients", ")", ":", "logger", ".", "info", "(", "\"Fixing permission for client {}/{} ({})\"", ".", "format", "(", "num", ",", "total", ",", "client", ".", "getName", "(", ")", ")", ")", "update_role_mappings", "(", "client", ",", "wfs", "=", "wfs", ")", "end", "=", "time", ".", "time", "(", ")", "logger", ".", "info", "(", "\"Fixing client permissions took %.2fs\"", "%", "float", "(", "end", "-", "start", ")", ")", "transaction", ".", "commit", "(", ")" ]
Fix client permissions
[ "Fix", "client", "permissions" ]
python
train
espressif/esptool
ecdsa/numbertheory.py
https://github.com/espressif/esptool/blob/c583756c118039cfcfe256f7a3285618914d16a5/ecdsa/numbertheory.py#L185-L203
def inverse_mod( a, m ): """Inverse of a mod m.""" if a < 0 or m <= a: a = a % m # From Ferguson and Schneier, roughly: c, d = a, m uc, vc, ud, vd = 1, 0, 0, 1 while c != 0: q, c, d = divmod( d, c ) + ( c, ) uc, vc, ud, vd = ud - q*uc, vd - q*vc, uc, vc # At this point, d is the GCD, and ud*a+vd*m = d. # If d == 1, this means that ud is a inverse. assert d == 1 if ud > 0: return ud else: return ud + m
[ "def", "inverse_mod", "(", "a", ",", "m", ")", ":", "if", "a", "<", "0", "or", "m", "<=", "a", ":", "a", "=", "a", "%", "m", "# From Ferguson and Schneier, roughly:", "c", ",", "d", "=", "a", ",", "m", "uc", ",", "vc", ",", "ud", ",", "vd", "=", "1", ",", "0", ",", "0", ",", "1", "while", "c", "!=", "0", ":", "q", ",", "c", ",", "d", "=", "divmod", "(", "d", ",", "c", ")", "+", "(", "c", ",", ")", "uc", ",", "vc", ",", "ud", ",", "vd", "=", "ud", "-", "q", "*", "uc", ",", "vd", "-", "q", "*", "vc", ",", "uc", ",", "vc", "# At this point, d is the GCD, and ud*a+vd*m = d.", "# If d == 1, this means that ud is a inverse.", "assert", "d", "==", "1", "if", "ud", ">", "0", ":", "return", "ud", "else", ":", "return", "ud", "+", "m" ]
Inverse of a mod m.
[ "Inverse", "of", "a", "mod", "m", "." ]
python
train
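A small sanity check for inverse_mod above, assuming the function from this record is in scope; the modulus is an arbitrary prime, so the inverse is guaranteed to exist.

    a, m = 42, 2017
    inv = inverse_mod(a, m)
    print(inv)             # 1969, since (-48 * 42 + 1 * 2017) == 1
    print((a * inv) % m)   # 1, confirming inv is the inverse of a mod m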
openeventdata/mordecai
mordecai/geoparse.py
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L291-L297
def is_country(self, text): """Check if a piece of text is in the list of countries""" ct_list = self._just_cts.keys() if text in ct_list: return True else: return False
[ "def", "is_country", "(", "self", ",", "text", ")", ":", "ct_list", "=", "self", ".", "_just_cts", ".", "keys", "(", ")", "if", "text", "in", "ct_list", ":", "return", "True", "else", ":", "return", "False" ]
Check if a piece of text is in the list of countries
[ "Check", "if", "a", "piece", "of", "text", "is", "in", "the", "list", "of", "countries" ]
python
train
uogbuji/versa
tools/py/driver/memory.py
https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/driver/memory.py#L168-L196
def add_many(self, rels): ''' Add a list of relationships to the extent rels - a list of 0 or more relationship tuples, e.g.: [ (origin, rel, target, {attrname1: attrval1, attrname2: attrval2}), ] origin - origin of the relationship (similar to an RDF subject) rel - type IRI of the relationship (similar to an RDF predicate) target - target of the relationship (similar to an RDF object), a boolean, floating point or unicode object attrs - optional attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2} you can omit the dictionary of attributes if there are none, as long as you are not specifying a statement ID ''' for curr_rel in rels: attrs = self._attr_cls() if len(curr_rel) == 2: # handle __iter__ output for copy() origin, rel, target, attrs = curr_rel[1] elif len(curr_rel) == 3: origin, rel, target = curr_rel elif len(curr_rel) == 4: origin, rel, target, attrs = curr_rel else: raise ValueError assert rel self.add(origin, rel, target, attrs) return
[ "def", "add_many", "(", "self", ",", "rels", ")", ":", "for", "curr_rel", "in", "rels", ":", "attrs", "=", "self", ".", "_attr_cls", "(", ")", "if", "len", "(", "curr_rel", ")", "==", "2", ":", "# handle __iter__ output for copy()", "origin", ",", "rel", ",", "target", ",", "attrs", "=", "curr_rel", "[", "1", "]", "elif", "len", "(", "curr_rel", ")", "==", "3", ":", "origin", ",", "rel", ",", "target", "=", "curr_rel", "elif", "len", "(", "curr_rel", ")", "==", "4", ":", "origin", ",", "rel", ",", "target", ",", "attrs", "=", "curr_rel", "else", ":", "raise", "ValueError", "assert", "rel", "self", ".", "add", "(", "origin", ",", "rel", ",", "target", ",", "attrs", ")", "return" ]
Add a list of relationships to the extent rels - a list of 0 or more relationship tuples, e.g.: [ (origin, rel, target, {attrname1: attrval1, attrname2: attrval2}), ] origin - origin of the relationship (similar to an RDF subject) rel - type IRI of the relationship (similar to an RDF predicate) target - target of the relationship (similar to an RDF object), a boolean, floating point or unicode object attrs - optional attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2} you can omit the dictionary of attributes if there are none, as long as you are not specifying a statement ID
[ "Add", "a", "list", "of", "relationships", "to", "the", "extent" ]
python
train
programa-stic/barf-project
barf/core/reil/builder.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/core/reil/builder.py#L166-L171
def gen_nop(): """Return a NOP instruction. """ empty_reg = ReilEmptyOperand() return ReilBuilder.build(ReilMnemonic.NOP, empty_reg, empty_reg, empty_reg)
[ "def", "gen_nop", "(", ")", ":", "empty_reg", "=", "ReilEmptyOperand", "(", ")", "return", "ReilBuilder", ".", "build", "(", "ReilMnemonic", ".", "NOP", ",", "empty_reg", ",", "empty_reg", ",", "empty_reg", ")" ]
Return a NOP instruction.
[ "Return", "a", "NOP", "instruction", "." ]
python
train