Columns (name, type, length/value stats):

    repo               stringlengths   7 .. 54
    path               stringlengths   4 .. 192
    url                stringlengths   87 .. 284
    code               stringlengths   78 .. 104k
    code_tokens        list
    docstring          stringlengths   1 .. 46.9k
    docstring_tokens   list
    language           stringclasses   1 value
    partition          stringclasses   3 values
Fantomas42/django-blog-zinnia
zinnia/context.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/context.py#L25-L40
def get_context_loop_positions(context):
    """
    Return the paginated current position within a loop,
    and the non-paginated position.
    """
    try:
        loop_counter = context['forloop']['counter']
    except KeyError:
        return 0, 0
    try:
        page = context['page_obj']
    except KeyError:
        return loop_counter, loop_counter
    total_loop_counter = ((page.number - 1) * page.paginator.per_page +
                          loop_counter)
    return total_loop_counter, loop_counter
[ "def", "get_context_loop_positions", "(", "context", ")", ":", "try", ":", "loop_counter", "=", "context", "[", "'forloop'", "]", "[", "'counter'", "]", "except", "KeyError", ":", "return", "0", ",", "0", "try", ":", "page", "=", "context", "[", "'page_obj'", "]", "except", "KeyError", ":", "return", "loop_counter", ",", "loop_counter", "total_loop_counter", "=", "(", "(", "page", ".", "number", "-", "1", ")", "*", "page", ".", "paginator", ".", "per_page", "+", "loop_counter", ")", "return", "total_loop_counter", ",", "loop_counter" ]
Return the paginated current position within a loop, and the non-paginated position.
[ "Return", "the", "paginated", "current", "position", "within", "a", "loop", "and", "the", "non", "-", "paginated", "position", "." ]
python
train
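Not part of the dataset row: a minimal usage sketch for the function above, assuming django-blog-zinnia is installed. The page object only needs `number` and `paginator.per_page`, so a stand-in namespace suffices.

from types import SimpleNamespace

from zinnia.context import get_context_loop_positions  # module from the record above

# Stand-in for a Django Page object: page 3, 10 items per page.
page = SimpleNamespace(number=3, paginator=SimpleNamespace(per_page=10))
context = {'forloop': {'counter': 4}, 'page_obj': page}

print(get_context_loop_positions(context))  # (24, 4): (3 - 1) * 10 + 4 overall, 4 in-page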
pandas-dev/pandas
pandas/core/frame.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2057-L2069
def to_feather(self, fname):
    """
    Write out the binary feather-format for DataFrames.

    .. versionadded:: 0.20.0

    Parameters
    ----------
    fname : str
        string file path
    """
    from pandas.io.feather_format import to_feather
    to_feather(self, fname)
[ "def", "to_feather", "(", "self", ",", "fname", ")", ":", "from", "pandas", ".", "io", ".", "feather_format", "import", "to_feather", "to_feather", "(", "self", ",", "fname", ")" ]
Write out the binary feather-format for DataFrames.

.. versionadded:: 0.20.0

Parameters
----------
fname : str
    string file path
[ "Write", "out", "the", "binary", "feather", "-", "format", "for", "DataFrames", "." ]
python
train
projectshift/shift-schema
shiftschema/result.py
https://github.com/projectshift/shift-schema/blob/07787b540d3369bb37217ffbfbe629118edaf0eb/shiftschema/result.py#L352-L359
def format_error(self, error, args=None):
    """ Format error with positional or named arguments (if any) """
    if type(args) is dict:
        return error.format(**args)
    if type(args) is list or type(args) is tuple:
        return error.format(*args)
    return error
[ "def", "format_error", "(", "self", ",", "error", ",", "args", "=", "None", ")", ":", "if", "type", "(", "args", ")", "is", "dict", ":", "return", "error", ".", "format", "(", "*", "*", "args", ")", "if", "type", "(", "args", ")", "is", "list", "or", "type", "(", "args", ")", "is", "tuple", ":", "return", "error", ".", "format", "(", "*", "args", ")", "return", "error" ]
Format error with positional or named arguments (if any)
[ "Format", "error", "with", "positional", "or", "named", "arguments", "(", "if", "any", ")" ]
python
train
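Not part of the dataset row: `format_error` only dispatches to `str.format`, so its behaviour can be checked with a minimal stub of the containing class (the real one lives in shiftschema.result).

class ResultStub:
    """Minimal stand-in defining only the method shown in the record above."""
    def format_error(self, error, args=None):
        if type(args) is dict:
            return error.format(**args)
        if type(args) is list or type(args) is tuple:
            return error.format(*args)
        return error

r = ResultStub()
print(r.format_error('{field} is too long', {'field': 'name'}))  # name is too long
print(r.format_error('{0} of {1} failed', [3, 10]))              # 3 of 10 failed
print(r.format_error('plain message'))                           # plain message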
4degrees/riffle
source/riffle/browser.py
https://github.com/4degrees/riffle/blob/e5a0d908df8c93ff1ee7abdda8875fd1667df53d/source/riffle/browser.py#L146-L151
def _onNavigate(self, index):
    '''Handle selection of path segment.'''
    if index > 0:
        self.setLocation(
            self._locationWidget.itemData(index), interactive=True
        )
[ "def", "_onNavigate", "(", "self", ",", "index", ")", ":", "if", "index", ">", "0", ":", "self", ".", "setLocation", "(", "self", ".", "_locationWidget", ".", "itemData", "(", "index", ")", ",", "interactive", "=", "True", ")" ]
Handle selection of path segment.
[ "Handle", "selection", "of", "path", "segment", "." ]
python
test
chaoss/grimoirelab-kingarthur
arthur/server.py
https://github.com/chaoss/grimoirelab-kingarthur/blob/9d6a638bee68d5e5c511f045eeebf06340fd3252/arthur/server.py#L73-L82
def write_items(cls, writer, items_generator):
    """Write items to the queue

    :param writer: the writer object
    :param items_generator: items to be written in the queue
    """
    while True:
        items = items_generator()
        writer.write(items)
        time.sleep(1)
[ "def", "write_items", "(", "cls", ",", "writer", ",", "items_generator", ")", ":", "while", "True", ":", "items", "=", "items_generator", "(", ")", "writer", ".", "write", "(", "items", ")", "time", ".", "sleep", "(", "1", ")" ]
Write items to the queue

:param writer: the writer object
:param items_generator: items to be written in the queue
[ "Write", "items", "to", "the", "queue" ]
python
test
SheffieldML/GPy
GPy/kern/src/ODE_UYC.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/kern/src/ODE_UYC.py#L96-L121
def Kdiag(self, X):
    """Compute the diagonal of the covariance matrix associated to X."""
    Kdiag = np.zeros(X.shape[0])
    ly = 1 / self.lengthscale_Y
    lu = np.sqrt(3) / self.lengthscale_U
    Vu = self.variance_U
    Vy = self.variance_Y

    k1 = (2 * lu + ly) / (lu + ly)**2
    k2 = (ly - 2 * lu + 2 * lu - ly) / (ly - lu)**2
    k3 = 1 / (lu + ly) + lu / (lu + ly)**2

    slices = index_to_slices(X[:, -1])
    for i, ss1 in enumerate(slices):
        for s1 in ss1:
            if i == 0:
                Kdiag[s1] += self.variance_U + self.ubias
            elif i == 1:
                Kdiag[s1] += Vu * Vy * (k1 + k2 + k3)
            else:
                raise ValueError("invalid input/output index")

    #Kdiag[slices[0][0]]+= self.variance_U    #matern32 diag
    #Kdiag[slices[1][0]]+= self.variance_U*self.variance_Y*(k1+k2+k3)  # diag
    return Kdiag
[ "def", "Kdiag", "(", "self", ",", "X", ")", ":", "Kdiag", "=", "np", ".", "zeros", "(", "X", ".", "shape", "[", "0", "]", ")", "ly", "=", "1", "/", "self", ".", "lengthscale_Y", "lu", "=", "np", ".", "sqrt", "(", "3", ")", "/", "self", ".", "lengthscale_U", "Vu", "=", "self", ".", "variance_U", "Vy", "=", "self", ".", "variance_Y", "k1", "=", "(", "2", "*", "lu", "+", "ly", ")", "/", "(", "lu", "+", "ly", ")", "**", "2", "k2", "=", "(", "ly", "-", "2", "*", "lu", "+", "2", "*", "lu", "-", "ly", ")", "/", "(", "ly", "-", "lu", ")", "**", "2", "k3", "=", "1", "/", "(", "lu", "+", "ly", ")", "+", "(", "lu", ")", "/", "(", "lu", "+", "ly", ")", "**", "2", "slices", "=", "index_to_slices", "(", "X", "[", ":", ",", "-", "1", "]", ")", "for", "i", ",", "ss1", "in", "enumerate", "(", "slices", ")", ":", "for", "s1", "in", "ss1", ":", "if", "i", "==", "0", ":", "Kdiag", "[", "s1", "]", "+=", "self", ".", "variance_U", "+", "self", ".", "ubias", "elif", "i", "==", "1", ":", "Kdiag", "[", "s1", "]", "+=", "Vu", "*", "Vy", "*", "(", "k1", "+", "k2", "+", "k3", ")", "else", ":", "raise", "ValueError", "(", "\"invalid input/output index\"", ")", "#Kdiag[slices[0][0]]+= self.variance_U #matern32 diag", "#Kdiag[slices[1][0]]+= self.variance_U*self.variance_Y*(k1+k2+k3) # diag", "return", "Kdiag" ]
Compute the diagonal of the covariance matrix associated to X.
[ "Compute", "the", "diagonal", "of", "the", "covariance", "matrix", "associated", "to", "X", "." ]
python
train
SchroterQuentin/django-search-listview
search_listview/list.py
https://github.com/SchroterQuentin/django-search-listview/blob/8b027a6908dc30c6ebc613bb4fde6b1ba40124a3/search_listview/list.py#L184-L190
def alias_field(model, field):
    """
    Return the prefix name of a field
    """
    for part in field.split(LOOKUP_SEP)[:-1]:
        model = associate_model(model, part)
    return model.__name__ + "-" + field.split(LOOKUP_SEP)[-1]
[ "def", "alias_field", "(", "model", ",", "field", ")", ":", "for", "part", "in", "field", ".", "split", "(", "LOOKUP_SEP", ")", "[", ":", "-", "1", "]", ":", "model", "=", "associate_model", "(", "model", ",", "part", ")", "return", "model", ".", "__name__", "+", "\"-\"", "+", "field", ".", "split", "(", "LOOKUP_SEP", ")", "[", "-", "1", "]" ]
Return the prefix name of a field
[ "Return", "the", "prefix", "name", "of", "a", "field" ]
python
train
OSSOS/MOP
src/ossos/plotting/scripts/rose_topdown.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/plotting/scripts/rose_topdown.py#L165-L205
def plot_ossos_discoveries(ax, discoveries, plot_discoveries, plot_colossos=False,
                           split_plutinos=False):
    """
    plotted at their discovery locations, provided by the Version Releases in
    decimal degrees.
    """
    fc = ['b', '#E47833', 'k']
    alpha = [0.85, 0.6, 1.]
    marker = ['o', 'd']
    size = [7, 25]

    plottable = []
    # Which blocks' discoveries to include?
    for d in discoveries:
        for n in plot_discoveries:
            if d['object'].startswith(n):
                # can for sure be better, but this hack works. Need to get where going
                plottable.append(d)

    # Hack to get in the O15BD objects
    # directory_name = '/Users/bannisterm/Dropbox/OSSOS/measure3/ossin/D_tmp/'
    # kbos = parsers.ossos_discoveries(directory_name, all_objects=False, data_release=None)
    # for kbo in kbos:
    #     plottable_kbo = {'RAdeg': kbo.discovery.coordinate.ra.to_string(unit=units.degree, sep=':'),
    #                      'dist': kbo.orbit.distance.value}
    #     plottable.append(plottable_kbo)

    if plot_colossos:
        fainter = []
        colossos = []
        for n in plottable:
            if n['object'] in parameters.COLOSSOS:
                colossos.append(n)
            else:
                fainter.append(n)
        plot_ossos_points(fainter, ax, marker[0], size[0], fc[0], alpha[1], 1)
        plot_ossos_points(colossos, ax, marker[1], size[1], fc[2], alpha[2], 2)
    elif split_plutinos:
        # plutino_index = np.where((plottable['cl'] == 'res') & (plottable['j'] == 3) & (plottable['k'] == 2))
        raise NotImplementedError
    else:
        plot_ossos_points(plottable, ax, marker[0], size[0], fc[0], alpha[0], 2)

    return
[ "def", "plot_ossos_discoveries", "(", "ax", ",", "discoveries", ",", "plot_discoveries", ",", "plot_colossos", "=", "False", ",", "split_plutinos", "=", "False", ")", ":", "fc", "=", "[", "'b'", ",", "'#E47833'", ",", "'k'", "]", "alpha", "=", "[", "0.85", ",", "0.6", ",", "1.", "]", "marker", "=", "[", "'o'", ",", "'d'", "]", "size", "=", "[", "7", ",", "25", "]", "plottable", "=", "[", "]", "# Which blocks' discoveries to include?", "for", "d", "in", "discoveries", ":", "for", "n", "in", "plot_discoveries", ":", "if", "d", "[", "'object'", "]", ".", "startswith", "(", "n", ")", ":", "# can for sure be better, but this hack works. Need to get where going", "plottable", ".", "append", "(", "d", ")", "# Hack to get in the O15BD objects", "# directory_name = '/Users/bannisterm/Dropbox/OSSOS/measure3/ossin/D_tmp/'", "# kbos = parsers.ossos_discoveries(directory_name, all_objects=False, data_release=None)", "# for kbo in kbos:", "# plottable_kbo = {'RAdeg': kbo.discovery.coordinate.ra.to_string(unit=units.degree, sep=':'),", "# 'dist': kbo.orbit.distance.value}", "# plottable.append(plottable_kbo)", "if", "plot_colossos", ":", "fainter", "=", "[", "]", "colossos", "=", "[", "]", "for", "n", "in", "plottable", ":", "if", "n", "[", "'object'", "]", "in", "parameters", ".", "COLOSSOS", ":", "colossos", ".", "append", "(", "n", ")", "else", ":", "fainter", ".", "append", "(", "n", ")", "plot_ossos_points", "(", "fainter", ",", "ax", ",", "marker", "[", "0", "]", ",", "size", "[", "0", "]", ",", "fc", "[", "0", "]", ",", "alpha", "[", "1", "]", ",", "1", ")", "plot_ossos_points", "(", "colossos", ",", "ax", ",", "marker", "[", "1", "]", ",", "size", "[", "1", "]", ",", "fc", "[", "2", "]", ",", "alpha", "[", "2", "]", ",", "2", ")", "elif", "split_plutinos", ":", "# plutino_index = np.where((plottable['cl'] == 'res') & (plottable['j'] == 3) & (plottable['k'] == 2))", "raise", "NotImplementedError", "else", ":", "plot_ossos_points", "(", "plottable", ",", "ax", ",", "marker", "[", "0", "]", ",", "size", "[", "0", "]", ",", "fc", "[", "0", "]", ",", "alpha", "[", "0", "]", ",", "2", ")", "return" ]
plotted at their discovery locations, provided by the Version Releases in decimal degrees.
[ "plotted", "at", "their", "discovery", "locations", "provided", "by", "the", "Version", "Releases", "in", "decimal", "degrees", "." ]
python
train
vinci1it2000/schedula
examples/processing_chain/utils/plot.py
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/examples/processing_chain/utils/plot.py#L39-L52
def plot_lines(it):
    """
    Plotting lines.

    :param it:
        Data to plot where key value is the name of the series.
    :type it: list[dict]

    :return:
        The plot.
    :rtype: plotly.plotly.iplot
    """
    data = [go.Scatter(mode='lines', **d) for d in it]
    return py.iplot(data, filename='scatter-mode')
[ "def", "plot_lines", "(", "it", ")", ":", "data", "=", "[", "go", ".", "Scatter", "(", "mode", "=", "'lines'", ",", "*", "*", "d", ")", "for", "d", "in", "it", "]", "return", "py", ".", "iplot", "(", "data", ",", "filename", "=", "'scatter-mode'", ")" ]
Plotting lines.

:param it:
    Data to plot where key value is the name of the series.
:type it: list[dict]

:return:
    The plot.
:rtype: plotly.plotly.iplot
[ "Plotting", "lines", "." ]
python
train
androguard/androguard
androguard/core/bytecodes/axml/__init__.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/axml/__init__.py#L833-L847
def getAttributeValue(self, index):
    """
    This function is only used to look up strings
    All other work is done by
    :func:`~androguard.core.bytecodes.axml.format_value`
    # FIXME should unite those functions

    :param index: index of the attribute
    :return:
    """
    offset = self._get_attribute_offset(index)
    valueType = self.m_attributes[offset + ATTRIBUTE_IX_VALUE_TYPE]
    if valueType == TYPE_STRING:
        valueString = self.m_attributes[offset + ATTRIBUTE_IX_VALUE_STRING]
        return self.sb[valueString]
    return ''
[ "def", "getAttributeValue", "(", "self", ",", "index", ")", ":", "offset", "=", "self", ".", "_get_attribute_offset", "(", "index", ")", "valueType", "=", "self", ".", "m_attributes", "[", "offset", "+", "ATTRIBUTE_IX_VALUE_TYPE", "]", "if", "valueType", "==", "TYPE_STRING", ":", "valueString", "=", "self", ".", "m_attributes", "[", "offset", "+", "ATTRIBUTE_IX_VALUE_STRING", "]", "return", "self", ".", "sb", "[", "valueString", "]", "return", "''" ]
This function is only used to look up strings
All other work is done by
:func:`~androguard.core.bytecodes.axml.format_value`
# FIXME should unite those functions

:param index: index of the attribute
:return:
[ "This", "function", "is", "only", "used", "to", "look", "up", "strings", "All", "other", "work", "is", "done", "by", ":", "func", ":", "~androguard", ".", "core", ".", "bytecodes", ".", "axml", ".", "format_value", "#", "FIXME", "should", "unite", "those", "functions", ":", "param", "index", ":", "index", "of", "the", "attribute", ":", "return", ":" ]
python
train
nhfruchter/pgh-bustime
pghbustime/interface.py
https://github.com/nhfruchter/pgh-bustime/blob/b915e8fea28541612f0e79783c2cf12fd3daaac0/pghbustime/interface.py#L327-L360
def bulletins(self, rt="", rtdir="", stpid=""):
    """
    Return list of service alerts ('bulletins') for a route or stop.

    Arguments:
        `rt`: route designator
        or `stpid`: bus stop number
        or (`rt` and `rtdir`) or (`rt` and `rtdir` and `stpid`)

    Response:
        `sb`: (bulletin container) contains list of
            `nm`: bulletin name/ID
            `sbj`: bulletin subject
            `dtl`: full text and/or `brf`: short text
            `prty`: priority (high, medium, low)
            `srvc`: (routes bulletin applies to) contains list of
                `rt`: route designator
                `rtdir`: route direction
                `stpid`: bus stop ID number
                `stpnm`: bus stop name

    http://realtime.portauthority.org/bustime/apidoc/v1/main.jsp?section=serviceBulletins.jsp
    """
    if not (rt or stpid) or (rtdir and not (rt or stpid)):
        raise ValueError("You must specify a parameter.")

    if listlike(stpid):
        stpid = ",".join(stpid)
    if listlike(rt):
        rt = ",".join(rt)

    url = self.endpoint('BULLETINS', dict(rt=rt, rtdir=rtdir, stpid=stpid))
    return self.response(url)
[ "def", "bulletins", "(", "self", ",", "rt", "=", "\"\"", ",", "rtdir", "=", "\"\"", ",", "stpid", "=", "\"\"", ")", ":", "if", "not", "(", "rt", "or", "stpid", ")", "or", "(", "rtdir", "and", "not", "(", "rt", "or", "stpid", ")", ")", ":", "raise", "ValueError", "(", "\"You must specify a parameter.\"", ")", "if", "listlike", "(", "stpid", ")", ":", "stpid", "=", "\",\"", ".", "join", "(", "stpid", ")", "if", "listlike", "(", "rt", ")", ":", "rt", "=", "\",\"", ".", "join", "(", "rt", ")", "url", "=", "self", ".", "endpoint", "(", "'BULLETINS'", ",", "dict", "(", "rt", "=", "rt", ",", "rtdir", "=", "rtdir", ",", "stpid", "=", "stpid", ")", ")", "return", "self", ".", "response", "(", "url", ")" ]
Return list of service alerts ('bulletins') for a route or stop.

Arguments:
    `rt`: route designator
    or `stpid`: bus stop number
    or (`rt` and `rtdir`) or (`rt` and `rtdir` and `stpid`)

Response:
    `sb`: (bulletin container) contains list of
        `nm`: bulletin name/ID
        `sbj`: bulletin subject
        `dtl`: full text and/or `brf`: short text
        `prty`: priority (high, medium, low)
        `srvc`: (routes bulletin applies to) contains list of
            `rt`: route designator
            `rtdir`: route direction
            `stpid`: bus stop ID number
            `stpnm`: bus stop name

http://realtime.portauthority.org/bustime/apidoc/v1/main.jsp?section=serviceBulletins.jsp
[ "Return", "list", "of", "service", "alerts", "(", "bulletins", ")", "for", "a", "route", "or", "stop", ".", "Arguments", ":", "rt", ":", "route", "designator", "or", "stpid", ":", "bus", "stop", "number", "or", "(", "rt", "and", "rtdir", ")", "or", "(", "rt", "and", "rtdir", "and", "stpid", ")", "Response", ":", "sb", ":", "(", "bulletin", "container", ")", "contains", "list", "of", "nm", ":", "bulletin", "name", "/", "ID", "sbj", ":", "bulletin", "subject", "dtl", ":", "full", "text", "and", "/", "or", "brf", ":", "short", "text", "prty", ":", "priority", "(", "high", "medium", "low", ")", "srvc", ":", "(", "routes", "bulletin", "applies", "to", ")", "contains", "list", "of", "rt", ":", "route", "designator", "rtdir", ":", "route", "direction", "stpid", ":", "bus", "stop", "ID", "number", "stpnm", ":", "bus", "stop", "name", "http", ":", "//", "realtime", ".", "portauthority", ".", "org", "/", "bustime", "/", "apidoc", "/", "v1", "/", "main", ".", "jsp?section", "=", "serviceBulletins", ".", "jsp" ]
python
train
mbj4668/pyang
pyang/translators/dsdl.py
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L564-L575
def get_default(self, stmt, refd):
    """Return default value for `stmt` node.

    `refd` is a dictionary of applicable refinements that is
    constructed in the `process_patches` method.
    """
    if refd["default"]:
        return refd["default"]
    defst = stmt.search_one("default")
    if defst:
        return defst.arg
    return None
[ "def", "get_default", "(", "self", ",", "stmt", ",", "refd", ")", ":", "if", "refd", "[", "\"default\"", "]", ":", "return", "refd", "[", "\"default\"", "]", "defst", "=", "stmt", ".", "search_one", "(", "\"default\"", ")", "if", "defst", ":", "return", "defst", ".", "arg", "return", "None" ]
Return default value for `stmt` node. `refd` is a dictionary of applicable refinements that is constructed in the `process_patches` method.
[ "Return", "default", "value", "for", "stmt", "node", "." ]
python
train
jay-johnson/network-pipeline
network_pipeline/record_packets_to_csv.py
https://github.com/jay-johnson/network-pipeline/blob/4e53ae13fe12085e0cf2e5e1aff947368f4f1ffa/network_pipeline/record_packets_to_csv.py#L432-L467
def process_raw_frame(self, id=None, msg=None):
    """process_raw_frame

    Convert a complex nested json dictionary
    to a flattened dictionary and capture
    all unique keys for table construction

    :param id: key for this msg
    :param msg: raw frame for packet
    """
    # normalize into a dataframe
    df = json_normalize(msg)
    # convert to a flattened dictionary
    dt = json.loads(df.to_json())

    flat_msg = {}

    for k in dt:
        new_key = "raw_{}".format(k)
        flat_msg[new_key] = dt[k]["0"]
        if new_key not in self.raw_keys:
            self.raw_keys[new_key] = k
    # end of capturing all unique keys

    dt["raw_id"] = id
    self.all_raw.append(dt)

    log.debug("RAW data updated:")
    log.debug(self.raw_keys)
    log.debug(self.all_raw)
    log.debug("")

    return flat_msg
[ "def", "process_raw_frame", "(", "self", ",", "id", "=", "None", ",", "msg", "=", "None", ")", ":", "# normalize into a dataframe", "df", "=", "json_normalize", "(", "msg", ")", "# convert to a flattened dictionary", "dt", "=", "json", ".", "loads", "(", "df", ".", "to_json", "(", ")", ")", "flat_msg", "=", "{", "}", "for", "k", "in", "dt", ":", "new_key", "=", "\"raw_{}\"", ".", "format", "(", "k", ")", "flat_msg", "[", "new_key", "]", "=", "dt", "[", "k", "]", "[", "\"0\"", "]", "if", "new_key", "not", "in", "self", ".", "raw_keys", ":", "self", ".", "raw_keys", "[", "new_key", "]", "=", "k", "# end of capturing all unique keys", "dt", "[", "\"raw_id\"", "]", "=", "id", "self", ".", "all_raw", ".", "append", "(", "dt", ")", "log", ".", "debug", "(", "\"RAW data updated:\"", ")", "log", ".", "debug", "(", "self", ".", "raw_keys", ")", "log", ".", "debug", "(", "self", ".", "all_raw", ")", "log", ".", "debug", "(", "\"\"", ")", "return", "flat_msg" ]
process_raw_frame

Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction

:param id: key for this msg
:param msg: raw frame for packet
[ "process_raw_frame" ]
python
train
rbarrois/mpdlcd
mpdlcd/utils.py
https://github.com/rbarrois/mpdlcd/blob/85f16c8cc0883f8abb4c2cc7f69729c3e2f857da/mpdlcd/utils.py#L50-L81
def auto_retry(fun):
    """Decorator for retrying method calls, based on instance parameters."""

    @functools.wraps(fun)
    def decorated(instance, *args, **kwargs):
        """Wrapper around a decorated function."""
        cfg = instance._retry_config
        remaining_tries = cfg.retry_attempts
        current_wait = cfg.retry_wait
        retry_backoff = cfg.retry_backoff
        last_error = None

        while remaining_tries >= 0:
            try:
                return fun(instance, *args, **kwargs)
            except socket.error as e:
                last_error = e
                instance._retry_logger.warning('Connection failed: %s', e)

            remaining_tries -= 1
            if remaining_tries == 0:
                # Last attempt
                break

            # Wait a bit
            time.sleep(current_wait)
            current_wait *= retry_backoff

        # All attempts failed, let's raise the last error.
        raise last_error

    return decorated
[ "def", "auto_retry", "(", "fun", ")", ":", "@", "functools", ".", "wraps", "(", "fun", ")", "def", "decorated", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Wrapper around a decorated function.\"\"\"", "cfg", "=", "instance", ".", "_retry_config", "remaining_tries", "=", "cfg", ".", "retry_attempts", "current_wait", "=", "cfg", ".", "retry_wait", "retry_backoff", "=", "cfg", ".", "retry_backoff", "last_error", "=", "None", "while", "remaining_tries", ">=", "0", ":", "try", ":", "return", "fun", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "socket", ".", "error", "as", "e", ":", "last_error", "=", "e", "instance", ".", "_retry_logger", ".", "warning", "(", "'Connection failed: %s'", ",", "e", ")", "remaining_tries", "-=", "1", "if", "remaining_tries", "==", "0", ":", "# Last attempt", "break", "# Wait a bit", "time", ".", "sleep", "(", "current_wait", ")", "current_wait", "*=", "retry_backoff", "# All attempts failed, let's raise the last error.", "raise", "last_error", "return", "decorated" ]
Decorator for retrying method calls, based on instance parameters.
[ "Decorator", "for", "retrying", "method", "calls", "based", "on", "instance", "parameters", "." ]
python
train
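Not part of the dataset row: a minimal usage sketch for the decorator above, assuming mpdlcd is installed. The wrapper reads `_retry_config` (with `retry_attempts`, `retry_wait`, `retry_backoff`) and `_retry_logger` off the instance, so both must be provided.

import logging
import socket

from mpdlcd.utils import auto_retry  # module shown in the record above

class RetryConfig:
    retry_attempts = 3  # total attempts before giving up
    retry_wait = 0.1    # seconds before the first retry
    retry_backoff = 2   # wait multiplier after each failure

class Client(object):
    _retry_config = RetryConfig()
    _retry_logger = logging.getLogger('client')

    @auto_retry
    def fetch(self):
        raise socket.error('connection refused')  # always fails, to exercise retries

Client().fetch()  # logs three warnings, then re-raises the last socket.error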
gem/oq-engine
openquake/hazardlib/probability_map.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/probability_map.py#L96-L106
def convert(self, imtls, idx=0):
    """
    Convert a probability curve into a record of dtype `imtls.dt`.

    :param imtls: DictArray instance
    :param idx: extract the data corresponding to the given inner index
    """
    curve = numpy.zeros(1, imtls.dt)
    for imt in imtls:
        curve[imt] = self.array[imtls(imt), idx]
    return curve[0]
[ "def", "convert", "(", "self", ",", "imtls", ",", "idx", "=", "0", ")", ":", "curve", "=", "numpy", ".", "zeros", "(", "1", ",", "imtls", ".", "dt", ")", "for", "imt", "in", "imtls", ":", "curve", "[", "imt", "]", "=", "self", ".", "array", "[", "imtls", "(", "imt", ")", ",", "idx", "]", "return", "curve", "[", "0", "]" ]
Convert a probability curve into a record of dtype `imtls.dt`.

:param imtls: DictArray instance
:param idx: extract the data corresponding to the given inner index
[ "Convert", "a", "probability", "curve", "into", "a", "record", "of", "dtype", "imtls", ".", "dt", "." ]
python
train
StackStorm/pybind
pybind/nos/v6_0_2f/brocade_ntp_rpc/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/brocade_ntp_rpc/__init__.py#L96-L119
def _set_show_ntp(self, v, load=False):
    """
    Setter method for show_ntp, mapped from YANG variable /brocade_ntp_rpc/show_ntp (rpc)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_show_ntp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_show_ntp() directly.

    YANG Description: show active ntp server for cluster or specified switchid
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=show_ntp.show_ntp, is_leaf=True, yang_name="show-ntp", rest_name="show-ntp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'ntp-status'}}, namespace='urn:brocade.com:mgmt:brocade-ntp', defining_module='brocade-ntp', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """show_ntp must be of a type compatible with rpc""",
            'defined-type': "rpc",
            'generated-type': """YANGDynClass(base=show_ntp.show_ntp, is_leaf=True, yang_name="show-ntp", rest_name="show-ntp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'ntp-status'}}, namespace='urn:brocade.com:mgmt:brocade-ntp', defining_module='brocade-ntp', yang_type='rpc', is_config=True)""",
        })

    self.__show_ntp = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_show_ntp", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "show_ntp", ".", "show_ntp", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"show-ntp\"", ",", "rest_name", "=", "\"show-ntp\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "False", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'hidden'", ":", "u'rpccmd'", ",", "u'actionpoint'", ":", "u'ntp-status'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-ntp'", ",", "defining_module", "=", "'brocade-ntp'", ",", "yang_type", "=", "'rpc'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"show_ntp must be of a type compatible with rpc\"\"\"", ",", "'defined-type'", ":", "\"rpc\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=show_ntp.show_ntp, is_leaf=True, yang_name=\"show-ntp\", rest_name=\"show-ntp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'ntp-status'}}, namespace='urn:brocade.com:mgmt:brocade-ntp', defining_module='brocade-ntp', yang_type='rpc', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__show_ntp", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for show_ntp, mapped from YANG variable /brocade_ntp_rpc/show_ntp (rpc).
If this variable is read-only (config: false) in the source YANG file, then
_set_show_ntp is considered as a private method. Backends looking to populate this
variable should do so via calling thisObj._set_show_ntp() directly.

YANG Description: show active ntp server for cluster or specified switchid
[ "Setter", "method", "for", "show_ntp", "mapped", "from", "YANG", "variable", "/", "brocade_ntp_rpc", "/", "show_ntp", "(", "rpc", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_show_ntp", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_show_ntp", "()", "directly", "." ]
python
train
eleme/meepo
meepo/apps/eventsourcing/pub.py
https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/eventsourcing/pub.py#L119-L133
def session_rollback(self, session):
    """Send session_rollback signal in sqlalchemy ``after_rollback``.

    This marks the failure of session so the session may enter commit
    phase.
    """
    # this may happen when there's nothing to rollback
    if not hasattr(session, 'meepo_unique_id'):
        self.logger.debug("skipped - session_rollback")
        return

    # del session meepo id after rollback
    self.logger.debug("%s - after_rollback" % session.meepo_unique_id)
    signal("session_rollback").send(session)
    self._session_del(session)
[ "def", "session_rollback", "(", "self", ",", "session", ")", ":", "# this may happen when there's nothing to rollback", "if", "not", "hasattr", "(", "session", ",", "'meepo_unique_id'", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"skipped - session_rollback\"", ")", "return", "# del session meepo id after rollback", "self", ".", "logger", ".", "debug", "(", "\"%s - after_rollback\"", "%", "session", ".", "meepo_unique_id", ")", "signal", "(", "\"session_rollback\"", ")", ".", "send", "(", "session", ")", "self", ".", "_session_del", "(", "session", ")" ]
Send session_rollback signal in sqlalchemy ``after_rollback``. This marks the failure of session so the session may enter commit phase.
[ "Send", "session_rollback", "signal", "in", "sqlalchemy", "after_rollback", "." ]
python
train
minio/minio-py
minio/api.py
https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/api.py#L1686-L1710
def _new_multipart_upload(self, bucket_name, object_name,
                          metadata=None, sse=None):
    """
    Initialize new multipart upload request.

    :param bucket_name: Bucket name of the new multipart request.
    :param object_name: Object name of the new multipart request.
    :param metadata: Additional new metadata for the new object.
    :return: Returns an upload id.
    """
    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)

    headers = {}
    if metadata:
        headers.update(metadata)
    if sse:
        headers.update(sse.marshal())

    response = self._url_open('POST', bucket_name=bucket_name,
                              object_name=object_name,
                              query={'uploads': ''},
                              headers=headers)
    return parse_new_multipart_upload(response.data)
[ "def", "_new_multipart_upload", "(", "self", ",", "bucket_name", ",", "object_name", ",", "metadata", "=", "None", ",", "sse", "=", "None", ")", ":", "is_valid_bucket_name", "(", "bucket_name", ")", "is_non_empty_string", "(", "object_name", ")", "headers", "=", "{", "}", "if", "metadata", ":", "headers", ".", "update", "(", "metadata", ")", "if", "sse", ":", "headers", ".", "update", "(", "sse", ".", "marshal", "(", ")", ")", "response", "=", "self", ".", "_url_open", "(", "'POST'", ",", "bucket_name", "=", "bucket_name", ",", "object_name", "=", "object_name", ",", "query", "=", "{", "'uploads'", ":", "''", "}", ",", "headers", "=", "headers", ")", "return", "parse_new_multipart_upload", "(", "response", ".", "data", ")" ]
Initialize new multipart upload request.

:param bucket_name: Bucket name of the new multipart request.
:param object_name: Object name of the new multipart request.
:param metadata: Additional new metadata for the new object.
:return: Returns an upload id.
[ "Initialize", "new", "multipart", "upload", "request", "." ]
python
train
edx/edx-enterprise
setup.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/setup.py#L28-L68
def get_requirements(requirements_file):
    """
    Get the contents of a file listing the requirements
    """
    lines = open(requirements_file).readlines()
    dependencies = []
    dependency_links = []
    for line in lines:
        package = line.strip()
        if package.startswith('#'):
            # Skip pure comment lines
            continue
        if any(package.startswith(prefix) for prefix in VCS_PREFIXES):
            # VCS reference for dev purposes, expect a trailing comment
            # with the normal requirement
            package_link, __, package = package.rpartition('#')
            # Remove -e <version_control> string
            package_link = re.sub(r'(.*)(?P<dependency_link>https?.*$)',
                                  r'\g<dependency_link>',
                                  package_link)
            package = re.sub(r'(egg=)?(?P<package_name>.*)==.*$',
                             r'\g<package_name>',
                             package)
            package_version = re.sub(r'.*[^=]==', '', line.strip())
            if package:
                dependency_links.append(
                    '{package_link}#egg={package}-{package_version}'.format(
                        package_link=package_link,
                        package=package,
                        package_version=package_version,
                    )
                )
        else:
            # Ignore any trailing comment
            package, __, __ = package.partition('#')
            # Remove any whitespace and assume non-empty results are dependencies
            package = package.strip()
            if package:
                dependencies.append(package)
    return dependencies, dependency_links
[ "def", "get_requirements", "(", "requirements_file", ")", ":", "lines", "=", "open", "(", "requirements_file", ")", ".", "readlines", "(", ")", "dependencies", "=", "[", "]", "dependency_links", "=", "[", "]", "for", "line", "in", "lines", ":", "package", "=", "line", ".", "strip", "(", ")", "if", "package", ".", "startswith", "(", "'#'", ")", ":", "# Skip pure comment lines", "continue", "if", "any", "(", "package", ".", "startswith", "(", "prefix", ")", "for", "prefix", "in", "VCS_PREFIXES", ")", ":", "# VCS reference for dev purposes, expect a trailing comment", "# with the normal requirement", "package_link", ",", "__", ",", "package", "=", "package", ".", "rpartition", "(", "'#'", ")", "# Remove -e <version_control> string", "package_link", "=", "re", ".", "sub", "(", "r'(.*)(?P<dependency_link>https?.*$)'", ",", "r'\\g<dependency_link>'", ",", "package_link", ")", "package", "=", "re", ".", "sub", "(", "r'(egg=)?(?P<package_name>.*)==.*$'", ",", "r'\\g<package_name>'", ",", "package", ")", "package_version", "=", "re", ".", "sub", "(", "r'.*[^=]=='", ",", "''", ",", "line", ".", "strip", "(", ")", ")", "if", "package", ":", "dependency_links", ".", "append", "(", "'{package_link}#egg={package}-{package_version}'", ".", "format", "(", "package_link", "=", "package_link", ",", "package", "=", "package", ",", "package_version", "=", "package_version", ",", ")", ")", "else", ":", "# Ignore any trailing comment", "package", ",", "__", ",", "__", "=", "package", ".", "partition", "(", "'#'", ")", "# Remove any whitespace and assume non-empty results are dependencies", "package", "=", "package", ".", "strip", "(", ")", "if", "package", ":", "dependencies", ".", "append", "(", "package", ")", "return", "dependencies", ",", "dependency_links" ]
Get the contents of a file listing the requirements
[ "Get", "the", "contents", "of", "a", "file", "listing", "the", "requirements" ]
python
valid
ray-project/ray
python/ray/tune/trial.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/trial.py#L486-L495
def should_recover(self):
    """Returns whether the trial qualifies for restoring.

    This is if a checkpoint frequency is set and has not failed more than
    max_failures. This may return true even when there may not yet
    be a checkpoint.
    """
    return (self.checkpoint_freq > 0
            and (self.num_failures < self.max_failures
                 or self.max_failures < 0))
[ "def", "should_recover", "(", "self", ")", ":", "return", "(", "self", ".", "checkpoint_freq", ">", "0", "and", "(", "self", ".", "num_failures", "<", "self", ".", "max_failures", "or", "self", ".", "max_failures", "<", "0", ")", ")" ]
Returns whether the trial qualifies for restoring. This is if a checkpoint frequency is set and has not failed more than max_failures. This may return true even when there may not yet be a checkpoint.
[ "Returns", "whether", "the", "trial", "qualifies", "for", "restoring", "." ]
python
train
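Not part of the dataset row: the predicate above only reads three attributes, so a tiny stand-in (hypothetical, not Ray's actual Trial class) makes its truth table easy to check.

class TrialStub:
    """Hypothetical stand-in exposing only what should_recover reads."""
    def __init__(self, checkpoint_freq, num_failures, max_failures):
        self.checkpoint_freq = checkpoint_freq
        self.num_failures = num_failures
        self.max_failures = max_failures

    def should_recover(self):
        # Same expression as the record above.
        return (self.checkpoint_freq > 0
                and (self.num_failures < self.max_failures
                     or self.max_failures < 0))

print(TrialStub(10, 1, 3).should_recover())    # True: checkpointing on, failures under cap
print(TrialStub(0, 0, 3).should_recover())     # False: no checkpoint frequency set
print(TrialStub(10, 99, -1).should_recover())  # True: negative max_failures means unlimited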
gristlabs/asttokens
asttokens/util.py
https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/util.py#L214-L230
def replace(text, replacements):
    """
    Replaces multiple slices of text with new values. This is a convenience method
    for making code modifications of ranges e.g. as identified by
    ``ASTTokens.get_text_range(node)``. Replacements is an iterable of
    ``(start, end, new_text)`` tuples.

    For example, ``replace("this is a test", [(0, 4, "X"), (8, 9, "THE")])`` produces
    ``"X is THE test"``.
    """
    p = 0
    parts = []
    for (start, end, new_text) in sorted(replacements):
        parts.append(text[p:start])
        parts.append(new_text)
        p = end
    parts.append(text[p:])
    return ''.join(parts)
[ "def", "replace", "(", "text", ",", "replacements", ")", ":", "p", "=", "0", "parts", "=", "[", "]", "for", "(", "start", ",", "end", ",", "new_text", ")", "in", "sorted", "(", "replacements", ")", ":", "parts", ".", "append", "(", "text", "[", "p", ":", "start", "]", ")", "parts", ".", "append", "(", "new_text", ")", "p", "=", "end", "parts", ".", "append", "(", "text", "[", "p", ":", "]", ")", "return", "''", ".", "join", "(", "parts", ")" ]
Replaces multiple slices of text with new values. This is a convenience method for making code modifications of ranges e.g. as identified by ``ASTTokens.get_text_range(node)``. Replacements is an iterable of ``(start, end, new_text)`` tuples. For example, ``replace("this is a test", [(0, 4, "X"), (8, 9, "THE")])`` produces ``"X is THE test"``.
[ "Replaces", "multiple", "slices", "of", "text", "with", "new", "values", ".", "This", "is", "a", "convenience", "method", "for", "making", "code", "modifications", "of", "ranges", "e", ".", "g", ".", "as", "identified", "by", "ASTTokens", ".", "get_text_range", "(", "node", ")", ".", "Replacements", "is", "an", "iterable", "of", "(", "start", "end", "new_text", ")", "tuples", "." ]
python
train
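Not part of the dataset row: the docstring's worked example, runnable if asttokens is installed.

from asttokens.util import replace  # module shown in the record above

# Each tuple replaces text[start:end] with new_text; untouched spans are copied through.
print(replace("this is a test", [(0, 4, "X"), (8, 9, "THE")]))  # X is THE test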
openatx/facebook-wda
wda/__init__.py
https://github.com/openatx/facebook-wda/blob/aa644204620c6d5c7705a9c7452d8c0cc39330d5/wda/__init__.py#L394-L407
def set_alert_callback(self, callback):
    """
    Args:
        callback (func): called when alert popup

    Example of callback:

        def callback(session):
            session.alert.accept()
    """
    if callable(callback):
        self.http.alert_callback = functools.partial(callback, self)
    else:
        self.http.alert_callback = None
[ "def", "set_alert_callback", "(", "self", ",", "callback", ")", ":", "if", "callable", "(", "callable", ")", ":", "self", ".", "http", ".", "alert_callback", "=", "functools", ".", "partial", "(", "callback", ",", "self", ")", "else", ":", "self", ".", "http", ".", "alert_callback", "=", "None" ]
Args: callback (func): called when alert popup Example of callback: def callback(session): session.alert.accept()
[ "Args", ":", "callback", "(", "func", ")", ":", "called", "when", "alert", "popup", "Example", "of", "callback", ":" ]
python
train
SAP/PyHDB
pyhdb/lib/stringlib.py
https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/lib/stringlib.py#L19-L27
def allhexlify(data):
    """Hexlify given data into a string representation with hex values for all chars

    Input like 'ab\x04ce' becomes '\x61\x62\x04\x63\x65'
    """
    hx = binascii.hexlify(data)
    return b''.join([b'\\x' + o for o in re.findall(b'..', hx)])
[ "def", "allhexlify", "(", "data", ")", ":", "hx", "=", "binascii", ".", "hexlify", "(", "data", ")", "return", "b''", ".", "join", "(", "[", "b'\\\\x'", "+", "o", "for", "o", "in", "re", ".", "findall", "(", "b'..'", ",", "hx", ")", "]", ")" ]
Hexlify given data into a string representation with hex values for all chars.
Input like 'ab\x04ce' becomes '\x61\x62\x04\x63\x65'
[ "Hexlify", "given", "data", "into", "a", "string", "representation", "with", "hex", "values", "for", "all", "chars", "Input", "like", "ab", "\\", "x04ce", "becomes", "\\", "x61", "\\", "x62", "\\", "x04", "\\", "x63", "\\", "x65" ]
python
train
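Not part of the dataset row: the docstring's own example round-tripped, runnable if pyhdb is installed.

from pyhdb.lib.stringlib import allhexlify  # module shown in the record above

print(allhexlify(b'ab\x04ce'))  # b'\\x61\\x62\\x04\\x63\\x65'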
saimn/sigal
sigal/gallery.py
https://github.com/saimn/sigal/blob/912ca39991355d358dc85fd55c7aeabdd7acc386/sigal/gallery.py#L103-L118
def big(self):
    """Path to the original image, if ``keep_orig`` is set (relative to the
    album directory). Copy the file if needed.
    """
    if self.settings['keep_orig']:
        s = self.settings
        if s['use_orig']:
            # The image *is* the original, just use it
            return self.filename
        orig_path = join(s['destination'], self.path, s['orig_dir'])
        check_or_create_dir(orig_path)
        big_path = join(orig_path, self.src_filename)
        if not isfile(big_path):
            copy(self.src_path, big_path, symlink=s['orig_link'],
                 rellink=self.settings['rel_link'])
        return join(s['orig_dir'], self.src_filename)
[ "def", "big", "(", "self", ")", ":", "if", "self", ".", "settings", "[", "'keep_orig'", "]", ":", "s", "=", "self", ".", "settings", "if", "s", "[", "'use_orig'", "]", ":", "# The image *is* the original, just use it", "return", "self", ".", "filename", "orig_path", "=", "join", "(", "s", "[", "'destination'", "]", ",", "self", ".", "path", ",", "s", "[", "'orig_dir'", "]", ")", "check_or_create_dir", "(", "orig_path", ")", "big_path", "=", "join", "(", "orig_path", ",", "self", ".", "src_filename", ")", "if", "not", "isfile", "(", "big_path", ")", ":", "copy", "(", "self", ".", "src_path", ",", "big_path", ",", "symlink", "=", "s", "[", "'orig_link'", "]", ",", "rellink", "=", "self", ".", "settings", "[", "'rel_link'", "]", ")", "return", "join", "(", "s", "[", "'orig_dir'", "]", ",", "self", ".", "src_filename", ")" ]
Path to the original image, if ``keep_orig`` is set (relative to the album directory). Copy the file if needed.
[ "Path", "to", "the", "original", "image", "if", "keep_orig", "is", "set", "(", "relative", "to", "the", "album", "directory", ")", ".", "Copy", "the", "file", "if", "needed", "." ]
python
valid
jeremyschulman/halutz
halutz/client.py
https://github.com/jeremyschulman/halutz/blob/6bb398dc99bf723daabd9eda02494a11252ee109/halutz/client.py#L89-L96
def save_swagger_spec(self, filepath=None):
    """ Saves a copy of the origin_spec to a local file in JSON format """
    if filepath is True or filepath is None:
        filepath = self.file_spec.format(server=self.server)

    json.dump(self.origin_spec, open(filepath, 'w+'), indent=3)
[ "def", "save_swagger_spec", "(", "self", ",", "filepath", "=", "None", ")", ":", "if", "filepath", "is", "True", "or", "filepath", "is", "None", ":", "filepath", "=", "self", ".", "file_spec", ".", "format", "(", "server", "=", "self", ".", "server", ")", "json", ".", "dump", "(", "self", ".", "origin_spec", ",", "open", "(", "filepath", ",", "'w+'", ")", ",", "indent", "=", "3", ")" ]
Saves a copy of the origin_spec to a local file in JSON format
[ "Saves", "a", "copy", "of", "the", "origin_spec", "to", "a", "local", "file", "in", "JSON", "format" ]
python
train
michaelliao/sinaweibopy
snspy.py
https://github.com/michaelliao/sinaweibopy/blob/0f19dd71c1fbd16ee539620c7e9e986887f5c665/snspy.py#L324-L333
def refresh_access_token(self, refresh_token, redirect_uri=None):
    '''
    Refresh access token.
    '''
    redirect = redirect_uri or self._redirect_uri
    resp_text = _http('POST', 'https://graph.qq.com/oauth2.0/token',
                      refresh_token=refresh_token,
                      client_id=self._client_id,
                      client_secret=self._client_secret,
                      redirect_uri=redirect,
                      grant_type='refresh_token')
    return self._parse_access_token(resp_text)
[ "def", "refresh_access_token", "(", "self", ",", "refresh_token", ",", "redirect_uri", "=", "None", ")", ":", "redirect", "=", "redirect_uri", "or", "self", ".", "_redirect_uri", "resp_text", "=", "_http", "(", "'POST'", ",", "'https://graph.qq.com/oauth2.0/token'", ",", "refresh_token", "=", "refresh_token", ",", "client_id", "=", "self", ".", "_client_id", ",", "client_secret", "=", "self", ".", "_client_secret", ",", "redirect_uri", "=", "redirect", ",", "grant_type", "=", "'refresh_token'", ")", "return", "self", ".", "_parse_access_token", "(", "resp_text", ")" ]
Refresh access token.
[ "Refresh", "access", "token", "." ]
python
train
SmokinCaterpillar/pypet
pypet/trajectory.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/trajectory.py#L2180-L2218
def _merge_single_runs(self, other_trajectory, used_runs):
    """ Updates the `run_information` of the current trajectory."""
    count = len(self) # Variable to count the increasing new run indices and create
    # new run names

    run_indices = range(len(other_trajectory))

    run_name_dict = OrderedDict()
    to_store_groups_with_annotations = []

    for idx in run_indices:
        # Iterate through all used runs and store annotated groups and mark results and
        # derived parameters for merging
        if idx in used_runs:
            # Update the run information dict of the current trajectory
            other_info_dict = other_trajectory.f_get_run_information(idx)
            time_ = other_info_dict['time']
            timestamp = other_info_dict['timestamp']
            completed = other_info_dict['completed']
            short_environment_hexsha = other_info_dict['short_environment_hexsha']
            finish_timestamp = other_info_dict['finish_timestamp']
            runtime = other_info_dict['runtime']

            new_idx = used_runs[idx]
            new_runname = self.f_wildcard('$', new_idx)

            run_name_dict[idx] = new_runname

            info_dict = dict(
                idx=new_idx,
                time=time_,
                timestamp=timestamp,
                completed=completed,
                short_environment_hexsha=short_environment_hexsha,
                finish_timestamp=finish_timestamp,
                runtime=runtime)

            self._add_run_info(**info_dict)
[ "def", "_merge_single_runs", "(", "self", ",", "other_trajectory", ",", "used_runs", ")", ":", "count", "=", "len", "(", "self", ")", "# Variable to count the increasing new run indices and create", "# new run names", "run_indices", "=", "range", "(", "len", "(", "other_trajectory", ")", ")", "run_name_dict", "=", "OrderedDict", "(", ")", "to_store_groups_with_annotations", "=", "[", "]", "for", "idx", "in", "run_indices", ":", "# Iterate through all used runs and store annotated groups and mark results and", "# derived parameters for merging", "if", "idx", "in", "used_runs", ":", "# Update the run information dict of the current trajectory", "other_info_dict", "=", "other_trajectory", ".", "f_get_run_information", "(", "idx", ")", "time_", "=", "other_info_dict", "[", "'time'", "]", "timestamp", "=", "other_info_dict", "[", "'timestamp'", "]", "completed", "=", "other_info_dict", "[", "'completed'", "]", "short_environment_hexsha", "=", "other_info_dict", "[", "'short_environment_hexsha'", "]", "finish_timestamp", "=", "other_info_dict", "[", "'finish_timestamp'", "]", "runtime", "=", "other_info_dict", "[", "'runtime'", "]", "new_idx", "=", "used_runs", "[", "idx", "]", "new_runname", "=", "self", ".", "f_wildcard", "(", "'$'", ",", "new_idx", ")", "run_name_dict", "[", "idx", "]", "=", "new_runname", "info_dict", "=", "dict", "(", "idx", "=", "new_idx", ",", "time", "=", "time_", ",", "timestamp", "=", "timestamp", ",", "completed", "=", "completed", ",", "short_environment_hexsha", "=", "short_environment_hexsha", ",", "finish_timestamp", "=", "finish_timestamp", ",", "runtime", "=", "runtime", ")", "self", ".", "_add_run_info", "(", "*", "*", "info_dict", ")" ]
Updates the `run_information` of the current trajectory.
[ "Updates", "the", "run_information", "of", "the", "current", "trajectory", "." ]
python
test
mitsei/dlkit
dlkit/json_/grading/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/objects.py#L1732-L1738
def _get_entry_scores(self):
    """Takes entries from self._entries and returns a list of scores
    (or output scores, if based on grades)"""
    if self.get_gradebook_column().get_grade_system().is_based_on_grades():
        return [e.get_grade().get_output_score() for e in self._entries if e.is_graded()]
    else:
        return [e.get_score() for e in self._entries if e.is_graded()]
[ "def", "_get_entry_scores", "(", "self", ")", ":", "if", "self", ".", "get_gradebook_column", "(", ")", ".", "get_grade_system", "(", ")", ".", "is_based_on_grades", "(", ")", ":", "return", "[", "e", ".", "get_grade", "(", ")", ".", "get_output_score", "(", ")", "for", "e", "in", "self", ".", "_entries", "if", "e", ".", "is_graded", "(", ")", "]", "else", ":", "return", "[", "e", ".", "get_score", "(", ")", "for", "e", "in", "self", ".", "_entries", "if", "e", ".", "is_graded", "(", ")", "]" ]
Takes entries from self._entries and returns a list of scores (or output scores, if based on grades)
[ "Takes", "entries", "from", "self", ".", "_entries", "and", "returns", "a", "list", "of", "scores", "(", "or", "output", "scores", "if", "based", "on", "grades", ")" ]
python
train
plivo/plivohelper-python
plivohelper.py
https://github.com/plivo/plivohelper-python/blob/a2f706d69e2138fbb973f792041341f662072d26/plivohelper.py#L195-L200
def schedule_hangup(self, call_params):
    """REST Schedule Hangup Helper
    """
    path = '/' + self.api_version + '/ScheduleHangup/'
    method = 'POST'
    return self.request(path, method, call_params)
[ "def", "schedule_hangup", "(", "self", ",", "call_params", ")", ":", "path", "=", "'/'", "+", "self", ".", "api_version", "+", "'/ScheduleHangup/'", "method", "=", "'POST'", "return", "self", ".", "request", "(", "path", ",", "method", ",", "call_params", ")" ]
REST Schedule Hangup Helper
[ "REST", "Schedule", "Hangup", "Helper" ]
python
valid
hyperledger/sawtooth-core
validator/sawtooth_validator/execution/execution_context.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/execution/execution_context.py#L214-L228
def get_all_if_deleted(self):
    """Return all the addresses deleted in the context.
    Useful in the squash method.

    Returns:
        (dict of str to bytes): The addresses and bytes that have
            been deleted in the context.
    """
    with self._lock:
        results = {}
        for add, fut in self._state.items():
            if self._contains_and_deleted(add):
                results[add] = fut.result()
        return results
[ "def", "get_all_if_deleted", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "results", "=", "{", "}", "for", "add", ",", "fut", "in", "self", ".", "_state", ".", "items", "(", ")", ":", "if", "self", ".", "_contains_and_deleted", "(", "add", ")", ":", "results", "[", "add", "]", "=", "fut", ".", "result", "(", ")", "return", "results" ]
Return all the addresses deleted in the context.
Useful in the squash method.

Returns:
    (dict of str to bytes): The addresses and bytes that have
        been deleted in the context.
[ "Return", "all", "the", "addresses", "deleted", "in", "the", "context", ".", "Useful", "in", "the", "squash", "method", "." ]
python
train
NASA-AMMOS/AIT-Core
ait/core/tlm.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/tlm.py#L793-L803
def eval(self, packet):
    """Returns the result of evaluating this PacketExpression in the
    context of the given Packet.
    """
    try:
        context = createPacketContext(packet)
        result = eval(self._code, packet._defn.globals, context)
    except ZeroDivisionError:
        result = None

    return result
[ "def", "eval", "(", "self", ",", "packet", ")", ":", "try", ":", "context", "=", "createPacketContext", "(", "packet", ")", "result", "=", "eval", "(", "self", ".", "_code", ",", "packet", ".", "_defn", ".", "globals", ",", "context", ")", "except", "ZeroDivisionError", ":", "result", "=", "None", "return", "result" ]
Returns the result of evaluating this PacketExpression in the context of the given Packet.
[ "Returns", "the", "result", "of", "evaluating", "this", "PacketExpression", "in", "the", "context", "of", "the", "given", "Packet", "." ]
python
train
pymc-devs/pymc
pymc/distributions.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2296-L2313
def truncated_pareto_like(x, alpha, m, b):
    R"""
    Truncated Pareto log-likelihood. The Pareto is a continuous, positive
    probability distribution with two parameters. It is often used to
    characterize wealth distribution, or other examples of the 80/20 rule.

    .. math::
        f(x \mid \alpha, m, b) = \frac{\alpha m^{\alpha} x^{-\alpha-1}}{1-(m/b)^{\alpha}}

    :Parameters:
      - `x` : Input data (x > m)
      - `alpha` : Shape parameter (alpha>0)
      - `m` : Scale parameter (m>0)
      - `b` : Upper bound (b>m)
    """
    return flib.truncated_pareto(x, alpha, m, b)
[ "def", "truncated_pareto_like", "(", "x", ",", "alpha", ",", "m", ",", "b", ")", ":", "return", "flib", ".", "truncated_pareto", "(", "x", ",", "alpha", ",", "m", ",", "b", ")" ]
R""" Truncated Pareto log-likelihood. The Pareto is a continuous, positive probability distribution with two parameters. It is often used to characterize wealth distribution, or other examples of the 80/20 rule. .. math:: f(x \mid \alpha, m, b) = \frac{\alpha m^{\alpha} x^{-\alpha}}{1-(m/b)**{\alpha}} :Parameters: - `x` : Input data (x > m) - `alpha` : Shape parameter (alpha>0) - `m` : Scale parameter (m>0) - `b` : Upper bound (b>m)
[ "R", "Truncated", "Pareto", "log", "-", "likelihood", ".", "The", "Pareto", "is", "a", "continuous", "positive", "probability", "distribution", "with", "two", "parameters", ".", "It", "is", "often", "used", "to", "characterize", "wealth", "distribution", "or", "other", "examples", "of", "the", "80", "/", "20", "rule", "." ]
python
train
fake-name/ChromeController
ChromeController/Generator/Generated.py
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L6990-L7008
def Debugger_setSkipAllPauses(self, skip):
    """
    Function path: Debugger.setSkipAllPauses
        Domain: Debugger
        Method name: setSkipAllPauses

    Parameters:
        Required arguments:
            'skip' (type: boolean) -> New value for skip pauses state.
    No return value.

    Description: Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc).
    """
    assert isinstance(skip, (bool,)), \
        "Argument 'skip' must be of type '['bool']'. Received type: '%s'" % type(skip)
    subdom_funcs = self.synchronous_command('Debugger.setSkipAllPauses', skip=skip)
    return subdom_funcs
[ "def", "Debugger_setSkipAllPauses", "(", "self", ",", "skip", ")", ":", "assert", "isinstance", "(", "skip", ",", "(", "bool", ",", ")", ")", ",", "\"Argument 'skip' must be of type '['bool']'. Received type: '%s'\"", "%", "type", "(", "skip", ")", "subdom_funcs", "=", "self", ".", "synchronous_command", "(", "'Debugger.setSkipAllPauses'", ",", "skip", "=", "skip", ")", "return", "subdom_funcs" ]
Function path: Debugger.setSkipAllPauses Domain: Debugger Method name: setSkipAllPauses Parameters: Required arguments: 'skip' (type: boolean) -> New value for skip pauses state. No return value. Description: Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc).
[ "Function", "path", ":", "Debugger", ".", "setSkipAllPauses", "Domain", ":", "Debugger", "Method", "name", ":", "setSkipAllPauses", "Parameters", ":", "Required", "arguments", ":", "skip", "(", "type", ":", "boolean", ")", "-", ">", "New", "value", "for", "skip", "pauses", "state", ".", "No", "return", "value", ".", "Description", ":", "Makes", "page", "not", "interrupt", "on", "any", "pauses", "(", "breakpoint", "exception", "dom", "exception", "etc", ")", "." ]
python
train
rigetti/pyquil
pyquil/noise.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/noise.py#L639-L652
def estimate_bitstring_probs(results):
    """
    Given an array of single shot results estimate the probability distribution over all bitstrings.

    :param np.array results: A 2d array where the outer axis iterates over shots
        and the inner axis over bits.
    :return: An array with as many axes as there are qubit and normalized such that
        it sums to one. ``p[i,j,...,k]`` gives the estimated probability of
        bitstring ``ij...k``.
    :rtype: np.array
    """
    nshots, nq = np.shape(results)
    outcomes = np.array([int("".join(map(str, r)), 2) for r in results])
    probs = np.histogram(outcomes, bins=np.arange(-.5, 2 ** nq, 1))[0] / float(nshots)
    return _bitstring_probs_by_qubit(probs)
[ "def", "estimate_bitstring_probs", "(", "results", ")", ":", "nshots", ",", "nq", "=", "np", ".", "shape", "(", "results", ")", "outcomes", "=", "np", ".", "array", "(", "[", "int", "(", "\"\"", ".", "join", "(", "map", "(", "str", ",", "r", ")", ")", ",", "2", ")", "for", "r", "in", "results", "]", ")", "probs", "=", "np", ".", "histogram", "(", "outcomes", ",", "bins", "=", "np", ".", "arange", "(", "-", ".5", ",", "2", "**", "nq", ",", "1", ")", ")", "[", "0", "]", "/", "float", "(", "nshots", ")", "return", "_bitstring_probs_by_qubit", "(", "probs", ")" ]
Given an array of single shot results estimate the probability distribution over all bitstrings.

:param np.array results: A 2d array where the outer axis iterates over shots
    and the inner axis over bits.
:return: An array with as many axes as there are qubit and normalized such that
    it sums to one. ``p[i,j,...,k]`` gives the estimated probability of
    bitstring ``ij...k``.
:rtype: np.array
[ "Given", "an", "array", "of", "single", "shot", "results", "estimate", "the", "probability", "distribution", "over", "all", "bitstrings", "." ]
python
train
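A short self-contained check of the estimator above; only the test scaffolding is new:

import numpy as np
from pyquil.noise import estimate_bitstring_probs

# Four shots on two qubits: outcomes 00, 01, 01, 11
results = np.array([[0, 0], [0, 1], [0, 1], [1, 1]])
p = estimate_bitstring_probs(results)
assert p.shape == (2, 2)            # one axis per qubit
assert abs(p[0, 1] - 0.5) < 1e-12   # '01' observed in 2 of 4 shots
assert abs(p.sum() - 1.0) < 1e-12   # distribution is normalized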
WojciechMula/canvas2svg
canvasvg.py
https://github.com/WojciechMula/canvas2svg/blob/c05d73d88499e5c565386a1765f79d9417a14dac/canvasvg.py#L403-L412
def polyline(document, coords):
    "polyline with more than 2 vertices"
    points = []
    for i in range(0, len(coords), 2):
        points.append("%s,%s" % (coords[i], coords[i+1]))

    return setattribs(
        document.createElement('polyline'),
        points = ' '.join(points),
    )
[ "def", "polyline", "(", "document", ",", "coords", ")", ":", "points", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "coords", ")", ",", "2", ")", ":", "points", ".", "append", "(", "\"%s,%s\"", "%", "(", "coords", "[", "i", "]", ",", "coords", "[", "i", "+", "1", "]", ")", ")", "return", "setattribs", "(", "document", ".", "createElement", "(", "'polyline'", ")", ",", "points", "=", "' '", ".", "join", "(", "points", ")", ",", ")" ]
polyline with more than 2 vertices
[ "polyline", "with", "more", "then", "2", "vertices" ]
python
train
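A sketch of driving the helper above directly with a standard-library DOM document (normal callers go through canvasvg's canvas converter, so the direct call is purely illustrative):

from xml.dom.minidom import Document
import canvasvg

doc = Document()
# Flat coordinate list (x1, y1, x2, y2, ...), as Tkinter canvas items use
elem = canvasvg.polyline(doc, [0, 0, 50, 25, 100, 0])
print(elem.toxml())  # <polyline points="0,0 50,25 100,0"/>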
osrg/ryu
ryu/lib/igmplib.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/igmplib.py#L522-L552
def packet_in_handler(self, req_pkt, req_igmp, msg): """the process when the snooper received IGMP.""" dpid = msg.datapath.id ofproto = msg.datapath.ofproto if ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION: in_port = msg.in_port else: in_port = msg.match['in_port'] log = "SW=%s PORT=%d IGMP received. " % ( dpid_to_str(dpid), in_port) self.logger.debug(str(req_igmp)) if igmp.IGMP_TYPE_QUERY == req_igmp.msgtype: self.logger.info(log + "[QUERY]") (req_ipv4, ) = req_pkt.get_protocols(ipv4.ipv4) (req_eth, ) = req_pkt.get_protocols(ethernet.ethernet) self._do_query(req_igmp, req_ipv4, req_eth, in_port, msg) elif (igmp.IGMP_TYPE_REPORT_V1 == req_igmp.msgtype or igmp.IGMP_TYPE_REPORT_V2 == req_igmp.msgtype): self.logger.info(log + "[REPORT]") self._do_report(req_igmp, in_port, msg) elif igmp.IGMP_TYPE_LEAVE == req_igmp.msgtype: self.logger.info(log + "[LEAVE]") self._do_leave(req_igmp, in_port, msg) elif igmp.IGMP_TYPE_REPORT_V3 == req_igmp.msgtype: self.logger.info(log + "V3 is not supported yet.") self._do_flood(in_port, msg) else: self.logger.info(log + "[unknown type:%d]", req_igmp.msgtype) self._do_flood(in_port, msg)
[ "def", "packet_in_handler", "(", "self", ",", "req_pkt", ",", "req_igmp", ",", "msg", ")", ":", "dpid", "=", "msg", ".", "datapath", ".", "id", "ofproto", "=", "msg", ".", "datapath", ".", "ofproto", "if", "ofproto", ".", "OFP_VERSION", "==", "ofproto_v1_0", ".", "OFP_VERSION", ":", "in_port", "=", "msg", ".", "in_port", "else", ":", "in_port", "=", "msg", ".", "match", "[", "'in_port'", "]", "log", "=", "\"SW=%s PORT=%d IGMP received. \"", "%", "(", "dpid_to_str", "(", "dpid", ")", ",", "in_port", ")", "self", ".", "logger", ".", "debug", "(", "str", "(", "req_igmp", ")", ")", "if", "igmp", ".", "IGMP_TYPE_QUERY", "==", "req_igmp", ".", "msgtype", ":", "self", ".", "logger", ".", "info", "(", "log", "+", "\"[QUERY]\"", ")", "(", "req_ipv4", ",", ")", "=", "req_pkt", ".", "get_protocols", "(", "ipv4", ".", "ipv4", ")", "(", "req_eth", ",", ")", "=", "req_pkt", ".", "get_protocols", "(", "ethernet", ".", "ethernet", ")", "self", ".", "_do_query", "(", "req_igmp", ",", "req_ipv4", ",", "req_eth", ",", "in_port", ",", "msg", ")", "elif", "(", "igmp", ".", "IGMP_TYPE_REPORT_V1", "==", "req_igmp", ".", "msgtype", "or", "igmp", ".", "IGMP_TYPE_REPORT_V2", "==", "req_igmp", ".", "msgtype", ")", ":", "self", ".", "logger", ".", "info", "(", "log", "+", "\"[REPORT]\"", ")", "self", ".", "_do_report", "(", "req_igmp", ",", "in_port", ",", "msg", ")", "elif", "igmp", ".", "IGMP_TYPE_LEAVE", "==", "req_igmp", ".", "msgtype", ":", "self", ".", "logger", ".", "info", "(", "log", "+", "\"[LEAVE]\"", ")", "self", ".", "_do_leave", "(", "req_igmp", ",", "in_port", ",", "msg", ")", "elif", "igmp", ".", "IGMP_TYPE_REPORT_V3", "==", "req_igmp", ".", "msgtype", ":", "self", ".", "logger", ".", "info", "(", "log", "+", "\"V3 is not supported yet.\"", ")", "self", ".", "_do_flood", "(", "in_port", ",", "msg", ")", "else", ":", "self", ".", "logger", ".", "info", "(", "log", "+", "\"[unknown type:%d]\"", ",", "req_igmp", ".", "msgtype", ")", "self", ".", "_do_flood", "(", "in_port", ",", "msg", ")" ]
the process when the snooper received IGMP.
[ "the", "process", "when", "the", "snooper", "received", "IGMP", "." ]
python
train
bskinn/opan
opan/vpt2/base.py
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/vpt2/base.py#L54-L144
def new_from_files(self, basepath, basename, repo, \ bohrs=False, \ software=_E_SW.ORCA, \ repo_clobber=False, **kwargs): """ Initialize with data from files. """ # Imports import os from os import path as osp from ..xyz import OpanXYZ as OX from ..grad import OrcaEngrad as OE from ..hess import OrcaHess as OH from .repo import OpanAnharmRepo as OR from ..const import EnumDispDirection as E_DDir, EnumFileType as E_FT from ..const import EnumSoftware as E_SW from ..const import DEF from ..error import AnharmError as ANHErr ## # Store working directory for restore? ## prev_dir = os.getcwd() # Complain if anything is already bound if not self.w_xyz == None: raise ANHErr(ANHErr.STATUS, "XYZ object is already bound", "") ## end if if not self.w_grad == None: raise ANHErr(ANHErr.STATUS, "GRAD object is already bound", "") ## end if if not self.w_hess == None: raise ANHErr(ANHErr.STATUS, "HESS object is already bound", "") ## end if if not self.repo == None: raise ANHErr(ANHErr.STATUS, "Repository object is already bound", "") ## end if # RESUME: vpt2--factor for loading from different software pkgs # Load the three data files self.w_xyz = OX( osp.join(basepath, \ basename + osp.extsep + xyz_ext) ) self.w_grad = OE( osp.join(basepath, \ basename + osp.extsep + engrad_ext), \ 0, E_DDir.NO_DISP, 0.0 ) self.w_hess = OH( osp.join(basepath, \ basename + osp.extsep + hess_ext), \ 0, E_DDir.NO_DISP, 0.0 ) # Only accept new repos for now if not isinstance(repo, str): raise TypeError("Must create new repository when loading " + "a new dataset.") ## end if # Repo is string, treat as filename and try to load # Check if it's a complete path # If it's a relative path, prepend the basepath if osp.split(repo[0]) > 0 and not osp.isabs(repo): repo = osp.join(basepath, repo) ## end if # Complain if it's a directory if osp.isdir(repo): raise IOError("Cannot bind repository -- specified " + "location is a directory") ## end if # If file exists ... if osp.isfile(repo): # Depending on clobber, either delete existing or raise error if repo_clobber: # Clobber old repo os.remove(repo) else: # Raise error raise IOError("Target repository file exists and " + "clobber is disabled.") ## end if ## end if # Should be good to create the repo self.repo = OR(repo)
[ "def", "new_from_files", "(", "self", ",", "basepath", ",", "basename", ",", "repo", ",", "bohrs", "=", "False", ",", "software", "=", "_E_SW", ".", "ORCA", ",", "repo_clobber", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# Imports", "import", "os", "from", "os", "import", "path", "as", "osp", "from", ".", ".", "xyz", "import", "OpanXYZ", "as", "OX", "from", ".", ".", "grad", "import", "OrcaEngrad", "as", "OE", "from", ".", ".", "hess", "import", "OrcaHess", "as", "OH", "from", ".", "repo", "import", "OpanAnharmRepo", "as", "OR", "from", ".", ".", "const", "import", "EnumDispDirection", "as", "E_DDir", ",", "EnumFileType", "as", "E_FT", "from", ".", ".", "const", "import", "EnumSoftware", "as", "E_SW", "from", ".", ".", "const", "import", "DEF", "from", ".", ".", "error", "import", "AnharmError", "as", "ANHErr", "## # Store working directory for restore?", "## prev_dir = os.getcwd()", "# Complain if anything is already bound", "if", "not", "self", ".", "w_xyz", "==", "None", ":", "raise", "ANHErr", "(", "ANHErr", ".", "STATUS", ",", "\"XYZ object is already bound\"", ",", "\"\"", ")", "## end if", "if", "not", "self", ".", "w_grad", "==", "None", ":", "raise", "ANHErr", "(", "ANHErr", ".", "STATUS", ",", "\"GRAD object is already bound\"", ",", "\"\"", ")", "## end if", "if", "not", "self", ".", "w_hess", "==", "None", ":", "raise", "ANHErr", "(", "ANHErr", ".", "STATUS", ",", "\"HESS object is already bound\"", ",", "\"\"", ")", "## end if", "if", "not", "self", ".", "repo", "==", "None", ":", "raise", "ANHErr", "(", "ANHErr", ".", "STATUS", ",", "\"Repository object is already bound\"", ",", "\"\"", ")", "## end if", "# RESUME: vpt2--factor for loading from different software pkgs", "# Load the three data files", "self", ".", "w_xyz", "=", "OX", "(", "osp", ".", "join", "(", "basepath", ",", "basename", "+", "osp", ".", "extsep", "+", "xyz_ext", ")", ")", "self", ".", "w_grad", "=", "OE", "(", "osp", ".", "join", "(", "basepath", ",", "basename", "+", "osp", ".", "extsep", "+", "engrad_ext", ")", ",", "0", ",", "E_DDir", ".", "NO_DISP", ",", "0.0", ")", "self", ".", "w_hess", "=", "OH", "(", "osp", ".", "join", "(", "basepath", ",", "basename", "+", "osp", ".", "extsep", "+", "hess_ext", ")", ",", "0", ",", "E_DDir", ".", "NO_DISP", ",", "0.0", ")", "# Only accept new repos for now", "if", "not", "isinstance", "(", "repo", ",", "str", ")", ":", "raise", "TypeError", "(", "\"Must create new repository when loading \"", "+", "\"a new dataset.\"", ")", "## end if", "# Repo is string, treat as filename and try to load", "# Check if it's a complete path", "# If it's a relative path, prepend the basepath", "if", "osp", ".", "split", "(", "repo", "[", "0", "]", ")", ">", "0", "and", "not", "osp", ".", "isabs", "(", "repo", ")", ":", "repo", "=", "osp", ".", "join", "(", "basepath", ",", "repo", ")", "## end if", "# Complain if it's a directory", "if", "osp", ".", "isdir", "(", "repo", ")", ":", "raise", "IOError", "(", "\"Cannot bind repository -- specified \"", "+", "\"location is a directory\"", ")", "## end if", "# If file exists ...", "if", "osp", ".", "isfile", "(", "repo", ")", ":", "# Depending on clobber, either delete existing or raise error", "if", "repo_clobber", ":", "# Clobber old repo", "os", ".", "remove", "(", "repo", ")", "else", ":", "# Raise error", "raise", "IOError", "(", "\"Target repository file exists and \"", "+", "\"clobber is disabled.\"", ")", "## end if", "## end if", "# Should be good to create the repo", "self", ".", "repo", "=", "OR", "(", "repo", ")" ]
Initialize with data from files.
[ "Initialize", "with", "data", "from", "files", "." ]
python
train
wummel/linkchecker
third_party/miniboa-r42/handler_demo.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/miniboa-r42/handler_demo.py#L25-L36
def my_on_connect(client): """ Example on_connect handler. """ client.send('You connected from %s\n' % client.addrport()) if CLIENTS: client.send('Also connected are:\n') for neighbor in CLIENTS: client.send('%s\n' % neighbor.addrport()) else: client.send('Sadly, you are alone.\n') CLIENTS.append(client)
[ "def", "my_on_connect", "(", "client", ")", ":", "client", ".", "send", "(", "'You connected from %s\\n'", "%", "client", ".", "addrport", "(", ")", ")", "if", "CLIENTS", ":", "client", ".", "send", "(", "'Also connected are:\\n'", ")", "for", "neighbor", "in", "CLIENTS", ":", "client", ".", "send", "(", "'%s\\n'", "%", "neighbor", ".", "addrport", "(", ")", ")", "else", ":", "client", ".", "send", "(", "'Sadly, you are alone.\\n'", ")", "CLIENTS", ".", "append", "(", "client", ")" ]
Example on_connect handler.
[ "Example", "on_connect", "handler", "." ]
python
train
chaoss/grimoirelab-sortinghat
sortinghat/cmd/show.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/cmd/show.py#L83-L118
def show(self, uuid=None, term=None): """Show the information related to unique identities. This method prints information related to unique identities such as identities or enrollments. When <uuid> is given, it will only show information about the unique identity related to <uuid>. When <term> is set, it will only show information about those unique identities that have any attribute (name, email, username, source) which match with the given term. This parameter does not have any effect when <uuid> is set. :param uuid: unique identifier :param term: term to match with unique identities data """ try: if uuid: uidentities = api.unique_identities(self.db, uuid) elif term: uidentities = api.search_unique_identities(self.db, term) else: uidentities = api.unique_identities(self.db) for uid in uidentities: # Add enrollments to a new property 'roles' enrollments = api.enrollments(self.db, uid.uuid) uid.roles = enrollments self.display('show.tmpl', uidentities=uidentities) except NotFoundError as e: self.error(str(e)) return e.code return CMD_SUCCESS
[ "def", "show", "(", "self", ",", "uuid", "=", "None", ",", "term", "=", "None", ")", ":", "try", ":", "if", "uuid", ":", "uidentities", "=", "api", ".", "unique_identities", "(", "self", ".", "db", ",", "uuid", ")", "elif", "term", ":", "uidentities", "=", "api", ".", "search_unique_identities", "(", "self", ".", "db", ",", "term", ")", "else", ":", "uidentities", "=", "api", ".", "unique_identities", "(", "self", ".", "db", ")", "for", "uid", "in", "uidentities", ":", "# Add enrollments to a new property 'roles'", "enrollments", "=", "api", ".", "enrollments", "(", "self", ".", "db", ",", "uid", ".", "uuid", ")", "uid", ".", "roles", "=", "enrollments", "self", ".", "display", "(", "'show.tmpl'", ",", "uidentities", "=", "uidentities", ")", "except", "NotFoundError", "as", "e", ":", "self", ".", "error", "(", "str", "(", "e", ")", ")", "return", "e", ".", "code", "return", "CMD_SUCCESS" ]
Show the information related to unique identities. This method prints information related to unique identities such as identities or enrollments. When <uuid> is given, it will only show information about the unique identity related to <uuid>. When <term> is set, it will only show information about those unique identities that have any attribute (name, email, username, source) which match with the given term. This parameter does not have any effect when <uuid> is set. :param uuid: unique identifier :param term: term to match with unique identities data
[ "Show", "the", "information", "related", "to", "unique", "identities", "." ]
python
train
cloudify-cosmo/repex
repex.py
https://github.com/cloudify-cosmo/repex/blob/589e442857fa4a99fa88670d7df1a72f983bbd28/repex.py#L302-L315
def _expand_var(self, in_string, available_variables):
    """Expand each variable in in_string to its corresponding value

    :param string in_string: the string to replace in
    :param dict available_variables: mapping of variable names to their values
    """
    instances = self._get_instances(in_string)
    for instance in instances:
        for name, value in available_variables.items():
            variable_string = self._get_variable_string(name)
            if instance == variable_string:
                in_string = in_string.replace(variable_string, value)
    return in_string
[ "def", "_expand_var", "(", "self", ",", "in_string", ",", "available_variables", ")", ":", "instances", "=", "self", ".", "_get_instances", "(", "in_string", ")", "for", "instance", "in", "instances", ":", "for", "name", ",", "value", "in", "available_variables", ".", "items", "(", ")", ":", "variable_string", "=", "self", ".", "_get_variable_string", "(", "name", ")", "if", "instance", "==", "variable_string", ":", "in_string", "=", "in_string", ".", "replace", "(", "variable_string", ",", "value", ")", "return", "in_string" ]
Expand each variable in in_string to its corresponding value

:param string in_string: the string to replace in
:param dict available_variables: mapping of variable names to their values
[ "Expand", "variable", "to", "its", "corresponding", "value", "in_string" ]
python
train
mitsei/dlkit
dlkit/services/assessment.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/assessment.py#L3229-L3237
def use_comparative_assessment_taken_view(self): """Pass through to provider AssessmentTakenLookupSession.use_comparative_assessment_taken_view""" self._object_views['assessment_taken'] = COMPARATIVE # self._get_provider_session('assessment_taken_lookup_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_comparative_assessment_taken_view() except AttributeError: pass
[ "def", "use_comparative_assessment_taken_view", "(", "self", ")", ":", "self", ".", "_object_views", "[", "'assessment_taken'", "]", "=", "COMPARATIVE", "# self._get_provider_session('assessment_taken_lookup_session') # To make sure the session is tracked", "for", "session", "in", "self", ".", "_get_provider_sessions", "(", ")", ":", "try", ":", "session", ".", "use_comparative_assessment_taken_view", "(", ")", "except", "AttributeError", ":", "pass" ]
Pass through to provider AssessmentTakenLookupSession.use_comparative_assessment_taken_view
[ "Pass", "through", "to", "provider", "AssessmentTakenLookupSession", ".", "use_comparative_assessment_taken_view" ]
python
train
chrisspen/dtree
dtree.py
https://github.com/chrisspen/dtree/blob/9e9c9992b22ad9a7e296af7e6837666b05db43ef/dtree.py#L1293-L1314
def build(cls, data, *args, **kwargs): """ Constructs a classification or regression tree in a single batch by analyzing the given data. """ assert isinstance(data, Data) if data.is_continuous_class: fitness_func = gain_variance else: fitness_func = get_gain t = cls(data=data, *args, **kwargs) t._data = data t.sample_count = len(data) t._tree = create_decision_tree( data=data, attributes=data.attribute_names, class_attr=data.class_attribute_name, fitness_func=fitness_func, wrapper=t, ) return t
[ "def", "build", "(", "cls", ",", "data", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "assert", "isinstance", "(", "data", ",", "Data", ")", "if", "data", ".", "is_continuous_class", ":", "fitness_func", "=", "gain_variance", "else", ":", "fitness_func", "=", "get_gain", "t", "=", "cls", "(", "data", "=", "data", ",", "*", "args", ",", "*", "*", "kwargs", ")", "t", ".", "_data", "=", "data", "t", ".", "sample_count", "=", "len", "(", "data", ")", "t", ".", "_tree", "=", "create_decision_tree", "(", "data", "=", "data", ",", "attributes", "=", "data", ".", "attribute_names", ",", "class_attr", "=", "data", ".", "class_attribute_name", ",", "fitness_func", "=", "fitness_func", ",", "wrapper", "=", "t", ",", ")", "return", "t" ]
Constructs a classification or regression tree in a single batch by analyzing the given data.
[ "Constructs", "a", "classification", "or", "regression", "tree", "in", "a", "single", "batch", "by", "analyzing", "the", "given", "data", "." ]
python
train
elemoine/papyrus
papyrus/xsd.py
https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/xsd.py#L146-L160
def add_column_property_xsd(self, tb, column_property): """ Add the XSD for a column property to the ``TreeBuilder``. """ if len(column_property.columns) != 1: raise NotImplementedError # pragma: no cover column = column_property.columns[0] if column.primary_key and not self.include_primary_keys: return if column.foreign_keys and not self.include_foreign_keys: if len(column.foreign_keys) != 1: # pragma: no cover # FIXME understand when a column can have multiple # foreign keys raise NotImplementedError() return attrs = {'name': column_property.key} self.add_column_xsd(tb, column, attrs)
[ "def", "add_column_property_xsd", "(", "self", ",", "tb", ",", "column_property", ")", ":", "if", "len", "(", "column_property", ".", "columns", ")", "!=", "1", ":", "raise", "NotImplementedError", "# pragma: no cover", "column", "=", "column_property", ".", "columns", "[", "0", "]", "if", "column", ".", "primary_key", "and", "not", "self", ".", "include_primary_keys", ":", "return", "if", "column", ".", "foreign_keys", "and", "not", "self", ".", "include_foreign_keys", ":", "if", "len", "(", "column", ".", "foreign_keys", ")", "!=", "1", ":", "# pragma: no cover", "# FIXME understand when a column can have multiple", "# foreign keys", "raise", "NotImplementedError", "(", ")", "return", "attrs", "=", "{", "'name'", ":", "column_property", ".", "key", "}", "self", ".", "add_column_xsd", "(", "tb", ",", "column", ",", "attrs", ")" ]
Add the XSD for a column property to the ``TreeBuilder``.
[ "Add", "the", "XSD", "for", "a", "column", "property", "to", "the", "TreeBuilder", "." ]
python
train
aio-libs/aiohttp-cors
aiohttp_cors/urldispatcher_router_adapter.py
https://github.com/aio-libs/aiohttp-cors/blob/14affbd95c88c675eb513c1d295ede1897930f94/aiohttp_cors/urldispatcher_router_adapter.py#L226-L271
def set_config_for_routing_entity(
        self,
        routing_entity: Union[web.Resource, web.StaticResource,
                              web.ResourceRoute],
        config):
    """Record configuration for resource or its route."""

    if isinstance(routing_entity,
                  (web.Resource, web.StaticResource)):
        resource = routing_entity

        # Add resource configuration or fail if it's already added.
        if resource in self._resource_config:
            raise ValueError(
                "CORS is already configured for {!r} resource.".format(
                    resource))

        self._resource_config[resource] = _ResourceConfig(
            default_config=config)

    elif isinstance(routing_entity, web.ResourceRoute):
        route = routing_entity

        # Add resource's route configuration or fail if it's already added.
        if route.resource not in self._resource_config:
            self.set_config_for_routing_entity(route.resource, config)

        if route.resource not in self._resource_config:
            raise ValueError(
                "Can't setup CORS for {!r} request, "
                "CORS must be enabled for route's resource first.".format(
                    route))

        resource_config = self._resource_config[route.resource]

        if route.method in resource_config.method_config:
            raise ValueError(
                "Can't setup CORS for {!r} route: CORS already "
                "configured on resource {!r} for {} method".format(
                    route, route.resource, route.method))

        resource_config.method_config[route.method] = config

    else:
        raise ValueError(
            "Resource or ResourceRoute expected, got {!r}".format(
                routing_entity))
[ "def", "set_config_for_routing_entity", "(", "self", ",", "routing_entity", ":", "Union", "[", "web", ".", "Resource", ",", "web", ".", "StaticResource", ",", "web", ".", "ResourceRoute", "]", ",", "config", ")", ":", "if", "isinstance", "(", "routing_entity", ",", "(", "web", ".", "Resource", ",", "web", ".", "StaticResource", ")", ")", ":", "resource", "=", "routing_entity", "# Add resource configuration or fail if it's already added.", "if", "resource", "in", "self", ".", "_resource_config", ":", "raise", "ValueError", "(", "\"CORS is already configured for {!r} resource.\"", ".", "format", "(", "resource", ")", ")", "self", ".", "_resource_config", "[", "resource", "]", "=", "_ResourceConfig", "(", "default_config", "=", "config", ")", "elif", "isinstance", "(", "routing_entity", ",", "web", ".", "ResourceRoute", ")", ":", "route", "=", "routing_entity", "# Add resource's route configuration or fail if it's already added.", "if", "route", ".", "resource", "not", "in", "self", ".", "_resource_config", ":", "self", ".", "set_config_for_routing_entity", "(", "route", ".", "resource", ",", "config", ")", "if", "route", ".", "resource", "not", "in", "self", ".", "_resource_config", ":", "raise", "ValueError", "(", "\"Can't setup CORS for {!r} request, \"", "\"CORS must be enabled for route's resource first.\"", ".", "format", "(", "route", ")", ")", "resource_config", "=", "self", ".", "_resource_config", "[", "route", ".", "resource", "]", "if", "route", ".", "method", "in", "resource_config", ".", "method_config", ":", "raise", "ValueError", "(", "\"Can't setup CORS for {!r} route: CORS already \"", "\"configured on resource {!r} for {} method\"", ".", "format", "(", "route", ",", "route", ".", "resource", ",", "route", ".", "method", ")", ")", "resource_config", ".", "method_config", "[", "route", ".", "method", "]", "=", "config", "else", ":", "raise", "ValueError", "(", "\"Resource or ResourceRoute expected, got {!r}\"", ".", "format", "(", "routing_entity", ")", ")" ]
Record configuration for resource or its route.
[ "Record", "configuration", "for", "resource", "or", "it", "s", "route", "." ]
python
train
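The method above is the adapter's internal bookkeeping; applications normally reach it through aiohttp-cors' public setup/add API. A sketch of that path (option values are illustrative):

from aiohttp import web
import aiohttp_cors

async def handler(request):
    return web.Response(text="ok")

app = web.Application()
cors = aiohttp_cors.setup(app, defaults={
    "*": aiohttp_cors.ResourceOptions(allow_headers="*"),
})
route = app.router.add_get("/data", handler)
# Recording the route's CORS config lands in set_config_for_routing_entity();
# configuring the same route twice raises ValueError, per the checks above.
cors.add(route)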
palantir/python-language-server
pyls/workspace.py
https://github.com/palantir/python-language-server/blob/96e08d85635382d17024c352306c4759f124195d/pyls/workspace.py#L63-L68
def get_document(self, doc_uri): """Return a managed document if-present, else create one pointing at disk. See https://github.com/Microsoft/language-server-protocol/issues/177 """ return self._docs.get(doc_uri) or self._create_document(doc_uri)
[ "def", "get_document", "(", "self", ",", "doc_uri", ")", ":", "return", "self", ".", "_docs", ".", "get", "(", "doc_uri", ")", "or", "self", ".", "_create_document", "(", "doc_uri", ")" ]
Return a managed document if-present, else create one pointing at disk. See https://github.com/Microsoft/language-server-protocol/issues/177
[ "Return", "a", "managed", "document", "if", "-", "present", "else", "create", "one", "pointing", "at", "disk", "." ]
python
train
un33k/django-toolware
toolware/utils/query.py
https://github.com/un33k/django-toolware/blob/973f3e003dc38b812897dab88455bee37dcaf931/toolware/utils/query.py#L12-L20
def get_unique_or_none(klass, *args, **kwargs): """ Returns a unique instance of `klass` or None """ try: return klass.objects.get(*args, **kwargs) except klass.DoesNotExist: return None except klass.MultipleObjectsReturned: return None return None
[ "def", "get_unique_or_none", "(", "klass", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "klass", ".", "objects", ".", "get", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "klass", ".", "DoesNotExist", ":", "return", "None", "except", "klass", ".", "MultipleObjectsReturned", ":", "return", "None", "return", "None" ]
Returns a unique instance of `klass` or None
[ "Returns", "a", "unique", "instance", "of", "klass", "or", "None" ]
python
test
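A minimal sketch, assuming a hypothetical Django model Profile; the helper returns None both when no row matches and when several do:

from toolware.utils.query import get_unique_or_none
from myapp.models import Profile  # hypothetical model

profile = get_unique_or_none(Profile, user__email="a@example.com")
if profile is None:
    # zero matches or multiple matches; both are handled the same way
    pass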
CityOfZion/neo-python
neo/Core/State/AccountState.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/State/AccountState.py#L200-L214
def AddToBalance(self, assetId, fixed8_val): """ Add amount to the specified balance. Args: assetId (UInt256): fixed8_val (Fixed8): amount to add. """ found = False for key, balance in self.Balances.items(): if key == assetId: self.Balances[assetId] = self.Balances[assetId] + fixed8_val found = True if not found: self.Balances[assetId] = fixed8_val
[ "def", "AddToBalance", "(", "self", ",", "assetId", ",", "fixed8_val", ")", ":", "found", "=", "False", "for", "key", ",", "balance", "in", "self", ".", "Balances", ".", "items", "(", ")", ":", "if", "key", "==", "assetId", ":", "self", ".", "Balances", "[", "assetId", "]", "=", "self", ".", "Balances", "[", "assetId", "]", "+", "fixed8_val", "found", "=", "True", "if", "not", "found", ":", "self", ".", "Balances", "[", "assetId", "]", "=", "fixed8_val" ]
Add amount to the specified balance. Args: assetId (UInt256): fixed8_val (Fixed8): amount to add.
[ "Add", "amount", "to", "the", "specified", "balance", "." ]
python
train
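A minimal sketch of the accumulate-or-insert logic above, using plain stand-ins (a dict membership test replaces the record's loop, and ints stand in for neo-python's UInt256/Fixed8 types) so it runs self-contained:

class Account:
    # Mirrors the Balances bookkeeping from the record
    def __init__(self):
        self.Balances = {}

    def AddToBalance(self, assetId, amount):
        if assetId in self.Balances:
            self.Balances[assetId] += amount
        else:
            self.Balances[assetId] = amount

acct = Account()
acct.AddToBalance("asset-A", 5)
acct.AddToBalance("asset-A", 3)
assert acct.Balances["asset-A"] == 8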
apple/turicreate
deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/cmake-3.13.4/Source/cmConvertMSBuildXMLToJSON.py#L292-L296
def __convert_string(node): """Converts a StringProperty node to JSON format.""" converted = __convert_node(node, default_flags=vsflags(VSFlags.UserValue)) return __check_for_flag(converted)
[ "def", "__convert_string", "(", "node", ")", ":", "converted", "=", "__convert_node", "(", "node", ",", "default_flags", "=", "vsflags", "(", "VSFlags", ".", "UserValue", ")", ")", "return", "__check_for_flag", "(", "converted", ")" ]
Converts a StringProperty node to JSON format.
[ "Converts", "a", "StringProperty", "node", "to", "JSON", "format", "." ]
python
train
delfick/gitmit
gitmit/cache.py
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/cache.py#L20-L42
def get_all_cached_commit_times(root_folder): """ Find the gitmit cached commit_times and return them if they are the right shape. This means the file is a list of dictionaries. If they aren't, issue a warning and return an empty list, it is just a cache after all! """ result = [] location = cache_location(root_folder) if os.path.exists(location): try: result = json.load(open(location)) except (TypeError, ValueError) as error: log.warning("Failed to open gitmit cached commit_times\tlocation=%s\terror=%s", location, error) else: if type(result) is not list or not all(type(item) is dict for item in result): log.warning("Gitmit cached commit_times needs to be a list of dictionaries\tlocation=%s\tgot=%s", location, type(result)) result = [] return result
[ "def", "get_all_cached_commit_times", "(", "root_folder", ")", ":", "result", "=", "[", "]", "location", "=", "cache_location", "(", "root_folder", ")", "if", "os", ".", "path", ".", "exists", "(", "location", ")", ":", "try", ":", "result", "=", "json", ".", "load", "(", "open", "(", "location", ")", ")", "except", "(", "TypeError", ",", "ValueError", ")", "as", "error", ":", "log", ".", "warning", "(", "\"Failed to open gitmit cached commit_times\\tlocation=%s\\terror=%s\"", ",", "location", ",", "error", ")", "else", ":", "if", "type", "(", "result", ")", "is", "not", "list", "or", "not", "all", "(", "type", "(", "item", ")", "is", "dict", "for", "item", "in", "result", ")", ":", "log", ".", "warning", "(", "\"Gitmit cached commit_times needs to be a list of dictionaries\\tlocation=%s\\tgot=%s\"", ",", "location", ",", "type", "(", "result", ")", ")", "result", "=", "[", "]", "return", "result" ]
Find the gitmit cached commit_times and return them if they are the right shape. This means the file is a list of dictionaries. If they aren't, issue a warning and return an empty list, it is just a cache after all!
[ "Find", "the", "gitmit", "cached", "commit_times", "and", "return", "them", "if", "they", "are", "the", "right", "shape", "." ]
python
train
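Usage is a single call; a sketch assuming a checkout at /path/to/repo (the cache file location itself is resolved internally by cache_location):

from gitmit.cache import get_all_cached_commit_times

# Returns [] (after logging a warning) when the cache is absent or malformed
for entry in get_all_cached_commit_times("/path/to/repo"):
    print(entry)  # each entry is a dict, per the shape check above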
materialsproject/pymatgen
pymatgen/io/abinit/tasks.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/tasks.py#L2222-L2235
def get_results(self, **kwargs): """ Returns :class:`NodeResults` instance. Subclasses should extend this method (if needed) by adding specialized code that performs some kind of post-processing. """ # Check whether the process completed. if self.returncode is None: raise self.Error("return code is None, you should call wait, communicate or poll") if self.status is None or self.status < self.S_DONE: raise self.Error("Task is not completed") return self.Results.from_node(self)
[ "def", "get_results", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Check whether the process completed.", "if", "self", ".", "returncode", "is", "None", ":", "raise", "self", ".", "Error", "(", "\"return code is None, you should call wait, communicate or poll\"", ")", "if", "self", ".", "status", "is", "None", "or", "self", ".", "status", "<", "self", ".", "S_DONE", ":", "raise", "self", ".", "Error", "(", "\"Task is not completed\"", ")", "return", "self", ".", "Results", ".", "from_node", "(", "self", ")" ]
Returns :class:`NodeResults` instance. Subclasses should extend this method (if needed) by adding specialized code that performs some kind of post-processing.
[ "Returns", ":", "class", ":", "NodeResults", "instance", ".", "Subclasses", "should", "extend", "this", "method", "(", "if", "needed", ")", "by", "adding", "specialized", "code", "that", "performs", "some", "kind", "of", "post", "-", "processing", "." ]
python
train
sibirrer/lenstronomy
lenstronomy/LensModel/lens_model.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/lens_model.py#L57-L71
def fermat_potential(self, x_image, y_image, x_source, y_source, kwargs_lens): """ fermat potential (negative sign means earlier arrival time) :param x_image: image position :param y_image: image position :param x_source: source position :param y_source: source position :param kwargs_lens: list of keyword arguments of lens model parameters matching the lens model classes :return: fermat potential in arcsec**2 without geometry term (second part of Eqn 1 in Suyu et al. 2013) as a list """ if hasattr(self.lens_model, 'fermat_potential'): return self.lens_model.fermat_potential(x_image, y_image, x_source, y_source, kwargs_lens) else: raise ValueError("Fermat potential is not defined in multi-plane lensing. Please use single plane lens models.")
[ "def", "fermat_potential", "(", "self", ",", "x_image", ",", "y_image", ",", "x_source", ",", "y_source", ",", "kwargs_lens", ")", ":", "if", "hasattr", "(", "self", ".", "lens_model", ",", "'fermat_potential'", ")", ":", "return", "self", ".", "lens_model", ".", "fermat_potential", "(", "x_image", ",", "y_image", ",", "x_source", ",", "y_source", ",", "kwargs_lens", ")", "else", ":", "raise", "ValueError", "(", "\"Fermat potential is not defined in multi-plane lensing. Please use single plane lens models.\"", ")" ]
fermat potential (negative sign means earlier arrival time) :param x_image: image position :param y_image: image position :param x_source: source position :param y_source: source position :param kwargs_lens: list of keyword arguments of lens model parameters matching the lens model classes :return: fermat potential in arcsec**2 without geometry term (second part of Eqn 1 in Suyu et al. 2013) as a list
[ "fermat", "potential", "(", "negative", "sign", "means", "earlier", "arrival", "time", ")" ]
python
train
ToucanToco/toucan-data-sdk
toucan_data_sdk/utils/generic/compute_ffill_by_group.py
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/generic/compute_ffill_by_group.py#L6-L67
def compute_ffill_by_group(
    df, id_cols: List[str], reference_cols: List[str], value_col: str
):
    """
    Compute `ffill` with `groupby`
    Dedicated method as there is a performance issue with a simple groupby/fillna (2017/07)

    The method `ffill` propagates last valid value forward to next values.

    ---

    ### Parameters

    *mandatory :*
    - `id_cols` (*list of str*): names of columns used to create each group.
    - `reference_cols` (*list of str*): names of columns used to sort.
    - `value_col` (*str*): name of the column to fill.

    ---

    ### Example

    **Input**

     name   |     rank       |   value
    :------:|:--------------:|:--------:
     A      |     1          |   2
     A      |     2          |   5
     A      |     3          |   null
     B      |     1          |   null
     B      |     2          |   7

    ```cson
    compute_ffill_by_group:
      id_cols: ['name']
      reference_cols: ['rank']
      value_col: 'value'
    ```

    **Output**

     name   |     rank       |   value
    :------:|:--------------:|:--------:
     A      |     1          |   2
     A      |     2          |   5
     A      |     3          |   5
     B      |     1          |   null
     B      |     2          |   7
    """
    check_params_columns_duplicate(id_cols + reference_cols + [value_col])
    df = df.sort_values(by=id_cols + reference_cols)
    df = df.set_index(id_cols)
    df['fill'] = 1 - df[value_col].isnull().astype(int)
    df['fill'] = df.groupby(
        level=list(range(0, len(id_cols) - 1))
    )['fill'].cumsum()
    df[value_col] = df[value_col].ffill()
    df.loc[df['fill'] == 0, value_col] = None
    del df['fill']
    return df.reset_index()
[ "def", "compute_ffill_by_group", "(", "df", ",", "id_cols", ":", "List", "[", "str", "]", ",", "reference_cols", ":", "List", "[", "str", "]", ",", "value_col", ":", "str", ")", ":", "check_params_columns_duplicate", "(", "id_cols", "+", "reference_cols", "+", "[", "value_col", "]", ")", "df", "=", "df", ".", "sort_values", "(", "by", "=", "id_cols", "+", "reference_cols", ")", "df", "=", "df", ".", "set_index", "(", "id_cols", ")", "df", "[", "'fill'", "]", "=", "1", "-", "df", "[", "value_col", "]", ".", "isnull", "(", ")", ".", "astype", "(", "int", ")", "df", "[", "'fill'", "]", "=", "df", ".", "groupby", "(", "level", "=", "list", "(", "range", "(", "0", ",", "len", "(", "id_cols", ")", "-", "1", ")", ")", ")", "[", "'fill'", "]", ".", "cumsum", "(", ")", "df", "[", "value_col", "]", "=", "df", "[", "value_col", "]", ".", "ffill", "(", ")", "df", ".", "loc", "[", "df", "[", "'fill'", "]", "==", "0", ",", "value_col", "]", "=", "None", "del", "df", "[", "'fill'", "]", "return", "df", ".", "reset_index", "(", ")" ]
Compute `ffill` with `groupby`
Dedicated method as there is a performance issue with a simple groupby/fillna (2017/07)

The method `ffill` propagates last valid value forward to next values.

---

### Parameters

*mandatory :*
- `id_cols` (*list of str*): names of columns used to create each group.
- `reference_cols` (*list of str*): names of columns used to sort.
- `value_col` (*str*): name of the column to fill.

---

### Example

**Input**

 name   |     rank       |   value
:------:|:--------------:|:--------:
 A      |     1          |   2
 A      |     2          |   5
 A      |     3          |   null
 B      |     1          |   null
 B      |     2          |   7

```cson
compute_ffill_by_group:
  id_cols: ['name']
  reference_cols: ['rank']
  value_col: 'value'
```

**Output**

 name   |     rank       |   value
:------:|:--------------:|:--------:
 A      |     1          |   2
 A      |     2          |   5
 A      |     3          |   5
 B      |     1          |   null
 B      |     2          |   7
[ "Compute", "ffill", "with", "groupby", "Dedicated", "method", "as", "there", "is", "a", "performance", "issue", "with", "a", "simple", "groupby", "/", "fillna", "(", "2017", "/", "07", ")", "The", "method", "ffill", "propagates", "last", "valid", "value", "forward", "to", "next", "values", "." ]
python
test
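The docstring's example run end to end. The import path is assumed from the record's file layout, and a constant second id column is included because the cumsum grouping uses len(id_cols) - 1 index levels:

import numpy as np
import pandas as pd
from toucan_data_sdk.utils.generic import compute_ffill_by_group  # assumed path

df = pd.DataFrame({
    'name': ['A', 'A', 'A', 'B', 'B'],
    'group': ['g'] * 5,
    'rank': [1, 2, 3, 1, 2],
    'value': [2, 5, np.nan, np.nan, 7],
})
out = compute_ffill_by_group(df, id_cols=['name', 'group'],
                             reference_cols=['rank'], value_col='value')
# A's null at rank 3 is filled with 5; B's leading null stays null
print(out)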
CEA-COSMIC/ModOpt
modopt/opt/linear.py
https://github.com/CEA-COSMIC/ModOpt/blob/019b189cb897cbb4d210c44a100daaa08468830c/modopt/opt/linear.py#L154-L184
def _check_type(self, input_val): """ Check Input Type This method checks if the input is a list, tuple or a numpy array and converts the input to a numpy array Parameters ---------- input_val : list, tuple or np.ndarray Returns ------- np.ndarray of input Raises ------ TypeError For invalid input type """ if not isinstance(input_val, (list, tuple, np.ndarray)): raise TypeError('Invalid input type, input must be a list, tuple ' 'or numpy array.') input_val = np.array(input_val) if not input_val.size: raise ValueError('Input list is empty.') return input_val
[ "def", "_check_type", "(", "self", ",", "input_val", ")", ":", "if", "not", "isinstance", "(", "input_val", ",", "(", "list", ",", "tuple", ",", "np", ".", "ndarray", ")", ")", ":", "raise", "TypeError", "(", "'Invalid input type, input must be a list, tuple '", "'or numpy array.'", ")", "input_val", "=", "np", ".", "array", "(", "input_val", ")", "if", "not", "input_val", ".", "size", ":", "raise", "ValueError", "(", "'Input list is empty.'", ")", "return", "input_val" ]
Check Input Type This method checks if the input is a list, tuple or a numpy array and converts the input to a numpy array Parameters ---------- input_val : list, tuple or np.ndarray Returns ------- np.ndarray of input Raises ------ TypeError For invalid input type
[ "Check", "Input", "Type" ]
python
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L2152-L2172
def suggest(self, w): """ Return a list of (word, confidence) spelling corrections for the given word, based on the probability of known words with edit distance 1-2 from the given word. """ if len(self) == 0: self.load() if len(w) == 1: return [(w, 1.0)] # I if w in PUNCTUATION: return [(w, 1.0)] # .?! if w.replace(".", "").isdigit(): return [(w, 1.0)] # 1.5 candidates = self._known([w]) \ or self._known(self._edit1(w)) \ or self._known(self._edit2(w)) \ or [w] candidates = [(self.get(c, 0.0), c) for c in candidates] s = float(sum(p for p, w in candidates) or 1) candidates = sorted(((p / s, w) for p, w in candidates), reverse=True) candidates = [(w.istitle() and x.title() or x, p) for p, x in candidates] # case-sensitive return candidates
[ "def", "suggest", "(", "self", ",", "w", ")", ":", "if", "len", "(", "self", ")", "==", "0", ":", "self", ".", "load", "(", ")", "if", "len", "(", "w", ")", "==", "1", ":", "return", "[", "(", "w", ",", "1.0", ")", "]", "# I", "if", "w", "in", "PUNCTUATION", ":", "return", "[", "(", "w", ",", "1.0", ")", "]", "# .?!", "if", "w", ".", "replace", "(", "\".\"", ",", "\"\"", ")", ".", "isdigit", "(", ")", ":", "return", "[", "(", "w", ",", "1.0", ")", "]", "# 1.5", "candidates", "=", "self", ".", "_known", "(", "[", "w", "]", ")", "or", "self", ".", "_known", "(", "self", ".", "_edit1", "(", "w", ")", ")", "or", "self", ".", "_known", "(", "self", ".", "_edit2", "(", "w", ")", ")", "or", "[", "w", "]", "candidates", "=", "[", "(", "self", ".", "get", "(", "c", ",", "0.0", ")", ",", "c", ")", "for", "c", "in", "candidates", "]", "s", "=", "float", "(", "sum", "(", "p", "for", "p", ",", "w", "in", "candidates", ")", "or", "1", ")", "candidates", "=", "sorted", "(", "(", "(", "p", "/", "s", ",", "w", ")", "for", "p", ",", "w", "in", "candidates", ")", ",", "reverse", "=", "True", ")", "candidates", "=", "[", "(", "w", ".", "istitle", "(", ")", "and", "x", ".", "title", "(", ")", "or", "x", ",", "p", ")", "for", "p", ",", "x", "in", "candidates", "]", "# case-sensitive", "return", "candidates" ]
Return a list of (word, confidence) spelling corrections for the given word, based on the probability of known words with edit distance 1-2 from the given word.
[ "Return", "a", "list", "of", "(", "word", "confidence", ")", "spelling", "corrections", "for", "the", "given", "word", "based", "on", "the", "probability", "of", "known", "words", "with", "edit", "distance", "1", "-", "2", "from", "the", "given", "word", "." ]
python
train
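A hedged usage sketch for the spelling model above. The `spelling` instance is hypothetical (in textblob-de it is wired up by the language module with a frequency word list); the short-circuit cases come straight from the record:

# Hypothetical: `spelling` is a loaded instance of the dictionary class above
suggestions = spelling.suggest("conection")
best, confidence = suggestions[0]  # most probable correction first

# Single characters, punctuation and numbers pass through unchanged:
assert spelling.suggest("!") == [("!", 1.0)]
assert spelling.suggest("1.5") == [("1.5", 1.0)]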
estnltk/estnltk
estnltk/text.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L758-L798
def syntax_trees( self, layer=None ):
    """ Builds syntactic trees (estnltk.syntax.utils.Tree objects) from
        syntactic annotations and returns as a list.

        If the input argument *layer* is not specified, the type of the
        syntactic parser is used to decide which syntactic analysis layer
        should be produced and taken as basis for building syntactic trees;
        If a syntactic parser is not available, then a missing *layer* name
        is replaced by the first syntactic layer available (1st LAYER_CONLL,
        then LAYER_VISLCG3);
        Otherwise, the *layer* must be provided by the user and it must be
        either LAYER_CONLL or LAYER_VISLCG3.
    """
    # If no layer specified, decide the layer based on the type of syntactic
    # analyzer used:
    if not layer and self.__syntactic_parser:
        if isinstance(self.__syntactic_parser, MaltParser):
            layer = LAYER_CONLL
        elif isinstance(self.__syntactic_parser, VISLCG3Parser):
            layer = LAYER_VISLCG3
    # If no syntactic analyzer available, pick the layer as the first syntactic
    # layer available:
    if not layer and self.is_tagged(LAYER_CONLL):
        layer = LAYER_CONLL
    elif not layer and self.is_tagged(LAYER_VISLCG3):
        layer = LAYER_VISLCG3
    # Based on the chosen layer, perform the syntactic analysis (if necessary)
    # and return the results packaged as tree objects;
    if layer:
        if layer==LAYER_CONLL:
            if not self.is_tagged(layer):
                self.tag_syntax_maltparser()
            return self.syntax_trees_conll
        elif layer==LAYER_VISLCG3:
            if not self.is_tagged(layer):
                self.tag_syntax_vislcg3()
            return self.syntax_trees_vislcg3
        else:
            raise ValueError('(!) Unexpected layer name: '+str(layer))
    else:
        raise ValueError('(!) Missing layer name! ')
[ "def", "syntax_trees", "(", "self", ",", "layer", "=", "None", ")", ":", "# If no layer specified, decide the layer based on the type of syntactic", "# analyzer used:", "if", "not", "layer", "and", "self", ".", "__syntactic_parser", ":", "if", "isinstance", "(", "self", ".", "__syntactic_parser", ",", "MaltParser", ")", ":", "layer", "=", "LAYER_CONLL", "elif", "isinstance", "(", "self", ".", "__syntactic_parser", ",", "VISLCG3Parser", ")", ":", "layer", "=", "LAYER_VISLCG3", "# If no syntactic analyzer available, pick the layer as the first syntactic", "# layer available:", "if", "not", "layer", "and", "self", ".", "is_tagged", "(", "LAYER_CONLL", ")", ":", "layer", "=", "LAYER_CONLL", "elif", "not", "layer", "and", "self", ".", "is_tagged", "(", "LAYER_VISLCG3", ")", ":", "layer", "=", "LAYER_VISLCG3", "# Based on the chosen layer, perform the syntactic analysis (if necessary)", "# and return the results packaged as tree objects;", "if", "layer", ":", "if", "layer", "==", "LAYER_CONLL", ":", "if", "not", "self", ".", "is_tagged", "(", "layer", ")", ":", "self", ".", "tag_syntax_maltparser", "(", ")", "return", "self", ".", "syntax_trees_conll", "elif", "layer", "==", "LAYER_VISLCG3", ":", "if", "not", "self", ".", "is_tagged", "(", "layer", ")", ":", "self", ".", "tag_syntax_vislcg3", "(", ")", "return", "self", ".", "syntax_trees_vislcg3", "else", ":", "raise", "ValueError", "(", "'(!) Unexpected layer name: '", "+", "str", "(", "layer", ")", ")", "else", ":", "raise", "ValueError", "(", "'(!) Missing layer name! '", ")" ]
Builds syntactic trees (estnltk.syntax.utils.Tree objects) from
syntactic annotations and returns as a list.

If the input argument *layer* is not specified, the type of the
syntactic parser is used to decide which syntactic analysis layer
should be produced and taken as basis for building syntactic trees;
If a syntactic parser is not available, then a missing *layer* name
is replaced by the first syntactic layer available (1st LAYER_CONLL,
then LAYER_VISLCG3);
Otherwise, the *layer* must be provided by the user and it must be
either LAYER_CONLL or LAYER_VISLCG3.
[ "Builds", "syntactic", "trees", "(", "estnltk", ".", "syntax", ".", "utils", ".", "Tree", "objects", ")", "from", "syntactic", "annotations", "and", "returns", "as", "a", "list", ".", "If", "the", "input", "argument", "*", "layer", "*", "is", "not", "specified", "the", "type", "of", "the", "syntactic", "parser", "is", "used", "to", "decide", "which", "syntactic", "analysis", "layer", "should", "be", "produced", "and", "taken", "as", "basis", "for", "building", "syntactic", "trees", ";", "If", "a", "syntactic", "parser", "is", "not", "available", "then", "a", "missing", "*", "layer", "*", "name", "is", "replaced", "by", "the", "first", "syntactic", "layer", "available", "(", "1st", "LAYER_CONLL", "then", "LAYER_VISLCG3", ")", ";", "Otherwise", "the", "*", "layer", "*", "must", "be", "provided", "by", "the", "user", "and", "it", "must", "be", "either", "LAYER_CONLL", "or", "LAYER_VISLCG3", "." ]
python
train
saltstack/salt
salt/modules/win_network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_network.py#L349-L389
def ip_addrs(interface=None, include_loopback=False, cidr=None, type=None): ''' Returns a list of IPv4 addresses assigned to the host. interface Only IP addresses from that interface will be returned. include_loopback : False Include loopback 127.0.0.1 IPv4 address. cidr Describes subnet using CIDR notation and only IPv4 addresses that belong to this subnet will be returned. .. versionchanged:: 2019.2.0 type If option set to 'public' then only public addresses will be returned. Ditto for 'private'. .. versionchanged:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' network.ip_addrs salt '*' network.ip_addrs cidr=10.0.0.0/8 salt '*' network.ip_addrs cidr=192.168.0.0/16 type=private ''' addrs = salt.utils.network.ip_addrs(interface=interface, include_loopback=include_loopback) if cidr: return [i for i in addrs if salt.utils.network.in_subnet(cidr, [i])] else: if type == 'public': return [i for i in addrs if not is_private(i)] elif type == 'private': return [i for i in addrs if is_private(i)] else: return addrs
[ "def", "ip_addrs", "(", "interface", "=", "None", ",", "include_loopback", "=", "False", ",", "cidr", "=", "None", ",", "type", "=", "None", ")", ":", "addrs", "=", "salt", ".", "utils", ".", "network", ".", "ip_addrs", "(", "interface", "=", "interface", ",", "include_loopback", "=", "include_loopback", ")", "if", "cidr", ":", "return", "[", "i", "for", "i", "in", "addrs", "if", "salt", ".", "utils", ".", "network", ".", "in_subnet", "(", "cidr", ",", "[", "i", "]", ")", "]", "else", ":", "if", "type", "==", "'public'", ":", "return", "[", "i", "for", "i", "in", "addrs", "if", "not", "is_private", "(", "i", ")", "]", "elif", "type", "==", "'private'", ":", "return", "[", "i", "for", "i", "in", "addrs", "if", "is_private", "(", "i", ")", "]", "else", ":", "return", "addrs" ]
Returns a list of IPv4 addresses assigned to the host. interface Only IP addresses from that interface will be returned. include_loopback : False Include loopback 127.0.0.1 IPv4 address. cidr Describes subnet using CIDR notation and only IPv4 addresses that belong to this subnet will be returned. .. versionchanged:: 2019.2.0 type If option set to 'public' then only public addresses will be returned. Ditto for 'private'. .. versionchanged:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' network.ip_addrs salt '*' network.ip_addrs cidr=10.0.0.0/8 salt '*' network.ip_addrs cidr=192.168.0.0/16 type=private
[ "Returns", "a", "list", "of", "IPv4", "addresses", "assigned", "to", "the", "host", "." ]
python
train
DeVilhena-Paulo/KdQuery
kdquery.py
https://github.com/DeVilhena-Paulo/KdQuery/blob/76e3791e25b2db2168c1007fe1b92c3f8ec20005/kdquery.py#L100-L156
def insert(self, point, data=None):
    """Insert a new node in the tree.

    Args:
        point (:obj:`tuple` of float or int): Stores the position of the
            node.
        data (:obj, optional): The information stored by the node.

    Returns:
        int: The identifier of the new node.

    Example:
        >>> tree = Tree(4, 800)
        >>> point = (3, 7)
        >>> data = {'name': 'Fresnel', 'label': 'blue', 'speed': 98.2}
        >>> node_id = tree.insert(point, data)
    """
    assert len(point) == self.k

    if self.size == 0:
        if self.region is None:
            self.region = [[-math.inf, math.inf]] * self.k
        axis = 0
        return self.new_node(point, self.region, axis, data)

    # Iteratively descends to one leaf
    current_id = 0
    while True:
        parent_node = self.node_list[current_id]
        axis = parent_node.axis
        if point[axis] < parent_node.point[axis]:
            next_id, left = parent_node.left, True
        else:
            next_id, left = parent_node.right, False

        if next_id is None:
            break

        current_id = next_id

    # Get the region delimited by the parent node
    region = parent_node.region[:]
    region[axis] = parent_node.region[axis][:]

    # Limit to the child's region
    limit = parent_node.point[axis]

    # Update reference to the new node
    if left:
        self.node_list[current_id] = parent_node._replace(left=self.size)
        region[axis][1] = limit
    else:
        self.node_list[current_id] = parent_node._replace(right=self.size)
        region[axis][0] = limit

    return self.new_node(point, region, (axis + 1) % self.k, data)
[ "def", "insert", "(", "self", ",", "point", ",", "data", "=", "None", ")", ":", "assert", "len", "(", "point", ")", "==", "self", ".", "k", "if", "self", ".", "size", "==", "0", ":", "if", "self", ".", "region", "is", "None", ":", "self", ".", "region", "=", "[", "[", "-", "math", ".", "inf", ",", "math", ".", "inf", "]", "]", "*", "self", ".", "k", "axis", "=", "0", "return", "self", ".", "new_node", "(", "point", ",", "self", ".", "region", ",", "axis", ",", "data", ")", "# Iteratively descends to one leaf", "current_id", "=", "0", "while", "True", ":", "parent_node", "=", "self", ".", "node_list", "[", "current_id", "]", "axis", "=", "parent_node", ".", "axis", "if", "point", "[", "axis", "]", "<", "parent_node", ".", "point", "[", "axis", "]", ":", "next_id", ",", "left", "=", "parent_node", ".", "left", ",", "True", "else", ":", "next_id", ",", "left", "=", "parent_node", ".", "right", ",", "False", "if", "next_id", "is", "None", ":", "break", "current_id", "=", "next_id", "# Get the region delimited by the parent node", "region", "=", "parent_node", ".", "region", "[", ":", "]", "region", "[", "axis", "]", "=", "parent_node", ".", "region", "[", "axis", "]", "[", ":", "]", "# Limit to the child's region", "limit", "=", "parent_node", ".", "point", "[", "axis", "]", "# Update reference to the new node", "if", "left", ":", "self", ".", "node_list", "[", "current_id", "]", "=", "parent_node", ".", "_replace", "(", "left", "=", "self", ".", "size", ")", "region", "[", "axis", "]", "[", "1", "]", "=", "limit", "else", ":", "self", ".", "node_list", "[", "current_id", "]", "=", "parent_node", ".", "_replace", "(", "right", "=", "self", ".", "size", ")", "region", "[", "axis", "]", "[", "0", "]", "=", "limit", "return", "self", ".", "new_node", "(", "point", ",", "region", ",", "(", "axis", "+", "1", ")", "%", "self", ".", "k", ",", "data", ")" ]
Insert a new node in the tree.

Args:
    point (:obj:`tuple` of float or int): Stores the position of the
        node.
    data (:obj, optional): The information stored by the node.

Returns:
    int: The identifier of the new node.

Example:
    >>> tree = Tree(4, 800)
    >>> point = (3, 7)
    >>> data = {'name': 'Fresnel', 'label': 'blue', 'speed': 98.2}
    >>> node_id = tree.insert(point, data)
[ "Insert", "a", "new", "node", "in", "the", "tree", "." ]
python
train
beregond/jsonmodels
jsonmodels/fields.py
https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/fields.py#L418-L424
def parse_value(self, value): """Parse string into instance of `time`.""" if value is None: return value if isinstance(value, datetime.time): return value return parse(value).timetz()
[ "def", "parse_value", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "return", "value", "if", "isinstance", "(", "value", ",", "datetime", ".", "time", ")", ":", "return", "value", "return", "parse", "(", "value", ")", ".", "timetz", "(", ")" ]
Parse string into instance of `time`.
[ "Parse", "string", "into", "instance", "of", "time", "." ]
python
train
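A quick demonstration through jsonmodels' public TimeField; dateutil's parse handles the string form:

import datetime
from jsonmodels import fields

f = fields.TimeField()
assert f.parse_value(None) is None
t = f.parse_value("13:45:00+02:00")
assert isinstance(t, datetime.time)
assert t.utcoffset() == datetime.timedelta(hours=2)
# datetime.time instances pass straight through
assert f.parse_value(datetime.time(7, 30)) == datetime.time(7, 30)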
mikedh/trimesh
trimesh/poses.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/poses.py#L13-L155
def compute_stable_poses(mesh,
                         center_mass=None,
                         sigma=0.0,
                         n_samples=1,
                         threshold=0.0):
    """
    Computes stable orientations of a mesh and their quasi-static
    probabilities.

    This method samples the location of the center of mass from a multivariate
    gaussian with the mean at the center of mass, and a covariance
    equal to an identity matrix times sigma, over n_samples.
    For each sample, it computes the stable resting poses of the mesh on a
    planar workspace and evaluates the probabilities of landing in
    each pose if the object is dropped onto the table randomly.

    This method returns the 4x4 homogeneous transform matrices that place
    the shape against the planar surface with the z-axis pointing upwards
    and a list of the probabilities for each pose.

    The transforms and probabilities that are returned are sorted, with the
    most probable pose first.

    Parameters
    ----------
    mesh : trimesh.Trimesh
      The target mesh
    center_mass : (3,) float
      The object center of mass. If None, this method
      assumes uniform density and watertightness and
      computes a center of mass explicitly
    sigma : float
      The covariance for the multivariate gaussian used
      to sample center of mass locations
    n_samples : int
      The number of samples of the center of mass location
    threshold : float
      The probability value at which to threshold
      returned stable poses

    Returns
    -------
    transforms : (n, 4, 4) float
      The homogeneous matrices that transform the
      object to rest in a stable pose, with the
      new z-axis pointing upwards from the table
      and the object just touching the table.
    probs : (n,) float
      Probability in (0, 1) for each pose
    """
    # save convex hull mesh to avoid a cache check
    cvh = mesh.convex_hull

    if center_mass is None:
        center_mass = mesh.center_mass

    # Sample center of mass, rejecting points outside of conv hull
    sample_coms = []
    while len(sample_coms) < n_samples:
        remaining = n_samples - len(sample_coms)
        coms = np.random.multivariate_normal(center_mass,
                                             sigma * np.eye(3),
                                             remaining)
        for c in coms:
            dots = np.einsum('ij,ij->i',
                             c - cvh.triangles_center,
                             cvh.face_normals)
            if np.all(dots < 0):
                sample_coms.append(c)

    norms_to_probs = {}  # Map from normal to probabilities

    # For each sample, compute the stable poses
    for sample_com in sample_coms:

        # Create toppling digraph
        dg = _create_topple_graph(cvh, sample_com)

        # Propagate probabilites to sink nodes with a breadth-first traversal
        nodes = [n for n in dg.nodes() if dg.in_degree(n) == 0]
        n_iters = 0
        while len(nodes) > 0 and n_iters <= len(mesh.faces):
            new_nodes = []
            for node in nodes:
                if dg.out_degree(node) == 0:
                    continue
                successor = next(iter(dg.successors(node)))
                dg.node[successor]['prob'] += dg.node[node]['prob']
                dg.node[node]['prob'] = 0.0
                new_nodes.append(successor)
            nodes = new_nodes
            n_iters += 1

        # Collect stable poses
        for node in dg.nodes():
            if dg.node[node]['prob'] > 0.0:
                normal = cvh.face_normals[node]
                prob = dg.node[node]['prob']
                key = tuple(np.around(normal, decimals=3))
                if key in norms_to_probs:
                    norms_to_probs[key]['prob'] += 1.0 / n_samples * prob
                else:
                    norms_to_probs[key] = {
                        'prob': 1.0 / n_samples * prob,
                        'normal': normal
                    }

    transforms = []
    probs = []

    # Filter stable poses
    for key in norms_to_probs:
        prob = norms_to_probs[key]['prob']
        if prob > threshold:
            tf = np.eye(4)

            # Compute a rotation matrix for this stable pose
            z = -1.0 * norms_to_probs[key]['normal']
            x = np.array([-z[1], z[0], 0])
            if np.linalg.norm(x) == 0.0:
                x = np.array([1, 0, 0])
            else:
                x = x / np.linalg.norm(x)
            y = np.cross(z, x)
            y = y / np.linalg.norm(y)
            tf[:3, :3] = np.array([x, y, z])

            # Compute the necessary translation for this stable pose
            m = cvh.copy()
            m.apply_transform(tf)
            z = -m.bounds[0][2]
            tf[:3, 3] = np.array([0, 0, z])

            transforms.append(tf)
            probs.append(prob)

    # Sort the results
    transforms = np.array(transforms)
    probs = np.array(probs)
    inds = np.argsort(-probs)

    return transforms[inds], probs[inds]
[ "def", "compute_stable_poses", "(", "mesh", ",", "center_mass", "=", "None", ",", "sigma", "=", "0.0", ",", "n_samples", "=", "1", ",", "threshold", "=", "0.0", ")", ":", "# save convex hull mesh to avoid a cache check", "cvh", "=", "mesh", ".", "convex_hull", "if", "center_mass", "is", "None", ":", "center_mass", "=", "mesh", ".", "center_mass", "# Sample center of mass, rejecting points outside of conv hull", "sample_coms", "=", "[", "]", "while", "len", "(", "sample_coms", ")", "<", "n_samples", ":", "remaining", "=", "n_samples", "-", "len", "(", "sample_coms", ")", "coms", "=", "np", ".", "random", ".", "multivariate_normal", "(", "center_mass", ",", "sigma", "*", "np", ".", "eye", "(", "3", ")", ",", "remaining", ")", "for", "c", "in", "coms", ":", "dots", "=", "np", ".", "einsum", "(", "'ij,ij->i'", ",", "c", "-", "cvh", ".", "triangles_center", ",", "cvh", ".", "face_normals", ")", "if", "np", ".", "all", "(", "dots", "<", "0", ")", ":", "sample_coms", ".", "append", "(", "c", ")", "norms_to_probs", "=", "{", "}", "# Map from normal to probabilities", "# For each sample, compute the stable poses", "for", "sample_com", "in", "sample_coms", ":", "# Create toppling digraph", "dg", "=", "_create_topple_graph", "(", "cvh", ",", "sample_com", ")", "# Propagate probabilites to sink nodes with a breadth-first traversal", "nodes", "=", "[", "n", "for", "n", "in", "dg", ".", "nodes", "(", ")", "if", "dg", ".", "in_degree", "(", "n", ")", "==", "0", "]", "n_iters", "=", "0", "while", "len", "(", "nodes", ")", ">", "0", "and", "n_iters", "<=", "len", "(", "mesh", ".", "faces", ")", ":", "new_nodes", "=", "[", "]", "for", "node", "in", "nodes", ":", "if", "dg", ".", "out_degree", "(", "node", ")", "==", "0", ":", "continue", "successor", "=", "next", "(", "iter", "(", "dg", ".", "successors", "(", "node", ")", ")", ")", "dg", ".", "node", "[", "successor", "]", "[", "'prob'", "]", "+=", "dg", ".", "node", "[", "node", "]", "[", "'prob'", "]", "dg", ".", "node", "[", "node", "]", "[", "'prob'", "]", "=", "0.0", "new_nodes", ".", "append", "(", "successor", ")", "nodes", "=", "new_nodes", "n_iters", "+=", "1", "# Collect stable poses", "for", "node", "in", "dg", ".", "nodes", "(", ")", ":", "if", "dg", ".", "node", "[", "node", "]", "[", "'prob'", "]", ">", "0.0", ":", "normal", "=", "cvh", ".", "face_normals", "[", "node", "]", "prob", "=", "dg", ".", "node", "[", "node", "]", "[", "'prob'", "]", "key", "=", "tuple", "(", "np", ".", "around", "(", "normal", ",", "decimals", "=", "3", ")", ")", "if", "key", "in", "norms_to_probs", ":", "norms_to_probs", "[", "key", "]", "[", "'prob'", "]", "+=", "1.0", "/", "n_samples", "*", "prob", "else", ":", "norms_to_probs", "[", "key", "]", "=", "{", "'prob'", ":", "1.0", "/", "n_samples", "*", "prob", ",", "'normal'", ":", "normal", "}", "transforms", "=", "[", "]", "probs", "=", "[", "]", "# Filter stable poses", "for", "key", "in", "norms_to_probs", ":", "prob", "=", "norms_to_probs", "[", "key", "]", "[", "'prob'", "]", "if", "prob", ">", "threshold", ":", "tf", "=", "np", ".", "eye", "(", "4", ")", "# Compute a rotation matrix for this stable pose", "z", "=", "-", "1.0", "*", "norms_to_probs", "[", "key", "]", "[", "'normal'", "]", "x", "=", "np", ".", "array", "(", "[", "-", "z", "[", "1", "]", ",", "z", "[", "0", "]", ",", "0", "]", ")", "if", "np", ".", "linalg", ".", "norm", "(", "x", ")", "==", "0.0", ":", "x", "=", "np", ".", "array", "(", "[", "1", ",", "0", ",", "0", "]", ")", "else", ":", "x", "=", "x", "/", "np", ".", "linalg", ".", "norm", 
"(", "x", ")", "y", "=", "np", ".", "cross", "(", "z", ",", "x", ")", "y", "=", "y", "/", "np", ".", "linalg", ".", "norm", "(", "y", ")", "tf", "[", ":", "3", ",", ":", "3", "]", "=", "np", ".", "array", "(", "[", "x", ",", "y", ",", "z", "]", ")", "# Compute the necessary translation for this stable pose", "m", "=", "cvh", ".", "copy", "(", ")", "m", ".", "apply_transform", "(", "tf", ")", "z", "=", "-", "m", ".", "bounds", "[", "0", "]", "[", "2", "]", "tf", "[", ":", "3", ",", "3", "]", "=", "np", ".", "array", "(", "[", "0", ",", "0", ",", "z", "]", ")", "transforms", ".", "append", "(", "tf", ")", "probs", ".", "append", "(", "prob", ")", "# Sort the results", "transforms", "=", "np", ".", "array", "(", "transforms", ")", "probs", "=", "np", ".", "array", "(", "probs", ")", "inds", "=", "np", ".", "argsort", "(", "-", "probs", ")", "return", "transforms", "[", "inds", "]", ",", "probs", "[", "inds", "]" ]
Computes stable orientations of a mesh and their quasi-static
probabilities.

This method samples the location of the center of mass from a multivariate
gaussian with the mean at the center of mass, and a covariance equal to an
identity matrix times sigma, over n_samples.

For each sample, it computes the stable resting poses of the mesh on a
planar workspace and evaluates the probabilities of landing in each pose
if the object is dropped onto the table randomly.

This method returns the 4x4 homogeneous transform matrices that place the
shape against the planar surface with the z-axis pointing upwards and a
list of the probabilities for each pose.

The transforms and probabilities that are returned are sorted, with the
most probable pose first.

Parameters
----------
mesh : trimesh.Trimesh
  The target mesh
center_mass : (3,) float
  The object center of mass. If None, this method assumes uniform density
  and watertightness and computes a center of mass explicitly
sigma : float
  The covariance for the multivariate gaussian used to sample center of
  mass locations
n_samples : int
  The number of samples of the center of mass location
threshold : float
  The probability value at which to threshold returned stable poses

Returns
-------
transforms : (n, 4, 4) float
  The homogeneous matrices that transform the object to rest in a stable
  pose, with the new z-axis pointing upwards from the table and the object
  just touching the table.
probs : (n,) float
  Probability in (0, 1) for each pose
[ "Computes", "stable", "orientations", "of", "a", "mesh", "and", "their", "quasi", "-", "static", "probabilites", "." ]
python
train
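A minimal usage sketch for compute_stable_poses, assuming trimesh is installed; the mesh file name below is a placeholder:

# Usage sketch (not from the source record): load a mesh and rest it in
# its most probable stable pose. 'part.stl' is a hypothetical path.
import trimesh

mesh = trimesh.load('part.stl')
transforms, probs = trimesh.poses.compute_stable_poses(
    mesh, sigma=0.001, n_samples=10, threshold=0.01)
print('most likely pose probability:', probs[0])
mesh.apply_transform(transforms[0])  # orient the mesh flat against the table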
jupyter-widgets/ipywidgets
ipywidgets/widgets/widget.py
https://github.com/jupyter-widgets/ipywidgets/blob/36fe37594cd5a268def228709ca27e37b99ac606/ipywidgets/widgets/widget.py#L732-L735
def _send(self, msg, buffers=None):
    """Sends a message to the model in the front-end."""
    if self.comm is not None and self.comm.kernel is not None:
        self.comm.send(data=msg, buffers=buffers)
[ "def", "_send", "(", "self", ",", "msg", ",", "buffers", "=", "None", ")", ":", "if", "self", ".", "comm", "is", "not", "None", "and", "self", ".", "comm", ".", "kernel", "is", "not", "None", ":", "self", ".", "comm", ".", "send", "(", "data", "=", "msg", ",", "buffers", "=", "buffers", ")" ]
Sends a message to the model in the front-end.
[ "Sends", "a", "message", "to", "the", "model", "in", "the", "front", "-", "end", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py#L1276-L1292
def get_interface_detail_output_interface_ifHCOutBroadcastPkts(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_interface_detail = ET.Element("get_interface_detail")
    config = get_interface_detail
    output = ET.SubElement(get_interface_detail, "output")
    interface = ET.SubElement(output, "interface")
    interface_type_key = ET.SubElement(interface, "interface-type")
    interface_type_key.text = kwargs.pop('interface_type')
    interface_name_key = ET.SubElement(interface, "interface-name")
    interface_name_key.text = kwargs.pop('interface_name')
    ifHCOutBroadcastPkts = ET.SubElement(interface, "ifHCOutBroadcastPkts")
    ifHCOutBroadcastPkts.text = kwargs.pop('ifHCOutBroadcastPkts')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "get_interface_detail_output_interface_ifHCOutBroadcastPkts", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_interface_detail", "=", "ET", ".", "Element", "(", "\"get_interface_detail\"", ")", "config", "=", "get_interface_detail", "output", "=", "ET", ".", "SubElement", "(", "get_interface_detail", ",", "\"output\"", ")", "interface", "=", "ET", ".", "SubElement", "(", "output", ",", "\"interface\"", ")", "interface_type_key", "=", "ET", ".", "SubElement", "(", "interface", ",", "\"interface-type\"", ")", "interface_type_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_type'", ")", "interface_name_key", "=", "ET", ".", "SubElement", "(", "interface", ",", "\"interface-name\"", ")", "interface_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_name'", ")", "ifHCOutBroadcastPkts", "=", "ET", ".", "SubElement", "(", "interface", ",", "\"ifHCOutBroadcastPkts\"", ")", "ifHCOutBroadcastPkts", ".", "text", "=", "kwargs", ".", "pop", "(", "'ifHCOutBroadcastPkts'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
wbond/asn1crypto
asn1crypto/_iri.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/_iri.py#L120-L169
def uri_to_iri(value):
    """
    Converts an ASCII URI byte string into a unicode IRI

    :param value:
        An ASCII-encoded byte string of the URI

    :return:
        A unicode string of the IRI
    """

    if not isinstance(value, byte_cls):
        raise TypeError(unwrap(
            '''
            value must be a byte string, not %s
            ''',
            type_name(value)
        ))

    parsed = urlsplit(value)

    scheme = parsed.scheme
    if scheme is not None:
        scheme = scheme.decode('ascii')

    username = _urlunquote(parsed.username, remap=[':', '@'])
    password = _urlunquote(parsed.password, remap=[':', '@'])
    hostname = parsed.hostname
    if hostname:
        hostname = hostname.decode('idna')
    port = parsed.port
    if port and not isinstance(port, int_types):
        port = port.decode('ascii')

    netloc = ''
    if username is not None:
        netloc += username
        if password:
            netloc += ':' + password
        netloc += '@'
    if hostname is not None:
        netloc += hostname
    if port is not None:
        netloc += ':' + str_cls(port)

    path = _urlunquote(parsed.path, remap=['/'], preserve=True)
    query = _urlunquote(parsed.query, remap=['&', '='], preserve=True)
    fragment = _urlunquote(parsed.fragment)

    return urlunsplit((scheme, netloc, path, query, fragment))
[ "def", "uri_to_iri", "(", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "byte_cls", ")", ":", "raise", "TypeError", "(", "unwrap", "(", "'''\n value must be a byte string, not %s\n '''", ",", "type_name", "(", "value", ")", ")", ")", "parsed", "=", "urlsplit", "(", "value", ")", "scheme", "=", "parsed", ".", "scheme", "if", "scheme", "is", "not", "None", ":", "scheme", "=", "scheme", ".", "decode", "(", "'ascii'", ")", "username", "=", "_urlunquote", "(", "parsed", ".", "username", ",", "remap", "=", "[", "':'", ",", "'@'", "]", ")", "password", "=", "_urlunquote", "(", "parsed", ".", "password", ",", "remap", "=", "[", "':'", ",", "'@'", "]", ")", "hostname", "=", "parsed", ".", "hostname", "if", "hostname", ":", "hostname", "=", "hostname", ".", "decode", "(", "'idna'", ")", "port", "=", "parsed", ".", "port", "if", "port", "and", "not", "isinstance", "(", "port", ",", "int_types", ")", ":", "port", "=", "port", ".", "decode", "(", "'ascii'", ")", "netloc", "=", "''", "if", "username", "is", "not", "None", ":", "netloc", "+=", "username", "if", "password", ":", "netloc", "+=", "':'", "+", "password", "netloc", "+=", "'@'", "if", "hostname", "is", "not", "None", ":", "netloc", "+=", "hostname", "if", "port", "is", "not", "None", ":", "netloc", "+=", "':'", "+", "str_cls", "(", "port", ")", "path", "=", "_urlunquote", "(", "parsed", ".", "path", ",", "remap", "=", "[", "'/'", "]", ",", "preserve", "=", "True", ")", "query", "=", "_urlunquote", "(", "parsed", ".", "query", ",", "remap", "=", "[", "'&'", ",", "'='", "]", ",", "preserve", "=", "True", ")", "fragment", "=", "_urlunquote", "(", "parsed", ".", "fragment", ")", "return", "urlunsplit", "(", "(", "scheme", ",", "netloc", ",", "path", ",", "query", ",", "fragment", ")", ")" ]
Converts an ASCII URI byte string into a unicode IRI

:param value:
    An ASCII-encoded byte string of the URI

:return:
    A unicode string of the IRI
[ "Converts", "an", "ASCII", "URI", "byte", "string", "into", "a", "unicode", "IRI" ]
python
train
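A hedged example of calling uri_to_iri directly; _iri is a private module of asn1crypto, and the URL below is made up:

# Illustrative only: decode an IDNA host and a percent-escaped path.
from asn1crypto._iri import uri_to_iri

iri = uri_to_iri(b'https://xn--e1afmkfd.example/%D0%BF%D1%83%D1%82%D1%8C')
# The hostname comes back IDNA-decoded and the path percent-unquoted,
# e.g. roughly 'https://пример.example/путь'.
print(iri)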
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Variables/PackageVariable.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Variables/PackageVariable.py#L86-L100
def PackageVariable(key, help, default, searchfunc=None):
    # NB: searchfunc is currently undocumented and unsupported
    """
    The input parameters describe a 'package list' option, thus
    they are returned with the correct converter and validator
    appended. The result is usable for input to
    opts.Add() .

    A 'package list' option may either be 'all', 'none' or a list of
    package names (separated by space).
    """
    help = '\n    '.join(
        (help, '( yes | no | /path/to/%s )' % key))
    return (key, help, default,
            lambda k, v, e: _validator(k, v, e, searchfunc),
            _converter)
[ "def", "PackageVariable", "(", "key", ",", "help", ",", "default", ",", "searchfunc", "=", "None", ")", ":", "# NB: searchfunc is currently undocumented and unsupported", "help", "=", "'\\n '", ".", "join", "(", "(", "help", ",", "'( yes | no | /path/to/%s )'", "%", "key", ")", ")", "return", "(", "key", ",", "help", ",", "default", ",", "lambda", "k", ",", "v", ",", "e", ":", "_validator", "(", "k", ",", "v", ",", "e", ",", "searchfunc", ")", ",", "_converter", ")" ]
The input parameters describe a 'package list' option, thus
they are returned with the correct converter and validator
appended. The result is usable for input to
opts.Add() .

A 'package list' option may either be 'all', 'none' or a list of
package names (separated by space).
[ "The", "input", "parameters", "describe", "a", "package", "list", "option", "thus", "they", "are", "returned", "with", "the", "correct", "converter", "and", "validator", "appended", ".", "The", "result", "is", "usable", "for", "input", "to", "opts", ".", "Add", "()", "." ]
python
train
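A sketch of how PackageVariable is typically consumed inside an SConstruct file, where SCons injects Variables, PackageVariable, and Environment into the namespace; the variable name and paths are illustrative:

# SConstruct sketch (names provided by SCons at build time).
vars = Variables('custom.py')
vars.Add(PackageVariable('x11', 'use X11 installed here', 'yes'))
env = Environment(variables=vars)

if env['x11'] is True:
    x11_dir = '/usr/X11'      # search a default location (illustrative)
elif env['x11']:
    x11_dir = env['x11']      # user passed x11=/path/to/x11 on the command line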
gem/oq-engine
openquake/hazardlib/sourceconverter.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/sourceconverter.py#L614-L627
def convert_hpdist(self, node):
    """
    Convert the given node into a probability mass function for the
    hypo depth distribution.

    :param node: a hypoDepthDist node
    :returns: a :class:`openquake.hazardlib.pmf.PMF` instance
    """
    with context(self.fname, node):
        hcdist = [(hd['probability'], hd['depth'])
                  for hd in node.hypoDepthDist]
    if not self.spinning_floating:  # consider the first hypocenter
        hcdist = [(1, hcdist[0][1])]
    return pmf.PMF(hcdist)
[ "def", "convert_hpdist", "(", "self", ",", "node", ")", ":", "with", "context", "(", "self", ".", "fname", ",", "node", ")", ":", "hcdist", "=", "[", "(", "hd", "[", "'probability'", "]", ",", "hd", "[", "'depth'", "]", ")", "for", "hd", "in", "node", ".", "hypoDepthDist", "]", "if", "not", "self", ".", "spinning_floating", ":", "# consider the first hypocenter", "hcdist", "=", "[", "(", "1", ",", "hcdist", "[", "0", "]", "[", "1", "]", ")", "]", "return", "pmf", ".", "PMF", "(", "hcdist", ")" ]
Convert the given node into a probability mass function for the
hypo depth distribution.

:param node: a hypoDepthDist node
:returns: a :class:`openquake.hazardlib.pmf.PMF` instance
[ "Convert", "the", "given", "node", "into", "a", "probability", "mass", "function", "for", "the", "hypo", "depth", "distribution", "." ]
python
train
Jarn/jarn.mkrelease
jarn/mkrelease/utils.py
https://github.com/Jarn/jarn.mkrelease/blob/844377f37a3cdc0a154148790a926f991019ec4a/jarn/mkrelease/utils.py#L34-L44
def getinputencoding(stream=None):
    """Return preferred encoding for reading from ``stream``.

    ``stream`` defaults to sys.stdin.
    """
    if stream is None:
        stream = sys.stdin
    encoding = stream.encoding
    if not encoding:
        encoding = getpreferredencoding()
    return encoding
[ "def", "getinputencoding", "(", "stream", "=", "None", ")", ":", "if", "stream", "is", "None", ":", "stream", "=", "sys", ".", "stdin", "encoding", "=", "stream", ".", "encoding", "if", "not", "encoding", ":", "encoding", "=", "getpreferredencoding", "(", ")", "return", "encoding" ]
Return preferred encoding for reading from ``stream``.

``stream`` defaults to sys.stdin.
[ "Return", "preferred", "encoding", "for", "reading", "from", "stream", "." ]
python
train
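A short usage sketch: decode bytes read from a pipe with the preferred input encoding.

# Sketch: decode piped stdin using the detected encoding.
import sys
from jarn.mkrelease.utils import getinputencoding

encoding = getinputencoding()       # stream encoding or locale default
raw = sys.stdin.buffer.read()       # bytes when stdin is a pipe (Python 3)
text = raw.decode(encoding)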
pyQode/pyqode.core
examples/notepad/notepad/main_window.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/examples/notepad/notepad/main_window.py#L76-L93
def setup_mimetypes(self):
    """ Setup additional mime types. """
    # setup some specific mimetypes
    mimetypes.add_type('text/xml', '.ui')  # qt designer forms
    mimetypes.add_type('text/x-rst', '.rst')  # rst docs
    mimetypes.add_type('text/x-cython', '.pyx')  # cython impl files
    mimetypes.add_type('text/x-cython', '.pxd')  # cython def files
    mimetypes.add_type('text/x-python', '.py')
    mimetypes.add_type('text/x-python', '.pyw')
    mimetypes.add_type('text/x-c', '.c')
    mimetypes.add_type('text/x-c', '.h')
    mimetypes.add_type('text/x-c++hdr', '.hpp')
    mimetypes.add_type('text/x-c++src', '.cpp')
    mimetypes.add_type('text/x-c++src', '.cxx')
    # cobol files
    for ext in ['.cbl', '.cob', '.cpy']:
        mimetypes.add_type('text/x-cobol', ext)
        mimetypes.add_type('text/x-cobol', ext.upper())
[ "def", "setup_mimetypes", "(", "self", ")", ":", "# setup some specific mimetypes", "mimetypes", ".", "add_type", "(", "'text/xml'", ",", "'.ui'", ")", "# qt designer forms forms", "mimetypes", ".", "add_type", "(", "'text/x-rst'", ",", "'.rst'", ")", "# rst docs", "mimetypes", ".", "add_type", "(", "'text/x-cython'", ",", "'.pyx'", ")", "# cython impl files", "mimetypes", ".", "add_type", "(", "'text/x-cython'", ",", "'.pxd'", ")", "# cython def files", "mimetypes", ".", "add_type", "(", "'text/x-python'", ",", "'.py'", ")", "mimetypes", ".", "add_type", "(", "'text/x-python'", ",", "'.pyw'", ")", "mimetypes", ".", "add_type", "(", "'text/x-c'", ",", "'.c'", ")", "mimetypes", ".", "add_type", "(", "'text/x-c'", ",", "'.h'", ")", "mimetypes", ".", "add_type", "(", "'text/x-c++hdr'", ",", "'.hpp'", ")", "mimetypes", ".", "add_type", "(", "'text/x-c++src'", ",", "'.cpp'", ")", "mimetypes", ".", "add_type", "(", "'text/x-c++src'", ",", "'.cxx'", ")", "# cobol files", "for", "ext", "in", "[", "'.cbl'", ",", "'.cob'", ",", "'.cpy'", "]", ":", "mimetypes", ".", "add_type", "(", "'text/x-cobol'", ",", "ext", ")", "mimetypes", ".", "add_type", "(", "'text/x-cobol'", ",", "ext", ".", "upper", "(", ")", ")" ]
Setup additional mime types.
[ "Setup", "additional", "mime", "types", "." ]
python
train
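Once setup_mimetypes() has run, the additions are visible through the standard library registry, e.g.:

# After the registrations above, guess_type resolves the extra extensions.
import mimetypes

print(mimetypes.guess_type('dialog.ui'))    # ('text/xml', None)
print(mimetypes.guess_type('payroll.cbl'))  # ('text/x-cobol', None)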
Microsoft/azure-devops-python-api
azure-devops/azure/devops/_file_cache.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/_file_cache.py#L165-L176
def shell_safe_json_parse(json_or_dict_string, preserve_order=False):
    """ Allows the passing of JSON or Python dictionary strings. This is needed
    because certain JSON strings in CMD shell are not received in main's argv.
    This allows the user to specify the alternative notation, which does not
    have this problem (but is technically not JSON). """
    try:
        if not preserve_order:
            return json.loads(json_or_dict_string)
        from collections import OrderedDict
        return json.loads(json_or_dict_string, object_pairs_hook=OrderedDict)
    except ValueError:
        import ast
        return ast.literal_eval(json_or_dict_string)
[ "def", "shell_safe_json_parse", "(", "json_or_dict_string", ",", "preserve_order", "=", "False", ")", ":", "try", ":", "if", "not", "preserve_order", ":", "return", "json", ".", "loads", "(", "json_or_dict_string", ")", "from", "collections", "import", "OrderedDict", "return", "json", ".", "loads", "(", "json_or_dict_string", ",", "object_pairs_hook", "=", "OrderedDict", ")", "except", "ValueError", ":", "import", "ast", "return", "ast", ".", "literal_eval", "(", "json_or_dict_string", ")" ]
Allows the passing of JSON or Python dictionary strings. This is needed because certain JSON strings in CMD shell are not received in main's argv. This allows the user to specify the alternative notation, which does not have this problem (but is technically not JSON).
[ "Allows", "the", "passing", "of", "JSON", "or", "Python", "dictionary", "strings", ".", "This", "is", "needed", "because", "certain", "JSON", "strings", "in", "CMD", "shell", "are", "not", "received", "in", "main", "s", "argv", ".", "This", "allows", "the", "user", "to", "specify", "the", "alternative", "notation", "which", "does", "not", "have", "this", "problem", "(", "but", "is", "technically", "not", "JSON", ")", "." ]
python
train
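Both JSON and Python-literal spellings parse to the same value; a quick sketch (the import path is the internal module from this record, so treat it as an implementation detail):

from azure.devops._file_cache import shell_safe_json_parse

d1 = shell_safe_json_parse('{"tags": ["a", "b"]}')   # plain JSON
d2 = shell_safe_json_parse("{'tags': ['a', 'b']}")   # falls back to ast.literal_eval
assert d1 == d2 == {'tags': ['a', 'b']}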
archman/beamline
beamline/lattice.py
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/lattice.py#L367-L380
def makeElement(self, kw):
    """ return element object regarding the keyword configuration
    """
    kw_name = kw
    kw_type = self.getKwType(kw_name)
    kw_config = {k.lower(): v for k, v in self.getKwConfig(kw_name).items()}
    objtype = 'Element' + kw_type.capitalize()
    retobj = getattr(element, objtype)(name=kw_name, config=kw_config)
    # set up EPICS control configs
    ctrlconf = self.getKwCtrlConf(kw_name)
    if ctrlconf != {}:
        retobj.setConf(ctrlconf, type='ctrl')
    return retobj
[ "def", "makeElement", "(", "self", ",", "kw", ")", ":", "kw_name", "=", "kw", "kw_type", "=", "self", ".", "getKwType", "(", "kw_name", ")", "kw_config", "=", "{", "k", ".", "lower", "(", ")", ":", "v", "for", "k", ",", "v", "in", "self", ".", "getKwConfig", "(", "kw_name", ")", ".", "items", "(", ")", "}", "objtype", "=", "'Element'", "+", "kw_type", ".", "capitalize", "(", ")", "retobj", "=", "getattr", "(", "element", ",", "objtype", ")", "(", "name", "=", "kw_name", ",", "config", "=", "kw_config", ")", "# set up EPICS control configs", "ctrlconf", "=", "self", ".", "getKwCtrlConf", "(", "kw_name", ")", "if", "ctrlconf", "!=", "{", "}", ":", "retobj", ".", "setConf", "(", "ctrlconf", ",", "type", "=", "'ctrl'", ")", "return", "retobj" ]
return element object regarding the keyword configuration
[ "return", "element", "object", "regarding", "the", "keyword", "configuration" ]
python
train
saltstack/salt
salt/thorium/calc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/thorium/calc.py#L226-L246
def median_high(name, num, minimum=0, maximum=0, ref=None):
    '''
    Calculates the high median of the ``num`` most recent values. Requires
    a list.

    USAGE:

    .. code-block:: yaml

        foo:
          calc.median_high:
            - name: myregentry
            - num: 5
    '''
    return calc(
        name=name,
        num=num,
        oper='median_high',
        minimum=minimum,
        maximum=maximum,
        ref=ref
    )
[ "def", "median_high", "(", "name", ",", "num", ",", "minimum", "=", "0", ",", "maximum", "=", "0", ",", "ref", "=", "None", ")", ":", "return", "calc", "(", "name", "=", "name", ",", "num", "=", "num", ",", "oper", "=", "'median_high'", ",", "minimum", "=", "minimum", ",", "maximum", "=", "maximum", ",", "ref", "=", "ref", ")" ]
Calculates the high median of the ``num`` most recent values. Requires
a list.

USAGE:

.. code-block:: yaml

    foo:
      calc.median_high:
        - name: myregentry
        - num: 5
[ "Calculates", "the", "high", "mean", "of", "the", "num", "most", "recent", "values", ".", "Requires", "a", "list", "." ]
python
train
wdm0006/git-pandas
gitpandas/project.py
https://github.com/wdm0006/git-pandas/blob/e56b817b1d66b8296d1d5e703d5db0e181d25899/gitpandas/project.py#L140-L178
def file_change_rates(self, branch='master', limit=None, coverage=False,
                      days=None, ignore_globs=None, include_globs=None):
    """
    This function will return a DataFrame containing some basic aggregations
    of the file change history data, and optionally test coverage data from
    a coverage_data.py .coverage file. The aim here is to identify files in
    the project which have abnormal edit rates, or the rate of changes
    without growing the file's size. If a file has a high change rate and
    poor test coverage, then it is a great candidate for writing more tests.

    :param branch: (optional, default=master) the branch to return commits for
    :param limit: (optional, default=None) a maximum number of commits to return, None for no limit
    :param coverage: (optional, default=False) a bool for whether or not to attempt to join in coverage data.
    :param days: (optional, default=None) number of days to return if limit is None
    :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
    :param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
    :return: DataFrame
    """

    columns = ['unique_committers', 'abs_rate_of_change', 'net_rate_of_change',
               'net_change', 'abs_change', 'edit_rate', 'repository']
    if coverage:
        columns += ['lines_covered', 'total_lines', 'coverage']
    df = pd.DataFrame(columns=columns)

    for repo in self.repos:
        try:
            fcr = repo.file_change_rates(
                branch=branch,
                limit=limit,
                coverage=coverage,
                days=days,
                ignore_globs=ignore_globs,
                include_globs=include_globs
            )
            fcr['repository'] = repo.repo_name
            df = df.append(fcr)
        except GitCommandError:
            print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch))

    df.reset_index()

    return df
[ "def", "file_change_rates", "(", "self", ",", "branch", "=", "'master'", ",", "limit", "=", "None", ",", "coverage", "=", "False", ",", "days", "=", "None", ",", "ignore_globs", "=", "None", ",", "include_globs", "=", "None", ")", ":", "columns", "=", "[", "'unique_committers'", ",", "'abs_rate_of_change'", ",", "'net_rate_of_change'", ",", "'net_change'", ",", "'abs_change'", ",", "'edit_rate'", ",", "'repository'", "]", "if", "coverage", ":", "columns", "+=", "[", "'lines_covered'", ",", "'total_lines'", ",", "'coverage'", "]", "df", "=", "pd", ".", "DataFrame", "(", "columns", "=", "columns", ")", "for", "repo", "in", "self", ".", "repos", ":", "try", ":", "fcr", "=", "repo", ".", "file_change_rates", "(", "branch", "=", "branch", ",", "limit", "=", "limit", ",", "coverage", "=", "coverage", ",", "days", "=", "days", ",", "ignore_globs", "=", "ignore_globs", ",", "include_globs", "=", "include_globs", ")", "fcr", "[", "'repository'", "]", "=", "repo", ".", "repo_name", "df", "=", "df", ".", "append", "(", "fcr", ")", "except", "GitCommandError", ":", "print", "(", "'Warning! Repo: %s seems to not have the branch: %s'", "%", "(", "repo", ",", "branch", ")", ")", "df", ".", "reset_index", "(", ")", "return", "df" ]
This function will return a DataFrame containing some basic aggregations of
the file change history data, and optionally test coverage data from a
coverage_data.py .coverage file. The aim here is to identify files in the
project which have abnormal edit rates, or the rate of changes without
growing the file's size. If a file has a high change rate and poor test
coverage, then it is a great candidate for writing more tests.

:param branch: (optional, default=master) the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param coverage: (optional, default=False) a bool for whether or not to attempt to join in coverage data.
:param days: (optional, default=None) number of days to return if limit is None
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
[ "This", "function", "will", "return", "a", "DataFrame", "containing", "some", "basic", "aggregations", "of", "the", "file", "change", "history", "data", "and", "optionally", "test", "coverage", "data", "from", "a", "coverage_data", ".", "py", ".", "coverage", "file", ".", "The", "aim", "here", "is", "to", "identify", "files", "in", "the", "project", "which", "have", "abnormal", "edit", "rates", "or", "the", "rate", "of", "changes", "without", "growing", "the", "files", "size", ".", "If", "a", "file", "has", "a", "high", "change", "rate", "and", "poor", "test", "coverage", "then", "it", "is", "a", "great", "candidate", "for", "writing", "more", "tests", "." ]
python
train
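A minimal sketch of running this across a directory of checkouts; the path is a placeholder:

from gitpandas import ProjectDirectory

project = ProjectDirectory(working_dir='/path/to/repos')
rates = project.file_change_rates(branch='master', days=90,
                                  include_globs=['*.py'])
# Files with the highest edit churn float to the top.
print(rates.sort_values('edit_rate', ascending=False).head())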
ASMfreaK/habitipy
habitipy/cli.py
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/cli.py#L126-L164
def get_content(api, rebuild_cache=False):
    """get content from server or cache"""
    if hasattr(get_content, 'cache') and not rebuild_cache:
        return get_content.cache
    if not os.path.exists(CONTENT_JSON) or rebuild_cache:
        import locale
        content_endpoint = api.content.get  # pylint: disable=protected-access
        try_langs = []
        try:
            lang = get_translation_for('habitipy').info()['language']
            try_langs.append(lang)
        except KeyError:
            pass
        try:
            loc = locale.getdefaultlocale()[0]
            if loc:
                try_langs.append(loc)
                try_langs.append(loc[:2])
        except IndexError:
            pass
        server_lang = content_endpoint._node.params['query']['language']
        # handle something like 'ru_RU' not available - only 'ru'
        for lang in try_langs:
            if lang in server_lang.possible_values:
                loc = {'language': lang}
                break
        else:
            loc = {}
        get_content.cache = content = content_endpoint(**loc)
        with open(CONTENT_JSON, 'w') as f:
            json.dump(content, f)
        return content
    try:
        with open(CONTENT_JSON) as f:
            get_content.cache = content = json.load(f)
            return content
    except JSONDecodeError:
        return get_content(api, rebuild_cache=True)
[ "def", "get_content", "(", "api", ",", "rebuild_cache", "=", "False", ")", ":", "if", "hasattr", "(", "get_content", ",", "'cache'", ")", "and", "not", "rebuild_cache", ":", "return", "get_content", ".", "cache", "if", "not", "os", ".", "path", ".", "exists", "(", "CONTENT_JSON", ")", "or", "rebuild_cache", ":", "import", "locale", "content_endpoint", "=", "api", ".", "content", ".", "get", "# pylint: disable=protected-access", "try_langs", "=", "[", "]", "try", ":", "lang", "=", "get_translation_for", "(", "'habitipy'", ")", ".", "info", "(", ")", "[", "'language'", "]", "try_langs", ".", "append", "(", "lang", ")", "except", "KeyError", ":", "pass", "try", ":", "loc", "=", "locale", ".", "getdefaultlocale", "(", ")", "[", "0", "]", "if", "loc", ":", "try_langs", ".", "append", "(", "loc", ")", "try_langs", ".", "append", "(", "loc", "[", ":", "2", "]", ")", "except", "IndexError", ":", "pass", "server_lang", "=", "content_endpoint", ".", "_node", ".", "params", "[", "'query'", "]", "[", "'language'", "]", "# handle something like 'ru_RU' not available - only 'ru'", "for", "lang", "in", "try_langs", ":", "if", "lang", "in", "server_lang", ".", "possible_values", ":", "loc", "=", "{", "'language'", ":", "lang", "}", "break", "else", ":", "loc", "=", "{", "}", "get_content", ".", "cache", "=", "content", "=", "content_endpoint", "(", "*", "*", "loc", ")", "with", "open", "(", "CONTENT_JSON", ",", "'w'", ")", "as", "f", ":", "json", ".", "dump", "(", "content", ",", "f", ")", "return", "content", "try", ":", "with", "open", "(", "CONTENT_JSON", ")", "as", "f", ":", "get_content", ".", "cache", "=", "content", "=", "json", ".", "load", "(", "f", ")", "return", "content", "except", "JSONDecodeError", ":", "return", "get_content", "(", "api", ",", "rebuild_cache", "=", "True", ")" ]
get content from server or cache
[ "get", "content", "from", "server", "or", "cache" ]
python
train
limpyd/redis-limpyd
limpyd/collection.py
https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/collection.py#L574-L583
def sort(self, **parameters):
    """
    Parameters:
    `by`: pass either a field name or a wildcard string to sort on
          prefix with `-` to make a desc sort.
    `alpha`: set it to True to sort lexicographically instead of numerically.
    """
    parameters = self._coerce_by_parameter(parameters)
    self._sort = parameters
    return self
[ "def", "sort", "(", "self", ",", "*", "*", "parameters", ")", ":", "parameters", "=", "self", ".", "_coerce_by_parameter", "(", "parameters", ")", "self", ".", "_sort", "=", "parameters", "return", "self" ]
Parameters:
`by`: pass either a field name or a wildcard string to sort on
      prefix with `-` to make a desc sort.
`alpha`: set it to True to sort lexicographically instead of numerically.
[ "Parameters", ":", "by", ":", "pass", "either", "a", "field", "name", "or", "a", "wildcard", "string", "to", "sort", "on", "prefix", "with", "-", "to", "make", "a", "desc", "sort", ".", "alpha", ":", "set", "it", "to", "True", "to", "sort", "lexicographically", "instead", "of", "numerically", "." ]
python
train
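A hedged sketch against a hypothetical limpyd model `Person` with a sortable `birth_year` field; collections chain like querysets:

# Hypothetical model and field names, for illustration only.
people = Person.collection(lastname='Doe').sort(by='birth_year')
oldest_first = list(people)
youngest_first = Person.collection().sort(by='-birth_year')   # desc sort
by_name = Person.collection().sort(by='name', alpha=True)     # lexicographic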
jleinonen/pytmatrix
pytmatrix/tmatrix.py
https://github.com/jleinonen/pytmatrix/blob/8803507fe5332786feab105fa74acf63e7121718/pytmatrix/tmatrix.py#L202-L219
def _init_tmatrix(self):
    """Initialize the T-matrix.
    """
    if self.radius_type == Scatterer.RADIUS_MAXIMUM:
        # Maximum radius is not directly supported in the original
        # so we convert it to equal volume radius
        radius_type = Scatterer.RADIUS_EQUAL_VOLUME
        radius = self.equal_volume_from_maximum()
    else:
        radius_type = self.radius_type
        radius = self.radius

    self.nmax = pytmatrix.calctmat(radius, radius_type,
                                   self.wavelength, self.m.real, self.m.imag,
                                   self.axis_ratio, self.shape, self.ddelt,
                                   self.ndgs)
    self._tm_signature = (self.radius, self.radius_type, self.wavelength,
                          self.m, self.axis_ratio, self.shape, self.ddelt,
                          self.ndgs)
[ "def", "_init_tmatrix", "(", "self", ")", ":", "if", "self", ".", "radius_type", "==", "Scatterer", ".", "RADIUS_MAXIMUM", ":", "# Maximum radius is not directly supported in the original", "# so we convert it to equal volume radius", "radius_type", "=", "Scatterer", ".", "RADIUS_EQUAL_VOLUME", "radius", "=", "self", ".", "equal_volume_from_maximum", "(", ")", "else", ":", "radius_type", "=", "self", ".", "radius_type", "radius", "=", "self", ".", "radius", "self", ".", "nmax", "=", "pytmatrix", ".", "calctmat", "(", "radius", ",", "radius_type", ",", "self", ".", "wavelength", ",", "self", ".", "m", ".", "real", ",", "self", ".", "m", ".", "imag", ",", "self", ".", "axis_ratio", ",", "self", ".", "shape", ",", "self", ".", "ddelt", ",", "self", ".", "ndgs", ")", "self", ".", "_tm_signature", "=", "(", "self", ".", "radius", ",", "self", ".", "radius_type", ",", "self", ".", "wavelength", ",", "self", ".", "m", ",", "self", ".", "axis_ratio", ",", "self", ".", "shape", ",", "self", ".", "ddelt", ",", "self", ".", "ndgs", ")" ]
Initialize the T-matrix.
[ "Initialize", "the", "T", "-", "matrix", "." ]
python
train
MisterWil/abodepy
abodepy/devices/camera.py
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/camera.py#L40-L48
def refresh_image(self):
    """Get the most recent camera image."""
    url = str.replace(CONST.TIMELINE_IMAGES_ID_URL,
                      '$DEVID$', self.device_id)
    response = self._abode.send_request("get", url)

    _LOGGER.debug("Get image response: %s", response.text)

    return self.update_image_location(json.loads(response.text))
[ "def", "refresh_image", "(", "self", ")", ":", "url", "=", "str", ".", "replace", "(", "CONST", ".", "TIMELINE_IMAGES_ID_URL", ",", "'$DEVID$'", ",", "self", ".", "device_id", ")", "response", "=", "self", ".", "_abode", ".", "send_request", "(", "\"get\"", ",", "url", ")", "_LOGGER", ".", "debug", "(", "\"Get image response: %s\"", ",", "response", ".", "text", ")", "return", "self", ".", "update_image_location", "(", "json", ".", "loads", "(", "response", ".", "text", ")", ")" ]
Get the most recent camera image.
[ "Get", "the", "most", "recent", "camera", "image", "." ]
python
train
pybel/pybel
src/pybel/struct/summary/node_summary.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/summary/node_summary.py#L170-L183
def count_names_by_namespace(graph, namespace):
    """Get the set of all of the names in a given namespace that are in the graph.

    :param pybel.BELGraph graph: A BEL graph
    :param str namespace: A namespace keyword
    :return: A counter from {name: frequency}
    :rtype: collections.Counter
    :raises IndexError: if the namespace is not defined in the graph.
    """
    if namespace not in graph.defined_namespace_keywords:
        raise IndexError('{} is not defined in {}'.format(namespace, graph))

    return Counter(_namespace_filtered_iterator(graph, namespace))
[ "def", "count_names_by_namespace", "(", "graph", ",", "namespace", ")", ":", "if", "namespace", "not", "in", "graph", ".", "defined_namespace_keywords", ":", "raise", "IndexError", "(", "'{} is not defined in {}'", ".", "format", "(", "namespace", ",", "graph", ")", ")", "return", "Counter", "(", "_namespace_filtered_iterator", "(", "graph", ",", "namespace", ")", ")" ]
Get the set of all of the names in a given namespace that are in the graph.

:param pybel.BELGraph graph: A BEL graph
:param str namespace: A namespace keyword
:return: A counter from {name: frequency}
:rtype: collections.Counter
:raises IndexError: if the namespace is not defined in the graph.
[ "Get", "the", "set", "of", "all", "of", "the", "names", "in", "a", "given", "namespace", "that", "are", "in", "the", "graph", "." ]
python
train
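A short usage sketch, assuming `graph` is a populated pybel.BELGraph defining the 'HGNC' namespace and that the function is re-exported from pybel.struct.summary:

from pybel.struct.summary import count_names_by_namespace

counter = count_names_by_namespace(graph, 'HGNC')
print(counter.most_common(5))  # five most frequently used gene symbols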
cariad/py-wpconfigr
wpconfigr/wp_config_string.py
https://github.com/cariad/py-wpconfigr/blob/8f25bb849b72ce95957566544a2be8445316c818/wpconfigr/wp_config_string.py#L29-L41
def _get_match(self, key):
    """ Gets a MatchObject for the given key.

    Args:
        key (str): Key of the property to look-up.

    Return:
        MatchObject: The discovered match.
    """
    return self._get_string_match(key=key) or \
        self._get_non_string_match(key=key)
[ "def", "_get_match", "(", "self", ",", "key", ")", ":", "return", "self", ".", "_get_string_match", "(", "key", "=", "key", ")", "or", "self", ".", "_get_non_string_match", "(", "key", "=", "key", ")" ]
Gets a MatchObject for the given key.

Args:
    key (str): Key of the property to look-up.

Return:
    MatchObject: The discovered match.
[ "Gets", "a", "MatchObject", "for", "the", "given", "key", "." ]
python
train
ET-CS/dry
dry/__init__.py
https://github.com/ET-CS/dry/blob/e8e73903dab372122ebb89a11d2168dd239faf51/dry/__init__.py#L382-L394
def init_current_directory():
    """Initialize and create dry config file(s) inside current directory"""
    settings_directory = project_path + '/.dry'
    settings_file = settings_directory + '/config.py'
    if os.path.isdir(settings_directory):
        # already initialized
        print("directory already initialized.")
        return
    # init
    os.makedirs(settings_directory)
    f = open(settings_file, 'w')
    print(sample_config_file, file=f)
    f.close()
[ "def", "init_current_directory", "(", ")", ":", "settings_directory", "=", "project_path", "+", "'/.dry'", "settings_file", "=", "settings_directory", "+", "'/config.py'", "if", "os", ".", "path", ".", "isdir", "(", "settings_directory", ")", ":", "# already initialized", "print", "(", "\"directory already initialized.\"", ")", "return", "# init", "os", ".", "makedirs", "(", "settings_directory", ")", "f", "=", "open", "(", "settings_file", ",", "'w'", ")", "print", "(", "sample_config_file", ",", "file", "=", "f", ")", "f", ".", "close", "(", ")" ]
Initialize and create dry config file(s) inside current directory
[ "Initialize", "and", "create", "dry", "config", "file", "(", "s", ")", "inside", "current", "directory" ]
python
train
echinopsii/net.echinopsii.ariane.community.cli.python3
ariane_clip3/mapping.py
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/mapping.py#L2176-L2215
def get_gates():
    """
    get all gates known on the Ariane server
    :return:
    """
    LOGGER.debug("GateService.get_gates")
    params = SessionService.complete_transactional_req(None)
    if params is None:
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            params = {'OPERATION': 'getGates'}
            args = {'properties': params}
        else:
            args = {'http_operation': 'GET', 'operation_path': ''}
    else:
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            params['OPERATION'] = 'getGates'
            args = {'properties': params}
        else:
            args = {'http_operation': 'GET', 'operation_path': '', 'parameters': params}

    response = GateService.requester.call(args)
    if MappingService.driver_type != DriverFactory.DRIVER_REST:
        response = response.get()

    ret = None
    if response.rc == 0:
        ret = []
        for gate in response.response_content['gates']:
            ret.append(Gate.json_2_gate(gate))
    elif response.rc != 404:
        err_msg = 'GateService.get_gates - Problem while getting nodes. ' \
                  '. Reason: ' + str(response.response_content) + ' - ' + \
                  str(response.error_message) + " (" + str(response.rc) + ")"
        LOGGER.warning(err_msg)
        if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message:
            raise ArianeMappingOverloadError("GateService.get_gates",
                                             ArianeMappingOverloadError.ERROR_MSG)
        # traceback.print_stack()
    return ret
[ "def", "get_gates", "(", ")", ":", "LOGGER", ".", "debug", "(", "\"GateService.get_gates\"", ")", "params", "=", "SessionService", ".", "complete_transactional_req", "(", "None", ")", "if", "params", "is", "None", ":", "if", "MappingService", ".", "driver_type", "!=", "DriverFactory", ".", "DRIVER_REST", ":", "params", "=", "{", "'OPERATION'", ":", "'getGates'", "}", "args", "=", "{", "'properties'", ":", "params", "}", "else", ":", "args", "=", "{", "'http_operation'", ":", "'GET'", ",", "'operation_path'", ":", "''", "}", "else", ":", "if", "MappingService", ".", "driver_type", "!=", "DriverFactory", ".", "DRIVER_REST", ":", "params", "[", "'OPERATION'", "]", "=", "'getGates'", "args", "=", "{", "'properties'", ":", "params", "}", "else", ":", "args", "=", "{", "'http_operation'", ":", "'GET'", ",", "'operation_path'", ":", "''", ",", "'parameters'", ":", "params", "}", "response", "=", "GateService", ".", "requester", ".", "call", "(", "args", ")", "if", "MappingService", ".", "driver_type", "!=", "DriverFactory", ".", "DRIVER_REST", ":", "response", "=", "response", ".", "get", "(", ")", "ret", "=", "None", "if", "response", ".", "rc", "==", "0", ":", "ret", "=", "[", "]", "for", "gate", "in", "response", ".", "response_content", "[", "'gates'", "]", ":", "ret", ".", "append", "(", "Gate", ".", "json_2_gate", "(", "gate", ")", ")", "elif", "response", ".", "rc", "!=", "404", ":", "err_msg", "=", "'GateService.get_gates - Problem while getting nodes. '", "'. Reason: '", "+", "str", "(", "response", ".", "response_content", ")", "+", "' - '", "+", "str", "(", "response", ".", "error_message", ")", "+", "\" (\"", "+", "str", "(", "response", ".", "rc", ")", "+", "\")\"", "LOGGER", ".", "warning", "(", "err_msg", ")", "if", "response", ".", "rc", "==", "500", "and", "ArianeMappingOverloadError", ".", "ERROR_MSG", "in", "response", ".", "error_message", ":", "raise", "ArianeMappingOverloadError", "(", "\"GateService.get_gates\"", ",", "ArianeMappingOverloadError", ".", "ERROR_MSG", ")", "# traceback.print_stack()", "return", "ret" ]
get all gates known on the Ariane server
:return:
[ "get", "all", "gates", "known", "on", "the", "Ariane", "server", ":", "return", ":" ]
python
train
snare/voltron
setup.py
https://github.com/snare/voltron/blob/4ee3cbe6f7c1e38303f5dc6114c48b60217253c3/setup.py#L10-L51
def check_install():
    """
    Try to detect the two most common installation errors:

    1. Installing on macOS using a Homebrew version of Python
    2. Installing on Linux using Python 2 when GDB is linked with Python 3
    """
    if platform.system() == 'Darwin' and sys.executable != '/usr/bin/python':
        print("*" * 79)
        print(textwrap.fill(
            "WARNING: You are not using the version of Python included with "
            "macOS. If you intend to use Voltron with the LLDB included "
            "with Xcode, or GDB installed with Homebrew, it will not work "
            "unless it is installed using the system's default Python. If "
            "you intend to use Voltron with a debugger installed by some "
            "other method, it may be safe to ignore this warning. See the "
            "following documentation for more detailed installation "
            "instructions: "
            "https://github.com/snare/voltron/wiki/Installation", 79))
        print("*" * 79)
    elif platform.system() == 'Linux':
        try:
            output = check_output([
                "gdb", "-batch", "-q", "--nx",
                "-ex", "pi print(sys.version_info.major)"
            ]).decode("utf-8")
            gdb_python = int(output)

            if gdb_python != sys.version_info.major:
                print("*" * 79)
                print(textwrap.fill(
                    "WARNING: You are installing Voltron using Python {0}.x "
                    "and GDB is linked with Python {1}.x. GDB will not be "
                    "able to load Voltron. Please install using Python {1} "
                    "if you intend to use Voltron with the copy of GDB that "
                    "is installed. See the following documentation for more "
                    "detailed installation instructions: "
                    "https://github.com/snare/voltron/wiki/Installation"
                    .format(sys.version_info.major, gdb_python), 79))
                print("*" * 79)
        except:
            pass
[ "def", "check_install", "(", ")", ":", "if", "platform", ".", "system", "(", ")", "==", "'Darwin'", "and", "sys", ".", "executable", "!=", "'/usr/bin/python'", ":", "print", "(", "\"*\"", "*", "79", ")", "print", "(", "textwrap", ".", "fill", "(", "\"WARNING: You are not using the version of Python included with \"", "\"macOS. If you intend to use Voltron with the LLDB included \"", "\"with Xcode, or GDB installed with Homebrew, it will not work \"", "\"unless it is installed using the system's default Python. If \"", "\"you intend to use Voltron with a debugger installed by some \"", "\"other method, it may be safe to ignore this warning. See the \"", "\"following documentation for more detailed installation \"", "\"instructions: \"", "\"https://github.com/snare/voltron/wiki/Installation\"", ",", "79", ")", ")", "print", "(", "\"*\"", "*", "79", ")", "elif", "platform", ".", "system", "(", ")", "==", "'Linux'", ":", "try", ":", "output", "=", "check_output", "(", "[", "\"gdb\"", ",", "\"-batch\"", ",", "\"-q\"", ",", "\"--nx\"", ",", "\"-ex\"", ",", "\"pi print(sys.version_info.major)\"", "]", ")", ".", "decode", "(", "\"utf-8\"", ")", "gdb_python", "=", "int", "(", "output", ")", "if", "gdb_python", "!=", "sys", ".", "version_info", ".", "major", ":", "print", "(", "\"*\"", "*", "79", ")", "print", "(", "textwrap", ".", "fill", "(", "\"WARNING: You are installing Voltron using Python {0}.x \"", "\"and GDB is linked with Python {1}.x. GDB will not be \"", "\"able to load Voltron. Please install using Python {1} \"", "\"if you intend to use Voltron with the copy of GDB that \"", "\"is installed. See the following documentation for more \"", "\"detailed installation instructions: \"", "\"https://github.com/snare/voltron/wiki/Installation\"", ".", "format", "(", "sys", ".", "version_info", ".", "major", ",", "gdb_python", ")", ",", "79", ")", ")", "print", "(", "\"*\"", "*", "79", ")", "except", ":", "pass" ]
Try to detect the two most common installation errors:

1. Installing on macOS using a Homebrew version of Python
2. Installing on Linux using Python 2 when GDB is linked with Python 3
[ "Try", "to", "detect", "the", "two", "most", "common", "installation", "errors", ":" ]
python
train
Erotemic/utool
utool/util_str.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L1885-L1910
def str_between(str_, startstr, endstr):
    r"""
    gets substring between two sentinel strings

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_str import *  # NOQA
        >>> import utool as ut
        >>> str_ = '\n INSERT INTO vsone(\n'
        >>> startstr = 'INSERT'
        >>> endstr = '('
        >>> result = str_between(str_, startstr, endstr)
        >>> print(result)
    """
    if startstr is None:
        startpos = 0
    else:
        startpos = str_.find(startstr) + len(startstr)
    if endstr is None:
        endpos = None
    else:
        endpos = str_.find(endstr)
        if endpos == -1:
            endpos = None
    newstr = str_[startpos:endpos]
    return newstr
[ "def", "str_between", "(", "str_", ",", "startstr", ",", "endstr", ")", ":", "if", "startstr", "is", "None", ":", "startpos", "=", "0", "else", ":", "startpos", "=", "str_", ".", "find", "(", "startstr", ")", "+", "len", "(", "startstr", ")", "if", "endstr", "is", "None", ":", "endpos", "=", "None", "else", ":", "endpos", "=", "str_", ".", "find", "(", "endstr", ")", "if", "endpos", "==", "-", "1", ":", "endpos", "=", "None", "newstr", "=", "str_", "[", "startpos", ":", "endpos", "]", "return", "newstr" ]
r"""
gets substring between two sentinel strings

Example:
    >>> # DISABLE_DOCTEST
    >>> from utool.util_str import *  # NOQA
    >>> import utool as ut
    >>> str_ = '\n INSERT INTO vsone(\n'
    >>> startstr = 'INSERT'
    >>> endstr = '('
    >>> result = str_between(str_, startstr, endstr)
    >>> print(result)
[ "r", "gets", "substring", "between", "two", "sentianl", "strings" ]
python
train
pr-omethe-us/PyKED
pyked/chemked.py
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/chemked.py#L853-L889
def get_cantera_mass_fraction(self, species_conversion=None):
    """Get the mass fractions in a string format suitable for input to Cantera.

    Arguments:
        species_conversion (`dict`, optional): Mapping of species identifier to a
            species name. This argument should be supplied when the name of the
            species in the ChemKED YAML file does not match the name of the same
            species in a chemical kinetic mechanism. The species identifier (the
            key of the mapping) can be the name, InChI, or SMILES provided in the
            ChemKED file, while the value associated with a key should be the
            desired name in the Cantera format output string.

    Returns:
        `str`: String of mass fractions in the ``SPEC:AMT, SPEC:AMT`` format

    Raises:
        `ValueError`: If the composition type is ``'mole fraction'`` or
            ``'mole percent'``, the conversion cannot be done because no molecular
            weight information is known

    Examples:
        >>> dp = DataPoint(properties)
        >>> dp.get_cantera_mass_fraction()
        'H2:2.2525e-04, O2:4.4775e-03, Ar:9.9530e-01'
        >>> species_conversion = {'H2': 'h2', 'O2': 'o2'}
        >>> dp.get_cantera_mass_fraction(species_conversion)
        'h2:2.2525e-04, o2:4.4775e-03, Ar:9.9530e-01'
        >>> species_conversion = {'1S/H2/h1H': 'h2', '1S/O2/c1-2': 'o2'}
        >>> dp.get_cantera_mass_fraction(species_conversion)
        'h2:2.2525e-04, o2:4.4775e-03, Ar:9.9530e-01'
    """
    if self.composition_type in ['mole fraction', 'mole percent']:
        raise ValueError('Cannot get mass fractions from the given composition.\n'
                         '{}'.format(self.composition))
    else:
        return self.get_cantera_composition_string(species_conversion)
[ "def", "get_cantera_mass_fraction", "(", "self", ",", "species_conversion", "=", "None", ")", ":", "if", "self", ".", "composition_type", "in", "[", "'mole fraction'", ",", "'mole percent'", "]", ":", "raise", "ValueError", "(", "'Cannot get mass fractions from the given composition.\\n'", "'{}'", ".", "format", "(", "self", ".", "composition", ")", ")", "else", ":", "return", "self", ".", "get_cantera_composition_string", "(", "species_conversion", ")" ]
Get the mass fractions in a string format suitable for input to Cantera.

Arguments:
    species_conversion (`dict`, optional): Mapping of species identifier to a
        species name. This argument should be supplied when the name of the
        species in the ChemKED YAML file does not match the name of the same
        species in a chemical kinetic mechanism. The species identifier (the
        key of the mapping) can be the name, InChI, or SMILES provided in the
        ChemKED file, while the value associated with a key should be the
        desired name in the Cantera format output string.

Returns:
    `str`: String of mass fractions in the ``SPEC:AMT, SPEC:AMT`` format

Raises:
    `ValueError`: If the composition type is ``'mole fraction'`` or
        ``'mole percent'``, the conversion cannot be done because no molecular
        weight information is known

Examples:
    >>> dp = DataPoint(properties)
    >>> dp.get_cantera_mass_fraction()
    'H2:2.2525e-04, O2:4.4775e-03, Ar:9.9530e-01'
    >>> species_conversion = {'H2': 'h2', 'O2': 'o2'}
    >>> dp.get_cantera_mass_fraction(species_conversion)
    'h2:2.2525e-04, o2:4.4775e-03, Ar:9.9530e-01'
    >>> species_conversion = {'1S/H2/h1H': 'h2', '1S/O2/c1-2': 'o2'}
    >>> dp.get_cantera_mass_fraction(species_conversion)
    'h2:2.2525e-04, o2:4.4775e-03, Ar:9.9530e-01'
[ "Get", "the", "mass", "fractions", "in", "a", "string", "format", "suitable", "for", "input", "to", "Cantera", "." ]
python
train
biolink/biolink-model
metamodel/generators/markdowngen.py
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/markdowngen.py#L262-L277
def desc_for(self, obj: Element, doing_descs: bool) -> str:
    """ Return a description for object if it is unique (different than its parent)

    @param obj: object to be described
    @param doing_descs: If false, always return an empty string
    @return: text or empty string
    """
    if obj.description and doing_descs:
        if isinstance(obj, SlotDefinition) and obj.is_a:
            parent = self.schema.slots[obj.is_a]
        elif isinstance(obj, ClassDefinition) and obj.is_a:
            parent = self.schema.classes[obj.is_a]
        else:
            parent = None
        return '' if parent and obj.description == parent.description else obj.description
    return ''
[ "def", "desc_for", "(", "self", ",", "obj", ":", "Element", ",", "doing_descs", ":", "bool", ")", "->", "str", ":", "if", "obj", ".", "description", "and", "doing_descs", ":", "if", "isinstance", "(", "obj", ",", "SlotDefinition", ")", "and", "obj", ".", "is_a", ":", "parent", "=", "self", ".", "schema", ".", "slots", "[", "obj", ".", "is_a", "]", "elif", "isinstance", "(", "obj", ",", "ClassDefinition", ")", "and", "obj", ".", "is_a", ":", "parent", "=", "self", ".", "schema", ".", "classes", "[", "obj", ".", "is_a", "]", "else", ":", "parent", "=", "None", "return", "''", "if", "parent", "and", "obj", ".", "description", "==", "parent", ".", "description", "else", "obj", ".", "description", "return", "''" ]
Return a description for object if it is unique (different than its parent)

@param obj: object to be described
@param doing_descs: If false, always return an empty string
@return: text or empty string
[ "Return", "a", "description", "for", "object", "if", "it", "is", "unique", "(", "different", "than", "its", "parent", ")" ]
python
train
dwkim78/upsilon
upsilon/extract_features/extract_features.py
https://github.com/dwkim78/upsilon/blob/5f381453f26582ef56e62fb8fed7317ce67861af/upsilon/extract_features/extract_features.py#L451-L486
def half_mag_amplitude_ratio2(self, mag, avg):
    """
    Return ratio of amplitude of higher and lower magnitudes.

    A ratio of amplitude of higher and lower magnitudes than average,
    considering weights. This ratio, by definition, should be higher
    for EB than for others.

    Parameters
    ----------
    mag : array_like
        An array of magnitudes.
    avg : float
        An average value of magnitudes.

    Returns
    -------
    hl_ratio : float
        Ratio of amplitude of higher and lower magnitudes than average.
    """
    # For lower (fainter) magnitude than average.
    index = np.where(mag > avg)
    fainter_mag = mag[index]
    lower_sum = np.sum((fainter_mag - avg) ** 2) / len(fainter_mag)

    # For higher (brighter) magnitude than average.
    index = np.where(mag <= avg)
    brighter_mag = mag[index]
    higher_sum = np.sum((avg - brighter_mag) ** 2) / len(brighter_mag)

    # Return ratio.
    return np.sqrt(lower_sum / higher_sum)
[ "def", "half_mag_amplitude_ratio2", "(", "self", ",", "mag", ",", "avg", ")", ":", "# For lower (fainter) magnitude than average.", "index", "=", "np", ".", "where", "(", "mag", ">", "avg", ")", "fainter_mag", "=", "mag", "[", "index", "]", "lower_sum", "=", "np", ".", "sum", "(", "(", "fainter_mag", "-", "avg", ")", "**", "2", ")", "/", "len", "(", "fainter_mag", ")", "# For higher (brighter) magnitude than average.", "index", "=", "np", ".", "where", "(", "mag", "<=", "avg", ")", "brighter_mag", "=", "mag", "[", "index", "]", "higher_sum", "=", "np", ".", "sum", "(", "(", "avg", "-", "brighter_mag", ")", "**", "2", ")", "/", "len", "(", "brighter_mag", ")", "# Return ratio.", "return", "np", ".", "sqrt", "(", "lower_sum", "/", "higher_sum", ")" ]
Return ratio of amplitude of higher and lower magnitudes.

A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.

Parameters
----------
mag : array_like
    An array of magnitudes.
avg : float
    An average value of magnitudes.

Returns
-------
hl_ratio : float
    Ratio of amplitude of higher and lower magnitudes than average.
[ "Return", "ratio", "of", "amplitude", "of", "higher", "and", "lower", "magnitudes", "." ]
python
train
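The same ratio recomputed standalone on synthetic magnitudes, mirroring the method body (no class instance needed):

import numpy as np

mag = np.array([14.9, 15.1, 15.3, 14.7, 15.0])
avg = np.median(mag)
fainter = mag[mag > avg]                 # dimmer than average
brighter = mag[mag <= avg]               # brighter than average
hl_ratio = np.sqrt((np.sum((fainter - avg) ** 2) / len(fainter)) /
                   (np.sum((avg - brighter) ** 2) / len(brighter)))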
Opentrons/opentrons
api/src/opentrons/deck_calibration/endpoints.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/deck_calibration/endpoints.py#L438-L467
async def save_z(data):
    """
    Save the current Z height value for the calibration data

    :param data: Information obtained from a POST request.
    The content type is application/json.
    The correct packet form should be as follows:
    {
      'token': UUID token from current session start
      'command': 'save z'
    }
    """
    if not session.tip_length:
        message = "Tip length must be set before calibrating"
        status = 400
    else:
        if not feature_flags.use_protocol_api_v2():
            mount = 'Z' if session.current_mount == 'left' else 'A'
            actual_z = position(
                mount, session.adapter)[-1]
            length_offset = pipette_config.load(
                session.current_model,
                session.pipette_id).model_offset[-1]
            session.z_value = actual_z - session.tip_length + length_offset
        else:
            session.z_value = position(
                session.current_mount, session.adapter, session.cp)[-1]

        message = "Saved z: {}".format(session.z_value)
        status = 200
    return web.json_response({'message': message}, status=status)
[ "async", "def", "save_z", "(", "data", ")", ":", "if", "not", "session", ".", "tip_length", ":", "message", "=", "\"Tip length must be set before calibrating\"", "status", "=", "400", "else", ":", "if", "not", "feature_flags", ".", "use_protocol_api_v2", "(", ")", ":", "mount", "=", "'Z'", "if", "session", ".", "current_mount", "==", "'left'", "else", "'A'", "actual_z", "=", "position", "(", "mount", ",", "session", ".", "adapter", ")", "[", "-", "1", "]", "length_offset", "=", "pipette_config", ".", "load", "(", "session", ".", "current_model", ",", "session", ".", "pipette_id", ")", ".", "model_offset", "[", "-", "1", "]", "session", ".", "z_value", "=", "actual_z", "-", "session", ".", "tip_length", "+", "length_offset", "else", ":", "session", ".", "z_value", "=", "position", "(", "session", ".", "current_mount", ",", "session", ".", "adapter", ",", "session", ".", "cp", ")", "[", "-", "1", "]", "message", "=", "\"Saved z: {}\"", ".", "format", "(", "session", ".", "z_value", ")", "status", "=", "200", "return", "web", ".", "json_response", "(", "{", "'message'", ":", "message", "}", ",", "status", "=", "status", ")" ]
Save the current Z height value for the calibration data :param data: Information obtained from a POST request. The content type is application/json. The correct packet form should be as follows: { 'token': UUID token from current session start 'command': 'save z' }
[ "Save", "the", "current", "Z", "height", "value", "for", "the", "calibration", "data" ]
python
train
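The docstring spells out the expected JSON packet; below is a hedged client-side sketch. Only the packet form is taken from the docstring, while the endpoint path, host, and port are assumptions:

import asyncio
import aiohttp

async def send_save_z(token):
    payload = {'token': token, 'command': 'save z'}  # form from the docstring
    # Hypothetical URL; the real route is registered elsewhere in the server.
    async with aiohttp.ClientSession() as client:
        async with client.post('http://localhost:31950/calibration/deck',
                               json=payload) as resp:
            print(resp.status, await resp.json())

asyncio.run(send_save_z('uuid-from-session-start'))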
Yubico/python-yubico
setup.py
https://github.com/Yubico/python-yubico/blob/a72e8eddb90da6ee96e29f60912ca1f2872c9aea/setup.py#L10-L15
def get_version(): """Return the current version as defined by yubico/yubico_version.py.""" with open('yubico/yubico_version.py', 'r') as f: match = VERSION_PATTERN.search(f.read()) return match.group(1)
[ "def", "get_version", "(", ")", ":", "with", "open", "(", "'yubico/yubico_version.py'", ",", "'r'", ")", "as", "f", ":", "match", "=", "VERSION_PATTERN", ".", "search", "(", "f", ".", "read", "(", ")", ")", "return", "match", ".", "group", "(", "1", ")" ]
Return the current version as defined by yubico/yubico_version.py.
[ "Return", "the", "current", "version", "as", "defined", "by", "yubico", "/", "yubico_version", ".", "py", "." ]
python
train
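get_version() depends on a module-level VERSION_PATTERN that sits outside the snippet; here is a self-contained sketch with an assumed (hypothetical) pattern:

import re

# Assumed shape only; the real regex is defined elsewhere in setup.py.
VERSION_PATTERN = re.compile(r"__version__\s*=\s*['\"]([^'\"]+)['\"]")

with open('yubico/yubico_version.py', 'r') as f:
    match = VERSION_PATTERN.search(f.read())
print(match.group(1))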
saltstack/salt
salt/modules/aptpkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aptpkg.py#L2399-L2452
def expand_repo_def(**kwargs): ''' Take a repository definition and expand it to the full pkg repository dict that can be used for comparison. This is a helper function to make the Debian/Ubuntu apt sources sane for comparison in the pkgrepo states. This is designed to be called from pkgrepo states and will have little use being called on the CLI. ''' if 'repo' not in kwargs: raise SaltInvocationError('missing \'repo\' argument') _check_apt() sanitized = {} repo = salt.utils.pkg.deb.strip_uri(kwargs['repo']) if repo.startswith('ppa:') and __grains__['os'] in ('Ubuntu', 'Mint', 'neon'): dist = __grains__['lsb_distrib_codename'] owner_name, ppa_name = repo[4:].split('/', 1) if 'ppa_auth' in kwargs: auth_info = '{0}@'.format(kwargs['ppa_auth']) repo = LP_PVT_SRC_FORMAT.format(auth_info, owner_name, ppa_name, dist) else: if HAS_SOFTWAREPROPERTIES: if hasattr(softwareproperties.ppa, 'PPAShortcutHandler'): repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(dist)[0] else: repo = softwareproperties.ppa.expand_ppa_line(repo, dist)[0] else: repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist) if 'file' not in kwargs: filename = '/etc/apt/sources.list.d/{0}-{1}-{2}.list' kwargs['file'] = filename.format(owner_name, ppa_name, dist) source_entry = sourceslist.SourceEntry(repo) for list_args in ('architectures', 'comps'): if list_args in kwargs: kwargs[list_args] = kwargs[list_args].split(',') for kwarg in _MODIFY_OK: if kwarg in kwargs: setattr(source_entry, kwarg, kwargs[kwarg]) sanitized['file'] = source_entry.file sanitized['comps'] = getattr(source_entry, 'comps', []) sanitized['disabled'] = source_entry.disabled sanitized['dist'] = source_entry.dist sanitized['type'] = source_entry.type sanitized['uri'] = source_entry.uri.rstrip('/') sanitized['line'] = source_entry.line.strip() sanitized['architectures'] = getattr(source_entry, 'architectures', []) return sanitized
[ "def", "expand_repo_def", "(", "*", "*", "kwargs", ")", ":", "if", "'repo'", "not", "in", "kwargs", ":", "raise", "SaltInvocationError", "(", "'missing \\'repo\\' argument'", ")", "_check_apt", "(", ")", "sanitized", "=", "{", "}", "repo", "=", "salt", ".", "utils", ".", "pkg", ".", "deb", ".", "strip_uri", "(", "kwargs", "[", "'repo'", "]", ")", "if", "repo", ".", "startswith", "(", "'ppa:'", ")", "and", "__grains__", "[", "'os'", "]", "in", "(", "'Ubuntu'", ",", "'Mint'", ",", "'neon'", ")", ":", "dist", "=", "__grains__", "[", "'lsb_distrib_codename'", "]", "owner_name", ",", "ppa_name", "=", "repo", "[", "4", ":", "]", ".", "split", "(", "'/'", ",", "1", ")", "if", "'ppa_auth'", "in", "kwargs", ":", "auth_info", "=", "'{0}@'", ".", "format", "(", "kwargs", "[", "'ppa_auth'", "]", ")", "repo", "=", "LP_PVT_SRC_FORMAT", ".", "format", "(", "auth_info", ",", "owner_name", ",", "ppa_name", ",", "dist", ")", "else", ":", "if", "HAS_SOFTWAREPROPERTIES", ":", "if", "hasattr", "(", "softwareproperties", ".", "ppa", ",", "'PPAShortcutHandler'", ")", ":", "repo", "=", "softwareproperties", ".", "ppa", ".", "PPAShortcutHandler", "(", "repo", ")", ".", "expand", "(", "dist", ")", "[", "0", "]", "else", ":", "repo", "=", "softwareproperties", ".", "ppa", ".", "expand_ppa_line", "(", "repo", ",", "dist", ")", "[", "0", "]", "else", ":", "repo", "=", "LP_SRC_FORMAT", ".", "format", "(", "owner_name", ",", "ppa_name", ",", "dist", ")", "if", "'file'", "not", "in", "kwargs", ":", "filename", "=", "'/etc/apt/sources.list.d/{0}-{1}-{2}.list'", "kwargs", "[", "'file'", "]", "=", "filename", ".", "format", "(", "owner_name", ",", "ppa_name", ",", "dist", ")", "source_entry", "=", "sourceslist", ".", "SourceEntry", "(", "repo", ")", "for", "list_args", "in", "(", "'architectures'", ",", "'comps'", ")", ":", "if", "list_args", "in", "kwargs", ":", "kwargs", "[", "list_args", "]", "=", "kwargs", "[", "list_args", "]", ".", "split", "(", "','", ")", "for", "kwarg", "in", "_MODIFY_OK", ":", "if", "kwarg", "in", "kwargs", ":", "setattr", "(", "source_entry", ",", "kwarg", ",", "kwargs", "[", "kwarg", "]", ")", "sanitized", "[", "'file'", "]", "=", "source_entry", ".", "file", "sanitized", "[", "'comps'", "]", "=", "getattr", "(", "source_entry", ",", "'comps'", ",", "[", "]", ")", "sanitized", "[", "'disabled'", "]", "=", "source_entry", ".", "disabled", "sanitized", "[", "'dist'", "]", "=", "source_entry", ".", "dist", "sanitized", "[", "'type'", "]", "=", "source_entry", ".", "type", "sanitized", "[", "'uri'", "]", "=", "source_entry", ".", "uri", ".", "rstrip", "(", "'/'", ")", "sanitized", "[", "'line'", "]", "=", "source_entry", ".", "line", ".", "strip", "(", ")", "sanitized", "[", "'architectures'", "]", "=", "getattr", "(", "source_entry", ",", "'architectures'", ",", "[", "]", ")", "return", "sanitized" ]
Take a repository definition and expand it to the full pkg repository dict that can be used for comparison. This is a helper function to make the Debian/Ubuntu apt sources sane for comparison in the pkgrepo states. This is designed to be called from pkgrepo states and will have little use being called on the CLI.
[ "Take", "a", "repository", "definition", "and", "expand", "it", "to", "the", "full", "pkg", "repository", "dict", "that", "can", "be", "used", "for", "comparison", ".", "This", "is", "a", "helper", "function", "to", "make", "the", "Debian", "/", "Ubuntu", "apt", "sources", "sane", "for", "comparison", "in", "the", "pkgrepo", "states", "." ]
python
train
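The docstring says this helper is meant to be called from pkgrepo states rather than the CLI; a hedged sketch of such a call through the module loader (the repo line is an arbitrary example):

# Sketch only: assumes an apt-based minion where this module is loaded.
sanitized = __salt__['pkg.expand_repo_def'](
    repo='deb http://archive.ubuntu.com/ubuntu bionic main')
# The resulting dict is what states compare field-by-field.
print(sanitized['uri'], sanitized['comps'], sanitized['dist'])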
chatfirst/chatfirst
chatfirst/client.py
https://github.com/chatfirst/chatfirst/blob/11e023fc372e034dfd3417b61b67759ef8c37ad6/chatfirst/client.py#L38-L48
def bots_get(self, bot): """ Fetch and fill Bot object :param bot: empty bot object with name to search :type bot: Bot :rtype: Bot :return: filled bot object """ data = self.client.bots.__getattr__(bot.name).__call__() return Bot(data)
[ "def", "bots_get", "(", "self", ",", "bot", ")", ":", "data", "=", "self", ".", "client", ".", "bots", ".", "__getattr__", "(", "bot", ".", "name", ")", ".", "__call__", "(", ")", "return", "Bot", "(", "data", ")" ]
Fetch and fill Bot object :param bot: empty bot object with name to search :type bot: Bot :rtype: Bot :return: filled bot object
[ "Fetch", "and", "fill", "Bot", "object" ]
python
train
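A usage sketch; the Bot constructor shape and the client handle `cf` are assumptions, since only bots_get itself appears above:

# Hypothetical: an empty Bot carrying just the name to look up.
bot = Bot({'name': 'my-bot'})
filled = cf.bots_get(bot)  # returns a Bot built from the API response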
acrazing/dbapi
dbapi/base.py
https://github.com/acrazing/dbapi/blob/8c1f85cb1a051daf7be1fc97a62c4499983e9898/dbapi/base.py#L106-L127
def json(self, url, method='get', params=None, data=None):
    """
    Request and return JSON

    :type url: str
    :param url: API

    :type method: str
    :param method: HTTP METHOD

    :type params: dict
    :param params: query

    :type data: dict
    :param data: body

    :rtype: dict
    :return:
    """
    r = self.req(url, method, params, data)
    return r.json()
[ "def", "json", "(", "self", ",", "url", ",", "method", "=", "'get'", ",", "params", "=", "None", ",", "data", "=", "None", ")", ":", "r", "=", "self", ".", "req", "(", "url", ",", "method", ",", "params", ",", "data", ")", "return", "r", ".", "json", "(", ")" ]
Request and return JSON

:type url: str
:param url: API

:type method: str
:param method: HTTP METHOD

:type params: dict
:param params: query

:type data: dict
:param data: body

:rtype: dict
:return:
[ "Request", "and", "return", "JSON", ":", "type", "url", ":", "str", ":", "param", "url", ":", "API", ":", "type", "method", ":", "str", ":", "param", "method", ":", "HTTP", "METHOD", ":", "type", "params", ":", "dict", ":", "param", "params", ":", "query", ":", "type", "data", ":", "dict", ":", "param", "data", ":", "body", ":", "rtype", ":", "dict", ":", "return", ":" ]
python
train
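A hedged usage sketch, assuming `api` is an instance of the class that defines both req() and json(); the endpoint is hypothetical:

data = api.json('https://api.douban.com/v2/group/12345',
                method='get', params={'start': 0})
print(data)  # parsed JSON dict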
jadolg/rocketchat_API
rocketchat_API/rocketchat.py
https://github.com/jadolg/rocketchat_API/blob/f220d094434991cb9892418245f054ea06f28aad/rocketchat_API/rocketchat.py#L687-L689
def subscriptions_unread(self, room_id, **kwargs): """Mark messages as unread by roomId or from a message""" return self.__call_api_post('subscriptions.unread', roomId=room_id, kwargs=kwargs)
[ "def", "subscriptions_unread", "(", "self", ",", "room_id", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "__call_api_post", "(", "'subscriptions.unread'", ",", "roomId", "=", "room_id", ",", "kwargs", "=", "kwargs", ")" ]
Mark messages as unread by roomId or from a message
[ "Mark", "messages", "as", "unread", "by", "roomId", "or", "from", "a", "message" ]
python
train
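Typical usage with the library's client; the server URL and room ID are placeholders:

from rocketchat_API.rocketchat import RocketChat

rocket = RocketChat('user', 'password', server_url='https://demo.rocket.chat')
resp = rocket.subscriptions_unread('GENERAL')  # 'GENERAL' is an assumed room ID
print(resp.json())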
orb-framework/orb
orb/core/model.py
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/model.py#L428-L437
def context(self, **context): """ Returns the lookup options for this record. This will track the options that were used when looking this record up from the database. :return <orb.LookupOptions> """ output = orb.Context(context=self.__context) if self.__context is not None else orb.Context() output.update(context) return output
[ "def", "context", "(", "self", ",", "*", "*", "context", ")", ":", "output", "=", "orb", ".", "Context", "(", "context", "=", "self", ".", "__context", ")", "if", "self", ".", "__context", "is", "not", "None", "else", "orb", ".", "Context", "(", ")", "output", ".", "update", "(", "context", ")", "return", "output" ]
Returns the lookup options for this record. This will track the options that were used when looking this record up from the database. :return <orb.LookupOptions>
[ "Returns", "the", "lookup", "options", "for", "this", "record", ".", "This", "will", "track", "the", "options", "that", "were", "used", "when", "looking", "this", "record", "up", "from", "the", "database", "." ]
python
train
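A hedged sketch of the merge behaviour; `record` and the context key used here are hypothetical:

# The record's stored lookup context is copied, then the overrides win.
ctx = record.context(limit=10)
# ctx carries the original lookup options plus limit=10.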
spyder-ide/spyder
spyder/plugins/editor/widgets/editor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/editor.py#L1545-L1551
def sorted(self): """Utility function for sort_file_tabs_alphabetically().""" for i in range(0, self.tabs.tabBar().count() - 1): if (self.tabs.tabBar().tabText(i) > self.tabs.tabBar().tabText(i + 1)): return False return True
[ "def", "sorted", "(", "self", ")", ":", "for", "i", "in", "range", "(", "0", ",", "self", ".", "tabs", ".", "tabBar", "(", ")", ".", "count", "(", ")", "-", "1", ")", ":", "if", "(", "self", ".", "tabs", ".", "tabBar", "(", ")", ".", "tabText", "(", "i", ")", ">", "self", ".", "tabs", ".", "tabBar", "(", ")", ".", "tabText", "(", "i", "+", "1", ")", ")", ":", "return", "False", "return", "True" ]
Utility function for sort_file_tabs_alphabetically().
[ "Utility", "function", "for", "sort_file_tabs_alphabetically", "()", "." ]
python
train
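The adjacent-pair test above generalizes to any sequence of labels; a standalone sketch of the same idea:

def is_sorted(labels):
    # True when every label is <= its right-hand neighbour.
    return all(labels[i] <= labels[i + 1] for i in range(len(labels) - 1))

print(is_sorted(['a.py', 'b.py', 'c.py']))  # True
print(is_sorted(['b.py', 'a.py']))          # False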
koordinates/python-client
koordinates/metadata.py
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/metadata.py#L47-L61
def get_xml(self, fp, format=FORMAT_NATIVE): """ Returns the XML metadata for this source, converted to the requested format. Converted metadata may not contain all the same information as the native format. :param file fp: A path, or an open file-like object which the content should be written to. :param str format: desired format for the output. This should be one of the available formats from :py:meth:`.get_formats`, or :py:attr:`.FORMAT_NATIVE` for the native format. If you pass this function an open file-like object as the fp parameter, the function will not close that file for you. """ r = self._client.request('GET', getattr(self, format), stream=True) filename = stream.stream_response_to_file(r, path=fp) return filename
[ "def", "get_xml", "(", "self", ",", "fp", ",", "format", "=", "FORMAT_NATIVE", ")", ":", "r", "=", "self", ".", "_client", ".", "request", "(", "'GET'", ",", "getattr", "(", "self", ",", "format", ")", ",", "stream", "=", "True", ")", "filename", "=", "stream", ".", "stream_response_to_file", "(", "r", ",", "path", "=", "fp", ")", "return", "filename" ]
Returns the XML metadata for this source, converted to the requested format. Converted metadata may not contain all the same information as the native format. :param file fp: A path, or an open file-like object which the content should be written to. :param str format: desired format for the output. This should be one of the available formats from :py:meth:`.get_formats`, or :py:attr:`.FORMAT_NATIVE` for the native format. If you pass this function an open file-like object as the fp parameter, the function will not close that file for you.
[ "Returns", "the", "XML", "metadata", "for", "this", "source", "converted", "to", "the", "requested", "format", ".", "Converted", "metadata", "may", "not", "contain", "all", "the", "same", "information", "as", "the", "native", "format", "." ]
python
train
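A usage sketch; the `source.metadata` handle is assumed, and note the docstring's caveat that an open file object you pass in is not closed for you:

# Write the native-format XML to a path.
source.metadata.get_xml('metadata.xml')

# Or stream into an already-open file object (left open afterwards).
with open('metadata.xml', 'wb') as f:
    source.metadata.get_xml(f)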
moble/quaternion
__init__.py
https://github.com/moble/quaternion/blob/7a323e81b391d6892e2874073e495e0beb057e85/__init__.py#L190-L326
def from_rotation_matrix(rot, nonorthogonal=True): """Convert input 3x3 rotation matrix to unit quaternion By default, if scipy.linalg is available, this function uses Bar-Itzhack's algorithm to allow for non-orthogonal matrices. [J. Guidance, Vol. 23, No. 6, p. 1085 <http://dx.doi.org/10.2514/2.4654>] This will almost certainly be quite a bit slower than simpler versions, though it will be more robust to numerical errors in the rotation matrix. Also note that Bar-Itzhack uses some pretty weird conventions. The last component of the quaternion appears to represent the scalar, and the quaternion itself is conjugated relative to the convention used throughout this module. If scipy.linalg is not available or if the optional `nonorthogonal` parameter is set to `False`, this function falls back to the possibly faster, but less robust, algorithm of Markley [J. Guidance, Vol. 31, No. 2, p. 440 <http://dx.doi.org/10.2514/1.31730>]. Parameters ---------- rot: (...Nx3x3) float array Each 3x3 matrix represents a rotation by multiplying (from the left) a column vector to produce a rotated column vector. Note that this input may actually have ndims>3; it is just assumed that the last two dimensions have size 3, representing the matrix. nonorthogonal: bool, optional If scipy.linalg is available, use the more robust algorithm of Bar-Itzhack. Default value is True. Returns ------- q: array of quaternions Unit quaternions resulting in rotations corresponding to input rotations. Output shape is rot.shape[:-2]. Raises ------ LinAlgError If any of the eigenvalue solutions does not converge """ try: from scipy import linalg except ImportError: linalg = False rot = np.array(rot, copy=False) shape = rot.shape[:-2] if linalg and nonorthogonal: from operator import mul from functools import reduce K3 = np.empty(shape+(4, 4)) K3[..., 0, 0] = (rot[..., 0, 0] - rot[..., 1, 1] - rot[..., 2, 2])/3.0 K3[..., 0, 1] = (rot[..., 1, 0] + rot[..., 0, 1])/3.0 K3[..., 0, 2] = (rot[..., 2, 0] + rot[..., 0, 2])/3.0 K3[..., 0, 3] = (rot[..., 1, 2] - rot[..., 2, 1])/3.0 K3[..., 1, 0] = K3[..., 0, 1] K3[..., 1, 1] = (rot[..., 1, 1] - rot[..., 0, 0] - rot[..., 2, 2])/3.0 K3[..., 1, 2] = (rot[..., 2, 1] + rot[..., 1, 2])/3.0 K3[..., 1, 3] = (rot[..., 2, 0] - rot[..., 0, 2])/3.0 K3[..., 2, 0] = K3[..., 0, 2] K3[..., 2, 1] = K3[..., 1, 2] K3[..., 2, 2] = (rot[..., 2, 2] - rot[..., 0, 0] - rot[..., 1, 1])/3.0 K3[..., 2, 3] = (rot[..., 0, 1] - rot[..., 1, 0])/3.0 K3[..., 3, 0] = K3[..., 0, 3] K3[..., 3, 1] = K3[..., 1, 3] K3[..., 3, 2] = K3[..., 2, 3] K3[..., 3, 3] = (rot[..., 0, 0] + rot[..., 1, 1] + rot[..., 2, 2])/3.0 if not shape: q = zero.copy() eigvals, eigvecs = linalg.eigh(K3.T, eigvals=(3, 3)) q.components[0] = eigvecs[-1] q.components[1:] = -eigvecs[:-1].flatten() return q else: q = np.empty(shape+(4,), dtype=np.float) for flat_index in range(reduce(mul, shape)): multi_index = np.unravel_index(flat_index, shape) eigvals, eigvecs = linalg.eigh(K3[multi_index], eigvals=(3, 3)) q[multi_index+(0,)] = eigvecs[-1] q[multi_index+(slice(1,None),)] = -eigvecs[:-1].flatten() return as_quat_array(q) else: # No scipy.linalg or not `nonorthogonal` diagonals = np.empty(shape+(4,)) diagonals[..., 0] = rot[..., 0, 0] diagonals[..., 1] = rot[..., 1, 1] diagonals[..., 2] = rot[..., 2, 2] diagonals[..., 3] = rot[..., 0, 0] + rot[..., 1, 1] + rot[..., 2, 2] indices = np.argmax(diagonals, axis=-1) q = diagonals # reuse storage space indices_i = (indices == 0) if np.any(indices_i): if indices_i.shape == (): indices_i = Ellipsis rot_i = 
rot[indices_i, :, :] q[indices_i, 0] = rot_i[..., 2, 1] - rot_i[..., 1, 2] q[indices_i, 1] = 1 + rot_i[..., 0, 0] - rot_i[..., 1, 1] - rot_i[..., 2, 2] q[indices_i, 2] = rot_i[..., 0, 1] + rot_i[..., 1, 0] q[indices_i, 3] = rot_i[..., 0, 2] + rot_i[..., 2, 0] indices_i = (indices == 1) if np.any(indices_i): if indices_i.shape == (): indices_i = Ellipsis rot_i = rot[indices_i, :, :] q[indices_i, 0] = rot_i[..., 0, 2] - rot_i[..., 2, 0] q[indices_i, 1] = rot_i[..., 1, 0] + rot_i[..., 0, 1] q[indices_i, 2] = 1 - rot_i[..., 0, 0] + rot_i[..., 1, 1] - rot_i[..., 2, 2] q[indices_i, 3] = rot_i[..., 1, 2] + rot_i[..., 2, 1] indices_i = (indices == 2) if np.any(indices_i): if indices_i.shape == (): indices_i = Ellipsis rot_i = rot[indices_i, :, :] q[indices_i, 0] = rot_i[..., 1, 0] - rot_i[..., 0, 1] q[indices_i, 1] = rot_i[..., 2, 0] + rot_i[..., 0, 2] q[indices_i, 2] = rot_i[..., 2, 1] + rot_i[..., 1, 2] q[indices_i, 3] = 1 - rot_i[..., 0, 0] - rot_i[..., 1, 1] + rot_i[..., 2, 2] indices_i = (indices == 3) if np.any(indices_i): if indices_i.shape == (): indices_i = Ellipsis rot_i = rot[indices_i, :, :] q[indices_i, 0] = 1 + rot_i[..., 0, 0] + rot_i[..., 1, 1] + rot_i[..., 2, 2] q[indices_i, 1] = rot_i[..., 2, 1] - rot_i[..., 1, 2] q[indices_i, 2] = rot_i[..., 0, 2] - rot_i[..., 2, 0] q[indices_i, 3] = rot_i[..., 1, 0] - rot_i[..., 0, 1] q /= np.linalg.norm(q, axis=-1)[..., np.newaxis] return as_quat_array(q)
[ "def", "from_rotation_matrix", "(", "rot", ",", "nonorthogonal", "=", "True", ")", ":", "try", ":", "from", "scipy", "import", "linalg", "except", "ImportError", ":", "linalg", "=", "False", "rot", "=", "np", ".", "array", "(", "rot", ",", "copy", "=", "False", ")", "shape", "=", "rot", ".", "shape", "[", ":", "-", "2", "]", "if", "linalg", "and", "nonorthogonal", ":", "from", "operator", "import", "mul", "from", "functools", "import", "reduce", "K3", "=", "np", ".", "empty", "(", "shape", "+", "(", "4", ",", "4", ")", ")", "K3", "[", "...", ",", "0", ",", "0", "]", "=", "(", "rot", "[", "...", ",", "0", ",", "0", "]", "-", "rot", "[", "...", ",", "1", ",", "1", "]", "-", "rot", "[", "...", ",", "2", ",", "2", "]", ")", "/", "3.0", "K3", "[", "...", ",", "0", ",", "1", "]", "=", "(", "rot", "[", "...", ",", "1", ",", "0", "]", "+", "rot", "[", "...", ",", "0", ",", "1", "]", ")", "/", "3.0", "K3", "[", "...", ",", "0", ",", "2", "]", "=", "(", "rot", "[", "...", ",", "2", ",", "0", "]", "+", "rot", "[", "...", ",", "0", ",", "2", "]", ")", "/", "3.0", "K3", "[", "...", ",", "0", ",", "3", "]", "=", "(", "rot", "[", "...", ",", "1", ",", "2", "]", "-", "rot", "[", "...", ",", "2", ",", "1", "]", ")", "/", "3.0", "K3", "[", "...", ",", "1", ",", "0", "]", "=", "K3", "[", "...", ",", "0", ",", "1", "]", "K3", "[", "...", ",", "1", ",", "1", "]", "=", "(", "rot", "[", "...", ",", "1", ",", "1", "]", "-", "rot", "[", "...", ",", "0", ",", "0", "]", "-", "rot", "[", "...", ",", "2", ",", "2", "]", ")", "/", "3.0", "K3", "[", "...", ",", "1", ",", "2", "]", "=", "(", "rot", "[", "...", ",", "2", ",", "1", "]", "+", "rot", "[", "...", ",", "1", ",", "2", "]", ")", "/", "3.0", "K3", "[", "...", ",", "1", ",", "3", "]", "=", "(", "rot", "[", "...", ",", "2", ",", "0", "]", "-", "rot", "[", "...", ",", "0", ",", "2", "]", ")", "/", "3.0", "K3", "[", "...", ",", "2", ",", "0", "]", "=", "K3", "[", "...", ",", "0", ",", "2", "]", "K3", "[", "...", ",", "2", ",", "1", "]", "=", "K3", "[", "...", ",", "1", ",", "2", "]", "K3", "[", "...", ",", "2", ",", "2", "]", "=", "(", "rot", "[", "...", ",", "2", ",", "2", "]", "-", "rot", "[", "...", ",", "0", ",", "0", "]", "-", "rot", "[", "...", ",", "1", ",", "1", "]", ")", "/", "3.0", "K3", "[", "...", ",", "2", ",", "3", "]", "=", "(", "rot", "[", "...", ",", "0", ",", "1", "]", "-", "rot", "[", "...", ",", "1", ",", "0", "]", ")", "/", "3.0", "K3", "[", "...", ",", "3", ",", "0", "]", "=", "K3", "[", "...", ",", "0", ",", "3", "]", "K3", "[", "...", ",", "3", ",", "1", "]", "=", "K3", "[", "...", ",", "1", ",", "3", "]", "K3", "[", "...", ",", "3", ",", "2", "]", "=", "K3", "[", "...", ",", "2", ",", "3", "]", "K3", "[", "...", ",", "3", ",", "3", "]", "=", "(", "rot", "[", "...", ",", "0", ",", "0", "]", "+", "rot", "[", "...", ",", "1", ",", "1", "]", "+", "rot", "[", "...", ",", "2", ",", "2", "]", ")", "/", "3.0", "if", "not", "shape", ":", "q", "=", "zero", ".", "copy", "(", ")", "eigvals", ",", "eigvecs", "=", "linalg", ".", "eigh", "(", "K3", ".", "T", ",", "eigvals", "=", "(", "3", ",", "3", ")", ")", "q", ".", "components", "[", "0", "]", "=", "eigvecs", "[", "-", "1", "]", "q", ".", "components", "[", "1", ":", "]", "=", "-", "eigvecs", "[", ":", "-", "1", "]", ".", "flatten", "(", ")", "return", "q", "else", ":", "q", "=", "np", ".", "empty", "(", "shape", "+", "(", "4", ",", ")", ",", "dtype", "=", "np", ".", "float", ")", "for", "flat_index", "in", "range", "(", "reduce", "(", "mul", ",", "shape", ")", ")", ":", "multi_index", "=", "np", ".", 
"unravel_index", "(", "flat_index", ",", "shape", ")", "eigvals", ",", "eigvecs", "=", "linalg", ".", "eigh", "(", "K3", "[", "multi_index", "]", ",", "eigvals", "=", "(", "3", ",", "3", ")", ")", "q", "[", "multi_index", "+", "(", "0", ",", ")", "]", "=", "eigvecs", "[", "-", "1", "]", "q", "[", "multi_index", "+", "(", "slice", "(", "1", ",", "None", ")", ",", ")", "]", "=", "-", "eigvecs", "[", ":", "-", "1", "]", ".", "flatten", "(", ")", "return", "as_quat_array", "(", "q", ")", "else", ":", "# No scipy.linalg or not `nonorthogonal`", "diagonals", "=", "np", ".", "empty", "(", "shape", "+", "(", "4", ",", ")", ")", "diagonals", "[", "...", ",", "0", "]", "=", "rot", "[", "...", ",", "0", ",", "0", "]", "diagonals", "[", "...", ",", "1", "]", "=", "rot", "[", "...", ",", "1", ",", "1", "]", "diagonals", "[", "...", ",", "2", "]", "=", "rot", "[", "...", ",", "2", ",", "2", "]", "diagonals", "[", "...", ",", "3", "]", "=", "rot", "[", "...", ",", "0", ",", "0", "]", "+", "rot", "[", "...", ",", "1", ",", "1", "]", "+", "rot", "[", "...", ",", "2", ",", "2", "]", "indices", "=", "np", ".", "argmax", "(", "diagonals", ",", "axis", "=", "-", "1", ")", "q", "=", "diagonals", "# reuse storage space", "indices_i", "=", "(", "indices", "==", "0", ")", "if", "np", ".", "any", "(", "indices_i", ")", ":", "if", "indices_i", ".", "shape", "==", "(", ")", ":", "indices_i", "=", "Ellipsis", "rot_i", "=", "rot", "[", "indices_i", ",", ":", ",", ":", "]", "q", "[", "indices_i", ",", "0", "]", "=", "rot_i", "[", "...", ",", "2", ",", "1", "]", "-", "rot_i", "[", "...", ",", "1", ",", "2", "]", "q", "[", "indices_i", ",", "1", "]", "=", "1", "+", "rot_i", "[", "...", ",", "0", ",", "0", "]", "-", "rot_i", "[", "...", ",", "1", ",", "1", "]", "-", "rot_i", "[", "...", ",", "2", ",", "2", "]", "q", "[", "indices_i", ",", "2", "]", "=", "rot_i", "[", "...", ",", "0", ",", "1", "]", "+", "rot_i", "[", "...", ",", "1", ",", "0", "]", "q", "[", "indices_i", ",", "3", "]", "=", "rot_i", "[", "...", ",", "0", ",", "2", "]", "+", "rot_i", "[", "...", ",", "2", ",", "0", "]", "indices_i", "=", "(", "indices", "==", "1", ")", "if", "np", ".", "any", "(", "indices_i", ")", ":", "if", "indices_i", ".", "shape", "==", "(", ")", ":", "indices_i", "=", "Ellipsis", "rot_i", "=", "rot", "[", "indices_i", ",", ":", ",", ":", "]", "q", "[", "indices_i", ",", "0", "]", "=", "rot_i", "[", "...", ",", "0", ",", "2", "]", "-", "rot_i", "[", "...", ",", "2", ",", "0", "]", "q", "[", "indices_i", ",", "1", "]", "=", "rot_i", "[", "...", ",", "1", ",", "0", "]", "+", "rot_i", "[", "...", ",", "0", ",", "1", "]", "q", "[", "indices_i", ",", "2", "]", "=", "1", "-", "rot_i", "[", "...", ",", "0", ",", "0", "]", "+", "rot_i", "[", "...", ",", "1", ",", "1", "]", "-", "rot_i", "[", "...", ",", "2", ",", "2", "]", "q", "[", "indices_i", ",", "3", "]", "=", "rot_i", "[", "...", ",", "1", ",", "2", "]", "+", "rot_i", "[", "...", ",", "2", ",", "1", "]", "indices_i", "=", "(", "indices", "==", "2", ")", "if", "np", ".", "any", "(", "indices_i", ")", ":", "if", "indices_i", ".", "shape", "==", "(", ")", ":", "indices_i", "=", "Ellipsis", "rot_i", "=", "rot", "[", "indices_i", ",", ":", ",", ":", "]", "q", "[", "indices_i", ",", "0", "]", "=", "rot_i", "[", "...", ",", "1", ",", "0", "]", "-", "rot_i", "[", "...", ",", "0", ",", "1", "]", "q", "[", "indices_i", ",", "1", "]", "=", "rot_i", "[", "...", ",", "2", ",", "0", "]", "+", "rot_i", "[", "...", ",", "0", ",", "2", "]", "q", "[", "indices_i", ",", "2", "]", "=", "rot_i", "[", "...", 
",", "2", ",", "1", "]", "+", "rot_i", "[", "...", ",", "1", ",", "2", "]", "q", "[", "indices_i", ",", "3", "]", "=", "1", "-", "rot_i", "[", "...", ",", "0", ",", "0", "]", "-", "rot_i", "[", "...", ",", "1", ",", "1", "]", "+", "rot_i", "[", "...", ",", "2", ",", "2", "]", "indices_i", "=", "(", "indices", "==", "3", ")", "if", "np", ".", "any", "(", "indices_i", ")", ":", "if", "indices_i", ".", "shape", "==", "(", ")", ":", "indices_i", "=", "Ellipsis", "rot_i", "=", "rot", "[", "indices_i", ",", ":", ",", ":", "]", "q", "[", "indices_i", ",", "0", "]", "=", "1", "+", "rot_i", "[", "...", ",", "0", ",", "0", "]", "+", "rot_i", "[", "...", ",", "1", ",", "1", "]", "+", "rot_i", "[", "...", ",", "2", ",", "2", "]", "q", "[", "indices_i", ",", "1", "]", "=", "rot_i", "[", "...", ",", "2", ",", "1", "]", "-", "rot_i", "[", "...", ",", "1", ",", "2", "]", "q", "[", "indices_i", ",", "2", "]", "=", "rot_i", "[", "...", ",", "0", ",", "2", "]", "-", "rot_i", "[", "...", ",", "2", ",", "0", "]", "q", "[", "indices_i", ",", "3", "]", "=", "rot_i", "[", "...", ",", "1", ",", "0", "]", "-", "rot_i", "[", "...", ",", "0", ",", "1", "]", "q", "/=", "np", ".", "linalg", ".", "norm", "(", "q", ",", "axis", "=", "-", "1", ")", "[", "...", ",", "np", ".", "newaxis", "]", "return", "as_quat_array", "(", "q", ")" ]
Convert input 3x3 rotation matrix to unit quaternion By default, if scipy.linalg is available, this function uses Bar-Itzhack's algorithm to allow for non-orthogonal matrices. [J. Guidance, Vol. 23, No. 6, p. 1085 <http://dx.doi.org/10.2514/2.4654>] This will almost certainly be quite a bit slower than simpler versions, though it will be more robust to numerical errors in the rotation matrix. Also note that Bar-Itzhack uses some pretty weird conventions. The last component of the quaternion appears to represent the scalar, and the quaternion itself is conjugated relative to the convention used throughout this module. If scipy.linalg is not available or if the optional `nonorthogonal` parameter is set to `False`, this function falls back to the possibly faster, but less robust, algorithm of Markley [J. Guidance, Vol. 31, No. 2, p. 440 <http://dx.doi.org/10.2514/1.31730>]. Parameters ---------- rot: (...Nx3x3) float array Each 3x3 matrix represents a rotation by multiplying (from the left) a column vector to produce a rotated column vector. Note that this input may actually have ndims>3; it is just assumed that the last two dimensions have size 3, representing the matrix. nonorthogonal: bool, optional If scipy.linalg is available, use the more robust algorithm of Bar-Itzhack. Default value is True. Returns ------- q: array of quaternions Unit quaternions resulting in rotations corresponding to input rotations. Output shape is rot.shape[:-2]. Raises ------ LinAlgError If any of the eigenvalue solutions does not converge
[ "Convert", "input", "3x3", "rotation", "matrix", "to", "unit", "quaternion" ]
python
train
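A quick round trip through the package's public namespace; note the sign ambiguity inherent to unit quaternions (q and -q encode the same rotation):

import numpy as np
import quaternion  # the numpy-quaternion package this module ships as

R = np.eye(3)  # identity rotation
q = quaternion.from_rotation_matrix(R)
print(q)  # expected to be, up to sign, the unit quaternion 1 + 0i + 0j + 0k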
HPCC-Cloud-Computing/CAL
calplus/utils.py
https://github.com/HPCC-Cloud-Computing/CAL/blob/7134b3dfe9ee3a383506a592765c7a12fa4ca1e9/calplus/utils.py#L50-L56
def _sanitizer(self, obj): """Sanitizer method that will be passed to json.dumps.""" if isinstance(obj, datetime.datetime): return obj.isoformat() if hasattr(obj, "to_dict"): return obj.to_dict() return obj
[ "def", "_sanitizer", "(", "self", ",", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "datetime", ".", "datetime", ")", ":", "return", "obj", ".", "isoformat", "(", ")", "if", "hasattr", "(", "obj", ",", "\"to_dict\"", ")", ":", "return", "obj", ".", "to_dict", "(", ")", "return", "obj" ]
Sanitizer method that will be passed to json.dumps.
[ "Sanitizer", "method", "that", "will", "be", "passed", "to", "json", ".", "dumps", "." ]
python
train
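As the docstring notes, the method is meant to be handed to json.dumps via the `default` hook; a minimal sketch with a hypothetical handler instance `h`:

import datetime
import json

payload = {'created': datetime.datetime(2020, 1, 1)}
print(json.dumps(payload, default=h._sanitizer))
# -> {"created": "2020-01-01T00:00:00"}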
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Script/SConscript.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Script/SConscript.py#L92-L111
def compute_exports(exports): """Compute a dictionary of exports given one of the parameters to the Export() function or the exports argument to SConscript().""" loc, glob = get_calling_namespaces() retval = {} try: for export in exports: if SCons.Util.is_Dict(export): retval.update(export) else: try: retval[export] = loc[export] except KeyError: retval[export] = glob[export] except KeyError as x: raise SCons.Errors.UserError("Export of non-existent variable '%s'"%x) return retval
[ "def", "compute_exports", "(", "exports", ")", ":", "loc", ",", "glob", "=", "get_calling_namespaces", "(", ")", "retval", "=", "{", "}", "try", ":", "for", "export", "in", "exports", ":", "if", "SCons", ".", "Util", ".", "is_Dict", "(", "export", ")", ":", "retval", ".", "update", "(", "export", ")", "else", ":", "try", ":", "retval", "[", "export", "]", "=", "loc", "[", "export", "]", "except", "KeyError", ":", "retval", "[", "export", "]", "=", "glob", "[", "export", "]", "except", "KeyError", "as", "x", ":", "raise", "SCons", ".", "Errors", ".", "UserError", "(", "\"Export of non-existent variable '%s'\"", "%", "x", ")", "return", "retval" ]
Compute a dictionary of exports given one of the parameters to the Export() function or the exports argument to SConscript().
[ "Compute", "a", "dictionary", "of", "exports", "given", "one", "of", "the", "parameters", "to", "the", "Export", "()", "function", "or", "the", "exports", "argument", "to", "SConscript", "()", "." ]
python
train
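A hedged sketch of the two accepted shapes: bare names resolved in the caller's scope, and dicts merged verbatim:

env = 'value-from-caller-scope'

exported = compute_exports(['env', {'extra': 42}])
# exported == {'env': 'value-from-caller-scope', 'extra': 42}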
saltstack/salt
salt/utils/dns.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L1128-L1224
def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
[ "def", "parse_resolv", "(", "src", "=", "'/etc/resolv.conf'", ")", ":", "nameservers", "=", "[", "]", "ip4_nameservers", "=", "[", "]", "ip6_nameservers", "=", "[", "]", "search", "=", "[", "]", "sortlist", "=", "[", "]", "domain", "=", "''", "options", "=", "[", "]", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "src", ")", "as", "src_file", ":", "# pylint: disable=too-many-nested-blocks", "for", "line", "in", "src_file", ":", "line", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "line", ")", ".", "strip", "(", ")", ".", "split", "(", ")", "try", ":", "(", "directive", ",", "arg", ")", "=", "(", "line", "[", "0", "]", ".", "lower", "(", ")", ",", "line", "[", "1", ":", "]", ")", "# Drop everything after # or ; (comments)", "arg", "=", "list", "(", "itertools", ".", "takewhile", "(", "lambda", "x", ":", "x", "[", "0", "]", "not", "in", "(", "'#'", ",", "';'", ")", ",", "arg", ")", ")", "if", "directive", "==", "'nameserver'", ":", "addr", "=", "arg", "[", "0", "]", "try", ":", "ip_addr", "=", "ipaddress", ".", "ip_address", "(", "addr", ")", "version", "=", "ip_addr", ".", "version", "ip_addr", "=", "str", "(", "ip_addr", ")", "if", "ip_addr", "not", "in", "nameservers", ":", "nameservers", ".", "append", "(", "ip_addr", ")", "if", "version", "==", "4", "and", "ip_addr", "not", "in", "ip4_nameservers", ":", "ip4_nameservers", ".", "append", "(", "ip_addr", ")", "elif", "version", "==", "6", "and", "ip_addr", "not", "in", "ip6_nameservers", ":", "ip6_nameservers", ".", "append", "(", "ip_addr", ")", "except", "ValueError", "as", "exc", ":", "log", ".", "error", "(", "'%s: %s'", ",", "src", ",", "exc", ")", "elif", "directive", "==", "'domain'", ":", "domain", "=", "arg", "[", "0", "]", "elif", "directive", "==", "'search'", ":", "search", "=", "arg", "elif", "directive", "==", "'sortlist'", ":", "# A sortlist is specified by IP address netmask pairs.", "# The netmask is optional and defaults to the natural", "# netmask of the net. The IP address and optional", "# network pairs are separated by slashes.", "for", "ip_raw", "in", "arg", ":", "try", ":", "ip_net", "=", "ipaddress", ".", "ip_network", "(", "ip_raw", ")", "except", "ValueError", "as", "exc", ":", "log", ".", "error", "(", "'%s: %s'", ",", "src", ",", "exc", ")", "else", ":", "if", "'/'", "not", "in", "ip_raw", ":", "# No netmask has been provided, guess", "# the \"natural\" one", "if", "ip_net", ".", "version", "==", "4", ":", "ip_addr", "=", "six", ".", "text_type", "(", "ip_net", ".", "network_address", ")", "# pylint: disable=protected-access", "mask", "=", "salt", ".", "utils", ".", "network", ".", "natural_ipv4_netmask", "(", "ip_addr", ")", "ip_net", "=", "ipaddress", ".", "ip_network", "(", "'{0}{1}'", ".", "format", "(", "ip_addr", ",", "mask", ")", ",", "strict", "=", "False", ")", "if", "ip_net", ".", "version", "==", "6", ":", "# TODO", "pass", "if", "ip_net", "not", "in", "sortlist", ":", "sortlist", ".", "append", "(", "ip_net", ")", "elif", "directive", "==", "'options'", ":", "# Options allows certain internal resolver variables to", "# be modified.", "if", "arg", "[", "0", "]", "not", "in", "options", ":", "options", ".", "append", "(", "arg", "[", "0", "]", ")", "except", "IndexError", ":", "continue", "if", "domain", "and", "search", ":", "# The domain and search keywords are mutually exclusive. 
If more", "# than one instance of these keywords is present, the last instance", "# will override.", "log", ".", "debug", "(", "'%s: The domain and search keywords are mutually exclusive.'", ",", "src", ")", "return", "{", "'nameservers'", ":", "nameservers", ",", "'ip4_nameservers'", ":", "ip4_nameservers", ",", "'ip6_nameservers'", ":", "ip6_nameservers", ",", "'sortlist'", ":", "[", "ip", ".", "with_netmask", "for", "ip", "in", "sortlist", "]", ",", "'domain'", ":", "domain", ",", "'search'", ":", "search", ",", "'options'", ":", "options", "}", "except", "IOError", ":", "return", "{", "}" ]
Parse a resolver configuration file (traditionally /etc/resolv.conf)
[ "Parse", "a", "resolver", "configuration", "file", "(", "traditionally", "/", "etc", "/", "resolv", ".", "conf", ")" ]
python
train
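Usage sketch; the keys match the dict assembled at the end of the function:

from salt.utils.dns import parse_resolv

conf = parse_resolv()  # defaults to /etc/resolv.conf
print(conf.get('nameservers'), conf.get('search'), conf.get('options'))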
vinitkumar/pycrawler
crawler.py
https://github.com/vinitkumar/pycrawler/blob/d3fe6d2da1469fc701c4fe04df88cee9cc8cd9c3/crawler.py#L50-L75
async def main():
    """ Main function."""
    opts, args = option_parser()
    url = args[0]
    if opts.links:
        getlinks(url)
        raise SystemExit(0)
    depth = opts.depth
    sTime = time.time()
    webcrawler = Webcrawler(url, depth)
    webcrawler.crawl()
    eTime = time.time()
    tTime = eTime - sTime
    print("CRAWLER STARTED:")
    print("%s, will crawl upto depth %d" % (url, depth))
    print("*****RESULTS")
    print("\n".join(webcrawler.urls))
    print("=" * 100)
    print("Crawler Statistics")
    print("=" * 100)
    print("No of links Found: %d" % webcrawler.links)
    print("No of followed: %d" % webcrawler.followed)
    print("Time Stats : Found all links after %0.2fs" % tTime)
[ "async", "def", "main", "(", ")", ":", "opts", ",", "args", "=", "option_parser", "(", ")", "url", "=", "args", "[", "0", "]", "if", "opts", ".", "links", ":", "getlinks", "(", "url", ")", "raise", "SystemExit", "(", "0", ")", "depth", "=", "opts", ".", "depth", "sTime", "=", "time", ".", "time", "(", ")", "webcrawler", "=", "Webcrawler", "(", "url", ",", "depth", ")", "webcrawler", ".", "crawl", "(", ")", "eTime", "=", "time", ".", "time", "(", ")", "tTime", "=", "eTime", "-", "sTime", "print", "(", "\"CRAWLER STARTED:\"", ")", "print", "(", "\"%s, will crawl upto depth %d\"", "%", "(", "url", ",", "depth", ")", ")", "print", "(", "\"*****RESULTS\"", ")", "print", "(", "\"\\n\"", ".", "join", "(", "webcrawler", ".", "urls", ")", ")", "print", "(", "\"=\"", "*", "100", ")", "print", "(", "\"Crawler Statistics\"", ")", "print", "(", "\"=\"", "*", "100", ")", "print", "(", "\"No of links Found: %d\"", "%", "webcrawler", ".", "links", ")", "print", "(", "\"No of followed: %d\"", "%", "webcrawler", ".", "followed", ")", "print", "(", "\"Time Stats : Found all links after %0.2fs\"", "%", "tTime", ")" ]
Main function.
[ "Main", "function", "." ]
python
train
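Since main() is a coroutine it needs an event loop to run; a hedged sketch of the entry point (option_parser is assumed to read sys.argv):

import asyncio

if __name__ == '__main__':
    # e.g. python crawler.py http://example.com --depth 2
    asyncio.run(main())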
thebigmunch/audio-metadata
src/audio_metadata/api.py
https://github.com/thebigmunch/audio-metadata/blob/d17bdbdb71db79c1568d54438d42dcd940b76074/src/audio_metadata/api.py#L85-L103
def loads(b): """Load audio metadata from a bytes-like object. Parameters: b (bytes-like object): A bytes-like object of an audio file. Returns: Format: An audio format object. Raises: UnsupportedFormat: If file is not of a supported format. """ parser_cls = determine_format(b) if parser_cls is None: raise UnsupportedFormat("Supported format signature not found.") return parser_cls.load(b)
[ "def", "loads", "(", "b", ")", ":", "parser_cls", "=", "determine_format", "(", "b", ")", "if", "parser_cls", "is", "None", ":", "raise", "UnsupportedFormat", "(", "\"Supported format signature not found.\"", ")", "return", "parser_cls", ".", "load", "(", "b", ")" ]
Load audio metadata from a bytes-like object. Parameters: b (bytes-like object): A bytes-like object of an audio file. Returns: Format: An audio format object. Raises: UnsupportedFormat: If file is not of a supported format.
[ "Load", "audio", "metadata", "from", "a", "bytes", "-", "like", "object", "." ]
python
train
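Usage sketch with a hypothetical file path:

from audio_metadata import loads

with open('song.flac', 'rb') as f:  # hypothetical file
    metadata = loads(f.read())
print(metadata)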
tradenity/python-sdk
tradenity/resources/tax_rate.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/tax_rate.py#L539-L559
def delete_tax_rate_by_id(cls, tax_rate_id, **kwargs): """Delete TaxRate Delete an instance of TaxRate by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_tax_rate_by_id(tax_rate_id, async=True) >>> result = thread.get() :param async bool :param str tax_rate_id: ID of taxRate to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_tax_rate_by_id_with_http_info(tax_rate_id, **kwargs) else: (data) = cls._delete_tax_rate_by_id_with_http_info(tax_rate_id, **kwargs) return data
[ "def", "delete_tax_rate_by_id", "(", "cls", ",", "tax_rate_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_delete_tax_rate_by_id_with_http_info", "(", "tax_rate_id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_delete_tax_rate_by_id_with_http_info", "(", "tax_rate_id", ",", "*", "*", "kwargs", ")", "return", "data" ]
Delete TaxRate Delete an instance of TaxRate by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_tax_rate_by_id(tax_rate_id, async=True) >>> result = thread.get() :param async bool :param str tax_rate_id: ID of taxRate to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
[ "Delete", "TaxRate" ]
python
train
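A sketch of both call styles from the docstring; the ID is hypothetical, and since `async` became a reserved word in Python 3.7 the keyword has to be splatted in:

# Synchronous (default) call:
TaxRate.delete_tax_rate_by_id('5c9f0a1b')  # hypothetical ID

# Asynchronous call, equivalent to the docstring's async=True:
thread = TaxRate.delete_tax_rate_by_id('5c9f0a1b', **{'async': True})
result = thread.get()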
futurecolors/django-geoip
django_geoip/management/ipgeobase.py
https://github.com/futurecolors/django-geoip/blob/f9eee4bcad40508089b184434b79826f842d7bd0/django_geoip/management/ipgeobase.py#L128-L147
def _update_geography(self, countries, regions, cities, city_country_mapping): """ Update database with new countries, regions and cities """ existing = { 'cities': list(City.objects.values_list('id', flat=True)), 'regions': list(Region.objects.values('name', 'country__code')), 'countries': Country.objects.values_list('code', flat=True) } for country_code in countries: if country_code not in existing['countries']: Country.objects.create(code=country_code, name=ISO_CODES.get(country_code, country_code)) for entry in regions: if entry not in existing['regions']: Region.objects.create(name=entry['name'], country_id=entry['country__code']) for entry in cities: if int(entry['id']) not in existing['cities']: code = city_country_mapping.get(entry['id']) if code: region = Region.objects.get(name=entry['region__name'], country__code=code) City.objects.create(id=entry['id'], name=entry['name'], region=region, latitude=entry.get('latitude'), longitude=entry.get('longitude'))
[ "def", "_update_geography", "(", "self", ",", "countries", ",", "regions", ",", "cities", ",", "city_country_mapping", ")", ":", "existing", "=", "{", "'cities'", ":", "list", "(", "City", ".", "objects", ".", "values_list", "(", "'id'", ",", "flat", "=", "True", ")", ")", ",", "'regions'", ":", "list", "(", "Region", ".", "objects", ".", "values", "(", "'name'", ",", "'country__code'", ")", ")", ",", "'countries'", ":", "Country", ".", "objects", ".", "values_list", "(", "'code'", ",", "flat", "=", "True", ")", "}", "for", "country_code", "in", "countries", ":", "if", "country_code", "not", "in", "existing", "[", "'countries'", "]", ":", "Country", ".", "objects", ".", "create", "(", "code", "=", "country_code", ",", "name", "=", "ISO_CODES", ".", "get", "(", "country_code", ",", "country_code", ")", ")", "for", "entry", "in", "regions", ":", "if", "entry", "not", "in", "existing", "[", "'regions'", "]", ":", "Region", ".", "objects", ".", "create", "(", "name", "=", "entry", "[", "'name'", "]", ",", "country_id", "=", "entry", "[", "'country__code'", "]", ")", "for", "entry", "in", "cities", ":", "if", "int", "(", "entry", "[", "'id'", "]", ")", "not", "in", "existing", "[", "'cities'", "]", ":", "code", "=", "city_country_mapping", ".", "get", "(", "entry", "[", "'id'", "]", ")", "if", "code", ":", "region", "=", "Region", ".", "objects", ".", "get", "(", "name", "=", "entry", "[", "'region__name'", "]", ",", "country__code", "=", "code", ")", "City", ".", "objects", ".", "create", "(", "id", "=", "entry", "[", "'id'", "]", ",", "name", "=", "entry", "[", "'name'", "]", ",", "region", "=", "region", ",", "latitude", "=", "entry", ".", "get", "(", "'latitude'", ")", ",", "longitude", "=", "entry", ".", "get", "(", "'longitude'", ")", ")" ]
Update database with new countries, regions and cities
[ "Update", "database", "with", "new", "countries", "regions", "and", "cities" ]
python
train
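A hedged sketch of the argument shapes, inferred from the key lookups inside the method; all values are made up:

countries = {'RU'}
regions = [{'name': 'Moscow Oblast', 'country__code': 'RU'}]
cities = [{'id': '1', 'name': 'Moscow', 'region__name': 'Moscow Oblast',
           'latitude': 55.75, 'longitude': 37.62}]
city_country_mapping = {'1': 'RU'}

# `command` stands in for the management-command instance owning the method.
command._update_geography(countries, regions, cities, city_country_mapping)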