Column schema (field: type, observed range):

repo: string (lengths 7 to 55)
path: string (lengths 4 to 223)
url: string (lengths 87 to 315)
code: string (lengths 75 to 104k)
code_tokens: list
docstring: string (lengths 1 to 46.9k)
docstring_tokens: list
language: string (1 distinct value)
partition: string (3 distinct values)
avg_line_len: float64 (range 7.91 to 980)
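Each record below lists these fields in that order (repo, path, url, code, code_tokens, docstring, docstring_tokens, language, partition, avg_line_len). A minimal sketch of iterating records with this schema, assuming the rows are exported as JSON Lines to a hypothetical file named code_docstring_corpus.jsonl:

# Sketch only: the filename and JSON Lines layout are assumptions, not part of the dump.
import json

with open("code_docstring_corpus.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        # e.g. keep only training-partition rows with short average line length
        if record["partition"] == "train" and record["avg_line_len"] < 40:
            print(record["repo"], record["path"], len(record["code_tokens"]))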
xav/Grapefruit
grapefruit.py
https://github.com/xav/Grapefruit/blob/b3d88375be727a3a1ec5839fbc462e0e8e0836e4/grapefruit.py#L869-L912
def html_to_rgb(html): """Convert the HTML color to (r, g, b). Parameters: :html: the HTML definition of the color (#RRGGBB or #RGB or a color name). Returns: The color as an (r, g, b) tuple in the range: r[0...1], g[0...1], b[0...1] Throws: :ValueError: If html is neither a known color name or a hexadecimal RGB representation. >>> '(%g, %g, %g)' % html_to_rgb('#ff8000') '(1, 0.501961, 0)' >>> '(%g, %g, %g)' % html_to_rgb('ff8000') '(1, 0.501961, 0)' >>> '(%g, %g, %g)' % html_to_rgb('#f60') '(1, 0.4, 0)' >>> '(%g, %g, %g)' % html_to_rgb('f60') '(1, 0.4, 0)' >>> '(%g, %g, %g)' % html_to_rgb('lemonchiffon') '(1, 0.980392, 0.803922)' """ html = html.strip().lower() if html[0]=='#': html = html[1:] elif html in NAMED_COLOR: html = NAMED_COLOR[html][1:] if len(html)==6: rgb = html[:2], html[2:4], html[4:] elif len(html)==3: rgb = ['%c%c' % (v,v) for v in html] else: raise ValueError("input #%s is not in #RRGGBB format" % html) return tuple(((int(n, 16) / 255.0) for n in rgb))
[ "def", "html_to_rgb", "(", "html", ")", ":", "html", "=", "html", ".", "strip", "(", ")", ".", "lower", "(", ")", "if", "html", "[", "0", "]", "==", "'#'", ":", "html", "=", "html", "[", "1", ":", "]", "elif", "html", "in", "NAMED_COLOR", ":", "html", "=", "NAMED_COLOR", "[", "html", "]", "[", "1", ":", "]", "if", "len", "(", "html", ")", "==", "6", ":", "rgb", "=", "html", "[", ":", "2", "]", ",", "html", "[", "2", ":", "4", "]", ",", "html", "[", "4", ":", "]", "elif", "len", "(", "html", ")", "==", "3", ":", "rgb", "=", "[", "'%c%c'", "%", "(", "v", ",", "v", ")", "for", "v", "in", "html", "]", "else", ":", "raise", "ValueError", "(", "\"input #%s is not in #RRGGBB format\"", "%", "html", ")", "return", "tuple", "(", "(", "(", "int", "(", "n", ",", "16", ")", "/", "255.0", ")", "for", "n", "in", "rgb", ")", ")" ]
Convert the HTML color to (r, g, b). Parameters: :html: the HTML definition of the color (#RRGGBB or #RGB or a color name). Returns: The color as an (r, g, b) tuple in the range: r[0...1], g[0...1], b[0...1] Throws: :ValueError: If html is neither a known color name or a hexadecimal RGB representation. >>> '(%g, %g, %g)' % html_to_rgb('#ff8000') '(1, 0.501961, 0)' >>> '(%g, %g, %g)' % html_to_rgb('ff8000') '(1, 0.501961, 0)' >>> '(%g, %g, %g)' % html_to_rgb('#f60') '(1, 0.4, 0)' >>> '(%g, %g, %g)' % html_to_rgb('f60') '(1, 0.4, 0)' >>> '(%g, %g, %g)' % html_to_rgb('lemonchiffon') '(1, 0.980392, 0.803922)'
[ "Convert", "the", "HTML", "color", "to", "(", "r", "g", "b", ")", "." ]
python
train
23.977273
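The row above converts an HTML colour (a name, #RGB, or #RRGGBB) to an (r, g, b) tuple in the unit interval. A simplified standalone sketch of the same hex handling, without Grapefruit's NAMED_COLOR table:

# Standalone sketch of the hex-to-unit-RGB conversion shown in the row above
# (simplified: no named-colour lookup, only #RGB and #RRGGBB forms).
def hex_to_unit_rgb(value):
    value = value.strip().lstrip('#').lower()
    if len(value) == 3:                      # expand #f60 -> ff6600
        value = ''.join(ch * 2 for ch in value)
    if len(value) != 6:
        raise ValueError("expected #RGB or #RRGGBB, got %r" % value)
    return tuple(int(value[i:i + 2], 16) / 255.0 for i in range(0, 6, 2))

print(hex_to_unit_rgb('#ff8000'))   # (1.0, 0.5019607843137255, 0.0)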
elsampsa/valkka-live
valkka/live/gui.py
https://github.com/elsampsa/valkka-live/blob/218bb2ecf71c516c85b1b6e075454bba13090cd8/valkka/live/gui.py#L414-L440
def serializeContainers(self): """Serializes the current view of open video grids (i.e. the view) """ """ each serialized container looks like this: dic={# these are used when re-instantiating the view "classname" : self.__class__.__name__, "kwargs" : {}, # parameters that we're used to instantiate this class # these parameters are used by deserialize "x" : self.window.x(), "y" : self.window.y(), "width" : self.window.width(), "height" : self.window.height(), "streams" : streams } """ container_list = [] mvision_container_list = [] for container in self.containers: print("gui: serialize containers : container=", container) container_list.append(container.serialize()) for container in self.mvision_containers: mvision_container_list.append(container.serialize()) return {"container_list" : container_list, "mvision_container_list" : mvision_container_list}
[ "def", "serializeContainers", "(", "self", ")", ":", "\"\"\" each serialized container looks like this:\n dic={# these are used when re-instantiating the view\n \"classname\" : self.__class__.__name__,\n \"kwargs\" : {}, # parameters that we're used to instantiate this class\n # these parameters are used by deserialize\n \"x\" : self.window.x(),\n \"y\" : self.window.y(),\n \"width\" : self.window.width(),\n \"height\" : self.window.height(),\n \"streams\" : streams\n }\n \"\"\"", "container_list", "=", "[", "]", "mvision_container_list", "=", "[", "]", "for", "container", "in", "self", ".", "containers", ":", "print", "(", "\"gui: serialize containers : container=\"", ",", "container", ")", "container_list", ".", "append", "(", "container", ".", "serialize", "(", ")", ")", "for", "container", "in", "self", ".", "mvision_containers", ":", "mvision_container_list", ".", "append", "(", "container", ".", "serialize", "(", ")", ")", "return", "{", "\"container_list\"", ":", "container_list", ",", "\"mvision_container_list\"", ":", "mvision_container_list", "}" ]
Serializes the current view of open video grids (i.e. the view)
[ "Serializes", "the", "current", "view", "of", "open", "video", "grids", "(", "i", ".", "e", ".", "the", "view", ")" ]
python
train
41.814815
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/mongodb.py#L157-L169
def get_empty_ids(self): """ Get documents id with missing targeted field """ cursor = self.get_collection().find( { '_id': {'$in': self._document_ids}, self._field: {'$exists': True} }, {'_id': True} ) return set(self._document_ids) - {doc['_id'] for doc in cursor}
[ "def", "get_empty_ids", "(", "self", ")", ":", "cursor", "=", "self", ".", "get_collection", "(", ")", ".", "find", "(", "{", "'_id'", ":", "{", "'$in'", ":", "self", ".", "_document_ids", "}", ",", "self", ".", "_field", ":", "{", "'$exists'", ":", "True", "}", "}", ",", "{", "'_id'", ":", "True", "}", ")", "return", "set", "(", "self", ".", "_document_ids", ")", "-", "{", "doc", "[", "'_id'", "]", "for", "doc", "in", "cursor", "}" ]
Get documents id with missing targeted field
[ "Get", "documents", "id", "with", "missing", "targeted", "field" ]
python
train
28.538462
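The query above asks MongoDB for the candidate _ids that do have the target field, then set-subtracts to find the documents where it is missing. A pymongo sketch of the same filter-and-projection shape, with illustrative database, collection, and field names:

# Sketch of the same query shape with pymongo; all names here are illustrative.
from pymongo import MongoClient

client = MongoClient()                      # assumes a local mongod is reachable
coll = client["mydb"]["mycoll"]             # hypothetical database / collection
candidate_ids = [1, 2, 3]
field = "checksum"                          # hypothetical target field

cursor = coll.find(
    {"_id": {"$in": candidate_ids}, field: {"$exists": True}},
    {"_id": True},                          # project only the _id
)
missing = set(candidate_ids) - {doc["_id"] for doc in cursor}
print(missing)                              # ids whose documents lack the field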
tanghaibao/goatools
goatools/go_enrichment.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L554-L561
def print_results_adj(results, indent=False, prt=sys.stdout): """Print GOEA results.""" # Print column headers if there are results to be printed if results: prt.write("{R}\n".format(R="\t".join(GOEnrichmentStudy.get_prtflds_default(results)))) # Print the GOEA results for rec in results: prt.write("{R}\n".format(R=rec.__str__(indent=indent)))
[ "def", "print_results_adj", "(", "results", ",", "indent", "=", "False", ",", "prt", "=", "sys", ".", "stdout", ")", ":", "# Print column headers if there are results to be printed", "if", "results", ":", "prt", ".", "write", "(", "\"{R}\\n\"", ".", "format", "(", "R", "=", "\"\\t\"", ".", "join", "(", "GOEnrichmentStudy", ".", "get_prtflds_default", "(", "results", ")", ")", ")", ")", "# Print the GOEA results", "for", "rec", "in", "results", ":", "prt", ".", "write", "(", "\"{R}\\n\"", ".", "format", "(", "R", "=", "rec", ".", "__str__", "(", "indent", "=", "indent", ")", ")", ")" ]
Print GOEA results.
[ "Print", "GOEA", "results", "." ]
python
train
50.25
LISE-B26/pylabcontrol
build/lib/scripts/select_points.py
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/scripts/select_points.py#L136-L200
def toggle_NV(self, pt): ''' If there is not currently a selected NV within self.settings[patch_size] of pt, adds it to the selected list. If there is, removes that point from the selected list. Args: pt: the point to add or remove from the selected list Poststate: updates selected list ''' if not self.data['nv_locations']: #if self.data is empty so this is the first point self.data['nv_locations'].append(pt) self.data['image_data'] = None # clear image data else: # use KDTree to find NV closest to mouse click tree = scipy.spatial.KDTree(self.data['nv_locations']) #does a search with k=1, that is a search for the nearest neighbor, within distance_upper_bound d, i = tree.query(pt,k = 1, distance_upper_bound = self.settings['patch_size']) # removes NV if previously selected if d is not np.inf: self.data['nv_locations'].pop(i) # adds NV if not previously selected else: self.data['nv_locations'].append(pt) # if type is not free we calculate the total points of locations from the first selected points if self.settings['type'] == 'square' and len(self.data['nv_locations'])>1: # here we create a rectangular grid, where pts a and be define the top left and bottom right corner of the rectangle Nx, Ny = self.settings['Nx'], self.settings['Ny'] pta = self.data['nv_locations'][0] ptb = self.data['nv_locations'][1] tmp = np.array([[[pta[0] + 1.0*i*(ptb[0]-pta[0])/(Nx-1), pta[1] + 1.0*j*(ptb[1]-pta[1])/(Ny-1)] for i in range(Nx)] for j in range(Ny)]) self.data['nv_locations'] = np.reshape(tmp, (Nx * Ny, 2)) self.stop() elif self.settings['type'] == 'line' and len(self.data['nv_locations'])>1: # here we create a straight line between points a and b N = self.settings['Nx'] pta = self.data['nv_locations'][0] ptb = self.data['nv_locations'][1] self.data['nv_locations'] = [np.array([pta[0] + 1.0*i*(ptb[0]-pta[0])/(N-1), pta[1] + 1.0*i*(ptb[1]-pta[1])/(N-1)]) for i in range(N)] self.stop() elif self.settings['type'] == 'ring' and len(self.data['nv_locations'])>1: # here we create a circular grid, where pts a and be define the center and the outermost ring Nx, Ny = self.settings['Nx'], self.settings['Ny'] pta = self.data['nv_locations'][0] # center ptb = self.data['nv_locations'][1] # outermost ring # radius of outermost ring: rmax = np.sqrt((pta[0] - ptb[0]) ** 2 + (pta[1] - ptb[1]) ** 2) # create points on rings tmp = [] for r in np.linspace(rmax, 0, Ny + 1)[0:-1]: for theta in np.linspace(0, 2 * np.pi, Nx+1)[0:-1]: tmp += [[r * np.sin(theta)+pta[0], r * np.cos(theta)+pta[1]]] self.data['nv_locations'] = np.array(tmp) self.stop()
[ "def", "toggle_NV", "(", "self", ",", "pt", ")", ":", "if", "not", "self", ".", "data", "[", "'nv_locations'", "]", ":", "#if self.data is empty so this is the first point", "self", ".", "data", "[", "'nv_locations'", "]", ".", "append", "(", "pt", ")", "self", ".", "data", "[", "'image_data'", "]", "=", "None", "# clear image data", "else", ":", "# use KDTree to find NV closest to mouse click", "tree", "=", "scipy", ".", "spatial", ".", "KDTree", "(", "self", ".", "data", "[", "'nv_locations'", "]", ")", "#does a search with k=1, that is a search for the nearest neighbor, within distance_upper_bound", "d", ",", "i", "=", "tree", ".", "query", "(", "pt", ",", "k", "=", "1", ",", "distance_upper_bound", "=", "self", ".", "settings", "[", "'patch_size'", "]", ")", "# removes NV if previously selected", "if", "d", "is", "not", "np", ".", "inf", ":", "self", ".", "data", "[", "'nv_locations'", "]", ".", "pop", "(", "i", ")", "# adds NV if not previously selected", "else", ":", "self", ".", "data", "[", "'nv_locations'", "]", ".", "append", "(", "pt", ")", "# if type is not free we calculate the total points of locations from the first selected points", "if", "self", ".", "settings", "[", "'type'", "]", "==", "'square'", "and", "len", "(", "self", ".", "data", "[", "'nv_locations'", "]", ")", ">", "1", ":", "# here we create a rectangular grid, where pts a and be define the top left and bottom right corner of the rectangle", "Nx", ",", "Ny", "=", "self", ".", "settings", "[", "'Nx'", "]", ",", "self", ".", "settings", "[", "'Ny'", "]", "pta", "=", "self", ".", "data", "[", "'nv_locations'", "]", "[", "0", "]", "ptb", "=", "self", ".", "data", "[", "'nv_locations'", "]", "[", "1", "]", "tmp", "=", "np", ".", "array", "(", "[", "[", "[", "pta", "[", "0", "]", "+", "1.0", "*", "i", "*", "(", "ptb", "[", "0", "]", "-", "pta", "[", "0", "]", ")", "/", "(", "Nx", "-", "1", ")", ",", "pta", "[", "1", "]", "+", "1.0", "*", "j", "*", "(", "ptb", "[", "1", "]", "-", "pta", "[", "1", "]", ")", "/", "(", "Ny", "-", "1", ")", "]", "for", "i", "in", "range", "(", "Nx", ")", "]", "for", "j", "in", "range", "(", "Ny", ")", "]", ")", "self", ".", "data", "[", "'nv_locations'", "]", "=", "np", ".", "reshape", "(", "tmp", ",", "(", "Nx", "*", "Ny", ",", "2", ")", ")", "self", ".", "stop", "(", ")", "elif", "self", ".", "settings", "[", "'type'", "]", "==", "'line'", "and", "len", "(", "self", ".", "data", "[", "'nv_locations'", "]", ")", ">", "1", ":", "# here we create a straight line between points a and b", "N", "=", "self", ".", "settings", "[", "'Nx'", "]", "pta", "=", "self", ".", "data", "[", "'nv_locations'", "]", "[", "0", "]", "ptb", "=", "self", ".", "data", "[", "'nv_locations'", "]", "[", "1", "]", "self", ".", "data", "[", "'nv_locations'", "]", "=", "[", "np", ".", "array", "(", "[", "pta", "[", "0", "]", "+", "1.0", "*", "i", "*", "(", "ptb", "[", "0", "]", "-", "pta", "[", "0", "]", ")", "/", "(", "N", "-", "1", ")", ",", "pta", "[", "1", "]", "+", "1.0", "*", "i", "*", "(", "ptb", "[", "1", "]", "-", "pta", "[", "1", "]", ")", "/", "(", "N", "-", "1", ")", "]", ")", "for", "i", "in", "range", "(", "N", ")", "]", "self", ".", "stop", "(", ")", "elif", "self", ".", "settings", "[", "'type'", "]", "==", "'ring'", "and", "len", "(", "self", ".", "data", "[", "'nv_locations'", "]", ")", ">", "1", ":", "# here we create a circular grid, where pts a and be define the center and the outermost ring", "Nx", ",", "Ny", "=", "self", ".", "settings", "[", "'Nx'", "]", ",", "self", ".", "settings", "[", 
"'Ny'", "]", "pta", "=", "self", ".", "data", "[", "'nv_locations'", "]", "[", "0", "]", "# center", "ptb", "=", "self", ".", "data", "[", "'nv_locations'", "]", "[", "1", "]", "# outermost ring", "# radius of outermost ring:", "rmax", "=", "np", ".", "sqrt", "(", "(", "pta", "[", "0", "]", "-", "ptb", "[", "0", "]", ")", "**", "2", "+", "(", "pta", "[", "1", "]", "-", "ptb", "[", "1", "]", ")", "**", "2", ")", "# create points on rings", "tmp", "=", "[", "]", "for", "r", "in", "np", ".", "linspace", "(", "rmax", ",", "0", ",", "Ny", "+", "1", ")", "[", "0", ":", "-", "1", "]", ":", "for", "theta", "in", "np", ".", "linspace", "(", "0", ",", "2", "*", "np", ".", "pi", ",", "Nx", "+", "1", ")", "[", "0", ":", "-", "1", "]", ":", "tmp", "+=", "[", "[", "r", "*", "np", ".", "sin", "(", "theta", ")", "+", "pta", "[", "0", "]", ",", "r", "*", "np", ".", "cos", "(", "theta", ")", "+", "pta", "[", "1", "]", "]", "]", "self", ".", "data", "[", "'nv_locations'", "]", "=", "np", ".", "array", "(", "tmp", ")", "self", ".", "stop", "(", ")" ]
If there is not currently a selected NV within self.settings[patch_size] of pt, adds it to the selected list. If there is, removes that point from the selected list. Args: pt: the point to add or remove from the selected list Poststate: updates selected list
[ "If", "there", "is", "not", "currently", "a", "selected", "NV", "within", "self", ".", "settings", "[", "patch_size", "]", "of", "pt", "adds", "it", "to", "the", "selected", "list", ".", "If", "there", "is", "removes", "that", "point", "from", "the", "selected", "list", ".", "Args", ":", "pt", ":", "the", "point", "to", "add", "or", "remove", "from", "the", "selected", "list" ]
python
train
47.553846
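In the 'square' branch above, the grid is built by linearly interpolating Nx points in x and Ny points in y between the two selected corner points. A standalone numpy sketch of that grid construction (not taken from pylabcontrol):

# Build an Nx-by-Ny grid between two corners by linear interpolation,
# matching the comprehension in the row above (np.linspace gives the same points).
import numpy as np

def rectangular_grid(pta, ptb, Nx, Ny):
    xs = np.linspace(pta[0], ptb[0], Nx)
    ys = np.linspace(pta[1], ptb[1], Ny)
    return np.array([[x, y] for y in ys for x in xs])   # shape (Nx * Ny, 2)

print(rectangular_grid((0.0, 0.0), (1.0, 2.0), 3, 2))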
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAUtil/QADateTools.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAUtil/QADateTools.py#L47-L64
def QA_util_getBetweenQuarter(begin_date, end_date): """ #加上每季度的起始日期、结束日期 """ quarter_list = {} month_list = QA_util_getBetweenMonth(begin_date, end_date) for value in month_list: tempvalue = value.split("-") year = tempvalue[0] if tempvalue[1] in ['01', '02', '03']: quarter_list[year + "Q1"] = ['%s-01-01' % year, '%s-03-31' % year] elif tempvalue[1] in ['04', '05', '06']: quarter_list[year + "Q2"] = ['%s-04-01' % year, '%s-06-30' % year] elif tempvalue[1] in ['07', '08', '09']: quarter_list[year + "Q3"] = ['%s-07-31' % year, '%s-09-30' % year] elif tempvalue[1] in ['10', '11', '12']: quarter_list[year + "Q4"] = ['%s-10-01' % year, '%s-12-31' % year] return(quarter_list)
[ "def", "QA_util_getBetweenQuarter", "(", "begin_date", ",", "end_date", ")", ":", "quarter_list", "=", "{", "}", "month_list", "=", "QA_util_getBetweenMonth", "(", "begin_date", ",", "end_date", ")", "for", "value", "in", "month_list", ":", "tempvalue", "=", "value", ".", "split", "(", "\"-\"", ")", "year", "=", "tempvalue", "[", "0", "]", "if", "tempvalue", "[", "1", "]", "in", "[", "'01'", ",", "'02'", ",", "'03'", "]", ":", "quarter_list", "[", "year", "+", "\"Q1\"", "]", "=", "[", "'%s-01-01'", "%", "year", ",", "'%s-03-31'", "%", "year", "]", "elif", "tempvalue", "[", "1", "]", "in", "[", "'04'", ",", "'05'", ",", "'06'", "]", ":", "quarter_list", "[", "year", "+", "\"Q2\"", "]", "=", "[", "'%s-04-01'", "%", "year", ",", "'%s-06-30'", "%", "year", "]", "elif", "tempvalue", "[", "1", "]", "in", "[", "'07'", ",", "'08'", ",", "'09'", "]", ":", "quarter_list", "[", "year", "+", "\"Q3\"", "]", "=", "[", "'%s-07-31'", "%", "year", ",", "'%s-09-30'", "%", "year", "]", "elif", "tempvalue", "[", "1", "]", "in", "[", "'10'", ",", "'11'", ",", "'12'", "]", ":", "quarter_list", "[", "year", "+", "\"Q4\"", "]", "=", "[", "'%s-10-01'", "%", "year", ",", "'%s-12-31'", "%", "year", "]", "return", "(", "quarter_list", ")" ]
#加上每季度的起始日期、结束日期
[ "#加上每季度的起始日期、结束日期" ]
python
train
43.666667
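Note that the Q3 start date in the row's code reads '%s-07-31', which looks like a typo for '%s-07-01' given that the other quarters start on the first of the month. A standalone sketch of the same quarter bucketing, assuming first-of-month starts throughout:

# Sketch of the quarter bucketing above with consistent first-of-month starts
# (the '07-01' Q3 start is an assumption; the row's source uses '07-31').
QUARTER_BOUNDS = {
    "Q1": ("01-01", "03-31"),
    "Q2": ("04-01", "06-30"),
    "Q3": ("07-01", "09-30"),
    "Q4": ("10-01", "12-31"),
}

def quarter_for_month(year, month):
    q = "Q%d" % ((int(month) - 1) // 3 + 1)
    start, end = QUARTER_BOUNDS[q]
    return year + q, ["%s-%s" % (year, start), "%s-%s" % (year, end)]

print(quarter_for_month("2023", "08"))   # ('2023Q3', ['2023-07-01', '2023-09-30'])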
cons3rt/pycons3rt
pycons3rt/deployment.py
https://github.com/cons3rt/pycons3rt/blob/f004ab3a35c5bff2f698131fef3b2a8ed5a7596d/pycons3rt/deployment.py#L500-L507
def set_deployment_name(self): """Sets the deployment name from deployment properties :return: None """ log = logging.getLogger(self.cls_logger + '.set_deployment_name') self.deployment_name = self.get_value('cons3rt.deployment.name') log.info('Found deployment name: {n}'.format(n=self.deployment_name))
[ "def", "set_deployment_name", "(", "self", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "self", ".", "cls_logger", "+", "'.set_deployment_name'", ")", "self", ".", "deployment_name", "=", "self", ".", "get_value", "(", "'cons3rt.deployment.name'", ")", "log", ".", "info", "(", "'Found deployment name: {n}'", ".", "format", "(", "n", "=", "self", ".", "deployment_name", ")", ")" ]
Sets the deployment name from deployment properties :return: None
[ "Sets", "the", "deployment", "name", "from", "deployment", "properties" ]
python
train
43.25
apache/spark
python/pyspark/sql/utils.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/utils.py#L130-L147
def require_minimum_pandas_version(): """ Raise ImportError if minimum version of Pandas is not installed """ # TODO(HyukjinKwon): Relocate and deduplicate the version specification. minimum_pandas_version = "0.19.2" from distutils.version import LooseVersion try: import pandas have_pandas = True except ImportError: have_pandas = False if not have_pandas: raise ImportError("Pandas >= %s must be installed; however, " "it was not found." % minimum_pandas_version) if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version): raise ImportError("Pandas >= %s must be installed; however, " "your version was %s." % (minimum_pandas_version, pandas.__version__))
[ "def", "require_minimum_pandas_version", "(", ")", ":", "# TODO(HyukjinKwon): Relocate and deduplicate the version specification.", "minimum_pandas_version", "=", "\"0.19.2\"", "from", "distutils", ".", "version", "import", "LooseVersion", "try", ":", "import", "pandas", "have_pandas", "=", "True", "except", "ImportError", ":", "have_pandas", "=", "False", "if", "not", "have_pandas", ":", "raise", "ImportError", "(", "\"Pandas >= %s must be installed; however, \"", "\"it was not found.\"", "%", "minimum_pandas_version", ")", "if", "LooseVersion", "(", "pandas", ".", "__version__", ")", "<", "LooseVersion", "(", "minimum_pandas_version", ")", ":", "raise", "ImportError", "(", "\"Pandas >= %s must be installed; however, \"", "\"your version was %s.\"", "%", "(", "minimum_pandas_version", ",", "pandas", ".", "__version__", ")", ")" ]
Raise ImportError if minimum version of Pandas is not installed
[ "Raise", "ImportError", "if", "minimum", "version", "of", "Pandas", "is", "not", "installed" ]
python
train
43.666667
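The row above relies on distutils.version.LooseVersion, which is deprecated in recent Python releases. An alternative sketch of the same minimum-version check using the third-party packaging library (this is not how PySpark itself implements it):

# Alternative sketch using packaging.version instead of distutils
# (requires the 'packaging' package; not PySpark's own implementation).
from packaging.version import Version

MINIMUM_PANDAS_VERSION = "0.19.2"

def require_minimum_pandas_version():
    try:
        import pandas
    except ImportError:
        raise ImportError("Pandas >= %s must be installed; however, "
                          "it was not found." % MINIMUM_PANDAS_VERSION)
    if Version(pandas.__version__) < Version(MINIMUM_PANDAS_VERSION):
        raise ImportError("Pandas >= %s must be installed; however, "
                          "your version was %s."
                          % (MINIMUM_PANDAS_VERSION, pandas.__version__))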
agabrown/PyGaia
examples/relativeParallaxErrorsVsDistance.py
https://github.com/agabrown/PyGaia/blob/ae972b0622a15f713ffae471f925eac25ccdae47/examples/relativeParallaxErrorsVsDistance.py#L29-L70
def makePlot(pdf=False, png=False): """ Plot relative parallax errors as a function of distance for stars of a given spectral type. Parameters ---------- args - command line arguments """ logdistancekpc = np.linspace(-1,np.log10(20.0),100) sptVabsAndVmini=OrderedDict([('K0V',(5.58,0.87)), ('G5V',(4.78,0.74)), ('G0V',(4.24,0.67)), ('F5V',(3.50,0.50)), ('F0V',(2.98,0.38)), ('RC',(0.8,1.0))]) lines={} fig=plt.figure(figsize=(10,6.5)) currentAxis=plt.gca() for spt in sptVabsAndVmini.keys(): vmag=sptVabsAndVmini[spt][0]+5.0*logdistancekpc+10.0 indices=(vmag>14) & (vmag<16) gmag=vmag+gminvFromVmini(sptVabsAndVmini[spt][1]) parerrors=parallaxErrorSkyAvg(gmag,sptVabsAndVmini[spt][1]) relparerrors=parerrors*10**logdistancekpc/1000.0 plt.loglog(10**logdistancekpc, relparerrors,'--k',lw=1) plt.loglog(10**logdistancekpc[indices], relparerrors[indices],'-',label=spt) plt.xlim(0.1,20.0) plt.ylim(0.001,0.5) plt.text(0.9, 0.05,'Colours indicate $14<V<16$', horizontalalignment='right', verticalalignment='bottom', transform = currentAxis.transAxes) plt.legend(loc=2) plt.xlabel('distance [kpc]') plt.ylabel('$\\sigma_\\varpi/\\varpi$') plt.grid(which='both') if (args['pdfOutput']): plt.savefig('RelativeParallaxErrorsVsDist.pdf') elif (args['pngOutput']): plt.savefig('RelativeParallaxErrorsVsDist.png') else: plt.show()
[ "def", "makePlot", "(", "pdf", "=", "False", ",", "png", "=", "False", ")", ":", "logdistancekpc", "=", "np", ".", "linspace", "(", "-", "1", ",", "np", ".", "log10", "(", "20.0", ")", ",", "100", ")", "sptVabsAndVmini", "=", "OrderedDict", "(", "[", "(", "'K0V'", ",", "(", "5.58", ",", "0.87", ")", ")", ",", "(", "'G5V'", ",", "(", "4.78", ",", "0.74", ")", ")", ",", "(", "'G0V'", ",", "(", "4.24", ",", "0.67", ")", ")", ",", "(", "'F5V'", ",", "(", "3.50", ",", "0.50", ")", ")", ",", "(", "'F0V'", ",", "(", "2.98", ",", "0.38", ")", ")", ",", "(", "'RC'", ",", "(", "0.8", ",", "1.0", ")", ")", "]", ")", "lines", "=", "{", "}", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "10", ",", "6.5", ")", ")", "currentAxis", "=", "plt", ".", "gca", "(", ")", "for", "spt", "in", "sptVabsAndVmini", ".", "keys", "(", ")", ":", "vmag", "=", "sptVabsAndVmini", "[", "spt", "]", "[", "0", "]", "+", "5.0", "*", "logdistancekpc", "+", "10.0", "indices", "=", "(", "vmag", ">", "14", ")", "&", "(", "vmag", "<", "16", ")", "gmag", "=", "vmag", "+", "gminvFromVmini", "(", "sptVabsAndVmini", "[", "spt", "]", "[", "1", "]", ")", "parerrors", "=", "parallaxErrorSkyAvg", "(", "gmag", ",", "sptVabsAndVmini", "[", "spt", "]", "[", "1", "]", ")", "relparerrors", "=", "parerrors", "*", "10", "**", "logdistancekpc", "/", "1000.0", "plt", ".", "loglog", "(", "10", "**", "logdistancekpc", ",", "relparerrors", ",", "'--k'", ",", "lw", "=", "1", ")", "plt", ".", "loglog", "(", "10", "**", "logdistancekpc", "[", "indices", "]", ",", "relparerrors", "[", "indices", "]", ",", "'-'", ",", "label", "=", "spt", ")", "plt", ".", "xlim", "(", "0.1", ",", "20.0", ")", "plt", ".", "ylim", "(", "0.001", ",", "0.5", ")", "plt", ".", "text", "(", "0.9", ",", "0.05", ",", "'Colours indicate $14<V<16$'", ",", "horizontalalignment", "=", "'right'", ",", "verticalalignment", "=", "'bottom'", ",", "transform", "=", "currentAxis", ".", "transAxes", ")", "plt", ".", "legend", "(", "loc", "=", "2", ")", "plt", ".", "xlabel", "(", "'distance [kpc]'", ")", "plt", ".", "ylabel", "(", "'$\\\\sigma_\\\\varpi/\\\\varpi$'", ")", "plt", ".", "grid", "(", "which", "=", "'both'", ")", "if", "(", "args", "[", "'pdfOutput'", "]", ")", ":", "plt", ".", "savefig", "(", "'RelativeParallaxErrorsVsDist.pdf'", ")", "elif", "(", "args", "[", "'pngOutput'", "]", ")", ":", "plt", ".", "savefig", "(", "'RelativeParallaxErrorsVsDist.png'", ")", "else", ":", "plt", ".", "show", "(", ")" ]
Plot relative parallax errors as a function of distance for stars of a given spectral type. Parameters ---------- args - command line arguments
[ "Plot", "relative", "parallax", "errors", "as", "a", "function", "of", "distance", "for", "stars", "of", "a", "given", "spectral", "type", "." ]
python
test
33.047619
Autodesk/aomi
aomi/model/backend.py
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/model/backend.py#L108-L125
def sync_tunables(self, vault_client): """Synchtonizes any tunables we have set""" if not self.config: return a_prefix = self.tune_prefix if self.tune_prefix: a_prefix = "%s/" % self.tune_prefix v_path = "sys/mounts/%s%s/tune" % (a_prefix, self.path) a_obj = self.config.copy() if 'description' in a_obj: del a_obj['description'] t_resp = vault_client.write(v_path, **a_obj) if t_resp and 'errors' in t_resp and t_resp['errors']: e_msg = "Unable to update tuning info for %s" % self raise aomi_excep.VaultData(e_msg)
[ "def", "sync_tunables", "(", "self", ",", "vault_client", ")", ":", "if", "not", "self", ".", "config", ":", "return", "a_prefix", "=", "self", ".", "tune_prefix", "if", "self", ".", "tune_prefix", ":", "a_prefix", "=", "\"%s/\"", "%", "self", ".", "tune_prefix", "v_path", "=", "\"sys/mounts/%s%s/tune\"", "%", "(", "a_prefix", ",", "self", ".", "path", ")", "a_obj", "=", "self", ".", "config", ".", "copy", "(", ")", "if", "'description'", "in", "a_obj", ":", "del", "a_obj", "[", "'description'", "]", "t_resp", "=", "vault_client", ".", "write", "(", "v_path", ",", "*", "*", "a_obj", ")", "if", "t_resp", "and", "'errors'", "in", "t_resp", "and", "t_resp", "[", "'errors'", "]", ":", "e_msg", "=", "\"Unable to update tuning info for %s\"", "%", "self", "raise", "aomi_excep", ".", "VaultData", "(", "e_msg", ")" ]
Synchtonizes any tunables we have set
[ "Synchtonizes", "any", "tunables", "we", "have", "set" ]
python
train
35.222222
wikimedia/ores
ores/scoring/models/rev_id_scorer.py
https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/scoring/models/rev_id_scorer.py#L67-L76
def calculate_statistics(self): "Jam some data through to generate statistics" rev_ids = range(0, 100, 1) feature_values = zip(rev_ids, [0] * 100) scores = [self.score(f) for f in feature_values] labels = [s['prediction'] for s in scores] statistics = Classification(labels, threshold_ndigits=1, decision_key='probability') score_labels = list(zip(scores, labels)) statistics.fit(score_labels) return statistics
[ "def", "calculate_statistics", "(", "self", ")", ":", "rev_ids", "=", "range", "(", "0", ",", "100", ",", "1", ")", "feature_values", "=", "zip", "(", "rev_ids", ",", "[", "0", "]", "*", "100", ")", "scores", "=", "[", "self", ".", "score", "(", "f", ")", "for", "f", "in", "feature_values", "]", "labels", "=", "[", "s", "[", "'prediction'", "]", "for", "s", "in", "scores", "]", "statistics", "=", "Classification", "(", "labels", ",", "threshold_ndigits", "=", "1", ",", "decision_key", "=", "'probability'", ")", "score_labels", "=", "list", "(", "zip", "(", "scores", ",", "labels", ")", ")", "statistics", ".", "fit", "(", "score_labels", ")", "return", "statistics" ]
Jam some data through to generate statistics
[ "Jam", "some", "data", "through", "to", "generate", "statistics" ]
python
train
47.4
Metatab/metapack
metapack/jupyter/exporters.py
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/exporters.py#L279-L284
def extract_terms(self, nb): """Extract some term values, usually set with tags or metadata""" emt = ExtractMetatabTerms() emt.preprocess(nb, {}) return emt.terms
[ "def", "extract_terms", "(", "self", ",", "nb", ")", ":", "emt", "=", "ExtractMetatabTerms", "(", ")", "emt", ".", "preprocess", "(", "nb", ",", "{", "}", ")", "return", "emt", ".", "terms" ]
Extract some term values, usually set with tags or metadata
[ "Extract", "some", "term", "values", "usually", "set", "with", "tags", "or", "metadata" ]
python
train
31.666667
NLeSC/scriptcwl
scriptcwl/step.py
https://github.com/NLeSC/scriptcwl/blob/33bb847a875379da3a5702c7a98dfa585306b960/scriptcwl/step.py#L251-L265
def list_inputs(self): """Return a string listing all the Step's input names and their types. The types are returned in a copy/pastable format, so if the type is `string`, `'string'` (with single quotes) is returned. Returns: str containing all input names and types. """ doc = [] for inp, typ in self.input_types.items(): if isinstance(typ, six.string_types): typ = "'{}'".format(typ) doc.append('{}: {}'.format(inp, typ)) return '\n'.join(doc)
[ "def", "list_inputs", "(", "self", ")", ":", "doc", "=", "[", "]", "for", "inp", ",", "typ", "in", "self", ".", "input_types", ".", "items", "(", ")", ":", "if", "isinstance", "(", "typ", ",", "six", ".", "string_types", ")", ":", "typ", "=", "\"'{}'\"", ".", "format", "(", "typ", ")", "doc", ".", "append", "(", "'{}: {}'", ".", "format", "(", "inp", ",", "typ", ")", ")", "return", "'\\n'", ".", "join", "(", "doc", ")" ]
Return a string listing all the Step's input names and their types. The types are returned in a copy/pastable format, so if the type is `string`, `'string'` (with single quotes) is returned. Returns: str containing all input names and types.
[ "Return", "a", "string", "listing", "all", "the", "Step", "s", "input", "names", "and", "their", "types", "." ]
python
train
36.6
kodexlab/reliure
reliure/schema.py
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/schema.py#L585-L605
def set(self, keys): """ Set new keys. Mind this will clear all attributes and keys before adding new keys >>> doc = Doc(docnum='1') >>> doc.terms = Text(multi=True, attrs={'tf': Numeric(default=1)}) >>> doc.terms.add('copmputer', tf=12) >>> doc.terms.tf.values() [12] >>> doc.terms.set(['keyboard', 'mouse']) >>> list(doc.terms) ['keyboard', 'mouse'] >>> doc.terms.tf.values() [1, 1] """ # clear keys and atributes self._keys = OrderedDict() self.clear_attributes() _validate = self._ftype.validate for key in keys: self.add(_validate(key))
[ "def", "set", "(", "self", ",", "keys", ")", ":", "# clear keys and atributes", "self", ".", "_keys", "=", "OrderedDict", "(", ")", "self", ".", "clear_attributes", "(", ")", "_validate", "=", "self", ".", "_ftype", ".", "validate", "for", "key", "in", "keys", ":", "self", ".", "add", "(", "_validate", "(", "key", ")", ")" ]
Set new keys. Mind this will clear all attributes and keys before adding new keys >>> doc = Doc(docnum='1') >>> doc.terms = Text(multi=True, attrs={'tf': Numeric(default=1)}) >>> doc.terms.add('copmputer', tf=12) >>> doc.terms.tf.values() [12] >>> doc.terms.set(['keyboard', 'mouse']) >>> list(doc.terms) ['keyboard', 'mouse'] >>> doc.terms.tf.values() [1, 1]
[ "Set", "new", "keys", ".", "Mind", "this", "will", "clear", "all", "attributes", "and", "keys", "before", "adding", "new", "keys", ">>>", "doc", "=", "Doc", "(", "docnum", "=", "1", ")", ">>>", "doc", ".", "terms", "=", "Text", "(", "multi", "=", "True", "attrs", "=", "{", "tf", ":", "Numeric", "(", "default", "=", "1", ")", "}", ")", ">>>", "doc", ".", "terms", ".", "add", "(", "copmputer", "tf", "=", "12", ")", ">>>", "doc", ".", "terms", ".", "tf", ".", "values", "()", "[", "12", "]", ">>>", "doc", ".", "terms", ".", "set", "(", "[", "keyboard", "mouse", "]", ")", ">>>", "list", "(", "doc", ".", "terms", ")", "[", "keyboard", "mouse", "]", ">>>", "doc", ".", "terms", ".", "tf", ".", "values", "()", "[", "1", "1", "]" ]
python
train
32.714286
christophertbrown/bioscripts
ctbBio/compare_aligned.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L79-L89
def compare_seqs_leven(seqs): """ calculate Levenshtein ratio of sequences """ A, B, ignore_gaps = seqs a, b = remove_gaps(A[1], B[1]) # actual sequences if len(a) != len(b): print('# reads are not the same length', file=sys.stderr) exit() pident = lr(a, b) * 100 return A[0], B[0], pident
[ "def", "compare_seqs_leven", "(", "seqs", ")", ":", "A", ",", "B", ",", "ignore_gaps", "=", "seqs", "a", ",", "b", "=", "remove_gaps", "(", "A", "[", "1", "]", ",", "B", "[", "1", "]", ")", "# actual sequences", "if", "len", "(", "a", ")", "!=", "len", "(", "b", ")", ":", "print", "(", "'# reads are not the same length'", ",", "file", "=", "sys", ".", "stderr", ")", "exit", "(", ")", "pident", "=", "lr", "(", "a", ",", "b", ")", "*", "100", "return", "A", "[", "0", "]", ",", "B", "[", "0", "]", ",", "pident" ]
calculate Levenshtein ratio of sequences
[ "calculate", "Levenshtein", "ratio", "of", "sequences" ]
python
train
29.727273
BerkeleyAutomation/perception
perception/kinect2_sensor.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/kinect2_sensor.py#L236-L256
def frames(self, skip_registration=False): """Retrieve a new frame from the Kinect and convert it to a ColorImage, a DepthImage, and an IrImage. Parameters ---------- skip_registration : bool If True, the registration step is skipped. Returns ------- :obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray` The ColorImage, DepthImage, and IrImage of the current frame. Raises ------ RuntimeError If the Kinect stream is not running. """ color_im, depth_im, ir_im, _ = self._frames_and_index_map(skip_registration=skip_registration) return color_im, depth_im, ir_im
[ "def", "frames", "(", "self", ",", "skip_registration", "=", "False", ")", ":", "color_im", ",", "depth_im", ",", "ir_im", ",", "_", "=", "self", ".", "_frames_and_index_map", "(", "skip_registration", "=", "skip_registration", ")", "return", "color_im", ",", "depth_im", ",", "ir_im" ]
Retrieve a new frame from the Kinect and convert it to a ColorImage, a DepthImage, and an IrImage. Parameters ---------- skip_registration : bool If True, the registration step is skipped. Returns ------- :obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray` The ColorImage, DepthImage, and IrImage of the current frame. Raises ------ RuntimeError If the Kinect stream is not running.
[ "Retrieve", "a", "new", "frame", "from", "the", "Kinect", "and", "convert", "it", "to", "a", "ColorImage", "a", "DepthImage", "and", "an", "IrImage", "." ]
python
train
34.761905
saltstack/salt
salt/scripts.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/scripts.py#L538-L549
def salt_extend(extension, name, description, salt_dir, merge): ''' Quickstart for developing on the saltstack installation .. versionadded:: 2016.11.0 ''' import salt.utils.extend salt.utils.extend.run(extension=extension, name=name, description=description, salt_dir=salt_dir, merge=merge)
[ "def", "salt_extend", "(", "extension", ",", "name", ",", "description", ",", "salt_dir", ",", "merge", ")", ":", "import", "salt", ".", "utils", ".", "extend", "salt", ".", "utils", ".", "extend", ".", "run", "(", "extension", "=", "extension", ",", "name", "=", "name", ",", "description", "=", "description", ",", "salt_dir", "=", "salt_dir", ",", "merge", "=", "merge", ")" ]
Quickstart for developing on the saltstack installation .. versionadded:: 2016.11.0
[ "Quickstart", "for", "developing", "on", "the", "saltstack", "installation" ]
python
train
34.083333
Clinical-Genomics/scout
scout/parse/panel.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/parse/panel.py#L295-L333
def parse_panel_app_gene(app_gene, hgnc_map): """Parse a panel app formated gene Args: app_gene(dict): Dict with panel app info hgnc_map(dict): Map from hgnc_symbol to hgnc_id Returns: gene_info(dict): Scout infromation """ gene_info = {} confidence_level = app_gene['LevelOfConfidence'] # Return empty gene if not confident gene if not confidence_level == 'HighEvidence': return gene_info hgnc_symbol = app_gene['GeneSymbol'] # Returns a set of hgnc ids hgnc_ids = get_correct_ids(hgnc_symbol, hgnc_map) if not hgnc_ids: LOG.warning("Gene %s does not exist in database. Skipping gene...", hgnc_symbol) return gene_info if len(hgnc_ids) > 1: LOG.warning("Gene %s has unclear identifier. Choose random id", hgnc_symbol) gene_info['hgnc_symbol'] = hgnc_symbol for hgnc_id in hgnc_ids: gene_info['hgnc_id'] = hgnc_id gene_info['reduced_penetrance'] = INCOMPLETE_PENETRANCE_MAP.get(app_gene['Penetrance']) inheritance_models = [] for model in MODELS_MAP.get(app_gene['ModeOfInheritance'],[]): inheritance_models.append(model) gene_info['inheritance_models'] = inheritance_models return gene_info
[ "def", "parse_panel_app_gene", "(", "app_gene", ",", "hgnc_map", ")", ":", "gene_info", "=", "{", "}", "confidence_level", "=", "app_gene", "[", "'LevelOfConfidence'", "]", "# Return empty gene if not confident gene", "if", "not", "confidence_level", "==", "'HighEvidence'", ":", "return", "gene_info", "hgnc_symbol", "=", "app_gene", "[", "'GeneSymbol'", "]", "# Returns a set of hgnc ids", "hgnc_ids", "=", "get_correct_ids", "(", "hgnc_symbol", ",", "hgnc_map", ")", "if", "not", "hgnc_ids", ":", "LOG", ".", "warning", "(", "\"Gene %s does not exist in database. Skipping gene...\"", ",", "hgnc_symbol", ")", "return", "gene_info", "if", "len", "(", "hgnc_ids", ")", ">", "1", ":", "LOG", ".", "warning", "(", "\"Gene %s has unclear identifier. Choose random id\"", ",", "hgnc_symbol", ")", "gene_info", "[", "'hgnc_symbol'", "]", "=", "hgnc_symbol", "for", "hgnc_id", "in", "hgnc_ids", ":", "gene_info", "[", "'hgnc_id'", "]", "=", "hgnc_id", "gene_info", "[", "'reduced_penetrance'", "]", "=", "INCOMPLETE_PENETRANCE_MAP", ".", "get", "(", "app_gene", "[", "'Penetrance'", "]", ")", "inheritance_models", "=", "[", "]", "for", "model", "in", "MODELS_MAP", ".", "get", "(", "app_gene", "[", "'ModeOfInheritance'", "]", ",", "[", "]", ")", ":", "inheritance_models", ".", "append", "(", "model", ")", "gene_info", "[", "'inheritance_models'", "]", "=", "inheritance_models", "return", "gene_info" ]
Parse a panel app formated gene Args: app_gene(dict): Dict with panel app info hgnc_map(dict): Map from hgnc_symbol to hgnc_id Returns: gene_info(dict): Scout infromation
[ "Parse", "a", "panel", "app", "formated", "gene", "Args", ":", "app_gene", "(", "dict", ")", ":", "Dict", "with", "panel", "app", "info", "hgnc_map", "(", "dict", ")", ":", "Map", "from", "hgnc_symbol", "to", "hgnc_id", "Returns", ":", "gene_info", "(", "dict", ")", ":", "Scout", "infromation" ]
python
test
31.74359
wummel/dosage
dosagelib/events.py
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/events.py#L200-L218
def comicDownloaded(self, comic, filename, text=None): """Write HTML entry for downloaded comic.""" if self.lastComic != comic.name: self.newComic(comic) size = None if self.allowdownscale: size = getDimensionForImage(filename, MaxImageSize) imageUrl = self.getUrlFromFilename(filename) pageUrl = comic.referrer if pageUrl != self.lastUrl: self.html.write(u'<li><a href="%s">%s</a>\n' % (pageUrl, pageUrl)) self.html.write(u'<br/><img src="%s"' % imageUrl) if size: self.html.write(' width="%d" height="%d"' % size) self.html.write('/>\n') if text: self.html.write(u'<br/>%s\n' % text) self.lastComic = comic.name self.lastUrl = pageUrl
[ "def", "comicDownloaded", "(", "self", ",", "comic", ",", "filename", ",", "text", "=", "None", ")", ":", "if", "self", ".", "lastComic", "!=", "comic", ".", "name", ":", "self", ".", "newComic", "(", "comic", ")", "size", "=", "None", "if", "self", ".", "allowdownscale", ":", "size", "=", "getDimensionForImage", "(", "filename", ",", "MaxImageSize", ")", "imageUrl", "=", "self", ".", "getUrlFromFilename", "(", "filename", ")", "pageUrl", "=", "comic", ".", "referrer", "if", "pageUrl", "!=", "self", ".", "lastUrl", ":", "self", ".", "html", ".", "write", "(", "u'<li><a href=\"%s\">%s</a>\\n'", "%", "(", "pageUrl", ",", "pageUrl", ")", ")", "self", ".", "html", ".", "write", "(", "u'<br/><img src=\"%s\"'", "%", "imageUrl", ")", "if", "size", ":", "self", ".", "html", ".", "write", "(", "' width=\"%d\" height=\"%d\"'", "%", "size", ")", "self", ".", "html", ".", "write", "(", "'/>\\n'", ")", "if", "text", ":", "self", ".", "html", ".", "write", "(", "u'<br/>%s\\n'", "%", "text", ")", "self", ".", "lastComic", "=", "comic", ".", "name", "self", ".", "lastUrl", "=", "pageUrl" ]
Write HTML entry for downloaded comic.
[ "Write", "HTML", "entry", "for", "downloaded", "comic", "." ]
python
train
41.157895
deep-compute/funcserver
funcserver/funcserver.py
https://github.com/deep-compute/funcserver/blob/ce3418cb4a0cb85f0a3cbf86d12ea9733ca23f23/funcserver/funcserver.py#L563-L572
def define_log_pre_format_hooks(self): """ adds a hook to send to websocket if the run command was selected """ hooks = super(Server, self).define_log_pre_format_hooks() # NOTE enabling logs only on debug mode if self.args.func == self.run and self.args.debug: hooks.append(self._send_log_to_ws) return hooks
[ "def", "define_log_pre_format_hooks", "(", "self", ")", ":", "hooks", "=", "super", "(", "Server", ",", "self", ")", ".", "define_log_pre_format_hooks", "(", ")", "# NOTE enabling logs only on debug mode", "if", "self", ".", "args", ".", "func", "==", "self", ".", "run", "and", "self", ".", "args", ".", "debug", ":", "hooks", ".", "append", "(", "self", ".", "_send_log_to_ws", ")", "return", "hooks" ]
adds a hook to send to websocket if the run command was selected
[ "adds", "a", "hook", "to", "send", "to", "websocket", "if", "the", "run", "command", "was", "selected" ]
python
train
36.8
bird-house/birdhousebuilder.recipe.supervisor
birdhousebuilder/recipe/supervisor/__init__.py
https://github.com/bird-house/birdhousebuilder.recipe.supervisor/blob/bbd5531466f5f84b2ab7757f674dcdb5ae751d7f/birdhousebuilder/recipe/supervisor/__init__.py#L127-L135
def install_config(self): """ install supervisor main config file """ text = templ_config.render(**self.options) config = Configuration(self.buildout, 'supervisord.conf', { 'deployment': self.deployment_name, 'text': text}) return [config.install()]
[ "def", "install_config", "(", "self", ")", ":", "text", "=", "templ_config", ".", "render", "(", "*", "*", "self", ".", "options", ")", "config", "=", "Configuration", "(", "self", ".", "buildout", ",", "'supervisord.conf'", ",", "{", "'deployment'", ":", "self", ".", "deployment_name", ",", "'text'", ":", "text", "}", ")", "return", "[", "config", ".", "install", "(", ")", "]" ]
install supervisor main config file
[ "install", "supervisor", "main", "config", "file" ]
python
train
34.777778
GNS3/gns3-server
gns3server/compute/base_node.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/base_node.py#L445-L465
def console_type(self, console_type): """ Sets the console type for this node. :param console_type: console type (string) """ if console_type != self._console_type: # get a new port if the console type change self._manager.port_manager.release_tcp_port(self._console, self._project) if console_type == "vnc": # VNC is a special case and the range must be 5900-6000 self._console = self._manager.port_manager.get_free_tcp_port(self._project, 5900, 6000) else: self._console = self._manager.port_manager.get_free_tcp_port(self._project) self._console_type = console_type log.info("{module}: '{name}' [{id}]: console type set to {console_type}".format(module=self.manager.module_name, name=self.name, id=self.id, console_type=console_type))
[ "def", "console_type", "(", "self", ",", "console_type", ")", ":", "if", "console_type", "!=", "self", ".", "_console_type", ":", "# get a new port if the console type change", "self", ".", "_manager", ".", "port_manager", ".", "release_tcp_port", "(", "self", ".", "_console", ",", "self", ".", "_project", ")", "if", "console_type", "==", "\"vnc\"", ":", "# VNC is a special case and the range must be 5900-6000", "self", ".", "_console", "=", "self", ".", "_manager", ".", "port_manager", ".", "get_free_tcp_port", "(", "self", ".", "_project", ",", "5900", ",", "6000", ")", "else", ":", "self", ".", "_console", "=", "self", ".", "_manager", ".", "port_manager", ".", "get_free_tcp_port", "(", "self", ".", "_project", ")", "self", ".", "_console_type", "=", "console_type", "log", ".", "info", "(", "\"{module}: '{name}' [{id}]: console type set to {console_type}\"", ".", "format", "(", "module", "=", "self", ".", "manager", ".", "module_name", ",", "name", "=", "self", ".", "name", ",", "id", "=", "self", ".", "id", ",", "console_type", "=", "console_type", ")", ")" ]
Sets the console type for this node. :param console_type: console type (string)
[ "Sets", "the", "console", "type", "for", "this", "node", "." ]
python
train
54.095238
phoebe-project/phoebe2
phoebe/frontend/bundle.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/frontend/bundle.py#L797-L809
def get_setting(self, twig=None, **kwargs): """ Filter in the 'setting' context :parameter str twig: the twig used for filtering :parameter **kwargs: any other tags to do the filter (except tag or context) :return: :class:`phoebe.parameters.parameters.ParameterSet` """ if twig is not None: kwargs['twig'] = twig kwargs['context'] = 'setting' return self.filter_or_get(**kwargs)
[ "def", "get_setting", "(", "self", ",", "twig", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "twig", "is", "not", "None", ":", "kwargs", "[", "'twig'", "]", "=", "twig", "kwargs", "[", "'context'", "]", "=", "'setting'", "return", "self", ".", "filter_or_get", "(", "*", "*", "kwargs", ")" ]
Filter in the 'setting' context :parameter str twig: the twig used for filtering :parameter **kwargs: any other tags to do the filter (except tag or context) :return: :class:`phoebe.parameters.parameters.ParameterSet`
[ "Filter", "in", "the", "setting", "context" ]
python
train
35.615385
albahnsen/CostSensitiveClassification
costcla/models/bagging.py
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/bagging.py#L202-L293
def fit(self, X, y, cost_mat, sample_weight=None): """Build a Bagging ensemble of estimators from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. y : array-like, shape = [n_samples] The target values (class labels in classification, real numbers in regression). cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Note that this is supported only if the base estimator supports sample weighting. Returns ------- self : object Returns self. """ random_state = check_random_state(self.random_state) # Convert data # X, y = check_X_y(X, y, ['csr', 'csc', 'coo']) # Not in sklearn verion 0.15 # Remap output n_samples, self.n_features_ = X.shape y = self._validate_y(y) # Check parameters self._validate_estimator() if isinstance(self.max_samples, (numbers.Integral, np.integer)): max_samples = self.max_samples else: # float max_samples = int(self.max_samples * X.shape[0]) if not (0 < max_samples <= X.shape[0]): raise ValueError("max_samples must be in (0, n_samples]") if isinstance(self.max_features, (numbers.Integral, np.integer)): max_features = self.max_features else: # float max_features = int(self.max_features * self.n_features_) if not (0 < max_features <= self.n_features_): raise ValueError("max_features must be in (0, n_features]") # Free allocated memory, if any self.estimators_ = None # Parallel loop n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators, self.n_jobs) seeds = random_state.randint(MAX_INT, size=self.n_estimators) all_results = Parallel(n_jobs=n_jobs, verbose=self.verbose)( delayed(_parallel_build_estimators)( n_estimators[i], self, X, y, cost_mat, seeds[starts[i]:starts[i + 1]], verbose=self.verbose) for i in range(n_jobs)) # Reduce self.estimators_ = list(itertools.chain.from_iterable( t[0] for t in all_results)) self.estimators_samples_ = list(itertools.chain.from_iterable( t[1] for t in all_results)) self.estimators_features_ = list(itertools.chain.from_iterable( t[2] for t in all_results)) self._evaluate_oob_savings(X, y, cost_mat) if self.combination in ['stacking', 'stacking_proba', 'stacking_bmr', 'stacking_proba_bmr']: self._fit_stacking_model(X, y, cost_mat) if self.combination in ['majority_bmr', 'weighted_bmr', 'stacking_bmr', 'stacking_proba_bmr']: self._fit_bmr_model(X, y) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", ",", "cost_mat", ",", "sample_weight", "=", "None", ")", ":", "random_state", "=", "check_random_state", "(", "self", ".", "random_state", ")", "# Convert data", "# X, y = check_X_y(X, y, ['csr', 'csc', 'coo']) # Not in sklearn verion 0.15", "# Remap output", "n_samples", ",", "self", ".", "n_features_", "=", "X", ".", "shape", "y", "=", "self", ".", "_validate_y", "(", "y", ")", "# Check parameters", "self", ".", "_validate_estimator", "(", ")", "if", "isinstance", "(", "self", ".", "max_samples", ",", "(", "numbers", ".", "Integral", ",", "np", ".", "integer", ")", ")", ":", "max_samples", "=", "self", ".", "max_samples", "else", ":", "# float", "max_samples", "=", "int", "(", "self", ".", "max_samples", "*", "X", ".", "shape", "[", "0", "]", ")", "if", "not", "(", "0", "<", "max_samples", "<=", "X", ".", "shape", "[", "0", "]", ")", ":", "raise", "ValueError", "(", "\"max_samples must be in (0, n_samples]\"", ")", "if", "isinstance", "(", "self", ".", "max_features", ",", "(", "numbers", ".", "Integral", ",", "np", ".", "integer", ")", ")", ":", "max_features", "=", "self", ".", "max_features", "else", ":", "# float", "max_features", "=", "int", "(", "self", ".", "max_features", "*", "self", ".", "n_features_", ")", "if", "not", "(", "0", "<", "max_features", "<=", "self", ".", "n_features_", ")", ":", "raise", "ValueError", "(", "\"max_features must be in (0, n_features]\"", ")", "# Free allocated memory, if any", "self", ".", "estimators_", "=", "None", "# Parallel loop", "n_jobs", ",", "n_estimators", ",", "starts", "=", "_partition_estimators", "(", "self", ".", "n_estimators", ",", "self", ".", "n_jobs", ")", "seeds", "=", "random_state", ".", "randint", "(", "MAX_INT", ",", "size", "=", "self", ".", "n_estimators", ")", "all_results", "=", "Parallel", "(", "n_jobs", "=", "n_jobs", ",", "verbose", "=", "self", ".", "verbose", ")", "(", "delayed", "(", "_parallel_build_estimators", ")", "(", "n_estimators", "[", "i", "]", ",", "self", ",", "X", ",", "y", ",", "cost_mat", ",", "seeds", "[", "starts", "[", "i", "]", ":", "starts", "[", "i", "+", "1", "]", "]", ",", "verbose", "=", "self", ".", "verbose", ")", "for", "i", "in", "range", "(", "n_jobs", ")", ")", "# Reduce", "self", ".", "estimators_", "=", "list", "(", "itertools", ".", "chain", ".", "from_iterable", "(", "t", "[", "0", "]", "for", "t", "in", "all_results", ")", ")", "self", ".", "estimators_samples_", "=", "list", "(", "itertools", ".", "chain", ".", "from_iterable", "(", "t", "[", "1", "]", "for", "t", "in", "all_results", ")", ")", "self", ".", "estimators_features_", "=", "list", "(", "itertools", ".", "chain", ".", "from_iterable", "(", "t", "[", "2", "]", "for", "t", "in", "all_results", ")", ")", "self", ".", "_evaluate_oob_savings", "(", "X", ",", "y", ",", "cost_mat", ")", "if", "self", ".", "combination", "in", "[", "'stacking'", ",", "'stacking_proba'", ",", "'stacking_bmr'", ",", "'stacking_proba_bmr'", "]", ":", "self", ".", "_fit_stacking_model", "(", "X", ",", "y", ",", "cost_mat", ")", "if", "self", ".", "combination", "in", "[", "'majority_bmr'", ",", "'weighted_bmr'", ",", "'stacking_bmr'", ",", "'stacking_proba_bmr'", "]", ":", "self", ".", "_fit_bmr_model", "(", "X", ",", "y", ")", "return", "self" ]
Build a Bagging ensemble of estimators from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. y : array-like, shape = [n_samples] The target values (class labels in classification, real numbers in regression). cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Note that this is supported only if the base estimator supports sample weighting. Returns ------- self : object Returns self.
[ "Build", "a", "Bagging", "ensemble", "of", "estimators", "from", "the", "training", "set", "(", "X", "y", ")", "." ]
python
train
37.23913
thespacedoctor/sloancone
build/lib/sloancone/check_coverage.py
https://github.com/thespacedoctor/sloancone/blob/106ea6533ad57f5f0ca82bf6db3053132bdb42e1/build/lib/sloancone/check_coverage.py#L95-L107
def get(self): """ *get the check_coverage object* **Return:** - ``check_coverage`` """ self.log.info('starting the ``get`` method') match = self._query_sdss() self.log.info('completed the ``get`` method') return match
[ "def", "get", "(", "self", ")", ":", "self", ".", "log", ".", "info", "(", "'starting the ``get`` method'", ")", "match", "=", "self", ".", "_query_sdss", "(", ")", "self", ".", "log", ".", "info", "(", "'completed the ``get`` method'", ")", "return", "match" ]
*get the check_coverage object* **Return:** - ``check_coverage``
[ "*", "get", "the", "check_coverage", "object", "*" ]
python
train
21.923077
Grunny/zap-cli
zapcli/commands/scanners.py
https://github.com/Grunny/zap-cli/blob/d58d4850ecfc5467badfac5e5bcc841d064bd419/zapcli/commands/scanners.py#L74-L82
def set_scanner_strength(zap_helper, scanners, strength): """Set the attack strength for scanners.""" if not scanners or 'all' in scanners: scanners = _get_all_scanner_ids(zap_helper) with zap_error_handler(): zap_helper.set_scanner_attack_strength(scanners, strength) console.info('Set attack strength to {0}.'.format(strength))
[ "def", "set_scanner_strength", "(", "zap_helper", ",", "scanners", ",", "strength", ")", ":", "if", "not", "scanners", "or", "'all'", "in", "scanners", ":", "scanners", "=", "_get_all_scanner_ids", "(", "zap_helper", ")", "with", "zap_error_handler", "(", ")", ":", "zap_helper", ".", "set_scanner_attack_strength", "(", "scanners", ",", "strength", ")", "console", ".", "info", "(", "'Set attack strength to {0}.'", ".", "format", "(", "strength", ")", ")" ]
Set the attack strength for scanners.
[ "Set", "the", "attack", "strength", "for", "scanners", "." ]
python
train
39.444444
nanoporetech/ont_fast5_api
ont_fast5_api/fast5_file.py
https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_file.py#L125-L131
def get_tracking_id(self): """ Returns a dictionary of tracking-id key/value pairs. """ self.assert_open() tracking = self.handle[self.global_key +'tracking_id'].attrs.items() tracking = {key: _clean(value) for key, value in tracking} return tracking
[ "def", "get_tracking_id", "(", "self", ")", ":", "self", ".", "assert_open", "(", ")", "tracking", "=", "self", ".", "handle", "[", "self", ".", "global_key", "+", "'tracking_id'", "]", ".", "attrs", ".", "items", "(", ")", "tracking", "=", "{", "key", ":", "_clean", "(", "value", ")", "for", "key", ",", "value", "in", "tracking", "}", "return", "tracking" ]
Returns a dictionary of tracking-id key/value pairs.
[ "Returns", "a", "dictionary", "of", "tracking", "-", "id", "key", "/", "value", "pairs", "." ]
python
train
41.714286
awslabs/aws-shell
awsshell/fuzzy.py
https://github.com/awslabs/aws-shell/blob/8950f03d9d720879890af6c11537b8f9789ce5a9/awsshell/fuzzy.py#L54-L84
def calculate_score(search_string, word): """Calculate how well the search string matches the word.""" # See the module docstring for a high level description # of what we're trying to do. # * If the search string is larger than the word, we know # immediately that this can't be a match. if len(search_string) > len(word): return 0 original_word = word score = 1 search_index = 0 while True: scale = 1.0 search_char = search_string[search_index] i = word.find(search_char) if i < 0: return 0 if i > 0 and word[i - 1] == '-': scale = 0.95 else: scale = 1 - (i / float(len(word))) score *= scale word = word[i + 1:] search_index += 1 if search_index >= len(search_string): break # The more characters that matched the word, the better # so prefer more complete matches. completion_scale = 1 - (len(word) / float(len(original_word))) score *= completion_scale return score
[ "def", "calculate_score", "(", "search_string", ",", "word", ")", ":", "# See the module docstring for a high level description", "# of what we're trying to do.", "# * If the search string is larger than the word, we know", "# immediately that this can't be a match.", "if", "len", "(", "search_string", ")", ">", "len", "(", "word", ")", ":", "return", "0", "original_word", "=", "word", "score", "=", "1", "search_index", "=", "0", "while", "True", ":", "scale", "=", "1.0", "search_char", "=", "search_string", "[", "search_index", "]", "i", "=", "word", ".", "find", "(", "search_char", ")", "if", "i", "<", "0", ":", "return", "0", "if", "i", ">", "0", "and", "word", "[", "i", "-", "1", "]", "==", "'-'", ":", "scale", "=", "0.95", "else", ":", "scale", "=", "1", "-", "(", "i", "/", "float", "(", "len", "(", "word", ")", ")", ")", "score", "*=", "scale", "word", "=", "word", "[", "i", "+", "1", ":", "]", "search_index", "+=", "1", "if", "search_index", ">=", "len", "(", "search_string", ")", ":", "break", "# The more characters that matched the word, the better", "# so prefer more complete matches.", "completion_scale", "=", "1", "-", "(", "len", "(", "word", ")", "/", "float", "(", "len", "(", "original_word", ")", ")", ")", "score", "*=", "completion_scale", "return", "score" ]
Calculate how well the search string matches the word.
[ "Calculate", "how", "well", "the", "search", "string", "matches", "the", "word", "." ]
python
train
33.483871
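Illustrative usage sketch for the fuzzy-matching record above (not from the upstream repo): it assumes the aws-shell package is importable from the path shown, and the candidate strings are invented. Higher scores indicate better matches; 0 means the search string cannot be threaded through the word at all.

# Hedged sketch of awsshell/fuzzy.py's scorer on made-up candidates.
from awsshell.fuzzy import calculate_score

candidates = ['dynamodb', 'describe-instances', 'delete-table']
search = 'dyndb'
ranked = sorted(candidates, key=lambda word: calculate_score(search, word), reverse=True)
print([(word, round(calculate_score(search, word), 3)) for word in ranked])
# 'dynamodb' scores highest; the other two contain no 'y' and score 0.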
ClericPy/torequests
torequests/parsers.py
https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/parsers.py#L253-L270
def xml_parser(self, scode, *args): """ args[0]: xpath args[1]: text / html / xml """ allow_method = ('text', 'html', 'xml') xpath_string, method = args assert method in allow_method, 'method allow: %s' % allow_method result = self.ensure_list( self._fromstring(scode, parser=self._xml_parser).xpath(xpath_string)) result = [ self._tostring( item, method=method, with_tail=0, encoding='unicode') for item in result ] return result
[ "def", "xml_parser", "(", "self", ",", "scode", ",", "*", "args", ")", ":", "allow_method", "=", "(", "'text'", ",", "'html'", ",", "'xml'", ")", "xpath_string", ",", "method", "=", "args", "assert", "method", "in", "allow_method", ",", "'method allow: %s'", "%", "allow_method", "result", "=", "self", ".", "ensure_list", "(", "self", ".", "_fromstring", "(", "scode", ",", "parser", "=", "self", ".", "_xml_parser", ")", ".", "xpath", "(", "xpath_string", ")", ")", "result", "=", "[", "self", ".", "_tostring", "(", "item", ",", "method", "=", "method", ",", "with_tail", "=", "0", ",", "encoding", "=", "'unicode'", ")", "for", "item", "in", "result", "]", "return", "result" ]
args[0]: xpath args[1]: text / html / xml
[ "args", "[", "0", "]", ":", "xpath" ]
python
train
32.388889
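Illustrative sketch for the xml_parser record above (not from the upstream repo): the record's _fromstring/_tostring helpers mirror the lxml API, so this hedged standalone version uses lxml directly; the sample XML and xpath are invented.

# Hedged standalone equivalent of the xml_parser extraction, written against lxml.
from lxml import etree

scode = '<rss><channel><item><title>a</title></item><item><title>b</title></item></channel></rss>'
xpath_string, method = '//item/title', 'text'

root = etree.fromstring(scode)
result = [
    etree.tostring(item, method=method, with_tail=False, encoding='unicode')
    for item in root.xpath(xpath_string)
]
print(result)  # ['a', 'b']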
gersolar/stations
aspects.py
https://github.com/gersolar/stations/blob/b5cf99f1906282d88738b90c9ce44caa8d613e1b/aspects.py#L556-L584
def peel_around(method): """ This function will be deprecated. Removes one wrap around the method (given as a parameter) and returns the wrap. If the method is not wrapped, returns None. """ _permission_to_touch_wraps.acquire() # released in finally part try: if hasattr(method,'__aspects_enabled'): # new-style aspect, easy! method.__aspects_rmf(method.__name__,method.__aspects_orig) return method.__aspects_adv methods_name = method.__name__ methods_class = method.im_class wc = wrap_count(method)-1 if wc==-1: return None wrapped = getattr(methods_class, '__wrapped' + str(wc) + methods_name) setattr(methods_class, methods_name, wrapped) removed_adv = getattr(methods_class, '__wrap'+str(wc)+methods_name) del methods_class.__dict__['__wrapped'+str(wc)+methods_name] del methods_class.__dict__['__wrap'+str(wc)+methods_name] return removed_adv finally: _permission_to_touch_wraps.release()
[ "def", "peel_around", "(", "method", ")", ":", "_permission_to_touch_wraps", ".", "acquire", "(", ")", "# released in finally part", "try", ":", "if", "hasattr", "(", "method", ",", "'__aspects_enabled'", ")", ":", "# new-style aspect, easy!", "method", ".", "__aspects_rmf", "(", "method", ".", "__name__", ",", "method", ".", "__aspects_orig", ")", "return", "method", ".", "__aspects_adv", "methods_name", "=", "method", ".", "__name__", "methods_class", "=", "method", ".", "im_class", "wc", "=", "wrap_count", "(", "method", ")", "-", "1", "if", "wc", "==", "-", "1", ":", "return", "None", "wrapped", "=", "getattr", "(", "methods_class", ",", "'__wrapped'", "+", "str", "(", "wc", ")", "+", "methods_name", ")", "setattr", "(", "methods_class", ",", "methods_name", ",", "wrapped", ")", "removed_adv", "=", "getattr", "(", "methods_class", ",", "'__wrap'", "+", "str", "(", "wc", ")", "+", "methods_name", ")", "del", "methods_class", ".", "__dict__", "[", "'__wrapped'", "+", "str", "(", "wc", ")", "+", "methods_name", "]", "del", "methods_class", ".", "__dict__", "[", "'__wrap'", "+", "str", "(", "wc", ")", "+", "methods_name", "]", "return", "removed_adv", "finally", ":", "_permission_to_touch_wraps", ".", "release", "(", ")" ]
This function will be deprecated. Removes one wrap around the method (given as a parameter) and returns the wrap. If the method is not wrapped, returns None.
[ "This", "function", "will", "be", "deprecated", "." ]
python
train
35.344828
theislab/scanpy
scanpy/plotting/_preprocessing.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/plotting/_preprocessing.py#L66-L83
def filter_genes_dispersion(result, log=False, show=None, save=None): """Plot dispersions versus means for genes. Produces Supp. Fig. 5c of Zheng et al. (2017) and MeanVarPlot() of Seurat. Parameters ---------- result : `np.recarray` Result of :func:`~scanpy.api.pp.filter_genes_dispersion`. log : `bool` Plot on logarithmic axes. show : bool, optional (default: `None`) Show the plot, do not return axis. save : `bool` or `str`, optional (default: `None`) If `True` or a `str`, save the figure. A string is appended to the default filename. Infer the filetype if ending on {{'.pdf', '.png', '.svg'}}. """ highly_variable_genes(result, log=False, show=None, save=None, highly_variable_genes=False)
[ "def", "filter_genes_dispersion", "(", "result", ",", "log", "=", "False", ",", "show", "=", "None", ",", "save", "=", "None", ")", ":", "highly_variable_genes", "(", "result", ",", "log", "=", "False", ",", "show", "=", "None", ",", "save", "=", "None", ",", "highly_variable_genes", "=", "False", ")" ]
Plot dispersions versus means for genes. Produces Supp. Fig. 5c of Zheng et al. (2017) and MeanVarPlot() of Seurat. Parameters ---------- result : `np.recarray` Result of :func:`~scanpy.api.pp.filter_genes_dispersion`. log : `bool` Plot on logarithmic axes. show : bool, optional (default: `None`) Show the plot, do not return axis. save : `bool` or `str`, optional (default: `None`) If `True` or a `str`, save the figure. A string is appended to the default filename. Infer the filetype if ending on {{'.pdf', '.png', '.svg'}}.
[ "Plot", "dispersions", "versus", "means", "for", "genes", "." ]
python
train
42.611111
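Illustrative usage sketch for the deprecated plotting wrapper above (not from the upstream repo): it assumes the old scanpy.api-style preprocessing named in the docstring is still available and that an AnnData object called adata already exists, so it is a sketch rather than a runnable script.

# Hedged sketch of the deprecated filter_genes_dispersion workflow; adata is assumed to exist.
import scanpy as sc

filter_result = sc.pp.filter_genes_dispersion(
    adata.X, min_mean=0.0125, max_mean=3, min_disp=0.5)
sc.pl.filter_genes_dispersion(filter_result, log=True)  # calls the wrapper shown above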
ioos/cc-plugin-ncei
cc_plugin_ncei/ncei_timeseries.py
https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/ncei_timeseries.py#L181-L196
def check_dimensions(self, dataset): ''' Checks that the feature types of this dataset are consitent with a time series incomplete dataset :param netCDF4.Dataset dataset: An open netCDF dataset ''' required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are time-series incomplete feature types') message = '{} must be a valid timeseries feature type. It must have dimensions of (timeSeries, time).' message += ' And all coordinates must have dimensions of (timeSeries)' for variable in util.get_geophysical_variables(dataset): is_valid = util.is_multi_timeseries_incomplete(dataset, variable) required_ctx.assert_true( is_valid, message.format(variable) ) return required_ctx.to_result()
[ "def", "check_dimensions", "(", "self", ",", "dataset", ")", ":", "required_ctx", "=", "TestCtx", "(", "BaseCheck", ".", "HIGH", ",", "'All geophysical variables are time-series incomplete feature types'", ")", "message", "=", "'{} must be a valid timeseries feature type. It must have dimensions of (timeSeries, time).'", "message", "+=", "' And all coordinates must have dimensions of (timeSeries)'", "for", "variable", "in", "util", ".", "get_geophysical_variables", "(", "dataset", ")", ":", "is_valid", "=", "util", ".", "is_multi_timeseries_incomplete", "(", "dataset", ",", "variable", ")", "required_ctx", ".", "assert_true", "(", "is_valid", ",", "message", ".", "format", "(", "variable", ")", ")", "return", "required_ctx", ".", "to_result", "(", ")" ]
Checks that the feature types of this dataset are consistent with a time series incomplete dataset :param netCDF4.Dataset dataset: An open netCDF dataset
[ "Checks", "that", "the", "feature", "types", "of", "this", "dataset", "are", "consistent", "with", "a", "time", "series", "incomplete", "dataset" ]
python
train
51.5
benoitkugler/abstractDataLibrary
pyDLib/GUI/app.py
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/app.py#L160-L172
def init_login(self, from_local=False): """Display login screen. May ask for local data loading if from_local is True.""" if self.toolbar: self.removeToolBar(self.toolbar) widget_login = login.Loading(self.statusBar(), self.theory_main) self.centralWidget().addWidget(widget_login) widget_login.loaded.connect(self.init_tabs) widget_login.canceled.connect(self._quit) widget_login.updated.connect(self.on_update_at_launch) if from_local: widget_login.propose_load_local() else: self.statusBar().showMessage("Données chargées depuis le serveur.", 5000)
[ "def", "init_login", "(", "self", ",", "from_local", "=", "False", ")", ":", "if", "self", ".", "toolbar", ":", "self", ".", "removeToolBar", "(", "self", ".", "toolbar", ")", "widget_login", "=", "login", ".", "Loading", "(", "self", ".", "statusBar", "(", ")", ",", "self", ".", "theory_main", ")", "self", ".", "centralWidget", "(", ")", ".", "addWidget", "(", "widget_login", ")", "widget_login", ".", "loaded", ".", "connect", "(", "self", ".", "init_tabs", ")", "widget_login", ".", "canceled", ".", "connect", "(", "self", ".", "_quit", ")", "widget_login", ".", "updated", ".", "connect", "(", "self", ".", "on_update_at_launch", ")", "if", "from_local", ":", "widget_login", ".", "propose_load_local", "(", ")", "else", ":", "self", ".", "statusBar", "(", ")", ".", "showMessage", "(", "\"Données chargées depuis le serveur.\", ", "5", "00)", "" ]
Display login screen. May ask for local data loading if from_local is True.
[ "Display", "login", "screen", ".", "May", "ask", "for", "local", "data", "loading", "if", "from_local", "is", "True", "." ]
python
train
49.769231
brutasse/graphite-api
graphite_api/functions.py
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L1382-L1439
def movingMax(requestContext, seriesList, windowSize): """ Graphs the moving maximum of a metric (or metrics) over a fixed number of past points, or a time interval. Takes one metric or a wildcard seriesList followed by a number N of datapoints or a quoted string with a length of time like '1hour' or '5min' (See ``from / until`` in the render\_api_ for examples of time formats). Graphs the maximum of the preceeding datapoints for each point on the graph. Example:: &target=movingMax(Server.instance01.requests,10) &target=movingMax(Server.instance*.errors,'5min') """ if not seriesList: return [] windowInterval = None if isinstance(windowSize, six.string_types): delta = parseTimeOffset(windowSize) windowInterval = abs(delta.seconds + (delta.days * 86400)) if windowInterval: previewSeconds = windowInterval else: previewSeconds = max([s.step for s in seriesList]) * int(windowSize) # ignore original data and pull new, including our preview # data from earlier is needed to calculate the early results newContext = requestContext.copy() newContext['startTime'] = (requestContext['startTime'] - timedelta(seconds=previewSeconds)) previewList = evaluateTokens(newContext, requestContext['args'][0]) result = [] for series in previewList: if windowInterval: windowPoints = windowInterval // series.step else: windowPoints = int(windowSize) if isinstance(windowSize, six.string_types): newName = 'movingMax(%s,"%s")' % (series.name, windowSize) else: newName = "movingMax(%s,%s)" % (series.name, windowSize) newSeries = TimeSeries(newName, series.start + previewSeconds, series.end, series.step, []) newSeries.pathExpression = newName for i in range(windowPoints, len(series)): window = series[i - windowPoints:i] newSeries.append(safeMax(window)) result.append(newSeries) return result
[ "def", "movingMax", "(", "requestContext", ",", "seriesList", ",", "windowSize", ")", ":", "if", "not", "seriesList", ":", "return", "[", "]", "windowInterval", "=", "None", "if", "isinstance", "(", "windowSize", ",", "six", ".", "string_types", ")", ":", "delta", "=", "parseTimeOffset", "(", "windowSize", ")", "windowInterval", "=", "abs", "(", "delta", ".", "seconds", "+", "(", "delta", ".", "days", "*", "86400", ")", ")", "if", "windowInterval", ":", "previewSeconds", "=", "windowInterval", "else", ":", "previewSeconds", "=", "max", "(", "[", "s", ".", "step", "for", "s", "in", "seriesList", "]", ")", "*", "int", "(", "windowSize", ")", "# ignore original data and pull new, including our preview", "# data from earlier is needed to calculate the early results", "newContext", "=", "requestContext", ".", "copy", "(", ")", "newContext", "[", "'startTime'", "]", "=", "(", "requestContext", "[", "'startTime'", "]", "-", "timedelta", "(", "seconds", "=", "previewSeconds", ")", ")", "previewList", "=", "evaluateTokens", "(", "newContext", ",", "requestContext", "[", "'args'", "]", "[", "0", "]", ")", "result", "=", "[", "]", "for", "series", "in", "previewList", ":", "if", "windowInterval", ":", "windowPoints", "=", "windowInterval", "//", "series", ".", "step", "else", ":", "windowPoints", "=", "int", "(", "windowSize", ")", "if", "isinstance", "(", "windowSize", ",", "six", ".", "string_types", ")", ":", "newName", "=", "'movingMax(%s,\"%s\")'", "%", "(", "series", ".", "name", ",", "windowSize", ")", "else", ":", "newName", "=", "\"movingMax(%s,%s)\"", "%", "(", "series", ".", "name", ",", "windowSize", ")", "newSeries", "=", "TimeSeries", "(", "newName", ",", "series", ".", "start", "+", "previewSeconds", ",", "series", ".", "end", ",", "series", ".", "step", ",", "[", "]", ")", "newSeries", ".", "pathExpression", "=", "newName", "for", "i", "in", "range", "(", "windowPoints", ",", "len", "(", "series", ")", ")", ":", "window", "=", "series", "[", "i", "-", "windowPoints", ":", "i", "]", "newSeries", ".", "append", "(", "safeMax", "(", "window", ")", ")", "result", ".", "append", "(", "newSeries", ")", "return", "result" ]
Graphs the moving maximum of a metric (or metrics) over a fixed number of past points, or a time interval. Takes one metric or a wildcard seriesList followed by a number N of datapoints or a quoted string with a length of time like '1hour' or '5min' (See ``from / until`` in the render\_api_ for examples of time formats). Graphs the maximum of the preceding datapoints for each point on the graph. Example:: &target=movingMax(Server.instance01.requests,10) &target=movingMax(Server.instance*.errors,'5min')
[ "Graphs", "the", "moving", "maximum", "of", "a", "metric", "(", "or", "metrics", ")", "over", "a", "fixed", "number", "of", "past", "points", "or", "a", "time", "interval", "." ]
python
train
35.948276
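The heart of the movingMax record above is a trailing sliding-window maximum; this small self-contained sketch (not from the upstream repo, values invented) mirrors that loop on plain lists, without any Graphite machinery.

# Minimal illustration of the trailing-window maximum used by movingMax.
def moving_max(points, window_points):
    """Max over each trailing window, mirroring `for i in range(windowPoints, len(series))` above."""
    out = []
    for i in range(window_points, len(points)):
        window = [p for p in points[i - window_points:i] if p is not None]
        out.append(max(window) if window else None)
    return out

print(moving_max([1, 3, 2, None, 5, 4], 3))  # [3, 3, 5]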
grycap/RADL
radl/radl_json.py
https://github.com/grycap/RADL/blob/03ccabb0313a48a5aa0e20c1f7983fddcb95e9cb/radl/radl_json.py#L41-L49
def encode_simple(d): """Encode strings in basic python objects.""" if isinstance(d, unicode): return d.encode() if isinstance(d, list): return list(map(encode_simple, d)) if isinstance(d, dict): return dict([(encode_simple(k), encode_simple(v)) for k, v in d.items()]) return d
[ "def", "encode_simple", "(", "d", ")", ":", "if", "isinstance", "(", "d", ",", "unicode", ")", ":", "return", "d", ".", "encode", "(", ")", "if", "isinstance", "(", "d", ",", "list", ")", ":", "return", "list", "(", "map", "(", "encode_simple", ",", "d", ")", ")", "if", "isinstance", "(", "d", ",", "dict", ")", ":", "return", "dict", "(", "[", "(", "encode_simple", "(", "k", ")", ",", "encode_simple", "(", "v", ")", ")", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", "]", ")", "return", "d" ]
Encode strings in basic python objects.
[ "Encode", "strings", "in", "basic", "python", "objects", "." ]
python
train
34.888889
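Illustrative sketch for the encode_simple record above (not from the upstream repo): Python 2 semantics are assumed because the function tests for the `unicode` type, and the input dictionary is invented.

# Hedged Python 2 sketch; on Python 3 the `unicode` check in encode_simple would not apply.
from radl.radl_json import encode_simple

data = {u'cpu.arch': [u'x86_64', u'i686'], u'memory.size': 512}
encoded = encode_simple(data)
# Expected on Python 2: {'cpu.arch': ['x86_64', 'i686'], 'memory.size': 512}
print(encoded)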
bdastur/spam
pyansible/ansivault.py
https://github.com/bdastur/spam/blob/3c363302412d15bdb391f62bf90348243e456af2/pyansible/ansivault.py#L60-L82
def decrypt_file(self, filename): ''' Decrypt File Args: filename: Pass the filename to encrypt. Returns: No return. ''' if not os.path.exists(filename): print "Invalid filename %s. Does not exist" % filename return if self.vault_password is None: print "ENV Variable PYANSI_VAULT_PASSWORD not set" return if not self.is_file_encrypted(filename): # No need to do anything. return cipher = 'AES256' vaulteditor = VaultEditor(cipher, self.vault_password, filename) vaulteditor.decrypt_file()
[ "def", "decrypt_file", "(", "self", ",", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "print", "\"Invalid filename %s. Does not exist\"", "%", "filename", "return", "if", "self", ".", "vault_password", "is", "None", ":", "print", "\"ENV Variable PYANSI_VAULT_PASSWORD not set\"", "return", "if", "not", "self", ".", "is_file_encrypted", "(", "filename", ")", ":", "# No need to do anything.", "return", "cipher", "=", "'AES256'", "vaulteditor", "=", "VaultEditor", "(", "cipher", ",", "self", ".", "vault_password", ",", "filename", ")", "vaulteditor", ".", "decrypt_file", "(", ")" ]
Decrypt File Args: filename: Pass the filename to decrypt. Returns: No return.
[ "Decrypt", "File", "Args", ":", "filename", ":", "Pass", "the", "filename", "to", "decrypt", ".", "Returns", ":", "No", "return", "." ]
python
train
28.434783
martijnvermaat/monoseq
monoseq/commands.py
https://github.com/martijnvermaat/monoseq/blob/02b92f6aa482ba169787a1a4bcad28372662dc36/monoseq/commands.py#L129-L162
def main(): """ Command line interface. """ parser = argparse.ArgumentParser( description='monoseq: pretty-printing DNA and protein sequences', epilog='If INPUT is in FASTA format, each record is pretty-printed ' 'after printing its name and ANNOTATION (if supplied) is used by ' 'matching chromosome/record name. If INPUT contains a raw sequence, ' 'only the first chromosome in ANNOTATION is used.') parser.add_argument( 'sequence_file', metavar='INPUT', nargs='?', default=sys.stdin, type=argparse.FileType('r'), help='file to read sequence(s) from, ' 'can be in FASTA format (default: standard input)') parser.add_argument( '-b', '--block-length', metavar='LENGTH', dest='block_length', type=int, default=10, help='block length in letters (default: 10)') parser.add_argument( '-l', '--blocks-per-line', metavar='BLOCKS', dest='blocks_per_line', type=int, default=6, help='blocks per line (default: 6)') parser.add_argument( '-a', '--annotation', metavar='POS', dest='annotation', nargs=2, action='append', type=int, help='first and last positions of ' 'subsequence to annotate (allowed more than once)') parser.add_argument( '-e', '--bed', metavar='ANNOTATION', dest='annotation_file', type=argparse.FileType('r'), help='file to read annotation from in ' 'BED format') args = parser.parse_args() pprint(_until_eof(args.sequence_file), annotation=args.annotation, annotation_file=args.annotation_file, block_length=args.block_length, blocks_per_line=args.blocks_per_line)
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'monoseq: pretty-printing DNA and protein sequences'", ",", "epilog", "=", "'If INPUT is in FASTA format, each record is pretty-printed '", "'after printing its name and ANNOTATION (if supplied) is used by '", "'matching chromosome/record name. If INPUT contains a raw sequence, '", "'only the first chromosome in ANNOTATION is used.'", ")", "parser", ".", "add_argument", "(", "'sequence_file'", ",", "metavar", "=", "'INPUT'", ",", "nargs", "=", "'?'", ",", "default", "=", "sys", ".", "stdin", ",", "type", "=", "argparse", ".", "FileType", "(", "'r'", ")", ",", "help", "=", "'file to read sequence(s) from, '", "'can be in FASTA format (default: standard input)'", ")", "parser", ".", "add_argument", "(", "'-b'", ",", "'--block-length'", ",", "metavar", "=", "'LENGTH'", ",", "dest", "=", "'block_length'", ",", "type", "=", "int", ",", "default", "=", "10", ",", "help", "=", "'block length in letters (default: 10)'", ")", "parser", ".", "add_argument", "(", "'-l'", ",", "'--blocks-per-line'", ",", "metavar", "=", "'BLOCKS'", ",", "dest", "=", "'blocks_per_line'", ",", "type", "=", "int", ",", "default", "=", "6", ",", "help", "=", "'blocks per line (default: 6)'", ")", "parser", ".", "add_argument", "(", "'-a'", ",", "'--annotation'", ",", "metavar", "=", "'POS'", ",", "dest", "=", "'annotation'", ",", "nargs", "=", "2", ",", "action", "=", "'append'", ",", "type", "=", "int", ",", "help", "=", "'first and last positions of '", "'subsequence to annotate (allowed more than once)'", ")", "parser", ".", "add_argument", "(", "'-e'", ",", "'--bed'", ",", "metavar", "=", "'ANNOTATION'", ",", "dest", "=", "'annotation_file'", ",", "type", "=", "argparse", ".", "FileType", "(", "'r'", ")", ",", "help", "=", "'file to read annotation from in '", "'BED format'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "pprint", "(", "_until_eof", "(", "args", ".", "sequence_file", ")", ",", "annotation", "=", "args", ".", "annotation", ",", "annotation_file", "=", "args", ".", "annotation_file", ",", "block_length", "=", "args", ".", "block_length", ",", "blocks_per_line", "=", "args", ".", "blocks_per_line", ")" ]
Command line interface.
[ "Command", "line", "interface", "." ]
python
train
48.911765
SergeySatskiy/cdm-pythonparser
cdmpyparser.py
https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/cdmpyparser.py#L462-L486
def __flushLevel(self, level): """Merge the found objects to the required level""" objectsCount = len(self.objectsStack) while objectsCount > level: lastIndex = objectsCount - 1 if lastIndex == 0: # We have exactly one element in the stack if self.objectsStack[0].__class__.__name__ == "Class": self.classes.append(self.objectsStack[0]) else: self.functions.append(self.objectsStack[0]) self.objectsStack = [] break # Append to the previous level if self.objectsStack[lastIndex].__class__.__name__ == "Class": self.objectsStack[lastIndex - 1].classes. \ append(self.objectsStack[lastIndex]) else: self.objectsStack[lastIndex - 1].functions. \ append(self.objectsStack[lastIndex]) del self.objectsStack[lastIndex] objectsCount -= 1
[ "def", "__flushLevel", "(", "self", ",", "level", ")", ":", "objectsCount", "=", "len", "(", "self", ".", "objectsStack", ")", "while", "objectsCount", ">", "level", ":", "lastIndex", "=", "objectsCount", "-", "1", "if", "lastIndex", "==", "0", ":", "# We have exactly one element in the stack", "if", "self", ".", "objectsStack", "[", "0", "]", ".", "__class__", ".", "__name__", "==", "\"Class\"", ":", "self", ".", "classes", ".", "append", "(", "self", ".", "objectsStack", "[", "0", "]", ")", "else", ":", "self", ".", "functions", ".", "append", "(", "self", ".", "objectsStack", "[", "0", "]", ")", "self", ".", "objectsStack", "=", "[", "]", "break", "# Append to the previous level", "if", "self", ".", "objectsStack", "[", "lastIndex", "]", ".", "__class__", ".", "__name__", "==", "\"Class\"", ":", "self", ".", "objectsStack", "[", "lastIndex", "-", "1", "]", ".", "classes", ".", "append", "(", "self", ".", "objectsStack", "[", "lastIndex", "]", ")", "else", ":", "self", ".", "objectsStack", "[", "lastIndex", "-", "1", "]", ".", "functions", ".", "append", "(", "self", ".", "objectsStack", "[", "lastIndex", "]", ")", "del", "self", ".", "objectsStack", "[", "lastIndex", "]", "objectsCount", "-=", "1" ]
Merge the found objects to the required level
[ "Merge", "the", "found", "objects", "to", "the", "required", "level" ]
python
train
40.36
aichaos/rivescript-python
rivescript/sorting.py
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/sorting.py#L120-L143
def sort_list(items): """Sort a simple list by number of words and length.""" # Track by number of words. track = {} def by_length(word1, word2): return len(word2) - len(word1) # Loop through each item. for item in items: # Count the words. cword = utils.word_count(item, all=True) if cword not in track: track[cword] = [] track[cword].append(item) # Sort them. output = [] for count in sorted(track.keys(), reverse=True): sort = sorted(track[count], key=len, reverse=True) output.extend(sort) return output
[ "def", "sort_list", "(", "items", ")", ":", "# Track by number of words.", "track", "=", "{", "}", "def", "by_length", "(", "word1", ",", "word2", ")", ":", "return", "len", "(", "word2", ")", "-", "len", "(", "word1", ")", "# Loop through each item.", "for", "item", "in", "items", ":", "# Count the words.", "cword", "=", "utils", ".", "word_count", "(", "item", ",", "all", "=", "True", ")", "if", "cword", "not", "in", "track", ":", "track", "[", "cword", "]", "=", "[", "]", "track", "[", "cword", "]", ".", "append", "(", "item", ")", "# Sort them.", "output", "=", "[", "]", "for", "count", "in", "sorted", "(", "track", ".", "keys", "(", ")", ",", "reverse", "=", "True", ")", ":", "sort", "=", "sorted", "(", "track", "[", "count", "]", ",", "key", "=", "len", ",", "reverse", "=", "True", ")", "output", ".", "extend", "(", "sort", ")", "return", "output" ]
Sort a simple list by number of words and length.
[ "Sort", "a", "simple", "list", "by", "number", "of", "words", "and", "length", "." ]
python
train
24.875
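Illustrative usage sketch for the trigger-sorting record above (not from the upstream repo; phrases invented): items with more words sort first, and ties are broken by string length.

# Hedged sketch of rivescript/sorting.py's sort_list ordering.
from rivescript.sorting import sort_list

items = ['hello bot', 'hi', 'hello there my friend', 'hello']
print(sort_list(items))
# Expected: ['hello there my friend', 'hello bot', 'hello', 'hi']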
gwastro/pycbc
pycbc/results/render.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/results/render.py#L62-L76
def get_embedded_config(filename): """ Attempt to load config data attached to file """ def check_option(self, section, name): return (self.has_section(section) and (self.has_option(section, name) or (name in self.defaults()))) try: cp = pycbc.results.load_metadata_from_file(filename) except TypeError: cp = ConfigParser() cp.check_option = types.MethodType(check_option, cp) return cp
[ "def", "get_embedded_config", "(", "filename", ")", ":", "def", "check_option", "(", "self", ",", "section", ",", "name", ")", ":", "return", "(", "self", ".", "has_section", "(", "section", ")", "and", "(", "self", ".", "has_option", "(", "section", ",", "name", ")", "or", "(", "name", "in", "self", ".", "defaults", "(", ")", ")", ")", ")", "try", ":", "cp", "=", "pycbc", ".", "results", ".", "load_metadata_from_file", "(", "filename", ")", "except", "TypeError", ":", "cp", "=", "ConfigParser", "(", ")", "cp", ".", "check_option", "=", "types", ".", "MethodType", "(", "check_option", ",", "cp", ")", "return", "cp" ]
Attempt to load config data attached to file
[ "Attempt", "to", "load", "config", "data", "attached", "to", "file" ]
python
train
29.466667
ronaldguillen/wave
wave/serializers.py
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/serializers.py#L1298-L1331
def _get_model_fields(self, field_names, declared_fields, extra_kwargs): """ Returns all the model fields that are being mapped to by fields on the serializer class. Returned as a dict of 'model field name' -> 'model field'. Used internally by `get_uniqueness_field_options`. """ model = getattr(self.Meta, 'model') model_fields = {} for field_name in field_names: if field_name in declared_fields: # If the field is declared on the serializer field = declared_fields[field_name] source = field.source or field_name else: try: source = extra_kwargs[field_name]['source'] except KeyError: source = field_name if '.' in source or source == '*': # Model fields will always have a simple source mapping, # they can't be nested attribute lookups. continue try: field = model._meta.get_field(source) if isinstance(field, DjangoModelField): model_fields[source] = field except FieldDoesNotExist: pass return model_fields
[ "def", "_get_model_fields", "(", "self", ",", "field_names", ",", "declared_fields", ",", "extra_kwargs", ")", ":", "model", "=", "getattr", "(", "self", ".", "Meta", ",", "'model'", ")", "model_fields", "=", "{", "}", "for", "field_name", "in", "field_names", ":", "if", "field_name", "in", "declared_fields", ":", "# If the field is declared on the serializer", "field", "=", "declared_fields", "[", "field_name", "]", "source", "=", "field", ".", "source", "or", "field_name", "else", ":", "try", ":", "source", "=", "extra_kwargs", "[", "field_name", "]", "[", "'source'", "]", "except", "KeyError", ":", "source", "=", "field_name", "if", "'.'", "in", "source", "or", "source", "==", "'*'", ":", "# Model fields will always have a simple source mapping,", "# they can't be nested attribute lookups.", "continue", "try", ":", "field", "=", "model", ".", "_meta", ".", "get_field", "(", "source", ")", "if", "isinstance", "(", "field", ",", "DjangoModelField", ")", ":", "model_fields", "[", "source", "]", "=", "field", "except", "FieldDoesNotExist", ":", "pass", "return", "model_fields" ]
Returns all the model fields that are being mapped to by fields on the serializer class. Returned as a dict of 'model field name' -> 'model field'. Used internally by `get_uniqueness_field_options`.
[ "Returns", "all", "the", "model", "fields", "that", "are", "being", "mapped", "to", "by", "fields", "on", "the", "serializer", "class", ".", "Returned", "as", "a", "dict", "of", "model", "field", "name", "-", ">", "model", "field", ".", "Used", "internally", "by", "get_uniqueness_field_options", "." ]
python
train
37.058824
yyuu/botornado
boto/iam/connection.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/iam/connection.py#L157-L178
def update_group(self, group_name, new_group_name=None, new_path=None): """ Updates name and/or path of the specified group. :type group_name: string :param group_name: The name of the new group :type new_group_name: string :param new_group_name: If provided, the name of the group will be changed to this name. :type new_path: string :param new_path: If provided, the path of the group will be changed to this path. """ params = {'GroupName' : group_name} if new_group_name: params['NewGroupName'] = new_group_name if new_path: params['NewPath'] = new_path return self.get_response('UpdateGroup', params)
[ "def", "update_group", "(", "self", ",", "group_name", ",", "new_group_name", "=", "None", ",", "new_path", "=", "None", ")", ":", "params", "=", "{", "'GroupName'", ":", "group_name", "}", "if", "new_group_name", ":", "params", "[", "'NewGroupName'", "]", "=", "new_group_name", "if", "new_path", ":", "params", "[", "'NewPath'", "]", "=", "new_path", "return", "self", ".", "get_response", "(", "'UpdateGroup'", ",", "params", ")" ]
Updates name and/or path of the specified group. :type group_name: string :param group_name: The name of the new group :type new_group_name: string :param new_group_name: If provided, the name of the group will be changed to this name. :type new_path: string :param new_path: If provided, the path of the group will be changed to this path.
[ "Updates", "name", "and", "/", "or", "path", "of", "the", "specified", "group", "." ]
python
train
35.090909
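Illustrative usage sketch for the IAM record above (not from the upstream repo): it assumes boto 2-style AWS credentials are configured, and the group names and path are invented.

# Hedged sketch: needs valid AWS credentials and an existing IAM group named 'dev-team'.
import boto

iam = boto.connect_iam()  # boto 2 connection, credentials picked up from env/config
response = iam.update_group('dev-team', new_group_name='platform-team', new_path='/teams/')
print(response)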
mikedh/trimesh
trimesh/exchange/assimp.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/exchange/assimp.py#L10-L155
def load_pyassimp(file_obj, file_type=None, resolver=None, **kwargs): """ Use the pyassimp library to load a mesh from a file object and type or file name if file_obj is a string Parameters --------- file_obj: str, or file object File path or object containing mesh data file_type : str File extension, aka 'stl' resolver : trimesh.visual.resolvers.Resolver Used to load referenced data (like texture files) kwargs : dict Passed through to mesh constructor Returns --------- scene : trimesh.Scene Native trimesh copy of assimp scene """ def LP_to_TM(lp): # try to get the vertex colors attribute colors = (np.reshape(lp.colors, (-1, 4)) [:, :3] * 255).round().astype(np.uint8) # If no vertex colors, try to extract them from the material if len(colors) == 0: if 'diffuse' in lp.material.properties.keys(): colors = np.array(lp.material.properties['diffuse']) # pass kwargs through to mesh constructor mesh_kwargs = copy.deepcopy(kwargs) # add data from the LP_Mesh mesh_kwargs.update({'vertices': lp.vertices, 'vertex_normals': lp.normals, 'faces': lp.faces, 'vertex_colors': colors}) return mesh_kwargs # did we open the file inside this function opened = False # not a file object if not hasattr(file_obj, 'read'): # if there is no read attribute # we assume we've been passed a file name file_type = (str(file_obj).split('.')[-1]).lower() file_obj = open(file_obj, 'rb') opened = True # we need files to be bytes elif not hasattr(file_obj, 'mode') or file_obj.mode != 'rb': # assimp will crash on anything that isn't binary # so if we have a text mode file or anything else # grab the data, encode as bytes, and then use stream data = file_obj.read() if hasattr(data, 'encode'): data = data.encode('utf-8') file_obj = util.wrap_as_stream(data) # load the scene using pyassimp scene = pyassimp.load(file_obj, file_type=file_type) # save a record of mesh names used so we # don't have to do queries on mesh_id.values() mesh_names = set() # save a mapping for {id(mesh) : name} mesh_id = {} # save results as {name : Trimesh} meshes = {} # loop through scene LPMesh objects for m in scene.meshes: # skip meshes without tri/quad faces if m.faces.shape[1] not in [3, 4]: continue # if this mesh has the name of an existing mesh if m.name in mesh_names: # make it the name plus the unique ID of the object name = m.name + str(id(m)) else: # otherwise just use the name it calls itself by name = m.name # save the name to mark as consumed mesh_names.add(name) # save the id:name mapping mesh_id[id(m)] = name # convert the mesh to a trimesh object meshes[name] = LP_to_TM(m) # now go through and collect the transforms from the scene # we are going to save them as a list of dict kwargs transforms = [] # nodes as (parent, node) tuples # use deque so we can pop from both ends queue = collections.deque( [('world', n) for n in scene.rootnode.children]) # consume the queue while len(queue) > 0: # parent name, node object parent, node = queue.pop() # assimp uses weirdly duplicate node names # object ID's are actually unique and consistent node_name = id(node) transforms.append({'frame_from': parent, 'frame_to': node_name, 'matrix': node.transformation}) # loop through meshes this node uses # note that they are the SAME objects as converted # above so we can find their reference using id() for m in node.meshes: if id(m) not in mesh_id: continue # create kwargs for graph.update edge = {'frame_from': node_name, 'frame_to': str(id(m)) + str(node_name), 'matrix': np.eye(4), 'geometry': mesh_id[id(m)]} transforms.append(edge) # add any children to the 
queue to be visited for child in node.children: queue.appendleft((node_name, child)) # release the loaded scene pyassimp.release(scene) # if we opened the file in this function close it if opened: file_obj.close() # create kwargs for trimesh.exchange.load.load_kwargs result = {'class': 'Scene', 'geometry': meshes, 'graph': transforms, 'base_frame': 'world'} return result
[ "def", "load_pyassimp", "(", "file_obj", ",", "file_type", "=", "None", ",", "resolver", "=", "None", ",", "*", "*", "kwargs", ")", ":", "def", "LP_to_TM", "(", "lp", ")", ":", "# try to get the vertex colors attribute", "colors", "=", "(", "np", ".", "reshape", "(", "lp", ".", "colors", ",", "(", "-", "1", ",", "4", ")", ")", "[", ":", ",", ":", "3", "]", "*", "255", ")", ".", "round", "(", ")", ".", "astype", "(", "np", ".", "uint8", ")", "# If no vertex colors, try to extract them from the material", "if", "len", "(", "colors", ")", "==", "0", ":", "if", "'diffuse'", "in", "lp", ".", "material", ".", "properties", ".", "keys", "(", ")", ":", "colors", "=", "np", ".", "array", "(", "lp", ".", "material", ".", "properties", "[", "'diffuse'", "]", ")", "# pass kwargs through to mesh constructor", "mesh_kwargs", "=", "copy", ".", "deepcopy", "(", "kwargs", ")", "# add data from the LP_Mesh", "mesh_kwargs", ".", "update", "(", "{", "'vertices'", ":", "lp", ".", "vertices", ",", "'vertex_normals'", ":", "lp", ".", "normals", ",", "'faces'", ":", "lp", ".", "faces", ",", "'vertex_colors'", ":", "colors", "}", ")", "return", "mesh_kwargs", "# did we open the file inside this function", "opened", "=", "False", "# not a file object", "if", "not", "hasattr", "(", "file_obj", ",", "'read'", ")", ":", "# if there is no read attribute", "# we assume we've been passed a file name", "file_type", "=", "(", "str", "(", "file_obj", ")", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", ")", ".", "lower", "(", ")", "file_obj", "=", "open", "(", "file_obj", ",", "'rb'", ")", "opened", "=", "True", "# we need files to be bytes", "elif", "not", "hasattr", "(", "file_obj", ",", "'mode'", ")", "or", "file_obj", ".", "mode", "!=", "'rb'", ":", "# assimp will crash on anything that isn't binary", "# so if we have a text mode file or anything else", "# grab the data, encode as bytes, and then use stream", "data", "=", "file_obj", ".", "read", "(", ")", "if", "hasattr", "(", "data", ",", "'encode'", ")", ":", "data", "=", "data", ".", "encode", "(", "'utf-8'", ")", "file_obj", "=", "util", ".", "wrap_as_stream", "(", "data", ")", "# load the scene using pyassimp", "scene", "=", "pyassimp", ".", "load", "(", "file_obj", ",", "file_type", "=", "file_type", ")", "# save a record of mesh names used so we", "# don't have to do queries on mesh_id.values()", "mesh_names", "=", "set", "(", ")", "# save a mapping for {id(mesh) : name}", "mesh_id", "=", "{", "}", "# save results as {name : Trimesh}", "meshes", "=", "{", "}", "# loop through scene LPMesh objects", "for", "m", "in", "scene", ".", "meshes", ":", "# skip meshes without tri/quad faces", "if", "m", ".", "faces", ".", "shape", "[", "1", "]", "not", "in", "[", "3", ",", "4", "]", ":", "continue", "# if this mesh has the name of an existing mesh", "if", "m", ".", "name", "in", "mesh_names", ":", "# make it the name plus the unique ID of the object", "name", "=", "m", ".", "name", "+", "str", "(", "id", "(", "m", ")", ")", "else", ":", "# otherwise just use the name it calls itself by", "name", "=", "m", ".", "name", "# save the name to mark as consumed", "mesh_names", ".", "add", "(", "name", ")", "# save the id:name mapping", "mesh_id", "[", "id", "(", "m", ")", "]", "=", "name", "# convert the mesh to a trimesh object", "meshes", "[", "name", "]", "=", "LP_to_TM", "(", "m", ")", "# now go through and collect the transforms from the scene", "# we are going to save them as a list of dict kwargs", "transforms", "=", "[", "]", "# nodes as (parent, node) tuples", "# 
use deque so we can pop from both ends", "queue", "=", "collections", ".", "deque", "(", "[", "(", "'world'", ",", "n", ")", "for", "n", "in", "scene", ".", "rootnode", ".", "children", "]", ")", "# consume the queue", "while", "len", "(", "queue", ")", ">", "0", ":", "# parent name, node object", "parent", ",", "node", "=", "queue", ".", "pop", "(", ")", "# assimp uses weirdly duplicate node names", "# object ID's are actually unique and consistent", "node_name", "=", "id", "(", "node", ")", "transforms", ".", "append", "(", "{", "'frame_from'", ":", "parent", ",", "'frame_to'", ":", "node_name", ",", "'matrix'", ":", "node", ".", "transformation", "}", ")", "# loop through meshes this node uses", "# note that they are the SAME objects as converted", "# above so we can find their reference using id()", "for", "m", "in", "node", ".", "meshes", ":", "if", "id", "(", "m", ")", "not", "in", "mesh_id", ":", "continue", "# create kwargs for graph.update", "edge", "=", "{", "'frame_from'", ":", "node_name", ",", "'frame_to'", ":", "str", "(", "id", "(", "m", ")", ")", "+", "str", "(", "node_name", ")", ",", "'matrix'", ":", "np", ".", "eye", "(", "4", ")", ",", "'geometry'", ":", "mesh_id", "[", "id", "(", "m", ")", "]", "}", "transforms", ".", "append", "(", "edge", ")", "# add any children to the queue to be visited", "for", "child", "in", "node", ".", "children", ":", "queue", ".", "appendleft", "(", "(", "node_name", ",", "child", ")", ")", "# release the loaded scene", "pyassimp", ".", "release", "(", "scene", ")", "# if we opened the file in this function close it", "if", "opened", ":", "file_obj", ".", "close", "(", ")", "# create kwargs for trimesh.exchange.load.load_kwargs", "result", "=", "{", "'class'", ":", "'Scene'", ",", "'geometry'", ":", "meshes", ",", "'graph'", ":", "transforms", ",", "'base_frame'", ":", "'world'", "}", "return", "result" ]
Use the pyassimp library to load a mesh from a file object and type or file name if file_obj is a string Parameters --------- file_obj: str, or file object File path or object containing mesh data file_type : str File extension, aka 'stl' resolver : trimesh.visual.resolvers.Resolver Used to load referenced data (like texture files) kwargs : dict Passed through to mesh constructor Returns --------- scene : trimesh.Scene Native trimesh copy of assimp scene
[ "Use", "the", "pyassimp", "library", "to", "load", "a", "mesh", "from", "a", "file", "object", "and", "type", "or", "file", "name", "if", "file_obj", "is", "a", "string" ]
python
train
33.541096
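Illustrative sketch of how the loader's return value is consumed, based on the comment in the code above that the result dict feeds trimesh.exchange.load.load_kwargs (not from the upstream repo; the file name is invented and pyassimp must be installed).

# Hedged sketch: 'model.dae' is hypothetical and pyassimp is assumed to be importable.
from trimesh.exchange.assimp import load_pyassimp
from trimesh.exchange.load import load_kwargs

kwargs = load_pyassimp('model.dae', file_type='dae')  # {'class': 'Scene', 'geometry': ..., 'graph': ...}
scene = load_kwargs(kwargs)  # rebuild a trimesh.Scene from the kwargs dict
print(list(scene.geometry.keys()))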
bear/ronkyuu
ronkyuu/webmention.py
https://github.com/bear/ronkyuu/blob/91a05fbe220b661760467b7b3d3d780a9ba28afa/ronkyuu/webmention.py#L187-L249
def sendWebmention(sourceURL, targetURL, webmention=None, test_urls=True, vouchDomain=None, headers={}, timeout=None, debug=False): """Send to the :targetURL: a WebMention for the :sourceURL: The WebMention will be discovered if not given in the :webmention: parameter. :param sourceURL: URL that is referencing :targetURL: :param targetURL: URL of mentioned post :param webmention: optional WebMention endpoint :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers dict :param timeout: optional timeout for web requests :type timeout float :rtype: HTTPrequest object if WebMention endpoint was valid """ if test_urls: v = URLValidator() v(sourceURL) v(targetURL) debugOutput = [] originalURL = targetURL try: targetRequest = requests.get(targetURL) if targetRequest.status_code == requests.codes.ok: if len(targetRequest.history) > 0: redirect = targetRequest.history[-1] if (redirect.status_code == 301 or redirect.status_code == 302) and 'Location' in redirect.headers: targetURL = urljoin(targetURL, redirect.headers['Location']) debugOutput.append('targetURL redirected: %s' % targetURL) if webmention is None: wStatus, wUrl = discoverEndpoint(targetURL, headers=headers, timeout=timeout, request=targetRequest) else: wStatus = 200 wUrl = webmention debugOutput.append('endpointURL: %s %s' % (wStatus, wUrl)) if wStatus == requests.codes.ok and wUrl is not None: if test_urls: v(wUrl) payload = {'source': sourceURL, 'target': originalURL} if vouchDomain is not None: payload['vouch'] = vouchDomain try: result = requests.post(wUrl, data=payload, headers=headers, timeout=timeout) debugOutput.append('POST %s -- %s' % (wUrl, result.status_code)) if result.status_code == 405 and len(result.history) > 0: redirect = result.history[-1] if redirect.status_code == 301 and 'Location' in redirect.headers: result = requests.post(redirect.headers['Location'], data=payload, headers=headers, timeout=timeout) debugOutput.append('redirected POST %s -- %s' % (redirect.headers['Location'], result.status_code)) except Exception as e: result = None except (requests.exceptions.RequestException, requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.URLRequired, requests.exceptions.TooManyRedirects, requests.exceptions.Timeout): debugOutput.append('exception during GET request') result = None return result
[ "def", "sendWebmention", "(", "sourceURL", ",", "targetURL", ",", "webmention", "=", "None", ",", "test_urls", "=", "True", ",", "vouchDomain", "=", "None", ",", "headers", "=", "{", "}", ",", "timeout", "=", "None", ",", "debug", "=", "False", ")", ":", "if", "test_urls", ":", "v", "=", "URLValidator", "(", ")", "v", "(", "sourceURL", ")", "v", "(", "targetURL", ")", "debugOutput", "=", "[", "]", "originalURL", "=", "targetURL", "try", ":", "targetRequest", "=", "requests", ".", "get", "(", "targetURL", ")", "if", "targetRequest", ".", "status_code", "==", "requests", ".", "codes", ".", "ok", ":", "if", "len", "(", "targetRequest", ".", "history", ")", ">", "0", ":", "redirect", "=", "targetRequest", ".", "history", "[", "-", "1", "]", "if", "(", "redirect", ".", "status_code", "==", "301", "or", "redirect", ".", "status_code", "==", "302", ")", "and", "'Location'", "in", "redirect", ".", "headers", ":", "targetURL", "=", "urljoin", "(", "targetURL", ",", "redirect", ".", "headers", "[", "'Location'", "]", ")", "debugOutput", ".", "append", "(", "'targetURL redirected: %s'", "%", "targetURL", ")", "if", "webmention", "is", "None", ":", "wStatus", ",", "wUrl", "=", "discoverEndpoint", "(", "targetURL", ",", "headers", "=", "headers", ",", "timeout", "=", "timeout", ",", "request", "=", "targetRequest", ")", "else", ":", "wStatus", "=", "200", "wUrl", "=", "webmention", "debugOutput", ".", "append", "(", "'endpointURL: %s %s'", "%", "(", "wStatus", ",", "wUrl", ")", ")", "if", "wStatus", "==", "requests", ".", "codes", ".", "ok", "and", "wUrl", "is", "not", "None", ":", "if", "test_urls", ":", "v", "(", "wUrl", ")", "payload", "=", "{", "'source'", ":", "sourceURL", ",", "'target'", ":", "originalURL", "}", "if", "vouchDomain", "is", "not", "None", ":", "payload", "[", "'vouch'", "]", "=", "vouchDomain", "try", ":", "result", "=", "requests", ".", "post", "(", "wUrl", ",", "data", "=", "payload", ",", "headers", "=", "headers", ",", "timeout", "=", "timeout", ")", "debugOutput", ".", "append", "(", "'POST %s -- %s'", "%", "(", "wUrl", ",", "result", ".", "status_code", ")", ")", "if", "result", ".", "status_code", "==", "405", "and", "len", "(", "result", ".", "history", ")", ">", "0", ":", "redirect", "=", "result", ".", "history", "[", "-", "1", "]", "if", "redirect", ".", "status_code", "==", "301", "and", "'Location'", "in", "redirect", ".", "headers", ":", "result", "=", "requests", ".", "post", "(", "redirect", ".", "headers", "[", "'Location'", "]", ",", "data", "=", "payload", ",", "headers", "=", "headers", ",", "timeout", "=", "timeout", ")", "debugOutput", ".", "append", "(", "'redirected POST %s -- %s'", "%", "(", "redirect", ".", "headers", "[", "'Location'", "]", ",", "result", ".", "status_code", ")", ")", "except", "Exception", "as", "e", ":", "result", "=", "None", "except", "(", "requests", ".", "exceptions", ".", "RequestException", ",", "requests", ".", "exceptions", ".", "ConnectionError", ",", "requests", ".", "exceptions", ".", "HTTPError", ",", "requests", ".", "exceptions", ".", "URLRequired", ",", "requests", ".", "exceptions", ".", "TooManyRedirects", ",", "requests", ".", "exceptions", ".", "Timeout", ")", ":", "debugOutput", ".", "append", "(", "'exception during GET request'", ")", "result", "=", "None", "return", "result" ]
Send to the :targetURL: a WebMention for the :sourceURL: The WebMention will be discovered if not given in the :webmention: parameter. :param sourceURL: URL that is referencing :targetURL: :param targetURL: URL of mentioned post :param webmention: optional WebMention endpoint :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers dict :param timeout: optional timeout for web requests :type timeout float :rtype: HTTPrequest object if WebMention endpoint was valid
[ "Send", "to", "the", ":", "targetURL", ":", "a", "WebMention", "for", "the", ":", "sourceURL", ":" ]
python
train
46.936508
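Illustrative usage sketch for the WebMention record above (not from the upstream repo): the URLs are placeholders, and the call performs real HTTP requests when run.

# Hedged sketch: URLs below are placeholders, so discovery will normally fail and return None.
from ronkyuu.webmention import sendWebmention

source = 'https://example.com/my-reply'
target = 'https://example.org/original-post'
result = sendWebmention(source, target, timeout=10)
if result is not None and result.status_code in (200, 201, 202):
    print('webmention accepted:', result.status_code)
else:
    print('webmention failed or no endpoint advertised')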
CamDavidsonPilon/lifetimes
lifetimes/fitters/pareto_nbd_fitter.py
https://github.com/CamDavidsonPilon/lifetimes/blob/f926308bc03c17c1d12fead729de43885cf13321/lifetimes/fitters/pareto_nbd_fitter.py#L297-L317
def expected_number_of_purchases_up_to_time(self, t): """ Return expected number of repeat purchases up to time t. Calculate the expected number of repeat purchases up to time t for a randomly choose individual from the population. Parameters ---------- t: array_like times to calculate the expectation for. Returns ------- array_like """ r, alpha, s, beta = self._unload_params("r", "alpha", "s", "beta") first_term = r * beta / alpha / (s - 1) second_term = 1 - (beta / (beta + t)) ** (s - 1) return first_term * second_term
[ "def", "expected_number_of_purchases_up_to_time", "(", "self", ",", "t", ")", ":", "r", ",", "alpha", ",", "s", ",", "beta", "=", "self", ".", "_unload_params", "(", "\"r\"", ",", "\"alpha\"", ",", "\"s\"", ",", "\"beta\"", ")", "first_term", "=", "r", "*", "beta", "/", "alpha", "/", "(", "s", "-", "1", ")", "second_term", "=", "1", "-", "(", "beta", "/", "(", "beta", "+", "t", ")", ")", "**", "(", "s", "-", "1", ")", "return", "first_term", "*", "second_term" ]
Return expected number of repeat purchases up to time t. Calculate the expected number of repeat purchases up to time t for a randomly chosen individual from the population. Parameters ---------- t: array_like times to calculate the expectation for. Returns ------- array_like
[ "Return", "expected", "number", "of", "repeat", "purchases", "up", "to", "time", "t", "." ]
python
train
30.571429
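The closed form in the record above is easy to evaluate directly; this self-contained numeric sketch (not from the upstream repo, parameter values invented) mirrors the two terms computed by expected_number_of_purchases_up_to_time.

# Standalone evaluation of E[X(t)] = r*beta/(alpha*(s-1)) * (1 - (beta/(beta+t))**(s-1)).
r, alpha, s, beta = 0.5, 10.0, 1.5, 12.0   # invented Pareto/NBD parameters (s must not equal 1)
t = 30.0                                   # horizon, in the same time units as the data

first_term = r * beta / alpha / (s - 1)
second_term = 1 - (beta / (beta + t)) ** (s - 1)
print(first_term * second_term)            # ~0.56 expected repeat purchases by time t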
TheGhouls/oct
oct/utilities/configuration.py
https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/utilities/configuration.py#L98-L109
def get_db_uri(config, output_dir): """Process results_database parameters in config to format them for set database function :param dict config: project configuration dict :param str output_dir: output directory for results :return: string for db uri """ db_config = config.get("results_database", {"db_uri": "default"}) if db_config['db_uri'] == 'default': return os.path.join(output_dir, "results.sqlite") return db_config['db_uri']
[ "def", "get_db_uri", "(", "config", ",", "output_dir", ")", ":", "db_config", "=", "config", ".", "get", "(", "\"results_database\"", ",", "{", "\"db_uri\"", ":", "\"default\"", "}", ")", "if", "db_config", "[", "'db_uri'", "]", "==", "'default'", ":", "return", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"results.sqlite\"", ")", "return", "db_config", "[", "'db_uri'", "]" ]
Process results_database parameters in config to format them for set database function :param dict config: project configuration dict :param str output_dir: output directory for results :return: string for db uri
[ "Process", "results_database", "parameters", "in", "config", "to", "format", "them", "for", "set", "database", "function" ]
python
train
39.083333
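Illustrative usage sketch for the configuration helper above (not from the upstream repo; paths and URIs invented), showing the default-versus-explicit behaviour.

# Hedged sketch of get_db_uri's fallback logic.
from oct.utilities.configuration import get_db_uri

config_default = {'results_database': {'db_uri': 'default'}}
config_custom = {'results_database': {'db_uri': 'postgresql://user:pass@localhost/oct'}}

print(get_db_uri(config_default, '/tmp/run1'))   # /tmp/run1/results.sqlite
print(get_db_uri(config_custom, '/tmp/run1'))    # postgresql://user:pass@localhost/oct
print(get_db_uri({}, '/tmp/run1'))               # missing key also falls back to results.sqlite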
driftx/Telephus
telephus/cassandra/Cassandra.py
https://github.com/driftx/Telephus/blob/860a03a0fafe71605e1a4316dfdd8d0c29094703/telephus/cassandra/Cassandra.py#L580-L594
def get_slice(self, key, column_parent, predicate, consistency_level): """ Get the group of columns contained by column_parent (either a ColumnFamily name or a ColumnFamily/SuperColumn name pair) specified by the given SlicePredicate. If no matching values are found, an empty list is returned. Parameters: - key - column_parent - predicate - consistency_level """ self._seqid += 1 d = self._reqs[self._seqid] = defer.Deferred() self.send_get_slice(key, column_parent, predicate, consistency_level) return d
[ "def", "get_slice", "(", "self", ",", "key", ",", "column_parent", ",", "predicate", ",", "consistency_level", ")", ":", "self", ".", "_seqid", "+=", "1", "d", "=", "self", ".", "_reqs", "[", "self", ".", "_seqid", "]", "=", "defer", ".", "Deferred", "(", ")", "self", ".", "send_get_slice", "(", "key", ",", "column_parent", ",", "predicate", ",", "consistency_level", ")", "return", "d" ]
Get the group of columns contained by column_parent (either a ColumnFamily name or a ColumnFamily/SuperColumn name pair) specified by the given SlicePredicate. If no matching values are found, an empty list is returned. Parameters: - key - column_parent - predicate - consistency_level
[ "Get", "the", "group", "of", "columns", "contained", "by", "column_parent", "(", "either", "a", "ColumnFamily", "name", "or", "a", "ColumnFamily", "/", "SuperColumn", "name", "pair", ")", "specified", "by", "the", "given", "SlicePredicate", ".", "If", "no", "matching", "values", "are", "found", "an", "empty", "list", "is", "returned", "." ]
python
train
36.666667
albertz/py_better_exchook
better_exchook.py
https://github.com/albertz/py_better_exchook/blob/3d524a027d7fc4e83e47e39a1978849561da69b3/better_exchook.py#L617-L699
def py_syntax_highlight(self, s): """ :param str s: :rtype: str """ if not self.enable: return s state = 0 spaces = " \t\n" ops = ".,;:+-*/%&!=|(){}[]^<>" i = 0 cur_token = "" color_args = {0: {}, len(s): {}} # type: typing.Dict[int,typing.Dict[str]] # pos in s -> color kwargs def finish_identifier(): """ Reset color to standard for current identifier. """ if cur_token in py_keywords: color_args[max([k for k in color_args.keys() if k < i])] = {"color": self.fg_colors[0]} while i < len(s): c = s[i] i += 1 if c == "\n": if state == 3: finish_identifier() color_args[i] = {} state = 0 elif state == 0: if c in spaces: pass elif c in ops: color_args[i - 1] = {"color": self.fg_colors[0]} color_args[i] = {} elif c == "#": color_args[i - 1] = {"color": self.fg_colors[3]} state = 6 elif c == '"': color_args[i - 1] = {"color": self.fg_colors[2]} state = 1 elif c == "'": color_args[i - 1] = {"color": self.fg_colors[2]} state = 2 else: cur_token = c color_args[i - 1] = {} state = 3 elif state == 1: # string via " if c == "\\": state = 4 elif c == "\"": color_args[i] = {} state = 0 elif state == 2: # string via ' if c == "\\": state = 5 elif c == "'": color_args[i] = {} state = 0 elif state == 3: # identifier if c in spaces + ops + "#\"'": finish_identifier() color_args[i] = {} state = 0 i -= 1 else: cur_token += c elif state == 4: # escape in " state = 1 elif state == 5: # escape in ' state = 2 elif state == 6: # comment pass if state == 3: finish_identifier() out = "" i = 0 while i < len(s): j = min([k for k in color_args.keys() if k > i]) out += self.color(s[i:j], **color_args[i]) i = j return out
[ "def", "py_syntax_highlight", "(", "self", ",", "s", ")", ":", "if", "not", "self", ".", "enable", ":", "return", "s", "state", "=", "0", "spaces", "=", "\" \\t\\n\"", "ops", "=", "\".,;:+-*/%&!=|(){}[]^<>\"", "i", "=", "0", "cur_token", "=", "\"\"", "color_args", "=", "{", "0", ":", "{", "}", ",", "len", "(", "s", ")", ":", "{", "}", "}", "# type: typing.Dict[int,typing.Dict[str]] # pos in s -> color kwargs", "def", "finish_identifier", "(", ")", ":", "\"\"\"\n Reset color to standard for current identifier.\n \"\"\"", "if", "cur_token", "in", "py_keywords", ":", "color_args", "[", "max", "(", "[", "k", "for", "k", "in", "color_args", ".", "keys", "(", ")", "if", "k", "<", "i", "]", ")", "]", "=", "{", "\"color\"", ":", "self", ".", "fg_colors", "[", "0", "]", "}", "while", "i", "<", "len", "(", "s", ")", ":", "c", "=", "s", "[", "i", "]", "i", "+=", "1", "if", "c", "==", "\"\\n\"", ":", "if", "state", "==", "3", ":", "finish_identifier", "(", ")", "color_args", "[", "i", "]", "=", "{", "}", "state", "=", "0", "elif", "state", "==", "0", ":", "if", "c", "in", "spaces", ":", "pass", "elif", "c", "in", "ops", ":", "color_args", "[", "i", "-", "1", "]", "=", "{", "\"color\"", ":", "self", ".", "fg_colors", "[", "0", "]", "}", "color_args", "[", "i", "]", "=", "{", "}", "elif", "c", "==", "\"#\"", ":", "color_args", "[", "i", "-", "1", "]", "=", "{", "\"color\"", ":", "self", ".", "fg_colors", "[", "3", "]", "}", "state", "=", "6", "elif", "c", "==", "'\"'", ":", "color_args", "[", "i", "-", "1", "]", "=", "{", "\"color\"", ":", "self", ".", "fg_colors", "[", "2", "]", "}", "state", "=", "1", "elif", "c", "==", "\"'\"", ":", "color_args", "[", "i", "-", "1", "]", "=", "{", "\"color\"", ":", "self", ".", "fg_colors", "[", "2", "]", "}", "state", "=", "2", "else", ":", "cur_token", "=", "c", "color_args", "[", "i", "-", "1", "]", "=", "{", "}", "state", "=", "3", "elif", "state", "==", "1", ":", "# string via \"", "if", "c", "==", "\"\\\\\"", ":", "state", "=", "4", "elif", "c", "==", "\"\\\"\"", ":", "color_args", "[", "i", "]", "=", "{", "}", "state", "=", "0", "elif", "state", "==", "2", ":", "# string via '", "if", "c", "==", "\"\\\\\"", ":", "state", "=", "5", "elif", "c", "==", "\"'\"", ":", "color_args", "[", "i", "]", "=", "{", "}", "state", "=", "0", "elif", "state", "==", "3", ":", "# identifier", "if", "c", "in", "spaces", "+", "ops", "+", "\"#\\\"'\"", ":", "finish_identifier", "(", ")", "color_args", "[", "i", "]", "=", "{", "}", "state", "=", "0", "i", "-=", "1", "else", ":", "cur_token", "+=", "c", "elif", "state", "==", "4", ":", "# escape in \"", "state", "=", "1", "elif", "state", "==", "5", ":", "# escape in '", "state", "=", "2", "elif", "state", "==", "6", ":", "# comment", "pass", "if", "state", "==", "3", ":", "finish_identifier", "(", ")", "out", "=", "\"\"", "i", "=", "0", "while", "i", "<", "len", "(", "s", ")", ":", "j", "=", "min", "(", "[", "k", "for", "k", "in", "color_args", ".", "keys", "(", ")", "if", "k", ">", "i", "]", ")", "out", "+=", "self", ".", "color", "(", "s", "[", "i", ":", "j", "]", ",", "*", "*", "color_args", "[", "i", "]", ")", "i", "=", "j", "return", "out" ]
:param str s: :rtype: str
[ ":", "param", "str", "s", ":", ":", "rtype", ":", "str" ]
python
train
32.493976
getfleety/coralillo
coralillo/fields.py
https://github.com/getfleety/coralillo/blob/9cac101738a0fa7c1106f129604c00ef703370e1/coralillo/fields.py#L68-L75
def recover(self, data, redis=None): ''' Retrieve this field's value from the database ''' value = data.get(self.name) if value is None or value == 'None': return None return str(value)
[ "def", "recover", "(", "self", ",", "data", ",", "redis", "=", "None", ")", ":", "value", "=", "data", ".", "get", "(", "self", ".", "name", ")", "if", "value", "is", "None", "or", "value", "==", "'None'", ":", "return", "None", "return", "str", "(", "value", ")" ]
Retrieve this field's value from the database
[ "Retrieve", "this", "field", "s", "value", "from", "the", "database" ]
python
train
28
dlon/html2markdown
html2markdown.py
https://github.com/dlon/html2markdown/blob/5946da7136e69a67b3dd37fd0e896be4d6a5b482/html2markdown.py#L163-L330
def _markdownify(tag, _listType=None, _blockQuote=False, _listIndex=1): """recursively converts a tag into markdown""" children = tag.find_all(recursive=False) if tag.name == '[document]': for child in children: _markdownify(child) return if tag.name not in _supportedTags or not _supportedAttrs(tag): if tag.name not in _inlineTags: tag.insert_before('\n\n') tag.insert_after('\n\n') else: _escapeCharacters(tag) for child in children: _markdownify(child) return if tag.name not in ('pre', 'code'): _escapeCharacters(tag) _breakRemNewlines(tag) if tag.name == 'p': if tag.string != None: if tag.string.strip() == u'': tag.string = u'\xa0' tag.unwrap() return if not _blockQuote: tag.insert_before('\n\n') tag.insert_after('\n\n') else: tag.insert_before('\n') tag.insert_after('\n') tag.unwrap() for child in children: _markdownify(child) elif tag.name == 'br': tag.string = ' \n' tag.unwrap() elif tag.name == 'img': alt = '' title = '' if tag.has_attr('alt'): alt = tag['alt'] if tag.has_attr('title') and tag['title']: title = ' "%s"' % tag['title'] tag.string = '![%s](%s%s)' % (alt, tag['src'], title) tag.unwrap() elif tag.name == 'hr': tag.string = '\n---\n' tag.unwrap() elif tag.name == 'pre': tag.insert_before('\n\n') tag.insert_after('\n\n') if tag.code: if not _supportedAttrs(tag.code): return for child in tag.code.find_all(recursive=False): if child.name != 'br': return # code block for br in tag.code.find_all('br'): br.string = '\n' br.unwrap() tag.code.unwrap() lines = unicode(tag).strip().split('\n') lines[0] = lines[0][5:] lines[-1] = lines[-1][:-6] if not lines[-1]: lines.pop() for i,line in enumerate(lines): line = line.replace(u'\xa0', ' ') lines[i] = ' %s' % line tag.replace_with(BeautifulSoup('\n'.join(lines), 'html.parser')) return elif tag.name == 'code': # inline code if children: return tag.insert_before('`` ') tag.insert_after(' ``') tag.unwrap() elif _recursivelyValid(tag): if tag.name == 'blockquote': # ! FIXME: hack tag.insert_before('<<<BLOCKQUOTE: ') tag.insert_after('>>>') tag.unwrap() for child in children: _markdownify(child, _blockQuote=True) return elif tag.name == 'a': # process children first for child in children: _markdownify(child) if not tag.has_attr('href'): return if tag.string != tag.get('href') or tag.has_attr('title'): title = '' if tag.has_attr('title'): title = ' "%s"' % tag['title'] tag.string = '[%s](%s%s)' % (BeautifulSoup(unicode(tag), 'html.parser').string, tag.get('href', ''), title) else: # ! FIXME: hack tag.string = '<<<FLOATING LINK: %s>>>' % tag.string tag.unwrap() return elif tag.name == 'h1': tag.insert_before('\n\n# ') tag.insert_after('\n\n') tag.unwrap() elif tag.name == 'h2': tag.insert_before('\n\n## ') tag.insert_after('\n\n') tag.unwrap() elif tag.name == 'h3': tag.insert_before('\n\n### ') tag.insert_after('\n\n') tag.unwrap() elif tag.name == 'h4': tag.insert_before('\n\n#### ') tag.insert_after('\n\n') tag.unwrap() elif tag.name == 'h5': tag.insert_before('\n\n##### ') tag.insert_after('\n\n') tag.unwrap() elif tag.name == 'h6': tag.insert_before('\n\n###### ') tag.insert_after('\n\n') tag.unwrap() elif tag.name in ('ul', 'ol'): tag.insert_before('\n\n') tag.insert_after('\n\n') tag.unwrap() for i, child in enumerate(children): _markdownify(child, _listType=tag.name, _listIndex=i+1) return elif tag.name == 'li': if not _listType: # <li> outside of list; ignore return if _listType == 'ul': tag.insert_before('* ') else: tag.insert_before('%d. ' % _listIndex) for child in children: _markdownify(child) for c in tag.contents: if type(c) != bs4.element.NavigableString: continue c.replace_with('\n '.join(c.split('\n'))) tag.insert_after('\n') tag.unwrap() return elif tag.name in ('strong','b'): tag.insert_before('__') tag.insert_after('__') tag.unwrap() elif tag.name in ('em','i'): tag.insert_before('_') tag.insert_after('_') tag.unwrap() for child in children: _markdownify(child)
[ "def", "_markdownify", "(", "tag", ",", "_listType", "=", "None", ",", "_blockQuote", "=", "False", ",", "_listIndex", "=", "1", ")", ":", "children", "=", "tag", ".", "find_all", "(", "recursive", "=", "False", ")", "if", "tag", ".", "name", "==", "'[document]'", ":", "for", "child", "in", "children", ":", "_markdownify", "(", "child", ")", "return", "if", "tag", ".", "name", "not", "in", "_supportedTags", "or", "not", "_supportedAttrs", "(", "tag", ")", ":", "if", "tag", ".", "name", "not", "in", "_inlineTags", ":", "tag", ".", "insert_before", "(", "'\\n\\n'", ")", "tag", ".", "insert_after", "(", "'\\n\\n'", ")", "else", ":", "_escapeCharacters", "(", "tag", ")", "for", "child", "in", "children", ":", "_markdownify", "(", "child", ")", "return", "if", "tag", ".", "name", "not", "in", "(", "'pre'", ",", "'code'", ")", ":", "_escapeCharacters", "(", "tag", ")", "_breakRemNewlines", "(", "tag", ")", "if", "tag", ".", "name", "==", "'p'", ":", "if", "tag", ".", "string", "!=", "None", ":", "if", "tag", ".", "string", ".", "strip", "(", ")", "==", "u''", ":", "tag", ".", "string", "=", "u'\\xa0'", "tag", ".", "unwrap", "(", ")", "return", "if", "not", "_blockQuote", ":", "tag", ".", "insert_before", "(", "'\\n\\n'", ")", "tag", ".", "insert_after", "(", "'\\n\\n'", ")", "else", ":", "tag", ".", "insert_before", "(", "'\\n'", ")", "tag", ".", "insert_after", "(", "'\\n'", ")", "tag", ".", "unwrap", "(", ")", "for", "child", "in", "children", ":", "_markdownify", "(", "child", ")", "elif", "tag", ".", "name", "==", "'br'", ":", "tag", ".", "string", "=", "' \\n'", "tag", ".", "unwrap", "(", ")", "elif", "tag", ".", "name", "==", "'img'", ":", "alt", "=", "''", "title", "=", "''", "if", "tag", ".", "has_attr", "(", "'alt'", ")", ":", "alt", "=", "tag", "[", "'alt'", "]", "if", "tag", ".", "has_attr", "(", "'title'", ")", "and", "tag", "[", "'title'", "]", ":", "title", "=", "' \"%s\"'", "%", "tag", "[", "'title'", "]", "tag", ".", "string", "=", "'![%s](%s%s)'", "%", "(", "alt", ",", "tag", "[", "'src'", "]", ",", "title", ")", "tag", ".", "unwrap", "(", ")", "elif", "tag", ".", "name", "==", "'hr'", ":", "tag", ".", "string", "=", "'\\n---\\n'", "tag", ".", "unwrap", "(", ")", "elif", "tag", ".", "name", "==", "'pre'", ":", "tag", ".", "insert_before", "(", "'\\n\\n'", ")", "tag", ".", "insert_after", "(", "'\\n\\n'", ")", "if", "tag", ".", "code", ":", "if", "not", "_supportedAttrs", "(", "tag", ".", "code", ")", ":", "return", "for", "child", "in", "tag", ".", "code", ".", "find_all", "(", "recursive", "=", "False", ")", ":", "if", "child", ".", "name", "!=", "'br'", ":", "return", "# code block", "for", "br", "in", "tag", ".", "code", ".", "find_all", "(", "'br'", ")", ":", "br", ".", "string", "=", "'\\n'", "br", ".", "unwrap", "(", ")", "tag", ".", "code", ".", "unwrap", "(", ")", "lines", "=", "unicode", "(", "tag", ")", ".", "strip", "(", ")", ".", "split", "(", "'\\n'", ")", "lines", "[", "0", "]", "=", "lines", "[", "0", "]", "[", "5", ":", "]", "lines", "[", "-", "1", "]", "=", "lines", "[", "-", "1", "]", "[", ":", "-", "6", "]", "if", "not", "lines", "[", "-", "1", "]", ":", "lines", ".", "pop", "(", ")", "for", "i", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "line", "=", "line", ".", "replace", "(", "u'\\xa0'", ",", "' '", ")", "lines", "[", "i", "]", "=", "' %s'", "%", "line", "tag", ".", "replace_with", "(", "BeautifulSoup", "(", "'\\n'", ".", "join", "(", "lines", ")", ",", "'html.parser'", ")", ")", "return", "elif", "tag", ".", "name", "==", "'code'", ":", "# 
inline code", "if", "children", ":", "return", "tag", ".", "insert_before", "(", "'`` '", ")", "tag", ".", "insert_after", "(", "' ``'", ")", "tag", ".", "unwrap", "(", ")", "elif", "_recursivelyValid", "(", "tag", ")", ":", "if", "tag", ".", "name", "==", "'blockquote'", ":", "# ! FIXME: hack", "tag", ".", "insert_before", "(", "'<<<BLOCKQUOTE: '", ")", "tag", ".", "insert_after", "(", "'>>>'", ")", "tag", ".", "unwrap", "(", ")", "for", "child", "in", "children", ":", "_markdownify", "(", "child", ",", "_blockQuote", "=", "True", ")", "return", "elif", "tag", ".", "name", "==", "'a'", ":", "# process children first", "for", "child", "in", "children", ":", "_markdownify", "(", "child", ")", "if", "not", "tag", ".", "has_attr", "(", "'href'", ")", ":", "return", "if", "tag", ".", "string", "!=", "tag", ".", "get", "(", "'href'", ")", "or", "tag", ".", "has_attr", "(", "'title'", ")", ":", "title", "=", "''", "if", "tag", ".", "has_attr", "(", "'title'", ")", ":", "title", "=", "' \"%s\"'", "%", "tag", "[", "'title'", "]", "tag", ".", "string", "=", "'[%s](%s%s)'", "%", "(", "BeautifulSoup", "(", "unicode", "(", "tag", ")", ",", "'html.parser'", ")", ".", "string", ",", "tag", ".", "get", "(", "'href'", ",", "''", ")", ",", "title", ")", "else", ":", "# ! FIXME: hack", "tag", ".", "string", "=", "'<<<FLOATING LINK: %s>>>'", "%", "tag", ".", "string", "tag", ".", "unwrap", "(", ")", "return", "elif", "tag", ".", "name", "==", "'h1'", ":", "tag", ".", "insert_before", "(", "'\\n\\n# '", ")", "tag", ".", "insert_after", "(", "'\\n\\n'", ")", "tag", ".", "unwrap", "(", ")", "elif", "tag", ".", "name", "==", "'h2'", ":", "tag", ".", "insert_before", "(", "'\\n\\n## '", ")", "tag", ".", "insert_after", "(", "'\\n\\n'", ")", "tag", ".", "unwrap", "(", ")", "elif", "tag", ".", "name", "==", "'h3'", ":", "tag", ".", "insert_before", "(", "'\\n\\n### '", ")", "tag", ".", "insert_after", "(", "'\\n\\n'", ")", "tag", ".", "unwrap", "(", ")", "elif", "tag", ".", "name", "==", "'h4'", ":", "tag", ".", "insert_before", "(", "'\\n\\n#### '", ")", "tag", ".", "insert_after", "(", "'\\n\\n'", ")", "tag", ".", "unwrap", "(", ")", "elif", "tag", ".", "name", "==", "'h5'", ":", "tag", ".", "insert_before", "(", "'\\n\\n##### '", ")", "tag", ".", "insert_after", "(", "'\\n\\n'", ")", "tag", ".", "unwrap", "(", ")", "elif", "tag", ".", "name", "==", "'h6'", ":", "tag", ".", "insert_before", "(", "'\\n\\n###### '", ")", "tag", ".", "insert_after", "(", "'\\n\\n'", ")", "tag", ".", "unwrap", "(", ")", "elif", "tag", ".", "name", "in", "(", "'ul'", ",", "'ol'", ")", ":", "tag", ".", "insert_before", "(", "'\\n\\n'", ")", "tag", ".", "insert_after", "(", "'\\n\\n'", ")", "tag", ".", "unwrap", "(", ")", "for", "i", ",", "child", "in", "enumerate", "(", "children", ")", ":", "_markdownify", "(", "child", ",", "_listType", "=", "tag", ".", "name", ",", "_listIndex", "=", "i", "+", "1", ")", "return", "elif", "tag", ".", "name", "==", "'li'", ":", "if", "not", "_listType", ":", "# <li> outside of list; ignore", "return", "if", "_listType", "==", "'ul'", ":", "tag", ".", "insert_before", "(", "'* '", ")", "else", ":", "tag", ".", "insert_before", "(", "'%d. 
'", "%", "_listIndex", ")", "for", "child", "in", "children", ":", "_markdownify", "(", "child", ")", "for", "c", "in", "tag", ".", "contents", ":", "if", "type", "(", "c", ")", "!=", "bs4", ".", "element", ".", "NavigableString", ":", "continue", "c", ".", "replace_with", "(", "'\\n '", ".", "join", "(", "c", ".", "split", "(", "'\\n'", ")", ")", ")", "tag", ".", "insert_after", "(", "'\\n'", ")", "tag", ".", "unwrap", "(", ")", "return", "elif", "tag", ".", "name", "in", "(", "'strong'", ",", "'b'", ")", ":", "tag", ".", "insert_before", "(", "'__'", ")", "tag", ".", "insert_after", "(", "'__'", ")", "tag", ".", "unwrap", "(", ")", "elif", "tag", ".", "name", "in", "(", "'em'", ",", "'i'", ")", ":", "tag", ".", "insert_before", "(", "'_'", ")", "tag", ".", "insert_after", "(", "'_'", ")", "tag", ".", "unwrap", "(", ")", "for", "child", "in", "children", ":", "_markdownify", "(", "child", ")" ]
recursively converts a tag into markdown
[ "recursively", "converts", "a", "tag", "into", "markdown" ]
python
train
25.035714
kmpm/nodemcu-uploader
nodemcu_uploader/uploader.py
https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L337-L353
def exec_file(self, path): """execute the lines in the local file 'path'""" filename = os.path.basename(path) log.info('Execute %s', filename) content = from_file(path).replace('\r', '').split('\n') res = '> ' for line in content: line = line.rstrip('\n') retlines = (res + self.__exchange(line)).splitlines() # Log all but the last line res = retlines.pop() for lin in retlines: log.info(lin) # last line log.info(res)
[ "def", "exec_file", "(", "self", ",", "path", ")", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "log", ".", "info", "(", "'Execute %s'", ",", "filename", ")", "content", "=", "from_file", "(", "path", ")", ".", "replace", "(", "'\\r'", ",", "''", ")", ".", "split", "(", "'\\n'", ")", "res", "=", "'> '", "for", "line", "in", "content", ":", "line", "=", "line", ".", "rstrip", "(", "'\\n'", ")", "retlines", "=", "(", "res", "+", "self", ".", "__exchange", "(", "line", ")", ")", ".", "splitlines", "(", ")", "# Log all but the last line", "res", "=", "retlines", ".", "pop", "(", ")", "for", "lin", "in", "retlines", ":", "log", ".", "info", "(", "lin", ")", "# last line", "log", ".", "info", "(", "res", ")" ]
execute the lines in the local file 'path'
[ "execute", "the", "lines", "in", "the", "local", "file", "path" ]
python
valid
32.058824
MultipedRobotics/pyxl320
pyxl320/ServoSerial.py
https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/ServoSerial.py#L268-L292
def sendPkt(self, pkt, retry=5, sleep_time=0.01): """ Sends a packet and waits for a return. If no return is given, then it resends the packet. If an error occurs, it also resends the packet. in: pkt - command packet to send to servo cnt - how many retries should this do? default = 5 out: array of packets """ for cnt in range(retry): self.serial.flushInput() self.write(pkt) # send packet to servo ans = self.read() # get return status packet if ans: # check for error and resend return ans else: # print('>> retry {} <<'.format(cnt)) time.sleep(sleep_time) return None
[ "def", "sendPkt", "(", "self", ",", "pkt", ",", "retry", "=", "5", ",", "sleep_time", "=", "0.01", ")", ":", "for", "cnt", "in", "range", "(", "retry", ")", ":", "self", ".", "serial", ".", "flushInput", "(", ")", "self", ".", "write", "(", "pkt", ")", "# send packet to servo", "ans", "=", "self", ".", "read", "(", ")", "# get return status packet", "if", "ans", ":", "# check for error and resend", "return", "ans", "else", ":", "# print('>> retry {} <<'.format(cnt))", "time", ".", "sleep", "(", "sleep_time", ")", "return", "None" ]
Sends a packet and waits for a return. If no return is given, then it resends the packet. If an error occurs, it also resends the packet. in: pkt - command packet to send to servo cnt - how many retries should this do? default = 5 out: array of packets
[ "Sends", "a", "packet", "and", "waits", "for", "a", "return", ".", "If", "no", "return", "is", "given", "then", "it", "resends", "the", "packet", ".", "If", "an", "error", "occurs", "it", "also", "resends", "the", "packet", "." ]
python
train
24.36
neurosnap/mudicom
mudicom/base.py
https://github.com/neurosnap/mudicom/blob/04011967007409f0c5253b4f308f53a7b0fc99c6/mudicom/base.py#L134-L167
def find(self, group=None, element=None, name=None, VR=None): """ Searches for data elements in the DICOM file given the filters supplied to this method. :param group: Hex decimal for the group of a DICOM element e.g. 0x002 :param element: Hex decimal for the element value of a DICOM element e.g. 0x0010 :param name: Name of the DICOM element, e.g. "Modality" :param VR: Value Representation of the DICOM element, e.g. "PN" """ results = self.read() if name is not None: def find_name(data_element): return data_element.name.lower() == name.lower() return filter(find_name, results) if group is not None: def find_group(data_element): return (data_element.tag['group'] == group or int(data_element.tag['group'], 16) == group) results = filter(find_group, results) if element is not None: def find_element(data_element): return (data_element.tag['element'] == element or int(data_element.tag['element'], 16) == element) results = filter(find_element, results) if VR is not None: def find_VR(data_element): return data_element.VR.lower() == VR.lower() results = filter(find_VR, results) return results
[ "def", "find", "(", "self", ",", "group", "=", "None", ",", "element", "=", "None", ",", "name", "=", "None", ",", "VR", "=", "None", ")", ":", "results", "=", "self", ".", "read", "(", ")", "if", "name", "is", "not", "None", ":", "def", "find_name", "(", "data_element", ")", ":", "return", "data_element", ".", "name", ".", "lower", "(", ")", "==", "name", ".", "lower", "(", ")", "return", "filter", "(", "find_name", ",", "results", ")", "if", "group", "is", "not", "None", ":", "def", "find_group", "(", "data_element", ")", ":", "return", "(", "data_element", ".", "tag", "[", "'group'", "]", "==", "group", "or", "int", "(", "data_element", ".", "tag", "[", "'group'", "]", ",", "16", ")", "==", "group", ")", "results", "=", "filter", "(", "find_group", ",", "results", ")", "if", "element", "is", "not", "None", ":", "def", "find_element", "(", "data_element", ")", ":", "return", "(", "data_element", ".", "tag", "[", "'element'", "]", "==", "element", "or", "int", "(", "data_element", ".", "tag", "[", "'element'", "]", ",", "16", ")", "==", "element", ")", "results", "=", "filter", "(", "find_element", ",", "results", ")", "if", "VR", "is", "not", "None", ":", "def", "find_VR", "(", "data_element", ")", ":", "return", "data_element", ".", "VR", ".", "lower", "(", ")", "==", "VR", ".", "lower", "(", ")", "results", "=", "filter", "(", "find_VR", ",", "results", ")", "return", "results" ]
Searches for data elements in the DICOM file given the filters supplied to this method. :param group: Hex decimal for the group of a DICOM element e.g. 0x002 :param element: Hex decimal for the element value of a DICOM element e.g. 0x0010 :param name: Name of the DICOM element, e.g. "Modality" :param VR: Value Representation of the DICOM element, e.g. "PN"
[ "Searches", "for", "data", "elements", "in", "the", "DICOM", "file", "given", "the", "filters", "supplied", "to", "this", "method", "." ]
python
train
40.735294
ska-sa/katversion
katversion/version.py
https://github.com/ska-sa/katversion/blob/f507e46e6c5610aec89a08dd480c9b3721da0f8a/katversion/version.py#L363-L381
def get_version_list(path=None, module=None): """Return the version information as a tuple. This uses get_version and breaks the string up. Would make more sense if the version was a tuple throughout katversion. """ major = 0 minor = 0 patch = '' # PEP440 calls this prerelease, postrelease or devrelease ver = get_version(path, module) if ver is not None: ver_segments = _sane_version_list(ver.split(".", 2)) major = ver_segments[0] minor = ver_segments[1] patch = ".".join(ver_segments[2:]) # Rejoin the . # Return None as first field, makes substitution easier in next step. return [None, major, minor, patch]
[ "def", "get_version_list", "(", "path", "=", "None", ",", "module", "=", "None", ")", ":", "major", "=", "0", "minor", "=", "0", "patch", "=", "''", "# PEP440 calls this prerelease, postrelease or devrelease", "ver", "=", "get_version", "(", "path", ",", "module", ")", "if", "ver", "is", "not", "None", ":", "ver_segments", "=", "_sane_version_list", "(", "ver", ".", "split", "(", "\".\"", ",", "2", ")", ")", "major", "=", "ver_segments", "[", "0", "]", "minor", "=", "ver_segments", "[", "1", "]", "patch", "=", "\".\"", ".", "join", "(", "ver_segments", "[", "2", ":", "]", ")", "# Rejoin the .", "# Return None as first field, makes substitution easier in next step.", "return", "[", "None", ",", "major", ",", "minor", ",", "patch", "]" ]
Return the version information as a tuple. This uses get_version and breaks the string up. Would make more sense if the version was a tuple throughout katversion.
[ "Return", "the", "version", "information", "as", "a", "tuple", "." ]
python
train
35.526316
Azure/azure-multiapi-storage-python
azure/multiapi/storage/v2016_05_31/file/fileservice.py
https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2016_05_31/file/fileservice.py#L1741-L1794
def _get_file(self, share_name, directory_name, file_name, start_range=None, end_range=None, validate_content=False, timeout=None, _context=None): ''' Downloads a file's content, metadata, and properties. You can specify a range if you don't need to download the file in its entirety. If no range is specified, the full file will be downloaded. See get_file_to_* for high level functions that handle the download of large files with automatic chunking and progress notifications. :param str share_name: Name of existing share. :param str directory_name: The path to the directory. :param str file_name: Name of existing file. :param int start_range: Start of byte range to use for downloading a section of the file. If no end_range is given, all bytes after the start_range will be downloaded. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of file. :param int end_range: End of byte range to use for downloading a section of the file. If end_range is given, start_range must be provided. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of file. :param bool validate_content: When this is set to True and specified together with the Range header, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. :param int timeout: The timeout parameter is expressed in seconds. :return: A File with content, properties, and metadata. :rtype: :class:`~azure.storage.file.models.File` ''' _validate_not_none('share_name', share_name) _validate_not_none('file_name', file_name) request = HTTPRequest() request.method = 'GET' request.host_locations = self._get_host_locations() request.path = _get_path(share_name, directory_name, file_name) request.query = { 'timeout': _int_to_str(timeout)} _validate_and_format_range_headers( request, start_range, end_range, start_range_required=False, end_range_required=False, check_content_md5=validate_content) return self._perform_request(request, _parse_file, [file_name, validate_content], operation_context=_context)
[ "def", "_get_file", "(", "self", ",", "share_name", ",", "directory_name", ",", "file_name", ",", "start_range", "=", "None", ",", "end_range", "=", "None", ",", "validate_content", "=", "False", ",", "timeout", "=", "None", ",", "_context", "=", "None", ")", ":", "_validate_not_none", "(", "'share_name'", ",", "share_name", ")", "_validate_not_none", "(", "'file_name'", ",", "file_name", ")", "request", "=", "HTTPRequest", "(", ")", "request", ".", "method", "=", "'GET'", "request", ".", "host_locations", "=", "self", ".", "_get_host_locations", "(", ")", "request", ".", "path", "=", "_get_path", "(", "share_name", ",", "directory_name", ",", "file_name", ")", "request", ".", "query", "=", "{", "'timeout'", ":", "_int_to_str", "(", "timeout", ")", "}", "_validate_and_format_range_headers", "(", "request", ",", "start_range", ",", "end_range", ",", "start_range_required", "=", "False", ",", "end_range_required", "=", "False", ",", "check_content_md5", "=", "validate_content", ")", "return", "self", ".", "_perform_request", "(", "request", ",", "_parse_file", ",", "[", "file_name", ",", "validate_content", "]", ",", "operation_context", "=", "_context", ")" ]
Downloads a file's content, metadata, and properties. You can specify a range if you don't need to download the file in its entirety. If no range is specified, the full file will be downloaded. See get_file_to_* for high level functions that handle the download of large files with automatic chunking and progress notifications. :param str share_name: Name of existing share. :param str directory_name: The path to the directory. :param str file_name: Name of existing file. :param int start_range: Start of byte range to use for downloading a section of the file. If no end_range is given, all bytes after the start_range will be downloaded. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of file. :param int end_range: End of byte range to use for downloading a section of the file. If end_range is given, start_range must be provided. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of file. :param bool validate_content: When this is set to True and specified together with the Range header, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. :param int timeout: The timeout parameter is expressed in seconds. :return: A File with content, properties, and metadata. :rtype: :class:`~azure.storage.file.models.File`
[ "Downloads", "a", "file", "s", "content", "metadata", "and", "properties", ".", "You", "can", "specify", "a", "range", "if", "you", "don", "t", "need", "to", "download", "the", "file", "in", "its", "entirety", ".", "If", "no", "range", "is", "specified", "the", "full", "file", "will", "be", "downloaded", "." ]
python
train
49.074074
cloud9ers/gurumate
environment/lib/python2.7/site-packages/psutil/_common.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/psutil/_common.py#L65-L72
def memoize(f): """A simple memoize decorator for functions.""" cache= {} def memf(*x): if x not in cache: cache[x] = f(*x) return cache[x] return memf
[ "def", "memoize", "(", "f", ")", ":", "cache", "=", "{", "}", "def", "memf", "(", "*", "x", ")", ":", "if", "x", "not", "in", "cache", ":", "cache", "[", "x", "]", "=", "f", "(", "*", "x", ")", "return", "cache", "[", "x", "]", "return", "memf" ]
A simple memoize decorator for functions.
[ "A", "simple", "memoize", "decorator", "for", "functions", "." ]
python
test
23.5
portfors-lab/sparkle
sparkle/gui/stim/component_detail.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/component_detail.py#L107-L117
def setCheckedDetails(self, checked): """Sets which components are checked :param checked: dictionary of stimtype:list<attribute names> for which components and their attributes should be checked :type checked: dict """ layout = self.layout() for i in range(layout.count()): w = layout.itemAt(i).widget() if w.stimType in checked: w.setChecked(checked[w.stimType])
[ "def", "setCheckedDetails", "(", "self", ",", "checked", ")", ":", "layout", "=", "self", ".", "layout", "(", ")", "for", "i", "in", "range", "(", "layout", ".", "count", "(", ")", ")", ":", "w", "=", "layout", ".", "itemAt", "(", "i", ")", ".", "widget", "(", ")", "if", "w", ".", "stimType", "in", "checked", ":", "w", ".", "setChecked", "(", "checked", "[", "w", ".", "stimType", "]", ")" ]
Sets which components are checked :param checked: dictionary of stimtype:list<attribute names> for which components and their attributes should be checked :type checked: dict
[ "Sets", "which", "components", "are", "checked" ]
python
train
40.272727
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_fc_auth.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_fc_auth.py#L82-L97
def secpolicy_sa_secpolicy_defined_policy_policies_member_entry_member(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") secpolicy_sa = ET.SubElement(config, "secpolicy-sa", xmlns="urn:brocade.com:mgmt:brocade-fc-auth") secpolicy = ET.SubElement(secpolicy_sa, "secpolicy") defined_policy = ET.SubElement(secpolicy, "defined-policy") policies = ET.SubElement(defined_policy, "policies") policy_key = ET.SubElement(policies, "policy") policy_key.text = kwargs.pop('policy') member_entry = ET.SubElement(policies, "member-entry") member = ET.SubElement(member_entry, "member") member.text = kwargs.pop('member') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "secpolicy_sa_secpolicy_defined_policy_policies_member_entry_member", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "secpolicy_sa", "=", "ET", ".", "SubElement", "(", "config", ",", "\"secpolicy-sa\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-fc-auth\"", ")", "secpolicy", "=", "ET", ".", "SubElement", "(", "secpolicy_sa", ",", "\"secpolicy\"", ")", "defined_policy", "=", "ET", ".", "SubElement", "(", "secpolicy", ",", "\"defined-policy\"", ")", "policies", "=", "ET", ".", "SubElement", "(", "defined_policy", ",", "\"policies\"", ")", "policy_key", "=", "ET", ".", "SubElement", "(", "policies", ",", "\"policy\"", ")", "policy_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'policy'", ")", "member_entry", "=", "ET", ".", "SubElement", "(", "policies", ",", "\"member-entry\"", ")", "member", "=", "ET", ".", "SubElement", "(", "member_entry", ",", "\"member\"", ")", "member", ".", "text", "=", "kwargs", ".", "pop", "(", "'member'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
50.25
Duke-GCB/DukeDSClient
ddsc/core/ddsapi.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/ddsapi.py#L907-L915
def create_was_invalidated_by_relation(self, activity_id, entity_kind, entity_id): """ Create a was invalidated by relationship between an activity and a entity(file). :param activity_id: str: uuid of the activity :param entity_kind: str: kind of entity('dds-file') :param entity_id: str: uuid of the entity :return: requests.Response containing the successful result """ return self._create_activity_relation(activity_id, entity_kind, entity_id, ActivityRelationTypes.WAS_INVALIDATED_BY)
[ "def", "create_was_invalidated_by_relation", "(", "self", ",", "activity_id", ",", "entity_kind", ",", "entity_id", ")", ":", "return", "self", ".", "_create_activity_relation", "(", "activity_id", ",", "entity_kind", ",", "entity_id", ",", "ActivityRelationTypes", ".", "WAS_INVALIDATED_BY", ")" ]
Create a was invalidated by relationship between an activity and a entity(file). :param activity_id: str: uuid of the activity :param entity_kind: str: kind of entity('dds-file') :param entity_id: str: uuid of the entity :return: requests.Response containing the successful result
[ "Create", "a", "was", "invalidated", "by", "relationship", "between", "an", "activity", "and", "a", "entity", "(", "file", ")", ".", ":", "param", "activity_id", ":", "str", ":", "uuid", "of", "the", "activity", ":", "param", "entity_kind", ":", "str", ":", "kind", "of", "entity", "(", "dds", "-", "file", ")", ":", "param", "entity_id", ":", "str", ":", "uuid", "of", "the", "entity", ":", "return", ":", "requests", ".", "Response", "containing", "the", "successful", "result" ]
python
train
60.444444
modin-project/modin
modin/engines/ray/generic/io.py
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/ray/generic/io.py#L718-L763
def read_sql(cls, sql, con, index_col=None, **kwargs): """Reads a SQL query or database table into a DataFrame. Args: sql: string or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name. con: SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2 connection (fallback mode) index_col: Column(s) to set as index(MultiIndex). kwargs: Pass into pandas.read_sql function. """ if cls.read_sql_remote_task is None: return super(RayIO, cls).read_sql(sql, con, index_col=index_col, **kwargs) row_cnt_query = "SELECT COUNT(*) FROM ({})".format(sql) row_cnt = pandas.read_sql(row_cnt_query, con).squeeze() cols_names_df = pandas.read_sql( "SELECT * FROM ({}) LIMIT 0".format(sql), con, index_col=index_col ) cols_names = cols_names_df.columns num_parts = cls.frame_mgr_cls._compute_num_partitions() partition_ids = [] index_ids = [] limit = math.ceil(row_cnt / num_parts) for part in range(num_parts): offset = part * limit query = "SELECT * FROM ({}) LIMIT {} OFFSET {}".format(sql, limit, offset) partition_id = cls.read_sql_remote_task._remote( args=(num_parts, query, con, index_col, kwargs), num_return_vals=num_parts + 1, ) partition_ids.append( [cls.frame_partition_cls(obj) for obj in partition_id[:-1]] ) index_ids.append(partition_id[-1]) if index_col is None: # sum all lens returned from partitions index_lens = ray.get(index_ids) new_index = pandas.RangeIndex(sum(index_lens)) else: # concat index returned from partitions index_lst = [x for part_index in ray.get(index_ids) for x in part_index] new_index = pandas.Index(index_lst).set_names(index_col) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(np.array(partition_ids)), new_index, cols_names ) return new_query_compiler
[ "def", "read_sql", "(", "cls", ",", "sql", ",", "con", ",", "index_col", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "cls", ".", "read_sql_remote_task", "is", "None", ":", "return", "super", "(", "RayIO", ",", "cls", ")", ".", "read_sql", "(", "sql", ",", "con", ",", "index_col", "=", "index_col", ",", "*", "*", "kwargs", ")", "row_cnt_query", "=", "\"SELECT COUNT(*) FROM ({})\"", ".", "format", "(", "sql", ")", "row_cnt", "=", "pandas", ".", "read_sql", "(", "row_cnt_query", ",", "con", ")", ".", "squeeze", "(", ")", "cols_names_df", "=", "pandas", ".", "read_sql", "(", "\"SELECT * FROM ({}) LIMIT 0\"", ".", "format", "(", "sql", ")", ",", "con", ",", "index_col", "=", "index_col", ")", "cols_names", "=", "cols_names_df", ".", "columns", "num_parts", "=", "cls", ".", "frame_mgr_cls", ".", "_compute_num_partitions", "(", ")", "partition_ids", "=", "[", "]", "index_ids", "=", "[", "]", "limit", "=", "math", ".", "ceil", "(", "row_cnt", "/", "num_parts", ")", "for", "part", "in", "range", "(", "num_parts", ")", ":", "offset", "=", "part", "*", "limit", "query", "=", "\"SELECT * FROM ({}) LIMIT {} OFFSET {}\"", ".", "format", "(", "sql", ",", "limit", ",", "offset", ")", "partition_id", "=", "cls", ".", "read_sql_remote_task", ".", "_remote", "(", "args", "=", "(", "num_parts", ",", "query", ",", "con", ",", "index_col", ",", "kwargs", ")", ",", "num_return_vals", "=", "num_parts", "+", "1", ",", ")", "partition_ids", ".", "append", "(", "[", "cls", ".", "frame_partition_cls", "(", "obj", ")", "for", "obj", "in", "partition_id", "[", ":", "-", "1", "]", "]", ")", "index_ids", ".", "append", "(", "partition_id", "[", "-", "1", "]", ")", "if", "index_col", "is", "None", ":", "# sum all lens returned from partitions", "index_lens", "=", "ray", ".", "get", "(", "index_ids", ")", "new_index", "=", "pandas", ".", "RangeIndex", "(", "sum", "(", "index_lens", ")", ")", "else", ":", "# concat index returned from partitions", "index_lst", "=", "[", "x", "for", "part_index", "in", "ray", ".", "get", "(", "index_ids", ")", "for", "x", "in", "part_index", "]", "new_index", "=", "pandas", ".", "Index", "(", "index_lst", ")", ".", "set_names", "(", "index_col", ")", "new_query_compiler", "=", "cls", ".", "query_compiler_cls", "(", "cls", ".", "frame_mgr_cls", "(", "np", ".", "array", "(", "partition_ids", ")", ")", ",", "new_index", ",", "cols_names", ")", "return", "new_query_compiler" ]
Reads a SQL query or database table into a DataFrame. Args: sql: string or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name. con: SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2 connection (fallback mode) index_col: Column(s) to set as index(MultiIndex). kwargs: Pass into pandas.read_sql function.
[ "Reads", "a", "SQL", "query", "or", "database", "table", "into", "a", "DataFrame", ".", "Args", ":", "sql", ":", "string", "or", "SQLAlchemy", "Selectable", "(", "select", "or", "text", "object", ")", "SQL", "query", "to", "be", "executed", "or", "a", "table", "name", ".", "con", ":", "SQLAlchemy", "connectable", "(", "engine", "/", "connection", ")", "or", "database", "string", "URI", "or", "DBAPI2", "connection", "(", "fallback", "mode", ")", "index_col", ":", "Column", "(", "s", ")", "to", "set", "as", "index", "(", "MultiIndex", ")", ".", "kwargs", ":", "Pass", "into", "pandas", ".", "read_sql", "function", "." ]
python
train
46.891304
mitsei/dlkit
dlkit/json_/repository/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L3551-L3592
def get_compositions_by_query(self, composition_query): """Gets a list of ``Compositions`` matching the given composition query. arg: composition_query (osid.repository.CompositionQuery): the composition query return: (osid.repository.CompositionList) - the returned ``CompositionList`` raise: NullArgument - ``composition_query`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``composition_query`` is not of this service *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceQuerySession.get_resources_by_query and_list = list() or_list = list() for term in composition_query._query_terms: if '$in' in composition_query._query_terms[term] and '$nin' in composition_query._query_terms[term]: and_list.append( {'$or': [{term: {'$in': composition_query._query_terms[term]['$in']}}, {term: {'$nin': composition_query._query_terms[term]['$nin']}}]}) else: and_list.append({term: composition_query._query_terms[term]}) for term in composition_query._keyword_terms: or_list.append({term: composition_query._keyword_terms[term]}) if or_list: and_list.append({'$or': or_list}) view_filter = self._view_filter() if view_filter: and_list.append(view_filter) if and_list: query_terms = {'$and': and_list} collection = JSONClientValidated('repository', collection='Composition', runtime=self._runtime) result = collection.find(query_terms).sort('_id', DESCENDING) else: result = [] return objects.CompositionList(result, runtime=self._runtime, proxy=self._proxy)
[ "def", "get_compositions_by_query", "(", "self", ",", "composition_query", ")", ":", "# Implemented from template for", "# osid.resource.ResourceQuerySession.get_resources_by_query", "and_list", "=", "list", "(", ")", "or_list", "=", "list", "(", ")", "for", "term", "in", "composition_query", ".", "_query_terms", ":", "if", "'$in'", "in", "composition_query", ".", "_query_terms", "[", "term", "]", "and", "'$nin'", "in", "composition_query", ".", "_query_terms", "[", "term", "]", ":", "and_list", ".", "append", "(", "{", "'$or'", ":", "[", "{", "term", ":", "{", "'$in'", ":", "composition_query", ".", "_query_terms", "[", "term", "]", "[", "'$in'", "]", "}", "}", ",", "{", "term", ":", "{", "'$nin'", ":", "composition_query", ".", "_query_terms", "[", "term", "]", "[", "'$nin'", "]", "}", "}", "]", "}", ")", "else", ":", "and_list", ".", "append", "(", "{", "term", ":", "composition_query", ".", "_query_terms", "[", "term", "]", "}", ")", "for", "term", "in", "composition_query", ".", "_keyword_terms", ":", "or_list", ".", "append", "(", "{", "term", ":", "composition_query", ".", "_keyword_terms", "[", "term", "]", "}", ")", "if", "or_list", ":", "and_list", ".", "append", "(", "{", "'$or'", ":", "or_list", "}", ")", "view_filter", "=", "self", ".", "_view_filter", "(", ")", "if", "view_filter", ":", "and_list", ".", "append", "(", "view_filter", ")", "if", "and_list", ":", "query_terms", "=", "{", "'$and'", ":", "and_list", "}", "collection", "=", "JSONClientValidated", "(", "'repository'", ",", "collection", "=", "'Composition'", ",", "runtime", "=", "self", ".", "_runtime", ")", "result", "=", "collection", ".", "find", "(", "query_terms", ")", ".", "sort", "(", "'_id'", ",", "DESCENDING", ")", "else", ":", "result", "=", "[", "]", "return", "objects", ".", "CompositionList", "(", "result", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")" ]
Gets a list of ``Compositions`` matching the given composition query. arg: composition_query (osid.repository.CompositionQuery): the composition query return: (osid.repository.CompositionList) - the returned ``CompositionList`` raise: NullArgument - ``composition_query`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``composition_query`` is not of this service *compliance: mandatory -- This method must be implemented.*
[ "Gets", "a", "list", "of", "Compositions", "matching", "the", "given", "composition", "query", "." ]
python
train
49.190476
learningequality/ricecooker
ricecooker/utils/metadata_provider.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L844-L861
def generate_templates(self, exercise_questions=False): """ Create empty .csv files with the right headers and place them in the Will place files as siblings of directory `channeldir`. """ self.generate_template(channeldir=self.channeldir, filename=self.channelinfo, header=CHANNEL_INFO_HEADER) self.generate_template(channeldir=self.channeldir, filename=self.contentinfo, header=CONTENT_INFO_HEADER) if exercise_questions: self.generate_template(channeldir=self.channeldir, filename=self.exercisesinfo, header=EXERCISE_INFO_HEADER) self.generate_template(channeldir=self.channeldir, filename=self.questionsinfo, header=EXERCISE_QUESTIONS_INFO_HEADER)
[ "def", "generate_templates", "(", "self", ",", "exercise_questions", "=", "False", ")", ":", "self", ".", "generate_template", "(", "channeldir", "=", "self", ".", "channeldir", ",", "filename", "=", "self", ".", "channelinfo", ",", "header", "=", "CHANNEL_INFO_HEADER", ")", "self", ".", "generate_template", "(", "channeldir", "=", "self", ".", "channeldir", ",", "filename", "=", "self", ".", "contentinfo", ",", "header", "=", "CONTENT_INFO_HEADER", ")", "if", "exercise_questions", ":", "self", ".", "generate_template", "(", "channeldir", "=", "self", ".", "channeldir", ",", "filename", "=", "self", ".", "exercisesinfo", ",", "header", "=", "EXERCISE_INFO_HEADER", ")", "self", ".", "generate_template", "(", "channeldir", "=", "self", ".", "channeldir", ",", "filename", "=", "self", ".", "questionsinfo", ",", "header", "=", "EXERCISE_QUESTIONS_INFO_HEADER", ")" ]
Create empty .csv files with the right headers and place them in the Will place files as siblings of directory `channeldir`.
[ "Create", "empty", ".", "csv", "files", "with", "the", "right", "headers", "and", "place", "them", "in", "the", "Will", "place", "files", "as", "siblings", "of", "directory", "channeldir", "." ]
python
train
54.333333
aiogram/aiogram
aiogram/dispatcher/filters/factory.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/dispatcher/filters/factory.py#L16-L29
def bind(self, callback: typing.Union[typing.Callable, AbstractFilter], validator: typing.Optional[typing.Callable] = None, event_handlers: typing.Optional[typing.List[Handler]] = None, exclude_event_handlers: typing.Optional[typing.Iterable[Handler]] = None): """ Register filter :param callback: callable or subclass of :obj:`AbstractFilter` :param validator: custom validator. :param event_handlers: list of instances of :obj:`Handler` :param exclude_event_handlers: list of excluded event handlers (:obj:`Handler`) """ record = FilterRecord(callback, validator, event_handlers, exclude_event_handlers) self._registered.append(record)
[ "def", "bind", "(", "self", ",", "callback", ":", "typing", ".", "Union", "[", "typing", ".", "Callable", ",", "AbstractFilter", "]", ",", "validator", ":", "typing", ".", "Optional", "[", "typing", ".", "Callable", "]", "=", "None", ",", "event_handlers", ":", "typing", ".", "Optional", "[", "typing", ".", "List", "[", "Handler", "]", "]", "=", "None", ",", "exclude_event_handlers", ":", "typing", ".", "Optional", "[", "typing", ".", "Iterable", "[", "Handler", "]", "]", "=", "None", ")", ":", "record", "=", "FilterRecord", "(", "callback", ",", "validator", ",", "event_handlers", ",", "exclude_event_handlers", ")", "self", ".", "_registered", ".", "append", "(", "record", ")" ]
Register filter :param callback: callable or subclass of :obj:`AbstractFilter` :param validator: custom validator. :param event_handlers: list of instances of :obj:`Handler` :param exclude_event_handlers: list of excluded event handlers (:obj:`Handler`)
[ "Register", "filter" ]
python
train
52.571429
assamite/creamas
creamas/rules/rule.py
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/rules/rule.py#L153-L167
def add_subrule(self, subrule, weight): """Add subrule to the rule. :param subrule: Subrule to add to this rule, an instance of :class:`Rule` or :class:`RuleLeaf`. :param float weight: Weight of the subrule """ if not issubclass(subrule.__class__, (Rule, RuleLeaf)): raise TypeError("Rule's class must be (subclass of) {} or {}, got " "{}.".format(Rule, RuleLeaf, subrule.__class__)) self.__domains = set.union(self.__domains, subrule.domains) self.R.append(subrule) self.W.append(weight)
[ "def", "add_subrule", "(", "self", ",", "subrule", ",", "weight", ")", ":", "if", "not", "issubclass", "(", "subrule", ".", "__class__", ",", "(", "Rule", ",", "RuleLeaf", ")", ")", ":", "raise", "TypeError", "(", "\"Rule's class must be (subclass of) {} or {}, got \"", "\"{}.\"", ".", "format", "(", "Rule", ",", "RuleLeaf", ",", "subrule", ".", "__class__", ")", ")", "self", ".", "__domains", "=", "set", ".", "union", "(", "self", ".", "__domains", ",", "subrule", ".", "domains", ")", "self", ".", "R", ".", "append", "(", "subrule", ")", "self", ".", "W", ".", "append", "(", "weight", ")" ]
Add subrule to the rule. :param subrule: Subrule to add to this rule, an instance of :class:`Rule` or :class:`RuleLeaf`. :param float weight: Weight of the subrule
[ "Add", "subrule", "to", "the", "rule", "." ]
python
train
40.266667
justquick/django-native-tags
native_tags/registry.py
https://github.com/justquick/django-native-tags/blob/d40b976ee1cb13faeb04f0dedf02933d4274abf2/native_tags/registry.py#L85-L98
def register(self, bucket, name_or_func, func=None): """ Add a function to the registry by name """ assert bucket in self, 'Bucket %s is unknown' % bucket if func is None and hasattr(name_or_func, '__name__'): name = name_or_func.__name__ func = name_or_func elif func: name = name_or_func if name in self[bucket]: raise AlreadyRegistered('The function %s is already registered' % name) self[bucket][name] = func
[ "def", "register", "(", "self", ",", "bucket", ",", "name_or_func", ",", "func", "=", "None", ")", ":", "assert", "bucket", "in", "self", ",", "'Bucket %s is unknown'", "%", "bucket", "if", "func", "is", "None", "and", "hasattr", "(", "name_or_func", ",", "'__name__'", ")", ":", "name", "=", "name_or_func", ".", "__name__", "func", "=", "name_or_func", "elif", "func", ":", "name", "=", "name_or_func", "if", "name", "in", "self", "[", "bucket", "]", ":", "raise", "AlreadyRegistered", "(", "'The function %s is already registered'", "%", "name", ")", "self", "[", "bucket", "]", "[", "name", "]", "=", "func" ]
Add a function to the registry by name
[ "Add", "a", "function", "to", "the", "registry", "by", "name" ]
python
train
36.571429
blag/django-secure-mail
secure_mail/forms.py
https://github.com/blag/django-secure-mail/blob/52987b6ce829e6de2dc8ab38ed3190bc2752b341/secure_mail/forms.py#L14-L23
def clean_key(self): """ Validate the key contains an email address. """ key = self.cleaned_data["key"] gpg = get_gpg() result = gpg.import_keys(key) if result.count == 0: raise forms.ValidationError(_("Invalid Key")) return key
[ "def", "clean_key", "(", "self", ")", ":", "key", "=", "self", ".", "cleaned_data", "[", "\"key\"", "]", "gpg", "=", "get_gpg", "(", ")", "result", "=", "gpg", ".", "import_keys", "(", "key", ")", "if", "result", ".", "count", "==", "0", ":", "raise", "forms", ".", "ValidationError", "(", "_", "(", "\"Invalid Key\"", ")", ")", "return", "key" ]
Validate the key contains an email address.
[ "Validate", "the", "key", "contains", "an", "email", "address", "." ]
python
train
29.5
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/breakpoint.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/breakpoint.py#L4247-L4270
def watch_variable(self, tid, address, size, action = None): """ Sets a hardware breakpoint at the given thread, address and size. @see: L{dont_watch_variable} @type tid: int @param tid: Thread global ID. @type address: int @param address: Memory address of variable to watch. @type size: int @param size: Size of variable to watch. The only supported sizes are: byte (1), word (2), dword (4) and qword (8). @type action: function @param action: (Optional) Action callback function. See L{define_hardware_breakpoint} for more details. """ bp = self.__set_variable_watch(tid, address, size, action) if not bp.is_enabled(): self.enable_hardware_breakpoint(tid, address)
[ "def", "watch_variable", "(", "self", ",", "tid", ",", "address", ",", "size", ",", "action", "=", "None", ")", ":", "bp", "=", "self", ".", "__set_variable_watch", "(", "tid", ",", "address", ",", "size", ",", "action", ")", "if", "not", "bp", ".", "is_enabled", "(", ")", ":", "self", ".", "enable_hardware_breakpoint", "(", "tid", ",", "address", ")" ]
Sets a hardware breakpoint at the given thread, address and size. @see: L{dont_watch_variable} @type tid: int @param tid: Thread global ID. @type address: int @param address: Memory address of variable to watch. @type size: int @param size: Size of variable to watch. The only supported sizes are: byte (1), word (2), dword (4) and qword (8). @type action: function @param action: (Optional) Action callback function. See L{define_hardware_breakpoint} for more details.
[ "Sets", "a", "hardware", "breakpoint", "at", "the", "given", "thread", "address", "and", "size", "." ]
python
train
33.416667
DataDog/integrations-core
vsphere/datadog_checks/vsphere/metadata_cache.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/vsphere/datadog_checks/vsphere/metadata_cache.py#L47-L52
def set_metric_ids(self, key, metric_ids): """ Store the list of metric IDs we will want to collect for the given instance key """ with self._lock: self._metric_ids[key] = metric_ids
[ "def", "set_metric_ids", "(", "self", ",", "key", ",", "metric_ids", ")", ":", "with", "self", ".", "_lock", ":", "self", ".", "_metric_ids", "[", "key", "]", "=", "metric_ids" ]
Store the list of metric IDs we will want to collect for the given instance key
[ "Store", "the", "list", "of", "metric", "IDs", "we", "will", "want", "to", "collect", "for", "the", "given", "instance", "key" ]
python
train
36.833333
ronaldguillen/wave
wave/fields.py
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/fields.py#L917-L940
def to_internal_value(self, data): """ Validate that the input is a decimal number and return a Decimal instance. """ data = smart_text(data).strip() if len(data) > self.MAX_STRING_LENGTH: self.fail('max_string_length') try: value = decimal.Decimal(data) except decimal.DecimalException: self.fail('invalid') # Check for NaN. It is the only value that isn't equal to itself, # so we can use this to identify NaN values. if value != value: self.fail('invalid') # Check for infinity and negative infinity. if value in (decimal.Decimal('Inf'), decimal.Decimal('-Inf')): self.fail('invalid') return self.validate_precision(value)
[ "def", "to_internal_value", "(", "self", ",", "data", ")", ":", "data", "=", "smart_text", "(", "data", ")", ".", "strip", "(", ")", "if", "len", "(", "data", ")", ">", "self", ".", "MAX_STRING_LENGTH", ":", "self", ".", "fail", "(", "'max_string_length'", ")", "try", ":", "value", "=", "decimal", ".", "Decimal", "(", "data", ")", "except", "decimal", ".", "DecimalException", ":", "self", ".", "fail", "(", "'invalid'", ")", "# Check for NaN. It is the only value that isn't equal to itself,", "# so we can use this to identify NaN values.", "if", "value", "!=", "value", ":", "self", ".", "fail", "(", "'invalid'", ")", "# Check for infinity and negative infinity.", "if", "value", "in", "(", "decimal", ".", "Decimal", "(", "'Inf'", ")", ",", "decimal", ".", "Decimal", "(", "'-Inf'", ")", ")", ":", "self", ".", "fail", "(", "'invalid'", ")", "return", "self", ".", "validate_precision", "(", "value", ")" ]
Validate that the input is a decimal number and return a Decimal instance.
[ "Validate", "that", "the", "input", "is", "a", "decimal", "number", "and", "return", "a", "Decimal", "instance", "." ]
python
train
32.416667
FujiMakoto/AgentML
agentml/__init__.py
https://github.com/FujiMakoto/AgentML/blob/c8cb64b460d876666bf29ea2c682189874c7c403/agentml/__init__.py#L375-L387
def set_limit(self, identifier, expires_at, blocking=False): """ Set a new global trigger or response limit :param identifier: The Trigger or Response object :type identifier: parser.trigger.Trigger or parser.trigger.response.Response :param expires_at: The limit expiration as a Unix timestamp :type expires_at: float :param blocking: When True and a limit is triggered, no other Trigger or Response's will be attempted :type blocking: bool """ self._limits[identifier] = (expires_at, blocking)
[ "def", "set_limit", "(", "self", ",", "identifier", ",", "expires_at", ",", "blocking", "=", "False", ")", ":", "self", ".", "_limits", "[", "identifier", "]", "=", "(", "expires_at", ",", "blocking", ")" ]
Set a new global trigger or response limit :param identifier: The Trigger or Response object :type identifier: parser.trigger.Trigger or parser.trigger.response.Response :param expires_at: The limit expiration as a Unix timestamp :type expires_at: float :param blocking: When True and a limit is triggered, no other Trigger or Response's will be attempted :type blocking: bool
[ "Set", "a", "new", "global", "trigger", "or", "response", "limit", ":", "param", "identifier", ":", "The", "Trigger", "or", "Response", "object", ":", "type", "identifier", ":", "parser", ".", "trigger", ".", "Trigger", "or", "parser", ".", "trigger", ".", "response", ".", "Response" ]
python
train
43.692308
facelessuser/pyspelling
pyspelling/filters/cpp.py
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/cpp.py#L228-L245
def evaluate_inline(self, groups): """Evaluate inline comments on their own lines.""" # Consecutive lines with only comments with same leading whitespace # will be captured as a single block. if self.lines: if ( self.group_comments and self.line_num == self.prev_line + 1 and groups['leading_space'] == self.leading ): self.line_comments[-1][0] += '\n' + groups['line'][2:].replace('\\\n', '') else: self.line_comments.append( [groups['line'][2:].replace('\\\n', ''), self.line_num, self.current_encoding] ) self.leading = groups['leading_space'] self.prev_line = self.line_num
[ "def", "evaluate_inline", "(", "self", ",", "groups", ")", ":", "# Consecutive lines with only comments with same leading whitespace", "# will be captured as a single block.", "if", "self", ".", "lines", ":", "if", "(", "self", ".", "group_comments", "and", "self", ".", "line_num", "==", "self", ".", "prev_line", "+", "1", "and", "groups", "[", "'leading_space'", "]", "==", "self", ".", "leading", ")", ":", "self", ".", "line_comments", "[", "-", "1", "]", "[", "0", "]", "+=", "'\\n'", "+", "groups", "[", "'line'", "]", "[", "2", ":", "]", ".", "replace", "(", "'\\\\\\n'", ",", "''", ")", "else", ":", "self", ".", "line_comments", ".", "append", "(", "[", "groups", "[", "'line'", "]", "[", "2", ":", "]", ".", "replace", "(", "'\\\\\\n'", ",", "''", ")", ",", "self", ".", "line_num", ",", "self", ".", "current_encoding", "]", ")", "self", ".", "leading", "=", "groups", "[", "'leading_space'", "]", "self", ".", "prev_line", "=", "self", ".", "line_num" ]
Evaluate inline comments on their own lines.
[ "Evaluate", "inline", "comments", "on", "their", "own", "lines", "." ]
python
train
42.722222
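A simplified, standalone sketch of the grouping rule described in the record above: consecutive inline comment lines with identical leading whitespace collapse into one block. This is an illustration only, not the pyspelling implementation (it uses plain '//' strings instead of the filter's regex groups):

# Hypothetical input lines, purely for illustration.
lines = [
    "    // first part",
    "    // second part",
    "int x;",
    "        // separate block",
]
blocks, prev_indent, prev_was_comment = [], None, False
for line in lines:
    stripped = line.lstrip()
    indent = line[: len(line) - len(stripped)]
    if stripped.startswith("//"):
        text = stripped[2:].strip()
        if prev_was_comment and indent == prev_indent:
            blocks[-1] += "\n" + text      # same indentation: extend the block
        else:
            blocks.append(text)            # start a new comment block
        prev_indent, prev_was_comment = indent, True
    else:
        prev_was_comment = False           # code line breaks the grouping
print(blocks)  # ['first part\nsecond part', 'separate block']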
impact27/registrator
registrator/image.py
https://github.com/impact27/registrator/blob/04c099d83e0466207dc5b2e40d9b03db020d4dad/registrator/image.py#L704-L812
def polar_fft(im, nangle=None, radiimax=None, *, isshiftdft=False, truesize=None, logpolar=False, logoutput=False, interpolation='bilinear'): """Return dft in polar (or log-polar) units, the angle step (and the log base) Parameters ---------- im: 2d array The image nangle: number, optional The number of angles in the polar representation radiimax: number, optional The number of radius in the polar representation isshiftdft: boolean, default False True if the image is pre processed (DFT + fftshift) truesize: 2 numbers, required if isshiftdft is True The true size of the image logpolar: boolean, default False True if want the log polar representation instead of polar logoutput: boolean, default False True if want the log of the output interpolation: string, default 'bilinear' ('bicubic', 'bilinear', 'nearest') The interpolation technique. (For now, avoid bicubic) Returns ------- im: 2d array The (log) polar representation of the input image log_base: number, only if logpolar is True the log base if this is log polar representation Notes ----- radiimax is the maximal radius (log of radius if logpolar is true). if not provided, it is deduced from the image size To get log-polar, set logpolar to True log_base is the base of the log. It is deduced from radiimax. Two images that will be compared should therefore have the same radiimax. """ im = np.asarray(im, dtype=np.float32) # get dft if not already done if not isshiftdft: truesize = im.shape # substract mean to avoid having large central value im = im - im.mean() im = centered_mag_sq_ccs(dft_optsize(im)) # We need truesize! otherwise border effects. assert(truesize is not None) # the center is shifted from 0,0 to the ~ center #(eg. if 4x4 image, the center is at [2,2], as if 5x5) qshape = np.asarray([im.shape[0] // 2, im.shape[1]]) center = np.asarray([qshape[0], 0]) # if the angle Step is not given, take the number of pixel # on the perimeter as the target #=range/step if nangle is None: # TODO: understand why nangle need to be exactly truesize nangle = np.min(truesize) # range is pi, nbangle = 2r =~pi r # nangle-=2 # get the theta range theta = np.linspace(-np.pi / 2, np.pi / 2, nangle, endpoint=False, dtype=np.float32) # For the radii, the units are comparable if the log_base and radiimax are # the same. Therefore, log_base is deduced from radiimax # The step is assumed to be 1 if radiimax is None: radiimax = qshape.min() # also as the circle is an ellipse in the image, # we want the radius to be from 0 to 1 if logpolar: # The log base solves log_radii_max=log_{log_base}(linear_radii_max) # where we decided arbitrarely that linear_radii_max=log_radii_max log_base = np.exp(np.log(radiimax) / radiimax) radius = ((log_base ** np.arange(0, radiimax, dtype=np.float32)) / radiimax) else: radius = np.linspace(0, 1, radiimax, endpoint=False, dtype=np.float32) # get x y coordinates matrix (The units are 0 to 1, therefore a circle is # represented as an ellipse) y = cv2.gemm(np.sin(theta), radius, qshape[0], 0, 0, flags=cv2.GEMM_2_T) + center[0] x = cv2.gemm(np.cos(theta), radius, qshape[1], 0, 0, flags=cv2.GEMM_2_T) + center[1] interp = cv2.INTER_LINEAR if interpolation == 'bicubic': interp = cv2.INTER_CUBIC if interpolation == 'nearest': interp = cv2.INTER_NEAREST # get output output = cv2.remap(im, x, y, interp) # LINEAR, CUBIC,LANCZOS4 # apply log if logoutput: output = cv2.log(output) if logpolar: return output, log_base else: return output
[ "def", "polar_fft", "(", "im", ",", "nangle", "=", "None", ",", "radiimax", "=", "None", ",", "*", ",", "isshiftdft", "=", "False", ",", "truesize", "=", "None", ",", "logpolar", "=", "False", ",", "logoutput", "=", "False", ",", "interpolation", "=", "'bilinear'", ")", ":", "im", "=", "np", ".", "asarray", "(", "im", ",", "dtype", "=", "np", ".", "float32", ")", "# get dft if not already done", "if", "not", "isshiftdft", ":", "truesize", "=", "im", ".", "shape", "# substract mean to avoid having large central value", "im", "=", "im", "-", "im", ".", "mean", "(", ")", "im", "=", "centered_mag_sq_ccs", "(", "dft_optsize", "(", "im", ")", ")", "# We need truesize! otherwise border effects.", "assert", "(", "truesize", "is", "not", "None", ")", "# the center is shifted from 0,0 to the ~ center", "#(eg. if 4x4 image, the center is at [2,2], as if 5x5)", "qshape", "=", "np", ".", "asarray", "(", "[", "im", ".", "shape", "[", "0", "]", "//", "2", ",", "im", ".", "shape", "[", "1", "]", "]", ")", "center", "=", "np", ".", "asarray", "(", "[", "qshape", "[", "0", "]", ",", "0", "]", ")", "# if the angle Step is not given, take the number of pixel", "# on the perimeter as the target #=range/step", "if", "nangle", "is", "None", ":", "# TODO: understand why nangle need to be exactly truesize", "nangle", "=", "np", ".", "min", "(", "truesize", ")", "# range is pi, nbangle = 2r =~pi r", "# nangle-=2", "# get the theta range", "theta", "=", "np", ".", "linspace", "(", "-", "np", ".", "pi", "/", "2", ",", "np", ".", "pi", "/", "2", ",", "nangle", ",", "endpoint", "=", "False", ",", "dtype", "=", "np", ".", "float32", ")", "# For the radii, the units are comparable if the log_base and radiimax are", "# the same. Therefore, log_base is deduced from radiimax", "# The step is assumed to be 1", "if", "radiimax", "is", "None", ":", "radiimax", "=", "qshape", ".", "min", "(", ")", "# also as the circle is an ellipse in the image,", "# we want the radius to be from 0 to 1", "if", "logpolar", ":", "# The log base solves log_radii_max=log_{log_base}(linear_radii_max)", "# where we decided arbitrarely that linear_radii_max=log_radii_max", "log_base", "=", "np", ".", "exp", "(", "np", ".", "log", "(", "radiimax", ")", "/", "radiimax", ")", "radius", "=", "(", "(", "log_base", "**", "np", ".", "arange", "(", "0", ",", "radiimax", ",", "dtype", "=", "np", ".", "float32", ")", ")", "/", "radiimax", ")", "else", ":", "radius", "=", "np", ".", "linspace", "(", "0", ",", "1", ",", "radiimax", ",", "endpoint", "=", "False", ",", "dtype", "=", "np", ".", "float32", ")", "# get x y coordinates matrix (The units are 0 to 1, therefore a circle is", "# represented as an ellipse)", "y", "=", "cv2", ".", "gemm", "(", "np", ".", "sin", "(", "theta", ")", ",", "radius", ",", "qshape", "[", "0", "]", ",", "0", ",", "0", ",", "flags", "=", "cv2", ".", "GEMM_2_T", ")", "+", "center", "[", "0", "]", "x", "=", "cv2", ".", "gemm", "(", "np", ".", "cos", "(", "theta", ")", ",", "radius", ",", "qshape", "[", "1", "]", ",", "0", ",", "0", ",", "flags", "=", "cv2", ".", "GEMM_2_T", ")", "+", "center", "[", "1", "]", "interp", "=", "cv2", ".", "INTER_LINEAR", "if", "interpolation", "==", "'bicubic'", ":", "interp", "=", "cv2", ".", "INTER_CUBIC", "if", "interpolation", "==", "'nearest'", ":", "interp", "=", "cv2", ".", "INTER_NEAREST", "# get output", "output", "=", "cv2", ".", "remap", "(", "im", ",", "x", ",", "y", ",", "interp", ")", "# LINEAR, CUBIC,LANCZOS4", "# apply log", "if", "logoutput", ":", "output", "=", "cv2", ".", "log", "(", 
"output", ")", "if", "logpolar", ":", "return", "output", ",", "log_base", "else", ":", "return", "output" ]
Return dft in polar (or log-polar) units, the angle step (and the log base) Parameters ---------- im: 2d array The image nangle: number, optional The number of angles in the polar representation radiimax: number, optional The number of radius in the polar representation isshiftdft: boolean, default False True if the image is pre processed (DFT + fftshift) truesize: 2 numbers, required if isshiftdft is True The true size of the image logpolar: boolean, default False True if want the log polar representation instead of polar logoutput: boolean, default False True if want the log of the output interpolation: string, default 'bilinear' ('bicubic', 'bilinear', 'nearest') The interpolation technique. (For now, avoid bicubic) Returns ------- im: 2d array The (log) polar representation of the input image log_base: number, only if logpolar is True the log base if this is log polar representation Notes ----- radiimax is the maximal radius (log of radius if logpolar is true). if not provided, it is deduced from the image size To get log-polar, set logpolar to True log_base is the base of the log. It is deduced from radiimax. Two images that will be compared should therefore have the same radiimax.
[ "Return", "dft", "in", "polar", "(", "or", "log", "-", "polar", ")", "units", "the", "angle", "step", "(", "and", "the", "log", "base", ")" ]
python
train
36.275229
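A quick numeric check of the log-base relation derived in the docstring above, where log_base is chosen so that log_base**radiimax equals radiimax; the value 256 is an arbitrary example, not taken from the record:

import numpy as np

# log_base solves log_radii_max = log_{log_base}(linear_radii_max) with
# both maxima set to radiimax, i.e. log_base ** radiimax == radiimax.
radiimax = 256
log_base = np.exp(np.log(radiimax) / radiimax)
print(np.isclose(log_base ** radiimax, radiimax))  # True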
bfrog/whizzer
whizzer/transport.py
https://github.com/bfrog/whizzer/blob/a1e43084b3ac8c1f3fb4ada081777cdbf791fd77/whizzer/transport.py#L99-L125
def unbuffered_write(self, buf):
    """Performs an unbuffered write, the default unless socket.send does not
    send everything, in which case an unbuffered write is done and the write
    method is set to be a buffered write until the buffer is empty once again.

    buf -- bytes to send

    """
    if self.closed:
        raise ConnectionClosed()

    result = 0
    try:
        result = self.sock.send(buf)
    except EnvironmentError as e:
        # if the socket is simply backed up ignore the error
        if e.errno != errno.EAGAIN:
            self._close(e)
            return

    # when the socket buffers are full/backed up then we need to poll to see
    # when we can write again
    if result != len(buf):
        self.write = self.buffered_write
        self.write_watcher.start()
        self.write(buf[result:])
[ "def", "unbuffered_write", "(", "self", ",", "buf", ")", ":", "if", "self", ".", "closed", ":", "raise", "ConnectionClosed", "(", ")", "result", "=", "0", "try", ":", "result", "=", "self", ".", "sock", ".", "send", "(", "buf", ")", "except", "EnvironmentError", "as", "e", ":", "# if the socket is simply backed up ignore the error ", "if", "e", ".", "errno", "!=", "errno", ".", "EAGAIN", ":", "self", ".", "_close", "(", "e", ")", "return", "# when the socket buffers are full/backed up then we need to poll to see", "# when we can write again", "if", "result", "!=", "len", "(", "buf", ")", ":", "self", ".", "write", "=", "self", ".", "buffered_write", "self", ".", "write_watcher", ".", "start", "(", ")", "self", ".", "write", "(", "buf", "[", "result", ":", "]", ")" ]
Performs an unbuffered write, the default unless socket.send does not send everything, in which case an unbuffered write is done and the write method is set to be a buffered write until the buffer is empty once again. buf -- bytes to send
[ "Performs", "an", "unbuffered", "write", "the", "default", "unless", "socket", ".", "send", "does", "not", "send", "everything", "in", "which", "case", "an", "unbuffered", "write", "is", "done", "and", "the", "write", "method", "is", "set", "to", "be", "a", "buffered", "write", "until", "the", "buffer", "is", "empty", "once", "again", "." ]
python
train
33.481481
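A generic sketch of the partial-send pattern the method above relies on, written against the standard socket module rather than whizzer's transport; the helper name and structure are illustrative, not taken from the record:

import errno
import socket

def send_remainder(sock: socket.socket, buf: bytes) -> bytes:
    """Try one non-blocking send; return the bytes that still need writing."""
    try:
        sent = sock.send(buf)
    except OSError as e:
        if e.errno != errno.EAGAIN:
            raise        # a real error, not just a backed-up socket
        return buf       # nothing written, keep the whole buffer for later
    return buf[sent:]    # empty result means the write completed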
jbasko/hookery
hookery/utils.py
https://github.com/jbasko/hookery/blob/5638de2999ad533f83bc9a03110f85e5a0404257/hookery/utils.py#L5-L38
def optional_args_func(func) -> callable:
    """
    Given a function or generator `func`, return a function/generator that takes any
    number of kwargs and calls `func` with only the args/kwargs that `func` expects.
    """
    if getattr(func, '_optional_args_func', False):
        return func

    is_generator = inspect.isgeneratorfunction(func)
    func_sig = inspect.signature(func)
    expects_nothing = not func_sig.parameters

    if is_generator:
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            if expects_nothing:
                yield from func()
            else:
                bound_arguments = func_sig.bind(*args, **{k: v for k, v in kwargs.items() if k in func_sig.parameters})
                yield from func(*bound_arguments.args, **bound_arguments.kwargs)
    else:
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            if expects_nothing:
                return func()
            else:
                bound_arguments = func_sig.bind(*args, **{k: v for k, v in kwargs.items() if k in func_sig.parameters})
                return func(*bound_arguments.args, **bound_arguments.kwargs)

    # Mark it so that we don't double wrap our own
    setattr(wrapped, '_optional_args_func', True)

    return wrapped
[ "def", "optional_args_func", "(", "func", ")", "->", "callable", ":", "if", "getattr", "(", "func", ",", "'_optional_args_func'", ",", "False", ")", ":", "return", "func", "is_generator", "=", "inspect", ".", "isgeneratorfunction", "(", "func", ")", "func_sig", "=", "inspect", ".", "signature", "(", "func", ")", "expects_nothing", "=", "not", "func_sig", ".", "parameters", "if", "is_generator", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "expects_nothing", ":", "yield", "from", "func", "(", ")", "else", ":", "bound_arguments", "=", "func_sig", ".", "bind", "(", "*", "args", ",", "*", "*", "{", "k", ":", "v", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "if", "k", "in", "func_sig", ".", "parameters", "}", ")", "yield", "from", "func", "(", "*", "bound_arguments", ".", "args", ",", "*", "*", "bound_arguments", ".", "kwargs", ")", "else", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "expects_nothing", ":", "return", "func", "(", ")", "else", ":", "bound_arguments", "=", "func_sig", ".", "bind", "(", "*", "args", ",", "*", "*", "{", "k", ":", "v", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "if", "k", "in", "func_sig", ".", "parameters", "}", ")", "return", "func", "(", "*", "bound_arguments", ".", "args", ",", "*", "*", "bound_arguments", ".", "kwargs", ")", "# Mark it so that we don't double wrap our own", "setattr", "(", "wrapped", ",", "'_optional_args_func'", ",", "True", ")", "return", "wrapped" ]
Given a function or generator `func`, return a function/generator that takes any number of kwargs and calls `func` with only the args/kwargs that `func` expects.
[ "Given", "a", "function", "or", "generator", "func", "return", "a", "function", "/", "generator", "that", "takes", "any", "number", "of", "kwargs", "and", "calls", "func", "with", "only", "the", "args", "/", "kwargs", "that", "func", "expects", "." ]
python
train
37.147059
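A hedged usage sketch for the wrapper above; the hook functions are made up for illustration, and the import path simply mirrors the record's module path:

from hookery.utils import optional_args_func  # path assumed from the record

def on_event(user):
    return 'user=%s' % user

def needs_nothing():
    return 'no args'

# Extra keyword arguments that a callback does not declare are dropped.
print(optional_args_func(on_event)(user='alice', request_id=42))  # user=alice
print(optional_args_func(needs_nothing)(user='alice'))            # no args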
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_messagerate.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_messagerate.py#L49-L64
def status(self):
    '''returns rates'''
    counts = {}
    for bucket in self.buckets:
        for x in bucket:
            if not x in counts:
                counts[x] = 0
            counts[x] += bucket[x]
    ret = ""
    mtypes = counts.keys()
    mtypes.sort()
    for mtype in mtypes:
        ret += "%s: %0.1f/s\n" % (mtype, counts[mtype]/float(len(self.buckets)))
    return ret
[ "def", "status", "(", "self", ")", ":", "counts", "=", "{", "}", "for", "bucket", "in", "self", ".", "buckets", ":", "for", "x", "in", "bucket", ":", "if", "not", "x", "in", "counts", ":", "counts", "[", "x", "]", "=", "0", "counts", "[", "x", "]", "+=", "bucket", "[", "x", "]", "ret", "=", "\"\"", "mtypes", "=", "counts", ".", "keys", "(", ")", "mtypes", ".", "sort", "(", ")", "for", "mtype", "in", "mtypes", ":", "ret", "+=", "\"%s: %0.1f/s\\n\"", "%", "(", "mtype", ",", "counts", "[", "mtype", "]", "/", "float", "(", "len", "(", "self", ".", "buckets", ")", ")", ")", "return", "ret" ]
returns rates
[ "returns", "rates" ]
python
train
29.125
inasafe/inasafe
safe/utilities/memory_checker.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/utilities/memory_checker.py#L162-L180
def memory_error():
    """Display an error when there is not enough memory."""
    warning_heading = m.Heading(
        tr('Memory issue'), **WARNING_STYLE)
    warning_message = tr(
        'There is not enough free memory to run this analysis.')
    suggestion_heading = m.Heading(
        tr('Suggestion'), **SUGGESTION_STYLE)
    suggestion = tr(
        'Try zooming in to a smaller area or using a raster layer with a '
        'coarser resolution to speed up execution and reduce memory '
        'requirements. You could also try adding more RAM to your computer.')

    message = m.Message()
    message.add(warning_heading)
    message.add(warning_message)
    message.add(suggestion_heading)
    message.add(suggestion)

    send_static_message(dispatcher.Anonymous, message)
[ "def", "memory_error", "(", ")", ":", "warning_heading", "=", "m", ".", "Heading", "(", "tr", "(", "'Memory issue'", ")", ",", "*", "*", "WARNING_STYLE", ")", "warning_message", "=", "tr", "(", "'There is not enough free memory to run this analysis.'", ")", "suggestion_heading", "=", "m", ".", "Heading", "(", "tr", "(", "'Suggestion'", ")", ",", "*", "*", "SUGGESTION_STYLE", ")", "suggestion", "=", "tr", "(", "'Try zooming in to a smaller area or using a raster layer with a '", "'coarser resolution to speed up execution and reduce memory '", "'requirements. You could also try adding more RAM to your computer.'", ")", "message", "=", "m", ".", "Message", "(", ")", "message", ".", "add", "(", "warning_heading", ")", "message", ".", "add", "(", "warning_message", ")", "message", ".", "add", "(", "suggestion_heading", ")", "message", ".", "add", "(", "suggestion", ")", "send_static_message", "(", "dispatcher", ".", "Anonymous", ",", "message", ")" ]
Display an error when there is not enough memory.
[ "Display", "an", "error", "when", "there", "is", "not", "enough", "memory", "." ]
python
train
40.421053
imbolc/aiohttp-login
aiohttp_login/sql.py
https://github.com/imbolc/aiohttp-login/blob/43b30d8630ca5c14d4b75c398eb5f6a27ddf0a52/aiohttp_login/sql.py#L60-L71
def update_sql(table, filter, updates):
    '''
    >>> update_sql('tbl', {'foo': 'a', 'bar': 1}, {'bar': 2, 'baz': 'b'})
    ('UPDATE tbl SET bar=$1, baz=$2 WHERE bar=$3 AND foo=$4', [2, 'b', 1, 'a'])
    '''
    where_keys, where_vals = _split_dict(filter)
    up_keys, up_vals = _split_dict(updates)
    changes = _pairs(up_keys, sep=', ')
    where = _pairs(where_keys, start=len(up_keys) + 1)
    sql = 'UPDATE {} SET {} WHERE {}'.format(
        table, changes, where)
    return sql, up_vals + where_vals
[ "def", "update_sql", "(", "table", ",", "filter", ",", "updates", ")", ":", "where_keys", ",", "where_vals", "=", "_split_dict", "(", "filter", ")", "up_keys", ",", "up_vals", "=", "_split_dict", "(", "updates", ")", "changes", "=", "_pairs", "(", "up_keys", ",", "sep", "=", "', '", ")", "where", "=", "_pairs", "(", "where_keys", ",", "start", "=", "len", "(", "up_keys", ")", "+", "1", ")", "sql", "=", "'UPDATE {} SET {} WHERE {}'", ".", "format", "(", "table", ",", "changes", ",", "where", ")", "return", "sql", ",", "up_vals", "+", "where_vals" ]
>>> update_sql('tbl', {'foo': 'a', 'bar': 1}, {'bar': 2, 'baz': 'b'}) ('UPDATE tbl SET bar=$1, baz=$2 WHERE bar=$3 AND foo=$4', [2, 'b', 1, 'a'])
[ ">>>", "update_sql", "(", "tbl", "{", "foo", ":", "a", "bar", ":", "1", "}", "{", "bar", ":", "2", "baz", ":", "b", "}", ")", "(", "UPDATE", "tbl", "SET", "bar", "=", "$1", "baz", "=", "$2", "WHERE", "bar", "=", "$3", "AND", "foo", "=", "$4", "[", "2", "b", "1", "a", "]", ")" ]
python
train
41.666667
HPENetworking/PYHPEIMC
build/lib/pyhpeimc/plat/termaccess.py
https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/build/lib/pyhpeimc/plat/termaccess.py#L19-L77
def get_real_time_locate(ipAddress, auth, url): """ function takes the ipAddress of a specific host and issues a RESTFUL call to get the device and interface that the target host is currently connected to. Note: Although intended to return a single location, Multiple locations may be returned for a single host due to a partially discovered network or misconfigured environment. :param ipAddress: str value valid IPv4 IP address :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: list of dictionaries where each element of the list represents the location of the target host :rtype: list >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.termaccess import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> found_device = get_real_time_locate('10.101.0.51', auth.creds, auth.url) >>> assert type(found_device) is list >>> assert 'deviceId' in found_device[0] >>> assert 'deviceId' in found_device[0] >>> assert 'deviceId' in found_device[0] >>> assert 'deviceId' in found_device[0] >>> no_device = get_real_time_locate('192.168.254.254', auth.creds, auth.url) >>> assert type(no_device) is dict >>> assert len(no_device) == 0 """ real_time_locate_url = "/imcrs/res/access/realtimeLocate?type=2&value=" + str(ipAddress) + "&total=false" f_url = url + real_time_locate_url r = requests.get(f_url, auth=auth, headers=HEADERS) # creates the URL using the payload variable as the contents try: if r.status_code == 200: response = json.loads(r.text) if 'realtimeLocation' in response: real_time_locate = json.loads(r.text)['realtimeLocation'] if type(real_time_locate) is dict: real_time_locate = [real_time_locate] return real_time_locate else: return json.loads(r.text)['realtimeLocation'] else: return json.loads(r.text) except requests.exceptions.RequestException as e: return "Error:\n" + str(e) + " get_real_time_locate: An Error has occured"
[ "def", "get_real_time_locate", "(", "ipAddress", ",", "auth", ",", "url", ")", ":", "real_time_locate_url", "=", "\"/imcrs/res/access/realtimeLocate?type=2&value=\"", "+", "str", "(", "ipAddress", ")", "+", "\"&total=false\"", "f_url", "=", "url", "+", "real_time_locate_url", "r", "=", "requests", ".", "get", "(", "f_url", ",", "auth", "=", "auth", ",", "headers", "=", "HEADERS", ")", "# creates the URL using the payload variable as the contents", "try", ":", "if", "r", ".", "status_code", "==", "200", ":", "response", "=", "json", ".", "loads", "(", "r", ".", "text", ")", "if", "'realtimeLocation'", "in", "response", ":", "real_time_locate", "=", "json", ".", "loads", "(", "r", ".", "text", ")", "[", "'realtimeLocation'", "]", "if", "type", "(", "real_time_locate", ")", "is", "dict", ":", "real_time_locate", "=", "[", "real_time_locate", "]", "return", "real_time_locate", "else", ":", "return", "json", ".", "loads", "(", "r", ".", "text", ")", "[", "'realtimeLocation'", "]", "else", ":", "return", "json", ".", "loads", "(", "r", ".", "text", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "return", "\"Error:\\n\"", "+", "str", "(", "e", ")", "+", "\" get_real_time_locate: An Error has occured\"" ]
function takes the ipAddress of a specific host and issues a RESTFUL call to get the device and interface that the target host is currently connected to. Note: Although intended to return a single location, Multiple locations may be returned for a single host due to a partially discovered network or misconfigured environment. :param ipAddress: str value valid IPv4 IP address :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: list of dictionaries where each element of the list represents the location of the target host :rtype: list >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.termaccess import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> found_device = get_real_time_locate('10.101.0.51', auth.creds, auth.url) >>> assert type(found_device) is list >>> assert 'deviceId' in found_device[0] >>> assert 'deviceId' in found_device[0] >>> assert 'deviceId' in found_device[0] >>> assert 'deviceId' in found_device[0] >>> no_device = get_real_time_locate('192.168.254.254', auth.creds, auth.url) >>> assert type(no_device) is dict >>> assert len(no_device) == 0
[ "function", "takes", "the", "ipAddress", "of", "a", "specific", "host", "and", "issues", "a", "RESTFUL", "call", "to", "get", "the", "device", "and", "interface", "that", "the", "target", "host", "is", "currently", "connected", "to", ".", "Note", ":", "Although", "intended", "to", "return", "a", "single", "location", "Multiple", "locations", "may", "be", "returned", "for", "a", "single", "host", "due", "to", "a", "partially", "discovered", "network", "or", "misconfigured", "environment", "." ]
python
train
38.237288
ska-sa/hypercube
hypercube/base_cube.py
https://github.com/ska-sa/hypercube/blob/6564a9e65ccd9ed7e7a71bd643f183e1ec645b29/hypercube/base_cube.py#L270-L272
def dim_global_size_dict(self):
    """ Returns a mapping of dimension name to global size """
    return { d.name: d.global_size for d in self._dims.itervalues()}
[ "def", "dim_global_size_dict", "(", "self", ")", ":", "return", "{", "d", ".", "name", ":", "d", ".", "global_size", "for", "d", "in", "self", ".", "_dims", ".", "itervalues", "(", ")", "}" ]
Returns a mapping of dimension name to global size
[ "Returns", "a", "mapping", "of", "dimension", "name", "to", "global", "size" ]
python
train
56.333333
bitesofcode/projexui
projexui/widgets/xorbcolumnedit/xorbcolumnedit.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbcolumnedit/xorbcolumnedit.py#L120-L144
def rebuild( self ):
    """
    Clears out all the child widgets from this widget and creates the
    widget that best matches the column properties for this edit.
    """
    plugins.init()

    self.blockSignals(True)
    self.setUpdatesEnabled(False)

    # clear the old editor
    if ( self._editor ):
        self._editor.close()
        self._editor.setParent(None)
        self._editor.deleteLater()
        self._editor = None

    # create a new widget
    plugin_class = plugins.widgets.get(self._columnType)
    if ( plugin_class ):
        self._editor = plugin_class(self)
        self.layout().addWidget(self._editor)

    self.blockSignals(False)
    self.setUpdatesEnabled(True)
[ "def", "rebuild", "(", "self", ")", ":", "plugins", ".", "init", "(", ")", "self", ".", "blockSignals", "(", "True", ")", "self", ".", "setUpdatesEnabled", "(", "False", ")", "# clear the old editor\r", "if", "(", "self", ".", "_editor", ")", ":", "self", ".", "_editor", ".", "close", "(", ")", "self", ".", "_editor", ".", "setParent", "(", "None", ")", "self", ".", "_editor", ".", "deleteLater", "(", ")", "self", ".", "_editor", "=", "None", "# create a new widget\r", "plugin_class", "=", "plugins", ".", "widgets", ".", "get", "(", "self", ".", "_columnType", ")", "if", "(", "plugin_class", ")", ":", "self", ".", "_editor", "=", "plugin_class", "(", "self", ")", "self", ".", "layout", "(", ")", ".", "addWidget", "(", "self", ".", "_editor", ")", "self", ".", "blockSignals", "(", "False", ")", "self", ".", "setUpdatesEnabled", "(", "True", ")" ]
Clears out all the child widgets from this widget and creates the widget that best matches the column properties for this edit.
[ "Clears", "out", "all", "the", "child", "widgets", "from", "this", "widget", "and", "creates", "the", "widget", "that", "best", "matches", "the", "column", "properties", "for", "this", "edit", "." ]
python
train
32.36
pmorissette/ffn
ffn/core.py
https://github.com/pmorissette/ffn/blob/ef09f28b858b7ffcd2627ce6a4dc618183a6bc8a/ffn/core.py#L1331-L1347
def merge(*series):
    """
    Merge Series and/or DataFrames together.

    Returns a DataFrame.
    """
    dfs = []
    for s in series:
        if isinstance(s, pd.DataFrame):
            dfs.append(s)
        elif isinstance(s, pd.Series):
            tmpdf = pd.DataFrame({s.name: s})
            dfs.append(tmpdf)
        else:
            raise NotImplementedError('Unsupported merge type')

    return pd.concat(dfs, axis=1)
[ "def", "merge", "(", "*", "series", ")", ":", "dfs", "=", "[", "]", "for", "s", "in", "series", ":", "if", "isinstance", "(", "s", ",", "pd", ".", "DataFrame", ")", ":", "dfs", ".", "append", "(", "s", ")", "elif", "isinstance", "(", "s", ",", "pd", ".", "Series", ")", ":", "tmpdf", "=", "pd", ".", "DataFrame", "(", "{", "s", ".", "name", ":", "s", "}", ")", "dfs", ".", "append", "(", "tmpdf", ")", "else", ":", "raise", "NotImplementedError", "(", "'Unsupported merge type'", ")", "return", "pd", ".", "concat", "(", "dfs", ",", "axis", "=", "1", ")" ]
Merge Series and/or DataFrames together. Returns a DataFrame.
[ "Merge", "Series", "and", "/", "or", "DataFrames", "together", "." ]
python
train
24.588235
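A minimal usage sketch for the merge helper above; the import path mirrors the record's module path:

import pandas as pd
from ffn.core import merge  # module path taken from the record above

a = pd.Series([1.0, 2.0], name='a')
b = pd.Series([3.0, 4.0], name='b')

df = merge(a, b)          # Series are wrapped into single-column frames
print(list(df.columns))   # ['a', 'b']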
lpantano/seqcluster
seqcluster/function/predictions.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/function/predictions.py#L37-L63
def is_tRNA(clus_obj, out_dir, args): """ Iterates through cluster precursors to predict sRNA types """ ref = os.path.abspath(args.reference) utils.safe_dirs(out_dir) for nc in clus_obj[0]: c = clus_obj[0][nc] loci = c['loci'] out_fa = "cluster_" + nc if loci[0][3] - loci[0][2] < 500: with make_temp_directory() as tmpdir: os.chdir(tmpdir) get_loci_fasta({loci[0][0]: [loci[0][0:5]]}, out_fa, ref) summary_file, str_file = _run_tRNA_scan(out_fa) if "predictions" not in c: c['predictions'] = {} c['predictions']['tRNA'] = _read_tRNA_scan(summary_file) score = _read_tRNA_scan(summary_file) logger.debug(score) shutil.move(summary_file, op.join(out_dir, summary_file)) shutil.move(str_file, op.join(out_dir, str_file)) else: c['errors'].add("precursor too long") clus_obj[0][nc] = c return clus_obj
[ "def", "is_tRNA", "(", "clus_obj", ",", "out_dir", ",", "args", ")", ":", "ref", "=", "os", ".", "path", ".", "abspath", "(", "args", ".", "reference", ")", "utils", ".", "safe_dirs", "(", "out_dir", ")", "for", "nc", "in", "clus_obj", "[", "0", "]", ":", "c", "=", "clus_obj", "[", "0", "]", "[", "nc", "]", "loci", "=", "c", "[", "'loci'", "]", "out_fa", "=", "\"cluster_\"", "+", "nc", "if", "loci", "[", "0", "]", "[", "3", "]", "-", "loci", "[", "0", "]", "[", "2", "]", "<", "500", ":", "with", "make_temp_directory", "(", ")", "as", "tmpdir", ":", "os", ".", "chdir", "(", "tmpdir", ")", "get_loci_fasta", "(", "{", "loci", "[", "0", "]", "[", "0", "]", ":", "[", "loci", "[", "0", "]", "[", "0", ":", "5", "]", "]", "}", ",", "out_fa", ",", "ref", ")", "summary_file", ",", "str_file", "=", "_run_tRNA_scan", "(", "out_fa", ")", "if", "\"predictions\"", "not", "in", "c", ":", "c", "[", "'predictions'", "]", "=", "{", "}", "c", "[", "'predictions'", "]", "[", "'tRNA'", "]", "=", "_read_tRNA_scan", "(", "summary_file", ")", "score", "=", "_read_tRNA_scan", "(", "summary_file", ")", "logger", ".", "debug", "(", "score", ")", "shutil", ".", "move", "(", "summary_file", ",", "op", ".", "join", "(", "out_dir", ",", "summary_file", ")", ")", "shutil", ".", "move", "(", "str_file", ",", "op", ".", "join", "(", "out_dir", ",", "str_file", ")", ")", "else", ":", "c", "[", "'errors'", "]", ".", "add", "(", "\"precursor too long\"", ")", "clus_obj", "[", "0", "]", "[", "nc", "]", "=", "c", "return", "clus_obj" ]
Iterates through cluster precursors to predict sRNA types
[ "Iterates", "through", "cluster", "precursors", "to", "predict", "sRNA", "types" ]
python
train
38.407407
DataONEorg/d1_python
client_cli/src/d1_cli/dataone.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/dataone.py#L430-L456
def handle_unexpected_exception(max_traceback_levels=100): """Suppress stack traces for common errors and provide hints for how to resolve them.""" exc_type, exc_msgs = sys.exc_info()[:2] if exc_type.__name__ == "SSLError": d1_cli.impl.util.print_error( """HTTPS / TLS / SSL / X.509v3 Certificate Error: An HTTPS connection could not be established. Verify that a DataONE node responds at the URL provided in the cn-url or mn-url session variable. If the URL is valid and if you intended to connect without authentication, make sure that the session variable, "anonymous", is set to True. If you intended to connect with authentication, make sure that the parameter, "cert-file", points to a valid certificate from CILogon. If the certificate has the private key in a separate file, also set "key-file" to the private key file. Otherwise, set "key-file" to None. Note that CILogon certificates must be renewed after 18 hours. """ ) elif exc_type.__name__ == "timeout": d1_cli.impl.util.print_error( """Timeout error: A connection to a DataONE node timed out. Verify that a DataONE node responds at the URL provided in the cn-url or mn-url session variable. """ ) else: _print_unexpected_exception(max_traceback_levels)
[ "def", "handle_unexpected_exception", "(", "max_traceback_levels", "=", "100", ")", ":", "exc_type", ",", "exc_msgs", "=", "sys", ".", "exc_info", "(", ")", "[", ":", "2", "]", "if", "exc_type", ".", "__name__", "==", "\"SSLError\"", ":", "d1_cli", ".", "impl", ".", "util", ".", "print_error", "(", "\"\"\"HTTPS / TLS / SSL / X.509v3 Certificate Error:\n An HTTPS connection could not be established. Verify that a DataONE node\n responds at the URL provided in the cn-url or mn-url session variable. If the\n URL is valid and if you intended to connect without authentication, make sure\n that the session variable, \"anonymous\", is set to True. If you intended to\n connect with authentication, make sure that the parameter, \"cert-file\", points\n to a valid certificate from CILogon. If the certificate has the private\n key in a separate file, also set \"key-file\" to the private key file.\n Otherwise, set \"key-file\" to None. Note that CILogon certificates must be\n renewed after 18 hours.\n \"\"\"", ")", "elif", "exc_type", ".", "__name__", "==", "\"timeout\"", ":", "d1_cli", ".", "impl", ".", "util", ".", "print_error", "(", "\"\"\"Timeout error:\n A connection to a DataONE node timed out. Verify that a DataONE node responds\n at the URL provided in the cn-url or mn-url session variable.\n \"\"\"", ")", "else", ":", "_print_unexpected_exception", "(", "max_traceback_levels", ")" ]
Suppress stack traces for common errors and provide hints for how to resolve them.
[ "Suppress", "stack", "traces", "for", "common", "errors", "and", "provide", "hints", "for", "how", "to", "resolve", "them", "." ]
python
train
49.111111
csparpa/pyowm
pyowm/stationsapi30/station.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/stationsapi30/station.py#L119-L133
def to_JSON(self):
    """Dumps object fields into a JSON formatted string

    :returns: the JSON string

    """
    return json.dumps({'id': self.id,
                       'external_id': self.external_id,
                       'name': self.name,
                       'created_at': timeformatutils.to_ISO8601(self.created_at),
                       'updated_at': timeformatutils.to_ISO8601(self.updated_at),
                       'lat': self.lat,
                       'lon': self.lon,
                       'alt': self.alt if self.alt is not None else 'None',
                       'rank': self.rank})
[ "def", "to_JSON", "(", "self", ")", ":", "return", "json", ".", "dumps", "(", "{", "'id'", ":", "self", ".", "id", ",", "'external_id'", ":", "self", ".", "external_id", ",", "'name'", ":", "self", ".", "name", ",", "'created_at'", ":", "timeformatutils", ".", "to_ISO8601", "(", "self", ".", "created_at", ")", ",", "'updated_at'", ":", "timeformatutils", ".", "to_ISO8601", "(", "self", ".", "updated_at", ")", ",", "'lat'", ":", "self", ".", "lat", ",", "'lon'", ":", "self", ".", "lon", ",", "'alt'", ":", "self", ".", "alt", "if", "self", ".", "alt", "is", "not", "None", "else", "'None'", ",", "'rank'", ":", "self", ".", "rank", "}", ")" ]
Dumps object fields into a JSON formatted string :returns: the JSON string
[ "Dumps", "object", "fields", "into", "a", "JSON", "formatted", "string" ]
python
train
43.133333
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/work_item_tracking_process/work_item_tracking_process_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/work_item_tracking_process/work_item_tracking_process_client.py#L533-L546
def get_list(self, list_id):
    """GetList.
    [Preview API] Returns a picklist.
    :param str list_id: The ID of the list
    :rtype: :class:`<PickList> <azure.devops.v5_0.work_item_tracking_process.models.PickList>`
    """
    route_values = {}
    if list_id is not None:
        route_values['listId'] = self._serialize.url('list_id', list_id, 'str')
    response = self._send(http_method='GET',
                          location_id='01e15468-e27c-4e20-a974-bd957dcccebc',
                          version='5.0-preview.1',
                          route_values=route_values)
    return self._deserialize('PickList', response)
[ "def", "get_list", "(", "self", ",", "list_id", ")", ":", "route_values", "=", "{", "}", "if", "list_id", "is", "not", "None", ":", "route_values", "[", "'listId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'list_id'", ",", "list_id", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'01e15468-e27c-4e20-a974-bd957dcccebc'", ",", "version", "=", "'5.0-preview.1'", ",", "route_values", "=", "route_values", ")", "return", "self", ".", "_deserialize", "(", "'PickList'", ",", "response", ")" ]
GetList. [Preview API] Returns a picklist. :param str list_id: The ID of the list :rtype: :class:`<PickList> <azure.devops.v5_0.work_item_tracking_process.models.PickList>`
[ "GetList", ".", "[", "Preview", "API", "]", "Returns", "a", "picklist", ".", ":", "param", "str", "list_id", ":", "The", "ID", "of", "the", "list", ":", "rtype", ":", ":", "class", ":", "<PickList", ">", "<azure", ".", "devops", ".", "v5_0", ".", "work_item_tracking_process", ".", "models", ".", "PickList", ">" ]
python
train
48.214286
DataBiosphere/dsub
dsub/lib/dsub_util.py
https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/lib/dsub_util.py#L185-L200
def load_file(file_path, credentials=None):
  """Load a file from either local or gcs.

  Args:
    file_path: The target file path, which should have the prefix 'gs://' if
               to be loaded from gcs.
    credentials: Optional credential to be used to load the file from gcs.

  Returns:
    A python File object if loading file from local or a StringIO object if
    loading from gcs.
  """
  if file_path.startswith('gs://'):
    return _load_file_from_gcs(file_path, credentials)
  else:
    return open(file_path, 'r')
[ "def", "load_file", "(", "file_path", ",", "credentials", "=", "None", ")", ":", "if", "file_path", ".", "startswith", "(", "'gs://'", ")", ":", "return", "_load_file_from_gcs", "(", "file_path", ",", "credentials", ")", "else", ":", "return", "open", "(", "file_path", ",", "'r'", ")" ]
Load a file from either local or gcs. Args: file_path: The target file path, which should have the prefix 'gs://' if to be loaded from gcs. credentials: Optional credential to be used to load the file from gcs. Returns: A python File object if loading file from local or a StringIO object if loading from gcs.
[ "Load", "a", "file", "from", "either", "local", "or", "gcs", "." ]
python
valid
32.3125
fastai/fastai
fastai/core.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/core.py#L91-L95
def arrays_split(mask:NPArrayMask, *arrs:NPArrayableList)->SplitArrayList:
    "Given `arrs` is [a,b,...] and `mask`index - return[(a[mask],a[~mask]),(b[mask],b[~mask]),...]."
    assert all([len(arr)==len(arrs[0]) for arr in arrs]), 'All arrays should have same length'
    mask = array(mask)
    return list(zip(*[(a[mask],a[~mask]) for a in map(np.array, arrs)]))
[ "def", "arrays_split", "(", "mask", ":", "NPArrayMask", ",", "*", "arrs", ":", "NPArrayableList", ")", "->", "SplitArrayList", ":", "assert", "all", "(", "[", "len", "(", "arr", ")", "==", "len", "(", "arrs", "[", "0", "]", ")", "for", "arr", "in", "arrs", "]", ")", ",", "'All arrays should have same length'", "mask", "=", "array", "(", "mask", ")", "return", "list", "(", "zip", "(", "*", "[", "(", "a", "[", "mask", "]", ",", "a", "[", "~", "mask", "]", ")", "for", "a", "in", "map", "(", "np", ".", "array", ",", "arrs", ")", "]", ")", ")" ]
Given `arrs` is [a,b,...] and `mask`index - return[(a[mask],a[~mask]),(b[mask],b[~mask]),...].
[ "Given", "arrs", "is", "[", "a", "b", "...", "]", "and", "mask", "index", "-", "return", "[", "(", "a", "[", "mask", "]", "a", "[", "~mask", "]", ")", "(", "b", "[", "mask", "]", "b", "[", "~mask", "]", ")", "...", "]", "." ]
python
train
72.4
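A small check of the function above. Note that the trailing zip(*...) transposes the per-array pairs, so the call actually yields one tuple per mask side (all masked arrays first, then all complements); the import path mirrors the record:

import numpy as np
from fastai.core import arrays_split  # module path taken from the record

a = np.array([10, 20, 30, 40])
b = np.array([1, 2, 3, 4])
mask = np.array([True, False, True, False])

masked, unmasked = arrays_split(mask, a, b)
print(masked)    # (array([10, 30]), array([1, 3]))
print(unmasked)  # (array([20, 40]), array([2, 4]))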
log2timeline/plaso
plaso/parsers/sqlite_plugins/skype.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/sqlite_plugins/skype.py#L435-L497
def ParseAccountInformation( self, parser_mediator, query, row, **unused_kwargs): """Parses account information. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row with account information. """ query_hash = hash(query) display_name = self._GetRowValue(query_hash, row, 'given_displayname') fullname = self._GetRowValue(query_hash, row, 'fullname') # TODO: Move this to the formatter, and ensure username is rendered # properly when fullname and/or display_name is None. username = '{0!s} <{1!s}>'.format(fullname, display_name) event_data = SkypeAccountEventData() event_data.country = self._GetRowValue(query_hash, row, 'country') event_data.display_name = display_name event_data.email = self._GetRowValue(query_hash, row, 'emails') event_data.offset = self._GetRowValue(query_hash, row, 'id') event_data.query = query event_data.username = username timestamp = self._GetRowValue(query_hash, row, 'profile_timestamp') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Profile Changed') parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'authreq_timestamp') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, 'Authenticate Request') parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'lastonline_timestamp') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Last Online') parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'mood_timestamp') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Mood Event') parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'sent_authrequest_time') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Auth Request Sent') parser_mediator.ProduceEventWithEventData(event, event_data) timestamp = self._GetRowValue(query_hash, row, 'lastused_timestamp') if timestamp: date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent(date_time, 'Last Used') parser_mediator.ProduceEventWithEventData(event, event_data)
[ "def", "ParseAccountInformation", "(", "self", ",", "parser_mediator", ",", "query", ",", "row", ",", "*", "*", "unused_kwargs", ")", ":", "query_hash", "=", "hash", "(", "query", ")", "display_name", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'given_displayname'", ")", "fullname", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'fullname'", ")", "# TODO: Move this to the formatter, and ensure username is rendered", "# properly when fullname and/or display_name is None.", "username", "=", "'{0!s} <{1!s}>'", ".", "format", "(", "fullname", ",", "display_name", ")", "event_data", "=", "SkypeAccountEventData", "(", ")", "event_data", ".", "country", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'country'", ")", "event_data", ".", "display_name", "=", "display_name", "event_data", ".", "email", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'emails'", ")", "event_data", ".", "offset", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'id'", ")", "event_data", ".", "query", "=", "query", "event_data", ".", "username", "=", "username", "timestamp", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'profile_timestamp'", ")", "if", "timestamp", ":", "date_time", "=", "dfdatetime_posix_time", ".", "PosixTime", "(", "timestamp", "=", "timestamp", ")", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "date_time", ",", "'Profile Changed'", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")", "timestamp", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'authreq_timestamp'", ")", "if", "timestamp", ":", "date_time", "=", "dfdatetime_posix_time", ".", "PosixTime", "(", "timestamp", "=", "timestamp", ")", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "date_time", ",", "'Authenticate Request'", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")", "timestamp", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'lastonline_timestamp'", ")", "if", "timestamp", ":", "date_time", "=", "dfdatetime_posix_time", ".", "PosixTime", "(", "timestamp", "=", "timestamp", ")", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "date_time", ",", "'Last Online'", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")", "timestamp", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'mood_timestamp'", ")", "if", "timestamp", ":", "date_time", "=", "dfdatetime_posix_time", ".", "PosixTime", "(", "timestamp", "=", "timestamp", ")", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "date_time", ",", "'Mood Event'", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")", "timestamp", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'sent_authrequest_time'", ")", "if", "timestamp", ":", "date_time", "=", "dfdatetime_posix_time", ".", "PosixTime", "(", "timestamp", "=", "timestamp", ")", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "date_time", ",", "'Auth Request Sent'", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")", "timestamp", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'lastused_timestamp'", ")", "if", "timestamp", ":", "date_time", "=", "dfdatetime_posix_time", ".", "PosixTime", "(", 
"timestamp", "=", "timestamp", ")", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "date_time", ",", "'Last Used'", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")" ]
Parses account information. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row with account information.
[ "Parses", "account", "information", "." ]
python
train
45.539683
sammchardy/python-binance
binance/client.py
https://github.com/sammchardy/python-binance/blob/31c0d0a32f9edd528c6c2c1dd3044d9a34ce43cc/binance/client.py#L298-L345
def get_symbol_info(self, symbol): """Return information about a symbol :param symbol: required e.g BNBBTC :type symbol: str :returns: Dict if found, None if not .. code-block:: python { "symbol": "ETHBTC", "status": "TRADING", "baseAsset": "ETH", "baseAssetPrecision": 8, "quoteAsset": "BTC", "quotePrecision": 8, "orderTypes": ["LIMIT", "MARKET"], "icebergAllowed": false, "filters": [ { "filterType": "PRICE_FILTER", "minPrice": "0.00000100", "maxPrice": "100000.00000000", "tickSize": "0.00000100" }, { "filterType": "LOT_SIZE", "minQty": "0.00100000", "maxQty": "100000.00000000", "stepSize": "0.00100000" }, { "filterType": "MIN_NOTIONAL", "minNotional": "0.00100000" } ] } :raises: BinanceRequestException, BinanceAPIException """ res = self._get('exchangeInfo') for item in res['symbols']: if item['symbol'] == symbol.upper(): return item return None
[ "def", "get_symbol_info", "(", "self", ",", "symbol", ")", ":", "res", "=", "self", ".", "_get", "(", "'exchangeInfo'", ")", "for", "item", "in", "res", "[", "'symbols'", "]", ":", "if", "item", "[", "'symbol'", "]", "==", "symbol", ".", "upper", "(", ")", ":", "return", "item", "return", "None" ]
Return information about a symbol :param symbol: required e.g BNBBTC :type symbol: str :returns: Dict if found, None if not .. code-block:: python { "symbol": "ETHBTC", "status": "TRADING", "baseAsset": "ETH", "baseAssetPrecision": 8, "quoteAsset": "BTC", "quotePrecision": 8, "orderTypes": ["LIMIT", "MARKET"], "icebergAllowed": false, "filters": [ { "filterType": "PRICE_FILTER", "minPrice": "0.00000100", "maxPrice": "100000.00000000", "tickSize": "0.00000100" }, { "filterType": "LOT_SIZE", "minQty": "0.00100000", "maxQty": "100000.00000000", "stepSize": "0.00100000" }, { "filterType": "MIN_NOTIONAL", "minNotional": "0.00100000" } ] } :raises: BinanceRequestException, BinanceAPIException
[ "Return", "information", "about", "a", "symbol" ]
python
train
29.833333
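A hedged usage sketch for the client method above; the Client import path is the standard one for python-binance, and the unauthenticated constructor is assumed to be enough here because exchangeInfo is a public endpoint:

from binance.client import Client

client = Client()  # no API keys needed for the public exchangeInfo endpoint
info = client.get_symbol_info('BNBBTC')
if info is not None:
    print(info['status'], info['baseAsset'], info['quoteAsset'])
else:
    print('symbol not listed')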
cggh/scikit-allel
allel/model/ndarray.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L1645-L1688
def to_sparse(self, format='csr', **kwargs): """Convert into a sparse matrix. Parameters ---------- format : {'coo', 'csc', 'csr', 'dia', 'dok', 'lil'} Sparse matrix format. kwargs : keyword arguments Passed through to sparse matrix constructor. Returns ------- m : scipy.sparse.spmatrix Sparse matrix Notes ----- If a mask has been set, it is ignored by this function. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 1], [0, 1]], ... [[1, 1], [0, 0]], ... [[0, 0], [-1, -1]]], dtype='i1') >>> m = g.to_sparse(format='csr') >>> m <4x4 sparse matrix of type '<class 'numpy.int8'>' with 6 stored elements in Compressed Sparse Row format> >>> m.data array([ 1, 1, 1, 1, -1, -1], dtype=int8) >>> m.indices array([1, 3, 0, 1, 2, 3], dtype=int32) >>> m.indptr array([0, 0, 2, 4, 6], dtype=int32) """ h = self.to_haplotypes() m = h.to_sparse(format=format, **kwargs) return m
[ "def", "to_sparse", "(", "self", ",", "format", "=", "'csr'", ",", "*", "*", "kwargs", ")", ":", "h", "=", "self", ".", "to_haplotypes", "(", ")", "m", "=", "h", ".", "to_sparse", "(", "format", "=", "format", ",", "*", "*", "kwargs", ")", "return", "m" ]
Convert into a sparse matrix. Parameters ---------- format : {'coo', 'csc', 'csr', 'dia', 'dok', 'lil'} Sparse matrix format. kwargs : keyword arguments Passed through to sparse matrix constructor. Returns ------- m : scipy.sparse.spmatrix Sparse matrix Notes ----- If a mask has been set, it is ignored by this function. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 1], [0, 1]], ... [[1, 1], [0, 0]], ... [[0, 0], [-1, -1]]], dtype='i1') >>> m = g.to_sparse(format='csr') >>> m <4x4 sparse matrix of type '<class 'numpy.int8'>' with 6 stored elements in Compressed Sparse Row format> >>> m.data array([ 1, 1, 1, 1, -1, -1], dtype=int8) >>> m.indices array([1, 3, 0, 1, 2, 3], dtype=int32) >>> m.indptr array([0, 0, 2, 4, 6], dtype=int32)
[ "Convert", "into", "a", "sparse", "matrix", "." ]
python
train
28.409091
klen/muffin-admin
muffin_admin/peewee.py
https://github.com/klen/muffin-admin/blob/404dc8e5107e943b7c42fa21c679c34ddb4de1d5/muffin_admin/peewee.py#L212-L221
def apply(self, query, data):
    """Filter a query."""
    field = self.model_field or query.model_class._meta.fields.get(self.name)
    if not field or self.name not in data:
        return query
    value = self.value(data)
    if value is self.default:
        return query
    value = field.db_value(value)
    return self.filter_query(query, field, value)
[ "def", "apply", "(", "self", ",", "query", ",", "data", ")", ":", "field", "=", "self", ".", "model_field", "or", "query", ".", "model_class", ".", "_meta", ".", "fields", ".", "get", "(", "self", ".", "name", ")", "if", "not", "field", "or", "self", ".", "name", "not", "in", "data", ":", "return", "query", "value", "=", "self", ".", "value", "(", "data", ")", "if", "value", "is", "self", ".", "default", ":", "return", "query", "value", "=", "field", ".", "db_value", "(", "value", ")", "return", "self", ".", "filter_query", "(", "query", ",", "field", ",", "value", ")" ]
Filter a query.
[ "Filter", "a", "query", "." ]
python
train
38.8
saltstack/salt
salt/cloud/clouds/proxmox.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/proxmox.py#L248-L272
def _parse_proxmox_upid(node, vm_=None):
    '''
    Upon requesting a task that runs for a longer period of time a UPID is given.
    This includes information about the job and can be used to lookup information in the log.
    '''
    ret = {}

    upid = node
    # Parse node response
    node = node.split(':')
    if node[0] == 'UPID':
        ret['node'] = six.text_type(node[1])
        ret['pid'] = six.text_type(node[2])
        ret['pstart'] = six.text_type(node[3])
        ret['starttime'] = six.text_type(node[4])
        ret['type'] = six.text_type(node[5])
        ret['vmid'] = six.text_type(node[6])
        ret['user'] = six.text_type(node[7])
        # include the upid again in case we'll need it again
        ret['upid'] = six.text_type(upid)

    if vm_ is not None and 'technology' in vm_:
        ret['technology'] = six.text_type(vm_['technology'])

    return ret
[ "def", "_parse_proxmox_upid", "(", "node", ",", "vm_", "=", "None", ")", ":", "ret", "=", "{", "}", "upid", "=", "node", "# Parse node response", "node", "=", "node", ".", "split", "(", "':'", ")", "if", "node", "[", "0", "]", "==", "'UPID'", ":", "ret", "[", "'node'", "]", "=", "six", ".", "text_type", "(", "node", "[", "1", "]", ")", "ret", "[", "'pid'", "]", "=", "six", ".", "text_type", "(", "node", "[", "2", "]", ")", "ret", "[", "'pstart'", "]", "=", "six", ".", "text_type", "(", "node", "[", "3", "]", ")", "ret", "[", "'starttime'", "]", "=", "six", ".", "text_type", "(", "node", "[", "4", "]", ")", "ret", "[", "'type'", "]", "=", "six", ".", "text_type", "(", "node", "[", "5", "]", ")", "ret", "[", "'vmid'", "]", "=", "six", ".", "text_type", "(", "node", "[", "6", "]", ")", "ret", "[", "'user'", "]", "=", "six", ".", "text_type", "(", "node", "[", "7", "]", ")", "# include the upid again in case we'll need it again", "ret", "[", "'upid'", "]", "=", "six", ".", "text_type", "(", "upid", ")", "if", "vm_", "is", "not", "None", "and", "'technology'", "in", "vm_", ":", "ret", "[", "'technology'", "]", "=", "six", ".", "text_type", "(", "vm_", "[", "'technology'", "]", ")", "return", "ret" ]
Upon requesting a task that runs for a longer period of time a UPID is given. This includes information about the job and can be used to lookup information in the log.
[ "Upon", "requesting", "a", "task", "that", "runs", "for", "a", "longer", "period", "of", "time", "a", "UPID", "is", "given", ".", "This", "includes", "information", "about", "the", "job", "and", "can", "be", "used", "to", "lookup", "information", "in", "the", "log", "." ]
python
train
35
odlgroup/odl
odl/discr/grid.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/grid.py#L1047-L1169
def uniform_grid_fromintv(intv_prod, shape, nodes_on_bdry=True):
    """Return a grid from sampling an interval product uniformly.

    The resulting grid will by default include ``intv_prod.min_pt`` and
    ``intv_prod.max_pt`` as grid points. If you want a subdivision into
    equally sized cells with grid points in the middle, use
    `uniform_partition` instead.

    Parameters
    ----------
    intv_prod : `IntervalProd`
        Set to be sampled.
    shape : int or sequence of ints
        Number of nodes per axis. Entries corresponding to degenerate axes
        must be equal to 1.
    nodes_on_bdry : bool or sequence, optional
        If a sequence is provided, it determines per axis whether to
        place the last grid point on the boundary (``True``) or shift it
        by half a cell size into the interior (``False``). In each axis,
        an entry may consist in a single bool or a 2-tuple of
        bool. In the latter case, the first tuple entry decides for
        the left, the second for the right boundary. The length of the
        sequence must be ``array.ndim``.

        A single boolean is interpreted as a global choice for all
        boundaries.

    Returns
    -------
    sampling : `RectGrid`
        Uniform sampling grid for the interval product.

    Examples
    --------
    >>> rbox = odl.IntervalProd([-1.5, 2], [-0.5, 3])
    >>> grid = uniform_grid_fromintv(rbox, (3, 3))
    >>> grid.coord_vectors
    (array([-1.5, -1. , -0.5]), array([ 2. , 2.5, 3. ]))

    To have the nodes in the "middle", use ``nodes_on_bdry=False``:

    >>> grid = uniform_grid_fromintv(rbox, (2, 2), nodes_on_bdry=False)
    >>> grid.coord_vectors
    (array([-1.25, -0.75]), array([ 2.25, 2.75]))

    See Also
    --------
    uniform_grid : Create a uniform grid directly.
    odl.discr.partition.uniform_partition_fromintv :
        divide interval product into equally sized subsets
    """
    if not isinstance(intv_prod, IntervalProd):
        raise TypeError('{!r} is not an `IntervalProd` instance'
                        ''.format(intv_prod))

    if (np.any(np.isinf(intv_prod.min_pt)) or
            np.any(np.isinf(intv_prod.max_pt))):
        raise ValueError('`intv_prod` must be finite, got {!r}'
                         ''.format('intv_prod'))

    shape = normalized_scalar_param_list(shape, intv_prod.ndim, safe_int_conv)

    if np.shape(nodes_on_bdry) == ():
        nodes_on_bdry = ([(bool(nodes_on_bdry), bool(nodes_on_bdry))] *
                         intv_prod.ndim)
    elif intv_prod.ndim == 1 and len(nodes_on_bdry) == 2:
        nodes_on_bdry = [nodes_on_bdry]
    elif len(nodes_on_bdry) != intv_prod.ndim:
        raise ValueError('`nodes_on_bdry` has length {}, expected {}'
                         ''.format(len(nodes_on_bdry), intv_prod.ndim))
    else:
        shape = tuple(int(n) for n in shape)

    # We need to determine the placement of the grid minimum and maximum
    # points based on the choices in nodes_on_bdry. If in a given axis,
    # and for a given side (left or right), the entry is True, the node lies
    # on the boundary, so this coordinate can simply be taken as-is.
    #
    # Otherwise, the following conditions must be met:
    #
    # 1. The node should be half a stride s away from the boundary
    # 2. Adding or subtracting (n-1)*s should give the other extremal node.
    #
    # If both nodes are to be shifted half a stride inside,
    # the second condition yields
    # a + s/2 + (n-1)*s = b - s/2 => s = (b - a) / n,
    # hence the extremal grid points are
    # gmin = a + s/2 = a + (b - a) / (2 * n),
    # gmax = b - s/2 = b - (b - a) / (2 * n).
    #
    # In the case where one node, say the rightmost, lies on the boundary,
    # the condition 2. reads as
    # a + s/2 + (n-1)*s = b => s = (b - a) / (n - 1/2),
    # thus
    # gmin = a + (b - a) / (2 * n - 1).

    gmin, gmax = [], []
    for n, xmin, xmax, on_bdry in zip(shape, intv_prod.min_pt,
                                      intv_prod.max_pt, nodes_on_bdry):

        # Unpack the tuple if possible, else use bool globally for this axis
        try:
            bdry_l, bdry_r = on_bdry
        except TypeError:
            bdry_l = bdry_r = on_bdry

        if bdry_l and bdry_r:
            gmin.append(xmin)
            gmax.append(xmax)
        elif bdry_l and not bdry_r:
            gmin.append(xmin)
            gmax.append(xmax - (xmax - xmin) / (2 * n - 1))
        elif not bdry_l and bdry_r:
            gmin.append(xmin + (xmax - xmin) / (2 * n - 1))
            gmax.append(xmax)
        else:
            gmin.append(xmin + (xmax - xmin) / (2 * n))
            gmax.append(xmax - (xmax - xmin) / (2 * n))

    # Create the grid
    coord_vecs = [np.linspace(mi, ma, num, endpoint=True, dtype=np.float64)
                  for mi, ma, num in zip(gmin, gmax, shape)]
    return RectGrid(*coord_vecs)
[ "def", "uniform_grid_fromintv", "(", "intv_prod", ",", "shape", ",", "nodes_on_bdry", "=", "True", ")", ":", "if", "not", "isinstance", "(", "intv_prod", ",", "IntervalProd", ")", ":", "raise", "TypeError", "(", "'{!r} is not an `IntervalProd` instance'", "''", ".", "format", "(", "intv_prod", ")", ")", "if", "(", "np", ".", "any", "(", "np", ".", "isinf", "(", "intv_prod", ".", "min_pt", ")", ")", "or", "np", ".", "any", "(", "np", ".", "isinf", "(", "intv_prod", ".", "max_pt", ")", ")", ")", ":", "raise", "ValueError", "(", "'`intv_prod` must be finite, got {!r}'", "''", ".", "format", "(", "'intv_prod'", ")", ")", "shape", "=", "normalized_scalar_param_list", "(", "shape", ",", "intv_prod", ".", "ndim", ",", "safe_int_conv", ")", "if", "np", ".", "shape", "(", "nodes_on_bdry", ")", "==", "(", ")", ":", "nodes_on_bdry", "=", "(", "[", "(", "bool", "(", "nodes_on_bdry", ")", ",", "bool", "(", "nodes_on_bdry", ")", ")", "]", "*", "intv_prod", ".", "ndim", ")", "elif", "intv_prod", ".", "ndim", "==", "1", "and", "len", "(", "nodes_on_bdry", ")", "==", "2", ":", "nodes_on_bdry", "=", "[", "nodes_on_bdry", "]", "elif", "len", "(", "nodes_on_bdry", ")", "!=", "intv_prod", ".", "ndim", ":", "raise", "ValueError", "(", "'`nodes_on_bdry` has length {}, expected {}'", "''", ".", "format", "(", "len", "(", "nodes_on_bdry", ")", ",", "intv_prod", ".", "ndim", ")", ")", "else", ":", "shape", "=", "tuple", "(", "int", "(", "n", ")", "for", "n", "in", "shape", ")", "# We need to determine the placement of the grid minimum and maximum", "# points based on the choices in nodes_on_bdry. If in a given axis,", "# and for a given side (left or right), the entry is True, the node lies", "# on the boundary, so this coordinate can simply be taken as-is.", "#", "# Otherwise, the following conditions must be met:", "#", "# 1. The node should be half a stride s away from the boundary", "# 2. Adding or subtracting (n-1)*s should give the other extremal node.", "#", "# If both nodes are to be shifted half a stride inside,", "# the second condition yields", "# a + s/2 + (n-1)*s = b - s/2 => s = (b - a) / n,", "# hence the extremal grid points are", "# gmin = a + s/2 = a + (b - a) / (2 * n),", "# gmax = b - s/2 = b - (b - a) / (2 * n).", "#", "# In the case where one node, say the rightmost, lies on the boundary,", "# the condition 2. 
reads as", "# a + s/2 + (n-1)*s = b => s = (b - a) / (n - 1/2),", "# thus", "# gmin = a + (b - a) / (2 * n - 1).", "gmin", ",", "gmax", "=", "[", "]", ",", "[", "]", "for", "n", ",", "xmin", ",", "xmax", ",", "on_bdry", "in", "zip", "(", "shape", ",", "intv_prod", ".", "min_pt", ",", "intv_prod", ".", "max_pt", ",", "nodes_on_bdry", ")", ":", "# Unpack the tuple if possible, else use bool globally for this axis", "try", ":", "bdry_l", ",", "bdry_r", "=", "on_bdry", "except", "TypeError", ":", "bdry_l", "=", "bdry_r", "=", "on_bdry", "if", "bdry_l", "and", "bdry_r", ":", "gmin", ".", "append", "(", "xmin", ")", "gmax", ".", "append", "(", "xmax", ")", "elif", "bdry_l", "and", "not", "bdry_r", ":", "gmin", ".", "append", "(", "xmin", ")", "gmax", ".", "append", "(", "xmax", "-", "(", "xmax", "-", "xmin", ")", "/", "(", "2", "*", "n", "-", "1", ")", ")", "elif", "not", "bdry_l", "and", "bdry_r", ":", "gmin", ".", "append", "(", "xmin", "+", "(", "xmax", "-", "xmin", ")", "/", "(", "2", "*", "n", "-", "1", ")", ")", "gmax", ".", "append", "(", "xmax", ")", "else", ":", "gmin", ".", "append", "(", "xmin", "+", "(", "xmax", "-", "xmin", ")", "/", "(", "2", "*", "n", ")", ")", "gmax", ".", "append", "(", "xmax", "-", "(", "xmax", "-", "xmin", ")", "/", "(", "2", "*", "n", ")", ")", "# Create the grid", "coord_vecs", "=", "[", "np", ".", "linspace", "(", "mi", ",", "ma", ",", "num", ",", "endpoint", "=", "True", ",", "dtype", "=", "np", ".", "float64", ")", "for", "mi", ",", "ma", ",", "num", "in", "zip", "(", "gmin", ",", "gmax", ",", "shape", ")", "]", "return", "RectGrid", "(", "*", "coord_vecs", ")" ]
Return a grid from sampling an interval product uniformly.

The resulting grid will by default include ``intv_prod.min_pt`` and
``intv_prod.max_pt`` as grid points. If you want a subdivision into
equally sized cells with grid points in the middle, use
`uniform_partition` instead.

Parameters
----------
intv_prod : `IntervalProd`
    Set to be sampled.
shape : int or sequence of ints
    Number of nodes per axis. Entries corresponding to degenerate axes
    must be equal to 1.
nodes_on_bdry : bool or sequence, optional
    If a sequence is provided, it determines per axis whether to
    place the last grid point on the boundary (``True``) or shift it
    by half a cell size into the interior (``False``). In each axis,
    an entry may consist in a single bool or a 2-tuple of
    bool. In the latter case, the first tuple entry decides for
    the left, the second for the right boundary. The length of the
    sequence must be ``array.ndim``.

    A single boolean is interpreted as a global choice for all
    boundaries.

Returns
-------
sampling : `RectGrid`
    Uniform sampling grid for the interval product.

Examples
--------
>>> rbox = odl.IntervalProd([-1.5, 2], [-0.5, 3])
>>> grid = uniform_grid_fromintv(rbox, (3, 3))
>>> grid.coord_vectors
(array([-1.5, -1. , -0.5]), array([ 2. , 2.5, 3. ]))

To have the nodes in the "middle", use ``nodes_on_bdry=False``:

>>> grid = uniform_grid_fromintv(rbox, (2, 2), nodes_on_bdry=False)
>>> grid.coord_vectors
(array([-1.25, -0.75]), array([ 2.25, 2.75]))

See Also
--------
uniform_grid : Create a uniform grid directly.
odl.discr.partition.uniform_partition_fromintv :
    divide interval product into equally sized subsets
[ "Return", "a", "grid", "from", "sampling", "an", "interval", "product", "uniformly", "." ]
python
train
38.813008
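The stride arithmetic spelled out in the comments of the record above can be checked numerically. The snippet below is an editorial illustration, not part of the dataset record; it only assumes NumPy and reuses the second axis ([2, 3], 2 nodes) from the docstring example with ``nodes_on_bdry=False``:

import numpy as np

a, b, n = 2.0, 3.0, 2             # interval [2, 3] sampled with 2 nodes
gmin = a + (b - a) / (2 * n)      # both boundary nodes shifted inward: s = (b - a) / n
gmax = b - (b - a) / (2 * n)
print(np.linspace(gmin, gmax, n, endpoint=True))   # [2.25 2.75], matching coord_vectors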
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/gloo/glir.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/gloo/glir.py#L893-L920
def draw(self, mode, selection):
    """ Draw program in given mode, with given selection (IndexBuffer or
    first, count).
    """
    if not self._linked:
        raise RuntimeError('Cannot draw program if code has not been set')
    # Init
    gl.check_error('Check before draw')
    mode = as_enum(mode)
    # Draw
    if len(selection) == 3:
        # Selection based on indices
        id_, gtype, count = selection
        if count:
            self._pre_draw()
            ibuf = self._parser.get_object(id_)
            ibuf.activate()
            gl.glDrawElements(mode, count, as_enum(gtype), None)
            ibuf.deactivate()
    else:
        # Selection based on start and count
        first, count = selection
        if count:
            self._pre_draw()
            gl.glDrawArrays(mode, first, count)
    # Wrap up
    gl.check_error('Check after draw')
    self._post_draw()
[ "def", "draw", "(", "self", ",", "mode", ",", "selection", ")", ":", "if", "not", "self", ".", "_linked", ":", "raise", "RuntimeError", "(", "'Cannot draw program if code has not been set'", ")", "# Init", "gl", ".", "check_error", "(", "'Check before draw'", ")", "mode", "=", "as_enum", "(", "mode", ")", "# Draw", "if", "len", "(", "selection", ")", "==", "3", ":", "# Selection based on indices", "id_", ",", "gtype", ",", "count", "=", "selection", "if", "count", ":", "self", ".", "_pre_draw", "(", ")", "ibuf", "=", "self", ".", "_parser", ".", "get_object", "(", "id_", ")", "ibuf", ".", "activate", "(", ")", "gl", ".", "glDrawElements", "(", "mode", ",", "count", ",", "as_enum", "(", "gtype", ")", ",", "None", ")", "ibuf", ".", "deactivate", "(", ")", "else", ":", "# Selection based on start and count", "first", ",", "count", "=", "selection", "if", "count", ":", "self", ".", "_pre_draw", "(", ")", "gl", ".", "glDrawArrays", "(", "mode", ",", "first", ",", "count", ")", "# Wrap up", "gl", ".", "check_error", "(", "'Check after draw'", ")", "self", ".", "_post_draw", "(", ")" ]
Draw program in given mode, with given selection (IndexBuffer or first, count).
[ "Draw", "program", "in", "given", "mode", "with", "given", "selection", "(", "IndexBuffer", "or", "first", "count", ")", "." ]
python
train
34.964286
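The selection-tuple dispatch in the draw record above (a 3-tuple triggers an indexed draw via glDrawElements, a 2-tuple a vertex-range draw via glDrawArrays) can be restated as a small self-contained sketch. This is editorial illustration, not vispy code:

def describe_selection(selection):
    # Mirror of the branching in GlirProgram.draw: length-3 selections carry
    # (index_buffer_id, index_dtype, count), length-2 selections carry (first, count).
    if len(selection) == 3:
        id_, gtype, count = selection
        return 'glDrawElements(count=%d, type=%s, index_buffer=%r)' % (count, gtype, id_)
    first, count = selection
    return 'glDrawArrays(first=%d, count=%d)' % (first, count)

print(describe_selection((5, 'uint32', 36)))   # indexed draw
print(describe_selection((0, 36)))             # vertex-range draw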
GPflow/GPflow
gpflow/training/monitor.py
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/training/monitor.py#L758-L770
def __lock_location(self) -> None:
    """
    Attempts to lock the location used by this writer. Will raise an error if the location
    is already locked by another writer. Will do nothing if the location is already locked
    by this writer.
    """
    if not self._is_active:
        if self._location in LogdirWriter._locked_locations:
            raise RuntimeError('TensorBoard event file in directory %s with suffix %s '
                               'is already in use. At present multiple TensoBoard file writers '
                               'cannot write data into the same file.' % self._location)
        LogdirWriter._locked_locations.add(self._location)
        self._is_active = True
[ "def", "__lock_location", "(", "self", ")", "->", "None", ":", "if", "not", "self", ".", "_is_active", ":", "if", "self", ".", "_location", "in", "LogdirWriter", ".", "_locked_locations", ":", "raise", "RuntimeError", "(", "'TensorBoard event file in directory %s with suffix %s '", "'is already in use. At present multiple TensoBoard file writers '", "'cannot write data into the same file.'", "%", "self", ".", "_location", ")", "LogdirWriter", ".", "_locked_locations", ".", "add", "(", "self", ".", "_location", ")", "self", ".", "_is_active", "=", "True" ]
Attempts to lock the location used by this writer. Will raise an error if the location is already locked by another writer. Will do nothing if the location is already locked by this writer.
[ "Attempts", "to", "lock", "the", "location", "used", "by", "this", "writer", ".", "Will", "raise", "an", "error", "if", "the", "location", "is", "already", "locked", "by", "another", "writer", ".", "Will", "do", "nothing", "if", "the", "location", "is", "already", "locked", "by", "this", "writer", "." ]
python
train
57
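The locking scheme in the __lock_location record above relies on a set shared at class level. A minimal standalone sketch of the same pattern (illustrative only; the names are invented and this is not GPflow code):

class Writer:
    _locked_locations = set()   # shared by all Writer instances

    def __init__(self, location):
        self._location = location
        self._is_active = False

    def lock(self):
        if not self._is_active:
            if self._location in Writer._locked_locations:
                raise RuntimeError('location %r is already in use' % (self._location,))
            Writer._locked_locations.add(self._location)
            self._is_active = True

    def release(self):
        if self._is_active:
            Writer._locked_locations.discard(self._location)
            self._is_active = False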
StackStorm/pybind
pybind/slxos/v17s_1_02/routing_system/router/hide_pim_holder/pim/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/routing_system/router/hide_pim_holder/pim/__init__.py#L575-L596
def _set_route_precedence(self, v, load=False):
    """
    Setter method for route_precedence, mapped from YANG variable /routing_system/router/hide_pim_holder/pim/route_precedence (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_route_precedence is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_route_precedence() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=route_precedence.route_precedence, is_container='container', presence=False, yang_name="route-precedence", rest_name="route-precedence", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'Specify Route Selection criteria', u'callpoint': u'PimRoutePrecedenceCallpoint', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """route_precedence must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=route_precedence.route_precedence, is_container='container', presence=False, yang_name="route-precedence", rest_name="route-precedence", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'Specify Route Selection criteria', u'callpoint': u'PimRoutePrecedenceCallpoint', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='container', is_config=True)""",
        })

    self.__route_precedence = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_route_precedence", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "route_precedence", ".", "route_precedence", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"route-precedence\"", ",", "rest_name", "=", "\"route-precedence\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'cli-compact-syntax'", ":", "None", ",", "u'info'", ":", "u'Specify Route Selection criteria'", ",", "u'callpoint'", ":", "u'PimRoutePrecedenceCallpoint'", ",", "u'cli-incomplete-command'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-pim'", ",", "defining_module", "=", "'brocade-pim'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"route_precedence must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=route_precedence.route_precedence, is_container='container', presence=False, yang_name=\"route-precedence\", rest_name=\"route-precedence\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'Specify Route Selection criteria', u'callpoint': u'PimRoutePrecedenceCallpoint', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__route_precedence", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for route_precedence, mapped from YANG variable /routing_system/router/hide_pim_holder/pim/route_precedence (container)
If this variable is read-only (config: false) in the source YANG file, then
_set_route_precedence is considered as a private method. Backends looking to
populate this variable should do so via calling thisObj._set_route_precedence() directly.
[ "Setter", "method", "for", "route_precedence", "mapped", "from", "YANG", "variable", "/", "routing_system", "/", "router", "/", "hide_pim_holder", "/", "pim", "/", "route_precedence", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_route_precedence", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_route_precedence", "()", "directly", "." ]
python
train
87.909091
NoneGG/aredis
aredis/commands/cluster.py
https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/commands/cluster.py#L194-L202
async def cluster_failover(self, node_id, option):
    """
    Forces a slave to perform a manual failover of its master

    Sends to specefied node
    """
    if not isinstance(option, str) or option.upper() not in {'FORCE', 'TAKEOVER'}:
        raise ClusterError('Wrong option provided')
    return await self.execute_command('CLUSTER FAILOVER', option, node_id=node_id)
[ "async", "def", "cluster_failover", "(", "self", ",", "node_id", ",", "option", ")", ":", "if", "not", "isinstance", "(", "option", ",", "str", ")", "or", "option", ".", "upper", "(", ")", "not", "in", "{", "'FORCE'", ",", "'TAKEOVER'", "}", ":", "raise", "ClusterError", "(", "'Wrong option provided'", ")", "return", "await", "self", ".", "execute_command", "(", "'CLUSTER FAILOVER'", ",", "option", ",", "node_id", "=", "node_id", ")" ]
Forces a slave to perform a manual failover of its master

Sends to specefied node
[ "Forces", "a", "slave", "to", "perform", "a", "manual", "failover", "of", "its", "master" ]
python
train
43.888889
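A usage sketch for the command documented above. The host, port and node id are placeholders, and a running Redis Cluster is assumed; the client class is aredis' StrictRedisCluster, which mixes in the cluster commands:

import asyncio
from aredis import StrictRedisCluster

async def main():
    client = StrictRedisCluster(startup_nodes=[{'host': '127.0.0.1', 'port': 7000}])
    # node_id identifies the replica that should take over from its master
    await client.cluster_failover('replica-node-id', 'TAKEOVER')

asyncio.get_event_loop().run_until_complete(main())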
sony/nnabla
python/src/nnabla/functions.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L208-L226
def split(x, axis=0):
    """
    Split arrays at the specified axis.

    It returns a number corresponding the size of the given
    axis (i.e ``x.shape[axis]``) of :obj:`~nnabla.Variable` s.

    Args:
        x(~nnabla.Variable): N-D array
        axis(int): Axis

    Returns:
        A :obj:`tuple` of :obj:`~nnabla.Variable` s

    See Also:
        :func:`nnabla.function_bases.split`.

    """
    from .function_bases import split as split_base
    return split_base(x, axis, x.shape[axis])
[ "def", "split", "(", "x", ",", "axis", "=", "0", ")", ":", "from", ".", "function_bases", "import", "split", "as", "split_base", "return", "split_base", "(", "x", ",", "axis", ",", "x", ".", "shape", "[", "axis", "]", ")" ]
Split arrays at the specified axis.

It returns a number corresponding the size of the given
axis (i.e ``x.shape[axis]``) of :obj:`~nnabla.Variable` s.

Args:
    x(~nnabla.Variable): N-D array
    axis(int): Axis

Returns:
    A :obj:`tuple` of :obj:`~nnabla.Variable` s

See Also:
    :func:`nnabla.function_bases.split`.
[ "Split", "arrays", "at", "the", "specified", "axis", "." ]
python
train
24.947368
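Based only on the docstring above, a small usage sketch (assumes nnabla is installed; the array shape is arbitrary):

import numpy as np
import nnabla as nn
import nnabla.functions as F

x = nn.Variable.from_numpy_array(np.random.rand(3, 4))
ys = F.split(x, axis=0)        # tuple of x.shape[0] == 3 Variables
print(len(ys), ys[0].shape)    # 3 (4,)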
respondcreate/django-versatileimagefield
versatileimagefield/mixins.py
https://github.com/respondcreate/django-versatileimagefield/blob/d41e279c39cccffafbe876c67596184704ae8877/versatileimagefield/mixins.py#L108-L133
def build_filters_and_sizers(self, ppoi_value, create_on_demand):
    """Build the filters and sizers for a field."""
    name = self.name
    if not name and self.field.placeholder_image_name:
        name = self.field.placeholder_image_name
    self.filters = FilterLibrary(
        name,
        self.storage,
        versatileimagefield_registry,
        ppoi_value,
        create_on_demand
    )
    for (
        attr_name,
        sizedimage_cls
    ) in iteritems(versatileimagefield_registry._sizedimage_registry):
        setattr(
            self,
            attr_name,
            sizedimage_cls(
                path_to_image=name,
                storage=self.storage,
                create_on_demand=create_on_demand,
                ppoi=ppoi_value
            )
        )
[ "def", "build_filters_and_sizers", "(", "self", ",", "ppoi_value", ",", "create_on_demand", ")", ":", "name", "=", "self", ".", "name", "if", "not", "name", "and", "self", ".", "field", ".", "placeholder_image_name", ":", "name", "=", "self", ".", "field", ".", "placeholder_image_name", "self", ".", "filters", "=", "FilterLibrary", "(", "name", ",", "self", ".", "storage", ",", "versatileimagefield_registry", ",", "ppoi_value", ",", "create_on_demand", ")", "for", "(", "attr_name", ",", "sizedimage_cls", ")", "in", "iteritems", "(", "versatileimagefield_registry", ".", "_sizedimage_registry", ")", ":", "setattr", "(", "self", ",", "attr_name", ",", "sizedimage_cls", "(", "path_to_image", "=", "name", ",", "storage", "=", "self", ".", "storage", ",", "create_on_demand", "=", "create_on_demand", ",", "ppoi", "=", "ppoi_value", ")", ")" ]
Build the filters and sizers for a field.
[ "Build", "the", "filters", "and", "sizers", "for", "a", "field", "." ]
python
test
33.307692
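The registry-driven attribute attachment in build_filters_and_sizers above boils down to the following generic pattern (editorial sketch, not library code):

class FieldFile:
    def attach_sizers(self, registry, **common_kwargs):
        # Every (attribute name, sizer class) pair registered by the library becomes
        # an attribute on the field file, pre-configured with the shared arguments.
        for attr_name, sizer_cls in registry.items():
            setattr(self, attr_name, sizer_cls(**common_kwargs))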
intel-analytics/BigDL
pyspark/bigdl/keras/backend.py
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/keras/backend.py#L60-L83
def predict(self, x, batch_size=None, verbose=None, is_distributed=False):
    """Generates output predictions for the input samples,
    processing the samples in a batched way.

    # Arguments
        x: the input data, as a Numpy array or list of Numpy array for local mode.
           as RDD[Sample] for distributed mode
        is_distributed: used to control run in local or cluster. the default value is False
    # Returns
        A Numpy array or RDD[Sample] of predictions.
    """
    if batch_size or verbose:
        raise Exception("we don't support batch_size or verbose for now")
    if is_distributed:
        if isinstance(x, np.ndarray):
            input = to_sample_rdd(x, np.zeros([x.shape[0]]))
            # np.asarray(self.bmodel.predict(x_rdd).collect())
        elif isinstance(x, RDD):
            input = x
        return self.bmodel.predict(input)
    else:
        if isinstance(x, np.ndarray):
            return self.bmodel.predict_local(x)
        raise Exception("not supported type: %s" % x)
[ "def", "predict", "(", "self", ",", "x", ",", "batch_size", "=", "None", ",", "verbose", "=", "None", ",", "is_distributed", "=", "False", ")", ":", "if", "batch_size", "or", "verbose", ":", "raise", "Exception", "(", "\"we don't support batch_size or verbose for now\"", ")", "if", "is_distributed", ":", "if", "isinstance", "(", "x", ",", "np", ".", "ndarray", ")", ":", "input", "=", "to_sample_rdd", "(", "x", ",", "np", ".", "zeros", "(", "[", "x", ".", "shape", "[", "0", "]", "]", ")", ")", "# np.asarray(self.bmodel.predict(x_rdd).collect())", "elif", "isinstance", "(", "x", ",", "RDD", ")", ":", "input", "=", "x", "return", "self", ".", "bmodel", ".", "predict", "(", "input", ")", "else", ":", "if", "isinstance", "(", "x", ",", "np", ".", "ndarray", ")", ":", "return", "self", ".", "bmodel", ".", "predict_local", "(", "x", ")", "raise", "Exception", "(", "\"not supported type: %s\"", "%", "x", ")" ]
Generates output predictions for the input samples,
processing the samples in a batched way.

# Arguments
    x: the input data, as a Numpy array or list of Numpy array for local mode.
       as RDD[Sample] for distributed mode
    is_distributed: used to control run in local or cluster. the default value is False

# Returns
    A Numpy array or RDD[Sample] of predictions.
[ "Generates", "output", "predictions", "for", "the", "input", "samples", "processing", "the", "samples", "in", "a", "batched", "way", "." ]
python
test
45.25
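A local-mode usage sketch for the predict record above. The model object `kmodel` is hypothetical (a Keras-style model wrapped by BigDL's backend); only the call pattern is taken from the docstring:

import numpy as np

x = np.random.rand(8, 10).astype('float32')
# kmodel is assumed to expose the predict() documented above
preds = kmodel.predict(x)                             # local path, returns a NumPy array
preds_rdd = kmodel.predict(x, is_distributed=True)    # converts x to RDD[Sample] first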