Dataset columns (type, observed range of values):

repo — string, length 7 to 54
path — string, length 4 to 192
url — string, length 87 to 284
code — string, length 78 to 104k
code_tokens — sequence
docstring — string, length 1 to 46.9k
docstring_tokens — sequence
language — string, 1 distinct value
partition — string, 3 distinct values
pandas-dev/pandas
pandas/tseries/offsets.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L654-L666
def rollback(self, dt):
    """
    Roll provided date backward to next offset only if not on offset.
    """
    if not self.onOffset(dt):
        businesshours = self._get_business_hours_by_sec
        if self.n >= 0:
            dt = self._prev_opening_time(dt) + timedelta(seconds=businesshours)
        else:
            dt = self._next_opening_time(dt) + timedelta(seconds=businesshours)
    return dt
[ "def", "rollback", "(", "self", ",", "dt", ")", ":", "if", "not", "self", ".", "onOffset", "(", "dt", ")", ":", "businesshours", "=", "self", ".", "_get_business_hours_by_sec", "if", "self", ".", "n", ">=", "0", ":", "dt", "=", "self", ".", "_prev_opening_time", "(", "dt", ")", "+", "timedelta", "(", "seconds", "=", "businesshours", ")", "else", ":", "dt", "=", "self", ".", "_next_opening_time", "(", "dt", ")", "+", "timedelta", "(", "seconds", "=", "businesshours", ")", "return", "dt" ]
Roll provided date backward to next offset only if not on offset.
[ "Roll", "provided", "date", "backward", "to", "next", "offset", "only", "if", "not", "on", "offset", "." ]
python
train
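A brief usage sketch of the method above, assuming a default pandas BusinessHour offset (09:00 to 17:00); the timestamp is illustrative:

import pandas as pd
from pandas.tseries.offsets import BusinessHour

bh = BusinessHour()  # n=1, business hours 09:00-17:00 by default
ts = pd.Timestamp('2014-08-02 15:00')  # a Saturday, so not on offset
print(bh.rollback(ts))  # rolls back to the previous close: Friday 17:00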
Alignak-monitoring/alignak
alignak/objects/host.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L1095-L1104
def _tot_services_by_state(self, services, state):
    """Get the number of services in the specified state

    :param state: state to filter services by
    :type state: int
    :return: number of services with s.state_id == state, as a string
    :rtype: str
    """
    return str(sum(1 for s in self.services if services[s].state_id == state))
[ "def", "_tot_services_by_state", "(", "self", ",", "services", ",", "state", ")", ":", "return", "str", "(", "sum", "(", "1", "for", "s", "in", "self", ".", "services", "if", "services", "[", "s", "]", ".", "state_id", "==", "state", ")", ")" ]
Get the number of services in the specified state

:param state: state to filter services by
:type state: int
:return: number of services with s.state_id == state, as a string
:rtype: str
[ "Get", "the", "number", "of", "service", "in", "the", "specified", "state" ]
python
train
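A standalone sketch of the counting idiom used above, with hypothetical service objects standing in for Alignak's:

from collections import namedtuple

Service = namedtuple('Service', 'state_id')  # hypothetical stand-in
services = {'web': Service(0), 'db': Service(2), 'dns': Service(2)}
names = ['web', 'db', 'dns']  # plays the role of self.services

# Count services whose state_id matches, then stringify as the method does.
print(str(sum(1 for s in names if services[s].state_id == 2)))  # '2'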
gatkin/declxml
declxml.py
https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1547-L1559
def _parse_boolean(element_text, state):
    """Parse the raw XML string as a boolean value."""
    value = None
    lowered_text = element_text.lower()
    if lowered_text == 'true':
        value = True
    elif lowered_text == 'false':
        value = False
    else:
        state.raise_error(InvalidPrimitiveValue,
                          'Invalid boolean value "{}"'.format(element_text))
    return value
[ "def", "_parse_boolean", "(", "element_text", ",", "state", ")", ":", "value", "=", "None", "lowered_text", "=", "element_text", ".", "lower", "(", ")", "if", "lowered_text", "==", "'true'", ":", "value", "=", "True", "elif", "lowered_text", "==", "'false'", ":", "value", "=", "False", "else", ":", "state", ".", "raise_error", "(", "InvalidPrimitiveValue", ",", "'Invalid boolean value \"{}\"'", ".", "format", "(", "element_text", ")", ")", "return", "value" ]
Parse the raw XML string as a boolean value.
[ "Parse", "the", "raw", "XML", "string", "as", "a", "boolean", "value", "." ]
python
train
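For context, a hedged sketch of how this private parser is reached through declxml's public processors (as documented in the project's README); the XML is illustrative:

import declxml as xml

processor = xml.dictionary('config', [
    xml.boolean('enabled'),  # parsed by _parse_boolean under the hood
])
print(xml.parse_from_string(processor,
                            '<config><enabled>true</enabled></config>'))
# {'enabled': True}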
Ex-Mente/auxi.0
auxi/tools/chemistry/thermochemistry.py
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/tools/chemistry/thermochemistry.py#L553-L653
def _read_compound_from_factsage_file_(file_name):
    """
    Build a dictionary containing the factsage thermochemical data of a
    compound by reading the data from a file.

    :param file_name: Name of file to read the data from.

    :returns: Dictionary containing compound data.
    """
    with open(file_name) as f:
        lines = f.readlines()

    compound = {'Formula': lines[0].split(' ')[1]}
    # FIXME: replace with logging
    print(compound['Formula'])
    compound['Phases'] = phs = {}

    started = False
    phaseold = 'zz'
    recordold = '0'

    for line in lines:
        if started:
            if line.startswith('_'):  # line indicating end of data
                break
            line = line.replace(' 298 ', ' 298.15 ')
            line = line.replace(' - ', ' ')
            while '  ' in line:
                line = line.replace('  ', ' ')
            line = line.replace(' \n', '')
            line = line.replace('\n', '')
            strings = line.split(' ')
            if len(strings) < 2:  # empty line
                continue
            phase = strings[0]
            if phase != phaseold:  # new phase detected
                phaseold = phase
                ph = phs[phase] = {}
                ph['Symbol'] = phase
                ph['DHref'] = float(strings[2])
                ph['Sref'] = float(strings[3])
                cprecs = ph['Cp_records'] = {}
                record = strings[1]
                if record != recordold:  # new record detected
                    recordold = record
                    Tmax = float(strings[len(strings) - 1])
                    cprecs[Tmax] = {}
                    cprecs[Tmax]['Tmin'] = float(strings[len(strings) - 2])
                    cprecs[Tmax]['Tmax'] = float(strings[len(strings) - 1])
                    cprecs[Tmax]['Terms'] = []
                    t = {'Coefficient': float(strings[4]),
                         'Exponent': float(strings[5])}
                    cprecs[Tmax]['Terms'].append(t)
                    if len(strings) == 10:
                        t = {'Coefficient': float(strings[6]),
                             'Exponent': float(strings[7])}
                        cprecs[Tmax]['Terms'].append(t)
                else:  # old record detected
                    t = {'Coefficient': float(strings[2]),
                         'Exponent': float(strings[3])}
                    cprecs[Tmax]['Terms'].append(t)
                    if len(strings) == 8:
                        t = {'Coefficient': float(strings[4]),
                             'Exponent': float(strings[5])}
                        cprecs[Tmax]['Terms'].append(t)
            else:  # old phase detected
                ph = phs[phase]
                record = strings[1]
                if record != recordold:  # new record detected
                    recordold = record
                    Tmax = float(strings[len(strings) - 1])
                    cprecs = ph['Cp_records']
                    cprecs[Tmax] = {}
                    cprecs[Tmax]['Tmin'] = float(strings[len(strings) - 2])
                    cprecs[Tmax]['Tmax'] = float(strings[len(strings) - 1])
                    cprecs[Tmax]['Terms'] = []
                    t = {'Coefficient': float(strings[2]),
                         'Exponent': float(strings[3])}
                    cprecs[Tmax]['Terms'].append(t)
                    if len(strings) == 8:
                        t = {'Coefficient': float(strings[4]),
                             'Exponent': float(strings[5])}
                        cprecs[Tmax]['Terms'].append(t)
                else:  # old record detected
                    t = {'Coefficient': float(strings[2]),
                         'Exponent': float(strings[3])}
                    cprecs[Tmax]['Terms'].append(t)
                    if len(strings) == 8:
                        t = {'Coefficient': float(strings[4]),
                             'Exponent': float(strings[5])}
                        cprecs[Tmax]['Terms'].append(t)
        if line.startswith('_'):  # line indicating the start of the data
            started = True

    for name, ph in phs.items():
        cprecs = ph['Cp_records']
        first = cprecs[min(cprecs.keys())]
        first['Tmin'] = 298.15

    return compound
[ "def", "_read_compound_from_factsage_file_", "(", "file_name", ")", ":", "with", "open", "(", "file_name", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "compound", "=", "{", "'Formula'", ":", "lines", "[", "0", "]", ".", "split", "(", "' '", ")", "[", "1", "]", "}", "# FIXME: replace with logging", "print", "(", "compound", "[", "'Formula'", "]", ")", "compound", "[", "'Phases'", "]", "=", "phs", "=", "{", "}", "started", "=", "False", "phaseold", "=", "'zz'", "recordold", "=", "'0'", "for", "line", "in", "lines", ":", "if", "started", ":", "if", "line", ".", "startswith", "(", "'_'", ")", ":", "# line indicating end of data", "break", "line", "=", "line", ".", "replace", "(", "' 298 '", ",", "' 298.15 '", ")", "line", "=", "line", ".", "replace", "(", "' - '", ",", "' '", ")", "while", "' '", "in", "line", ":", "line", "=", "line", ".", "replace", "(", "' '", ",", "' '", ")", "line", "=", "line", ".", "replace", "(", "' \\n'", ",", "''", ")", "line", "=", "line", ".", "replace", "(", "'\\n'", ",", "''", ")", "strings", "=", "line", ".", "split", "(", "' '", ")", "if", "len", "(", "strings", ")", "<", "2", ":", "# empty line", "continue", "phase", "=", "strings", "[", "0", "]", "if", "phase", "!=", "phaseold", ":", "# new phase detected", "phaseold", "=", "phase", "ph", "=", "phs", "[", "phase", "]", "=", "{", "}", "ph", "[", "'Symbol'", "]", "=", "phase", "ph", "[", "'DHref'", "]", "=", "float", "(", "strings", "[", "2", "]", ")", "ph", "[", "'Sref'", "]", "=", "float", "(", "strings", "[", "3", "]", ")", "cprecs", "=", "ph", "[", "'Cp_records'", "]", "=", "{", "}", "record", "=", "strings", "[", "1", "]", "if", "record", "!=", "recordold", ":", "# new record detected", "recordold", "=", "record", "Tmax", "=", "float", "(", "strings", "[", "len", "(", "strings", ")", "-", "1", "]", ")", "cprecs", "[", "Tmax", "]", "=", "{", "}", "cprecs", "[", "Tmax", "]", "[", "'Tmin'", "]", "=", "float", "(", "strings", "[", "len", "(", "strings", ")", "-", "2", "]", ")", "cprecs", "[", "Tmax", "]", "[", "'Tmax'", "]", "=", "float", "(", "strings", "[", "len", "(", "strings", ")", "-", "1", "]", ")", "cprecs", "[", "Tmax", "]", "[", "'Terms'", "]", "=", "[", "]", "t", "=", "{", "'Coefficient'", ":", "float", "(", "strings", "[", "4", "]", ")", ",", "'Exponent'", ":", "float", "(", "strings", "[", "5", "]", ")", "}", "cprecs", "[", "Tmax", "]", "[", "'Terms'", "]", ".", "append", "(", "t", ")", "if", "len", "(", "strings", ")", "==", "10", ":", "t", "=", "{", "'Coefficient'", ":", "float", "(", "strings", "[", "6", "]", ")", ",", "'Exponent'", ":", "float", "(", "strings", "[", "7", "]", ")", "}", "cprecs", "[", "Tmax", "]", "[", "'Terms'", "]", ".", "append", "(", "t", ")", "else", ":", "# old record detected", "t", "=", "{", "'Coefficient'", ":", "float", "(", "strings", "[", "2", "]", ")", ",", "'Exponent'", ":", "float", "(", "strings", "[", "3", "]", ")", "}", "cprecs", "[", "Tmax", "]", "[", "'Terms'", "]", ".", "append", "(", "t", ")", "if", "len", "(", "strings", ")", "==", "8", ":", "t", "=", "{", "'Coefficient'", ":", "float", "(", "strings", "[", "4", "]", ")", ",", "'Exponent'", ":", "float", "(", "strings", "[", "5", "]", ")", "}", "cprecs", "[", "Tmax", "]", "[", "'Terms'", "]", ".", "append", "(", "t", ")", "else", ":", "# old phase detected", "ph", "=", "phs", "[", "phase", "]", "record", "=", "strings", "[", "1", "]", "if", "record", "!=", "recordold", ":", "# new record detected", "recordold", "=", "record", "Tmax", "=", "float", "(", "strings", "[", "len", "(", 
"strings", ")", "-", "1", "]", ")", "cprecs", "=", "ph", "[", "'Cp_records'", "]", "cprecs", "[", "Tmax", "]", "=", "{", "}", "cprecs", "[", "Tmax", "]", "[", "'Tmin'", "]", "=", "float", "(", "strings", "[", "len", "(", "strings", ")", "-", "2", "]", ")", "cprecs", "[", "Tmax", "]", "[", "'Tmax'", "]", "=", "float", "(", "strings", "[", "len", "(", "strings", ")", "-", "1", "]", ")", "cprecs", "[", "Tmax", "]", "[", "'Terms'", "]", "=", "[", "]", "t", "=", "{", "'Coefficient'", ":", "float", "(", "strings", "[", "2", "]", ")", ",", "'Exponent'", ":", "float", "(", "strings", "[", "3", "]", ")", "}", "cprecs", "[", "Tmax", "]", "[", "'Terms'", "]", ".", "append", "(", "t", ")", "if", "len", "(", "strings", ")", "==", "8", ":", "t", "=", "{", "'Coefficient'", ":", "float", "(", "strings", "[", "4", "]", ")", ",", "'Exponent'", ":", "float", "(", "strings", "[", "5", "]", ")", "}", "cprecs", "[", "Tmax", "]", "[", "'Terms'", "]", ".", "append", "(", "t", ")", "else", ":", "# old record detected", "t", "=", "{", "'Coefficient'", ":", "float", "(", "strings", "[", "2", "]", ")", ",", "'Exponent'", ":", "float", "(", "strings", "[", "3", "]", ")", "}", "cprecs", "[", "Tmax", "]", "[", "'Terms'", "]", ".", "append", "(", "t", ")", "if", "len", "(", "strings", ")", "==", "8", ":", "t", "=", "{", "'Coefficient'", ":", "float", "(", "strings", "[", "4", "]", ")", ",", "'Exponent'", ":", "float", "(", "strings", "[", "5", "]", ")", "}", "cprecs", "[", "Tmax", "]", "[", "'Terms'", "]", ".", "append", "(", "t", ")", "if", "line", ".", "startswith", "(", "'_'", ")", ":", "# line indicating the start of the data", "started", "=", "True", "for", "name", ",", "ph", "in", "phs", ".", "items", "(", ")", ":", "cprecs", "=", "ph", "[", "'Cp_records'", "]", "first", "=", "cprecs", "[", "min", "(", "cprecs", ".", "keys", "(", ")", ")", "]", "first", "[", "'Tmin'", "]", "=", "298.15", "return", "compound" ]
Build a dictionary containing the factsage thermochemical data of a compound by reading the data from a file.

:param file_name: Name of file to read the data from.

:returns: Dictionary containing compound data.
[ "Build", "a", "dictionary", "containing", "the", "factsage", "thermochemical", "data", "of", "a", "compound", "by", "reading", "the", "data", "from", "a", "file", "." ]
python
valid
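A sketch of the dictionary shape the reader produces, with illustrative (not real) thermochemical values:

compound = {
    'Formula': 'SiO2',
    'Phases': {
        'S1': {
            'Symbol': 'S1',
            'DHref': -910700.0,   # illustrative value
            'Sref': 41.5,         # illustrative value
            'Cp_records': {
                847.0: {
                    'Tmin': 298.15,  # first record is clamped to 298.15
                    'Tmax': 847.0,
                    'Terms': [{'Coefficient': 46.9, 'Exponent': 0.0}],
                },
            },
        },
    },
}
# Cp records are keyed by their upper temperature limit:
print(sorted(compound['Phases']['S1']['Cp_records'].keys()))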
KelSolaar/Umbra
umbra/components/factory/script_editor/workers.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/workers.py#L192-L203
def pattern(self, value):
    """
    Setter for **self.__pattern** attribute.

    :param value: Attribute value.
    :type value: unicode
    """
    if value is not None:
        assert type(value) in (unicode, QString), \
            "'{0}' attribute: '{1}' type is not 'unicode' or 'QString'!".format("pattern", value)
    self.__pattern = value
[ "def", "pattern", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "in", "(", "unicode", ",", "QString", ")", ",", "\"'{0}' attribute: '{1}' type is not 'unicode' or 'QString'!\"", ".", "format", "(", "\"pattern\"", ",", "value", ")", "self", ".", "__pattern", "=", "value" ]
Setter for **self.__pattern** attribute.

:param value: Attribute value.
:type value: unicode
[ "Setter", "for", "**", "self", ".", "__pattern", "**", "attribute", "." ]
python
train
GeoPyTool/GeoPyTool
geopytool/GLMultiDimension.py
https://github.com/GeoPyTool/GeoPyTool/blob/8c198aa42e4fbdf62fac05d40cbf4d1086328da3/geopytool/GLMultiDimension.py#L507-L727
def Magic(self):
    #self.view.setFixedSize(self.width(), self.width())
    self.WholeData = []

    self.x_scale = self.width_plot / self.width_load
    self.y_scale = self.height_plot / self.height_load
    self.z_scale = self.depth_plot / self.depth_load
    # print(self.x_scale,' and ',self.x_scale)

    raw = self._df
    a = int(self.x_element.value())
    b = int(self.y_element.value())
    c = int(self.z_element.value())

    self.x_element_label.setText(self.items[a])
    self.y_element_label.setText(self.items[b])
    self.z_element_label.setText(self.items[c])

    if (self.Left != self.Right) and (self.Down != self.Up) and \
            abs(self.Left) + abs(self.Right) + abs(self.Down) + abs(self.Up) != 0:
        self.extent = [self.Left, self.Right, self.Down, self.Up]
    elif (self.Left == self.Right and abs(self.Left) + abs(self.Right) != 0):
        reply = QMessageBox.warning(self, 'Warning',
                                    'You set same value to Left and Right limits.')
        self.extent = 0
    elif (self.Down == self.Up and abs(self.Down) + abs(self.Up) != 0):
        reply = QMessageBox.warning(self, 'Warning',
                                    'You set same value to Up and Down limits.')
        self.extent = 0
    else:
        self.extent = 0

    standardnamechosen = self.StandardsName[int(self.norm_slider.value())]
    standardchosen = self.Standards[standardnamechosen]
    self.norm_slider_label.setText(standardnamechosen)

    PointLabels = []
    XtoDraw = []
    YtoDraw = []
    ZtoDraw = []
    Colors = []
    Alphas = []
    Markers = []
    Names = []

    for i in range(len(raw)):
        # raw.at[i, 'DataType'] == 'User' or raw.at[i, 'DataType'] == 'user' or raw.at[i, 'DataType'] == 'USER'
        TmpLabel = ''
        # self.WholeData.append(math.log(tmp, 10))
        if (raw.at[i, 'Label'] in PointLabels or raw.at[i, 'Label'] == ''):
            TmpLabel = ''
        else:
            PointLabels.append(raw.at[i, 'Label'])
            TmpLabel = raw.at[i, 'Label']

        x, y, z = 0, 0, 0
        xuse, yuse, zuse = 0, 0, 0
        x, y, z = raw.at[i, self.items[a]], raw.at[i, self.items[b]], raw.at[i, self.items[c]]

        try:
            xuse = x
            yuse = y
            zuse = z

            self.xlabel = self.items[a]
            self.ylabel = self.items[b]
            self.zlabel = self.items[c]

            if (self.Normalize_cb.isChecked()):
                self.xlabel = self.items[a] + ' Norm by ' + standardnamechosen
                self.x_element_label.setText(self.xlabel)
                self.ylabel = self.items[b] + ' Norm by ' + standardnamechosen
                self.y_element_label.setText(self.ylabel)
                self.zlabel = self.items[c] + ' Norm by ' + standardnamechosen
                self.z_element_label.setText(self.zlabel)

                if self.items[a] in self.Element:
                    xuse = xuse / standardchosen[self.items[a]]
                if self.items[b] in self.Element:
                    yuse = yuse / standardchosen[self.items[b]]
                if self.items[c] in self.Element:
                    zuse = zuse / standardchosen[self.items[c]]

            if (self.logx_cb.isChecked()):
                xuse = math.log(x, 10)
                self.xlabel = '$log10$ ' + self.xlabel
            if (self.logy_cb.isChecked()):
                yuse = math.log(y, 10)
                self.ylabel = '$log10$ ' + self.ylabel
            if (self.logz_cb.isChecked()):
                zuse = math.log(z, 10)
                self.zlabel = '$log10$ ' + self.zlabel

            XtoDraw.append(xuse)
            YtoDraw.append(yuse)
            ZtoDraw.append(zuse)
            Colors.append(raw.at[i, 'Color'])
            Alphas.append(raw.at[i, 'Alpha'])
            Names.append(raw.at[i, 'Label'])
            Markers.append(raw.at[i, 'Marker'])
        except(ValueError):
            pass

    if self.LimSet == False:
        self.Xleft, self.Xright, self.Ydown, self.Yup, self.Tail, self.Head = \
            min(XtoDraw), max(XtoDraw), min(YtoDraw), max(YtoDraw), min(ZtoDraw), max(ZtoDraw)

    xmin, xmax = min(XtoDraw), max(XtoDraw)
    ymin, ymax = min(YtoDraw), max(YtoDraw)
    zmin, zmax = min(ZtoDraw), max(ZtoDraw)

    xmean = np.mean(XtoDraw)
    ymean = np.mean(YtoDraw)
    zmean = np.mean(ZtoDraw)

    Xoriginal = np.arange(xmin, xmax, (xmax - xmin) / 10)
    Yoriginal = np.arange(ymin, ymax, (ymax - ymin) / 10)
    Zoriginal = np.arange(zmin, zmax, (zmax - zmin) / 10)

    XonPlot = self.GetASequence(tail=self.ShapeGroups)
    YonPlot = self.GetASequence(tail=self.ShapeGroups)
    ZonPlot = self.GetASequence(tail=self.ShapeGroups)

    XonStick = []
    YonStick = []
    ZonStick = []
    for i in range(len(XonPlot)):
        XonStick.append([XonPlot[i], Xoriginal[i]])
        YonStick.append([YonPlot[i], Yoriginal[i]])
        ZonStick.append([ZonPlot[i], Zoriginal[i]])
    pass

    #print(XtoDraw,'\n', YtoDraw,'\n', ZtoDraw)

    toDf = {self.xlabel: XtoDraw, self.ylabel: YtoDraw, self.zlabel: ZtoDraw}
    newdf = pd.DataFrame(toDf)
    # note: DataFrame.as_matrix() was removed in pandas 1.0; .values is the modern equivalent
    pos = newdf.as_matrix()
    print(pos)

    ThreeDimView = gl.GLScatterPlotItem(pos=pos, color=(100, 255, 255, 88), size=0.1, pxMode=False)
    print(xmean, '\n', ymean, '\n', zmean, '\n')
    self.view.pan(xmean, ymean, zmean)

    xgrid = gl.GLGridItem(size=QtGui.QVector3D(10, 10, 1), color=1)
    ygrid = gl.GLGridItem(size=QtGui.QVector3D(20, 20, 2), color=2)
    zgrid = gl.GLGridItem(size=QtGui.QVector3D(30, 30, 3), color=3)

    ## rotate x and y grids to face the correct direction
    xgrid.rotate(90, 0, 1, 0)
    ygrid.rotate(90, 1, 0, 0)

    xgrid.translate(xmean, ymean, zmean)
    ygrid.translate(xmean, ymean, zmean)
    zgrid.translate(xmean, ymean, zmean)

    ## scale each grid differently
    '''
    xgrid.scale(12.8, 12.8, 12.8)
    ygrid.scale(12.8, 12.8, 12.8)
    zgrid.scale(12.8, 12.8, 12.8)
    '''
    # xgrid.setTransform(xmean,ymean,zmean)

    self.view.addItem(xgrid)
    self.view.addItem(ygrid)
    self.view.addItem(zgrid)
    self.view.addItem(ThreeDimView)
[ "def", "Magic", "(", "self", ")", ":", "#self.view.setFixedSize(self.width(), self.width())", "self", ".", "WholeData", "=", "[", "]", "self", ".", "x_scale", "=", "self", ".", "width_plot", "/", "self", ".", "width_load", "self", ".", "y_scale", "=", "self", ".", "height_plot", "/", "self", ".", "height_load", "self", ".", "z_scale", "=", "self", ".", "depth_plot", "/", "self", ".", "depth_load", "# print(self.x_scale,' and ',self.x_scale)", "raw", "=", "self", ".", "_df", "a", "=", "int", "(", "self", ".", "x_element", ".", "value", "(", ")", ")", "b", "=", "int", "(", "self", ".", "y_element", ".", "value", "(", ")", ")", "c", "=", "int", "(", "self", ".", "z_element", ".", "value", "(", ")", ")", "self", ".", "x_element_label", ".", "setText", "(", "self", ".", "items", "[", "a", "]", ")", "self", ".", "y_element_label", ".", "setText", "(", "self", ".", "items", "[", "b", "]", ")", "self", ".", "z_element_label", ".", "setText", "(", "self", ".", "items", "[", "c", "]", ")", "if", "(", "self", ".", "Left", "!=", "self", ".", "Right", ")", "and", "(", "self", ".", "Down", "!=", "self", ".", "Up", ")", "and", "abs", "(", "self", ".", "Left", ")", "+", "abs", "(", "self", ".", "Right", ")", "+", "abs", "(", "self", ".", "Down", ")", "+", "abs", "(", "self", ".", "Up", ")", "!=", "0", ":", "self", ".", "extent", "=", "[", "self", ".", "Left", ",", "self", ".", "Right", ",", "self", ".", "Down", ",", "self", ".", "Up", "]", "elif", "(", "self", ".", "Left", "==", "self", ".", "Right", "and", "abs", "(", "self", ".", "Left", ")", "+", "abs", "(", "self", ".", "Right", ")", "!=", "0", ")", ":", "reply", "=", "QMessageBox", ".", "warning", "(", "self", ",", "'Warning'", ",", "'You set same value to Left and Right limits.'", ")", "self", ".", "extent", "=", "0", "elif", "(", "self", ".", "Down", "==", "self", ".", "Up", "and", "abs", "(", "self", ".", "Down", ")", "+", "abs", "(", "self", ".", "Up", ")", "!=", "0", ")", ":", "reply", "=", "QMessageBox", ".", "warning", "(", "self", ",", "'Warning'", ",", "'You set same value to Up and Down limits.'", ")", "self", ".", "extent", "=", "0", "else", ":", "self", ".", "extent", "=", "0", "standardnamechosen", "=", "self", ".", "StandardsName", "[", "int", "(", "self", ".", "norm_slider", ".", "value", "(", ")", ")", "]", "standardchosen", "=", "self", ".", "Standards", "[", "standardnamechosen", "]", "self", ".", "norm_slider_label", ".", "setText", "(", "standardnamechosen", ")", "PointLabels", "=", "[", "]", "XtoDraw", "=", "[", "]", "YtoDraw", "=", "[", "]", "ZtoDraw", "=", "[", "]", "Colors", "=", "[", "]", "Alphas", "=", "[", "]", "Markers", "=", "[", "]", "Names", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "raw", ")", ")", ":", "# raw.at[i, 'DataType'] == 'User' or raw.at[i, 'DataType'] == 'user' or raw.at[i, 'DataType'] == 'USER'", "TmpLabel", "=", "''", "# self.WholeData.append(math.log(tmp, 10))", "if", "(", "raw", ".", "at", "[", "i", ",", "'Label'", "]", "in", "PointLabels", "or", "raw", ".", "at", "[", "i", ",", "'Label'", "]", "==", "''", ")", ":", "TmpLabel", "=", "''", "else", ":", "PointLabels", ".", "append", "(", "raw", ".", "at", "[", "i", ",", "'Label'", "]", ")", "TmpLabel", "=", "raw", ".", "at", "[", "i", ",", "'Label'", "]", "x", ",", "y", ",", "z", "=", "0", ",", "0", ",", "0", "xuse", ",", "yuse", ",", "zuse", "=", "0", ",", "0", ",", "0", "x", ",", "y", ",", "z", "=", "raw", ".", "at", "[", "i", ",", "self", ".", "items", "[", "a", "]", "]", ",", "raw", ".", "at", "[", "i", ",", "self", ".", "items", 
"[", "b", "]", "]", ",", "raw", ".", "at", "[", "i", ",", "self", ".", "items", "[", "c", "]", "]", "try", ":", "xuse", "=", "x", "yuse", "=", "y", "zuse", "=", "z", "self", ".", "xlabel", "=", "self", ".", "items", "[", "a", "]", "self", ".", "ylabel", "=", "self", ".", "items", "[", "b", "]", "self", ".", "zlabel", "=", "self", ".", "items", "[", "c", "]", "if", "(", "self", ".", "Normalize_cb", ".", "isChecked", "(", ")", ")", ":", "self", ".", "xlabel", "=", "self", ".", "items", "[", "a", "]", "+", "' Norm by '", "+", "standardnamechosen", "self", ".", "x_element_label", ".", "setText", "(", "self", ".", "xlabel", ")", "self", ".", "ylabel", "=", "self", ".", "items", "[", "b", "]", "+", "' Norm by '", "+", "standardnamechosen", "self", ".", "y_element_label", ".", "setText", "(", "self", ".", "ylabel", ")", "self", ".", "zlabel", "=", "self", ".", "items", "[", "c", "]", "+", "' Norm by '", "+", "standardnamechosen", "self", ".", "z_element_label", ".", "setText", "(", "self", ".", "zlabel", ")", "if", "self", ".", "items", "[", "a", "]", "in", "self", ".", "Element", ":", "xuse", "=", "xuse", "/", "standardchosen", "[", "self", ".", "items", "[", "a", "]", "]", "if", "self", ".", "items", "[", "b", "]", "in", "self", ".", "Element", ":", "yuse", "=", "yuse", "/", "standardchosen", "[", "self", ".", "items", "[", "b", "]", "]", "if", "self", ".", "items", "[", "c", "]", "in", "self", ".", "Element", ":", "zuse", "=", "zuse", "/", "standardchosen", "[", "self", ".", "items", "[", "c", "]", "]", "if", "(", "self", ".", "logx_cb", ".", "isChecked", "(", ")", ")", ":", "xuse", "=", "math", ".", "log", "(", "x", ",", "10", ")", "self", ".", "xlabel", "=", "'$log10$ '", "+", "self", ".", "xlabel", "if", "(", "self", ".", "logy_cb", ".", "isChecked", "(", ")", ")", ":", "yuse", "=", "math", ".", "log", "(", "y", ",", "10", ")", "self", ".", "ylabel", "=", "'$log10$ '", "+", "self", ".", "ylabel", "if", "(", "self", ".", "logz_cb", ".", "isChecked", "(", ")", ")", ":", "zuse", "=", "math", ".", "log", "(", "z", ",", "10", ")", "self", ".", "zlabel", "=", "'$log10$ '", "+", "self", ".", "zlabel", "XtoDraw", ".", "append", "(", "xuse", ")", "YtoDraw", ".", "append", "(", "yuse", ")", "ZtoDraw", ".", "append", "(", "zuse", ")", "Colors", ".", "append", "(", "raw", ".", "at", "[", "i", ",", "'Color'", "]", ")", "Alphas", ".", "append", "(", "raw", ".", "at", "[", "i", ",", "'Alpha'", "]", ")", "Names", ".", "append", "(", "raw", ".", "at", "[", "i", ",", "'Label'", "]", ")", "Markers", ".", "append", "(", "raw", ".", "at", "[", "i", ",", "'Marker'", "]", ")", "except", "(", "ValueError", ")", ":", "pass", "if", "self", ".", "LimSet", "==", "False", ":", "self", ".", "Xleft", ",", "self", ".", "Xright", ",", "self", ".", "Ydown", ",", "self", ".", "Yup", ",", "self", ".", "Tail", ",", "self", ".", "Head", "=", "min", "(", "XtoDraw", ")", ",", "max", "(", "XtoDraw", ")", ",", "min", "(", "YtoDraw", ")", ",", "max", "(", "YtoDraw", ")", ",", "min", "(", "ZtoDraw", ")", ",", "max", "(", "ZtoDraw", ")", "xmin", ",", "xmax", "=", "min", "(", "XtoDraw", ")", ",", "max", "(", "XtoDraw", ")", "ymin", ",", "ymax", "=", "min", "(", "YtoDraw", ")", ",", "max", "(", "YtoDraw", ")", "zmin", ",", "zmax", "=", "min", "(", "ZtoDraw", ")", ",", "max", "(", "ZtoDraw", ")", "xmean", "=", "np", ".", "mean", "(", "XtoDraw", ")", "ymean", "=", "np", ".", "mean", "(", "YtoDraw", ")", "zmean", "=", "np", ".", "mean", "(", "ZtoDraw", ")", "Xoriginal", "=", "np", ".", "arange", "(", "xmin", ",", "xmax", ",", "(", "xmax", 
"-", "xmin", ")", "/", "10", ")", "Yoriginal", "=", "np", ".", "arange", "(", "ymin", ",", "ymax", ",", "(", "ymax", "-", "ymin", ")", "/", "10", ")", "Zoriginal", "=", "np", ".", "arange", "(", "zmin", ",", "zmax", ",", "(", "zmax", "-", "zmin", ")", "/", "10", ")", "XonPlot", "=", "self", ".", "GetASequence", "(", "tail", "=", "self", ".", "ShapeGroups", ")", "YonPlot", "=", "self", ".", "GetASequence", "(", "tail", "=", "self", ".", "ShapeGroups", ")", "ZonPlot", "=", "self", ".", "GetASequence", "(", "tail", "=", "self", ".", "ShapeGroups", ")", "XonStick", "=", "[", "]", "YonStick", "=", "[", "]", "ZonStick", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "XonPlot", ")", ")", ":", "XonStick", ".", "append", "(", "[", "XonPlot", "[", "i", "]", ",", "Xoriginal", "[", "i", "]", "]", ")", "YonStick", ".", "append", "(", "[", "YonPlot", "[", "i", "]", ",", "Yoriginal", "[", "i", "]", "]", ")", "ZonStick", ".", "append", "(", "[", "ZonPlot", "[", "i", "]", ",", "Zoriginal", "[", "i", "]", "]", ")", "pass", "#print(XtoDraw,'\\n', YtoDraw,'\\n', ZtoDraw)", "toDf", "=", "{", "self", ".", "xlabel", ":", "XtoDraw", ",", "self", ".", "ylabel", ":", "YtoDraw", ",", "self", ".", "zlabel", ":", "ZtoDraw", "}", "newdf", "=", "pd", ".", "DataFrame", "(", "toDf", ")", "pos", "=", "newdf", ".", "as_matrix", "(", ")", "print", "(", "pos", ")", "ThreeDimView", "=", "gl", ".", "GLScatterPlotItem", "(", "pos", "=", "pos", ",", "color", "=", "(", "100", ",", "255", ",", "255", ",", "88", ")", ",", "size", "=", "0.1", ",", "pxMode", "=", "False", ")", "print", "(", "xmean", ",", "'\\n'", ",", "ymean", ",", "'\\n'", ",", "zmean", ",", "'\\n'", ")", "self", ".", "view", ".", "pan", "(", "xmean", ",", "ymean", ",", "zmean", ")", "xgrid", "=", "gl", ".", "GLGridItem", "(", "size", "=", "QtGui", ".", "QVector3D", "(", "10", ",", "10", ",", "1", ")", ",", "color", "=", "1", ")", "ygrid", "=", "gl", ".", "GLGridItem", "(", "size", "=", "QtGui", ".", "QVector3D", "(", "20", ",", "20", ",", "2", ")", ",", "color", "=", "2", ")", "zgrid", "=", "gl", ".", "GLGridItem", "(", "size", "=", "QtGui", ".", "QVector3D", "(", "30", ",", "30", ",", "3", ")", ",", "color", "=", "3", ")", "## rotate x and y grids to face the correct direction", "xgrid", ".", "rotate", "(", "90", ",", "0", ",", "1", ",", "0", ")", "ygrid", ".", "rotate", "(", "90", ",", "1", ",", "0", ",", "0", ")", "xgrid", ".", "translate", "(", "xmean", ",", "ymean", ",", "zmean", ")", "ygrid", ".", "translate", "(", "xmean", ",", "ymean", ",", "zmean", ")", "zgrid", ".", "translate", "(", "xmean", ",", "ymean", ",", "zmean", ")", "## scale each grid differently", "# xgrid.setTransform(xmean,ymean,zmean)", "self", ".", "view", ".", "addItem", "(", "xgrid", ")", "self", ".", "view", ".", "addItem", "(", "ygrid", ")", "self", ".", "view", ".", "addItem", "(", "zgrid", ")", "self", ".", "view", ".", "addItem", "(", "ThreeDimView", ")" ]
xgrid.scale(12.8, 12.8, 12.8) ygrid.scale(12.8, 12.8, 12.8) zgrid.scale(12.8, 12.8, 12.8)
[ "xgrid", ".", "scale", "(", "12", ".", "8", "12", ".", "8", "12", ".", "8", ")", "ygrid", ".", "scale", "(", "12", ".", "8", "12", ".", "8", "12", ".", "8", ")", "zgrid", ".", "scale", "(", "12", ".", "8", "12", ".", "8", "12", ".", "8", ")" ]
python
train
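A minimal, self-contained sketch of the pyqtgraph.opengl calls the method relies on (a scatter item plus a grid); the data is random and the Qt entry point may differ across bindings:

import numpy as np
import pyqtgraph.opengl as gl
from pyqtgraph.Qt import QtWidgets

app = QtWidgets.QApplication([])
view = gl.GLViewWidget()
pos = np.random.normal(size=(100, 3))  # illustrative point cloud
view.addItem(gl.GLScatterPlotItem(pos=pos, size=0.1, pxMode=False))
view.addItem(gl.GLGridItem())
view.show()
app.exec_()  # with Qt6 bindings this may be app.exec()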
KxSystems/pyq
src/pyq/magic.py
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/magic.py#L23-L38
def logical_lines(lines):
    """Merge lines into chunks according to q rules"""
    if isinstance(lines, string_types):
        lines = StringIO(lines)
    buf = []
    for line in lines:
        if buf and not line.startswith(' '):
            chunk = ''.join(buf).strip()
            if chunk:
                yield chunk
            buf[:] = []
        buf.append(line)
    chunk = ''.join(buf).strip()
    if chunk:
        yield chunk
[ "def", "logical_lines", "(", "lines", ")", ":", "if", "isinstance", "(", "lines", ",", "string_types", ")", ":", "lines", "=", "StringIO", "(", "lines", ")", "buf", "=", "[", "]", "for", "line", "in", "lines", ":", "if", "buf", "and", "not", "line", ".", "startswith", "(", "' '", ")", ":", "chunk", "=", "''", ".", "join", "(", "buf", ")", ".", "strip", "(", ")", "if", "chunk", ":", "yield", "chunk", "buf", "[", ":", "]", "=", "[", "]", "buf", ".", "append", "(", "line", ")", "chunk", "=", "''", ".", "join", "(", "buf", ")", ".", "strip", "(", ")", "if", "chunk", ":", "yield", "chunk" ]
Merge lines into chunks according to q rules
[ "Merge", "lines", "into", "chunks", "according", "to", "q", "rules" ]
python
train
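Example (assuming the generator above is in scope): in q, a line beginning with whitespace continues the previous expression, so such lines are merged into one chunk.

src = 'f:{[x]\n  x+1}\ng:{x*2}\n'
print(list(logical_lines(src)))
# ['f:{[x]\n  x+1}', 'g:{x*2}']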
Qiskit/qiskit-terra
qiskit/validation/fields/custom.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/validation/fields/custom.py#L113-L124
def check_type(self, value, attr, data):
    """Customize check_type for handling containers."""
    # Check the type in the standard way first, in order to fail quickly
    # in case of invalid values.
    root_value = super(InstructionParameter, self).check_type(
        value, attr, data)

    if is_collection(value):
        _ = [super(InstructionParameter, self).check_type(item, attr, data)
             for item in value]

    return root_value
[ "def", "check_type", "(", "self", ",", "value", ",", "attr", ",", "data", ")", ":", "# Check the type in the standard way first, in order to fail quickly", "# in case of invalid values.", "root_value", "=", "super", "(", "InstructionParameter", ",", "self", ")", ".", "check_type", "(", "value", ",", "attr", ",", "data", ")", "if", "is_collection", "(", "value", ")", ":", "_", "=", "[", "super", "(", "InstructionParameter", ",", "self", ")", ".", "check_type", "(", "item", ",", "attr", ",", "data", ")", "for", "item", "in", "value", "]", "return", "root_value" ]
Customize check_type for handling containers.
[ "Customize", "check_type", "for", "handling", "containers", "." ]
python
test
Dallinger/Dallinger
dallinger/heroku/tools.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/heroku/tools.py#L190-L195
def destroy(self):
    """Destroy an app and all its add-ons"""
    result = self._result(
        ["heroku", "apps:destroy", "--app", self.name, "--confirm", self.name]
    )
    return result
[ "def", "destroy", "(", "self", ")", ":", "result", "=", "self", ".", "_result", "(", "[", "\"heroku\"", ",", "\"apps:destroy\"", ",", "\"--app\"", ",", "self", ".", "name", ",", "\"--confirm\"", ",", "self", ".", "name", "]", ")", "return", "result" ]
Destroy an app and all its add-ons
[ "Destroy", "an", "app", "and", "all", "its", "add", "-", "ons" ]
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/util/quaternion.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/util/quaternion.py#L193-L210
def get_axis_angle(self):
    """ Get the axis-angle representation of the quaternion.
    (The angle is in radians)
    """
    # Init
    angle = 2 * np.arccos(max(min(self.w, 1.), -1.))
    scale = (self.x**2 + self.y**2 + self.z**2)**0.5
    # Calc axis
    if scale:
        ax = self.x / scale
        ay = self.y / scale
        az = self.z / scale
    else:
        # No rotation, so arbitrary axis
        ax, ay, az = 1, 0, 0
    # Return
    return angle, ax, ay, az
[ "def", "get_axis_angle", "(", "self", ")", ":", "# Init", "angle", "=", "2", "*", "np", ".", "arccos", "(", "max", "(", "min", "(", "self", ".", "w", ",", "1.", ")", ",", "-", "1.", ")", ")", "scale", "=", "(", "self", ".", "x", "**", "2", "+", "self", ".", "y", "**", "2", "+", "self", ".", "z", "**", "2", ")", "**", "0.5", "# Calc axis", "if", "scale", ":", "ax", "=", "self", ".", "x", "/", "scale", "ay", "=", "self", ".", "y", "/", "scale", "az", "=", "self", ".", "z", "/", "scale", "else", ":", "# No rotation, so arbitrary axis", "ax", ",", "ay", ",", "az", "=", "1", ",", "0", ",", "0", "# Return", "return", "angle", ",", "ax", ",", "ay", ",", "az" ]
Get the axis-angle representation of the quaternion. (The angle is in radians)
[ "Get", "the", "axis", "-", "angle", "representation", "of", "the", "quaternion", ".", "(", "The", "angle", "is", "in", "radians", ")" ]
python
train
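A worked check of the formula above for a 90° rotation about z, using w = cos(θ/2) and z = sin(θ/2):

import numpy as np

w, x, y, z = np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)
angle = 2 * np.arccos(max(min(w, 1.), -1.))
scale = (x**2 + y**2 + z**2) ** 0.5
ax, ay, az = (x / scale, y / scale, z / scale) if scale else (1, 0, 0)
print(np.degrees(angle), (ax, ay, az))  # ~90.0 (0.0, 0.0, 1.0)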
assamite/creamas
creamas/examples/spiro/spiro_agent_mp.py
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/spiro/spiro_agent_mp.py#L651-L702
def plot_places(self):
    '''Plot places (in the parameter space) of all the generated artifacts
    and the artifacts accepted to the domain.
    '''
    from matplotlib import pyplot as plt
    fig, ax = plt.subplots()
    title = "Agent places, artifacts and env artifacts ({} env artifacts)".format(len(self.artifacts))

    x = []
    y = []
    for a in self.get_agents():
        args = a.arg_history
        x = x + [e[0] for e in args]
        y = y + [e[1] for e in args]
    sc = ax.scatter(x, y, marker='.', color=(0, 0, 1, 0.1), label='agent place')

    x = []
    y = []
    for a in self.get_agents():
        arts = a.A
        for ar in arts:
            if ar.self_criticism == 'pass':
                args = ar.framings[ar.creator]['args']
                x.append(args[0])
                y.append(args[1])
    sc = ax.scatter(x, y, marker="x", color=(0, 0, 1, 0.3), label='agent artifact')

    x = []
    y = []
    for a in self.artifacts:
        args = a.framings[a.creator]['args']
        x.append(args[0])
        y.append(args[1])
    sc = ax.scatter(x, y, marker="x", color='red', label='env artifact', s=40)

    ax.set_xlim([-200, 200])
    ax.set_ylim([-200, 200])
    ax.set_xlabel('r')
    ax.set_ylabel('r_')
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=10)
    ax.set_title(title)
    plt.tight_layout(rect=(0, 0, 0.8, 1))
    if self.logger is not None and self.logger.folder is not None:
        imname = os.path.join(self.logger.folder, 'arts_a{}_i{}_v{}.png'
                              .format(len(self.get_agents()), self.age,
                                      self.voting_method))
        plt.savefig(imname)
        plt.close()
    else:
        plt.show()
[ "def", "plot_places", "(", "self", ")", ":", "from", "matplotlib", "import", "pyplot", "as", "plt", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "title", "=", "\"Agent places, artifacts and env artifacts ({} env artifacts)\"", ".", "format", "(", "len", "(", "self", ".", "artifacts", ")", ")", "x", "=", "[", "]", "y", "=", "[", "]", "for", "a", "in", "self", ".", "get_agents", "(", ")", ":", "args", "=", "a", ".", "arg_history", "x", "=", "x", "+", "[", "e", "[", "0", "]", "for", "e", "in", "args", "]", "y", "=", "y", "+", "[", "e", "[", "1", "]", "for", "e", "in", "args", "]", "sc", "=", "ax", ".", "scatter", "(", "x", ",", "y", ",", "marker", "=", "'.'", ",", "color", "=", "(", "0", ",", "0", ",", "1", ",", "0.1", ")", ",", "label", "=", "'agent place'", ")", "x", "=", "[", "]", "y", "=", "[", "]", "for", "a", "in", "self", ".", "get_agents", "(", ")", ":", "arts", "=", "a", ".", "A", "for", "ar", "in", "arts", ":", "if", "ar", ".", "self_criticism", "==", "'pass'", ":", "args", "=", "ar", ".", "framings", "[", "ar", ".", "creator", "]", "[", "'args'", "]", "x", ".", "append", "(", "args", "[", "0", "]", ")", "y", ".", "append", "(", "args", "[", "1", "]", ")", "sc", "=", "ax", ".", "scatter", "(", "x", ",", "y", ",", "marker", "=", "\"x\"", ",", "color", "=", "(", "0", ",", "0", ",", "1", ",", "0.3", ")", ",", "label", "=", "'agent artifact'", ")", "x", "=", "[", "]", "y", "=", "[", "]", "for", "a", "in", "self", ".", "artifacts", ":", "args", "=", "a", ".", "framings", "[", "a", ".", "creator", "]", "[", "'args'", "]", "x", ".", "append", "(", "args", "[", "0", "]", ")", "y", ".", "append", "(", "args", "[", "1", "]", ")", "sc", "=", "ax", ".", "scatter", "(", "x", ",", "y", ",", "marker", "=", "\"x\"", ",", "color", "=", "'red'", ",", "label", "=", "'env artifact'", ",", "s", "=", "40", ")", "ax", ".", "set_xlim", "(", "[", "-", "200", ",", "200", "]", ")", "ax", ".", "set_ylim", "(", "[", "-", "200", ",", "200", "]", ")", "ax", ".", "set_xlabel", "(", "'r'", ")", "ax", ".", "set_ylabel", "(", "'r_'", ")", "ax", ".", "legend", "(", "loc", "=", "'center left'", ",", "bbox_to_anchor", "=", "(", "1", ",", "0.5", ")", ",", "fontsize", "=", "10", ")", "ax", ".", "set_title", "(", "title", ")", "plt", ".", "tight_layout", "(", "rect", "=", "(", "0", ",", "0", ",", "0.8", ",", "1", ")", ")", "if", "self", ".", "logger", "is", "not", "None", "and", "self", ".", "logger", ".", "folder", "is", "not", "None", ":", "imname", "=", "os", ".", "path", ".", "join", "(", "self", ".", "logger", ".", "folder", ",", "'arts_a{}_i{}_v{}.png'", ".", "format", "(", "len", "(", "self", ".", "get_agents", "(", ")", ")", ",", "self", ".", "age", ",", "self", ".", "voting_method", ")", ")", "plt", ".", "savefig", "(", "imname", ")", "plt", ".", "close", "(", ")", "else", ":", "plt", ".", "show", "(", ")" ]
Plot places (in the parameter space) of all the generated artifacts and the artifacts accepted to the domain.
[ "Plot", "places", "(", "in", "the", "parameter", "space", ")", "of", "all", "the", "generated", "artifacts", "and", "the", "artifacts", "accepted", "to", "the", "domain", "." ]
python
train
mattupstate/flask-security
flask_security/cli.py
https://github.com/mattupstate/flask-security/blob/a401fb47018fbbbe0b899ea55afadfd0e3cd847a/flask_security/cli.py#L130-L139
def users_activate(user):
    """Activate a user."""
    user_obj = _datastore.get_user(user)
    if user_obj is None:
        raise click.UsageError('ERROR: User not found.')
    if _datastore.activate_user(user_obj):
        click.secho('User "{0}" has been activated.'.format(user), fg='green')
    else:
        click.secho('User "{0}" was already activated.'.format(user), fg='yellow')
[ "def", "users_activate", "(", "user", ")", ":", "user_obj", "=", "_datastore", ".", "get_user", "(", "user", ")", "if", "user_obj", "is", "None", ":", "raise", "click", ".", "UsageError", "(", "'ERROR: User not found.'", ")", "if", "_datastore", ".", "activate_user", "(", "user_obj", ")", ":", "click", ".", "secho", "(", "'User \"{0}\" has been activated.'", ".", "format", "(", "user", ")", ",", "fg", "=", "'green'", ")", "else", ":", "click", ".", "secho", "(", "'User \"{0}\" was already activated.'", ".", "format", "(", "user", ")", ",", "fg", "=", "'yellow'", ")" ]
Activate a user.
[ "Activate", "a", "user", "." ]
python
train
spotify/luigi
luigi/contrib/opener.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/opener.py#L115-L142
def open(self, target_uri, **kwargs):
    """Open target uri.

    :param target_uri: Uri to open
    :type target_uri: string
    :returns: Target object
    """
    target = urlsplit(target_uri, scheme=self.default_opener)
    opener = self.get_opener(target.scheme)
    query = opener.conform_query(target.query)

    target = opener.get_target(
        target.scheme,
        target.path,
        target.fragment,
        target.username,
        target.password,
        target.hostname,
        target.port,
        query,
        **kwargs
    )
    target.opener_path = target_uri

    return target
[ "def", "open", "(", "self", ",", "target_uri", ",", "*", "*", "kwargs", ")", ":", "target", "=", "urlsplit", "(", "target_uri", ",", "scheme", "=", "self", ".", "default_opener", ")", "opener", "=", "self", ".", "get_opener", "(", "target", ".", "scheme", ")", "query", "=", "opener", ".", "conform_query", "(", "target", ".", "query", ")", "target", "=", "opener", ".", "get_target", "(", "target", ".", "scheme", ",", "target", ".", "path", ",", "target", ".", "fragment", ",", "target", ".", "username", ",", "target", ".", "password", ",", "target", ".", "hostname", ",", "target", ".", "port", ",", "query", ",", "*", "*", "kwargs", ")", "target", ".", "opener_path", "=", "target_uri", "return", "target" ]
Open target uri.

:param target_uri: Uri to open
:type target_uri: string
:returns: Target object
[ "Open", "target", "uri", "." ]
python
train
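A hedged usage sketch; OpenerTarget is the public wrapper in luigi.contrib.opener, and mock:// is assumed here to be one of its registered schemes:

from luigi.contrib.opener import OpenerTarget

target = OpenerTarget('mock://some/path')  # the scheme selects the opener
with target.open('w') as f:
    f.write('hello')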
theelous3/multio
multio/__init__.py
https://github.com/theelous3/multio/blob/018e4a9f78d5f4e78608a1a1537000b5fd778bbe/multio/__init__.py#L112-L121
def wrap(cls, meth):
    '''
    Wraps a connection opening method in this class.
    '''
    async def inner(*args, **kwargs):
        sock = await meth(*args, **kwargs)
        return cls(sock)

    return inner
[ "def", "wrap", "(", "cls", ",", "meth", ")", ":", "async", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "sock", "=", "await", "meth", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "cls", "(", "sock", ")", "return", "inner" ]
Wraps a connection opening method in this class.
[ "Wraps", "a", "connection", "opening", "method", "in", "this", "class", "." ]
python
train
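A self-contained sketch of the wrapping pattern, with a dummy opener standing in for a real connection method:

import asyncio

class Connection:
    def __init__(self, sock):
        self.sock = sock

    @classmethod
    def wrap(cls, meth):
        async def inner(*args, **kwargs):
            sock = await meth(*args, **kwargs)
            return cls(sock)
        return inner

async def open_raw(host):  # hypothetical opener returning a "socket"
    return ('tcp', host)

open_connection = Connection.wrap(open_raw)
conn = asyncio.run(open_connection('example.com'))
print(conn.sock)  # ('tcp', 'example.com')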
DataDog/integrations-core
vsphere/datadog_checks/vsphere/vsphere.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/vsphere/datadog_checks/vsphere/vsphere.py#L68-L80
def trace_method(method):
    """
    Decorator to catch and print the exceptions that happen within async tasks.
    Note: this should be applied to methods of VSphereCheck only!
    """
    def wrapper(*args, **kwargs):
        try:
            method(*args, **kwargs)
        except Exception:
            args[0].print_exception("A worker thread crashed:\n" + traceback.format_exc())
    return wrapper
[ "def", "trace_method", "(", "method", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "method", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "Exception", ":", "args", "[", "0", "]", ".", "print_exception", "(", "\"A worker thread crashed:\\n\"", "+", "traceback", ".", "format_exc", "(", ")", ")", "return", "wrapper" ]
Decorator to catch and print the exceptions that happen within async tasks. Note: this should be applied to methods of VSphereCheck only!
[ "Decorator", "to", "catch", "and", "print", "the", "exceptions", "that", "happen", "within", "async", "tasks", ".", "Note", ":", "this", "should", "be", "applied", "to", "methods", "of", "VSphereCheck", "only!" ]
python
train
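A runnable sketch of the decorator in isolation; DummyCheck is a hypothetical stand-in for VSphereCheck:

import traceback

def trace_method(method):  # same shape as the decorator above
    def wrapper(*args, **kwargs):
        try:
            method(*args, **kwargs)
        except Exception:
            args[0].print_exception("A worker thread crashed:\n" + traceback.format_exc())
    return wrapper

class DummyCheck:  # hypothetical stand-in for VSphereCheck
    def print_exception(self, msg):
        print(msg)

    @trace_method
    def collect(self):
        raise RuntimeError("boom")

DummyCheck().collect()  # prints the traceback instead of crashing the thread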
QuantEcon/QuantEcon.py
quantecon/kalman.py
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/kalman.py#L279-L312
def stationary_coefficients(self, j, coeff_type='ma'):
    """
    Wold representation moving average or VAR coefficients for the
    steady state Kalman filter.

    Parameters
    ----------
    j : int
        The lag length
    coeff_type : string, either 'ma' or 'var' (default='ma')
        The type of coefficient sequence to compute.  Either 'ma' for
        moving average or 'var' for VAR.
    """
    # == simplify notation == #
    A, G = self.ss.A, self.ss.G
    K_infinity = self.K_infinity
    # == compute and return coefficients == #
    coeffs = []
    i = 1
    if coeff_type == 'ma':
        coeffs.append(np.identity(self.ss.k))
        P_mat = A
        P = np.identity(self.ss.n)  # Create a copy
    elif coeff_type == 'var':
        coeffs.append(dot(G, K_infinity))
        P_mat = A - dot(K_infinity, G)
        P = np.copy(P_mat)  # Create a copy
    else:
        raise ValueError("Unknown coefficient type")
    while i <= j:
        coeffs.append(dot(dot(G, P), K_infinity))
        P = dot(P, P_mat)
        i += 1
    return coeffs
[ "def", "stationary_coefficients", "(", "self", ",", "j", ",", "coeff_type", "=", "'ma'", ")", ":", "# == simplify notation == #", "A", ",", "G", "=", "self", ".", "ss", ".", "A", ",", "self", ".", "ss", ".", "G", "K_infinity", "=", "self", ".", "K_infinity", "# == compute and return coefficients == #", "coeffs", "=", "[", "]", "i", "=", "1", "if", "coeff_type", "==", "'ma'", ":", "coeffs", ".", "append", "(", "np", ".", "identity", "(", "self", ".", "ss", ".", "k", ")", ")", "P_mat", "=", "A", "P", "=", "np", ".", "identity", "(", "self", ".", "ss", ".", "n", ")", "# Create a copy", "elif", "coeff_type", "==", "'var'", ":", "coeffs", ".", "append", "(", "dot", "(", "G", ",", "K_infinity", ")", ")", "P_mat", "=", "A", "-", "dot", "(", "K_infinity", ",", "G", ")", "P", "=", "np", ".", "copy", "(", "P_mat", ")", "# Create a copy", "else", ":", "raise", "ValueError", "(", "\"Unknown coefficient type\"", ")", "while", "i", "<=", "j", ":", "coeffs", ".", "append", "(", "dot", "(", "dot", "(", "G", ",", "P", ")", ",", "K_infinity", ")", ")", "P", "=", "dot", "(", "P", ",", "P_mat", ")", "i", "+=", "1", "return", "coeffs" ]
Wold representation moving average or VAR coefficients for the steady state Kalman filter.

Parameters
----------
j : int
    The lag length
coeff_type : string, either 'ma' or 'var' (default='ma')
    The type of coefficient sequence to compute. Either 'ma' for moving
    average or 'var' for VAR.
[ "Wold", "representation", "moving", "average", "or", "VAR", "coefficients", "for", "the", "steady", "state", "Kalman", "filter", "." ]
python
train
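A hedged usage sketch with a scalar state-space model; stationary_values() must run first so K_infinity exists, and the matrices are illustrative:

import numpy as np
import quantecon as qe

A, C, G, H = (np.array([[0.9]]), np.array([[1.0]]),
              np.array([[1.0]]), np.array([[0.5]]))
ss = qe.LinearStateSpace(A, C, G, H)
kalman = qe.Kalman(ss)
kalman.stationary_values()                      # computes K_infinity
print(kalman.stationary_coefficients(3, 'ma'))  # first few MA coefficients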
earlzo/hfut
hfut/shortcut.py
https://github.com/earlzo/hfut/blob/09270a9647fba79f26fd1a8a3c53c0678b5257a1/hfut/shortcut.py#L237-L265
def evaluate_course(self, kcdm, jxbh,
                    r101=1, r102=1, r103=1, r104=1, r105=1,
                    r106=1, r107=1, r108=1, r109=1,
                    r201=3, r202=3,
                    advice=''):
    """
    Course evaluation. Values range from 1 to 5; for the r1-type options 1 is
    best and 5 is worst, while the r2-type options run from deep to shallow,
    with 3 being best. The defaults are all the best options.

    :param kcdm: course code
    :param jxbh: teaching class number
    :param r101: earnest teaching attitude, thorough preparation before class
    :param r102: substantial teaching content, key points clearly highlighted
    :param r103: links theory to practice, reflects the latest developments
    :param r104: flexible teaching methods, good teacher-student interaction
    :param r105: uses modern technology, varied teaching techniques
    :param r106: teaches students according to their aptitude, fosters ability
    :param r107: strict requirements and management, cares for students
    :param r108: sets an example in every respect, stresses character as well as knowledge
    :param r109: overall teaching effectiveness
    :param r201: course content
    :param r202: course workload
    :param advice: other suggestions; at most 120 characters, and semicolons,
        single quotes and commas are not allowed
    :return:
    """
    return self.query(EvaluateCourse(
        kcdm, jxbh,
        r101, r102, r103, r104, r105, r106, r107, r108, r109,
        r201, r202,
        advice
    ))
[ "def", "evaluate_course", "(", "self", ",", "kcdm", ",", "jxbh", ",", "r101", "=", "1", ",", "r102", "=", "1", ",", "r103", "=", "1", ",", "r104", "=", "1", ",", "r105", "=", "1", ",", "r106", "=", "1", ",", "r107", "=", "1", ",", "r108", "=", "1", ",", "r109", "=", "1", ",", "r201", "=", "3", ",", "r202", "=", "3", ",", "advice", "=", "''", ")", ":", "return", "self", ".", "query", "(", "EvaluateCourse", "(", "kcdm", ",", "jxbh", ",", "r101", ",", "r102", ",", "r103", ",", "r104", ",", "r105", ",", "r106", ",", "r107", ",", "r108", ",", "r109", ",", "r201", ",", "r202", ",", "advice", ")", ")" ]
Course evaluation. Values range from 1 to 5; for the r1-type options 1 is best and 5 is worst, while the r2-type options run from deep to shallow, with 3 being best. The defaults are all the best options.

:param kcdm: course code
:param jxbh: teaching class number
:param r101: earnest teaching attitude, thorough preparation before class
:param r102: substantial teaching content, key points clearly highlighted
:param r103: links theory to practice, reflects the latest developments
:param r104: flexible teaching methods, good teacher-student interaction
:param r105: uses modern technology, varied teaching techniques
:param r106: teaches students according to their aptitude, fosters ability
:param r107: strict requirements and management, cares for students
:param r108: sets an example in every respect, stresses character as well as knowledge
:param r109: overall teaching effectiveness
:param r201: course content
:param r202: course workload
:param advice: other suggestions; at most 120 characters, and semicolons, single quotes and commas are not allowed
:return:
[ "课程评价", "数值为", "1", "-", "5", "r1", "类选项", "1", "为最好", "5", "为最差", "r2", "类选项程度由深到浅", "3", "为最好", "." ]
python
train
inveniosoftware/invenio-query-parser
invenio_query_parser/walkers/match_unit.py
https://github.com/inveniosoftware/invenio-query-parser/blob/21a2c36318003ff52d2e18e7196bb420db8ecb4b/invenio_query_parser/walkers/match_unit.py#L37-L54
def dottable_getitem(data, dottable_key, default=None):
    """Return item as ``dict.__getitem__`` but using keys with dots.

    It does not address indexes in iterables.
    """
    def getitem(value, *keys):
        if not keys:
            return default
        elif len(keys) == 1:
            key = keys[0]
            if isinstance(value, MutableMapping):
                return value.get(key, default)
            elif isinstance(value, Sequence) and \
                    not isinstance(value, six.string_types):
                return [getitem(v, key) for v in value]
            return default
        return getitem(getitem(value, keys[0]), *keys[1:])
    return getitem(data, *dottable_key.split('.'))
[ "def", "dottable_getitem", "(", "data", ",", "dottable_key", ",", "default", "=", "None", ")", ":", "def", "getitem", "(", "value", ",", "*", "keys", ")", ":", "if", "not", "keys", ":", "return", "default", "elif", "len", "(", "keys", ")", "==", "1", ":", "key", "=", "keys", "[", "0", "]", "if", "isinstance", "(", "value", ",", "MutableMapping", ")", ":", "return", "value", ".", "get", "(", "key", ",", "default", ")", "elif", "isinstance", "(", "value", ",", "Sequence", ")", "and", "not", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "return", "[", "getitem", "(", "v", ",", "key", ")", "for", "v", "in", "value", "]", "return", "default", "return", "getitem", "(", "getitem", "(", "value", ",", "keys", "[", "0", "]", ")", ",", "*", "keys", "[", "1", ":", "]", ")", "return", "getitem", "(", "data", ",", "*", "dottable_key", ".", "split", "(", "'.'", ")", ")" ]
Return item as ``dict.__getitem__`` but using keys with dots.

It does not address indexes in iterables.
[ "Return", "item", "as", "dict", ".", "__getitem__", "but", "using", "keys", "with", "dots", "." ]
python
train
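Worked examples, assuming the function above is in scope; note how a list along the path fans out into a list of results:

data = {'a': {'b': [{'c': 1}, {'c': 2}]}}
print(dottable_getitem(data, 'a.b.c'))                 # [1, 2]
print(dottable_getitem(data, 'a.missing', default=0))  # 0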
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Defaults.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Defaults.py#L344-L358
def _concat(prefix, list, suffix, env, f=lambda x: x, target=None, source=None):
    """
    Creates a new list from 'list' by first interpolating each element
    in the list using the 'env' dictionary and then calling f on the
    list, and finally calling _concat_ixes to concatenate 'prefix' and
    'suffix' onto each element of the list.
    """
    if not list:
        return list

    l = f(SCons.PathList.PathList(list).subst_path(env, target, source))
    if l is not None:
        list = l

    return _concat_ixes(prefix, list, suffix, env)
[ "def", "_concat", "(", "prefix", ",", "list", ",", "suffix", ",", "env", ",", "f", "=", "lambda", "x", ":", "x", ",", "target", "=", "None", ",", "source", "=", "None", ")", ":", "if", "not", "list", ":", "return", "list", "l", "=", "f", "(", "SCons", ".", "PathList", ".", "PathList", "(", "list", ")", ".", "subst_path", "(", "env", ",", "target", ",", "source", ")", ")", "if", "l", "is", "not", "None", ":", "list", "=", "l", "return", "_concat_ixes", "(", "prefix", ",", "list", ",", "suffix", ",", "env", ")" ]
Creates a new list from 'list' by first interpolating each element in the list using the 'env' dictionary and then calling f on the list, and finally calling _concat_ixes to concatenate 'prefix' and 'suffix' onto each element of the list.
[ "Creates", "a", "new", "list", "from", "list", "by", "first", "interpolating", "each", "element", "in", "the", "list", "using", "the", "env", "dictionary", "and", "then", "calling", "f", "on", "the", "list", "and", "finally", "calling", "_concat_ixes", "to", "concatenate", "prefix", "and", "suffix", "onto", "each", "element", "of", "the", "list", "." ]
python
train
VingtCinq/python-mailchimp
mailchimp3/entities/automationemailqueues.py
https://github.com/VingtCinq/python-mailchimp/blob/1b472f1b64fdde974732ac4b7ed48908bb707260/mailchimp3/entities/automationemailqueues.py#L31-L61
def create(self, workflow_id, email_id, data):
    """
    Manually add a subscriber to a workflow, bypassing the default trigger
    settings. You can also use this endpoint to trigger a series of automated
    emails in an API 3.0 workflow type or add subscribers to an automated
    email queue that uses the API request delay type.

    :param workflow_id: The unique id for the Automation workflow.
    :type workflow_id: :py:class:`str`
    :param email_id: The unique id for the Automation workflow email.
    :type email_id: :py:class:`str`
    :param data: The request body parameters
    :type data: :py:class:`dict`
        data = {
            "email_address": string*
        }
    """
    self.workflow_id = workflow_id
    self.email_id = email_id
    if 'email_address' not in data:
        raise KeyError('The automation email queue must have an email_address')
    check_email(data['email_address'])
    response = self._mc_client._post(
        url=self._build_path(workflow_id, 'emails', email_id, 'queue'),
        data=data
    )
    if response is not None:
        self.subscriber_hash = response['id']
    else:
        self.subscriber_hash = None
    return response
[ "def", "create", "(", "self", ",", "workflow_id", ",", "email_id", ",", "data", ")", ":", "self", ".", "workflow_id", "=", "workflow_id", "self", ".", "email_id", "=", "email_id", "if", "'email_address'", "not", "in", "data", ":", "raise", "KeyError", "(", "'The automation email queue must have an email_address'", ")", "check_email", "(", "data", "[", "'email_address'", "]", ")", "response", "=", "self", ".", "_mc_client", ".", "_post", "(", "url", "=", "self", ".", "_build_path", "(", "workflow_id", ",", "'emails'", ",", "email_id", ",", "'queue'", ")", ",", "data", "=", "data", ")", "if", "response", "is", "not", "None", ":", "self", ".", "subscriber_hash", "=", "response", "[", "'id'", "]", "else", ":", "self", ".", "subscriber_hash", "=", "None", "return", "response" ]
Manually add a subscriber to a workflow, bypassing the default trigger settings. You can also use this endpoint to trigger a series of automated emails in an API 3.0 workflow type or add subscribers to an automated email queue that uses the API request delay type.

:param workflow_id: The unique id for the Automation workflow.
:type workflow_id: :py:class:`str`
:param email_id: The unique id for the Automation workflow email.
:type email_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
    data = {
        "email_address": string*
    }
[ "Manually", "add", "a", "subscriber", "to", "a", "workflow", "bypassing", "the", "default", "trigger", "settings", ".", "You", "can", "also", "use", "this", "endpoint", "to", "trigger", "a", "series", "of", "automated", "emails", "in", "an", "API", "3", ".", "0", "workflow", "type", "or", "add", "subscribers", "to", "an", "automated", "email", "queue", "that", "uses", "the", "API", "request", "delay", "type", "." ]
python
valid
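A hedged call sketch; the ids and API key are placeholders, and the attribute path follows mailchimp3's client layout:

from mailchimp3 import MailChimp

client = MailChimp(mc_api='YOUR-API-KEY-us1')  # placeholder key
client.automations.emails.queues.create(
    workflow_id='WORKFLOW_ID',   # placeholder
    email_id='EMAIL_ID',         # placeholder
    data={'email_address': 'subscriber@example.com'},
)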
dead-beef/markovchain
markovchain/cli/util.py
https://github.com/dead-beef/markovchain/blob/9bd10b2f01089341c4a875a0fa569d50caba22c7/markovchain/cli/util.py#L342-L360
def cmd_settings(args):
    """Print generator settings.

    Parameters
    ----------
    args : `argparse.Namespace`
        Command arguments.
    """
    if args.type == SQLITE:
        storage = SqliteStorage
    else:
        storage = JsonStorage
    storage = storage.load(args.state)
    data = storage.settings
    try:
        del data['markov']['nodes']
    except KeyError:
        pass
    pprint(data)
[ "def", "cmd_settings", "(", "args", ")", ":", "if", "args", ".", "type", "==", "SQLITE", ":", "storage", "=", "SqliteStorage", "else", ":", "storage", "=", "JsonStorage", "storage", "=", "storage", ".", "load", "(", "args", ".", "state", ")", "data", "=", "storage", ".", "settings", "try", ":", "del", "data", "[", "'markov'", "]", "[", "'nodes'", "]", "except", "KeyError", ":", "pass", "pprint", "(", "data", ")" ]
Print generator settings.

Parameters
----------
args : `argparse.Namespace`
    Command arguments.
[ "Print", "generator", "settings", "." ]
python
train
kajala/django-jutil
jutil/format.py
https://github.com/kajala/django-jutil/blob/2abd93ebad51042744eaeb1ee1074ed0eb55ad0c/jutil/format.py#L6-L47
def format_full_name(first_name: str, last_name: str, max_length: int = 20):
    """
    Limits name length to specified length. Tries to keep the name as
    human-readable and natural as possible.
    :param first_name: First name
    :param last_name: Last name
    :param max_length: Maximum length
    :return: Full name or a shortened version depending on length
    """
    # don't allow commas in limited names
    first_name = first_name.replace(',', ' ')
    last_name = last_name.replace(',', ' ')

    # accept short full names as is
    original_full_name = first_name + ' ' + last_name
    if len(original_full_name) <= max_length:
        return original_full_name

    # drop middle names
    first_name = first_name.split(' ')[0]
    full_name = first_name + ' ' + last_name
    if len(full_name) <= max_length:
        return full_name

    # drop latter parts of combined first names
    first_name = re.split(r'[\s\-]', first_name)[0]
    full_name = first_name + ' ' + last_name
    if len(full_name) <= max_length:
        return full_name

    # drop latter parts of multi-part last names
    last_name = re.split(r'[\s\-]', last_name)[0]
    full_name = first_name + ' ' + last_name
    if len(full_name) <= max_length:
        return full_name

    # shorten last name to one letter
    last_name = last_name[:1]
    full_name = first_name + ' ' + last_name
    if len(full_name) > max_length:
        raise Exception('Failed to shorten name {}'.format(original_full_name))
    return full_name
[ "def", "format_full_name", "(", "first_name", ":", "str", ",", "last_name", ":", "str", ",", "max_length", ":", "int", "=", "20", ")", ":", "# dont allow commas in limited names", "first_name", "=", "first_name", ".", "replace", "(", "','", ",", "' '", ")", "last_name", "=", "last_name", ".", "replace", "(", "','", ",", "' '", ")", "# accept short full names as is", "original_full_name", "=", "first_name", "+", "' '", "+", "last_name", "if", "len", "(", "original_full_name", ")", "<=", "max_length", ":", "return", "original_full_name", "# drop middle names", "first_name", "=", "first_name", ".", "split", "(", "' '", ")", "[", "0", "]", "full_name", "=", "first_name", "+", "' '", "+", "last_name", "if", "len", "(", "full_name", ")", "<=", "max_length", ":", "return", "full_name", "# drop latter parts of combined first names", "first_name", "=", "re", ".", "split", "(", "r'[\\s\\-]'", ",", "first_name", ")", "[", "0", "]", "full_name", "=", "first_name", "+", "' '", "+", "last_name", "if", "len", "(", "full_name", ")", "<=", "max_length", ":", "return", "full_name", "# drop latter parts of multi part last names", "last_name", "=", "re", ".", "split", "(", "r'[\\s\\-]'", ",", "last_name", ")", "[", "0", "]", "full_name", "=", "first_name", "+", "' '", "+", "last_name", "if", "len", "(", "full_name", ")", "<=", "max_length", ":", "return", "full_name", "# shorten last name to one letter", "last_name", "=", "last_name", "[", ":", "1", "]", "full_name", "=", "first_name", "+", "' '", "+", "last_name", "if", "len", "(", "full_name", ")", ">", "max_length", ":", "raise", "Exception", "(", "'Failed to shorten name {}'", ".", "format", "(", "original_full_name", ")", ")", "return", "full_name" ]
Limits name length to specified length. Tries to keep name as human-readable and natural as possible.
:param first_name: First name
:param last_name: Last name
:param max_length: Maximum length
:return: Full name or shortened version depending on length
[ "Limits", "name", "length", "to", "specified", "length", ".", "Tries", "to", "keep", "name", "as", "human", "-", "readable", "an", "natural", "as", "possible", ".", ":", "param", "first_name", ":", "First", "name", ":", "param", "last_name", ":", "Last", "name", ":", "param", "max_length", ":", "Maximum", "length", ":", "return", ":", "Full", "name", "of", "shortened", "version", "depending", "on", "length" ]
python
train
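Because format_full_name above is self-contained, its shortening cascade can be shown directly; the names are made up:

# 34 chars > 20: middle names drop first, then the multi-part last
# name is cut at its first separator, leaving 'Anna Garcia' (11 chars).
print(format_full_name('Anna Maria', 'Garcia-Lopez de la Vega', max_length=20))
# Short names pass through untouched.
print(format_full_name('Jo', 'Nesbo'))  # -> 'Jo Nesbo'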
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L6331-L6338
def relaxNGValidatePushElement(self, doc, elem): """Push a new element start on the RelaxNG validation stack. """ if doc is None: doc__o = None else: doc__o = doc._o if elem is None: elem__o = None else: elem__o = elem._o ret = libxml2mod.xmlRelaxNGValidatePushElement(self._o, doc__o, elem__o) return ret
[ "def", "relaxNGValidatePushElement", "(", "self", ",", "doc", ",", "elem", ")", ":", "if", "doc", "is", "None", ":", "doc__o", "=", "None", "else", ":", "doc__o", "=", "doc", ".", "_o", "if", "elem", "is", "None", ":", "elem__o", "=", "None", "else", ":", "elem__o", "=", "elem", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlRelaxNGValidatePushElement", "(", "self", ".", "_o", ",", "doc__o", ",", "elem__o", ")", "return", "ret" ]
Push a new element start on the RelaxNG validation stack.
[ "Push", "a", "new", "element", "start", "on", "the", "RelaxNG", "validation", "stack", "." ]
python
train
humilis/humilis-lambdautils
lambdautils/state.py
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L409-L427
def arrival_delay_greater_than(item_id, delay, namespace="_expected_arrival"): """Check if an item arrival is delayed more than a given amount.""" expected = get_state(item_id, namespace=namespace) now = time.time() if expected and (now - expected) > delay: logger.error("Timeout: waited %s seconds for parent.", delay) return True elif expected: logger.info("Still out of order but no timeout: %s-%s <= %s.", now, expected, delay) return False elif delay > 0: logger.info("Storing expected arrival time (%s) for context '%s'", datetime.fromtimestamp(now).isoformat(), item_id) set_state(item_id, now, namespace=namespace) return False else: logger.info("Event is out of order but not waiting for parent.") return True
[ "def", "arrival_delay_greater_than", "(", "item_id", ",", "delay", ",", "namespace", "=", "\"_expected_arrival\"", ")", ":", "expected", "=", "get_state", "(", "item_id", ",", "namespace", "=", "namespace", ")", "now", "=", "time", ".", "time", "(", ")", "if", "expected", "and", "(", "now", "-", "expected", ")", ">", "delay", ":", "logger", ".", "error", "(", "\"Timeout: waited %s seconds for parent.\"", ",", "delay", ")", "return", "True", "elif", "expected", ":", "logger", ".", "info", "(", "\"Still out of order but no timeout: %s-%s <= %s.\"", ",", "now", ",", "expected", ",", "delay", ")", "return", "False", "elif", "delay", ">", "0", ":", "logger", ".", "info", "(", "\"Storing expected arrival time (%s) for context '%s'\"", ",", "datetime", ".", "fromtimestamp", "(", "now", ")", ".", "isoformat", "(", ")", ",", "item_id", ")", "set_state", "(", "item_id", ",", "now", ",", "namespace", "=", "namespace", ")", "return", "False", "else", ":", "logger", ".", "info", "(", "\"Event is out of order but not waiting for parent.\"", ")", "return", "True" ]
Check if an item arrival is delayed more than a given amount.
[ "Check", "if", "an", "item", "arrival", "is", "delayed", "more", "than", "a", "given", "amount", "." ]
python
train
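A sketch of the consumer-side pattern the helper above supports, parking out-of-order events until their parent arrives; handle_timeout and requeue are hypothetical callbacks:

# Hypothetical event loop: give up after 30 seconds, otherwise retry later.
if arrival_delay_greater_than(event['parent_id'], delay=30):
    handle_timeout(event)  # hypothetical: process without the parent
else:
    requeue(event)         # hypothetical: put back for another attempt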
rackerlabs/rackspace-python-neutronclient
neutronclient/v2_0/client.py
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/v2_0/client.py#L829-L833
def list_security_group_rules(self, retrieve_all=True, **_params): """Fetches a list of all security group rules for a project.""" return self.list('security_group_rules', self.security_group_rules_path, retrieve_all, **_params)
[ "def", "list_security_group_rules", "(", "self", ",", "retrieve_all", "=", "True", ",", "*", "*", "_params", ")", ":", "return", "self", ".", "list", "(", "'security_group_rules'", ",", "self", ".", "security_group_rules_path", ",", "retrieve_all", ",", "*", "*", "_params", ")" ]
Fetches a list of all security group rules for a project.
[ "Fetches", "a", "list", "of", "all", "security", "group", "rules", "for", "a", "project", "." ]
python
train
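Typical client-side use of the listing call above; the keystone v2 credentials follow the era's neutronclient pattern but are placeholders:

from neutronclient.v2_0 import client

neutron = client.Client(username='admin', password='secret',   # placeholders
                        tenant_name='demo',
                        auth_url='http://controller:5000/v2.0')
rules = neutron.list_security_group_rules(direction='ingress')
for rule in rules['security_group_rules']:
    print(rule['id'], rule.get('protocol'))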
tensorflow/hub
tensorflow_hub/saved_model_lib.py
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/saved_model_lib.py#L391-L406
def export(self, path, variables_saver=None): """Exports to SavedModel directory. Args: path: path where to export the SavedModel to. variables_saver: lambda that receives a directory path where to export checkpoints of variables. """ # Operate on a copy of self._proto since it needs to be modified. proto = saved_model_pb2.SavedModel() proto.CopyFrom(self._proto) assets_map = _make_assets_key_collection(proto, path) self._save_all_assets(path, assets_map) self._save_variables(path, variables_saver) self._save_proto(path, proto)
[ "def", "export", "(", "self", ",", "path", ",", "variables_saver", "=", "None", ")", ":", "# Operate on a copy of self._proto since it needs to be modified.", "proto", "=", "saved_model_pb2", ".", "SavedModel", "(", ")", "proto", ".", "CopyFrom", "(", "self", ".", "_proto", ")", "assets_map", "=", "_make_assets_key_collection", "(", "proto", ",", "path", ")", "self", ".", "_save_all_assets", "(", "path", ",", "assets_map", ")", "self", ".", "_save_variables", "(", "path", ",", "variables_saver", ")", "self", ".", "_save_proto", "(", "path", ",", "proto", ")" ]
Exports to SavedModel directory. Args: path: path where to export the SavedModel to. variables_saver: lambda that receives a directory path where to export checkpoints of variables.
[ "Exports", "to", "SavedModel", "directory", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/variation/validate.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validate.py#L671-L680
def _get_validate_plotdata_yaml(grading_file, data):
    """Retrieve validation plot data from grading YAML file (old style).
    """
    with open(grading_file) as in_handle:
        grade_stats = yaml.safe_load(in_handle)
    for sample_stats in grade_stats:
        sample = sample_stats["sample"]
        for vtype, cat, val in _flatten_grading(sample_stats):
            # The original referenced an undefined name `variant` here; the
            # caller name is assumed to live on the per-sample stats instead.
            yield [sample, sample_stats.get("variantcaller", ""), vtype, cat, val]
[ "def", "_get_validate_plotdata_yaml", "(", "grading_file", ",", "data", ")", ":", "with", "open", "(", "grading_file", ")", "as", "in_handle", ":", "grade_stats", "=", "yaml", ".", "safe_load", "(", "in_handle", ")", "for", "sample_stats", "in", "grade_stats", ":", "sample", "=", "sample_stats", "[", "\"sample\"", "]", "for", "vtype", ",", "cat", ",", "val", "in", "_flatten_grading", "(", "sample_stats", ")", ":", "yield", "[", "sample", ",", "variant", ".", "get", "(", "\"variantcaller\"", ",", "\"\"", ")", ",", "vtype", ",", "cat", ",", "val", "]" ]
Retrieve validation plot data from grading YAML file (old style).
[ "Retrieve", "validation", "plot", "data", "from", "grading", "YAML", "file", "(", "old", "style", ")", "." ]
python
train
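The generator above yields flat rows, so a caller can materialize them into a table; the column labels here are illustrative, not taken from bcbio:

import pandas as pd

rows = list(_get_validate_plotdata_yaml('grading-summary.yaml', data={}))
df = pd.DataFrame(rows, columns=['sample', 'caller', 'variant.type',
                                 'category', 'value'])
print(df.head())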
googleads/googleads-python-lib
examples/adwords/v201809/advanced_operations/add_shopping_dynamic_remarketing_campaign.py
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/examples/adwords/v201809/advanced_operations/add_shopping_dynamic_remarketing_campaign.py#L216-L246
def AttachUserList(client, ad_group_id, user_list_id): """Links the provided ad group and user list. Args: client: an AdWordsClient instance. ad_group_id: an int ad group ID. user_list_id: an int user list ID. Returns: The ad group criterion that was successfully created. """ ad_group_criterion_service = client.GetService( 'AdGroupCriterionService', 'v201809') user_list = { 'xsi_type': 'CriterionUserList', 'userListId': user_list_id } ad_group_criterion = { 'xsi_type': 'BiddableAdGroupCriterion', 'criterion': user_list, 'adGroupId': ad_group_id } operations = [{ 'operator': 'ADD', 'operand': ad_group_criterion }] return ad_group_criterion_service.mutate(operations)['value'][0]
[ "def", "AttachUserList", "(", "client", ",", "ad_group_id", ",", "user_list_id", ")", ":", "ad_group_criterion_service", "=", "client", ".", "GetService", "(", "'AdGroupCriterionService'", ",", "'v201809'", ")", "user_list", "=", "{", "'xsi_type'", ":", "'CriterionUserList'", ",", "'userListId'", ":", "user_list_id", "}", "ad_group_criterion", "=", "{", "'xsi_type'", ":", "'BiddableAdGroupCriterion'", ",", "'criterion'", ":", "user_list", ",", "'adGroupId'", ":", "ad_group_id", "}", "operations", "=", "[", "{", "'operator'", ":", "'ADD'", ",", "'operand'", ":", "ad_group_criterion", "}", "]", "return", "ad_group_criterion_service", ".", "mutate", "(", "operations", ")", "[", "'value'", "]", "[", "0", "]" ]
Links the provided ad group and user list. Args: client: an AdWordsClient instance. ad_group_id: an int ad group ID. user_list_id: an int user list ID. Returns: The ad group criterion that was successfully created.
[ "Links", "the", "provided", "ad", "group", "and", "user", "list", "." ]
python
train
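The googleads examples conventionally load credentials from a googleads.yaml file, so invoking the helper above looks roughly like this; the numeric IDs are placeholders:

from googleads import adwords

client = adwords.AdWordsClient.LoadFromStorage()  # reads googleads.yaml
criterion = AttachUserList(client, ad_group_id=123456789,
                           user_list_id=987654321)
print('Attached user list to ad group %s.' % criterion['adGroupId'])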
timothyb0912/pylogit
pylogit/asym_logit.py
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/asym_logit.py#L184-L225
def _calc_deriv_c_with_respect_to_eta(natural_shapes,
                                      ref_position,
                                      output_array=None):
    """
    Parameters
    ----------
    natural_shapes : 1D ndarray.
        Should have one element per available alternative in the dataset whose
        choice situations are being modeled. Should have at least
        `ref_position` elements in it.
    ref_position : int.
        Specifies the position in the array of natural shape parameters that
        should be equal to 1 - the sum of the other elements. Specifies the
        alternative in the ordered array of unique alternatives that is not
        having its shape parameter estimated (in order to ensure
        identifiability).
    output_array : 2D ndarray.
        This array is to have its data overwritten with the correct derivatives
        of the natural shape parameters with respect to transformed shape
        parameters. Should have shape ==
        `(natural_shapes.shape[0], natural_shapes.shape[0] - 1)`.

    Returns
    -------
    output_array : 2D ndarray.
        Has shape == (natural_shapes.shape[0], natural_shapes.shape[0] - 1).
        Will contain the derivative of the shape parameters, with respect to
        the underlying 'transformed' shape parameters.
    """
    # Generate a list of the indices which indicate the columns to be
    # selected from a 2D numpy array of
    # np.diag(natural_shapes) - np.outer(natural_shapes, natural_shapes)
    # Note: wrap in list() so the .remove() call below also works on Python 3.
    columns_to_be_kept = list(range(natural_shapes.shape[0]))
    columns_to_be_kept.remove(ref_position)

    # Calculate and store the derivative of the natural shape parameters
    # with respect to the reduced shape parameters.
    output_array[:, :] = (np.diag(natural_shapes) -
                          np.outer(natural_shapes,
                                   natural_shapes))[:, columns_to_be_kept]

    return output_array
[ "def", "_calc_deriv_c_with_respect_to_eta", "(", "natural_shapes", ",", "ref_position", ",", "output_array", "=", "None", ")", ":", "# Generate a list of the indices which indicate the columns to be", "# selected from a 2D numpy array of", "# np.diag(natural_shapes) - np.outer(natural_shapes, natural_shapes)", "columns_to_be_kept", "=", "range", "(", "natural_shapes", ".", "shape", "[", "0", "]", ")", "columns_to_be_kept", ".", "remove", "(", "ref_position", ")", "# Calculate and store the derivative of the natural shape parameters", "# with respect to the reduced shape parameters.", "output_array", "[", ":", ",", ":", "]", "=", "(", "np", ".", "diag", "(", "natural_shapes", ")", "-", "np", ".", "outer", "(", "natural_shapes", ",", "natural_shapes", ")", ")", "[", ":", ",", "columns_to_be_kept", "]", "return", "output_array" ]
Parameters ---------- natural_shapes : 1D ndarray. Should have one element per available alternative in the dataset whose choice situations are being modeled. Should have at least `ref_position` elements in it. ref_position : int. Specifies the position in the array of natural shape parameters that should be equal to 1 - the sum of the other elements. Specifies the alternative in the ordered array of unique alternatives that is not having its shape parameter estimated (in order to ensure identifiability). output_array : 2D ndarray. This array is to have its data overwritten with the correct derivatives of the natural shape parameters with respect to transformed shape parameters. Should have shape == `(natural_shapes.shape[0], natural_shapes.shape[0] - 1)`. Returns ------- output_array : 2D ndarray. Has shape == (natural_shapes.shape[0], natural_shapes.shape[0] - 1). Will contain the derivative of the shape parameters, with respect to the underlying 'transformed' shape parameters.
[ "Parameters", "----------", "natural_shapes", ":", "1D", "ndarray", ".", "Should", "have", "one", "element", "per", "available", "alternative", "in", "the", "dataset", "whose", "choice", "situations", "are", "being", "modeled", ".", "Should", "have", "at", "least", "ref_position", "elements", "in", "it", ".", "ref_position", ":", "int", ".", "Specifies", "the", "position", "in", "the", "array", "of", "natural", "shape", "parameters", "that", "should", "be", "equal", "to", "1", "-", "the", "sum", "of", "the", "other", "elements", ".", "Specifies", "the", "alternative", "in", "the", "ordered", "array", "of", "unique", "alternatives", "that", "is", "not", "having", "its", "shape", "parameter", "estimated", "(", "in", "order", "to", "ensure", "identifiability", ")", ".", "output_array", ":", "2D", "ndarray", ".", "This", "array", "is", "to", "have", "its", "data", "overwritten", "with", "the", "correct", "derivatives", "of", "the", "natural", "shape", "parameters", "with", "respect", "to", "transformed", "shape", "parameters", ".", "Should", "have", "shape", "==", "(", "natural_shapes", ".", "shape", "[", "0", "]", "natural_shapes", ".", "shape", "[", "0", "]", "-", "1", ")", "." ]
python
train
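Since the derivative above is just diag(c) - c c^T with the reference column dropped, it is easy to sanity-check on a toy three-alternative shape vector:

import numpy as np

shapes = np.array([0.2, 0.3, 0.5])   # natural shapes summing to one
jacobian = np.empty((3, 2))          # ref_position's column is dropped
_calc_deriv_c_with_respect_to_eta(shapes, ref_position=2,
                                  output_array=jacobian)
# jacobian[i, j] == shapes[i] * ((i == j) - shapes[j]) for kept columns j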
andreafioraldi/angrdbg
angrdbg/page_7.py
https://github.com/andreafioraldi/angrdbg/blob/939b20fb9b341aee695d2db12142b1eddc5b555a/angrdbg/page_7.py#L247-L284
def load_objects(self, addr, num_bytes, ret_on_segv=False): """ Load memory objects from paged memory. :param addr: Address to start loading. :param num_bytes: Number of bytes to load. :param bool ret_on_segv: True if you want load_bytes to return directly when a SIGSEV is triggered, otherwise a SimSegfaultError will be raised. :return: list of tuples of (addr, memory_object) :rtype: tuple """ result = [] end = addr + num_bytes for page_addr in self._containing_pages(addr, end): try: #print "Getting page %x" % (page_addr // self._page_size) page = self._get_page(page_addr // self._page_size) #print "... got it" except KeyError: #print "... missing" #print "... SEGV" # missing page if self.allow_segv: if ret_on_segv: break raise SimSegfaultError(addr, 'read-miss') else: continue if self.allow_segv and not page.concrete_permissions & DbgPage.PROT_READ: #print "... SEGV" if ret_on_segv: break raise SimSegfaultError(addr, 'non-readable') result.extend(page.load_slice(self.state, addr, end)) return result
[ "def", "load_objects", "(", "self", ",", "addr", ",", "num_bytes", ",", "ret_on_segv", "=", "False", ")", ":", "result", "=", "[", "]", "end", "=", "addr", "+", "num_bytes", "for", "page_addr", "in", "self", ".", "_containing_pages", "(", "addr", ",", "end", ")", ":", "try", ":", "#print \"Getting page %x\" % (page_addr // self._page_size)", "page", "=", "self", ".", "_get_page", "(", "page_addr", "//", "self", ".", "_page_size", ")", "#print \"... got it\"", "except", "KeyError", ":", "#print \"... missing\"", "#print \"... SEGV\"", "# missing page", "if", "self", ".", "allow_segv", ":", "if", "ret_on_segv", ":", "break", "raise", "SimSegfaultError", "(", "addr", ",", "'read-miss'", ")", "else", ":", "continue", "if", "self", ".", "allow_segv", "and", "not", "page", ".", "concrete_permissions", "&", "DbgPage", ".", "PROT_READ", ":", "#print \"... SEGV\"", "if", "ret_on_segv", ":", "break", "raise", "SimSegfaultError", "(", "addr", ",", "'non-readable'", ")", "result", ".", "extend", "(", "page", ".", "load_slice", "(", "self", ".", "state", ",", "addr", ",", "end", ")", ")", "return", "result" ]
Load memory objects from paged memory. :param addr: Address to start loading. :param num_bytes: Number of bytes to load. :param bool ret_on_segv: True if you want load_bytes to return directly when a SIGSEV is triggered, otherwise a SimSegfaultError will be raised. :return: list of tuples of (addr, memory_object) :rtype: tuple
[ "Load", "memory", "objects", "from", "paged", "memory", "." ]
python
train
redvox/Eliza
eliza/config.py
https://github.com/redvox/Eliza/blob/8661222fe792fca92bc25546f8ac7b5dd673913d/eliza/config.py#L58-L100
def load_config(self, path, environments, fill_with_defaults=False):
    """Will load default.yaml and <environment>.yaml at given path.
    The environment config will override the default values.

    :param path: directory where to find your config files. If the last character is not a slash (/) it will be appended. Example: resources/
    :param environments: list of environment configs to load. File name pattern: <environment>.yaml. Example: develop.yaml. Latter configs will override previous ones.
    :param fill_with_defaults: use 'defaults' keyword in config file to fill up following config entries.
    :return: your config as dictionary.
    """
    yaml.add_implicit_resolver("!environ", self.__environ_pattern)
    yaml.add_constructor('!environ', self.__get_from_environment)
    yaml.add_implicit_resolver("!vault", self.__vault_pattern)
    yaml.add_constructor('!vault', self.__get_from_vault)
    if not path.endswith('/'):
        path += '/'
    if type(environments) != list:
        environments = [environments]

    config = {}
    try:
        for env in environments:
            with open(path + env + '.yaml', 'r') as configFile:
                env_config = yaml.load(configFile.read()) or {}
            config.update(env_config)

        if fill_with_defaults:
            if 'defaults' in config:
                defaults = config['defaults']
                for target in defaults:
                    for index, item in enumerate(config[target]):
                        tmp = defaults[target].copy()
                        tmp.update(config[target][index])
                        config[target][index] = tmp

        return config
    except exceptions.VaultError as error:
        raise ConfigLoaderError("Could not read vault secrets [" + error.__class__.__name__ + "]")
    except yaml.YAMLError as error:
        raise ConfigLoaderError("Configuration files malformed [" + error.__class__.__name__ + "]")
    except json.decoder.JSONDecodeError as error:
        raise ConfigLoaderError("Vault response was not json [" + error.__class__.__name__ + "]")
    except Exception as error:
        raise ConfigLoaderError("WTF? [" + error.__class__.__name__ + "]")
[ "def", "load_config", "(", "self", ",", "path", ",", "environments", ",", "fill_with_defaults", "=", "False", ")", ":", "yaml", ".", "add_implicit_resolver", "(", "\"!environ\"", ",", "self", ".", "__environ_pattern", ")", "yaml", ".", "add_constructor", "(", "'!environ'", ",", "self", ".", "__get_from_environment", ")", "yaml", ".", "add_implicit_resolver", "(", "\"!vault\"", ",", "self", ".", "__vault_pattern", ")", "yaml", ".", "add_constructor", "(", "'!vault'", ",", "self", ".", "__get_from_vault", ")", "if", "not", "path", ".", "endswith", "(", "'/'", ")", ":", "path", "+=", "'/'", "if", "type", "(", "environments", ")", "!=", "list", ":", "environments", "=", "[", "environments", "]", "config", "=", "{", "}", "try", ":", "for", "env", "in", "environments", ":", "with", "open", "(", "path", "+", "env", "+", "'.yaml'", ",", "'r'", ")", "as", "configFile", ":", "env_config", "=", "yaml", ".", "load", "(", "configFile", ".", "read", "(", ")", ")", "or", "{", "}", "config", ".", "update", "(", "env_config", ")", "if", "fill_with_defaults", ":", "if", "'defaults'", "in", "config", ":", "defaults", "=", "config", "[", "'defaults'", "]", "for", "target", "in", "defaults", ":", "for", "index", ",", "item", "in", "enumerate", "(", "config", "[", "target", "]", ")", ":", "tmp", "=", "defaults", "[", "target", "]", ".", "copy", "(", ")", "tmp", ".", "update", "(", "config", "[", "target", "]", "[", "index", "]", ")", "config", "[", "target", "]", "[", "index", "]", "=", "tmp", "return", "config", "except", "exceptions", ".", "VaultError", "as", "error", ":", "raise", "ConfigLoaderError", "(", "\"Could not read vault secrets [\"", "+", "error", ".", "__class__", ".", "__name__", "+", "\"]\"", ")", "except", "yaml", ".", "YAMLError", "as", "error", ":", "raise", "ConfigLoaderError", "(", "\"Configuration files malformed [\"", "+", "error", ".", "__class__", ".", "__name__", "+", "\"]\"", ")", "except", "json", ".", "decoder", ".", "JSONDecodeError", "as", "error", ":", "raise", "ConfigLoaderError", "(", "\"Vault response was not json [\"", "+", "error", ".", "__class__", ".", "__name__", "+", "\"]\"", ")", "except", "Exception", "as", "error", ":", "raise", "ConfigLoaderError", "(", "\"WTF? [\"", "+", "error", ".", "__class__", ".", "__name__", "+", "\"]\"", ")" ]
Will load default.yaml and <environment>.yaml at given path. The environment config will override the default values.

:param path: directory where to find your config files. If the last character is not a slash (/) it will be appended. Example: resources/
:param environments: list of environment configs to load. File name pattern: <environment>.yaml. Example: develop.yaml. Latter configs will override previous ones.
:param fill_with_defaults: use 'defaults' keyword in config file to fill up following config entries.
:return: your config as dictionary.
[ "Will", "load", "default", ".", "yaml", "and", "<environment", ">", ".", "yaml", "at", "given", "path", ".", "The", "environment", "config", "will", "override", "the", "default", "values", "." ]
python
train
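A hedged sketch of calling the loader above; the ConfigLoader construction and vault setup are assumptions based on this module, and the file layout is illustrative:

# Assumes resources/default.yaml and resources/develop.yaml exist.
loader = ConfigLoader()  # hypothetical: vault configuration omitted
config = loader.load_config('resources', ['default', 'develop'],
                            fill_with_defaults=True)
print(config)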
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/pkginfo/utils.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/pkginfo/utils.py#L10-L57
def get_metadata(path_or_module, metadata_version=None): """ Try to create a Distribution 'path_or_module'. o 'path_or_module' may be a module object. o If a string, 'path_or_module' may point to an sdist file, a bdist file, an installed package, or a working checkout (if it contains PKG-INFO). o Return None if 'path_or_module' can't be parsed. """ if isinstance(path_or_module, ModuleType): try: return Installed(path_or_module, metadata_version) except (ValueError, IOError): #pragma NO COVER pass try: __import__(path_or_module) except ImportError: pass else: try: return Installed(path_or_module, metadata_version) except (ValueError, IOError): #pragma NO COVER pass if os.path.isfile(path_or_module): try: return SDist(path_or_module, metadata_version) except (ValueError, IOError): pass try: return BDist(path_or_module, metadata_version) except (ValueError, IOError): #pragma NO COVER pass try: return Wheel(path_or_module, metadata_version) except (ValueError, IOError): #pragma NO COVER pass if os.path.isdir(path_or_module): try: return Develop(path_or_module, metadata_version) except (ValueError, IOError): #pragma NO COVER pass
[ "def", "get_metadata", "(", "path_or_module", ",", "metadata_version", "=", "None", ")", ":", "if", "isinstance", "(", "path_or_module", ",", "ModuleType", ")", ":", "try", ":", "return", "Installed", "(", "path_or_module", ",", "metadata_version", ")", "except", "(", "ValueError", ",", "IOError", ")", ":", "#pragma NO COVER", "pass", "try", ":", "__import__", "(", "path_or_module", ")", "except", "ImportError", ":", "pass", "else", ":", "try", ":", "return", "Installed", "(", "path_or_module", ",", "metadata_version", ")", "except", "(", "ValueError", ",", "IOError", ")", ":", "#pragma NO COVER", "pass", "if", "os", ".", "path", ".", "isfile", "(", "path_or_module", ")", ":", "try", ":", "return", "SDist", "(", "path_or_module", ",", "metadata_version", ")", "except", "(", "ValueError", ",", "IOError", ")", ":", "pass", "try", ":", "return", "BDist", "(", "path_or_module", ",", "metadata_version", ")", "except", "(", "ValueError", ",", "IOError", ")", ":", "#pragma NO COVER", "pass", "try", ":", "return", "Wheel", "(", "path_or_module", ",", "metadata_version", ")", "except", "(", "ValueError", ",", "IOError", ")", ":", "#pragma NO COVER", "pass", "if", "os", ".", "path", ".", "isdir", "(", "path_or_module", ")", ":", "try", ":", "return", "Develop", "(", "path_or_module", ",", "metadata_version", ")", "except", "(", "ValueError", ",", "IOError", ")", ":", "#pragma NO COVER", "pass" ]
Try to create a Distribution 'path_or_module'. o 'path_or_module' may be a module object. o If a string, 'path_or_module' may point to an sdist file, a bdist file, an installed package, or a working checkout (if it contains PKG-INFO). o Return None if 'path_or_module' can't be parsed.
[ "Try", "to", "create", "a", "Distribution", "path_or_module", ".", "o", "path_or_module", "may", "be", "a", "module", "object", "." ]
python
test
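Assuming the helper above is importable from the package root (it lives in pkginfo.utils), a typical call looks like this; the archive path is a placeholder:

from pkginfo import get_metadata

meta = get_metadata('dist/example-1.0.tar.gz')  # placeholder sdist path
if meta is not None:
    print(meta.name, meta.version, meta.summary)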
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L222-L288
def _should_trace_with_reason(self, filename, frame):
    """Decide whether to trace execution in `filename`, with a reason.

    This function is called from the trace function.  As each new file name
    is encountered, this function determines whether it is traced or not.

    Returns a pair of values:  the first indicates whether the file should
    be traced: it's a canonicalized filename if it should be traced, None
    if it should not.  The second value is a string, the reason for the
    decision.

    """
    if not filename:
        # Empty string is pretty useless
        return None, "empty string isn't a filename"

    if filename.startswith('<'):
        # Lots of non-file execution is represented with artificial
        # filenames like "<string>", "<doctest readme.txt[0]>", or
        # "<exec_function>".  Don't ever trace these executions, since we
        # can't do anything with the data later anyway.
        return None, "not a real filename"

    self._check_for_packages()

    # Compiled Python files have two filenames: frame.f_code.co_filename is
    # the filename at the time the .pyc was compiled.  The second name is
    # __file__, which is where the .pyc was actually loaded from.  Since
    # .pyc files can be moved after compilation (for example, by being
    # installed), we look for __file__ in the frame and prefer it to the
    # co_filename value.
    dunder_file = frame.f_globals.get('__file__')
    if dunder_file:
        filename = self._source_for_file(dunder_file)

    # Jython reports the .class file to the tracer, use the source file.
    if filename.endswith("$py.class"):
        filename = filename[:-9] + ".py"

    canonical = self.file_locator.canonical_filename(filename)

    # If the user specified source or include, then that's authoritative
    # about the outer bound of what to measure and we don't have to apply
    # any canned exclusions. If they didn't, then we have to exclude the
    # stdlib and coverage.py directories.
    if self.source_match:
        if not self.source_match.match(canonical):
            return None, "falls outside the --source trees"
    elif self.include_match:
        if not self.include_match.match(canonical):
            return None, "falls outside the --include trees"
    else:
        # If we aren't supposed to trace installed code, then check if this
        # is near the Python standard library and skip it if so.
        if self.pylib_match and self.pylib_match.match(canonical):
            return None, "is in the stdlib"

        # We exclude the coverage code itself, since a little of it will be
        # measured otherwise.
        if self.cover_match and self.cover_match.match(canonical):
            return None, "is part of coverage.py"

    # Check the file against the omit pattern.
    if self.omit_match and self.omit_match.match(canonical):
        return None, "is inside an --omit pattern"

    return canonical, "because we love you"
[ "def", "_should_trace_with_reason", "(", "self", ",", "filename", ",", "frame", ")", ":", "if", "not", "filename", ":", "# Empty string is pretty useless", "return", "None", ",", "\"empty string isn't a filename\"", "if", "filename", ".", "startswith", "(", "'<'", ")", ":", "# Lots of non-file execution is represented with artificial", "# filenames like \"<string>\", \"<doctest readme.txt[0]>\", or", "# \"<exec_function>\". Don't ever trace these executions, since we", "# can't do anything with the data later anyway.", "return", "None", ",", "\"not a real filename\"", "self", ".", "_check_for_packages", "(", ")", "# Compiled Python files have two filenames: frame.f_code.co_filename is", "# the filename at the time the .pyc was compiled. The second name is", "# __file__, which is where the .pyc was actually loaded from. Since", "# .pyc files can be moved after compilation (for example, by being", "# installed), we look for __file__ in the frame and prefer it to the", "# co_filename value.", "dunder_file", "=", "frame", ".", "f_globals", ".", "get", "(", "'__file__'", ")", "if", "dunder_file", ":", "filename", "=", "self", ".", "_source_for_file", "(", "dunder_file", ")", "# Jython reports the .class file to the tracer, use the source file.", "if", "filename", ".", "endswith", "(", "\"$py.class\"", ")", ":", "filename", "=", "filename", "[", ":", "-", "9", "]", "+", "\".py\"", "canonical", "=", "self", ".", "file_locator", ".", "canonical_filename", "(", "filename", ")", "# If the user specified source or include, then that's authoritative", "# about the outer bound of what to measure and we don't have to apply", "# any canned exclusions. If they didn't, then we have to exclude the", "# stdlib and coverage.py directories.", "if", "self", ".", "source_match", ":", "if", "not", "self", ".", "source_match", ".", "match", "(", "canonical", ")", ":", "return", "None", ",", "\"falls outside the --source trees\"", "elif", "self", ".", "include_match", ":", "if", "not", "self", ".", "include_match", ".", "match", "(", "canonical", ")", ":", "return", "None", ",", "\"falls outside the --include trees\"", "else", ":", "# If we aren't supposed to trace installed code, then check if this", "# is near the Python standard library and skip it if so.", "if", "self", ".", "pylib_match", "and", "self", ".", "pylib_match", ".", "match", "(", "canonical", ")", ":", "return", "None", ",", "\"is in the stdlib\"", "# We exclude the coverage code itself, since a little of it will be", "# measured otherwise.", "if", "self", ".", "cover_match", "and", "self", ".", "cover_match", ".", "match", "(", "canonical", ")", ":", "return", "None", ",", "\"is part of coverage.py\"", "# Check the file against the omit pattern.", "if", "self", ".", "omit_match", "and", "self", ".", "omit_match", ".", "match", "(", "canonical", ")", ":", "return", "None", ",", "\"is inside an --omit pattern\"", "return", "canonical", ",", "\"because we love you\"" ]
Decide whether to trace execution in `filename`, with a reason.

This function is called from the trace function.  As each new file name
is encountered, this function determines whether it is traced or not.

Returns a pair of values:  the first indicates whether the file should
be traced: it's a canonicalized filename if it should be traced, None
if it should not.  The second value is a string, the reason for the
decision.
[ "Decide", "whether", "to", "trace", "execution", "in", "filename", "with", "a", "reason", "." ]
python
test
edeposit/edeposit.amqp.harvester
src/edeposit/amqp/harvester/scrappers/cpress_cz.py
https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/scrappers/cpress_cz.py#L205-L227
def _parse_description(html_chunk): """ Parse description of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str/None: Description as string or None if not found. """ description_tag = html_chunk.match( ["div", {"class": "kniha_detail_text"}], "p" ) if not description_tag: return None description = get_first_content(description_tag) description = description.replace("<br />", "\n") description = description.replace("<br/>", "\n") return dhtmlparser.removeTags(description).strip()
[ "def", "_parse_description", "(", "html_chunk", ")", ":", "description_tag", "=", "html_chunk", ".", "match", "(", "[", "\"div\"", ",", "{", "\"class\"", ":", "\"kniha_detail_text\"", "}", "]", ",", "\"p\"", ")", "if", "not", "description_tag", ":", "return", "None", "description", "=", "get_first_content", "(", "description_tag", ")", "description", "=", "description", ".", "replace", "(", "\"<br />\"", ",", "\"\\n\"", ")", "description", "=", "description", ".", "replace", "(", "\"<br/>\"", ",", "\"\\n\"", ")", "return", "dhtmlparser", ".", "removeTags", "(", "description", ")", ".", "strip", "(", ")" ]
Parse description of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str/None: Description as string or None if not found.
[ "Parse", "description", "of", "the", "book", "." ]
python
train
ergoithz/browsepy
browsepy/manager.py
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/manager.py#L455-L494
def load_arguments(self, argv, base=None): ''' Process given argument list based on registered arguments and given optional base :class:`argparse.ArgumentParser` instance. This method saves processed arguments on itself, and this state won't be lost after :meth:`clean` calls. Processed argument state will be available via :meth:`get_argument` method. :param argv: command-line arguments (without command itself) :type argv: iterable of str :param base: optional base :class:`argparse.ArgumentParser` instance. :type base: argparse.ArgumentParser or None :returns: argparse.Namespace instance with processed arguments as given by :meth:`argparse.ArgumentParser.parse_args`. :rtype: argparse.Namespace ''' plugin_parser = argparse.ArgumentParser(add_help=False) plugin_parser.add_argument('--plugin', action='append', default=[]) parent = base or plugin_parser parser = argparse.ArgumentParser( parents=(parent,), add_help=False, **getattr(parent, 'defaults', {}) ) plugins = [ plugin for plugins in plugin_parser.parse_known_args(argv)[0].plugin for plugin in plugins.split(',') ] for plugin in sorted(set(plugins), key=plugins.index): arguments = self.extract_plugin_arguments(plugin) if arguments: group = parser.add_argument_group('%s arguments' % plugin) for argargs, argkwargs in arguments: group.add_argument(*argargs, **argkwargs) self._argparse_arguments = parser.parse_args(argv) return self._argparse_arguments
[ "def", "load_arguments", "(", "self", ",", "argv", ",", "base", "=", "None", ")", ":", "plugin_parser", "=", "argparse", ".", "ArgumentParser", "(", "add_help", "=", "False", ")", "plugin_parser", ".", "add_argument", "(", "'--plugin'", ",", "action", "=", "'append'", ",", "default", "=", "[", "]", ")", "parent", "=", "base", "or", "plugin_parser", "parser", "=", "argparse", ".", "ArgumentParser", "(", "parents", "=", "(", "parent", ",", ")", ",", "add_help", "=", "False", ",", "*", "*", "getattr", "(", "parent", ",", "'defaults'", ",", "{", "}", ")", ")", "plugins", "=", "[", "plugin", "for", "plugins", "in", "plugin_parser", ".", "parse_known_args", "(", "argv", ")", "[", "0", "]", ".", "plugin", "for", "plugin", "in", "plugins", ".", "split", "(", "','", ")", "]", "for", "plugin", "in", "sorted", "(", "set", "(", "plugins", ")", ",", "key", "=", "plugins", ".", "index", ")", ":", "arguments", "=", "self", ".", "extract_plugin_arguments", "(", "plugin", ")", "if", "arguments", ":", "group", "=", "parser", ".", "add_argument_group", "(", "'%s arguments'", "%", "plugin", ")", "for", "argargs", ",", "argkwargs", "in", "arguments", ":", "group", ".", "add_argument", "(", "*", "argargs", ",", "*", "*", "argkwargs", ")", "self", ".", "_argparse_arguments", "=", "parser", ".", "parse_args", "(", "argv", ")", "return", "self", ".", "_argparse_arguments" ]
Process given argument list based on registered arguments and given optional base :class:`argparse.ArgumentParser` instance. This method saves processed arguments on itself, and this state won't be lost after :meth:`clean` calls. Processed argument state will be available via :meth:`get_argument` method. :param argv: command-line arguments (without command itself) :type argv: iterable of str :param base: optional base :class:`argparse.ArgumentParser` instance. :type base: argparse.ArgumentParser or None :returns: argparse.Namespace instance with processed arguments as given by :meth:`argparse.ArgumentParser.parse_args`. :rtype: argparse.Namespace
[ "Process", "given", "argument", "list", "based", "on", "registered", "arguments", "and", "given", "optional", "base", ":", "class", ":", "argparse", ".", "ArgumentParser", "instance", "." ]
python
train
GNS3/gns3-server
gns3server/compute/dynamips/nodes/ethernet_switch.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/nodes/ethernet_switch.py#L262-L289
def remove_nio(self, port_number): """ Removes the specified NIO as member of this Ethernet switch. :param port_number: allocated port number :returns: the NIO that was bound to the port """ if port_number not in self._nios: raise DynamipsError("Port {} is not allocated".format(port_number)) nio = self._nios[port_number] if isinstance(nio, NIOUDP): self.manager.port_manager.release_udp_port(nio.lport, self._project) if self._hypervisor: yield from self._hypervisor.send('ethsw remove_nio "{name}" {nio}'.format(name=self._name, nio=nio)) log.info('Ethernet switch "{name}" [{id}]: NIO {nio} removed from port {port}'.format(name=self._name, id=self._id, nio=nio, port=port_number)) del self._nios[port_number] if port_number in self._mappings: del self._mappings[port_number] return nio
[ "def", "remove_nio", "(", "self", ",", "port_number", ")", ":", "if", "port_number", "not", "in", "self", ".", "_nios", ":", "raise", "DynamipsError", "(", "\"Port {} is not allocated\"", ".", "format", "(", "port_number", ")", ")", "nio", "=", "self", ".", "_nios", "[", "port_number", "]", "if", "isinstance", "(", "nio", ",", "NIOUDP", ")", ":", "self", ".", "manager", ".", "port_manager", ".", "release_udp_port", "(", "nio", ".", "lport", ",", "self", ".", "_project", ")", "if", "self", ".", "_hypervisor", ":", "yield", "from", "self", ".", "_hypervisor", ".", "send", "(", "'ethsw remove_nio \"{name}\" {nio}'", ".", "format", "(", "name", "=", "self", ".", "_name", ",", "nio", "=", "nio", ")", ")", "log", ".", "info", "(", "'Ethernet switch \"{name}\" [{id}]: NIO {nio} removed from port {port}'", ".", "format", "(", "name", "=", "self", ".", "_name", ",", "id", "=", "self", ".", "_id", ",", "nio", "=", "nio", ",", "port", "=", "port_number", ")", ")", "del", "self", ".", "_nios", "[", "port_number", "]", "if", "port_number", "in", "self", ".", "_mappings", ":", "del", "self", ".", "_mappings", "[", "port_number", "]", "return", "nio" ]
Removes the specified NIO as member of this Ethernet switch. :param port_number: allocated port number :returns: the NIO that was bound to the port
[ "Removes", "the", "specified", "NIO", "as", "member", "of", "this", "Ethernet", "switch", "." ]
python
train
mrstephenneal/mysql-toolkit
mysql/toolkit/components/connector.py
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/connector.py#L60-L72
def executemany(self, command, params=None, max_attempts=5): """Execute multiple SQL queries without returning a result.""" attempts = 0 while attempts < max_attempts: try: # Execute statement self._cursor.executemany(command, params) self._commit() return True except Exception as e: attempts += 1 self.reconnect() continue
[ "def", "executemany", "(", "self", ",", "command", ",", "params", "=", "None", ",", "max_attempts", "=", "5", ")", ":", "attempts", "=", "0", "while", "attempts", "<", "max_attempts", ":", "try", ":", "# Execute statement", "self", ".", "_cursor", ".", "executemany", "(", "command", ",", "params", ")", "self", ".", "_commit", "(", ")", "return", "True", "except", "Exception", "as", "e", ":", "attempts", "+=", "1", "self", ".", "reconnect", "(", ")", "continue" ]
Execute multiple SQL queries without returning a result.
[ "Execute", "multiple", "SQL", "queries", "without", "returning", "a", "result", "." ]
python
train
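Batch inserts are the main use of the retry wrapper above; the db connection object is hypothetical, standing in for whatever the toolkit constructs:

params = [('alice', 30), ('bob', 25)]
ok = db.executemany('INSERT INTO people (name, age) VALUES (%s, %s)', params)
if not ok:  # the method falls through with no return value once retries run out
    raise RuntimeError('insert failed after 5 reconnect attempts')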
deontologician/restnavigator
restnavigator/halnav.py
https://github.com/deontologician/restnavigator/blob/453b9de4e70e602009d3e3ffafcf77d23c8b07c5/restnavigator/halnav.py#L396-L404
def _make_embedded_from(self, doc): '''Creates embedded navigators from a HAL response doc''' ld = utils.CurieDict(self._core.default_curie, {}) for rel, doc in doc.get('_embedded', {}).items(): if isinstance(doc, list): ld[rel] = [self._recursively_embed(d) for d in doc] else: ld[rel] = self._recursively_embed(doc) return ld
[ "def", "_make_embedded_from", "(", "self", ",", "doc", ")", ":", "ld", "=", "utils", ".", "CurieDict", "(", "self", ".", "_core", ".", "default_curie", ",", "{", "}", ")", "for", "rel", ",", "doc", "in", "doc", ".", "get", "(", "'_embedded'", ",", "{", "}", ")", ".", "items", "(", ")", ":", "if", "isinstance", "(", "doc", ",", "list", ")", ":", "ld", "[", "rel", "]", "=", "[", "self", ".", "_recursively_embed", "(", "d", ")", "for", "d", "in", "doc", "]", "else", ":", "ld", "[", "rel", "]", "=", "self", ".", "_recursively_embed", "(", "doc", ")", "return", "ld" ]
Creates embedded navigators from a HAL response doc
[ "Creates", "embedded", "navigators", "from", "a", "HAL", "response", "doc" ]
python
train
twisted/mantissa
xmantissa/website.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/website.py#L646-L659
def getKeyForAPI(cls, siteStore, apiName): """ Get the API key for the named API, if one exists. @param siteStore: The site store. @type siteStore: L{axiom.store.Store} @param apiName: The name of the API. @type apiName: C{unicode} (L{APIKey} constant) @rtype: L{APIKey} or C{NoneType} """ return siteStore.findUnique( cls, cls.apiName == apiName, default=None)
[ "def", "getKeyForAPI", "(", "cls", ",", "siteStore", ",", "apiName", ")", ":", "return", "siteStore", ".", "findUnique", "(", "cls", ",", "cls", ".", "apiName", "==", "apiName", ",", "default", "=", "None", ")" ]
Get the API key for the named API, if one exists. @param siteStore: The site store. @type siteStore: L{axiom.store.Store} @param apiName: The name of the API. @type apiName: C{unicode} (L{APIKey} constant) @rtype: L{APIKey} or C{NoneType}
[ "Get", "the", "API", "key", "for", "the", "named", "API", "if", "one", "exists", "." ]
python
train
DataONEorg/d1_python
gmn/src/d1_gmn/app/auth.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/auth.py#L164-L195
def is_allowed(request, level, pid): """Check if one or more subjects are allowed to perform action level on object. If a subject holds permissions for one action level on object, all lower action levels are also allowed. Any included subject that is unknown to this MN is treated as a subject without permissions. Returns: bool True: - The active subjects include one or more subjects that: - are fully trusted DataONE infrastructure subjects, causing all rights to be granted regardless of requested access level and SciObj - OR are in the object's ACL for the requested access level. The ACL contains the subjects from the object's allow rules and the object's rightsHolder, which has all rights. - OR object is public, which always yields a match on the "public" symbolic subject. False: - None of the active subjects are in the object's ACL for the requested access level or for lower levels. - OR PID does not exist - OR access level is invalid """ if is_trusted_subject(request): return True return d1_gmn.app.models.Permission.objects.filter( sciobj__pid__did=pid, subject__subject__in=request.all_subjects_set, level__gte=level, ).exists()
[ "def", "is_allowed", "(", "request", ",", "level", ",", "pid", ")", ":", "if", "is_trusted_subject", "(", "request", ")", ":", "return", "True", "return", "d1_gmn", ".", "app", ".", "models", ".", "Permission", ".", "objects", ".", "filter", "(", "sciobj__pid__did", "=", "pid", ",", "subject__subject__in", "=", "request", ".", "all_subjects_set", ",", "level__gte", "=", "level", ",", ")", ".", "exists", "(", ")" ]
Check if one or more subjects are allowed to perform action level on object. If a subject holds permissions for one action level on object, all lower action levels are also allowed. Any included subject that is unknown to this MN is treated as a subject without permissions. Returns: bool True: - The active subjects include one or more subjects that: - are fully trusted DataONE infrastructure subjects, causing all rights to be granted regardless of requested access level and SciObj - OR are in the object's ACL for the requested access level. The ACL contains the subjects from the object's allow rules and the object's rightsHolder, which has all rights. - OR object is public, which always yields a match on the "public" symbolic subject. False: - None of the active subjects are in the object's ACL for the requested access level or for lower levels. - OR PID does not exist - OR access level is invalid
[ "Check", "if", "one", "or", "more", "subjects", "are", "allowed", "to", "perform", "action", "level", "on", "object", "." ]
python
train
jtwhite79/pyemu
pyemu/ev.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/ev.py#L425-L471
def G(self, singular_value):
    """get the parameter solution Matrix at a singular value
        V_1 * S_1^(-1) * U_1^T

    Parameters
    ----------
    singular_value : int
        singular value to calc G at

    Returns
    -------
    G : pyemu.Matrix
        parameter solution matrix at singular value

    """
    if self.__G is not None and singular_value == self.__G_sv:
        return self.__G

    if singular_value == 0:
        self.__G_sv = 0
        self.__G = Matrix(
            x=np.zeros((self.jco.ncol, self.jco.nrow)),
            row_names=self.jco.col_names, col_names=self.jco.row_names)
        return self.__G
    mn = min(self.jco.shape)
    try:
        mn = min(self.pst.npar_adj, self.pst.nnz_obs)
    except:
        pass
    if singular_value > mn:
        self.logger.warn(
            "ErrVar.G(): singular_value > min(npar,nobs):" +
            "resetting to min(npar,nobs): " +
            str(min(self.pst.npar_adj, self.pst.nnz_obs)))
        singular_value = min(self.pst.npar_adj, self.pst.nnz_obs)
    self.log("calc G @" + str(singular_value))
    #v1 = self.qhalfx.v[:, :singular_value]
    v1 = self.xtqx.v[:, :singular_value]
    #s1 = ((self.qhalfx.s[:singular_value]) ** 2).inv
    s1 = (self.xtqx.s[:singular_value]).inv
    self.__G = v1 * s1 * v1.T * self.jco.T * self.obscov.inv
    self.__G_sv = singular_value
    self.__G.row_names = self.jco.col_names
    self.__G.col_names = self.jco.row_names
    self.__G.autoalign = True
    self.log("calc G @" + str(singular_value))
    return self.__G
[ "def", "G", "(", "self", ",", "singular_value", ")", ":", "if", "self", ".", "__G", "is", "not", "None", "and", "singular_value", "==", "self", ".", "__G_sv", ":", "return", "self", ".", "__G", "if", "singular_value", "==", "0", ":", "self", ".", "__G_sv", "=", "0", "self", ".", "__G", "=", "Matrix", "(", "x", "=", "np", ".", "zeros", "(", "(", "self", ".", "jco", ".", "ncol", ",", "self", ".", "jco", ".", "nrow", ")", ")", ",", "row_names", "=", "self", ".", "jco", ".", "col_names", ",", "col_names", "=", "self", ".", "jco", ".", "row_names", ")", "return", "self", ".", "__G", "mn", "=", "min", "(", "self", ".", "jco", ".", "shape", ")", "try", ":", "mn", "=", "min", "(", "self", ".", "pst", ".", "npar_adj", ",", "self", ".", "pst", ".", "nnz_obs", ")", "except", ":", "pass", "if", "singular_value", ">", "mn", ":", "self", ".", "logger", ".", "warn", "(", "\"ErrVar.G(): singular_value > min(npar,nobs):\"", "+", "\"resetting to min(npar,nobs): \"", "+", "str", "(", "min", "(", "self", ".", "pst", ".", "npar_adj", ",", "self", ".", "pst", ".", "nnz_obs", ")", ")", ")", "singular_value", "=", "min", "(", "self", ".", "pst", ".", "npar_adj", ",", "self", ".", "pst", ".", "nnz_obs", ")", "self", ".", "log", "(", "\"calc G @\"", "+", "str", "(", "singular_value", ")", ")", "#v1 = self.qhalfx.v[:, :singular_value]", "v1", "=", "self", ".", "xtqx", ".", "v", "[", ":", ",", ":", "singular_value", "]", "#s1 = ((self.qhalfx.s[:singular_value]) ** 2).inv", "s1", "=", "(", "self", ".", "xtqx", ".", "s", "[", ":", "singular_value", "]", ")", ".", "inv", "self", ".", "__G", "=", "v1", "*", "s1", "*", "v1", ".", "T", "*", "self", ".", "jco", ".", "T", "*", "self", ".", "obscov", ".", "inv", "self", ".", "__G_sv", "=", "singular_value", "self", ".", "__G", ".", "row_names", "=", "self", ".", "jco", ".", "col_names", "self", ".", "__G", ".", "col_names", "=", "self", ".", "jco", ".", "row_names", "self", ".", "__G", ".", "autoalign", "=", "True", "self", ".", "log", "(", "\"calc G @\"", "+", "str", "(", "singular_value", ")", ")", "return", "self", ".", "__G" ]
get the parameter solution Matrix at a singular value V_1 * S_1^(-1) * U_1^T

Parameters
----------
singular_value : int
    singular value to calc G at

Returns
-------
G : pyemu.Matrix
    parameter solution matrix at singular value
[ "get", "the", "parameter", "solution", "Matrix", "at", "a", "singular", "value", "V_1", "*", "S_1^", "(", "_1", ")", "*", "U_1^T" ]
python
train
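In pyemu this matrix is reached through an ErrVar analysis object; the jacobian file name is a placeholder:

import pyemu

ev = pyemu.ErrVar(jco='pest.jcb')   # placeholder PEST jacobian file
g = ev.G(singular_value=10)         # parameter solution matrix at 10 singular values
print(g.shape)                      # (npar, nobs)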
econ-ark/HARK
HARK/ConsumptionSaving/ConsIndShockModel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsIndShockModel.py#L1517-L1536
def unpackcFunc(self): ''' "Unpacks" the consumption functions into their own field for easier access. After the model has been solved, the consumption functions reside in the attribute cFunc of each element of ConsumerType.solution. This method creates a (time varying) attribute cFunc that contains a list of consumption functions. Parameters ---------- none Returns ------- none ''' self.cFunc = [] for solution_t in self.solution: self.cFunc.append(solution_t.cFunc) self.addToTimeVary('cFunc')
[ "def", "unpackcFunc", "(", "self", ")", ":", "self", ".", "cFunc", "=", "[", "]", "for", "solution_t", "in", "self", ".", "solution", ":", "self", ".", "cFunc", ".", "append", "(", "solution_t", ".", "cFunc", ")", "self", ".", "addToTimeVary", "(", "'cFunc'", ")" ]
"Unpacks" the consumption functions into their own field for easier access. After the model has been solved, the consumption functions reside in the attribute cFunc of each element of ConsumerType.solution. This method creates a (time varying) attribute cFunc that contains a list of consumption functions. Parameters ---------- none Returns ------- none
[ "Unpacks", "the", "consumption", "functions", "into", "their", "own", "field", "for", "easier", "access", ".", "After", "the", "model", "has", "been", "solved", "the", "consumption", "functions", "reside", "in", "the", "attribute", "cFunc", "of", "each", "element", "of", "ConsumerType", ".", "solution", ".", "This", "method", "creates", "a", "(", "time", "varying", ")", "attribute", "cFunc", "that", "contains", "a", "list", "of", "consumption", "functions", "." ]
python
train
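After solve(), the unpacked list lines up one-to-one with the model's periods; a sketch using that era's documented calibration pattern (the Params import and dict name are assumptions):

import HARK.ConsumptionSaving.ConsumerParameters as Params  # assumed module
from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType

agent = IndShockConsumerType(**Params.init_idiosyncratic_shocks)
agent.solve()
agent.unpackcFunc()
print(agent.cFunc[0](1.5))  # consumption at market resources m = 1.5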
databio/pypiper
pypiper/pipeline.py
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/pipeline.py#L216-L228
def list_flags(self, only_name=False): """ Determine the flag files associated with this pipeline. :param bool only_name: Whether to return only flag file name(s) (True), or full flag file paths (False); default False (paths) :return list[str]: flag files associated with this pipeline. """ paths = glob.glob(os.path.join(self.outfolder, flag_name("*"))) if only_name: return [os.path.split(p)[1] for p in paths] else: return paths
[ "def", "list_flags", "(", "self", ",", "only_name", "=", "False", ")", ":", "paths", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "outfolder", ",", "flag_name", "(", "\"*\"", ")", ")", ")", "if", "only_name", ":", "return", "[", "os", ".", "path", ".", "split", "(", "p", ")", "[", "1", "]", "for", "p", "in", "paths", "]", "else", ":", "return", "paths" ]
Determine the flag files associated with this pipeline. :param bool only_name: Whether to return only flag file name(s) (True), or full flag file paths (False); default False (paths) :return list[str]: flag files associated with this pipeline.
[ "Determine", "the", "flag", "files", "associated", "with", "this", "pipeline", "." ]
python
train
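A quick completion check built on the helper above; the pipeline handle and the 'completed' flag naming are assumptions here:

flags = pipeline.list_flags(only_name=True)  # e.g. ['mypipe_completed.flag']
if any('completed' in f for f in flags):
    print('pipeline finished')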
jazzband/django-ddp
dddp/migrations/__init__.py
https://github.com/jazzband/django-ddp/blob/1e1954b06fe140346acea43582515991685e4e01/dddp/migrations/__init__.py#L43-L49
def set_default_forwards(app_name, operation, apps, schema_editor): """Set default value for AleaIdField.""" model = apps.get_model(app_name, operation.model_name) for obj_pk in model.objects.values_list('pk', flat=True): model.objects.filter(pk=obj_pk).update(**{ operation.name: get_meteor_id(model, obj_pk), })
[ "def", "set_default_forwards", "(", "app_name", ",", "operation", ",", "apps", ",", "schema_editor", ")", ":", "model", "=", "apps", ".", "get_model", "(", "app_name", ",", "operation", ".", "model_name", ")", "for", "obj_pk", "in", "model", ".", "objects", ".", "values_list", "(", "'pk'", ",", "flat", "=", "True", ")", ":", "model", ".", "objects", ".", "filter", "(", "pk", "=", "obj_pk", ")", ".", "update", "(", "*", "*", "{", "operation", ".", "name", ":", "get_meteor_id", "(", "model", ",", "obj_pk", ")", ",", "}", ")" ]
Set default value for AleaIdField.
[ "Set", "default", "value", "for", "AleaIdField", "." ]
python
test
jhermann/rituals
src/rituals/acts/documentation.py
https://github.com/jhermann/rituals/blob/1534f50d81e19bbbe799e2eba0acdefbce047c06/src/rituals/acts/documentation.py#L313-L348
def _to_webdav(self, docs_base, release): """Upload to WebDAV store.""" try: git_path = subprocess.check_output('git remote get-url origin 2>/dev/null', shell=True) except subprocess.CalledProcessError: git_path = '' else: git_path = git_path.decode('ascii').strip() git_path = git_path.replace('http://', '').replace('https://', '').replace('ssh://', '') git_path = re.search(r'[^:/]+?[:/](.+)', git_path) git_path = git_path.group(1).replace('.git', '') if git_path else '' url = None with self._zipped(docs_base) as handle: url_ns = dict(name=self.cfg.project.name, version=release, git_path=git_path) reply = requests.put(self.params['url'].format(**url_ns), data=handle.read(), headers={'Accept': 'application/json'}) if reply.status_code in range(200, 300): notify.info("{status_code} {reason}".format(**vars(reply))) try: data = reply.json() except ValueError as exc: notify.warning("Didn't get a JSON response! ({})".format(exc)) else: if 'downloadUri' in data: # Artifactory url = data['downloadUri'] + '!/index.html' elif reply.status_code == 301: url = reply.headers['location'] else: data = self.cfg.copy() data.update(self.params) data.update(vars(reply)) notify.error("{status_code} {reason} for PUT to {url}".format(**data)) if not url: notify.warning("Couldn't get URL from upload response!") return url
[ "def", "_to_webdav", "(", "self", ",", "docs_base", ",", "release", ")", ":", "try", ":", "git_path", "=", "subprocess", ".", "check_output", "(", "'git remote get-url origin 2>/dev/null'", ",", "shell", "=", "True", ")", "except", "subprocess", ".", "CalledProcessError", ":", "git_path", "=", "''", "else", ":", "git_path", "=", "git_path", ".", "decode", "(", "'ascii'", ")", ".", "strip", "(", ")", "git_path", "=", "git_path", ".", "replace", "(", "'http://'", ",", "''", ")", ".", "replace", "(", "'https://'", ",", "''", ")", ".", "replace", "(", "'ssh://'", ",", "''", ")", "git_path", "=", "re", ".", "search", "(", "r'[^:/]+?[:/](.+)'", ",", "git_path", ")", "git_path", "=", "git_path", ".", "group", "(", "1", ")", ".", "replace", "(", "'.git'", ",", "''", ")", "if", "git_path", "else", "''", "url", "=", "None", "with", "self", ".", "_zipped", "(", "docs_base", ")", "as", "handle", ":", "url_ns", "=", "dict", "(", "name", "=", "self", ".", "cfg", ".", "project", ".", "name", ",", "version", "=", "release", ",", "git_path", "=", "git_path", ")", "reply", "=", "requests", ".", "put", "(", "self", ".", "params", "[", "'url'", "]", ".", "format", "(", "*", "*", "url_ns", ")", ",", "data", "=", "handle", ".", "read", "(", ")", ",", "headers", "=", "{", "'Accept'", ":", "'application/json'", "}", ")", "if", "reply", ".", "status_code", "in", "range", "(", "200", ",", "300", ")", ":", "notify", ".", "info", "(", "\"{status_code} {reason}\"", ".", "format", "(", "*", "*", "vars", "(", "reply", ")", ")", ")", "try", ":", "data", "=", "reply", ".", "json", "(", ")", "except", "ValueError", "as", "exc", ":", "notify", ".", "warning", "(", "\"Didn't get a JSON response! ({})\"", ".", "format", "(", "exc", ")", ")", "else", ":", "if", "'downloadUri'", "in", "data", ":", "# Artifactory", "url", "=", "data", "[", "'downloadUri'", "]", "+", "'!/index.html'", "elif", "reply", ".", "status_code", "==", "301", ":", "url", "=", "reply", ".", "headers", "[", "'location'", "]", "else", ":", "data", "=", "self", ".", "cfg", ".", "copy", "(", ")", "data", ".", "update", "(", "self", ".", "params", ")", "data", ".", "update", "(", "vars", "(", "reply", ")", ")", "notify", ".", "error", "(", "\"{status_code} {reason} for PUT to {url}\"", ".", "format", "(", "*", "*", "data", ")", ")", "if", "not", "url", ":", "notify", ".", "warning", "(", "\"Couldn't get URL from upload response!\"", ")", "return", "url" ]
Upload to WebDAV store.
[ "Upload", "to", "WebDAV", "store", "." ]
python
valid
bolt-project/bolt
bolt/utils.py
https://github.com/bolt-project/bolt/blob/9cd7104aa085498da3097b72696184b9d3651c51/bolt/utils.py#L25-L40
def argpack(args): """ Coerce a list of arguments to a tuple. Parameters ---------- args : tuple or nested tuple Pack arguments into a tuple, converting ((,...),) or (,) -> (,) """ if isinstance(args[0], (tuple, list, ndarray)): return tupleize(args[0]) elif isinstance(args[0], Iterable) and not isinstance(args[0], str): # coerce any iterable into a list before calling tupleize (Python 3 compatibility) return tupleize(list(args[0])) else: return tuple(args)
[ "def", "argpack", "(", "args", ")", ":", "if", "isinstance", "(", "args", "[", "0", "]", ",", "(", "tuple", ",", "list", ",", "ndarray", ")", ")", ":", "return", "tupleize", "(", "args", "[", "0", "]", ")", "elif", "isinstance", "(", "args", "[", "0", "]", ",", "Iterable", ")", "and", "not", "isinstance", "(", "args", "[", "0", "]", ",", "str", ")", ":", "# coerce any iterable into a list before calling tupleize (Python 3 compatibility)", "return", "tupleize", "(", "list", "(", "args", "[", "0", "]", ")", ")", "else", ":", "return", "tuple", "(", "args", ")" ]
Coerce a list of arguments to a tuple. Parameters ---------- args : tuple or nested tuple Arguments to pack into a tuple, converting ((,...),) or (,) -> (,)
[ "Coerce", "a", "list", "of", "arguments", "to", "a", "tuple", "." ]
python
test
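The argpack record normalises a *args tuple so a caller can pass either separate values or one nested sequence. A stripped-down version of that unwrapping, plus the two call shapes it reconciles:

def argpack(args):
    # args is the raw *args tuple of a caller; unwrap a single nested sequence.
    first = args[0]
    if isinstance(first, (tuple, list)):
        return tuple(first)
    return tuple(args)

argpack(((1, 2, 3),))  # nested call shape -> (1, 2, 3)
argpack((1, 2, 3))     # flat call shape   -> (1, 2, 3)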
saltstack/salt
salt/modules/win_task.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_task.py#L557-L675
def create_task_from_xml(name, location='\\', xml_text=None, xml_path=None, user_name='System', password=None): r''' Create a task based on XML. Source can be a file or a string of XML. :param str name: The name of the task. This will be displayed in the task scheduler. :param str location: A string value representing the location in which to create the task. Default is '\\' which is the root for the task scheduler (C:\Windows\System32\tasks). :param str xml_text: A string of xml representing the task to be created. This will be overridden by `xml_path` if passed. :param str xml_path: The path to an XML file on the local system containing the xml that defines the task. This will override `xml_text` :param str user_name: The user account under which to run the task. To specify the 'System' account, use 'System'. The password will be ignored. :param str password: The password to use for authentication. This should set the task to run whether the user is logged in or not, but is currently not working. :return: True if successful, False if unsuccessful, A string with the error message if there is an error :rtype: bool :raises: CommandExecutionError CLI Example: .. code-block:: bash salt 'minion-id' task.create_task_from_xml <task_name> xml_path=C:\task.xml ''' # Check for existing task if name in list_tasks(location): # Connect to an existing task definition return '{0} already exists'.format(name) if not xml_text and not xml_path: raise ArgumentValueError('Must specify either xml_text or xml_path') # Create the task service object with salt.utils.winapi.Com(): task_service = win32com.client.Dispatch("Schedule.Service") task_service.Connect() # Load xml from file, overrides xml_text # Need to figure out how to load contents of xml if xml_path: xml_text = xml_path # Get the folder to list folders from task_folder = task_service.GetFolder(location) # Determine logon type if user_name: if user_name.lower() == 'system': logon_type = TASK_LOGON_SERVICE_ACCOUNT user_name = 'SYSTEM' password = None else: if password: logon_type = TASK_LOGON_PASSWORD else: logon_type = TASK_LOGON_INTERACTIVE_TOKEN else: password = None logon_type = TASK_LOGON_NONE # Save the task try: task_folder.RegisterTask(name, xml_text, TASK_CREATE, user_name, password, logon_type) except pythoncom.com_error as error: hr, msg, exc, arg = error.args # pylint: disable=W0633 error_code = hex(exc[5] + 2**32) failure_code = error_code fc = {'0x80041319L': 'Required element or attribute missing', '0x80041318L': 'Value incorrectly formatted or out of range', '0x80020005L': 'Access denied', '0x80041309L': "A task's trigger is not found", '0x8004130aL': "One or more of the properties required to run this task have not been set", '0x8004130cL': "The Task Scheduler service is not installed on this computer", '0x8004130dL': "The task object could not be opened", '0x8004130eL': "The object is either an invalid task object or is not a task object", '0x8004130fL': "No account information could be found in the Task Scheduler security database for the task indicated", '0x80041310L': "Unable to establish existence of the account specified", '0x80041311L': "Corruption was detected in the Task Scheduler security database; the database has been reset", '0x80041313L': "The task object version is either unsupported or invalid", '0x80041314L': "The task has been configured with an unsupported combination of account settings and run time options", '0x80041315L': "The Task Scheduler Service is not running", '0x80041316L': "The task XML contains an unexpected node", '0x80041317L': "The task XML contains an element or attribute from an unexpected namespace", '0x8004131aL': "The task XML is malformed", '0x0004131cL': "The task is registered, but may fail to start. Batch logon privilege needs to be enabled for the task principal", '0x8004131dL': "The task XML contains too many nodes of the same type", } try: failure_code = fc[error_code] except KeyError: failure_code = 'Unknown Failure: {0}'.format(error_code) finally: log.debug('Failed to create task: %s', failure_code) raise CommandExecutionError(failure_code) # Verify creation return name in list_tasks(location)
[ "def", "create_task_from_xml", "(", "name", ",", "location", "=", "'\\\\'", ",", "xml_text", "=", "None", ",", "xml_path", "=", "None", ",", "user_name", "=", "'System'", ",", "password", "=", "None", ")", ":", "# Check for existing task", "if", "name", "in", "list_tasks", "(", "location", ")", ":", "# Connect to an existing task definition", "return", "'{0} already exists'", ".", "format", "(", "name", ")", "if", "not", "xml_text", "and", "not", "xml_path", ":", "raise", "ArgumentValueError", "(", "'Must specify either xml_text or xml_path'", ")", "# Create the task service object", "with", "salt", ".", "utils", ".", "winapi", ".", "Com", "(", ")", ":", "task_service", "=", "win32com", ".", "client", ".", "Dispatch", "(", "\"Schedule.Service\"", ")", "task_service", ".", "Connect", "(", ")", "# Load xml from file, overrides xml_text", "# Need to figure out how to load contents of xml", "if", "xml_path", ":", "xml_text", "=", "xml_path", "# Get the folder to list folders from", "task_folder", "=", "task_service", ".", "GetFolder", "(", "location", ")", "# Determine logon type", "if", "user_name", ":", "if", "user_name", ".", "lower", "(", ")", "==", "'system'", ":", "logon_type", "=", "TASK_LOGON_SERVICE_ACCOUNT", "user_name", "=", "'SYSTEM'", "password", "=", "None", "else", ":", "if", "password", ":", "logon_type", "=", "TASK_LOGON_PASSWORD", "else", ":", "logon_type", "=", "TASK_LOGON_INTERACTIVE_TOKEN", "else", ":", "password", "=", "None", "logon_type", "=", "TASK_LOGON_NONE", "# Save the task", "try", ":", "task_folder", ".", "RegisterTask", "(", "name", ",", "xml_text", ",", "TASK_CREATE", ",", "user_name", ",", "password", ",", "logon_type", ")", "except", "pythoncom", ".", "com_error", "as", "error", ":", "hr", ",", "msg", ",", "exc", ",", "arg", "=", "error", ".", "args", "# pylint: disable=W0633", "error_code", "=", "hex", "(", "exc", "[", "5", "]", "+", "2", "**", "32", ")", "failure_code", "=", "error_code", "fc", "=", "{", "'0x80041319L'", ":", "'Required element or attribute missing'", ",", "'0x80041318L'", ":", "'Value incorrectly formatted or out of range'", ",", "'0x80020005L'", ":", "'Access denied'", ",", "'0x80041309L'", ":", "\"A task's trigger is not found\"", ",", "'0x8004130aL'", ":", "\"One or more of the properties required to run this task have not been set\"", ",", "'0x8004130cL'", ":", "\"The Task Scheduler service is not installed on this computer\"", ",", "'0x8004130dL'", ":", "\"The task object could not be opened\"", ",", "'0x8004130eL'", ":", "\"The object is either an invalid task object or is not a task object\"", ",", "'0x8004130fL'", ":", "\"No account information could be found in the Task Scheduler security database for the task indicated\"", ",", "'0x80041310L'", ":", "\"Unable to establish existence of the account specified\"", ",", "'0x80041311L'", ":", "\"Corruption was detected in the Task Scheduler security database; the database has been reset\"", ",", "'0x80041313L'", ":", "\"The task object version is either unsupported or invalid\"", ",", "'0x80041314L'", ":", "\"The task has been configured with an unsupported combination of account settings and run time options\"", ",", "'0x80041315L'", ":", "\"The Task Scheduler Service is not running\"", ",", "'0x80041316L'", ":", "\"The task XML contains an unexpected node\"", ",", "'0x80041317L'", ":", "\"The task XML contains an element or attribute from an unexpected namespace\"", ",", "'0x8004131aL'", ":", "\"The task XML is malformed\"", ",", "'0x0004131cL'", ":", "\"The task is registered, but may fail to start. Batch logon privilege needs to be enabled for the task principal\"", ",", "'0x8004131dL'", ":", "\"The task XML contains too many nodes of the same type\"", ",", "}", "try", ":", "failure_code", "=", "fc", "[", "error_code", "]", "except", "KeyError", ":", "failure_code", "=", "'Unknown Failure: {0}'", ".", "format", "(", "error_code", ")", "finally", ":", "log", ".", "debug", "(", "'Failed to create task: %s'", ",", "failure_code", ")", "raise", "CommandExecutionError", "(", "failure_code", ")", "# Verify creation", "return", "name", "in", "list_tasks", "(", "location", ")" ]
r''' Create a task based on XML. Source can be a file or a string of XML. :param str name: The name of the task. This will be displayed in the task scheduler. :param str location: A string value representing the location in which to create the task. Default is '\\' which is the root for the task scheduler (C:\Windows\System32\tasks). :param str xml_text: A string of xml representing the task to be created. This will be overridden by `xml_path` if passed. :param str xml_path: The path to an XML file on the local system containing the xml that defines the task. This will override `xml_text` :param str user_name: The user account under which to run the task. To specify the 'System' account, use 'System'. The password will be ignored. :param str password: The password to use for authentication. This should set the task to run whether the user is logged in or not, but is currently not working. :return: True if successful, False if unsuccessful, A string with the error message if there is an error :rtype: bool :raises: CommandExecutionError CLI Example: .. code-block:: bash salt 'minion-id' task.create_task_from_xml <task_name> xml_path=C:\task.xml
[ "r", "Create", "a", "task", "based", "on", "XML", ".", "Source", "can", "be", "a", "file", "or", "a", "string", "of", "XML", "." ]
python
train
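A detail worth isolating from the salt record: COM surfaces Task Scheduler failures as signed 32-bit HRESULTs, and the code normalises them into unsigned hex keys with hex(exc[5] + 2**32) before the table lookup (the trailing 'L' in the table keys is Python 2's long-integer suffix). A self-contained Python 3 sketch of that normalisation with an abbreviated failure table:

FAILURE_CODES = {
    '0x80020005': 'Access denied',
    '0x80041318': 'Value incorrectly formatted or out of range',
}

def describe_hresult(signed_code):
    # Shift negative 32-bit HRESULTs into the unsigned range before lookup.
    key = hex(signed_code + 2**32) if signed_code < 0 else hex(signed_code)
    return FAILURE_CODES.get(key, 'Unknown Failure: {0}'.format(key))

describe_hresult(-2147352571)  # -> 'Access denied' (0x80020005)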
proycon/flat
flat/comm.py
https://github.com/proycon/flat/blob/f14eea61edcae8656dadccd9a43481ff7e710ffb/flat/comm.py#L12-L22
def checkversion(version): """Checks foliadocserve version, returns 1 if the document is newer than the library, -1 if it is older, 0 if it is equal""" try: for refversion, responseversion in zip([int(x) for x in REQUIREFOLIADOCSERVE.split('.')], [int(x) for x in version.split('.')]): if responseversion > refversion: return 1 #response is newer than library elif responseversion < refversion: return -1 #response is older than library return 0 #versions are equal except ValueError: raise ValueError("Unable to parse version, invalid syntax")
[ "def", "checkversion", "(", "version", ")", ":", "try", ":", "for", "refversion", ",", "responseversion", "in", "zip", "(", "[", "int", "(", "x", ")", "for", "x", "in", "REQUIREFOLIADOCSERVE", ".", "split", "(", "'.'", ")", "]", ",", "[", "int", "(", "x", ")", "for", "x", "in", "version", ".", "split", "(", "'.'", ")", "]", ")", ":", "if", "responseversion", ">", "refversion", ":", "return", "1", "#response is newer than library", "elif", "responseversion", "<", "refversion", ":", "return", "-", "1", "#response is older than library", "return", "0", "#versions are equal", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Unable to parse version, invalid syntax\"", ")" ]
Checks foliadocserve version, returns 1 if the document is newer than the library, -1 if it is older, 0 if it is equal
[ "Checks", "foliadocserve", "version", "returns", "1", "if", "the", "document", "is", "newer", "than", "the", "library", "-", "1", "if", "it", "is", "older", "0", "if", "it", "is", "equal" ]
python
train
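The checkversion record compares dotted version strings component-wise. The same logic in isolation; note that zip() stops at the shorter version, so under this scheme '1.2' and '1.2.5' compare equal:

def compare_versions(required, actual):
    # Pairwise integer comparison of dotted components, most significant first.
    for ref, got in zip((int(x) for x in required.split('.')),
                        (int(x) for x in actual.split('.'))):
        if got > ref:
            return 1   # actual is newer
        if got < ref:
            return -1  # actual is older
    return 0

compare_versions('0.7.2', '0.7.10')  # -> 1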
rmax/scrapy-redis
src/scrapy_redis/dupefilter.py
https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/dupefilter.py#L86-L101
def request_seen(self, request): """Returns True if request was already seen. Parameters ---------- request : scrapy.http.Request Returns ------- bool """ fp = self.request_fingerprint(request) # This returns the number of values added, zero if already exists. added = self.server.sadd(self.key, fp) return added == 0
[ "def", "request_seen", "(", "self", ",", "request", ")", ":", "fp", "=", "self", ".", "request_fingerprint", "(", "request", ")", "# This returns the number of values added, zero if already exists.", "added", "=", "self", ".", "server", ".", "sadd", "(", "self", ".", "key", ",", "fp", ")", "return", "added", "==", "0" ]
Returns True if request was already seen. Parameters ---------- request : scrapy.http.Request Returns ------- bool
[ "Returns", "True", "if", "request", "was", "already", "seen", "." ]
python
train
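The request_seen record relies on Redis SADD returning the number of members actually added, which folds the membership test and the insert into one atomic round trip. A sketch of the same trick with redis-py, assuming a Redis server on localhost; the key name and payload are illustrative:

import hashlib
import redis

def seen_before(server, key, payload):
    fp = hashlib.sha1(payload.encode('utf-8')).hexdigest()
    # SADD returns 1 if fp was newly added, 0 if it was already in the set.
    return server.sadd(key, fp) == 0

r = redis.Redis()
seen_before(r, 'dupefilter:demo', 'http://example.com/')  # False on first call
seen_before(r, 'dupefilter:demo', 'http://example.com/')  # True afterwards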
MartinThoma/hwrt
hwrt/train.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/train.py#L22-L61
def update_if_outdated(folder): """Check if the currently watched instance (model, feature or preprocessing) is outdated and update it eventually. """ folders = [] while os.path.isdir(folder): folders.append(folder) # Get info.yml with open(os.path.join(folder, "info.yml")) as ymlfile: content = yaml.load(ymlfile) folder = os.path.join(utils.get_project_root(), content['data-source']) raw_source_file = folder if not os.path.isfile(raw_source_file): logging.error("File '%s' was not found.", raw_source_file) logging.error("You should eventually execute 'hwrt download'.") sys.exit(-1) dt = os.path.getmtime(raw_source_file) source_mtime = datetime.datetime.utcfromtimestamp(dt) folders = folders[::-1] # Reverse order to get the most "basic one first" for target_folder in folders: target_mtime = utils.get_latest_successful_run(target_folder) if target_mtime is None or source_mtime > target_mtime: # The source is later than the target. That means we need to # refresh the target if "preprocessed" in target_folder: logging.info("Preprocessed file was outdated. Update...") preprocess_dataset.main(os.path.join(utils.get_project_root(), target_folder)) elif "feature-files" in target_folder: logging.info("Feature file was outdated. Update...") create_ffiles.main(target_folder) elif "model" in target_folder: logging.info("Model file was outdated. Update...") create_model.main(target_folder, True) target_mtime = datetime.datetime.utcnow() else: logging.info("'%s' is up-to-date.", target_folder) source_mtime = target_mtime
[ "def", "update_if_outdated", "(", "folder", ")", ":", "folders", "=", "[", "]", "while", "os", ".", "path", ".", "isdir", "(", "folder", ")", ":", "folders", ".", "append", "(", "folder", ")", "# Get info.yml", "with", "open", "(", "os", ".", "path", ".", "join", "(", "folder", ",", "\"info.yml\"", ")", ")", "as", "ymlfile", ":", "content", "=", "yaml", ".", "load", "(", "ymlfile", ")", "folder", "=", "os", ".", "path", ".", "join", "(", "utils", ".", "get_project_root", "(", ")", ",", "content", "[", "'data-source'", "]", ")", "raw_source_file", "=", "folder", "if", "not", "os", ".", "path", ".", "isfile", "(", "raw_source_file", ")", ":", "logging", ".", "error", "(", "\"File '%s' was not found.\"", ",", "raw_source_file", ")", "logging", ".", "error", "(", "\"You should eventually execute 'hwrt download'.\"", ")", "sys", ".", "exit", "(", "-", "1", ")", "dt", "=", "os", ".", "path", ".", "getmtime", "(", "raw_source_file", ")", "source_mtime", "=", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "dt", ")", "folders", "=", "folders", "[", ":", ":", "-", "1", "]", "# Reverse order to get the most \"basic one first\"", "for", "target_folder", "in", "folders", ":", "target_mtime", "=", "utils", ".", "get_latest_successful_run", "(", "target_folder", ")", "if", "target_mtime", "is", "None", "or", "source_mtime", ">", "target_mtime", ":", "# The source is later than the target. That means we need to", "# refresh the target", "if", "\"preprocessed\"", "in", "target_folder", ":", "logging", ".", "info", "(", "\"Preprocessed file was outdated. Update...\"", ")", "preprocess_dataset", ".", "main", "(", "os", ".", "path", ".", "join", "(", "utils", ".", "get_project_root", "(", ")", ",", "target_folder", ")", ")", "elif", "\"feature-files\"", "in", "target_folder", ":", "logging", ".", "info", "(", "\"Feature file was outdated. Update...\"", ")", "create_ffiles", ".", "main", "(", "target_folder", ")", "elif", "\"model\"", "in", "target_folder", ":", "logging", ".", "info", "(", "\"Model file was outdated. Update...\"", ")", "create_model", ".", "main", "(", "target_folder", ",", "True", ")", "target_mtime", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "else", ":", "logging", ".", "info", "(", "\"'%s' is up-to-date.\"", ",", "target_folder", ")", "source_mtime", "=", "target_mtime" ]
Check if the currently watched instance (model, feature or preprocessing) is outdated and update it if necessary.
[ "Check", "if", "the", "currently", "watched", "instance", "(", "model", "feature", "or", "preprocessing", ")", "is", "outdated", "and", "update", "it", "if", "necessary", "." ]
python
train
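At the core of update_if_outdated is a make-style staleness test: rebuild a target whenever its source's mtime is newer than the last successful run. That comparison, extracted into a standalone helper:

import datetime
import os

def is_outdated(source_path, target_mtime):
    # target_mtime: datetime of the last successful build, or None if never built.
    src_mtime = datetime.datetime.utcfromtimestamp(os.path.getmtime(source_path))
    return target_mtime is None or src_mtime > target_mtime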
sdss/tree
python/tree/tree.py
https://github.com/sdss/tree/blob/f61fe0876c138ccb61874912d4b8590dadfa835c/python/tree/tree.py#L75-L103
def set_roots(self, uproot_with=None): ''' Set the roots of the tree in the os environment Parameters: uproot_with (str): A new TREE_DIR path used to override an existing TREE_DIR environment variable ''' # Check for TREE_DIR self.treedir = os.environ.get('TREE_DIR', None) if not uproot_with else uproot_with if not self.treedir: treefilepath = os.path.dirname(os.path.abspath(__file__)) if 'python/' in treefilepath: self.treedir = treefilepath.rsplit('/', 2)[0] else: self.treedir = treefilepath os.environ['TREE_DIR'] = self.treedir # Check sas_base_dir if 'SAS_BASE_DIR' in os.environ: self.sasbasedir = os.environ["SAS_BASE_DIR"] else: self.sasbasedir = os.path.expanduser('~/sas') # make the directories if not os.path.isdir(self.sasbasedir): os.makedirs(self.sasbasedir)
[ "def", "set_roots", "(", "self", ",", "uproot_with", "=", "None", ")", ":", "# Check for TREE_DIR", "self", ".", "treedir", "=", "os", ".", "environ", ".", "get", "(", "'TREE_DIR'", ",", "None", ")", "if", "not", "uproot_with", "else", "uproot_with", "if", "not", "self", ".", "treedir", ":", "treefilepath", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", "if", "'python/'", "in", "treefilepath", ":", "self", ".", "treedir", "=", "treefilepath", ".", "rsplit", "(", "'/'", ",", "2", ")", "[", "0", "]", "else", ":", "self", ".", "treedir", "=", "treefilepath", "os", ".", "environ", "[", "'TREE_DIR'", "]", "=", "self", ".", "treedir", "# Check sas_base_dir", "if", "'SAS_BASE_DIR'", "in", "os", ".", "environ", ":", "self", ".", "sasbasedir", "=", "os", ".", "environ", "[", "\"SAS_BASE_DIR\"", "]", "else", ":", "self", ".", "sasbasedir", "=", "os", ".", "path", ".", "expanduser", "(", "'~/sas'", ")", "# make the directories", "if", "not", "os", ".", "path", ".", "isdir", "(", "self", ".", "sasbasedir", ")", ":", "os", ".", "makedirs", "(", "self", ".", "sasbasedir", ")" ]
Set the roots of the tree in the os environment Parameters: uproot_with (str): A new TREE_DIR path used to override an existing TREE_DIR environment variable
[ "Set", "the", "roots", "of", "the", "tree", "in", "the", "os", "environment" ]
python
train
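The set_roots record follows a common resolution pattern: prefer an explicit override, fall back to an environment variable, then to a home-directory default, and create the directory if needed. A compact Python 3 rendering of that order:

import os

def resolve_dir(override=None, env_var='SAS_BASE_DIR', fallback='~/sas'):
    # Priority: explicit argument > environment variable > home-dir default.
    path = override or os.environ.get(env_var) or os.path.expanduser(fallback)
    os.makedirs(path, exist_ok=True)  # exist_ok makes the call idempotent
    return path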
dixudx/rtcclient
rtcclient/client.py
https://github.com/dixudx/rtcclient/blob/1721dd0b047478f5bdd6359b07a2c503cfafd86f/rtcclient/client.py#L332-L374
def getTeamArea(self, teamarea_name, projectarea_id=None, projectarea_name=None, archived=False, returned_properties=None): """Get :class:`rtcclient.models.TeamArea` object by its name If `projectarea_id` or `projectarea_name` is specified, then the matched :class:`rtcclient.models.TeamArea` in that project area will be returned. Otherwise, only return the first found :class:`rtcclient.models.TeamArea` with that name. :param teamarea_name: the team area name :param projectarea_id: the :class:`rtcclient.project_area.ProjectArea` id :param projectarea_name: the project area name :param archived: (default is False) whether the team area is archived :param returned_properties: the returned properties that you want. Refer to :class:`rtcclient.client.RTCClient` for more explanations :return: the :class:`rtcclient.models.TeamArea` object :rtype: rtcclient.models.TeamArea """ if not isinstance(teamarea_name, six.string_types) or not teamarea_name: excp_msg = "Please specify a valid TeamArea name" self.log.error(excp_msg) raise exception.BadValue(excp_msg) self.log.debug("Try to get <TeamArea %s>", teamarea_name) teamareas = self._getTeamAreas(projectarea_id=projectarea_id, projectarea_name=projectarea_name, archived=archived, returned_properties=returned_properties, teamarea_name=teamarea_name) if teamareas is not None: teamarea = teamareas[0] self.log.info("Find <TeamArea %s>", teamarea) return teamarea self.log.error("No TeamArea named %s", teamarea_name) raise exception.NotFound("No TeamArea named %s" % teamarea_name)
[ "def", "getTeamArea", "(", "self", ",", "teamarea_name", ",", "projectarea_id", "=", "None", ",", "projectarea_name", "=", "None", ",", "archived", "=", "False", ",", "returned_properties", "=", "None", ")", ":", "if", "not", "isinstance", "(", "teamarea_name", ",", "six", ".", "string_types", ")", "or", "not", "teamarea_name", ":", "excp_msg", "=", "\"Please specify a valid TeamArea name\"", "self", ".", "log", ".", "error", "(", "excp_msg", ")", "raise", "exception", ".", "BadValue", "(", "excp_msg", ")", "self", ".", "log", ".", "debug", "(", "\"Try to get <TeamArea %s>\"", ",", "teamarea_name", ")", "teamareas", "=", "self", ".", "_getTeamAreas", "(", "projectarea_id", "=", "projectarea_id", ",", "projectarea_name", "=", "projectarea_name", ",", "archived", "=", "archived", ",", "returned_properties", "=", "returned_properties", ",", "teamarea_name", "=", "teamarea_name", ")", "if", "teamareas", "is", "not", "None", ":", "teamarea", "=", "teamareas", "[", "0", "]", "self", ".", "log", ".", "info", "(", "\"Find <TeamArea %s>\"", ",", "teamarea", ")", "return", "teamarea", "self", ".", "log", ".", "error", "(", "\"No TeamArea named %s\"", ",", "teamarea_name", ")", "raise", "exception", ".", "NotFound", "(", "\"No TeamArea named %s\"", "%", "teamarea_name", ")" ]
Get :class:`rtcclient.models.TeamArea` object by its name If `projectarea_id` or `projectarea_name` is specified, then the matched :class:`rtcclient.models.TeamArea` in that project area will be returned. Otherwise, only return the first found :class:`rtcclient.models.TeamArea` with that name. :param teamarea_name: the team area name :param projectarea_id: the :class:`rtcclient.project_area.ProjectArea` id :param projectarea_name: the project area name :param archived: (default is False) whether the team area is archived :param returned_properties: the returned properties that you want. Refer to :class:`rtcclient.client.RTCClient` for more explanations :return: the :class:`rtcclient.models.TeamArea` object :rtype: rtcclient.models.TeamArea
[ "Get", ":", "class", ":", "rtcclient", ".", "models", ".", "TeamArea", "object", "by", "its", "name" ]
python
train
adaptive-learning/proso-apps
proso_user/views_classes.py
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_user/views_classes.py#L26-L63
def create_class(request): """Create new class POST parameters (JSON): name: Human readable name of class code (optional): unique code of class used for joining to class """ if request.method == 'GET': return render(request, 'classes_create.html', {}, help_text=create_class.__doc__) if request.method == 'POST': if not request.user.is_authenticated() or not hasattr(request.user, "userprofile"): return render_json(request, { 'error': _('User is not logged in.'), 'error_type': 'user_unauthorized' }, template='classes_create.html', status=401) data = json_body(request.body.decode("utf-8")) if 'code' in data and Class.objects.filter(code=data['code']).exists(): return render_json(request, { 'error': _('A class with this code already exists.'), 'error_type': 'class_with_code_exists' }, template='classes_create.html', status=400) if 'name' not in data or not data['name']: return render_json(request, {'error': _('Class name is missing.'), 'error_type': 'missing_class_name'}, template='classes_create.html', status=400) cls = Class(name=data['name'], owner=request.user.userprofile) if 'code' in data: cls.code = data['code'] cls.save() return render_json(request, cls.to_json(), template='classes_create.html', status=201) else: return HttpResponseBadRequest("method {0} is not allowed".format(request.method))
[ "def", "create_class", "(", "request", ")", ":", "if", "request", ".", "method", "==", "'GET'", ":", "return", "render", "(", "request", ",", "'classes_create.html'", ",", "{", "}", ",", "help_text", "=", "create_class", ".", "__doc__", ")", "if", "request", ".", "method", "==", "'POST'", ":", "if", "not", "request", ".", "user", ".", "is_authenticated", "(", ")", "or", "not", "hasattr", "(", "request", ".", "user", ",", "\"userprofile\"", ")", ":", "return", "render_json", "(", "request", ",", "{", "'error'", ":", "_", "(", "'User is not logged in.'", ")", ",", "'error_type'", ":", "'user_unauthorized'", "}", ",", "template", "=", "'classes_create.html'", ",", "status", "=", "401", ")", "data", "=", "json_body", "(", "request", ".", "body", ".", "decode", "(", "\"utf-8\"", ")", ")", "if", "'code'", "in", "data", "and", "Class", ".", "objects", ".", "filter", "(", "code", "=", "data", "[", "'code'", "]", ")", ".", "exists", "(", ")", ":", "return", "render_json", "(", "request", ",", "{", "'error'", ":", "_", "(", "'A class with this code already exists.'", ")", ",", "'error_type'", ":", "'class_with_code_exists'", "}", ",", "template", "=", "'classes_create.html'", ",", "status", "=", "400", ")", "if", "'name'", "not", "in", "data", "or", "not", "data", "[", "'name'", "]", ":", "return", "render_json", "(", "request", ",", "{", "'error'", ":", "_", "(", "'Class name is missing.'", ")", ",", "'error_type'", ":", "'missing_class_name'", "}", ",", "template", "=", "'classes_create.html'", ",", "status", "=", "400", ")", "cls", "=", "Class", "(", "name", "=", "data", "[", "'name'", "]", ",", "owner", "=", "request", ".", "user", ".", "userprofile", ")", "if", "'code'", "in", "data", ":", "cls", ".", "code", "=", "data", "[", "'code'", "]", "cls", ".", "save", "(", ")", "return", "render_json", "(", "request", ",", "cls", ".", "to_json", "(", ")", ",", "template", "=", "'classes_create.html'", ",", "status", "=", "201", ")", "else", ":", "return", "HttpResponseBadRequest", "(", "\"method {0} is not allowed\"", ".", "format", "(", "request", ".", "method", ")", ")" ]
Create new class POST parameters (JSON): name: Human readable name of class code (optional): unique code of class used for joining to class
[ "Create", "new", "class" ]
python
train
secdev/scapy
scapy/modules/krack/automaton.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/modules/krack/automaton.py#L219-L232
def build_ap_info_pkt(self, layer_cls, dest): """Build a packet with info describing the current AP For beacon / proberesp use """ return RadioTap() \ / Dot11(addr1=dest, addr2=self.mac, addr3=self.mac) \ / layer_cls(timestamp=0, beacon_interval=100, cap='ESS+privacy') \ / Dot11Elt(ID="SSID", info=self.ssid) \ / Dot11EltRates(rates=[130, 132, 139, 150, 12, 18, 24, 36]) \ / Dot11Elt(ID="DSset", info=chb(self.channel)) \ / Dot11EltRSN(group_cipher_suite=RSNCipherSuite(cipher=0x2), pairwise_cipher_suites=[RSNCipherSuite(cipher=0x2)], akm_suites=[AKMSuite(suite=0x2)])
[ "def", "build_ap_info_pkt", "(", "self", ",", "layer_cls", ",", "dest", ")", ":", "return", "RadioTap", "(", ")", "/", "Dot11", "(", "addr1", "=", "dest", ",", "addr2", "=", "self", ".", "mac", ",", "addr3", "=", "self", ".", "mac", ")", "/", "layer_cls", "(", "timestamp", "=", "0", ",", "beacon_interval", "=", "100", ",", "cap", "=", "'ESS+privacy'", ")", "/", "Dot11Elt", "(", "ID", "=", "\"SSID\"", ",", "info", "=", "self", ".", "ssid", ")", "/", "Dot11EltRates", "(", "rates", "=", "[", "130", ",", "132", ",", "139", ",", "150", ",", "12", ",", "18", ",", "24", ",", "36", "]", ")", "/", "Dot11Elt", "(", "ID", "=", "\"DSset\"", ",", "info", "=", "chb", "(", "self", ".", "channel", ")", ")", "/", "Dot11EltRSN", "(", "group_cipher_suite", "=", "RSNCipherSuite", "(", "cipher", "=", "0x2", ")", ",", "pairwise_cipher_suites", "=", "[", "RSNCipherSuite", "(", "cipher", "=", "0x2", ")", "]", ",", "akm_suites", "=", "[", "AKMSuite", "(", "suite", "=", "0x2", ")", "]", ")" ]
Build a packet with info describing the current AP For beacon / proberesp use
[ "Build", "a", "packet", "with", "info", "describing", "the", "current", "AP", "For", "beacon", "/", "proberesp", "use" ]
python
train
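The build_ap_info_pkt record stacks 802.11 layers with scapy's '/' operator. A reduced sketch that builds a bare beacon the same way, keeping only the SSID element (Dot11Beacon is one of the layer_cls values the record is written for):

from scapy.all import Dot11, Dot11Beacon, Dot11Elt, RadioTap

def fake_beacon(ssid, ap_mac, dest='ff:ff:ff:ff:ff:ff'):
    # Each '/' glues the next layer onto the payload of the previous one.
    return (RadioTap()
            / Dot11(addr1=dest, addr2=ap_mac, addr3=ap_mac)
            / Dot11Beacon(cap='ESS+privacy')
            / Dot11Elt(ID='SSID', info=ssid.encode()))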
tanghaibao/jcvi
jcvi/apps/align.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/align.py#L44-L59
def run_vecscreen(infile=None, outfile=None, db="UniVec_Core", pctid=None, hitlen=None): """ BLASTN parameters reference: http://www.ncbi.nlm.nih.gov/VecScreen/VecScreen_docs.html """ db = get_abs_path(db) nin = db + ".nin" run_formatdb(infile=db, outfile=nin) cmd = "blastn" cmd += " -task blastn" cmd += " -query {0} -db {1} -out {2}".format(infile, db, outfile) cmd += " -penalty -5 -gapopen 4 -gapextend 4 -dust yes -soft_masking true" cmd += " -searchsp 1750000000000 -evalue 0.01 -outfmt 6 -num_threads 8" sh(cmd)
[ "def", "run_vecscreen", "(", "infile", "=", "None", ",", "outfile", "=", "None", ",", "db", "=", "\"UniVec_Core\"", ",", "pctid", "=", "None", ",", "hitlen", "=", "None", ")", ":", "db", "=", "get_abs_path", "(", "db", ")", "nin", "=", "db", "+", "\".nin\"", "run_formatdb", "(", "infile", "=", "db", ",", "outfile", "=", "nin", ")", "cmd", "=", "\"blastn\"", "cmd", "+=", "\" -task blastn\"", "cmd", "+=", "\" -query {0} -db {1} -out {2}\"", ".", "format", "(", "infile", ",", "db", ",", "outfile", ")", "cmd", "+=", "\" -penalty -5 -gapopen 4 -gapextend 4 -dust yes -soft_masking true\"", "cmd", "+=", "\" -searchsp 1750000000000 -evalue 0.01 -outfmt 6 -num_threads 8\"", "sh", "(", "cmd", ")" ]
BLASTN parameters reference: http://www.ncbi.nlm.nih.gov/VecScreen/VecScreen_docs.html
[ "BLASTN", "parameters", "reference", ":", "http", ":", "//", "www", ".", "ncbi", ".", "nlm", ".", "nih", ".", "gov", "/", "VecScreen", "/", "VecScreen_docs", ".", "html" ]
python
train
rackerlabs/rackspace-python-neutronclient
neutronclient/v2_0/client.py
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/v2_0/client.py#L806-L809
def list_security_groups(self, retrieve_all=True, **_params): """Fetches a list of all security groups for a project.""" return self.list('security_groups', self.security_groups_path, retrieve_all, **_params)
[ "def", "list_security_groups", "(", "self", ",", "retrieve_all", "=", "True", ",", "*", "*", "_params", ")", ":", "return", "self", ".", "list", "(", "'security_groups'", ",", "self", ".", "security_groups_path", ",", "retrieve_all", ",", "*", "*", "_params", ")" ]
Fetches a list of all security groups for a project.
[ "Fetches", "a", "list", "of", "all", "security", "groups", "for", "a", "project", "." ]
python
train
kblomqvist/yasha
yasha/cmsis.py
https://github.com/kblomqvist/yasha/blob/aebda08f45458611a59497fb7505f0881b73fbd5/yasha/cmsis.py#L240-L268
def fold(self): """Folds the Register in accordance with it's dimensions. If the register is dimensionless, the returned list just contains the register itself unchanged. In case the register name looks like a C array, the returned list contains the register itself, where nothing else than the '%s' placeholder in it's name has been replaced with value of the dim element. """ if self.dim is None: return [self] if self.name.endswith("[%s]"): # C array like self.name = self.name.replace("%s", str(self.dim)) return [self] registers = [] for offset, index in enumerate(self.dimIndex): reg = self.copy() reg.name = self.name.replace("%s", str(index)) reg.addressOffset += offset * reg.dimIncrement reg.fields = [field.copy() for field in reg.fields] for field in reg.fields: field.parent = reg reg.dim = reg.dimIndex = reg.dimIncrement = None # Dimensionless registers.append(reg) return registers
[ "def", "fold", "(", "self", ")", ":", "if", "self", ".", "dim", "is", "None", ":", "return", "[", "self", "]", "if", "self", ".", "name", ".", "endswith", "(", "\"[%s]\"", ")", ":", "# C array like", "self", ".", "name", "=", "self", ".", "name", ".", "replace", "(", "\"%s\"", ",", "str", "(", "self", ".", "dim", ")", ")", "return", "[", "self", "]", "registers", "=", "[", "]", "for", "offset", ",", "index", "in", "enumerate", "(", "self", ".", "dimIndex", ")", ":", "reg", "=", "self", ".", "copy", "(", ")", "reg", ".", "name", "=", "self", ".", "name", ".", "replace", "(", "\"%s\"", ",", "str", "(", "index", ")", ")", "reg", ".", "addressOffset", "+=", "offset", "*", "reg", ".", "dimIncrement", "reg", ".", "fields", "=", "[", "field", ".", "copy", "(", ")", "for", "field", "in", "reg", ".", "fields", "]", "for", "field", "in", "reg", ".", "fields", ":", "field", ".", "parent", "=", "reg", "reg", ".", "dim", "=", "reg", ".", "dimIndex", "=", "reg", ".", "dimIncrement", "=", "None", "# Dimensionless", "registers", ".", "append", "(", "reg", ")", "return", "registers" ]
Folds the Register in accordance with its dimensions. If the register is dimensionless, the returned list just contains the register itself unchanged. In case the register name looks like a C array, the returned list contains the register itself, where nothing other than the '%s' placeholder in its name has been replaced with the value of the dim element.
[ "Folds", "the", "Register", "in", "accordance", "with", "its", "dimensions", "." ]
python
train
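The fold record expands a dimensioned SVD register into concrete instances by substituting '%s' into the name and stepping the address offset by dimIncrement. The expansion arithmetic on its own, with a made-up register name:

def expand(name_template, dim_index, base_offset, increment):
    # One (name, offset) pair per index; offsets advance by the increment.
    return [(name_template.replace('%s', str(idx)), base_offset + i * increment)
            for i, idx in enumerate(dim_index)]

expand('TIMER%s_CTRL', ['0', '1', '2'], 0x40, 0x10)
# -> [('TIMER0_CTRL', 64), ('TIMER1_CTRL', 80), ('TIMER2_CTRL', 96)]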
blockstack/blockstack-core
blockstack/lib/nameset/virtualchain_hooks.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/virtualchain_hooks.py#L143-L159
def get_db_state(working_dir): """ Callback to the virtual chain state engine. Get a *read-only* handle to our state engine implementation (i.e. our name database). Note that in this implementation, the database handle returned will only support read-only operations by default. Attempts to save state with the handle will lead to program abort. Returns the handle on success Raises on error """ impl = sys.modules[__name__] db_inst = BlockstackDB.get_readonly_instance(working_dir) assert db_inst, 'Failed to instantiate database handle' return db_inst
[ "def", "get_db_state", "(", "working_dir", ")", ":", "impl", "=", "sys", ".", "modules", "[", "__name__", "]", "db_inst", "=", "BlockstackDB", ".", "get_readonly_instance", "(", "working_dir", ")", "assert", "db_inst", ",", "'Failed to instantiate database handle'", "return", "db_inst" ]
Callback to the virtual chain state engine. Get a *read-only* handle to our state engine implementation (i.e. our name database). Note that in this implementation, the database handle returned will only support read-only operations by default. Attempts to save state with the handle will lead to program abort. Returns the handle on success Raises on error
[ "Callback", "to", "the", "virtual", "chain", "state", "engine", ".", "Get", "a", "*", "read", "-", "only", "*", "handle", "to", "our", "state", "engine", "implementation", "(", "i", ".", "e", ".", "our", "name", "database", ")", "." ]
python
train
franciscogarate/pyliferisk
pyliferisk/__init__.py
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L229-L238
def ex(mt, x): """ ex : Returns the curtate expectation of life. Life expectancy """ sum1 = 0 for j in mt.lx[x + 1:-1]: sum1 += j #print sum1 try: return sum1 / mt.lx[x] + 0.5 except: return 0
[ "def", "ex", "(", "mt", ",", "x", ")", ":", "sum1", "=", "0", "for", "j", "in", "mt", ".", "lx", "[", "x", "+", "1", ":", "-", "1", "]", ":", "sum1", "+=", "j", "#print sum1", "try", ":", "return", "sum1", "/", "mt", ".", "lx", "[", "x", "]", "+", "0.5", "except", ":", "return", "0" ]
ex : Returns the curtate expectation of life. Life expectancy
[ "ex", ":", "Returns", "the", "curtate", "expectation", "of", "life", ".", "Life", "expectancy" ]
python
train
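The ex record computes the curtate life expectancy as the sum of survivors beyond age x divided by the survivors at x, then adds 0.5 as the usual complete-expectation correction. A worked toy example with a hypothetical survivorship column:

lx = [100, 90, 60, 20, 0]  # toy lx table; the last entry is a terminal sentinel

x = 1
curtate = sum(lx[x + 1:-1]) / lx[x]  # (60 + 20) / 90 = 0.888...
complete = curtate + 0.5             # ~1.389 years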
Kane610/axis
axis/configuration.py
https://github.com/Kane610/axis/blob/b2b44ce595c7b722b5e13eabcab7b91f048e1808/axis/configuration.py#L28-L30
def url(self): """Represent device base url.""" return URL.format(http=self.web_proto, host=self.host, port=self.port)
[ "def", "url", "(", "self", ")", ":", "return", "URL", ".", "format", "(", "http", "=", "self", ".", "web_proto", ",", "host", "=", "self", ".", "host", ",", "port", "=", "self", ".", "port", ")" ]
Represent device base url.
[ "Represent", "device", "base", "url", "." ]
python
train
Diaoul/pywunderground
pywunderground/core.py
https://github.com/Diaoul/pywunderground/blob/d0fcb7c573e1c8285f6fc3930c6bddab820a9de7/pywunderground/core.py#L79-L88
def _unicode(string): """Try to convert a string to unicode using different encodings""" for encoding in ['utf-8', 'latin1']: try: result = unicode(string, encoding) return result except UnicodeDecodeError: pass result = unicode(string, 'utf-8', 'replace') return result
[ "def", "_unicode", "(", "string", ")", ":", "for", "encoding", "in", "[", "'utf-8'", ",", "'latin1'", "]", ":", "try", ":", "result", "=", "unicode", "(", "string", ",", "encoding", ")", "return", "result", "except", "UnicodeDecodeError", ":", "pass", "result", "=", "unicode", "(", "string", ",", "'utf-8'", ",", "'replace'", ")", "return", "result" ]
Try to convert a string to unicode using different encodings
[ "Try", "to", "convert", "a", "string", "to", "unicode", "using", "different", "encodings" ]
python
valid
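The _unicode record is Python 2 code trying encodings in turn. A Python 3 equivalent of the same cascade; one quirk carries over from the original: latin1 can decode any byte sequence, so once it is in the list the 'replace' fallback is unreachable:

def decode_best_effort(raw):
    for encoding in ('utf-8', 'latin1'):
        try:
            return raw.decode(encoding)
        except UnicodeDecodeError:
            pass
    # Only reached if every listed codec fails (it cannot while latin1 is listed).
    return raw.decode('utf-8', 'replace')

decode_best_effort(b'caf\xc3\xa9')  # -> 'café' via utf-8
decode_best_effort(b'caf\xe9')      # -> 'café' via latin1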
bjodah/chempy
chempy/util/graph.py
https://github.com/bjodah/chempy/blob/bd62c3e1f7cb797782471203acd3bcf23b21c47e/chempy/util/graph.py#L92-L152
def rsys2graph(rsys, fname, output_dir=None, prog=None, save=False, **kwargs): """ Convenience function to call `rsys2dot` and write output to file and render the graph Parameters ---------- rsys : ReactionSystem fname : str filename output_dir : str (optional) path to directory (default: temporary directory) prog : str (optional) default: 'dot' save : bool removes temporary directory if False, default: False \\*\\*kwargs : Keyword arguments passed along to py:func:`rsys2dot`. Returns ------- str Outpath Examples -------- >>> rsys2graph(rsys, sbstncs, '/tmp/out.png') # doctest: +SKIP """ lines = rsys2dot(rsys, **kwargs) created_tempdir = False try: if output_dir is None: output_dir = tempfile.mkdtemp() created_tempdir = True basename, ext = os.path.splitext(os.path.basename(fname)) outpath = os.path.join(output_dir, fname) dotpath = os.path.join(output_dir, basename + '.dot') with open(dotpath, 'wt') as ofh: ofh.writelines(lines) if ext == '.tex': cmds = [prog or 'dot2tex'] else: cmds = [prog or 'dot', '-T'+outpath.split('.')[-1]] p = subprocess.Popen(cmds + [dotpath, '-o', outpath]) retcode = p.wait() if retcode: fmtstr = "{}\n returned with exit status {}" raise RuntimeError(fmtstr.format(' '.join(cmds), retcode)) return outpath finally: if save is True or save == 'True': pass else: if save is False or save == 'False': if created_tempdir: shutil.rmtree(output_dir) else: # interpret save as path to copy pdf to. shutil.copy(outpath, save)
[ "def", "rsys2graph", "(", "rsys", ",", "fname", ",", "output_dir", "=", "None", ",", "prog", "=", "None", ",", "save", "=", "False", ",", "*", "*", "kwargs", ")", ":", "lines", "=", "rsys2dot", "(", "rsys", ",", "*", "*", "kwargs", ")", "created_tempdir", "=", "False", "try", ":", "if", "output_dir", "is", "None", ":", "output_dir", "=", "tempfile", ".", "mkdtemp", "(", ")", "created_tempdir", "=", "True", "basename", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "fname", ")", ")", "outpath", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "fname", ")", "dotpath", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "basename", "+", "'.dot'", ")", "with", "open", "(", "dotpath", ",", "'wt'", ")", "as", "ofh", ":", "ofh", ".", "writelines", "(", "lines", ")", "if", "ext", "==", "'.tex'", ":", "cmds", "=", "[", "prog", "or", "'dot2tex'", "]", "else", ":", "cmds", "=", "[", "prog", "or", "'dot'", ",", "'-T'", "+", "outpath", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "]", "p", "=", "subprocess", ".", "Popen", "(", "cmds", "+", "[", "dotpath", ",", "'-o'", ",", "outpath", "]", ")", "retcode", "=", "p", ".", "wait", "(", ")", "if", "retcode", ":", "fmtstr", "=", "\"{}\\n returned with exit status {}\"", "raise", "RuntimeError", "(", "fmtstr", ".", "format", "(", "' '", ".", "join", "(", "cmds", ")", ",", "retcode", ")", ")", "return", "outpath", "finally", ":", "if", "save", "is", "True", "or", "save", "==", "'True'", ":", "pass", "else", ":", "if", "save", "is", "False", "or", "save", "==", "'False'", ":", "if", "created_tempdir", ":", "shutil", ".", "rmtree", "(", "output_dir", ")", "else", ":", "# interpret save as path to copy pdf to.", "shutil", ".", "copy", "(", "outpath", ",", "save", ")" ]
Convenience function to call `rsys2dot` and write output to file and render the graph Parameters ---------- rsys : ReactionSystem fname : str filename output_dir : str (optional) path to directory (default: temporary directory) prog : str (optional) default: 'dot' save : bool removes temporary directory if False, default: False \\*\\*kwargs : Keyword arguments passed along to py:func:`rsys2dot`. Returns ------- str Outpath Examples -------- >>> rsys2graph(rsys, sbstncs, '/tmp/out.png') # doctest: +SKIP
[ "Convenience", "function", "to", "call", "rsys2dot", "and", "write", "output", "to", "file", "and", "render", "the", "graph" ]
python
train
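The rsys2graph record writes a .dot file into a (possibly temporary) directory and shells out to Graphviz. The write-then-render core, stripped of the ReactionSystem specifics; this sketch assumes the Graphviz 'dot' binary is on PATH:

import os
import subprocess
import tempfile

def render_dot(lines, fname):
    outdir = tempfile.mkdtemp()
    dotpath = os.path.join(outdir, 'graph.dot')
    outpath = os.path.join(outdir, fname)
    with open(dotpath, 'wt') as fh:
        fh.writelines(lines)
    # The output format is taken from the file extension, as in the record above.
    fmt = fname.rsplit('.', 1)[-1]
    subprocess.check_call(['dot', '-T' + fmt, dotpath, '-o', outpath])
    return outpath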
ModisWorks/modis
modis/discord_modis/modules/music/_musicplayer.py
https://github.com/ModisWorks/modis/blob/1f1225c9841835ec1d1831fc196306527567db8b/modis/discord_modis/modules/music/_musicplayer.py#L1250-L1279
async def vafter(self): """Function that is called after a song finishes playing""" self.logger.debug("Finished playing a song") if self.state != 'ready': self.logger.debug("Returning because player is in state {}".format(self.state)) return self.pause_time = None if self.vclient_task: loop = asyncio.get_event_loop() loop.call_soon(self.vclient_task.cancel) self.vclient_task = None try: if self.streamer is None: await self.stop() return if self.streamer.error is None: await self.vplay() else: self.statuslog.error(self.streamer.error) await self.destroy() except Exception as e: logger.exception(e) try: await self.destroy() except Exception as e: logger.exception(e)
[ "async", "def", "vafter", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Finished playing a song\"", ")", "if", "self", ".", "state", "!=", "'ready'", ":", "self", ".", "logger", ".", "debug", "(", "\"Returning because player is in state {}\"", ".", "format", "(", "self", ".", "state", ")", ")", "return", "self", ".", "pause_time", "=", "None", "if", "self", ".", "vclient_task", ":", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "loop", ".", "call_soon", "(", "self", ".", "vclient_task", ".", "cancel", ")", "self", ".", "vclient_task", "=", "None", "try", ":", "if", "self", ".", "streamer", "is", "None", ":", "await", "self", ".", "stop", "(", ")", "return", "if", "self", ".", "streamer", ".", "error", "is", "None", ":", "await", "self", ".", "vplay", "(", ")", "else", ":", "self", ".", "statuslog", ".", "error", "(", "self", ".", "streamer", ".", "error", ")", "await", "self", ".", "destroy", "(", ")", "except", "Exception", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "try", ":", "await", "self", ".", "destroy", "(", ")", "except", "Exception", "as", "e", ":", "logger", ".", "exception", "(", "e", ")" ]
Function that is called after a song finishes playing
[ "Function", "that", "is", "called", "after", "a", "song", "finishes", "playing" ]
python
train
shmuelamar/cbox
cbox/cli.py
https://github.com/shmuelamar/cbox/blob/2d0cda5b3f61a55e530251430bf3d460dcd3732e/cbox/cli.py#L11-L57
def stream(input_type='lines', output_type=None, worker_type='simple', max_workers=1, workers_window=100): """wrapper for processing data from input stream into output into output stream while passing each data piece into the function. function should take at least one argument (an input stream piece) and return an `str` to be written into the output stream. Example Usage: >>> import cbox >>> >>> @cbox.stream() >>> def firstchar(line): >>> '''extracts the first char out of each line''' >>> return line[0] if line else '' :param str input_type: defines how the input stream is split. one of `lines`, `chars` or `raw`. :param str output_type: defines how to write into output stream (similarly to input stream). if `None`, split the output stream in the same way of `input_type`. one of `None`, `lines`, `chars` or `raw`. :param str worker_type: one of `simple`, `thread` or `asyncio`. :param int max_workers: how many max workers (i.e. threads) to run in parallel. only affect if `worker_type=thread`. :param int workers_window: how many tasks to execute in parallel before waiting for them to be completed. only affect if `worker_type` is not simple. """ def inner(f): @wraps(f) def wrapper(input_stream, output_stream, error_stream, **kwargs): in_parser = streams.get_input_parser(input_type) out_parser = streams.get_output_parser(output_type, input_type) runner = concurrency.get_runner( worker_type=worker_type, max_workers=max_workers, workers_window=workers_window, ) items = in_parser(input_stream) output = runner(f, items, kwargs) return out_parser(output_stream, error_stream, output) setattr(wrapper, executors.EXECUTOR_ATTR, executors.STREAM) return wrapper return inner
[ "def", "stream", "(", "input_type", "=", "'lines'", ",", "output_type", "=", "None", ",", "worker_type", "=", "'simple'", ",", "max_workers", "=", "1", ",", "workers_window", "=", "100", ")", ":", "def", "inner", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "input_stream", ",", "output_stream", ",", "error_stream", ",", "*", "*", "kwargs", ")", ":", "in_parser", "=", "streams", ".", "get_input_parser", "(", "input_type", ")", "out_parser", "=", "streams", ".", "get_output_parser", "(", "output_type", ",", "input_type", ")", "runner", "=", "concurrency", ".", "get_runner", "(", "worker_type", "=", "worker_type", ",", "max_workers", "=", "max_workers", ",", "workers_window", "=", "workers_window", ",", ")", "items", "=", "in_parser", "(", "input_stream", ")", "output", "=", "runner", "(", "f", ",", "items", ",", "kwargs", ")", "return", "out_parser", "(", "output_stream", ",", "error_stream", ",", "output", ")", "setattr", "(", "wrapper", ",", "executors", ".", "EXECUTOR_ATTR", ",", "executors", ".", "STREAM", ")", "return", "wrapper", "return", "inner" ]
wrapper for processing data from input stream into output stream while passing each data piece into the function. function should take at least one argument (an input stream piece) and return an `str` to be written into the output stream. Example Usage: >>> import cbox >>> >>> @cbox.stream() >>> def firstchar(line): >>> '''extracts the first char out of each line''' >>> return line[0] if line else '' :param str input_type: defines how the input stream is split. one of `lines`, `chars` or `raw`. :param str output_type: defines how to write into output stream (similarly to input stream). if `None`, split the output stream in the same way as `input_type`. one of `None`, `lines`, `chars` or `raw`. :param str worker_type: one of `simple`, `thread` or `asyncio`. :param int max_workers: how many max workers (i.e. threads) to run in parallel. only has an effect if `worker_type=thread`. :param int workers_window: how many tasks to execute in parallel before waiting for them to be completed. only has an effect if `worker_type` is not simple.
[ "wrapper", "for", "processing", "data", "from", "input", "stream", "into", "output", "stream", "while", "passing", "each", "data", "piece", "into", "the", "function", ".", "function", "should", "take", "at", "least", "one", "argument", "(", "an", "input", "stream", "piece", ")", "and", "return", "an", "str", "to", "be", "written", "into", "the", "output", "stream", "." ]
python
train
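The stream record is a decorator factory: the outer function captures configuration, inner receives the user function, and wrapper adapts it to stream I/O. The skeleton of that three-layer pattern, reduced to line-by-line processing:

from functools import wraps

def stream(input_type='lines'):
    def inner(f):
        @wraps(f)
        def wrapper(input_stream, output_stream):
            for item in input_stream:
                line = item.rstrip('\n') if input_type == 'lines' else item
                result = f(line)
                if result is not None:
                    output_stream.write(result + '\n')
        return wrapper
    return inner

@stream()
def firstchar(line):
    return line[0] if line else ''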
rdo-management/python-rdomanager-oscplugin
rdomanager_oscplugin/plugin.py
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/plugin.py#L72-L93
def baremetal(self): """Returns an baremetal service client""" # TODO(d0ugal): When the ironicclient has it's own OSC plugin, the # following client handling code should be removed in favor of the # upstream version. if self._baremetal is not None: return self._baremetal endpoint = self._instance.get_endpoint_for_service_type( "baremetal", region_name=self._instance._region_name, ) token = self._instance.auth.get_token(self._instance.session) self._baremetal = ironic_client.get_client( 1, os_auth_token=token, ironic_url=endpoint, ca_file=self._instance._cli_options.os_cacert) return self._baremetal
[ "def", "baremetal", "(", "self", ")", ":", "# TODO(d0ugal): When the ironicclient has it's own OSC plugin, the", "# following client handling code should be removed in favor of the", "# upstream version.", "if", "self", ".", "_baremetal", "is", "not", "None", ":", "return", "self", ".", "_baremetal", "endpoint", "=", "self", ".", "_instance", ".", "get_endpoint_for_service_type", "(", "\"baremetal\"", ",", "region_name", "=", "self", ".", "_instance", ".", "_region_name", ",", ")", "token", "=", "self", ".", "_instance", ".", "auth", ".", "get_token", "(", "self", ".", "_instance", ".", "session", ")", "self", ".", "_baremetal", "=", "ironic_client", ".", "get_client", "(", "1", ",", "os_auth_token", "=", "token", ",", "ironic_url", "=", "endpoint", ",", "ca_file", "=", "self", ".", "_instance", ".", "_cli_options", ".", "os_cacert", ")", "return", "self", ".", "_baremetal" ]
Returns a baremetal service client
[ "Returns", "a", "baremetal", "service", "client" ]
python
train
CogSciUOS/StudDP
studdp/model.py
https://github.com/CogSciUOS/StudDP/blob/e953aea51766438f2901c9e87f5b7b9e5bb892f5/studdp/model.py#L90-L100
def deep_documents(self): """ list of all documents find in subtrees of this node """ tree = [] for entry in self.contents: if isinstance(entry, Document): tree.append(entry) else: tree += entry.deep_documents return tree
[ "def", "deep_documents", "(", "self", ")", ":", "tree", "=", "[", "]", "for", "entry", "in", "self", ".", "contents", ":", "if", "isinstance", "(", "entry", ",", "Document", ")", ":", "tree", ".", "append", "(", "entry", ")", "else", ":", "tree", "+=", "entry", ".", "deep_documents", "return", "tree" ]
list of all documents found in subtrees of this node
[ "list", "of", "all", "documents", "found", "in", "subtrees", "of", "this", "node" ]
python
train
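deep_documents is a textbook recursive flatten over a folder tree: leaves are collected directly, non-leaves contribute their own deep_documents. The same shape as a runnable miniature:

class Document:
    def __init__(self, name):
        self.name = name

class Folder:
    def __init__(self, contents):
        self.contents = contents

    @property
    def deep_documents(self):
        tree = []
        for entry in self.contents:
            # Leaves are appended; folders recurse through the same property.
            tree += [entry] if isinstance(entry, Document) else entry.deep_documents
        return tree

root = Folder([Document('a'), Folder([Document('b'), Document('c')])])
[d.name for d in root.deep_documents]  # -> ['a', 'b', 'c']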
noahbenson/neuropythy
neuropythy/io/core.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/io/core.py#L134-L165
def guess_export_format(filename, data, **kwargs): ''' guess_export_format(filename, data) attempts to guess the export file format for the given filename and data (to be exported); it does this guessing by looking at the file extension and using registered sniff-tests from exporters. It will not attempt to save the file, so if the extension of the filename is missing, it is less likely that this function will deduce the file-type (though save will often succeeed at extracting the data by trying all types exhaustively). If guess_export_format cannot deduce the format, it yields None. Note that if the filename has an extension that is recognized by neuropythy but the data itself is inappropriate for that format, this function will never look beyond the extention in the filename; neither this function nor save perform that level of deduction. Keyword arguments that are passed to save should also be passed to guess_export_format. ''' # First try file endings (_,filename) = os.path.split(filename) fnm = filename.lower() fmt = None # to make sure we get the most specific ending, sort the exporters by their length es = sorted(((k,e) for (k,es) in six.iteritems(exporters) for e in es[1]), key=lambda x:-len(x[1])) for (k,e) in es: if fnm.endswith(('.' + e) if e[0] != '.' else e): return k # that didn't work; let's check the sniffers for (k,(_,_,sniff)) in six.iteritems(exporters): try: if sniff(filename, data, **kwargs): return k except Exception: pass return None
[ "def", "guess_export_format", "(", "filename", ",", "data", ",", "*", "*", "kwargs", ")", ":", "# First try file endings", "(", "_", ",", "filename", ")", "=", "os", ".", "path", ".", "split", "(", "filename", ")", "fnm", "=", "filename", ".", "lower", "(", ")", "fmt", "=", "None", "# to make sure we get the most specific ending, sort the exporters by their length", "es", "=", "sorted", "(", "(", "(", "k", ",", "e", ")", "for", "(", "k", ",", "es", ")", "in", "six", ".", "iteritems", "(", "exporters", ")", "for", "e", "in", "es", "[", "1", "]", ")", ",", "key", "=", "lambda", "x", ":", "-", "len", "(", "x", "[", "1", "]", ")", ")", "for", "(", "k", ",", "e", ")", "in", "es", ":", "if", "fnm", ".", "endswith", "(", "(", "'.'", "+", "e", ")", "if", "e", "[", "0", "]", "!=", "'.'", "else", "e", ")", ":", "return", "k", "# that didn't work; let's check the sniffers", "for", "(", "k", ",", "(", "_", ",", "_", ",", "sniff", ")", ")", "in", "six", ".", "iteritems", "(", "exporters", ")", ":", "try", ":", "if", "sniff", "(", "filename", ",", "data", ",", "*", "*", "kwargs", ")", ":", "return", "k", "except", "Exception", ":", "pass", "return", "None" ]
guess_export_format(filename, data) attempts to guess the export file format for the given filename and data (to be exported); it does this guessing by looking at the file extension and using registered sniff-tests from exporters. It will not attempt to save the file, so if the extension of the filename is missing, it is less likely that this function will deduce the file-type (though save will often succeed at extracting the data by trying all types exhaustively). If guess_export_format cannot deduce the format, it yields None. Note that if the filename has an extension that is recognized by neuropythy but the data itself is inappropriate for that format, this function will never look beyond the extension in the filename; neither this function nor save perform that level of deduction. Keyword arguments that are passed to save should also be passed to guess_export_format.
[ "guess_export_format", "(", "filename", "data", ")", "attempts", "to", "guess", "the", "export", "file", "format", "for", "the", "given", "filename", "and", "data", "(", "to", "be", "exported", ")", ";", "it", "does", "this", "guessing", "by", "looking", "at", "the", "file", "extension", "and", "using", "registered", "sniff", "-", "tests", "from", "exporters", ".", "It", "will", "not", "attempt", "to", "save", "the", "file", "so", "if", "the", "extension", "of", "the", "filename", "is", "missing", "it", "is", "less", "likely", "that", "this", "function", "will", "deduce", "the", "file", "-", "type", "(", "though", "save", "will", "often", "succeeed", "at", "extracting", "the", "data", "by", "trying", "all", "types", "exhaustively", ")", ".", "If", "guess_export_format", "cannot", "deduce", "the", "format", "it", "yields", "None", "." ]
python
train
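The key idiom in the snippet is sorting candidate extensions by descending length so the most specific suffix wins. A sketch of just that idiom; the plain name-to-extensions dict is a simplification (per the code above, the real `exporters` registry stores `(exporter, extensions, sniff)` triples):

```python
exporters = {'nifti': ('.nii', '.nii.gz'), 'json': ('.json',)}  # hypothetical registry

def guess_format(filename):
    # longest extension first, so '.nii.gz' is tested before '.nii'
    candidates = sorted(((k, e) for k, exts in exporters.items() for e in exts),
                        key=lambda pair: -len(pair[1]))
    return next((k for k, e in candidates
                 if filename.lower().endswith(e)), None)

print(guess_format('brain.nii.gz'))  # 'nifti'
```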
peterbrittain/asciimatics
asciimatics/paths.py
https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/asciimatics/paths.py#L113-L121
def _add_step(self, pos): """ Add a step to the end of the current recorded path. :param pos: The position tuple (x, y) to add to the list. """ self._steps.append(pos) self._rec_x = pos[0] self._rec_y = pos[1]
[ "def", "_add_step", "(", "self", ",", "pos", ")", ":", "self", ".", "_steps", ".", "append", "(", "pos", ")", "self", ".", "_rec_x", "=", "pos", "[", "0", "]", "self", ".", "_rec_y", "=", "pos", "[", "1", "]" ]
Add a step to the end of the current recorded path. :param pos: The position tuple (x, y) to add to the list.
[ "Add", "a", "step", "to", "the", "end", "of", "the", "current", "recorded", "path", "." ]
python
train
wavefrontHQ/python-client
wavefront_api_client/api/notificant_api.py
https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/notificant_api.py#L511-L532
def update_notificant(self, id, **kwargs): # noqa: E501 """Update a specific notification target # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update_notificant(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param Notificant body: Example Body: <pre>{ \"description\": \"Notificant Description\", \"template\": \"POST Body -- Mustache syntax\", \"title\": \"Email title\", \"triggers\": [ \"ALERT_OPENED\" ], \"method\": \"EMAIL\", \"recipient\": \"[email protected]\", \"emailSubject\": \"Email subject cannot contain new line\" }</pre> :return: ResponseContainerNotificant If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.update_notificant_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.update_notificant_with_http_info(id, **kwargs) # noqa: E501 return data
[ "def", "update_notificant", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "update_notificant_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "update_notificant_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Update a specific notification target # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update_notificant(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param Notificant body: Example Body: <pre>{ \"description\": \"Notificant Description\", \"template\": \"POST Body -- Mustache syntax\", \"title\": \"Email title\", \"triggers\": [ \"ALERT_OPENED\" ], \"method\": \"EMAIL\", \"recipient\": \"[email protected]\", \"emailSubject\": \"Email subject cannot contain new line\" }</pre> :return: ResponseContainerNotificant If the method is called asynchronously, returns the request thread.
[ "Update", "a", "specific", "notification", "target", "#", "noqa", ":", "E501" ]
python
train
NarrativeScience/lsi
src/lsi/utils/stream.py
https://github.com/NarrativeScience/lsi/blob/7d901b03fdb1a34ef795e5412bfe9685d948e32d/src/lsi/utils/stream.py#L86-L108
def stream_command_dicts(commands, parallel=False): """ Takes a list of dictionaries with keys corresponding to ``stream_command`` arguments, and runs all concurrently. :param commands: A list of dictionaries, the keys of which should line up with the arguments to ``stream_command`` function. :type commands: ``list`` of ``dict`` :param parallel: If true, commands will be run in parallel. :type parallel: ``bool`` """ if parallel is True: threads = [] for command in commands: target = lambda: stream_command(**command) thread = Thread(target=target) thread.start() threads.append(thread) for t in threads: t.join() else: for command in commands: stream_command(**command)
[ "def", "stream_command_dicts", "(", "commands", ",", "parallel", "=", "False", ")", ":", "if", "parallel", "is", "True", ":", "threads", "=", "[", "]", "for", "command", "in", "commands", ":", "target", "=", "lambda", ":", "stream_command", "(", "*", "*", "command", ")", "thread", "=", "Thread", "(", "target", "=", "target", ")", "thread", ".", "start", "(", ")", "threads", ".", "append", "(", "thread", ")", "for", "t", "in", "threads", ":", "t", ".", "join", "(", ")", "else", ":", "for", "command", "in", "commands", ":", "stream_command", "(", "*", "*", "command", ")" ]
Takes a list of dictionaries with keys corresponding to ``stream_command`` arguments, and runs all concurrently. :param commands: A list of dictionaries, the keys of which should line up with the arguments to ``stream_command`` function. :type commands: ``list`` of ``dict`` :param parallel: If true, commands will be run in parallel. :type parallel: ``bool``
[ "Takes", "a", "list", "of", "dictionaries", "with", "keys", "corresponding", "to", "stream_command", "arguments", "and", "runs", "all", "concurrently", "." ]
python
test
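One caveat worth noting: the `lambda: stream_command(**command)` above closes over the loop variable, so a thread that starts after the loop has advanced can observe a later `command`. Binding the arguments at thread-construction time avoids that. A sketch of the safer pattern, with a hypothetical `runner` standing in for `stream_command`:

```python
from threading import Thread

def run_all(commands, runner):
    threads = []
    for command in commands:
        # kwargs binds the dict now; a bare lambda would resolve `command` at call time
        thread = Thread(target=runner, kwargs=command)
        thread.start()
        threads.append(thread)
    for t in threads:
        t.join()
```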
Hironsan/HateSonar
hatesonar/crawler/twitter.py
https://github.com/Hironsan/HateSonar/blob/39ede274119bb128ac32ba3e6d7d58f6104d2354/hatesonar/crawler/twitter.py#L9-L20
def load_keys(): """Loads Twitter keys. Returns: tuple: consumer_key, consumer_secret, access_token, access_token_secret """ consumer_key = os.environ.get('CONSUMER_KEY') consumer_secret = os.environ.get('CONSUMER_SECRET') access_token = os.environ.get('ACCESS_TOKEN') access_token_secret = os.environ.get('ACCESS_TOKEN_SECRET') return consumer_key, consumer_secret, access_token, access_token_secret
[ "def", "load_keys", "(", ")", ":", "consumer_key", "=", "os", ".", "environ", ".", "get", "(", "'CONSUMER_KEY'", ")", "consumer_secret", "=", "os", ".", "environ", ".", "get", "(", "'CONSUMER_SECRET'", ")", "access_token", "=", "os", ".", "environ", ".", "get", "(", "'ACCESS_TOKEN'", ")", "access_token_secret", "=", "os", ".", "environ", ".", "get", "(", "'ACCESS_TOKEN_SECRET'", ")", "return", "consumer_key", ",", "consumer_secret", ",", "access_token", ",", "access_token_secret" ]
Loads Twitter keys. Returns: tuple: consumer_key, consumer_secret, access_token, access_token_secret
[ "Loads", "Twitter", "keys", "." ]
python
train
xaptum/xtt-python
xtt/certificates.py
https://github.com/xaptum/xtt-python/blob/23ee469488d710d730314bec1136c4dd7ac2cd5c/xtt/certificates.py#L35-L57
def generate_ecdsap256_server_certificate(server_id, server_pub_key, expiry, root_id, root_priv_key): """ Creates a new server certificate signed by the provided root. :param Identity server_id: the identity for the certificate :param ECDSAP256PublicKey server_pub_key: the public key for the certificate :param CertificateExpiry expiry: the expiry date for the certificate :param CertificateRootId root_id: the root identity to sign this certificate :param ECDSAP256PrivateKey root_priv_key: the root private key to sign this certificate """ cert = ECDSAP256ServerCertificate() rc = _lib.xtt_generate_server_certificate_ecdsap256(cert.native, server_id.native, server_pub_key.native, expiry.native, root_id.native, root_priv_key.native) if rc == RC.SUCCESS: return cert else: raise error_from_code(rc)
[ "def", "generate_ecdsap256_server_certificate", "(", "server_id", ",", "server_pub_key", ",", "expiry", ",", "root_id", ",", "root_priv_key", ")", ":", "cert", "=", "ECDSAP256ServerCertificate", "(", ")", "rc", "=", "_lib", ".", "xtt_generate_server_certificate_ecdsap256", "(", "cert", ".", "native", ",", "server_id", ".", "native", ",", "server_pub_key", ".", "native", ",", "expiry", ".", "native", ",", "root_id", ".", "native", ",", "root_priv_key", ".", "native", ")", "if", "rc", "==", "RC", ".", "SUCCESS", ":", "return", "cert", "else", ":", "raise", "error_from_code", "(", "rc", ")" ]
Creates a new server certificate signed by the provided root. :param Identity server_id: the identity for the certificate :param ECDSAP256PublicKey server_pub_key: the public key for the certificate :param CertificateExpiry expiry: the expiry date for the certificate :param CertificateRootId root_id: the root identity to sign this certificate :param ECDSAP256PrivateKey root_priv_key: the root private key to sign this certificate
[ "Creates", "a", "new", "server", "certificate", "signed", "by", "the", "provided", "root", "." ]
python
train
sdispater/cachy
cachy/stores/file_store.py
https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/file_store.py#L114-L132
def increment(self, key, value=1): """ Increment the value of an item in the cache. :param key: The cache key :type key: str :param value: The increment value :type value: int :rtype: int or bool """ raw = self._get_payload(key) integer = int(raw['data']) + value self.put(key, integer, int(raw['time'])) return integer
[ "def", "increment", "(", "self", ",", "key", ",", "value", "=", "1", ")", ":", "raw", "=", "self", ".", "_get_payload", "(", "key", ")", "integer", "=", "int", "(", "raw", "[", "'data'", "]", ")", "+", "value", "self", ".", "put", "(", "key", ",", "integer", ",", "int", "(", "raw", "[", "'time'", "]", ")", ")", "return", "integer" ]
Increment the value of an item in the cache. :param key: The cache key :type key: str :param value: The increment value :type value: int :rtype: int or bool
[ "Increment", "the", "value", "of", "an", "item", "in", "the", "cache", "." ]
python
train
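The increment is a read-modify-write cycle: pull the stored payload, add to it, then `put` the new integer back with the payload's original expiry so the TTL is preserved. A compact in-memory sketch of that cycle (`MemoryStore` is a hypothetical stand-in, not cachy's file store):

```python
class MemoryStore:
    def __init__(self):
        self._payloads = {}

    def put(self, key, value, minutes):
        self._payloads[key] = {'data': value, 'time': minutes}

    def increment(self, key, value=1):
        raw = self._payloads[key]                  # read
        integer = int(raw['data']) + value         # modify
        self.put(key, integer, int(raw['time']))   # write back, keeping the expiry
        return integer

store = MemoryStore()
store.put('hits', 10, 60)
assert store.increment('hits', 5) == 15
```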
tensorflow/datasets
tensorflow_datasets/core/download/resource.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L273-L278
def exists_locally(cls, path): """Returns whether the resource exists locally, at `resource.path`.""" # If INFO file doesn't exist, consider resource does NOT exist, as it would # prevent guessing the `extract_method`. return (tf.io.gfile.exists(path) and tf.io.gfile.exists(_get_info_path(path)))
[ "def", "exists_locally", "(", "cls", ",", "path", ")", ":", "# If INFO file doesn't exist, consider resource does NOT exist, as it would", "# prevent guessing the `extract_method`.", "return", "(", "tf", ".", "io", ".", "gfile", ".", "exists", "(", "path", ")", "and", "tf", ".", "io", ".", "gfile", ".", "exists", "(", "_get_info_path", "(", "path", ")", ")", ")" ]
Returns whether the resource exists locally, at `resource.path`.
[ "Returns", "whether", "the", "resource", "exists", "locally", "at", "resource", ".", "path", "." ]
python
train
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/auth.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/auth.py#L190-L251
def _authenticate_scram_sha1(credentials, sock_info): """Authenticate using SCRAM-SHA-1.""" username = credentials.username password = credentials.password source = credentials.source # Make local _hmac = hmac.HMAC _sha1 = sha1 user = username.encode("utf-8").replace(b"=", b"=3D").replace(b",", b"=2C") nonce = standard_b64encode( (("%s" % (SystemRandom().random(),))[2:]).encode("utf-8")) first_bare = b"n=" + user + b",r=" + nonce cmd = SON([('saslStart', 1), ('mechanism', 'SCRAM-SHA-1'), ('payload', Binary(b"n,," + first_bare)), ('autoAuthorize', 1)]) res = sock_info.command(source, cmd) server_first = res['payload'] parsed = _parse_scram_response(server_first) iterations = int(parsed[b'i']) salt = parsed[b's'] rnonce = parsed[b'r'] if not rnonce.startswith(nonce): raise OperationFailure("Server returned an invalid nonce.") without_proof = b"c=biws,r=" + rnonce salted_pass = _hi(_password_digest(username, password).encode("utf-8"), standard_b64decode(salt), iterations) client_key = _hmac(salted_pass, b"Client Key", _sha1).digest() stored_key = _sha1(client_key).digest() auth_msg = b",".join((first_bare, server_first, without_proof)) client_sig = _hmac(stored_key, auth_msg, _sha1).digest() client_proof = b"p=" + standard_b64encode(_xor(client_key, client_sig)) client_final = b",".join((without_proof, client_proof)) server_key = _hmac(salted_pass, b"Server Key", _sha1).digest() server_sig = standard_b64encode( _hmac(server_key, auth_msg, _sha1).digest()) cmd = SON([('saslContinue', 1), ('conversationId', res['conversationId']), ('payload', Binary(client_final))]) res = sock_info.command(source, cmd) parsed = _parse_scram_response(res['payload']) if not compare_digest(parsed[b'v'], server_sig): raise OperationFailure("Server returned an invalid signature.") # Depending on how it's configured, Cyrus SASL (which the server uses) # requires a third empty challenge. if not res['done']: cmd = SON([('saslContinue', 1), ('conversationId', res['conversationId']), ('payload', Binary(b''))]) res = sock_info.command(source, cmd) if not res['done']: raise OperationFailure('SASL conversation failed to complete.')
[ "def", "_authenticate_scram_sha1", "(", "credentials", ",", "sock_info", ")", ":", "username", "=", "credentials", ".", "username", "password", "=", "credentials", ".", "password", "source", "=", "credentials", ".", "source", "# Make local", "_hmac", "=", "hmac", ".", "HMAC", "_sha1", "=", "sha1", "user", "=", "username", ".", "encode", "(", "\"utf-8\"", ")", ".", "replace", "(", "b\"=\"", ",", "b\"=3D\"", ")", ".", "replace", "(", "b\",\"", ",", "b\"=2C\"", ")", "nonce", "=", "standard_b64encode", "(", "(", "(", "\"%s\"", "%", "(", "SystemRandom", "(", ")", ".", "random", "(", ")", ",", ")", ")", "[", "2", ":", "]", ")", ".", "encode", "(", "\"utf-8\"", ")", ")", "first_bare", "=", "b\"n=\"", "+", "user", "+", "b\",r=\"", "+", "nonce", "cmd", "=", "SON", "(", "[", "(", "'saslStart'", ",", "1", ")", ",", "(", "'mechanism'", ",", "'SCRAM-SHA-1'", ")", ",", "(", "'payload'", ",", "Binary", "(", "b\"n,,\"", "+", "first_bare", ")", ")", ",", "(", "'autoAuthorize'", ",", "1", ")", "]", ")", "res", "=", "sock_info", ".", "command", "(", "source", ",", "cmd", ")", "server_first", "=", "res", "[", "'payload'", "]", "parsed", "=", "_parse_scram_response", "(", "server_first", ")", "iterations", "=", "int", "(", "parsed", "[", "b'i'", "]", ")", "salt", "=", "parsed", "[", "b's'", "]", "rnonce", "=", "parsed", "[", "b'r'", "]", "if", "not", "rnonce", ".", "startswith", "(", "nonce", ")", ":", "raise", "OperationFailure", "(", "\"Server returned an invalid nonce.\"", ")", "without_proof", "=", "b\"c=biws,r=\"", "+", "rnonce", "salted_pass", "=", "_hi", "(", "_password_digest", "(", "username", ",", "password", ")", ".", "encode", "(", "\"utf-8\"", ")", ",", "standard_b64decode", "(", "salt", ")", ",", "iterations", ")", "client_key", "=", "_hmac", "(", "salted_pass", ",", "b\"Client Key\"", ",", "_sha1", ")", ".", "digest", "(", ")", "stored_key", "=", "_sha1", "(", "client_key", ")", ".", "digest", "(", ")", "auth_msg", "=", "b\",\"", ".", "join", "(", "(", "first_bare", ",", "server_first", ",", "without_proof", ")", ")", "client_sig", "=", "_hmac", "(", "stored_key", ",", "auth_msg", ",", "_sha1", ")", ".", "digest", "(", ")", "client_proof", "=", "b\"p=\"", "+", "standard_b64encode", "(", "_xor", "(", "client_key", ",", "client_sig", ")", ")", "client_final", "=", "b\",\"", ".", "join", "(", "(", "without_proof", ",", "client_proof", ")", ")", "server_key", "=", "_hmac", "(", "salted_pass", ",", "b\"Server Key\"", ",", "_sha1", ")", ".", "digest", "(", ")", "server_sig", "=", "standard_b64encode", "(", "_hmac", "(", "server_key", ",", "auth_msg", ",", "_sha1", ")", ".", "digest", "(", ")", ")", "cmd", "=", "SON", "(", "[", "(", "'saslContinue'", ",", "1", ")", ",", "(", "'conversationId'", ",", "res", "[", "'conversationId'", "]", ")", ",", "(", "'payload'", ",", "Binary", "(", "client_final", ")", ")", "]", ")", "res", "=", "sock_info", ".", "command", "(", "source", ",", "cmd", ")", "parsed", "=", "_parse_scram_response", "(", "res", "[", "'payload'", "]", ")", "if", "not", "compare_digest", "(", "parsed", "[", "b'v'", "]", ",", "server_sig", ")", ":", "raise", "OperationFailure", "(", "\"Server returned an invalid signature.\"", ")", "# Depending on how it's configured, Cyrus SASL (which the server uses)", "# requires a third empty challenge.", "if", "not", "res", "[", "'done'", "]", ":", "cmd", "=", "SON", "(", "[", "(", "'saslContinue'", ",", "1", ")", ",", "(", "'conversationId'", ",", "res", "[", "'conversationId'", "]", ")", ",", "(", "'payload'", ",", "Binary", "(", "b''", ")", ")", 
"]", ")", "res", "=", "sock_info", ".", "command", "(", "source", ",", "cmd", ")", "if", "not", "res", "[", "'done'", "]", ":", "raise", "OperationFailure", "(", "'SASL conversation failed to complete.'", ")" ]
Authenticate using SCRAM-SHA-1.
[ "Authenticate", "using", "SCRAM", "-", "SHA", "-", "1", "." ]
python
train
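The proof arithmetic in the middle of this handshake is standard RFC 5802 material and can be checked in isolation. A sketch using only stdlib primitives: `hashlib.pbkdf2_hmac` stands in for the `_hi` helper, and the MD5 `user:mongo:password` digest is an assumption about what `_password_digest` computes.

```python
import hashlib
import hmac

def client_proof(username, password, salt, iterations, auth_msg):
    # assumed equivalent of pymongo's _password_digest
    digest = hashlib.md5(
        ('%s:mongo:%s' % (username, password)).encode('utf-8')).hexdigest()
    # _hi is PBKDF2-HMAC-SHA1 over the digest
    salted = hashlib.pbkdf2_hmac('sha1', digest.encode('utf-8'), salt, iterations)
    client_key = hmac.new(salted, b'Client Key', hashlib.sha1).digest()
    stored_key = hashlib.sha1(client_key).digest()
    client_sig = hmac.new(stored_key, auth_msg, hashlib.sha1).digest()
    # ClientProof = ClientKey XOR ClientSignature (the `_xor` helper above)
    return bytes(a ^ b for a, b in zip(client_key, client_sig))
```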
ANTsX/ANTsPy
ants/core/ants_image_io.py
https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/core/ants_image_io.py#L76-L106
def from_numpy(data, origin=None, spacing=None, direction=None, has_components=False, is_rgb=False): """ Create an ANTsImage object from a numpy array ANTsR function: `as.antsImage` Arguments --------- data : ndarray image data array origin : tuple/list image origin spacing : tuple/list image spacing direction : list/ndarray image direction has_components : boolean whether the image has components Returns ------- ANTsImage image with given data and any given information """ data = data.astype('float32') if data.dtype.name == 'float64' else data img = _from_numpy(data.T.copy(), origin, spacing, direction, has_components, is_rgb) return img
[ "def", "from_numpy", "(", "data", ",", "origin", "=", "None", ",", "spacing", "=", "None", ",", "direction", "=", "None", ",", "has_components", "=", "False", ",", "is_rgb", "=", "False", ")", ":", "data", "=", "data", ".", "astype", "(", "'float32'", ")", "if", "data", ".", "dtype", ".", "name", "==", "'float64'", "else", "data", "img", "=", "_from_numpy", "(", "data", ".", "T", ".", "copy", "(", ")", ",", "origin", ",", "spacing", ",", "direction", ",", "has_components", ",", "is_rgb", ")", "return", "img" ]
Create an ANTsImage object from a numpy array ANTsR function: `as.antsImage` Arguments --------- data : ndarray image data array origin : tuple/list image origin spacing : tuple/list image spacing direction : list/ndarray image direction has_components : boolean whether the image has components Returns ------- ANTsImage image with given data and any given information
[ "Create", "an", "ANTsImage", "object", "from", "a", "numpy", "array" ]
python
train
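Note the `data.T.copy()`: ANTs/ITK stores images in the opposite axis order from NumPy, so the array is transposed (and copied, to make it contiguous) before being handed to the native layer. A hedged usage sketch, assuming ANTsPy is installed under its conventional `ants` import name:

```python
import numpy as np
import ants  # assumes the package is importable as `ants`

arr = np.random.rand(64, 32).astype('float32')
img = ants.from_numpy(arr, origin=(0.0, 0.0), spacing=(1.0, 1.0))
# round-tripping back to numpy should preserve the original shape
assert img.numpy().shape == (64, 32)
```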
saltstack/salt
salt/spm/pkgfiles/local.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/pkgfiles/local.py#L103-L169
def install_file(package, formula_tar, member, formula_def, conn=None): ''' Install a single file to the file system ''' if member.name == package: return False if conn is None: conn = init() node_type = six.text_type(__opts__.get('spm_node_type')) out_path = conn['formula_path'] tld = formula_def.get('top_level_dir', package) new_name = member.name.replace('{0}/'.format(package), '', 1) if not new_name.startswith(tld) and not new_name.startswith('_') and not \ new_name.startswith('pillar.example') and not new_name.startswith('README'): log.debug('%s not in top level directory, not installing', new_name) return False for line in formula_def.get('files', []): tag = '' for ftype in FILE_TYPES: if line.startswith('{0}|'.format(ftype)): tag = line.split('|', 1)[0] line = line.split('|', 1)[1] if tag and new_name == line: if tag in ('c', 'd', 'g', 'l', 'r'): out_path = __opts__['spm_share_dir'] elif tag in ('s', 'm'): pass if member.name.startswith('{0}/_'.format(package)): if node_type in ('master', 'minion'): # Module files are distributed via extmods directory member.name = new_name.name.replace('{0}/_'.format(package), '') out_path = os.path.join( salt.syspaths.CACHE_DIR, node_type, 'extmods', ) else: # Module files are distributed via _modules, _states, etc member.name = new_name.name.replace('{0}/'.format(package), '') elif member.name == '{0}/pillar.example'.format(package): # Pillars are automatically put in the pillar_path member.name = '{0}.sls.orig'.format(package) out_path = conn['pillar_path'] elif package.endswith('-conf'): # Configuration files go into /etc/salt/ member.name = member.name.replace('{0}/'.format(package), '') out_path = salt.syspaths.CONFIG_DIR elif package.endswith('-reactor'): # Reactor files go into /srv/reactor/ out_path = __opts__['reactor_path'] # This ensures that double directories (i.e., apache/apache/) don't # get created comps = member.path.split('/') if len(comps) > 1 and comps[0] == comps[1]: member.path = '/'.join(comps[1:]) log.debug('Installing package file %s to %s', member.name, out_path) formula_tar.extract(member, out_path) return out_path
[ "def", "install_file", "(", "package", ",", "formula_tar", ",", "member", ",", "formula_def", ",", "conn", "=", "None", ")", ":", "if", "member", ".", "name", "==", "package", ":", "return", "False", "if", "conn", "is", "None", ":", "conn", "=", "init", "(", ")", "node_type", "=", "six", ".", "text_type", "(", "__opts__", ".", "get", "(", "'spm_node_type'", ")", ")", "out_path", "=", "conn", "[", "'formula_path'", "]", "tld", "=", "formula_def", ".", "get", "(", "'top_level_dir'", ",", "package", ")", "new_name", "=", "member", ".", "name", ".", "replace", "(", "'{0}/'", ".", "format", "(", "package", ")", ",", "''", ",", "1", ")", "if", "not", "new_name", ".", "startswith", "(", "tld", ")", "and", "not", "new_name", ".", "startswith", "(", "'_'", ")", "and", "not", "new_name", ".", "startswith", "(", "'pillar.example'", ")", "and", "not", "new_name", ".", "startswith", "(", "'README'", ")", ":", "log", ".", "debug", "(", "'%s not in top level directory, not installing'", ",", "new_name", ")", "return", "False", "for", "line", "in", "formula_def", ".", "get", "(", "'files'", ",", "[", "]", ")", ":", "tag", "=", "''", "for", "ftype", "in", "FILE_TYPES", ":", "if", "line", ".", "startswith", "(", "'{0}|'", ".", "format", "(", "ftype", ")", ")", ":", "tag", "=", "line", ".", "split", "(", "'|'", ",", "1", ")", "[", "0", "]", "line", "=", "line", ".", "split", "(", "'|'", ",", "1", ")", "[", "1", "]", "if", "tag", "and", "new_name", "==", "line", ":", "if", "tag", "in", "(", "'c'", ",", "'d'", ",", "'g'", ",", "'l'", ",", "'r'", ")", ":", "out_path", "=", "__opts__", "[", "'spm_share_dir'", "]", "elif", "tag", "in", "(", "'s'", ",", "'m'", ")", ":", "pass", "if", "member", ".", "name", ".", "startswith", "(", "'{0}/_'", ".", "format", "(", "package", ")", ")", ":", "if", "node_type", "in", "(", "'master'", ",", "'minion'", ")", ":", "# Module files are distributed via extmods directory", "member", ".", "name", "=", "new_name", ".", "name", ".", "replace", "(", "'{0}/_'", ".", "format", "(", "package", ")", ",", "''", ")", "out_path", "=", "os", ".", "path", ".", "join", "(", "salt", ".", "syspaths", ".", "CACHE_DIR", ",", "node_type", ",", "'extmods'", ",", ")", "else", ":", "# Module files are distributed via _modules, _states, etc", "member", ".", "name", "=", "new_name", ".", "name", ".", "replace", "(", "'{0}/'", ".", "format", "(", "package", ")", ",", "''", ")", "elif", "member", ".", "name", "==", "'{0}/pillar.example'", ".", "format", "(", "package", ")", ":", "# Pillars are automatically put in the pillar_path", "member", ".", "name", "=", "'{0}.sls.orig'", ".", "format", "(", "package", ")", "out_path", "=", "conn", "[", "'pillar_path'", "]", "elif", "package", ".", "endswith", "(", "'-conf'", ")", ":", "# Configuration files go into /etc/salt/", "member", ".", "name", "=", "member", ".", "name", ".", "replace", "(", "'{0}/'", ".", "format", "(", "package", ")", ",", "''", ")", "out_path", "=", "salt", ".", "syspaths", ".", "CONFIG_DIR", "elif", "package", ".", "endswith", "(", "'-reactor'", ")", ":", "# Reactor files go into /srv/reactor/", "out_path", "=", "__opts__", "[", "'reactor_path'", "]", "# This ensures that double directories (i.e., apache/apache/) don't", "# get created", "comps", "=", "member", ".", "path", ".", "split", "(", "'/'", ")", "if", "len", "(", "comps", ")", ">", "1", "and", "comps", "[", "0", "]", "==", "comps", "[", "1", "]", ":", "member", ".", "path", "=", "'/'", ".", "join", "(", "comps", "[", "1", ":", "]", ")", "log", ".", "debug", "(", "'Installing 
package file %s to %s'", ",", "member", ".", "name", ",", "out_path", ")", "formula_tar", ".", "extract", "(", "member", ",", "out_path", ")", "return", "out_path" ]
Install a single file to the file system
[ "Install", "a", "single", "file", "to", "the", "file", "system" ]
python
train
krischer/mtspec
mtspec/multitaper.py
https://github.com/krischer/mtspec/blob/06561b6370f13fcb2e731470ba0f7314f4b2362d/mtspec/multitaper.py#L775-L784
def empty(self, shape, complex=False): """ A wrapper around np.empty which automatically sets the correct type and returns an empty array. :param shape: The shape of the array in np.empty format """ if complex: return np.empty(shape, dtype=self.complex, order=self.order) return np.empty(shape, dtype=self.float, order=self.order)
[ "def", "empty", "(", "self", ",", "shape", ",", "complex", "=", "False", ")", ":", "if", "complex", ":", "return", "np", ".", "empty", "(", "shape", ",", "dtype", "=", "self", ".", "complex", ",", "order", "=", "self", ".", "order", ")", "return", "np", ".", "empty", "(", "shape", ",", "dtype", "=", "self", ".", "float", ",", "order", "=", "self", ".", "order", ")" ]
A wrapper around np.empty which automatically sets the correct type and returns an empty array. :param shape: The shape of the array in np.empty format
[ "A", "wrapper", "around", "np", ".", "empty", "which", "automatically", "sets", "the", "correct", "type", "and", "returns", "an", "empty", "array", "." ]
python
train
pycontribs/pyrax
pyrax/object_storage.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L868-L879
def fetch_cdn_data(self, container): """ Returns a dict containing the CDN information for the specified container. If the container is not CDN-enabled, returns an empty dict. """ name = utils.get_name(container) uri = "/%s" % name try: resp, resp_body = self.api.cdn_request(uri, "HEAD") except exc.NotCDNEnabled: return {} return resp.headers
[ "def", "fetch_cdn_data", "(", "self", ",", "container", ")", ":", "name", "=", "utils", ".", "get_name", "(", "container", ")", "uri", "=", "\"/%s\"", "%", "name", "try", ":", "resp", ",", "resp_body", "=", "self", ".", "api", ".", "cdn_request", "(", "uri", ",", "\"HEAD\"", ")", "except", "exc", ".", "NotCDNEnabled", ":", "return", "{", "}", "return", "resp", ".", "headers" ]
Returns a dict containing the CDN information for the specified container. If the container is not CDN-enabled, returns an empty dict.
[ "Returns", "a", "dict", "containing", "the", "CDN", "information", "for", "the", "specified", "container", ".", "If", "the", "container", "is", "not", "CDN", "-", "enabled", "returns", "an", "empty", "dict", "." ]
python
train
rosenbrockc/fortpy
fortpy/parsers/docstring.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/parsers/docstring.py#L488-L531
def parsexml(self, xmlstring, modules, source=None): """Parses the docstrings out of the specified xml file. :arg source: the path to the file from which the XML string was extracted. """ result = {} from fortpy.utility import XML_fromstring xmlroot = XML_fromstring(xmlstring, source) if xmlroot.tag == "fortpy" and "mode" in xmlroot.attrib and \ xmlroot.attrib["mode"] == "docstring": #First, cycle through the kids to find the <global> tag (if any #exist). It's children will apply to any of the other tags we find #and we will have to update their attributes accordingly. xmlglobals = {} for child in xmlroot.iterfind("globals"): _update_globals(list(child), xmlglobals) _set_global_defaults(xmlglobals) #We fill the dictionary with decorates names as keys and lists #of the xml docstring elements as values. for child in xmlroot: if child.tag == "globals": continue xmltags = [] if child.tag == "decorates" and "name" in child.attrib: decorates = child.attrib["name"] xmltags.extend(list(child)) elif "decorates" in child.attrib: decorates = child.attrib["decorates"] xmltags.append(child) for xtag in xmltags: _update_from_globals(xtag, xmlglobals, child) if decorates in result: result[decorates].extend(xmltags) else: result[decorates] = xmltags #Loop through all the docstrings we found and team them up with #their respective module members. self._xml_update_modules(result, modules)
[ "def", "parsexml", "(", "self", ",", "xmlstring", ",", "modules", ",", "source", "=", "None", ")", ":", "result", "=", "{", "}", "from", "fortpy", ".", "utility", "import", "XML_fromstring", "xmlroot", "=", "XML_fromstring", "(", "xmlstring", ",", "source", ")", "if", "xmlroot", ".", "tag", "==", "\"fortpy\"", "and", "\"mode\"", "in", "xmlroot", ".", "attrib", "and", "xmlroot", ".", "attrib", "[", "\"mode\"", "]", "==", "\"docstring\"", ":", "#First, cycle through the kids to find the <global> tag (if any", "#exist). It's children will apply to any of the other tags we find", "#and we will have to update their attributes accordingly.", "xmlglobals", "=", "{", "}", "for", "child", "in", "xmlroot", ".", "iterfind", "(", "\"globals\"", ")", ":", "_update_globals", "(", "list", "(", "child", ")", ",", "xmlglobals", ")", "_set_global_defaults", "(", "xmlglobals", ")", "#We fill the dictionary with decorates names as keys and lists", "#of the xml docstring elements as values.", "for", "child", "in", "xmlroot", ":", "if", "child", ".", "tag", "==", "\"globals\"", ":", "continue", "xmltags", "=", "[", "]", "if", "child", ".", "tag", "==", "\"decorates\"", "and", "\"name\"", "in", "child", ".", "attrib", ":", "decorates", "=", "child", ".", "attrib", "[", "\"name\"", "]", "xmltags", ".", "extend", "(", "list", "(", "child", ")", ")", "elif", "\"decorates\"", "in", "child", ".", "attrib", ":", "decorates", "=", "child", ".", "attrib", "[", "\"decorates\"", "]", "xmltags", ".", "append", "(", "child", ")", "for", "xtag", "in", "xmltags", ":", "_update_from_globals", "(", "xtag", ",", "xmlglobals", ",", "child", ")", "if", "decorates", "in", "result", ":", "result", "[", "decorates", "]", ".", "extend", "(", "xmltags", ")", "else", ":", "result", "[", "decorates", "]", "=", "xmltags", "#Loop through all the docstrings we found and team them up with", "#their respective module members.", "self", ".", "_xml_update_modules", "(", "result", ",", "modules", ")" ]
Parses the docstrings out of the specified xml file. :arg source: the path to the file from which the XML string was extracted.
[ "Parses", "the", "docstrings", "out", "of", "the", "specified", "xml", "file", "." ]
python
train
davenquinn/Attitude
attitude/stereonet.py
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/stereonet.py#L13-L23
def ellipse(n=1000, adaptive=False): """ Get a parameterized set of vectors defining ellipse for a major and minor axis length. Resulting vector bundle has major axes along axes given. """ u = N.linspace(0,2*N.pi,n) # Get a bundle of vectors defining # a full rotation around the unit circle return N.array([N.cos(u),N.sin(u)]).T
[ "def", "ellipse", "(", "n", "=", "1000", ",", "adaptive", "=", "False", ")", ":", "u", "=", "N", ".", "linspace", "(", "0", ",", "2", "*", "N", ".", "pi", ",", "n", ")", "# Get a bundle of vectors defining", "# a full rotation around the unit circle", "return", "N", ".", "array", "(", "[", "N", ".", "cos", "(", "u", ")", ",", "N", ".", "sin", "(", "u", ")", "]", ")", ".", "T" ]
Get a parameterized set of vectors defining ellipse for a major and minor axis length. Resulting vector bundle has major axes along axes given.
[ "Get", "a", "parameterized", "set", "of", "vectors", "defining", "ellipse", "for", "a", "major", "and", "minor", "axis", "length", ".", "Resulting", "vector", "bundle", "has", "major", "axes", "along", "axes", "given", "." ]
python
train
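Despite the name, the function returns unit-circle vectors; scaling the two columns by semi-axis lengths is what turns them into an ellipse with "major axes along axes given". A short self-contained sketch of that use:

```python
import numpy as np

u = np.linspace(0, 2 * np.pi, 1000)
circle = np.array([np.cos(u), np.sin(u)]).T   # shape (1000, 2), the unit circle

a, b = 3.0, 1.0                               # hypothetical semi-axis lengths
pts = circle * np.array([a, b])               # column-wise scaling -> ellipse
# every point satisfies the ellipse equation (x/a)^2 + (y/b)^2 = 1
assert np.allclose((pts[:, 0] / a) ** 2 + (pts[:, 1] / b) ** 2, 1.0)
```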
apple/turicreate
deps/src/boost_1_68_0/libs/predef/tools/ci/build_log.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/predef/tools/ci/build_log.py#L85-L103
def add_input(self, input): ''' Add a single build XML output file to our data. ''' events = xml.dom.pulldom.parse(input) context = [] for (event,node) in events: if event == xml.dom.pulldom.START_ELEMENT: context.append(node) if node.nodeType == xml.dom.Node.ELEMENT_NODE: x_f = self.x_name_(*context) if x_f: events.expandNode(node) # expanding eats the end element, hence walking us out one level context.pop() # call handler (x_f[1])(node) elif event == xml.dom.pulldom.END_ELEMENT: context.pop()
[ "def", "add_input", "(", "self", ",", "input", ")", ":", "events", "=", "xml", ".", "dom", ".", "pulldom", ".", "parse", "(", "input", ")", "context", "=", "[", "]", "for", "(", "event", ",", "node", ")", "in", "events", ":", "if", "event", "==", "xml", ".", "dom", ".", "pulldom", ".", "START_ELEMENT", ":", "context", ".", "append", "(", "node", ")", "if", "node", ".", "nodeType", "==", "xml", ".", "dom", ".", "Node", ".", "ELEMENT_NODE", ":", "x_f", "=", "self", ".", "x_name_", "(", "*", "context", ")", "if", "x_f", ":", "events", ".", "expandNode", "(", "node", ")", "# expanding eats the end element, hence walking us out one level", "context", ".", "pop", "(", ")", "# call handler", "(", "x_f", "[", "1", "]", ")", "(", "node", ")", "elif", "event", "==", "xml", ".", "dom", ".", "pulldom", ".", "END_ELEMENT", ":", "context", ".", "pop", "(", ")" ]
Add a single build XML output file to our data.
[ "Add", "a", "single", "build", "XML", "output", "file", "to", "our", "data", "." ]
python
train
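The pulldom pattern above streams the XML and expands only the subtrees a handler cares about; `expandNode` also consumes the matching end event, which is why the snippet pops the context stack manually. A minimal self-contained sketch of the same stream-and-expand loop:

```python
from xml.dom import pulldom

doc = "<root><item id='1'/><item id='2'><child/></item></root>"
events = pulldom.parseString(doc)
for event, node in events:
    if event == pulldom.START_ELEMENT and node.tagName == 'item':
        events.expandNode(node)   # pulls the whole subtree into `node`
        print(node.toxml())       # <item id="1"/> then <item id="2"><child/></item>
```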
ethereum/py-evm
eth/vm/logic/comparison.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/logic/comparison.py#L39-L53
def slt(computation: BaseComputation) -> None: """ Signed Lesser Comparison """ left, right = map( unsigned_to_signed, computation.stack_pop(num_items=2, type_hint=constants.UINT256), ) if left < right: result = 1 else: result = 0 computation.stack_push(signed_to_unsigned(result))
[ "def", "slt", "(", "computation", ":", "BaseComputation", ")", "->", "None", ":", "left", ",", "right", "=", "map", "(", "unsigned_to_signed", ",", "computation", ".", "stack_pop", "(", "num_items", "=", "2", ",", "type_hint", "=", "constants", ".", "UINT256", ")", ",", ")", "if", "left", "<", "right", ":", "result", "=", "1", "else", ":", "result", "=", "0", "computation", ".", "stack_push", "(", "signed_to_unsigned", "(", "result", ")", ")" ]
Signed Lesser Comparison
[ "Signed", "Lesser", "Comparison" ]
python
train
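The two's-complement reinterpretation is the whole trick: a 256-bit word at or above 2**255 is treated as negative. A self-contained sketch of the conversion helpers the opcode relies on (names mirror the snippet but are reimplemented here to be runnable on their own):

```python
def unsigned_to_signed(value):
    # reinterpret a 256-bit unsigned word as two's complement
    return value - 2 ** 256 if value >= 2 ** 255 else value

def signed_to_unsigned(value):
    return value + 2 ** 256 if value < 0 else value

UINT256_MAX = 2 ** 256 - 1        # the EVM encoding of -1
left, right = unsigned_to_signed(UINT256_MAX), unsigned_to_signed(1)
result = 1 if left < right else 0
assert result == 1                # SLT(-1, 1) pushes 1
assert signed_to_unsigned(-1) == UINT256_MAX
```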
liampauling/betfair
betfairlightweight/baseclient.py
https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/baseclient.py#L126-L156
def cert(self): """ The betfair certificates, by default it looks for the certificates in /certs/. :return: Path of cert files :rtype: str """ if self.cert_files is not None: return self.cert_files certs = self.certs or '/certs/' ssl_path = os.path.join(os.pardir, certs) try: cert_path = os.listdir(ssl_path) except FileNotFoundError: raise CertsError(certs) except OSError: # Python 2 compatibility raise CertsError(certs) cert = None key = None for file in cert_path: ext = os.path.splitext(file)[-1] if ext in ['.crt', '.cert']: cert = os.path.join(ssl_path, file) elif ext == '.key': key = os.path.join(ssl_path, file) if cert is None or key is None: raise CertsError(certs) return [cert, key]
[ "def", "cert", "(", "self", ")", ":", "if", "self", ".", "cert_files", "is", "not", "None", ":", "return", "self", ".", "cert_files", "certs", "=", "self", ".", "certs", "or", "'/certs/'", "ssl_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "pardir", ",", "certs", ")", "try", ":", "cert_path", "=", "os", ".", "listdir", "(", "ssl_path", ")", "except", "FileNotFoundError", ":", "raise", "CertsError", "(", "certs", ")", "except", "OSError", ":", "# Python 2 compatability", "raise", "CertsError", "(", "certs", ")", "cert", "=", "None", "key", "=", "None", "for", "file", "in", "cert_path", ":", "ext", "=", "os", ".", "path", ".", "splitext", "(", "file", ")", "[", "-", "1", "]", "if", "ext", "in", "[", "'.crt'", ",", "'.cert'", "]", ":", "cert", "=", "os", ".", "path", ".", "join", "(", "ssl_path", ",", "file", ")", "elif", "ext", "==", "'.key'", ":", "key", "=", "os", ".", "path", ".", "join", "(", "ssl_path", ",", "file", ")", "if", "cert", "is", "None", "or", "key", "is", "None", ":", "raise", "CertsError", "(", "certs", ")", "return", "[", "cert", ",", "key", "]" ]
The betfair certificates, by default it looks for the certificates in /certs/. :return: Path of cert files :rtype: str
[ "The", "betfair", "certificates", "by", "default", "it", "looks", "for", "the", "certificates", "in", "/", "certs", "/", "." ]
python
train
HDI-Project/MLPrimitives
mlprimitives/custom/timeseries_anomalies.py
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/custom/timeseries_anomalies.py#L119-L140
def find_sequences(errors, epsilon): """Find sequences of values that are above epsilon. This is done following these steps: * create a boolean mask that indicates which values are above epsilon. * shift this mask by one place, filling the empty gap with a False * compare the shifted mask with the original one to see if there are changes. * Consider a sequence start any point which was true and has changed * Consider a sequence end any point which was false and has changed """ above = pd.Series(errors > epsilon) shift = above.shift(1).fillna(False) change = above != shift index = above.index starts = index[above & change].tolist() ends = (index[~above & change] - 1).tolist() if len(ends) == len(starts) - 1: ends.append(len(above) - 1) return list(zip(starts, ends))
[ "def", "find_sequences", "(", "errors", ",", "epsilon", ")", ":", "above", "=", "pd", ".", "Series", "(", "errors", ">", "epsilon", ")", "shift", "=", "above", ".", "shift", "(", "1", ")", ".", "fillna", "(", "False", ")", "change", "=", "above", "!=", "shift", "index", "=", "above", ".", "index", "starts", "=", "index", "[", "above", "&", "change", "]", ".", "tolist", "(", ")", "ends", "=", "(", "index", "[", "~", "above", "&", "change", "]", "-", "1", ")", ".", "tolist", "(", ")", "if", "len", "(", "ends", ")", "==", "len", "(", "starts", ")", "-", "1", ":", "ends", ".", "append", "(", "len", "(", "above", ")", "-", "1", ")", "return", "list", "(", "zip", "(", "starts", ",", "ends", ")", ")" ]
Find sequences of values that are above epsilon. This is done following these steps: * create a boolean mask that indicates which values are above epsilon. * shift this mask by one place, filling the empty gap with a False * compare the shifted mask with the original one to see if there are changes. * Consider a sequence start any point which was true and has changed * Consider a sequence end any point which was false and has changed
[ "Find", "sequences", "of", "values", "that", "are", "above", "epsilon", "." ]
python
train
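The shift-and-compare trick is easiest to trace on a tiny array; note the final branch, which closes a sequence still open at the end of the series. A short worked sketch:

```python
import numpy as np
import pandas as pd

errors = np.array([0.1, 0.9, 0.8, 0.2, 0.7, 0.6])
above = pd.Series(errors > 0.5)                      # F T T F T T
shift = above.shift(1).fillna(False)
change = above != shift                              # True where the mask flips
starts = above.index[above & change].tolist()        # [1, 4]
ends = (above.index[~above & change] - 1).tolist()   # [2]
if len(ends) == len(starts) - 1:
    ends.append(len(above) - 1)                      # close the trailing sequence
print(list(zip(starts, ends)))                       # [(1, 2), (4, 5)]
```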
edx/edx-enterprise
enterprise/management/commands/assign_enterprise_user_roles.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/management/commands/assign_enterprise_user_roles.py#L113-L118
def _get_enterprise_enrollment_api_admin_users_batch(self, start, end): # pylint: disable=invalid-name """ Returns a batched queryset of User objects. """ LOGGER.info('Fetching new batch of enterprise enrollment admin users from indexes: %s to %s', start, end) return User.objects.filter(groups__name=ENTERPRISE_ENROLLMENT_API_ACCESS_GROUP, is_staff=False)[start:end]
[ "def", "_get_enterprise_enrollment_api_admin_users_batch", "(", "self", ",", "start", ",", "end", ")", ":", "# pylint: disable=invalid-name", "LOGGER", ".", "info", "(", "'Fetching new batch of enterprise enrollment admin users from indexes: %s to %s'", ",", "start", ",", "end", ")", "return", "User", ".", "objects", ".", "filter", "(", "groups__name", "=", "ENTERPRISE_ENROLLMENT_API_ACCESS_GROUP", ",", "is_staff", "=", "False", ")", "[", "start", ":", "end", "]" ]
Returns a batched queryset of User objects.
[ "Returns", "a", "batched", "queryset", "of", "User", "objects", "." ]
python
valid
burnash/gspread
gspread/utils.py
https://github.com/burnash/gspread/blob/0e8debe208095aeed3e3e7136c2fa5cd74090946/gspread/utils.py#L135-L162
def a1_to_rowcol(label): """Translates a cell's address in A1 notation to a tuple of integers. :param label: A cell label in A1 notation, e.g. 'B1'. Letter case is ignored. :type label: str :returns: a tuple containing `row` and `column` numbers. Both indexed from 1 (one). Example: >>> a1_to_rowcol('A1') (1, 1) """ m = CELL_ADDR_RE.match(label) if m: column_label = m.group(1).upper() row = int(m.group(2)) col = 0 for i, c in enumerate(reversed(column_label)): col += (ord(c) - MAGIC_NUMBER) * (26 ** i) else: raise IncorrectCellLabel(label) return (row, col)
[ "def", "a1_to_rowcol", "(", "label", ")", ":", "m", "=", "CELL_ADDR_RE", ".", "match", "(", "label", ")", "if", "m", ":", "column_label", "=", "m", ".", "group", "(", "1", ")", ".", "upper", "(", ")", "row", "=", "int", "(", "m", ".", "group", "(", "2", ")", ")", "col", "=", "0", "for", "i", ",", "c", "in", "enumerate", "(", "reversed", "(", "column_label", ")", ")", ":", "col", "+=", "(", "ord", "(", "c", ")", "-", "MAGIC_NUMBER", ")", "*", "(", "26", "**", "i", ")", "else", ":", "raise", "IncorrectCellLabel", "(", "label", ")", "return", "(", "row", ",", "col", ")" ]
Translates a cell's address in A1 notation to a tuple of integers. :param label: A cell label in A1 notation, e.g. 'B1'. Letter case is ignored. :type label: str :returns: a tuple containing `row` and `column` numbers. Both indexed from 1 (one). Example: >>> a1_to_rowcol('A1') (1, 1)
[ "Translates", "a", "cell", "s", "address", "in", "A1", "notation", "to", "a", "tuple", "of", "integers", "." ]
python
train
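The column letters form a base-26 numeral with digits A..Z; `MAGIC_NUMBER` is evidently `ord('A') - 1 == 64`, so each letter maps to 1..26 at its power of 26. A standalone sketch (the inline regex stands in for gspread's `CELL_ADDR_RE`):

```python
import re

def a1_to_rowcol(label):
    m = re.match(r'([A-Za-z]+)(\d+)$', label)
    if not m:
        raise ValueError(label)
    row = int(m.group(2))
    col = 0
    for i, c in enumerate(reversed(m.group(1).upper())):
        col += (ord(c) - 64) * (26 ** i)   # A -> 1, ..., Z -> 26, per power of 26
    return row, col

assert a1_to_rowcol('A1') == (1, 1)
assert a1_to_rowcol('AB10') == (10, 28)    # 1*26 + 2
```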
common-workflow-language/cwltool
cwltool/factory.py
https://github.com/common-workflow-language/cwltool/blob/cb81b22abc52838823da9945f04d06739ab32fda/cwltool/factory.py#L56-L61
def make(self, cwl): """Instantiate a CWL object from a CWl document.""" load = load_tool.load_tool(cwl, self.loading_context) if isinstance(load, int): raise Exception("Error loading tool") return Callable(load, self)
[ "def", "make", "(", "self", ",", "cwl", ")", ":", "load", "=", "load_tool", ".", "load_tool", "(", "cwl", ",", "self", ".", "loading_context", ")", "if", "isinstance", "(", "load", ",", "int", ")", ":", "raise", "Exception", "(", "\"Error loading tool\"", ")", "return", "Callable", "(", "load", ",", "self", ")" ]
Instantiate a CWL object from a CWl document.
[ "Instantiate", "a", "CWL", "object", "from", "a", "CWl", "document", "." ]
python
train
jic-dtool/dtoolcore
dtoolcore/__init__.py
https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L366-L371
def _manifest(self): """Return manifest content.""" if self._manifest_cache is None: self._manifest_cache = self._storage_broker.get_manifest() return self._manifest_cache
[ "def", "_manifest", "(", "self", ")", ":", "if", "self", ".", "_manifest_cache", "is", "None", ":", "self", ".", "_manifest_cache", "=", "self", ".", "_storage_broker", ".", "get_manifest", "(", ")", "return", "self", ".", "_manifest_cache" ]
Return manifest content.
[ "Return", "manifest", "content", "." ]
python
train
pavlin-policar/openTSNE
openTSNE/tsne.py
https://github.com/pavlin-policar/openTSNE/blob/28513a0d669f2f20e7b971c0c6373dc375f72771/openTSNE/tsne.py#L691-L751
def prepare_partial(self, X, initialization="median", k=25, **affinity_params): """Prepare a partial embedding which can be optimized. Parameters ---------- X: np.ndarray The data matrix to be added to the existing embedding. initialization: Union[np.ndarray, str] The initial point positions to be used in the embedding space. Can be a precomputed numpy array, ``median``, ``weighted`` or ``random``. In all cases, ``median`` or ``weighted`` should be preferred. k: int The number of nearest neighbors to consider when initially placing the point onto the embedding. This is different from ``perplexity`` because perplexity affects optimization while this only affects the initial point positions. **affinity_params: dict Additional params to be passed to the ``Affinities.to_new`` method. Please see individual :class:`~openTSNE.affinity.Affinities` implementations as the parameters differ between implementations. Returns ------- PartialTSNEEmbedding An unoptimized :class:`PartialTSNEEmbedding` object, prepared for optimization. """ P, neighbors, distances = self.affinities.to_new( X, return_distances=True, **affinity_params ) # If initial positions are given in an array, use a copy of that if isinstance(initialization, np.ndarray): init_checks.num_samples(initialization.shape[0], X.shape[0]) init_checks.num_dimensions(initialization.shape[1], self.shape[1]) embedding = np.array(initialization) # Random initialization with isotropic normal distribution elif initialization == "random": embedding = initialization_scheme.random(X, self.shape[1], self.random_state) elif initialization == "weighted": embedding = initialization_scheme.weighted_mean( X, self, neighbors[:, :k], distances[:, :k] ) elif initialization == "median": embedding = initialization_scheme.median(self, neighbors[:, :k]) else: raise ValueError(f"Unrecognized initialization scheme `{initialization}`.") return PartialTSNEEmbedding( embedding, reference_embedding=self, P=P, **self.gradient_descent_params, )
[ "def", "prepare_partial", "(", "self", ",", "X", ",", "initialization", "=", "\"median\"", ",", "k", "=", "25", ",", "*", "*", "affinity_params", ")", ":", "P", ",", "neighbors", ",", "distances", "=", "self", ".", "affinities", ".", "to_new", "(", "X", ",", "return_distances", "=", "True", ",", "*", "*", "affinity_params", ")", "# If initial positions are given in an array, use a copy of that", "if", "isinstance", "(", "initialization", ",", "np", ".", "ndarray", ")", ":", "init_checks", ".", "num_samples", "(", "initialization", ".", "shape", "[", "0", "]", ",", "X", ".", "shape", "[", "0", "]", ")", "init_checks", ".", "num_dimensions", "(", "initialization", ".", "shape", "[", "1", "]", ",", "self", ".", "shape", "[", "1", "]", ")", "embedding", "=", "np", ".", "array", "(", "initialization", ")", "# Random initialization with isotropic normal distribution", "elif", "initialization", "==", "\"random\"", ":", "embedding", "=", "initialization_scheme", ".", "random", "(", "X", ",", "self", ".", "shape", "[", "1", "]", ",", "self", ".", "random_state", ")", "elif", "initialization", "==", "\"weighted\"", ":", "embedding", "=", "initialization_scheme", ".", "weighted_mean", "(", "X", ",", "self", ",", "neighbors", "[", ":", ",", ":", "k", "]", ",", "distances", "[", ":", ",", ":", "k", "]", ")", "elif", "initialization", "==", "\"median\"", ":", "embedding", "=", "initialization_scheme", ".", "median", "(", "self", ",", "neighbors", "[", ":", ",", ":", "k", "]", ")", "else", ":", "raise", "ValueError", "(", "f\"Unrecognized initialization scheme `{initialization}`.\"", ")", "return", "PartialTSNEEmbedding", "(", "embedding", ",", "reference_embedding", "=", "self", ",", "P", "=", "P", ",", "*", "*", "self", ".", "gradient_descent_params", ",", ")" ]
Prepare a partial embedding which can be optimized. Parameters ---------- X: np.ndarray The data matrix to be added to the existing embedding. initialization: Union[np.ndarray, str] The initial point positions to be used in the embedding space. Can be a precomputed numpy array, ``median``, ``weighted`` or ``random``. In all cases, ``median`` or ``weighted`` should be preferred. k: int The number of nearest neighbors to consider when initially placing the point onto the embedding. This is different from ``perplexity`` because perplexity affects optimization while this only affects the initial point positions. **affinity_params: dict Additional params to be passed to the ``Affinities.to_new`` method. Please see individual :class:`~openTSNE.affinity.Affinities` implementations as the parameters differ between implementations. Returns ------- PartialTSNEEmbedding An unoptimized :class:`PartialTSNEEmbedding` object, prepared for optimization.
[ "Prepare", "a", "partial", "embedding", "which", "can", "be", "optimized", "." ]
python
train
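In practice the method is reached from an existing embedding. A hedged usage sketch, with API names taken from the documentation above and random data used only to show the shapes:

```python
import numpy as np
from openTSNE import TSNE  # assumes openTSNE is installed

X_train = np.random.randn(200, 8)
X_new = np.random.randn(10, 8)

embedding = TSNE().fit(X_train)  # a TSNEEmbedding
partial = embedding.prepare_partial(X_new, initialization="median", k=25)
# the returned PartialTSNEEmbedding is unoptimized; optimize it afterwards
partial.optimize(n_iter=50, inplace=True)
```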
pyviz/holoviews
holoviews/plotting/plotly/util.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/plotly/util.py#L285-L379
def _offset_subplot_ids(fig, offsets): """ Apply offsets to the subplot id numbers in a figure. Note: This function mutates the input figure dict Note: This function assumes that the normalize_subplot_ids function has already been run on the figure, so that all layout subplot properties in use are explicitly present in the figure's layout. Parameters ---------- fig: dict A plotly figure dict offsets: dict A dict from subplot types to the offset to be applied for each subplot type. This dict matches the form of the dict returned by get_max_subplot_ids """ # Offset traces for trace in fig.get('data', None): trace_type = trace.get('type', 'scatter') subplot_types = _trace_to_subplot.get(trace_type, []) for subplot_type in subplot_types: subplot_prop_name = _get_subplot_prop_name(subplot_type) # Compute subplot value prefix subplot_val_prefix = _get_subplot_val_prefix(subplot_type) subplot_val = trace.get(subplot_prop_name, subplot_val_prefix) subplot_number = _get_subplot_number(subplot_val) offset_subplot_number = ( subplot_number + offsets.get(subplot_type, 0)) if offset_subplot_number > 1: trace[subplot_prop_name] = ( subplot_val_prefix + str(offset_subplot_number)) else: trace[subplot_prop_name] = subplot_val_prefix # layout subplots layout = fig.setdefault('layout', {}) new_subplots = {} for subplot_type in offsets: offset = offsets[subplot_type] if offset < 1: continue for layout_prop in list(layout.keys()): if layout_prop.startswith(subplot_type): subplot_number = _get_subplot_number(layout_prop) new_subplot_number = subplot_number + offset new_layout_prop = subplot_type + str(new_subplot_number) new_subplots[new_layout_prop] = layout.pop(layout_prop) layout.update(new_subplots) # xaxis/yaxis anchors x_offset = offsets.get('xaxis', 0) y_offset = offsets.get('yaxis', 0) for layout_prop in list(layout.keys()): if layout_prop.startswith('xaxis'): xaxis = layout[layout_prop] anchor = xaxis.get('anchor', 'y') anchor_number = _get_subplot_number(anchor) + y_offset if anchor_number > 1: xaxis['anchor'] = 'y' + str(anchor_number) else: xaxis['anchor'] = 'y' elif layout_prop.startswith('yaxis'): yaxis = layout[layout_prop] anchor = yaxis.get('anchor', 'x') anchor_number = _get_subplot_number(anchor) + x_offset if anchor_number > 1: yaxis['anchor'] = 'x' + str(anchor_number) else: yaxis['anchor'] = 'x' # annotations/shapes/images for layout_prop in ['annotations', 'shapes', 'images']: for obj in layout.get(layout_prop, []): if x_offset: xref = obj.get('xref', 'x') if xref != 'paper': xref_number = _get_subplot_number(xref) obj['xref'] = 'x' + str(xref_number + x_offset) if y_offset: yref = obj.get('yref', 'y') if yref != 'paper': yref_number = _get_subplot_number(yref) obj['yref'] = 'y' + str(yref_number + y_offset)
[ "def", "_offset_subplot_ids", "(", "fig", ",", "offsets", ")", ":", "# Offset traces", "for", "trace", "in", "fig", ".", "get", "(", "'data'", ",", "None", ")", ":", "trace_type", "=", "trace", ".", "get", "(", "'type'", ",", "'scatter'", ")", "subplot_types", "=", "_trace_to_subplot", ".", "get", "(", "trace_type", ",", "[", "]", ")", "for", "subplot_type", "in", "subplot_types", ":", "subplot_prop_name", "=", "_get_subplot_prop_name", "(", "subplot_type", ")", "# Compute subplot value prefix", "subplot_val_prefix", "=", "_get_subplot_val_prefix", "(", "subplot_type", ")", "subplot_val", "=", "trace", ".", "get", "(", "subplot_prop_name", ",", "subplot_val_prefix", ")", "subplot_number", "=", "_get_subplot_number", "(", "subplot_val", ")", "offset_subplot_number", "=", "(", "subplot_number", "+", "offsets", ".", "get", "(", "subplot_type", ",", "0", ")", ")", "if", "offset_subplot_number", ">", "1", ":", "trace", "[", "subplot_prop_name", "]", "=", "(", "subplot_val_prefix", "+", "str", "(", "offset_subplot_number", ")", ")", "else", ":", "trace", "[", "subplot_prop_name", "]", "=", "subplot_val_prefix", "# layout subplots", "layout", "=", "fig", ".", "setdefault", "(", "'layout'", ",", "{", "}", ")", "new_subplots", "=", "{", "}", "for", "subplot_type", "in", "offsets", ":", "offset", "=", "offsets", "[", "subplot_type", "]", "if", "offset", "<", "1", ":", "continue", "for", "layout_prop", "in", "list", "(", "layout", ".", "keys", "(", ")", ")", ":", "if", "layout_prop", ".", "startswith", "(", "subplot_type", ")", ":", "subplot_number", "=", "_get_subplot_number", "(", "layout_prop", ")", "new_subplot_number", "=", "subplot_number", "+", "offset", "new_layout_prop", "=", "subplot_type", "+", "str", "(", "new_subplot_number", ")", "new_subplots", "[", "new_layout_prop", "]", "=", "layout", ".", "pop", "(", "layout_prop", ")", "layout", ".", "update", "(", "new_subplots", ")", "# xaxis/yaxis anchors", "x_offset", "=", "offsets", ".", "get", "(", "'xaxis'", ",", "0", ")", "y_offset", "=", "offsets", ".", "get", "(", "'yaxis'", ",", "0", ")", "for", "layout_prop", "in", "list", "(", "layout", ".", "keys", "(", ")", ")", ":", "if", "layout_prop", ".", "startswith", "(", "'xaxis'", ")", ":", "xaxis", "=", "layout", "[", "layout_prop", "]", "anchor", "=", "xaxis", ".", "get", "(", "'anchor'", ",", "'y'", ")", "anchor_number", "=", "_get_subplot_number", "(", "anchor", ")", "+", "y_offset", "if", "anchor_number", ">", "1", ":", "xaxis", "[", "'anchor'", "]", "=", "'y'", "+", "str", "(", "anchor_number", ")", "else", ":", "xaxis", "[", "'anchor'", "]", "=", "'y'", "elif", "layout_prop", ".", "startswith", "(", "'yaxis'", ")", ":", "yaxis", "=", "layout", "[", "layout_prop", "]", "anchor", "=", "yaxis", ".", "get", "(", "'anchor'", ",", "'x'", ")", "anchor_number", "=", "_get_subplot_number", "(", "anchor", ")", "+", "x_offset", "if", "anchor_number", ">", "1", ":", "yaxis", "[", "'anchor'", "]", "=", "'x'", "+", "str", "(", "anchor_number", ")", "else", ":", "yaxis", "[", "'anchor'", "]", "=", "'x'", "# annotations/shapes/images", "for", "layout_prop", "in", "[", "'annotations'", ",", "'shapes'", ",", "'images'", "]", ":", "for", "obj", "in", "layout", ".", "get", "(", "layout_prop", ",", "[", "]", ")", ":", "if", "x_offset", ":", "xref", "=", "obj", ".", "get", "(", "'xref'", ",", "'x'", ")", "if", "xref", "!=", "'paper'", ":", "xref_number", "=", "_get_subplot_number", "(", "xref", ")", "obj", "[", "'xref'", "]", "=", "'x'", "+", "str", "(", "xref_number", "+", "x_offset", ")", "if", 
"y_offset", ":", "yref", "=", "obj", ".", "get", "(", "'yref'", ",", "'y'", ")", "if", "yref", "!=", "'paper'", ":", "yref_number", "=", "_get_subplot_number", "(", "yref", ")", "obj", "[", "'yref'", "]", "=", "'y'", "+", "str", "(", "yref_number", "+", "y_offset", ")" ]
Apply offsets to the subplot id numbers in a figure. Note: This function mutates the input figure dict Note: This function assumes that the normalize_subplot_ids function has already been run on the figure, so that all layout subplot properties in use are explicitly present in the figure's layout. Parameters ---------- fig: dict A plotly figure dict offsets: dict A dict from subplot types to the offset to be applied for each subplot type. This dict matches the form of the dict returned by get_max_subplot_ids
[ "Apply", "offsets", "to", "the", "subplot", "id", "numbers", "in", "a", "figure", "." ]
python
train
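Editor's note: the record above renumbers subplot ids by string manipulation on axis names. A self-contained sketch of that idea follows; the figure dict, offset value, and subplot_number() helper are illustrative stand-ins, not plotly's internal API.

# Standalone sketch of subplot-id offsetting (hypothetical inputs).
import re

def subplot_number(val):
    # 'x3' or 'xaxis3' -> 3; a bare 'x' or 'xaxis' -> 1
    match = re.search(r'(\d+)$', val)
    return int(match.group(1)) if match else 1

fig = {'data': [{'type': 'scatter', 'xaxis': 'x', 'yaxis': 'y'}],
       'layout': {'xaxis': {'anchor': 'y'}, 'yaxis': {'anchor': 'x'}}}

x_offset = 1  # pretend one x subplot already exists in the target figure
for trace in fig.get('data', []):
    n = subplot_number(trace.get('xaxis', 'x')) + x_offset
    trace['xaxis'] = 'x' + str(n) if n > 1 else 'x'

print(fig['data'][0]['xaxis'])  # -> x2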
python-openxml/python-docx
docx/opc/oxml.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/opc/oxml.py#L109-L118
def new(ext, content_type): """ Return a new ``<Default>`` element with attributes set to parameter values. """ xml = '<Default xmlns="%s"/>' % nsmap['ct'] default = parse_xml(xml) default.set('Extension', ext) default.set('ContentType', content_type) return default
[ "def", "new", "(", "ext", ",", "content_type", ")", ":", "xml", "=", "'<Default xmlns=\"%s\"/>'", "%", "nsmap", "[", "'ct'", "]", "default", "=", "parse_xml", "(", "xml", ")", "default", ".", "set", "(", "'Extension'", ",", "ext", ")", "default", ".", "set", "(", "'ContentType'", ",", "content_type", ")", "return", "default" ]
Return a new ``<Default>`` element with attributes set to parameter values.
[ "Return", "a", "new", "<Default", ">", "element", "with", "attributes", "set", "to", "parameter", "values", "." ]
python
train
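Editor's note: a stdlib-only sketch of the same element construction. The content-types namespace URI below is my assumption about what nsmap['ct'] resolves to, not something stated in the record.

# Sketch with xml.etree rather than python-docx's parse_xml helper.
import xml.etree.ElementTree as ET

# Assumed value of nsmap['ct'] (the OPC content-types namespace).
CT_NS = 'http://schemas.openxmlformats.org/package/2006/content-types'

default = ET.Element('{%s}Default' % CT_NS)
default.set('Extension', 'xml')
default.set('ContentType', 'application/xml')
print(ET.tostring(default).decode())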
alvinwan/TexSoup
TexSoup/data.py
https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L137-L159
def children(self): r"""Immediate children of this TeX element that are valid TeX objects. This is equivalent to contents, excluding text elements and keeping only Tex expressions. :return: generator of all children :rtype: Iterator[TexExpr] >>> from TexSoup import TexSoup >>> soup = TexSoup(r''' ... \begin{itemize} ... Random text! ... \item Hello ... \end{itemize}''') >>> next(soup.itemize.children) \item Hello <BLANKLINE> """ for child in self.expr.children: node = TexNode(child) node.parent = self yield node
[ "def", "children", "(", "self", ")", ":", "for", "child", "in", "self", ".", "expr", ".", "children", ":", "node", "=", "TexNode", "(", "child", ")", "node", ".", "parent", "=", "self", "yield", "node" ]
r"""Immediate children of this TeX element that are valid TeX objects. This is equivalent to contents, excluding text elements and keeping only Tex expressions. :return: generator of all children :rtype: Iterator[TexExpr] >>> from TexSoup import TexSoup >>> soup = TexSoup(r''' ... \begin{itemize} ... Random text! ... \item Hello ... \end{itemize}''') >>> next(soup.itemize.children) \item Hello <BLANKLINE>
[ "r", "Immediate", "children", "of", "this", "TeX", "element", "that", "are", "valid", "TeX", "objects", "." ]
python
train
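Editor's note: the doctest in the record already shows typical usage; here is the same call as a plain script, which should run as-is once the public TexSoup package is installed.

# Iterating immediate TeX children, mirroring the record's doctest.
from TexSoup import TexSoup

soup = TexSoup(r'''\begin{itemize}
Random text!
\item Hello
\end{itemize}''')

for child in soup.itemize.children:
    print(child)  # prints the \item node; the bare text is skipped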
archman/beamline
beamline/ui/pltutils.py
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/ui/pltutils.py#L86-L96
def set_color(self, rgb_tuple): """ set figure and canvas with the same color. :param rgb_tuple: rgb color tuple, e.g. (255, 255, 255) for white color """ if rgb_tuple is None: rgb_tuple = wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNFACE).Get() clr = [c / 255.0 for c in rgb_tuple] self.figure.set_facecolor(clr) self.figure.set_edgecolor(clr) self.canvas.SetBackgroundColour(wx.Colour(*rgb_tuple))
[ "def", "set_color", "(", "self", ",", "rgb_tuple", ")", ":", "if", "rgb_tuple", "is", "None", ":", "rgb_tuple", "=", "wx", ".", "SystemSettings", ".", "GetColour", "(", "wx", ".", "SYS_COLOUR_BTNFACE", ")", ".", "Get", "(", ")", "clr", "=", "[", "c", "/", "255.0", "for", "c", "in", "rgb_tuple", "]", "self", ".", "figure", ".", "set_facecolor", "(", "clr", ")", "self", ".", "figure", ".", "set_edgecolor", "(", "clr", ")", "self", ".", "canvas", ".", "SetBackgroundColour", "(", "wx", ".", "Colour", "(", "*", "rgb_tuple", ")", ")" ]
set figure and canvas with the same color. :param rgb_tuple: rgb color tuple, e.g. (255, 255, 255) for white color
[ "set", "figure", "and", "canvas", "with", "the", "same", "color", "." ]
python
train
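Editor's note: the portable part of the method is the 0-255 to 0.0-1.0 conversion that matplotlib expects. A GUI-free sketch follows; the wx canvas call needs a running wx App, so it is left out, and the rgb tuple is a hypothetical grey.

# Color normalization without wx.
from matplotlib.figure import Figure

rgb_tuple = (240, 240, 240)
clr = [c / 255.0 for c in rgb_tuple]  # matplotlib wants floats in [0, 1]

fig = Figure()
fig.set_facecolor(clr)
fig.set_edgecolor(clr)
print(fig.get_facecolor())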
JarryShaw/PyPCAPKit
src/foundation/extraction.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/foundation/extraction.py#L566-L571
def _cleanup(self): """Cleanup after extraction & analysis.""" self._expkg = None self._extmp = None self._flag_e = True self._ifile.close()
[ "def", "_cleanup", "(", "self", ")", ":", "self", ".", "_expkg", "=", "None", "self", ".", "_extmp", "=", "None", "self", ".", "_flag_e", "=", "True", "self", ".", "_ifile", ".", "close", "(", ")" ]
Cleanup after extraction & analysis.
[ "Cleanup", "after", "extraction", "&", "analysis", "." ]
python
train
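Editor's note: the method is plain teardown. When sketching the same pattern outside the class, a try/finally keeps the close call guaranteed even if extraction raises; this is a generic illustration, not PyPCAPKit's API.

# Generic cleanup-after-extraction pattern; extract() is a stand-in.
def extract(path):
    handle = open(path, 'rb')
    try:
        return len(handle.read())  # placeholder for the real work
    finally:
        handle.close()             # mirrors self._ifile.close()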
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxrecord.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxrecord.py#L79-L105
def _new(self, dx_hash, close=False, **kwargs): """ :param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()` containing attributes common to all data object classes. :type dx_hash: dict :param init_from: Record from which to initialize the metadata :type init_from: :class:`DXRecord` :param close: Whether or not to close the record immediately after creating it :type close: boolean Create a new remote record object. """ if "init_from" in kwargs: if kwargs["init_from"] is not None: if not isinstance(kwargs["init_from"], DXRecord): raise DXError("Expected instance of DXRecord to init_from") dx_hash["initializeFrom"] = \ {"id": kwargs["init_from"].get_id(), "project": kwargs["init_from"].get_proj_id()} del kwargs["init_from"] if close: dx_hash["close"] = True resp = dxpy.api.record_new(dx_hash, **kwargs) self.set_ids(resp["id"], dx_hash["project"])
[ "def", "_new", "(", "self", ",", "dx_hash", ",", "close", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "\"init_from\"", "in", "kwargs", ":", "if", "kwargs", "[", "\"init_from\"", "]", "is", "not", "None", ":", "if", "not", "isinstance", "(", "kwargs", "[", "\"init_from\"", "]", ",", "DXRecord", ")", ":", "raise", "DXError", "(", "\"Expected instance of DXRecord to init_from\"", ")", "dx_hash", "[", "\"initializeFrom\"", "]", "=", "{", "\"id\"", ":", "kwargs", "[", "\"init_from\"", "]", ".", "get_id", "(", ")", ",", "\"project\"", ":", "kwargs", "[", "\"init_from\"", "]", ".", "get_proj_id", "(", ")", "}", "del", "kwargs", "[", "\"init_from\"", "]", "if", "close", ":", "dx_hash", "[", "\"close\"", "]", "=", "True", "resp", "=", "dxpy", ".", "api", ".", "record_new", "(", "dx_hash", ",", "*", "*", "kwargs", ")", "self", ".", "set_ids", "(", "resp", "[", "\"id\"", "]", ",", "dx_hash", "[", "\"project\"", "]", ")" ]
:param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()` containing attributes common to all data object classes. :type dx_hash: dict :param init_from: Record from which to initialize the metadata :type init_from: :class:`DXRecord` :param close: Whether or not to close the record immediately after creating it :type close: boolean Create a new remote record object.
[ ":", "param", "dx_hash", ":", "Standard", "hash", "populated", "in", ":", "func", ":", "dxpy", ".", "bindings", ".", "DXDataObject", ".", "new", "()", "containing", "attributes", "common", "to", "all", "data", "object", "classes", ".", ":", "type", "dx_hash", ":", "dict", ":", "param", "init_from", ":", "Record", "from", "which", "to", "initialize", "the", "metadata", ":", "type", "init_from", ":", ":", "class", ":", "DXRecord", ":", "param", "close", ":", "Whether", "or", "not", "to", "close", "the", "record", "immediately", "after", "creating", "it", ":", "type", "close", ":", "boolean" ]
python
train
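Editor's note: an offline sketch of the init_from handling with plain dicts and a stub record, so the hash-building logic can be followed without any DNAnexus calls. StubRecord and its ids are invented.

# Stubbed version of the initializeFrom logic; no dxpy API involved.
class StubRecord:
    def get_id(self):
        return 'record-xxxx'       # hypothetical object id
    def get_proj_id(self):
        return 'project-yyyy'      # hypothetical project id

def build_hash(dx_hash, init_from=None, close=False):
    if init_from is not None:
        if not isinstance(init_from, StubRecord):
            raise TypeError('Expected StubRecord to init_from')
        dx_hash['initializeFrom'] = {'id': init_from.get_id(),
                                     'project': init_from.get_proj_id()}
    if close:
        dx_hash['close'] = True
    return dx_hash

print(build_hash({'project': 'project-zzzz'}, StubRecord(), close=True))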
srsudar/eg
eg/config.py
https://github.com/srsudar/eg/blob/96142a74f4416b4a7000c85032c070df713b849e/eg/config.py#L393-L428
def get_custom_color_config_from_egrc(config): """ Get the ColorConfig from the egrc config object. Any colors not defined will be None. """ pound = _get_color_from_config(config, CONFIG_NAMES.pound) heading = _get_color_from_config(config, CONFIG_NAMES.heading) code = _get_color_from_config(config, CONFIG_NAMES.code) backticks = _get_color_from_config(config, CONFIG_NAMES.backticks) prompt = _get_color_from_config(config, CONFIG_NAMES.prompt) pound_reset = _get_color_from_config(config, CONFIG_NAMES.pound_reset) heading_reset = _get_color_from_config( config, CONFIG_NAMES.heading_reset ) code_reset = _get_color_from_config(config, CONFIG_NAMES.code_reset) backticks_reset = _get_color_from_config( config, CONFIG_NAMES.backticks_reset ) prompt_reset = _get_color_from_config(config, CONFIG_NAMES.prompt_reset) result = ColorConfig( pound=pound, heading=heading, code=code, backticks=backticks, prompt=prompt, pound_reset=pound_reset, heading_reset=heading_reset, code_reset=code_reset, backticks_reset=backticks_reset, prompt_reset=prompt_reset ) return result
[ "def", "get_custom_color_config_from_egrc", "(", "config", ")", ":", "pound", "=", "_get_color_from_config", "(", "config", ",", "CONFIG_NAMES", ".", "pound", ")", "heading", "=", "_get_color_from_config", "(", "config", ",", "CONFIG_NAMES", ".", "heading", ")", "code", "=", "_get_color_from_config", "(", "config", ",", "CONFIG_NAMES", ".", "code", ")", "backticks", "=", "_get_color_from_config", "(", "config", ",", "CONFIG_NAMES", ".", "backticks", ")", "prompt", "=", "_get_color_from_config", "(", "config", ",", "CONFIG_NAMES", ".", "prompt", ")", "pound_reset", "=", "_get_color_from_config", "(", "config", ",", "CONFIG_NAMES", ".", "pound_reset", ")", "heading_reset", "=", "_get_color_from_config", "(", "config", ",", "CONFIG_NAMES", ".", "heading_reset", ")", "code_reset", "=", "_get_color_from_config", "(", "config", ",", "CONFIG_NAMES", ".", "code_reset", ")", "backticks_reset", "=", "_get_color_from_config", "(", "config", ",", "CONFIG_NAMES", ".", "backticks_reset", ")", "prompt_reset", "=", "_get_color_from_config", "(", "config", ",", "CONFIG_NAMES", ".", "prompt_reset", ")", "result", "=", "ColorConfig", "(", "pound", "=", "pound", ",", "heading", "=", "heading", ",", "code", "=", "code", ",", "backticks", "=", "backticks", ",", "prompt", "=", "prompt", ",", "pound_reset", "=", "pound_reset", ",", "heading_reset", "=", "heading_reset", ",", "code_reset", "=", "code_reset", ",", "backticks_reset", "=", "backticks_reset", ",", "prompt_reset", "=", "prompt_reset", ")", "return", "result" ]
Get the ColorConfig from the egrc config object. Any colors not defined will be None.
[ "Get", "the", "ColorConfig", "from", "the", "egrc", "config", "object", ".", "Any", "colors", "not", "defined", "will", "be", "None", "." ]
python
train
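Editor's note: the function fans out one lookup per color name, each falling back to None. A configparser sketch of that pattern follows; the [color] section and option names are made up, not eg's actual egrc schema.

# Per-option lookup with a None fallback; schema is hypothetical.
import configparser
import io

raw = "[color]\npound = red\ncode = green\n"
config = configparser.ConfigParser()
config.read_file(io.StringIO(raw))

def get_color(name):
    return config.get('color', name, fallback=None)

colors = {name: get_color(name) for name in ('pound', 'heading', 'code')}
print(colors)  # 'heading' is absent from raw, so it maps to None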
laf/russound
russound/russound.py
https://github.com/laf/russound/blob/683af823f78ad02111f9966dc7b535dd66a1f083/russound/russound.py#L202-L224
def create_send_message(self, string_message, controller, zone=None, parameter=None): """ Creates a message from a string, substituting the necessary parameters, that is ready to send to the socket """ cc = hex(int(controller) - 1).replace('0x', '') # RNET requires controller value to be zero based if zone is not None: zz = hex(int(zone) - 1).replace('0x', '') # RNET requires zone value to be zero based else: zz = '' if parameter is not None: pr = hex(int(parameter)).replace('0x', '') else: pr = '' string_message = string_message.replace('@cc', cc) # Replace controller parameter string_message = string_message.replace('@zz', zz) # Replace zone parameter string_message = string_message.replace('@kk', KEYPAD_CODE) # Replace keypad parameter string_message = string_message.replace('@pr', pr) # Replace specific parameter to message # Split message into an array for each "byte" and add the checksum and end of message bytes send_msg = string_message.split() send_msg = self.calc_checksum(send_msg) return send_msg
[ "def", "create_send_message", "(", "self", ",", "string_message", ",", "controller", ",", "zone", "=", "None", ",", "parameter", "=", "None", ")", ":", "cc", "=", "hex", "(", "int", "(", "controller", ")", "-", "1", ")", ".", "replace", "(", "'0x'", ",", "''", ")", "# RNET requires controller value to be zero based", "if", "zone", "is", "not", "None", ":", "zz", "=", "hex", "(", "int", "(", "zone", ")", "-", "1", ")", ".", "replace", "(", "'0x'", ",", "''", ")", "# RNET requires zone value to be zero based", "else", ":", "zz", "=", "''", "if", "parameter", "is", "not", "None", ":", "pr", "=", "hex", "(", "int", "(", "parameter", ")", ")", ".", "replace", "(", "'0x'", ",", "''", ")", "else", ":", "pr", "=", "''", "string_message", "=", "string_message", ".", "replace", "(", "'@cc'", ",", "cc", ")", "# Replace controller parameter", "string_message", "=", "string_message", ".", "replace", "(", "'@zz'", ",", "zz", ")", "# Replace zone parameter", "string_message", "=", "string_message", ".", "replace", "(", "'@kk'", ",", "KEYPAD_CODE", ")", "# Replace keypad parameter", "string_message", "=", "string_message", ".", "replace", "(", "'@pr'", ",", "pr", ")", "# Replace specific parameter to message", "# Split message into an array for each \"byte\" and add the checksum and end of message bytes", "send_msg", "=", "string_message", ".", "split", "(", ")", "send_msg", "=", "self", ".", "calc_checksum", "(", "send_msg", ")", "return", "send_msg" ]
Creates a message from a string, substituting the necessary parameters, that is ready to send to the socket
[ "Creates", "a", "message", "from", "a", "string", "substituting", "the", "necessary", "parameters", "that", "is", "ready", "to", "send", "to", "the", "socket" ]
python
train
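Editor's note: the interesting steps are the zero-based hex conversion and the placeholder substitution. A standalone sketch follows; the template string and keypad code are invented for illustration, not taken from the russound module.

# Placeholder substitution sketch (hypothetical RNET-style template).
KEYPAD_CODE = '70'

def fill_template(template, controller, zone):
    cc = hex(int(controller) - 1).replace('0x', '')  # RNET is zero-based
    zz = hex(int(zone) - 1).replace('0x', '')
    msg = template.replace('@cc', cc).replace('@zz', zz)
    return msg.replace('@kk', KEYPAD_CODE).split()

print(fill_template('F0 @cc 00 7F 00 @kk 05 02 02 00 00 @zz 00 01', 1, 4))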
crytic/slither
slither/detectors/functions/complex_function.py
https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/detectors/functions/complex_function.py#L36-L73
def detect_complex_func(func): """Detect the cyclomatic complexity of the contract functions shouldn't be greater than 7 """ result = [] code_complexity = compute_cyclomatic_complexity(func) if code_complexity > ComplexFunction.MAX_CYCLOMATIC_COMPLEXITY: result.append({ "func": func, "cause": ComplexFunction.CAUSE_CYCLOMATIC }) """Detect the number of external calls in the func shouldn't be greater than 5 """ count = 0 for node in func.nodes: for ir in node.irs: if isinstance(ir, (HighLevelCall, LowLevelCall, LibraryCall)): count += 1 if count > ComplexFunction.MAX_EXTERNAL_CALLS: result.append({ "func": func, "cause": ComplexFunction.CAUSE_EXTERNAL_CALL }) """Checks the number of the state variables written shouldn't be greater than 10 """ if len(func.state_variables_written) > ComplexFunction.MAX_STATE_VARIABLES: result.append({ "func": func, "cause": ComplexFunction.CAUSE_STATE_VARS }) return result
[ "def", "detect_complex_func", "(", "func", ")", ":", "result", "=", "[", "]", "code_complexity", "=", "compute_cyclomatic_complexity", "(", "func", ")", "if", "code_complexity", ">", "ComplexFunction", ".", "MAX_CYCLOMATIC_COMPLEXITY", ":", "result", ".", "append", "(", "{", "\"func\"", ":", "func", ",", "\"cause\"", ":", "ComplexFunction", ".", "CAUSE_CYCLOMATIC", "}", ")", "\"\"\"Detect the number of external calls in the func\n shouldn't be greater than 5\n \"\"\"", "count", "=", "0", "for", "node", "in", "func", ".", "nodes", ":", "for", "ir", "in", "node", ".", "irs", ":", "if", "isinstance", "(", "ir", ",", "(", "HighLevelCall", ",", "LowLevelCall", ",", "LibraryCall", ")", ")", ":", "count", "+=", "1", "if", "count", ">", "ComplexFunction", ".", "MAX_EXTERNAL_CALLS", ":", "result", ".", "append", "(", "{", "\"func\"", ":", "func", ",", "\"cause\"", ":", "ComplexFunction", ".", "CAUSE_EXTERNAL_CALL", "}", ")", "\"\"\"Checks the number of the state variables written\n shouldn't be greater than 10\n \"\"\"", "if", "len", "(", "func", ".", "state_variables_written", ")", ">", "ComplexFunction", ".", "MAX_STATE_VARIABLES", ":", "result", ".", "append", "(", "{", "\"func\"", ":", "func", ",", "\"cause\"", ":", "ComplexFunction", ".", "CAUSE_STATE_VARS", "}", ")", "return", "result" ]
Detect the cyclomatic complexity of the contract functions shouldn't be greater than 7
[ "Detect", "the", "cyclomatic", "complexity", "of", "the", "contract", "functions", "shouldn", "t", "be", "greater", "than", "7" ]
python
train
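Editor's note: the detector reduces to three threshold checks. A stub sketch of the same shape that runs without slither installed; the thresholds are copied from the record, the function object is faked.

# Threshold checks over a faked function object; no slither imports.
from dataclasses import dataclass, field

MAX_CYCLOMATIC, MAX_CALLS, MAX_STATE_VARS = 7, 5, 10

@dataclass
class StubFunc:
    complexity: int = 0
    external_calls: int = 0
    state_vars_written: list = field(default_factory=list)

def detect(func):
    causes = []
    if func.complexity > MAX_CYCLOMATIC:
        causes.append('cyclomatic')
    if func.external_calls > MAX_CALLS:
        causes.append('external-call')
    if len(func.state_vars_written) > MAX_STATE_VARS:
        causes.append('state-vars')
    return causes

print(detect(StubFunc(complexity=9, external_calls=2)))  # ['cyclomatic']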
jobovy/galpy
galpy/df/quasiisothermaldf.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/df/quasiisothermaldf.py#L1628-L1698
def sampleV(self,R,z,n=1,**kwargs): """ NAME: sampleV PURPOSE: sample a radial, azimuthal, and vertical velocity at R,z INPUT: R - Galactocentric distance (can be Quantity) z - height (can be Quantity) n= number of distances to sample OUTPUT: list of samples HISTORY: 2012-12-17 - Written - Bovy (IAS) """ use_physical= kwargs.pop('use_physical',True) vo= kwargs.pop('vo',None) if vo is None and hasattr(self,'_voSet') and self._voSet: vo= self._vo if _APY_LOADED and isinstance(vo,units.Quantity): vo= vo.to(units.km/units.s).value #Determine the maximum of the velocity distribution maxVR= 0. maxVz= 0. maxVT= optimize.fmin_powell((lambda x: -self(R,0.,x,z,0.,log=True, use_physical=False)), 1.) logmaxVD= self(R,maxVR,maxVT,z,maxVz,log=True,use_physical=False) #Now rejection-sample vRs= [] vTs= [] vzs= [] while len(vRs) < n: nmore= n-len(vRs)+1 #sample propvR= numpy.random.normal(size=nmore)*2.*self._sr propvT= numpy.random.normal(size=nmore)*2.*self._sr+maxVT propvz= numpy.random.normal(size=nmore)*2.*self._sz VDatprop= self(R+numpy.zeros(nmore), propvR,propvT,z+numpy.zeros(nmore), propvz,log=True,use_physical=False)-logmaxVD VDatprop-= -0.5*(propvR**2./4./self._sr**2.+propvz**2./4./self._sz**2.\ +(propvT-maxVT)**2./4./self._sr**2.) VDatprop= numpy.reshape(VDatprop,(nmore)) indx= (VDatprop > numpy.log(numpy.random.random(size=nmore))) #accept vRs.extend(list(propvR[indx])) vTs.extend(list(propvT[indx])) vzs.extend(list(propvz[indx])) out= numpy.empty((n,3)) out[:,0]= vRs[0:n] out[:,1]= vTs[0:n] out[:,2]= vzs[0:n] if use_physical and not vo is None: if _APY_UNITS: return units.Quantity(out*vo,unit=units.km/units.s) else: return out*vo else: return out
[ "def", "sampleV", "(", "self", ",", "R", ",", "z", ",", "n", "=", "1", ",", "*", "*", "kwargs", ")", ":", "use_physical", "=", "kwargs", ".", "pop", "(", "'use_physical'", ",", "True", ")", "vo", "=", "kwargs", ".", "pop", "(", "'vo'", ",", "None", ")", "if", "vo", "is", "None", "and", "hasattr", "(", "self", ",", "'_voSet'", ")", "and", "self", ".", "_voSet", ":", "vo", "=", "self", ".", "_vo", "if", "_APY_LOADED", "and", "isinstance", "(", "vo", ",", "units", ".", "Quantity", ")", ":", "vo", "=", "vo", ".", "to", "(", "units", ".", "km", "/", "units", ".", "s", ")", ".", "value", "#Determine the maximum of the velocity distribution", "maxVR", "=", "0.", "maxVz", "=", "0.", "maxVT", "=", "optimize", ".", "fmin_powell", "(", "(", "lambda", "x", ":", "-", "self", "(", "R", ",", "0.", ",", "x", ",", "z", ",", "0.", ",", "log", "=", "True", ",", "use_physical", "=", "False", ")", ")", ",", "1.", ")", "logmaxVD", "=", "self", "(", "R", ",", "maxVR", ",", "maxVT", ",", "z", ",", "maxVz", ",", "log", "=", "True", ",", "use_physical", "=", "False", ")", "#Now rejection-sample", "vRs", "=", "[", "]", "vTs", "=", "[", "]", "vzs", "=", "[", "]", "while", "len", "(", "vRs", ")", "<", "n", ":", "nmore", "=", "n", "-", "len", "(", "vRs", ")", "+", "1", "#sample", "propvR", "=", "numpy", ".", "random", ".", "normal", "(", "size", "=", "nmore", ")", "*", "2.", "*", "self", ".", "_sr", "propvT", "=", "numpy", ".", "random", ".", "normal", "(", "size", "=", "nmore", ")", "*", "2.", "*", "self", ".", "_sr", "+", "maxVT", "propvz", "=", "numpy", ".", "random", ".", "normal", "(", "size", "=", "nmore", ")", "*", "2.", "*", "self", ".", "_sz", "VDatprop", "=", "self", "(", "R", "+", "numpy", ".", "zeros", "(", "nmore", ")", ",", "propvR", ",", "propvT", ",", "z", "+", "numpy", ".", "zeros", "(", "nmore", ")", ",", "propvz", ",", "log", "=", "True", ",", "use_physical", "=", "False", ")", "-", "logmaxVD", "VDatprop", "-=", "-", "0.5", "*", "(", "propvR", "**", "2.", "/", "4.", "/", "self", ".", "_sr", "**", "2.", "+", "propvz", "**", "2.", "/", "4.", "/", "self", ".", "_sz", "**", "2.", "+", "(", "propvT", "-", "maxVT", ")", "**", "2.", "/", "4.", "/", "self", ".", "_sr", "**", "2.", ")", "VDatprop", "=", "numpy", ".", "reshape", "(", "VDatprop", ",", "(", "nmore", ")", ")", "indx", "=", "(", "VDatprop", ">", "numpy", ".", "log", "(", "numpy", ".", "random", ".", "random", "(", "size", "=", "nmore", ")", ")", ")", "#accept", "vRs", ".", "extend", "(", "list", "(", "propvR", "[", "indx", "]", ")", ")", "vTs", ".", "extend", "(", "list", "(", "propvT", "[", "indx", "]", ")", ")", "vzs", ".", "extend", "(", "list", "(", "propvz", "[", "indx", "]", ")", ")", "out", "=", "numpy", ".", "empty", "(", "(", "n", ",", "3", ")", ")", "out", "[", ":", ",", "0", "]", "=", "vRs", "[", "0", ":", "n", "]", "out", "[", ":", ",", "1", "]", "=", "vTs", "[", "0", ":", "n", "]", "out", "[", ":", ",", "2", "]", "=", "vzs", "[", "0", ":", "n", "]", "if", "use_physical", "and", "not", "vo", "is", "None", ":", "if", "_APY_UNITS", ":", "return", "units", ".", "Quantity", "(", "out", "*", "vo", ",", "unit", "=", "units", ".", "km", "/", "units", ".", "s", ")", "else", ":", "return", "out", "*", "vo", "else", ":", "return", "out" ]
NAME: sampleV PURPOSE: sample a radial, azimuthal, and vertical velocity at R,z INPUT: R - Galactocentric distance (can be Quantity) z - height (can be Quantity) n= number of distances to sample OUTPUT: list of samples HISTORY: 2012-12-17 - Written - Bovy (IAS)
[ "NAME", ":" ]
python
train
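Editor's note: the core of sampleV is rejection sampling against a Gaussian proposal. Below is a one-dimensional numpy sketch of the same accept/reject loop; log_target is a made-up log-density standing in for galpy's quasi-isothermal DF.

# 1-D rejection sampling with a Gaussian proposal (illustrative target).
import numpy

def log_target(v):
    return -0.5 * v**2 - 0.1 * v**4   # maximum sits at v = 0

sigma_prop = 2.0                       # wide proposal, as with 2*sigma above
logmax = log_target(0.0)
samples = []
while len(samples) < 1000:
    nmore = 1000 - len(samples) + 1
    prop = numpy.random.normal(size=nmore) * sigma_prop
    # log of target over the scaled Gaussian envelope; always <= 0 here
    log_ratio = log_target(prop) - logmax + 0.5 * prop**2 / sigma_prop**2
    accept = log_ratio > numpy.log(numpy.random.random(size=nmore))
    samples.extend(prop[accept])

print(numpy.mean(samples[:1000]), numpy.std(samples[:1000]))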