id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
sequence
docstring
stringlengths
3
17.3k
docstring_tokens
sequence
sha
stringlengths
40
40
url
stringlengths
87
242
249,600
etcher-be/elib_run
elib_run/_run/_capture_output.py
capture_output_from_running_process
def capture_output_from_running_process(context: RunContext) -> None:
    """
    Parses output from a running sub-process.

    Decodes and filters the process output line by line, buffering it.
    If "mute" is False, sends the output back in real time.

    :param context: run context
    :type context: RunContext
    """
    # Drain the capture buffer one line at a time.  A while-loop is used
    # instead of the previous tail recursion: one recursive call was made per
    # output line, so a long burst of subprocess output could exhaust the
    # interpreter's recursion limit (RecursionError).
    while True:
        _output = context.capture.readline(block=False)
        if not _output:
            # No more buffered output available right now.
            return None
        line = decode_and_filter(_output, context)
        if line:
            if not context.mute:
                # Print in real time
                _LOGGER_PROCESS.debug(line)
            # Buffer the line
            context.process_output_chunks.append(line)
python
def capture_output_from_running_process(context: RunContext) -> None:
    """
    Parses output from a running sub-process.

    Decodes and filters the process output line by line, buffering it.
    If "mute" is False, sends the output back in real time.

    :param context: run context
    :type context: RunContext
    """
    # Drain the capture buffer one line at a time.  A while-loop is used
    # instead of the previous tail recursion: one recursive call was made per
    # output line, so a long burst of subprocess output could exhaust the
    # interpreter's recursion limit (RecursionError).
    while True:
        _output = context.capture.readline(block=False)
        if not _output:
            # No more buffered output available right now.
            return None
        line = decode_and_filter(_output, context)
        if line:
            if not context.mute:
                # Print in real time
                _LOGGER_PROCESS.debug(line)
            # Buffer the line
            context.process_output_chunks.append(line)
[ "def", "capture_output_from_running_process", "(", "context", ":", "RunContext", ")", "->", "None", ":", "# Get the raw output one line at a time", "_output", "=", "context", ".", "capture", ".", "readline", "(", "block", "=", "False", ")", "if", "_output", ":", "line", "=", "decode_and_filter", "(", "_output", ",", "context", ")", "if", "line", ":", "if", "not", "context", ".", "mute", ":", "# Print in real time", "_LOGGER_PROCESS", ".", "debug", "(", "line", ")", "# Buffer the line", "context", ".", "process_output_chunks", ".", "append", "(", "line", ")", "# Get additional output if any", "return", "capture_output_from_running_process", "(", "context", ")", "return", "None" ]
Parses output from a running sub-process Decodes and filters the process output line by line, buffering it If "mute" is False, sends the output back in real time :param context: run context :type context: _RunContext
[ "Parses", "output", "from", "a", "running", "sub", "-", "process" ]
c9d8ba9f067ab90c5baa27375a92b23f1b97cdde
https://github.com/etcher-be/elib_run/blob/c9d8ba9f067ab90c5baa27375a92b23f1b97cdde/elib_run/_run/_capture_output.py#L54-L84
249,601
awacha/credolib
credolib/atsas.py
autorg
def autorg(filename, mininterval=None, qminrg=None, qmaxrg=None, noprint=True):
    """Execute autorg.

    Inputs:
        filename: either a name of an ascii file, or an instance of Curve.
        mininterval: the minimum number of points in the Guinier range
        qminrg: the maximum value of qmin*Rg. Default of autorg is 1.0
        qmaxrg: the maximum value of qmax*Rg. Default of autorg is 1.3
        noprint: if the output of autorg should be redirected to the
            null device.

    Outputs:
        Rg as an ErrorValue
        I0 as an ErrorValue
        qmin: the lower end of the chosen Guinier range
        qmax: the upper end of the chosen Guinier range
        quality: the quality parameter, between 0 and 1
        aggregation: float, the extent of aggregation
    """
    curve = None
    if isinstance(filename, Curve):
        # A Curve instance was given: dump it to a temporary file so that
        # the external autorg program can read it.
        curve = filename
        with tempfile.NamedTemporaryFile('w+b', delete=False) as tmpfile:
            curve.save(tmpfile)
            filename = tmpfile.name
    cmdline = ['autorg', filename, '-f', 'ssv']
    if mininterval is not None:
        cmdline.extend(['--mininterval', str(mininterval)])
    if qminrg is not None:
        cmdline.extend(['--sminrg', str(qminrg)])
    if qmaxrg is not None:
        cmdline.extend(['--smaxrg', str(qmaxrg)])
    result = execute_command(cmdline, noprint=noprint)
    # autorg echoes the analysed file name as the trailing (9th) field.
    Rg, dRg, I0, dI0, idxfirst, idxlast, quality, aggregation, filename = \
        result[0].split(None, 8)
    if curve is None:
        # A file name was given originally: load the curve from it.
        curve = Curve.new_from_file(filename)
    else:
        # We created the temporary file above: remove it now.
        os.unlink(filename)
    return (ErrorValue(float(Rg), float(dRg)),
            ErrorValue(float(I0), float(dI0)),
            curve.q[int(idxfirst) - 1],
            curve.q[int(idxlast) - 1],
            float(quality),
            float(aggregation))
python
def autorg(filename, mininterval=None, qminrg=None, qmaxrg=None, noprint=True):
    """Execute autorg.

    Inputs:
        filename: either a name of an ascii file, or an instance of Curve.
        mininterval: the minimum number of points in the Guinier range
        qminrg: the maximum value of qmin*Rg. Default of autorg is 1.0
        qmaxrg: the maximum value of qmax*Rg. Default of autorg is 1.3
        noprint: if the output of autorg should be redirected to the
            null device.

    Outputs:
        Rg as an ErrorValue
        I0 as an ErrorValue
        qmin: the lower end of the chosen Guinier range
        qmax: the upper end of the chosen Guinier range
        quality: the quality parameter, between 0 and 1
        aggregation: float, the extent of aggregation
    """
    curve = None
    if isinstance(filename, Curve):
        # A Curve instance was given: dump it to a temporary file so that
        # the external autorg program can read it.
        curve = filename
        with tempfile.NamedTemporaryFile('w+b', delete=False) as tmpfile:
            curve.save(tmpfile)
            filename = tmpfile.name
    cmdline = ['autorg', filename, '-f', 'ssv']
    if mininterval is not None:
        cmdline.extend(['--mininterval', str(mininterval)])
    if qminrg is not None:
        cmdline.extend(['--sminrg', str(qminrg)])
    if qmaxrg is not None:
        cmdline.extend(['--smaxrg', str(qmaxrg)])
    result = execute_command(cmdline, noprint=noprint)
    # autorg echoes the analysed file name as the trailing (9th) field.
    Rg, dRg, I0, dI0, idxfirst, idxlast, quality, aggregation, filename = \
        result[0].split(None, 8)
    if curve is None:
        # A file name was given originally: load the curve from it.
        curve = Curve.new_from_file(filename)
    else:
        # We created the temporary file above: remove it now.
        os.unlink(filename)
    return (ErrorValue(float(Rg), float(dRg)),
            ErrorValue(float(I0), float(dI0)),
            curve.q[int(idxfirst) - 1],
            curve.q[int(idxlast) - 1],
            float(quality),
            float(aggregation))
[ "def", "autorg", "(", "filename", ",", "mininterval", "=", "None", ",", "qminrg", "=", "None", ",", "qmaxrg", "=", "None", ",", "noprint", "=", "True", ")", ":", "if", "isinstance", "(", "filename", ",", "Curve", ")", ":", "curve", "=", "filename", "with", "tempfile", ".", "NamedTemporaryFile", "(", "'w+b'", ",", "delete", "=", "False", ")", "as", "f", ":", "curve", ".", "save", "(", "f", ")", "filename", "=", "f", ".", "name", "cmdline", "=", "[", "'autorg'", ",", "filename", ",", "'-f'", ",", "'ssv'", "]", "if", "mininterval", "is", "not", "None", ":", "cmdline", ".", "extend", "(", "[", "'--mininterval'", ",", "str", "(", "mininterval", ")", "]", ")", "if", "qminrg", "is", "not", "None", ":", "cmdline", ".", "extend", "(", "[", "'--sminrg'", ",", "str", "(", "qminrg", ")", "]", ")", "if", "qmaxrg", "is", "not", "None", ":", "cmdline", ".", "extend", "(", "[", "'--smaxrg'", ",", "str", "(", "qmaxrg", ")", "]", ")", "result", "=", "execute_command", "(", "cmdline", ",", "noprint", "=", "noprint", ")", "Rg", ",", "dRg", ",", "I0", ",", "dI0", ",", "idxfirst", ",", "idxlast", ",", "quality", ",", "aggregation", ",", "filename", "=", "result", "[", "0", "]", ".", "split", "(", "None", ",", "8", ")", "try", ":", "curve", "except", "NameError", ":", "curve", "=", "Curve", ".", "new_from_file", "(", "filename", ")", "else", ":", "os", ".", "unlink", "(", "filename", ")", "return", "ErrorValue", "(", "float", "(", "Rg", ")", ",", "float", "(", "dRg", ")", ")", ",", "ErrorValue", "(", "float", "(", "I0", ")", ",", "float", "(", "dI0", ")", ")", ",", "curve", ".", "q", "[", "int", "(", "idxfirst", ")", "-", "1", "]", ",", "curve", ".", "q", "[", "int", "(", "idxlast", ")", "-", "1", "]", ",", "float", "(", "quality", ")", ",", "float", "(", "aggregation", ")" ]
Execute autorg. Inputs: filename: either a name of an ascii file, or an instance of Curve. mininterval: the minimum number of points in the Guinier range qminrg: the maximum value of qmin*Rg. Default of autorg is 1.0 qmaxrg: the maximum value of qmax*Rg. Default of autorg is 1.3 noprint: if the output of autorg should be redirected to the null device. Outputs: Rg as an ErrorValue I0 as an ErrorValue qmin: the lower end of the chosen Guinier range qmax: the upper end of the chosen Guinier range quality: the quality parameter, between 0 and 1 aggregation: float, the extent of aggregation
[ "Execute", "autorg", "." ]
11c0be3eea7257d3d6e13697d3e76ce538f2f1b2
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/atsas.py#L156-L197
249,602
awacha/credolib
credolib/atsas.py
datcmp
def datcmp(*curves, alpha=None, adjust=None, test='CORMAP'):
    """Run datcmp on the scattering curves.

    Inputs:
        *curves: scattering curves as positional arguments
        alpha: confidence parameter
        adjust: adjustment type (string), see the help of datcmp for details
        test: test (string), see the help of datcmp for details

    Outputs:
        matC: the C matrix
        matp: the matrix of the p values comparing the i-th and j-th exposure
        matpadj: adjusted p-matrix of the exposures
        ok: list of the same length as the number of curves. If True, the
            given curve does not differ significantly from the others.

    Raises:
        ValueError: if the curves do not all have the same length.
    """
    if len({len(c) for c in curves}) != 1:
        raise ValueError('All curves have to be of the same length.')
    datcmpargs = []
    if alpha is not None:
        datcmpargs.append('--alpha=%f' % alpha)
    if adjust is not None:
        datcmpargs.append('--adjust=%s' % adjust)
    if test is not None:
        datcmpargs.append('--test=%s' % test)
    # Raw strings: the originals relied on invalid escape sequences such as
    # '\s' in plain string literals, which modern Python flags as a
    # SyntaxWarning (and will eventually reject).  Compile once, outside the
    # per-line parsing loop.
    pair_re = re.compile(
        r'^\s*(?P<i>\d+)\s*vs\.\s*(?P<j>\d+)\s*(?P<C>\d*\.\d*)\s*'
        r'(?P<p>\d*\.\d*)\s*(?P<adjp>\d*\.\d*)[\s\*]{1}$')
    ack_re = re.compile(r'\s*(?P<i>\d+)(?P<ack>[\*\s]{1})\s*')
    with tempfile.TemporaryDirectory(prefix='credolib_datcmp') as td:
        # Write each curve to a three-column (q, I, dI) text file for datcmp.
        for i, c in enumerate(curves):
            mat = np.zeros((len(c), 3))
            mat[:, 0] = c.q
            mat[:, 1] = c.Intensity
            mat[:, 2] = c.Error
            np.savetxt(os.path.join(td, 'curve_%d.dat' % i), mat)
        # NaN-filled defaults; entries stay NaN where datcmp gives no answer.
        matC = np.zeros((len(curves), len(curves))) + np.nan
        matp = np.zeros((len(curves), len(curves))) + np.nan
        matpadj = np.zeros((len(curves), len(curves))) + np.nan
        ok = np.zeros(len(curves)) + np.nan
        try:
            results = subprocess.check_output(
                ['datcmp'] + datcmpargs +
                [os.path.join(td, 'curve_%d.dat' % i)
                 for i in range(len(curves))]).decode('utf-8')
        except subprocess.CalledProcessError:
            # datcmp signalled failure: return the NaN-filled matrices.
            pass
        else:
            for l in results.split('\n'):
                m = pair_re.match(l)
                if m is not None:
                    # Pairwise comparison line: fill both symmetric entries.
                    i = int(m.group('i')) - 1
                    j = int(m.group('j')) - 1
                    matC[i, j] = matC[j, i] = float(m.group('C'))
                    matp[i, j] = matp[j, i] = float(m.group('p'))
                    matpadj[i, j] = matpadj[j, i] = float(m.group('adjp'))
                else:
                    m = ack_re.match(l)
                    if m is not None:
                        # Per-curve acknowledgement line: '*' marks "ok".
                        ok[int(m.group('i')) - 1] = (m.group('ack') == '*')
    return matC, matp, matpadj, ok
python
def datcmp(*curves, alpha=None, adjust=None, test='CORMAP'):
    """Run datcmp on the scattering curves.

    Inputs:
        *curves: scattering curves as positional arguments
        alpha: confidence parameter
        adjust: adjustment type (string), see the help of datcmp for details
        test: test (string), see the help of datcmp for details

    Outputs:
        matC: the C matrix
        matp: the matrix of the p values comparing the i-th and j-th exposure
        matpadj: adjusted p-matrix of the exposures
        ok: list of the same length as the number of curves. If True, the
            given curve does not differ significantly from the others.

    Raises:
        ValueError: if the curves do not all have the same length.
    """
    if len({len(c) for c in curves}) != 1:
        raise ValueError('All curves have to be of the same length.')
    datcmpargs = []
    if alpha is not None:
        datcmpargs.append('--alpha=%f' % alpha)
    if adjust is not None:
        datcmpargs.append('--adjust=%s' % adjust)
    if test is not None:
        datcmpargs.append('--test=%s' % test)
    # Raw strings: the originals relied on invalid escape sequences such as
    # '\s' in plain string literals, which modern Python flags as a
    # SyntaxWarning (and will eventually reject).  Compile once, outside the
    # per-line parsing loop.
    pair_re = re.compile(
        r'^\s*(?P<i>\d+)\s*vs\.\s*(?P<j>\d+)\s*(?P<C>\d*\.\d*)\s*'
        r'(?P<p>\d*\.\d*)\s*(?P<adjp>\d*\.\d*)[\s\*]{1}$')
    ack_re = re.compile(r'\s*(?P<i>\d+)(?P<ack>[\*\s]{1})\s*')
    with tempfile.TemporaryDirectory(prefix='credolib_datcmp') as td:
        # Write each curve to a three-column (q, I, dI) text file for datcmp.
        for i, c in enumerate(curves):
            mat = np.zeros((len(c), 3))
            mat[:, 0] = c.q
            mat[:, 1] = c.Intensity
            mat[:, 2] = c.Error
            np.savetxt(os.path.join(td, 'curve_%d.dat' % i), mat)
        # NaN-filled defaults; entries stay NaN where datcmp gives no answer.
        matC = np.zeros((len(curves), len(curves))) + np.nan
        matp = np.zeros((len(curves), len(curves))) + np.nan
        matpadj = np.zeros((len(curves), len(curves))) + np.nan
        ok = np.zeros(len(curves)) + np.nan
        try:
            results = subprocess.check_output(
                ['datcmp'] + datcmpargs +
                [os.path.join(td, 'curve_%d.dat' % i)
                 for i in range(len(curves))]).decode('utf-8')
        except subprocess.CalledProcessError:
            # datcmp signalled failure: return the NaN-filled matrices.
            pass
        else:
            for l in results.split('\n'):
                m = pair_re.match(l)
                if m is not None:
                    # Pairwise comparison line: fill both symmetric entries.
                    i = int(m.group('i')) - 1
                    j = int(m.group('j')) - 1
                    matC[i, j] = matC[j, i] = float(m.group('C'))
                    matp[i, j] = matp[j, i] = float(m.group('p'))
                    matpadj[i, j] = matpadj[j, i] = float(m.group('adjp'))
                else:
                    m = ack_re.match(l)
                    if m is not None:
                        # Per-curve acknowledgement line: '*' marks "ok".
                        ok[int(m.group('i')) - 1] = (m.group('ack') == '*')
    return matC, matp, matpadj, ok
[ "def", "datcmp", "(", "*", "curves", ",", "alpha", "=", "None", ",", "adjust", "=", "None", ",", "test", "=", "'CORMAP'", ")", ":", "if", "len", "(", "{", "len", "(", "c", ")", "for", "c", "in", "curves", "}", ")", "!=", "1", ":", "raise", "ValueError", "(", "'All curves have to be of the same length.'", ")", "datcmpargs", "=", "[", "]", "if", "alpha", "is", "not", "None", ":", "datcmpargs", ".", "append", "(", "'--alpha=%f'", "%", "alpha", ")", "if", "adjust", "is", "not", "None", ":", "datcmpargs", ".", "append", "(", "'--adjust=%s'", "%", "adjust", ")", "if", "test", "is", "not", "None", ":", "datcmpargs", ".", "append", "(", "'--test=%s'", "%", "test", ")", "with", "tempfile", ".", "TemporaryDirectory", "(", "prefix", "=", "'credolib_datcmp'", ")", "as", "td", ":", "for", "i", ",", "c", "in", "enumerate", "(", "curves", ")", ":", "mat", "=", "np", ".", "zeros", "(", "(", "len", "(", "c", ")", ",", "3", ")", ")", "mat", "[", ":", ",", "0", "]", "=", "c", ".", "q", "mat", "[", ":", ",", "1", "]", "=", "c", ".", "Intensity", "mat", "[", ":", ",", "2", "]", "=", "c", ".", "Error", "np", ".", "savetxt", "(", "os", ".", "path", ".", "join", "(", "td", ",", "'curve_%d.dat'", "%", "i", ")", ",", "mat", ")", "matC", "=", "np", ".", "zeros", "(", "(", "len", "(", "curves", ")", ",", "len", "(", "curves", ")", ")", ")", "+", "np", ".", "nan", "matp", "=", "np", ".", "zeros", "(", "(", "len", "(", "curves", ")", ",", "len", "(", "curves", ")", ")", ")", "+", "np", ".", "nan", "matpadj", "=", "np", ".", "zeros", "(", "(", "len", "(", "curves", ")", ",", "len", "(", "curves", ")", ")", ")", "+", "np", ".", "nan", "ok", "=", "np", ".", "zeros", "(", "len", "(", "curves", ")", ")", "+", "np", ".", "nan", "try", ":", "results", "=", "subprocess", ".", "check_output", "(", "[", "'datcmp'", "]", "+", "datcmpargs", "+", "[", "os", ".", "path", ".", "join", "(", "td", ",", "'curve_%d.dat'", "%", "i", ")", "for", "i", "in", "range", "(", "len", "(", "curves", ")", ")", "]", ")", 
".", "decode", "(", "'utf-8'", ")", "except", "subprocess", ".", "CalledProcessError", ":", "pass", "else", ":", "for", "l", "in", "results", ".", "split", "(", "'\\n'", ")", ":", "m", "=", "re", ".", "match", "(", "'^\\s*(?P<i>\\d+)\\s*vs\\.\\s*(?P<j>\\d+)\\s*(?P<C>\\d*\\.\\d*)\\s*(?P<p>\\d*\\.\\d*)\\s*(?P<adjp>\\d*\\.\\d*)[\\s\\*]{1}$'", ",", "l", ")", "if", "m", "is", "not", "None", ":", "i", "=", "int", "(", "m", ".", "group", "(", "'i'", ")", ")", "-", "1", "j", "=", "int", "(", "m", ".", "group", "(", "'j'", ")", ")", "-", "1", "matC", "[", "i", ",", "j", "]", "=", "matC", "[", "j", ",", "i", "]", "=", "float", "(", "m", ".", "group", "(", "'C'", ")", ")", "matp", "[", "i", ",", "j", "]", "=", "matp", "[", "j", ",", "i", "]", "=", "float", "(", "m", ".", "group", "(", "'p'", ")", ")", "matpadj", "[", "i", ",", "j", "]", "=", "matpadj", "[", "j", ",", "i", "]", "=", "float", "(", "m", ".", "group", "(", "'adjp'", ")", ")", "else", ":", "m", "=", "re", ".", "match", "(", "'\\s*(?P<i>\\d+)(?P<ack>[\\*\\s]{1})\\s*'", ",", "l", ")", "if", "m", "is", "not", "None", ":", "ok", "[", "int", "(", "m", ".", "group", "(", "'i'", ")", ")", "-", "1", "]", "=", "(", "m", ".", "group", "(", "'ack'", ")", "==", "'*'", ")", "return", "matC", ",", "matp", ",", "matpadj", ",", "ok" ]
Run datcmp on the scattering curves. Inputs: *curves: scattering curves as positional arguments alpha: confidence parameter adjust: adjustment type (string), see the help of datcmp for details test: test (string), see the help of datcmp for details Outputs: matC: the C matrix matp: the matrix of the p values comparing the i-th and j-th exposure matpadj: adjusted p-matrix of the exposures ok: list of the same length as the number of curves. If True, the given curve does not differ significantly from the others.
[ "Run", "datcmp", "on", "the", "scattering", "curves", "." ]
11c0be3eea7257d3d6e13697d3e76ce538f2f1b2
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/atsas.py#L371-L428
249,603
awacha/credolib
credolib/atsas.py
datporod
def datporod(gnomoutfile):
    """Run datporod and return the estimated Porod volume.

    Returns:
        Radius of gyration found in the input file
        I0 found in the input file
        Vporod: the estimated Porod volume
    """
    # datporod prints whitespace-separated numbers; the first three fields
    # are Rg, I0 and the Porod volume.
    output = subprocess.check_output(['datporod', gnomoutfile])
    fields = output.decode('utf-8').strip().split()
    return float(fields[0]), float(fields[1]), float(fields[2])
python
def datporod(gnomoutfile):
    """Run datporod and return the estimated Porod volume.

    Returns:
        Radius of gyration found in the input file
        I0 found in the input file
        Vporod: the estimated Porod volume
    """
    # datporod prints whitespace-separated numbers; the first three fields
    # are Rg, I0 and the Porod volume.
    output = subprocess.check_output(['datporod', gnomoutfile])
    fields = output.decode('utf-8').strip().split()
    return float(fields[0]), float(fields[1]), float(fields[2])
[ "def", "datporod", "(", "gnomoutfile", ")", ":", "results", "=", "subprocess", ".", "check_output", "(", "[", "'datporod'", ",", "gnomoutfile", "]", ")", ".", "decode", "(", "'utf-8'", ")", ".", "strip", "(", ")", ".", "split", "(", ")", "return", "float", "(", "results", "[", "0", "]", ")", ",", "float", "(", "results", "[", "1", "]", ")", ",", "float", "(", "results", "[", "2", "]", ")" ]
Run datporod and return the estimated Porod volume. Returns: Radius of gyration found in the input file I0 found in the input file Vporod: the estimated Porod volume
[ "Run", "datporod", "and", "return", "the", "estimated", "Porod", "volume", "." ]
11c0be3eea7257d3d6e13697d3e76ce538f2f1b2
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/atsas.py#L431-L440
249,604
awacha/credolib
credolib/atsas.py
gnom
def gnom(curve, Rmax, outputfilename=None, Npoints_realspace=None, initial_alpha=None):
    """Run GNOM on the dataset.

    Inputs:
        curve: an instance of sastool.classes2.Curve or anything which has a
            save() method, saving the scattering curve to a given .dat file,
            in q=4*pi*sin(theta)/lambda [1/nm] units
        Rmax: the estimated maximum extent of the scattering object, in nm.
        outputfilename: the preferred name of the output file. If not given,
            the .out file produced by gnom will be lost.
        Npoints_realspace: the expected number of points in the real space
        initial_alpha: the initial value of the regularization parameter.

    Outputs:
        the same as of read_gnom_pr()
    """
    with tempfile.TemporaryDirectory(prefix='credolib_gnom') as workdir:
        curve.save(os.path.join(workdir, 'curve.dat'))
        print('Using curve for GNOM: qrange from {} to {}'.format(curve.q.min(), curve.q.max()))
        # Empty answers make GNOM keep its defaults for these two questions.
        Npoints_realspace = "" if Npoints_realspace is None else str(Npoints_realspace)
        initial_alpha = "" if initial_alpha is None else str(initial_alpha)
        # Scripted answers to GNOM's interactive questions, in order:
        #   Printer type                          -> <ENTER>
        #   Input data, first file                -> curve.dat in workdir
        #   Output file                           -> gnom.out in workdir
        #   No of start points to skip            -> 0
        #   Input data, second file               -> <ENTER>
        #   No of end points to omit              -> 0
        #   Angular scale (1/2/3/4)               -> 2
        #   Plot input data (Y/N)                 -> N
        #   File containing expert parameters     -> <ENTER>
        #   Kernel already calculated (Y/N)       -> N
        #   Type of system (0/1/2/3/4/5/6)        -> 0
        #   Zero condition at r=min (Y/N)         -> Y
        #   Zero condition at r=max (Y/N)         -> Y
        #   Rmax for evaluating p(r)              -> Rmax * 10
        #   Number of points in real space        -> Npoints_realspace
        #   Kernel-storage file name              -> <ENTER>
        #   Experimental setup (0/1/2)            -> 0
        #   Initial ALPHA                         -> initial_alpha
        #   Plot alpha distribution (Y/N)         -> N
        #   Plot results (Y/N)                    -> N
        #   Your choice                           -> <ENTER>
        #   Evaluate errors (Y/N)                 -> Y
        #   Plot p(r) with errors (Y/N)           -> N
        #   Next data set (Yes/No/Same)           -> N
        gnominput = "\n%s\n%s\n0\n\n0\n2\nN\n\nN\n0\nY\nY\n%f\n%s\n\n0\n%s\nN\nN\n\nY\nN\nN\n" % (
            os.path.join(workdir, 'curve.dat'), os.path.join(workdir, 'gnom.out'),
            10 * Rmax, Npoints_realspace, initial_alpha)
        subprocess.run(['gnom'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       input=gnominput.encode('utf-8'))
        pr, metadata = read_gnom_pr(os.path.join(workdir, 'gnom.out'), True)
        # Scale lengths back by factors of 10 (presumably Angstrom <-> nm,
        # matching the 10*Rmax passed above -- see docstring units).
        pr[:, 0] /= 10
        for key in ('q', 'qj', 'qmin', 'qmax'):
            metadata[key] *= 10
        for key in ('dmax', 'dmin', 'Rg_guinier', 'Rg_gnom'):
            metadata[key] /= 10
        if outputfilename is not None:
            # Preserve the .out file before the temporary directory vanishes.
            shutil.copy(os.path.join(workdir, 'gnom.out'), outputfilename)
        return pr, metadata
python
def gnom(curve, Rmax, outputfilename=None, Npoints_realspace=None, initial_alpha=None):
    """Run GNOM on the dataset.

    Inputs:
        curve: an instance of sastool.classes2.Curve or anything which has a
            save() method, saving the scattering curve to a given .dat file,
            in q=4*pi*sin(theta)/lambda [1/nm] units
        Rmax: the estimated maximum extent of the scattering object, in nm.
        outputfilename: the preferred name of the output file. If not given,
            the .out file produced by gnom will be lost.
        Npoints_realspace: the expected number of points in the real space
        initial_alpha: the initial value of the regularization parameter.

    Outputs:
        the same as of read_gnom_pr()
    """
    with tempfile.TemporaryDirectory(prefix='credolib_gnom') as workdir:
        curve.save(os.path.join(workdir, 'curve.dat'))
        print('Using curve for GNOM: qrange from {} to {}'.format(curve.q.min(), curve.q.max()))
        # Empty answers make GNOM keep its defaults for these two questions.
        Npoints_realspace = "" if Npoints_realspace is None else str(Npoints_realspace)
        initial_alpha = "" if initial_alpha is None else str(initial_alpha)
        # Scripted answers to GNOM's interactive questions, in order:
        #   Printer type                          -> <ENTER>
        #   Input data, first file                -> curve.dat in workdir
        #   Output file                           -> gnom.out in workdir
        #   No of start points to skip            -> 0
        #   Input data, second file               -> <ENTER>
        #   No of end points to omit              -> 0
        #   Angular scale (1/2/3/4)               -> 2
        #   Plot input data (Y/N)                 -> N
        #   File containing expert parameters     -> <ENTER>
        #   Kernel already calculated (Y/N)       -> N
        #   Type of system (0/1/2/3/4/5/6)        -> 0
        #   Zero condition at r=min (Y/N)         -> Y
        #   Zero condition at r=max (Y/N)         -> Y
        #   Rmax for evaluating p(r)              -> Rmax * 10
        #   Number of points in real space        -> Npoints_realspace
        #   Kernel-storage file name              -> <ENTER>
        #   Experimental setup (0/1/2)            -> 0
        #   Initial ALPHA                         -> initial_alpha
        #   Plot alpha distribution (Y/N)         -> N
        #   Plot results (Y/N)                    -> N
        #   Your choice                           -> <ENTER>
        #   Evaluate errors (Y/N)                 -> Y
        #   Plot p(r) with errors (Y/N)           -> N
        #   Next data set (Yes/No/Same)           -> N
        gnominput = "\n%s\n%s\n0\n\n0\n2\nN\n\nN\n0\nY\nY\n%f\n%s\n\n0\n%s\nN\nN\n\nY\nN\nN\n" % (
            os.path.join(workdir, 'curve.dat'), os.path.join(workdir, 'gnom.out'),
            10 * Rmax, Npoints_realspace, initial_alpha)
        subprocess.run(['gnom'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       input=gnominput.encode('utf-8'))
        pr, metadata = read_gnom_pr(os.path.join(workdir, 'gnom.out'), True)
        # Scale lengths back by factors of 10 (presumably Angstrom <-> nm,
        # matching the 10*Rmax passed above -- see docstring units).
        pr[:, 0] /= 10
        for key in ('q', 'qj', 'qmin', 'qmax'):
            metadata[key] *= 10
        for key in ('dmax', 'dmin', 'Rg_guinier', 'Rg_gnom'):
            metadata[key] /= 10
        if outputfilename is not None:
            # Preserve the .out file before the temporary directory vanishes.
            shutil.copy(os.path.join(workdir, 'gnom.out'), outputfilename)
        return pr, metadata
[ "def", "gnom", "(", "curve", ",", "Rmax", ",", "outputfilename", "=", "None", ",", "Npoints_realspace", "=", "None", ",", "initial_alpha", "=", "None", ")", ":", "with", "tempfile", ".", "TemporaryDirectory", "(", "prefix", "=", "'credolib_gnom'", ")", "as", "td", ":", "curve", ".", "save", "(", "os", ".", "path", ".", "join", "(", "td", ",", "'curve.dat'", ")", ")", "print", "(", "'Using curve for GNOM: qrange from {} to {}'", ".", "format", "(", "curve", ".", "q", ".", "min", "(", ")", ",", "curve", ".", "q", ".", "max", "(", ")", ")", ")", "if", "Npoints_realspace", "is", "None", ":", "Npoints_realspace", "=", "\"\"", "else", ":", "Npoints_realspace", "=", "str", "(", "Npoints_realspace", ")", "if", "initial_alpha", "is", "None", ":", "initial_alpha", "=", "\"\"", "else", ":", "initial_alpha", "=", "str", "(", "initial_alpha", ")", "# GNOM questions and our answers:", "# Printer type [ postscr ] : <ENTER>", "# Input data, first file : <curve.dat in the temporary directory><ENTER>", "# Output file [ gnom.out ] : <gnom.out in the temporary directory><ENTER>", "# No of start points to skip [ 0 ] : 0<ENTER>", "# ... (just GNOM output)", "# ... (just GNOM output)", "# Input data, second file [ none ] : <ENTER>", "# No of end points to omit [ 0 ] : 0<ENTER>", "# ... (just GNOM output)", "# ... 
(just GNOM output)", "# Angular scale (1/2/3/4) [ 1 ] : 2<ENTER>", "# Plot input dataa (Y/N) [ Yes ] : N<ENTER>", "# File containing expert parameters [ none ] : <ENTER>", "# Kernel already calculated (Y/N) [ No ] : N<ENTER>", "# Type of system (0/1/2/3/4/5/6) [ 0 ] : 0<ENTER>", "# Zero condition at r=min (Y/N) [ Yes ] : Y<ENTER>", "# Zero condition at r=max (Y/N) [ Yes ] : Y<ENTER>", "# -- Arbitrary monodisperse system --", "# Rmin=0, Rmax is maximum particle diameter", "# Rmax for evaluating p(r) : <Rmax * 10><ENTER>", "# Number of points in real space [(always different)] : <Npoints_realspace><ENTER>", "# Kernel-storage file name [ kern.bin ] : <ENTER>", "# Experimental setup (0/1/2) [ 0 ] : 0<ENTER>", "# Initial ALPHA [ 0.0 ] : <initial_alpha><ENTER>", "# Plot alpha distribution (Y/N) [ Yes ] : N<ENTER>", "# Plot results (Y/N) [ Yes ] : N<ENTER>", "# ... solution ...", "# Your choice : <ENTER>", "# Evaluate errors (Y/N) [ Yes ] : Y<ENTER>", "# Plot p(r) with errors (Y/N) [ Yes ] : N<ENTER>", "# Next data set (Yes/No/Same) [ No ] : N<ENTER>", "gnominput", "=", "\"\\n%s\\n%s\\n0\\n\\n0\\n2\\nN\\n\\nN\\n0\\nY\\nY\\n%f\\n%s\\n\\n0\\n%s\\nN\\nN\\n\\nY\\nN\\nN\\n\"", "%", "(", "os", ".", "path", ".", "join", "(", "td", ",", "'curve.dat'", ")", ",", "os", ".", "path", ".", "join", "(", "td", ",", "'gnom.out'", ")", ",", "10", "*", "Rmax", ",", "Npoints_realspace", ",", "initial_alpha", ")", "result", "=", "subprocess", ".", "run", "(", "[", "'gnom'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "input", "=", "gnominput", ".", "encode", "(", "'utf-8'", ")", ")", "pr", ",", "metadata", "=", "read_gnom_pr", "(", "os", ".", "path", ".", "join", "(", "td", ",", "'gnom.out'", ")", ",", "True", ")", "pr", "[", ":", ",", "0", "]", "/=", "10", "metadata", "[", "'q'", "]", "*=", "10", "metadata", "[", "'qj'", "]", "*=", "10", "metadata", "[", "'qmin'", "]", "*=", "10", "metadata", "[", "'qmax'", "]", "*=", "10", 
"metadata", "[", "'dmax'", "]", "/=", "10", "metadata", "[", "'dmin'", "]", "/=", "10", "metadata", "[", "'Rg_guinier'", "]", "/=", "10", "metadata", "[", "'Rg_gnom'", "]", "/=", "10", "if", "outputfilename", "is", "not", "None", ":", "shutil", ".", "copy", "(", "os", ".", "path", ".", "join", "(", "td", ",", "'gnom.out'", ")", ",", "outputfilename", ")", "return", "pr", ",", "metadata" ]
Run GNOM on the dataset. Inputs: curve: an instance of sastool.classes2.Curve or anything which has a save() method, saving the scattering curve to a given .dat file, in q=4*pi*sin(theta)/lambda [1/nm] units Rmax: the estimated maximum extent of the scattering object, in nm. outputfilename: the preferred name of the output file. If not given, the .out file produced by gnom will be lost. Npoints_realspace: the expected number of points in the real space initial_alpha: the initial value of the regularization parameter. Outputs: the same as of read_gnom_pr()
[ "Run", "GNOM", "on", "the", "dataset", "." ]
11c0be3eea7257d3d6e13697d3e76ce538f2f1b2
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/atsas.py#L443-L518
249,605
uw-it-aca/uw-restclients-libraries
uw_libraries/mylib.py
get_account
def get_account(netid, timestamp=None):
    """
    Return the library account information for the given netid.

    Fetches the raw response (optionally at *timestamp*) and converts it
    via _account_from_json.
    """
    raw = _get_account(netid, timestamp=timestamp)
    return _account_from_json(raw)
python
def get_account(netid, timestamp=None):
    """
    Return the library account information for the given netid.

    Fetches the raw response (optionally at *timestamp*) and converts it
    via _account_from_json.
    """
    raw = _get_account(netid, timestamp=timestamp)
    return _account_from_json(raw)
[ "def", "get_account", "(", "netid", ",", "timestamp", "=", "None", ")", ":", "response", "=", "_get_account", "(", "netid", ",", "timestamp", "=", "timestamp", ")", "return", "_account_from_json", "(", "response", ")" ]
The Libraries object has a method for getting information about a user's library account
[ "The", "Libraries", "object", "has", "a", "method", "for", "getting", "information", "about", "a", "user", "s", "library", "account" ]
2fa2e38be4516d7853c2802e2f23b17fbf4bac3d
https://github.com/uw-it-aca/uw-restclients-libraries/blob/2fa2e38be4516d7853c2802e2f23b17fbf4bac3d/uw_libraries/mylib.py#L45-L51
249,606
fred49/linshare-api
linshareapi/user/contactslistcontact.py
ContactsListContact.delete
def delete(self, list_uuid, uuid):
    """Delete a single contact from a contacts list.

    The contact is fetched first so that its representation (as it was
    before deletion) can be returned to the caller.
    """
    removed = self.get(list_uuid, uuid)
    url = "%(base)s/%(list_uuid)s/contacts/%(uuid)s" % {
        'base': self.local_base_url,
        'list_uuid': list_uuid,
        'uuid': uuid
    }
    self.core.delete(url)
    return removed
python
def delete(self, list_uuid, uuid):
    """Delete a single contact from a contacts list.

    The contact is fetched first so that its representation (as it was
    before deletion) can be returned to the caller.
    """
    removed = self.get(list_uuid, uuid)
    url = "%(base)s/%(list_uuid)s/contacts/%(uuid)s" % {
        'base': self.local_base_url,
        'list_uuid': list_uuid,
        'uuid': uuid
    }
    self.core.delete(url)
    return removed
[ "def", "delete", "(", "self", ",", "list_uuid", ",", "uuid", ")", ":", "res", "=", "self", ".", "get", "(", "list_uuid", ",", "uuid", ")", "url", "=", "\"%(base)s/%(list_uuid)s/contacts/%(uuid)s\"", "%", "{", "'base'", ":", "self", ".", "local_base_url", ",", "'list_uuid'", ":", "list_uuid", ",", "'uuid'", ":", "uuid", "}", "self", ".", "core", ".", "delete", "(", "url", ")", "return", "res" ]
Delete one list.
[ "Delete", "one", "list", "." ]
be646c25aa8ba3718abb6869c620b157d53d6e41
https://github.com/fred49/linshare-api/blob/be646c25aa8ba3718abb6869c620b157d53d6e41/linshareapi/user/contactslistcontact.py#L99-L108
249,607
ramrod-project/database-brain
schema/brain/controller/plugins.py
find_plugin
def find_plugin(value, key=DEFAULT_LOOKUP_KEY, conn=None):
    """
    Gets the plugin(s) matching the key and value.

    example: find_plugin("plugin1", "ServiceName") => list of 0 or 1 item
    example: find_plugin("plugin1", "Name") => list of 0-to-many items

    :param value:
    :param key: <str> (default "Name")
    :param conn:
    :return:
    """
    matches = RPC.filter({key: value}).run(conn)
    # Materialize as a list to hide rethink cursor internals from the caller.
    return list(matches)
python
def find_plugin(value, key=DEFAULT_LOOKUP_KEY, conn=None): """ get's the plugin matching the key and value example: find_plugin("plugin1", "ServiceName") => list of 0 or 1 item example: find_plugin("plugin1", "Name") => list of 0-to-many items :param value: :param key: <str> (default "Name") :param conn: :return: """ # cast to list to hide rethink internals from caller result = list(RPC.filter({ key: value }).run(conn)) return result
[ "def", "find_plugin", "(", "value", ",", "key", "=", "DEFAULT_LOOKUP_KEY", ",", "conn", "=", "None", ")", ":", "# cast to list to hide rethink internals from caller", "result", "=", "list", "(", "RPC", ".", "filter", "(", "{", "key", ":", "value", "}", ")", ".", "run", "(", "conn", ")", ")", "return", "result" ]
get's the plugin matching the key and value example: find_plugin("plugin1", "ServiceName") => list of 0 or 1 item example: find_plugin("plugin1", "Name") => list of 0-to-many items :param value: :param key: <str> (default "Name") :param conn: :return:
[ "get", "s", "the", "plugin", "matching", "the", "key", "and", "value" ]
b024cb44f34cabb9d80af38271ddb65c25767083
https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/controller/plugins.py#L57-L75
249,608
OpenVolunteeringPlatform/django-ovp-projects
ovp_projects/emails.py
ApplyMail.sendAppliedToOwner
def sendAppliedToOwner(self, context={}): """ Sent to project owner when user applies to a project """ super(ApplyMail, self).__init__(self.apply.project.owner.email, self.async, self.apply.project.owner.locale) return self.sendEmail('volunteerApplied-ToOwner', 'New volunteer', context)
python
def sendAppliedToOwner(self, context={}): """ Sent to project owner when user applies to a project """ super(ApplyMail, self).__init__(self.apply.project.owner.email, self.async, self.apply.project.owner.locale) return self.sendEmail('volunteerApplied-ToOwner', 'New volunteer', context)
[ "def", "sendAppliedToOwner", "(", "self", ",", "context", "=", "{", "}", ")", ":", "super", "(", "ApplyMail", ",", "self", ")", ".", "__init__", "(", "self", ".", "apply", ".", "project", ".", "owner", ".", "email", ",", "self", ".", "async", ",", "self", ".", "apply", ".", "project", ".", "owner", ".", "locale", ")", "return", "self", ".", "sendEmail", "(", "'volunteerApplied-ToOwner'", ",", "'New volunteer'", ",", "context", ")" ]
Sent to project owner when user applies to a project
[ "Sent", "to", "project", "owner", "when", "user", "applies", "to", "a", "project" ]
239e27027ca99c7b44ee4f30bf55d06439d49251
https://github.com/OpenVolunteeringPlatform/django-ovp-projects/blob/239e27027ca99c7b44ee4f30bf55d06439d49251/ovp_projects/emails.py#L53-L58
249,609
mbarakaja/braulio
braulio/git.py
Git.add
def add(self, *files): """Add one or more files to the index running git-add.""" try: _run_command(("git", "add") + files) except CalledProcessError: # Only if the command fails we check if the files # exist, because git-add most of the time fails when # the provided files are not found. for f in files: if not Path(f).exists(): raise FileNotFoundError(f"No such file or directory: {f}")
python
def add(self, *files): """Add one or more files to the index running git-add.""" try: _run_command(("git", "add") + files) except CalledProcessError: # Only if the command fails we check if the files # exist, because git-add most of the time fails when # the provided files are not found. for f in files: if not Path(f).exists(): raise FileNotFoundError(f"No such file or directory: {f}")
[ "def", "add", "(", "self", ",", "*", "files", ")", ":", "try", ":", "_run_command", "(", "(", "\"git\"", ",", "\"add\"", ")", "+", "files", ")", "except", "CalledProcessError", ":", "# Only if the command fails we check if the files", "# exist, because git-add most of the time fails when", "# the provided files are not found.", "for", "f", "in", "files", ":", "if", "not", "Path", "(", "f", ")", ".", "exists", "(", ")", ":", "raise", "FileNotFoundError", "(", "f\"No such file or directory: {f}\"", ")" ]
Add one or more files to the index running git-add.
[ "Add", "one", "or", "more", "files", "to", "the", "index", "running", "git", "-", "add", "." ]
70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b
https://github.com/mbarakaja/braulio/blob/70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b/braulio/git.py#L71-L82
249,610
mbarakaja/braulio
braulio/git.py
Git.commit
def commit(self, message, files=None): """Run git-commit.""" if files: self.add(*files) return _run_command(["git", "commit", "-m", f'"{message}"'])
python
def commit(self, message, files=None): """Run git-commit.""" if files: self.add(*files) return _run_command(["git", "commit", "-m", f'"{message}"'])
[ "def", "commit", "(", "self", ",", "message", ",", "files", "=", "None", ")", ":", "if", "files", ":", "self", ".", "add", "(", "*", "files", ")", "return", "_run_command", "(", "[", "\"git\"", ",", "\"commit\"", ",", "\"-m\"", ",", "f'\"{message}\"'", "]", ")" ]
Run git-commit.
[ "Run", "git", "-", "commit", "." ]
70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b
https://github.com/mbarakaja/braulio/blob/70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b/braulio/git.py#L84-L90
249,611
mbarakaja/braulio
braulio/git.py
Git.log
def log(self, _from=None, to=None): """Run git-log.""" command = ["git", "log"] if _from: to = "HEAD" if not to else to revision_range = f"{_from}..{to}" command.append(revision_range) git_log_text = _run_command(command) commit_text_lst = _extract_commit_texts(git_log_text) return [Commit(commit_text) for commit_text in commit_text_lst]
python
def log(self, _from=None, to=None): """Run git-log.""" command = ["git", "log"] if _from: to = "HEAD" if not to else to revision_range = f"{_from}..{to}" command.append(revision_range) git_log_text = _run_command(command) commit_text_lst = _extract_commit_texts(git_log_text) return [Commit(commit_text) for commit_text in commit_text_lst]
[ "def", "log", "(", "self", ",", "_from", "=", "None", ",", "to", "=", "None", ")", ":", "command", "=", "[", "\"git\"", ",", "\"log\"", "]", "if", "_from", ":", "to", "=", "\"HEAD\"", "if", "not", "to", "else", "to", "revision_range", "=", "f\"{_from}..{to}\"", "command", ".", "append", "(", "revision_range", ")", "git_log_text", "=", "_run_command", "(", "command", ")", "commit_text_lst", "=", "_extract_commit_texts", "(", "git_log_text", ")", "return", "[", "Commit", "(", "commit_text", ")", "for", "commit_text", "in", "commit_text_lst", "]" ]
Run git-log.
[ "Run", "git", "-", "log", "." ]
70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b
https://github.com/mbarakaja/braulio/blob/70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b/braulio/git.py#L92-L105
249,612
mbarakaja/braulio
braulio/git.py
Git.tag
def tag(self, name=None): """Create and list tag objects running git-tag command""" command = ["git", "tag"] if not name: command.extend( [ "-l", "--sort=creatordate", "--format=%(creatordate:short)%09%(refname:strip=2)", ] ) command_output = _run_command(command).strip() if command_output == "": return [] tag_text_list = command_output.split("\n") tag_list = [Tag(text) for text in tag_text_list] return list(reversed(tag_list)) command.extend(["-a", name, "-m", '""']) return _run_command(command)
python
def tag(self, name=None): """Create and list tag objects running git-tag command""" command = ["git", "tag"] if not name: command.extend( [ "-l", "--sort=creatordate", "--format=%(creatordate:short)%09%(refname:strip=2)", ] ) command_output = _run_command(command).strip() if command_output == "": return [] tag_text_list = command_output.split("\n") tag_list = [Tag(text) for text in tag_text_list] return list(reversed(tag_list)) command.extend(["-a", name, "-m", '""']) return _run_command(command)
[ "def", "tag", "(", "self", ",", "name", "=", "None", ")", ":", "command", "=", "[", "\"git\"", ",", "\"tag\"", "]", "if", "not", "name", ":", "command", ".", "extend", "(", "[", "\"-l\"", ",", "\"--sort=creatordate\"", ",", "\"--format=%(creatordate:short)%09%(refname:strip=2)\"", ",", "]", ")", "command_output", "=", "_run_command", "(", "command", ")", ".", "strip", "(", ")", "if", "command_output", "==", "\"\"", ":", "return", "[", "]", "tag_text_list", "=", "command_output", ".", "split", "(", "\"\\n\"", ")", "tag_list", "=", "[", "Tag", "(", "text", ")", "for", "text", "in", "tag_text_list", "]", "return", "list", "(", "reversed", "(", "tag_list", ")", ")", "command", ".", "extend", "(", "[", "\"-a\"", ",", "name", ",", "\"-m\"", ",", "'\"\"'", "]", ")", "return", "_run_command", "(", "command", ")" ]
Create and list tag objects running git-tag command
[ "Create", "and", "list", "tag", "objects", "running", "git", "-", "tag", "command" ]
70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b
https://github.com/mbarakaja/braulio/blob/70ab6f0dd631ef78c4da1b39d1c6fb6f9a995d2b/braulio/git.py#L107-L132
249,613
devricks/soft_drf
soft_drf/api/routers/__init__.py
DefaultRouter.register
def register(self, prefix, viewset, base_name=None, router_class=None): """ Append the given viewset to the proper registry. """ if base_name is None: base_name = self.get_default_base_name(viewset) if router_class is not None: kwargs = {'trailing_slash': bool(self.trailing_slash)} single_object_router_classes = ( AuthenticationRouter, SingleObjectRouter) if issubclass(router_class, single_object_router_classes): router = router_class(**kwargs) router.register(prefix, viewset, base_name=base_name) self._single_object_registry.append(router) else: self.registry.append((prefix, viewset, base_name))
python
def register(self, prefix, viewset, base_name=None, router_class=None): """ Append the given viewset to the proper registry. """ if base_name is None: base_name = self.get_default_base_name(viewset) if router_class is not None: kwargs = {'trailing_slash': bool(self.trailing_slash)} single_object_router_classes = ( AuthenticationRouter, SingleObjectRouter) if issubclass(router_class, single_object_router_classes): router = router_class(**kwargs) router.register(prefix, viewset, base_name=base_name) self._single_object_registry.append(router) else: self.registry.append((prefix, viewset, base_name))
[ "def", "register", "(", "self", ",", "prefix", ",", "viewset", ",", "base_name", "=", "None", ",", "router_class", "=", "None", ")", ":", "if", "base_name", "is", "None", ":", "base_name", "=", "self", ".", "get_default_base_name", "(", "viewset", ")", "if", "router_class", "is", "not", "None", ":", "kwargs", "=", "{", "'trailing_slash'", ":", "bool", "(", "self", ".", "trailing_slash", ")", "}", "single_object_router_classes", "=", "(", "AuthenticationRouter", ",", "SingleObjectRouter", ")", "if", "issubclass", "(", "router_class", ",", "single_object_router_classes", ")", ":", "router", "=", "router_class", "(", "*", "*", "kwargs", ")", "router", ".", "register", "(", "prefix", ",", "viewset", ",", "base_name", "=", "base_name", ")", "self", ".", "_single_object_registry", ".", "append", "(", "router", ")", "else", ":", "self", ".", "registry", ".", "append", "(", "(", "prefix", ",", "viewset", ",", "base_name", ")", ")" ]
Append the given viewset to the proper registry.
[ "Append", "the", "given", "viewset", "to", "the", "proper", "registry", "." ]
1869b13f9341bfcebd931059e93de2bc38570da3
https://github.com/devricks/soft_drf/blob/1869b13f9341bfcebd931059e93de2bc38570da3/soft_drf/api/routers/__init__.py#L16-L34
249,614
devricks/soft_drf
soft_drf/api/routers/__init__.py
DefaultRouter.register_nested
def register_nested( self, parent_prefix, prefix, viewset, base_name=None, parent_lookup_name=None, depth_level=1 ): """ Register a nested viewset wihtout worrying of instantiate a nested router for registry. """ kwargs = { 'trailing_slash': bool(self.trailing_slash) } if parent_lookup_name is not None: kwargs.update(lookup=parent_lookup_name) # Section for the depth of the route and add more routes if depth_level > 1: routers = filter( lambda r: (r._depth_level == (depth_level - 1)) and r._nested_prefix == parent_prefix, self._nested_object_registry ) try: parent_router = next(routers) except StopIteration: raise RuntimeError('parent registered resource not found') else: parent_router = self nested_router = NestedSimpleRouter( parent_router, parent_prefix, **kwargs ) nested_router._nested_prefix = prefix nested_router._depth_level = depth_level nested_router.register(prefix, viewset, base_name) self._nested_object_registry.append(nested_router)
python
def register_nested( self, parent_prefix, prefix, viewset, base_name=None, parent_lookup_name=None, depth_level=1 ): """ Register a nested viewset wihtout worrying of instantiate a nested router for registry. """ kwargs = { 'trailing_slash': bool(self.trailing_slash) } if parent_lookup_name is not None: kwargs.update(lookup=parent_lookup_name) # Section for the depth of the route and add more routes if depth_level > 1: routers = filter( lambda r: (r._depth_level == (depth_level - 1)) and r._nested_prefix == parent_prefix, self._nested_object_registry ) try: parent_router = next(routers) except StopIteration: raise RuntimeError('parent registered resource not found') else: parent_router = self nested_router = NestedSimpleRouter( parent_router, parent_prefix, **kwargs ) nested_router._nested_prefix = prefix nested_router._depth_level = depth_level nested_router.register(prefix, viewset, base_name) self._nested_object_registry.append(nested_router)
[ "def", "register_nested", "(", "self", ",", "parent_prefix", ",", "prefix", ",", "viewset", ",", "base_name", "=", "None", ",", "parent_lookup_name", "=", "None", ",", "depth_level", "=", "1", ")", ":", "kwargs", "=", "{", "'trailing_slash'", ":", "bool", "(", "self", ".", "trailing_slash", ")", "}", "if", "parent_lookup_name", "is", "not", "None", ":", "kwargs", ".", "update", "(", "lookup", "=", "parent_lookup_name", ")", "# Section for the depth of the route and add more routes", "if", "depth_level", ">", "1", ":", "routers", "=", "filter", "(", "lambda", "r", ":", "(", "r", ".", "_depth_level", "==", "(", "depth_level", "-", "1", ")", ")", "and", "r", ".", "_nested_prefix", "==", "parent_prefix", ",", "self", ".", "_nested_object_registry", ")", "try", ":", "parent_router", "=", "next", "(", "routers", ")", "except", "StopIteration", ":", "raise", "RuntimeError", "(", "'parent registered resource not found'", ")", "else", ":", "parent_router", "=", "self", "nested_router", "=", "NestedSimpleRouter", "(", "parent_router", ",", "parent_prefix", ",", "*", "*", "kwargs", ")", "nested_router", ".", "_nested_prefix", "=", "prefix", "nested_router", ".", "_depth_level", "=", "depth_level", "nested_router", ".", "register", "(", "prefix", ",", "viewset", ",", "base_name", ")", "self", ".", "_nested_object_registry", ".", "append", "(", "nested_router", ")" ]
Register a nested viewset wihtout worrying of instantiate a nested router for registry.
[ "Register", "a", "nested", "viewset", "wihtout", "worrying", "of", "instantiate", "a", "nested", "router", "for", "registry", "." ]
1869b13f9341bfcebd931059e93de2bc38570da3
https://github.com/devricks/soft_drf/blob/1869b13f9341bfcebd931059e93de2bc38570da3/soft_drf/api/routers/__init__.py#L36-L78
249,615
devricks/soft_drf
soft_drf/api/routers/__init__.py
DefaultRouter.get_urls
def get_urls(self): """ Generate the list of URL patterns including the registered single object routers urls. """ base_urls = super(SimpleRouter, self).get_urls() single_urls = sum([r.urls for r in self._single_object_registry], []) nested_urls = sum([r.urls for r in self._nested_object_registry], []) return base_urls + single_urls + nested_urls
python
def get_urls(self): """ Generate the list of URL patterns including the registered single object routers urls. """ base_urls = super(SimpleRouter, self).get_urls() single_urls = sum([r.urls for r in self._single_object_registry], []) nested_urls = sum([r.urls for r in self._nested_object_registry], []) return base_urls + single_urls + nested_urls
[ "def", "get_urls", "(", "self", ")", ":", "base_urls", "=", "super", "(", "SimpleRouter", ",", "self", ")", ".", "get_urls", "(", ")", "single_urls", "=", "sum", "(", "[", "r", ".", "urls", "for", "r", "in", "self", ".", "_single_object_registry", "]", ",", "[", "]", ")", "nested_urls", "=", "sum", "(", "[", "r", ".", "urls", "for", "r", "in", "self", ".", "_nested_object_registry", "]", ",", "[", "]", ")", "return", "base_urls", "+", "single_urls", "+", "nested_urls" ]
Generate the list of URL patterns including the registered single object routers urls.
[ "Generate", "the", "list", "of", "URL", "patterns", "including", "the", "registered", "single", "object", "routers", "urls", "." ]
1869b13f9341bfcebd931059e93de2bc38570da3
https://github.com/devricks/soft_drf/blob/1869b13f9341bfcebd931059e93de2bc38570da3/soft_drf/api/routers/__init__.py#L80-L89
249,616
DoWhileGeek/authentise-services
authentise_services/config.py
Config.parse_config
def parse_config(path): """parse either the config file we found, or use some canned defaults""" config = configparser.ConfigParser() if path: # if user has config with user creds in it, this will grab it config.read(path) try: return {k: v for k, v in config["default"].items()} except KeyError: return {}
python
def parse_config(path): """parse either the config file we found, or use some canned defaults""" config = configparser.ConfigParser() if path: # if user has config with user creds in it, this will grab it config.read(path) try: return {k: v for k, v in config["default"].items()} except KeyError: return {}
[ "def", "parse_config", "(", "path", ")", ":", "config", "=", "configparser", ".", "ConfigParser", "(", ")", "if", "path", ":", "# if user has config with user creds in it, this will grab it", "config", ".", "read", "(", "path", ")", "try", ":", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "config", "[", "\"default\"", "]", ".", "items", "(", ")", "}", "except", "KeyError", ":", "return", "{", "}" ]
parse either the config file we found, or use some canned defaults
[ "parse", "either", "the", "config", "file", "we", "found", "or", "use", "some", "canned", "defaults" ]
ee32bd7f7de15d3fb24c0a6374640d3a1ec8096d
https://github.com/DoWhileGeek/authentise-services/blob/ee32bd7f7de15d3fb24c0a6374640d3a1ec8096d/authentise_services/config.py#L25-L35
249,617
b3j0f/utils
b3j0f/utils/path.py
lookup
def lookup(path, cache=True, scope=None, safe=False): """Get element reference from input element. The element can be a builtin/globals/scope object or is resolved from the current execution stack. :limitations: it does not resolve class methods or static values such as True, False, numbers, string and keywords. :param str path: full path to a python element. :param bool cache: if True (default), permits to reduce time complexity for lookup resolution in using cache memory to save resolved elements. :param dict scope: object scrope from where find path. For example, this scope can be locals(). Default is globals(). :param bool safe: use lookup in a safe context. A safe context avoid to reach builtins function with I/O consequences. :return: python object which is accessible through input path or raise an exception if the path is wrong. :rtype: object :raises ImportError: if path is wrong """ result = None found = path and cache and path in __LOOKUP_CACHE if found: result = __LOOKUP_CACHE[path] elif path: _eval = safe_eval if safe else eval try: # search among scope result = _eval(path, scope) except (NameError, SyntaxError): # we generate a result in order to accept the result such as a None generated_result = random() result = generated_result components = path.split('.') index = 0 components_len = len(components) module_name = components[0] # try to resolve an absolute path try: result = import_module(module_name) except ImportError: # resolve element globals or locals of the from previous frame previous_frame = currentframe().f_back if module_name in previous_frame.f_locals: result = previous_frame.f_locals[module_name] elif module_name in previous_frame.f_globals: result = previous_frame.f_globals[module_name] found = result is not generated_result if found: if components_len > 1: index = 1 # try to import all sub-modules/packages try: # check if name is defined from an external module # find the right module while index < components_len: module_name = 
'{0}.{1}'.format( module_name, components[index] ) result = import_module(module_name) index += 1 except ImportError: # path sub-module content try: if PY26: # when __import__ is used index = 1 # restart count of pathing while index < components_len: result = getattr(result, components[index]) index += 1 except AttributeError: raise ImportError( 'Wrong path {0} at {1}'.format( path, components[:index] ) ) else: # in case of PY26 if PY26: index = 1 while index < components_len: result = getattr(result, components[index]) index += 1 else: found = True if found: if cache: # save in cache if found __LOOKUP_CACHE[path] = result else: raise ImportError('Wrong path {0}'.format(path)) return result
python
def lookup(path, cache=True, scope=None, safe=False): """Get element reference from input element. The element can be a builtin/globals/scope object or is resolved from the current execution stack. :limitations: it does not resolve class methods or static values such as True, False, numbers, string and keywords. :param str path: full path to a python element. :param bool cache: if True (default), permits to reduce time complexity for lookup resolution in using cache memory to save resolved elements. :param dict scope: object scrope from where find path. For example, this scope can be locals(). Default is globals(). :param bool safe: use lookup in a safe context. A safe context avoid to reach builtins function with I/O consequences. :return: python object which is accessible through input path or raise an exception if the path is wrong. :rtype: object :raises ImportError: if path is wrong """ result = None found = path and cache and path in __LOOKUP_CACHE if found: result = __LOOKUP_CACHE[path] elif path: _eval = safe_eval if safe else eval try: # search among scope result = _eval(path, scope) except (NameError, SyntaxError): # we generate a result in order to accept the result such as a None generated_result = random() result = generated_result components = path.split('.') index = 0 components_len = len(components) module_name = components[0] # try to resolve an absolute path try: result = import_module(module_name) except ImportError: # resolve element globals or locals of the from previous frame previous_frame = currentframe().f_back if module_name in previous_frame.f_locals: result = previous_frame.f_locals[module_name] elif module_name in previous_frame.f_globals: result = previous_frame.f_globals[module_name] found = result is not generated_result if found: if components_len > 1: index = 1 # try to import all sub-modules/packages try: # check if name is defined from an external module # find the right module while index < components_len: module_name = 
'{0}.{1}'.format( module_name, components[index] ) result = import_module(module_name) index += 1 except ImportError: # path sub-module content try: if PY26: # when __import__ is used index = 1 # restart count of pathing while index < components_len: result = getattr(result, components[index]) index += 1 except AttributeError: raise ImportError( 'Wrong path {0} at {1}'.format( path, components[:index] ) ) else: # in case of PY26 if PY26: index = 1 while index < components_len: result = getattr(result, components[index]) index += 1 else: found = True if found: if cache: # save in cache if found __LOOKUP_CACHE[path] = result else: raise ImportError('Wrong path {0}'.format(path)) return result
[ "def", "lookup", "(", "path", ",", "cache", "=", "True", ",", "scope", "=", "None", ",", "safe", "=", "False", ")", ":", "result", "=", "None", "found", "=", "path", "and", "cache", "and", "path", "in", "__LOOKUP_CACHE", "if", "found", ":", "result", "=", "__LOOKUP_CACHE", "[", "path", "]", "elif", "path", ":", "_eval", "=", "safe_eval", "if", "safe", "else", "eval", "try", ":", "# search among scope", "result", "=", "_eval", "(", "path", ",", "scope", ")", "except", "(", "NameError", ",", "SyntaxError", ")", ":", "# we generate a result in order to accept the result such as a None", "generated_result", "=", "random", "(", ")", "result", "=", "generated_result", "components", "=", "path", ".", "split", "(", "'.'", ")", "index", "=", "0", "components_len", "=", "len", "(", "components", ")", "module_name", "=", "components", "[", "0", "]", "# try to resolve an absolute path", "try", ":", "result", "=", "import_module", "(", "module_name", ")", "except", "ImportError", ":", "# resolve element globals or locals of the from previous frame", "previous_frame", "=", "currentframe", "(", ")", ".", "f_back", "if", "module_name", "in", "previous_frame", ".", "f_locals", ":", "result", "=", "previous_frame", ".", "f_locals", "[", "module_name", "]", "elif", "module_name", "in", "previous_frame", ".", "f_globals", ":", "result", "=", "previous_frame", ".", "f_globals", "[", "module_name", "]", "found", "=", "result", "is", "not", "generated_result", "if", "found", ":", "if", "components_len", ">", "1", ":", "index", "=", "1", "# try to import all sub-modules/packages", "try", ":", "# check if name is defined from an external module", "# find the right module", "while", "index", "<", "components_len", ":", "module_name", "=", "'{0}.{1}'", ".", "format", "(", "module_name", ",", "components", "[", "index", "]", ")", "result", "=", "import_module", "(", "module_name", ")", "index", "+=", "1", "except", "ImportError", ":", "# path sub-module content", "try", ":", "if", "PY26", 
":", "# when __import__ is used", "index", "=", "1", "# restart count of pathing", "while", "index", "<", "components_len", ":", "result", "=", "getattr", "(", "result", ",", "components", "[", "index", "]", ")", "index", "+=", "1", "except", "AttributeError", ":", "raise", "ImportError", "(", "'Wrong path {0} at {1}'", ".", "format", "(", "path", ",", "components", "[", ":", "index", "]", ")", ")", "else", ":", "# in case of PY26", "if", "PY26", ":", "index", "=", "1", "while", "index", "<", "components_len", ":", "result", "=", "getattr", "(", "result", ",", "components", "[", "index", "]", ")", "index", "+=", "1", "else", ":", "found", "=", "True", "if", "found", ":", "if", "cache", ":", "# save in cache if found", "__LOOKUP_CACHE", "[", "path", "]", "=", "result", "else", ":", "raise", "ImportError", "(", "'Wrong path {0}'", ".", "format", "(", "path", ")", ")", "return", "result" ]
Get element reference from input element. The element can be a builtin/globals/scope object or is resolved from the current execution stack. :limitations: it does not resolve class methods or static values such as True, False, numbers, string and keywords. :param str path: full path to a python element. :param bool cache: if True (default), permits to reduce time complexity for lookup resolution in using cache memory to save resolved elements. :param dict scope: object scrope from where find path. For example, this scope can be locals(). Default is globals(). :param bool safe: use lookup in a safe context. A safe context avoid to reach builtins function with I/O consequences. :return: python object which is accessible through input path or raise an exception if the path is wrong. :rtype: object :raises ImportError: if path is wrong
[ "Get", "element", "reference", "from", "input", "element", "." ]
793871b98e90fd1c7ce9ef0dce839cc18fcbc6ff
https://github.com/b3j0f/utils/blob/793871b98e90fd1c7ce9ef0dce839cc18fcbc6ff/b3j0f/utils/path.py#L100-L211
249,618
b3j0f/utils
b3j0f/utils/path.py
getpath
def getpath(element): """Get full path of a given element such as the opposite of the resolve_path behaviour. :param element: must be directly defined into a module or a package and has the attribute '__name__'. :return: element absolute path. :rtype: str :raises AttributeError: if element has not the attribute __name__. :Example: >>> getpath(getpath) b3j0f.utils.path.getpath """ if not hasattr(element, '__name__'): raise AttributeError( 'element {0} must have the attribute __name__'.format(element) ) result = element.__name__ if ismodule(element) else \ '{0}.{1}'.format(element.__module__, element.__name__) return result
python
def getpath(element): """Get full path of a given element such as the opposite of the resolve_path behaviour. :param element: must be directly defined into a module or a package and has the attribute '__name__'. :return: element absolute path. :rtype: str :raises AttributeError: if element has not the attribute __name__. :Example: >>> getpath(getpath) b3j0f.utils.path.getpath """ if not hasattr(element, '__name__'): raise AttributeError( 'element {0} must have the attribute __name__'.format(element) ) result = element.__name__ if ismodule(element) else \ '{0}.{1}'.format(element.__module__, element.__name__) return result
[ "def", "getpath", "(", "element", ")", ":", "if", "not", "hasattr", "(", "element", ",", "'__name__'", ")", ":", "raise", "AttributeError", "(", "'element {0} must have the attribute __name__'", ".", "format", "(", "element", ")", ")", "result", "=", "element", ".", "__name__", "if", "ismodule", "(", "element", ")", "else", "'{0}.{1}'", ".", "format", "(", "element", ".", "__module__", ",", "element", ".", "__name__", ")", "return", "result" ]
Get full path of a given element such as the opposite of the resolve_path behaviour. :param element: must be directly defined into a module or a package and has the attribute '__name__'. :return: element absolute path. :rtype: str :raises AttributeError: if element has not the attribute __name__. :Example: >>> getpath(getpath) b3j0f.utils.path.getpath
[ "Get", "full", "path", "of", "a", "given", "element", "such", "as", "the", "opposite", "of", "the", "resolve_path", "behaviour", "." ]
793871b98e90fd1c7ce9ef0dce839cc18fcbc6ff
https://github.com/b3j0f/utils/blob/793871b98e90fd1c7ce9ef0dce839cc18fcbc6ff/b3j0f/utils/path.py#L214-L240
249,619
bbiskup/purkinje-messages
purkinje_messages/message.py
register_eventclass
def register_eventclass(event_id): """Decorator for registering event classes for parsing """ def register(cls): if not issubclass(cls, Event): raise MessageException(('Cannot register a class that' ' is not a subclass of Event')) EVENT_REGISTRY[event_id] = cls logger.debug('######### Event registry is now: {0}'.format( EVENT_REGISTRY)) return cls return register
python
def register_eventclass(event_id): """Decorator for registering event classes for parsing """ def register(cls): if not issubclass(cls, Event): raise MessageException(('Cannot register a class that' ' is not a subclass of Event')) EVENT_REGISTRY[event_id] = cls logger.debug('######### Event registry is now: {0}'.format( EVENT_REGISTRY)) return cls return register
[ "def", "register_eventclass", "(", "event_id", ")", ":", "def", "register", "(", "cls", ")", ":", "if", "not", "issubclass", "(", "cls", ",", "Event", ")", ":", "raise", "MessageException", "(", "(", "'Cannot register a class that'", "' is not a subclass of Event'", ")", ")", "EVENT_REGISTRY", "[", "event_id", "]", "=", "cls", "logger", ".", "debug", "(", "'######### Event registry is now: {0}'", ".", "format", "(", "EVENT_REGISTRY", ")", ")", "return", "cls", "return", "register" ]
Decorator for registering event classes for parsing
[ "Decorator", "for", "registering", "event", "classes", "for", "parsing" ]
ba4217d993a86fd882bcf73d206d2910e65316dd
https://github.com/bbiskup/purkinje-messages/blob/ba4217d993a86fd882bcf73d206d2910e65316dd/purkinje_messages/message.py#L141-L152
249,620
stevepeak/inquiry
inquiry/helpers.py
json_minify
def json_minify(string, strip_space=True): # pragma: no cover """Removes whitespace from json strings, returning the string """ in_string = False in_multi = False in_single = False new_str = [] index = 0 for match in re.finditer(TOKENIZER, string): if not (in_multi or in_single): tmp = string[index:match.start()] if not in_string and strip_space: # replace white space as defined in standard tmp = re.sub('[ \t\n\r]+', '', tmp) new_str.append(tmp) index = match.end() val = match.group() if val == '"' and not (in_multi or in_single): escaped = END_SLASHES_RE.search(string, 0, match.start()) # start of string or unescaped quote character to end string if not in_string or (escaped is None or len(escaped.group()) % 2 == 0): in_string = not in_string index -= 1 # include " character in next catch elif not (in_string or in_multi or in_single): if val == '/*': in_multi = True elif val == '//': in_single = True elif val == '*/' and in_multi and not (in_string or in_single): in_multi = False elif val in '\r\n' and not (in_multi or in_string) and in_single: in_single = False elif not ((in_multi or in_single) or (val in ' \r\n\t' and strip_space)): new_str.append(val) new_str.append(string[index:]) return ''.join(new_str)
python
def json_minify(string, strip_space=True): # pragma: no cover """Removes whitespace from json strings, returning the string """ in_string = False in_multi = False in_single = False new_str = [] index = 0 for match in re.finditer(TOKENIZER, string): if not (in_multi or in_single): tmp = string[index:match.start()] if not in_string and strip_space: # replace white space as defined in standard tmp = re.sub('[ \t\n\r]+', '', tmp) new_str.append(tmp) index = match.end() val = match.group() if val == '"' and not (in_multi or in_single): escaped = END_SLASHES_RE.search(string, 0, match.start()) # start of string or unescaped quote character to end string if not in_string or (escaped is None or len(escaped.group()) % 2 == 0): in_string = not in_string index -= 1 # include " character in next catch elif not (in_string or in_multi or in_single): if val == '/*': in_multi = True elif val == '//': in_single = True elif val == '*/' and in_multi and not (in_string or in_single): in_multi = False elif val in '\r\n' and not (in_multi or in_string) and in_single: in_single = False elif not ((in_multi or in_single) or (val in ' \r\n\t' and strip_space)): new_str.append(val) new_str.append(string[index:]) return ''.join(new_str)
[ "def", "json_minify", "(", "string", ",", "strip_space", "=", "True", ")", ":", "# pragma: no cover", "in_string", "=", "False", "in_multi", "=", "False", "in_single", "=", "False", "new_str", "=", "[", "]", "index", "=", "0", "for", "match", "in", "re", ".", "finditer", "(", "TOKENIZER", ",", "string", ")", ":", "if", "not", "(", "in_multi", "or", "in_single", ")", ":", "tmp", "=", "string", "[", "index", ":", "match", ".", "start", "(", ")", "]", "if", "not", "in_string", "and", "strip_space", ":", "# replace white space as defined in standard", "tmp", "=", "re", ".", "sub", "(", "'[ \\t\\n\\r]+'", ",", "''", ",", "tmp", ")", "new_str", ".", "append", "(", "tmp", ")", "index", "=", "match", ".", "end", "(", ")", "val", "=", "match", ".", "group", "(", ")", "if", "val", "==", "'\"'", "and", "not", "(", "in_multi", "or", "in_single", ")", ":", "escaped", "=", "END_SLASHES_RE", ".", "search", "(", "string", ",", "0", ",", "match", ".", "start", "(", ")", ")", "# start of string or unescaped quote character to end string", "if", "not", "in_string", "or", "(", "escaped", "is", "None", "or", "len", "(", "escaped", ".", "group", "(", ")", ")", "%", "2", "==", "0", ")", ":", "in_string", "=", "not", "in_string", "index", "-=", "1", "# include \" character in next catch", "elif", "not", "(", "in_string", "or", "in_multi", "or", "in_single", ")", ":", "if", "val", "==", "'/*'", ":", "in_multi", "=", "True", "elif", "val", "==", "'//'", ":", "in_single", "=", "True", "elif", "val", "==", "'*/'", "and", "in_multi", "and", "not", "(", "in_string", "or", "in_single", ")", ":", "in_multi", "=", "False", "elif", "val", "in", "'\\r\\n'", "and", "not", "(", "in_multi", "or", "in_string", ")", "and", "in_single", ":", "in_single", "=", "False", "elif", "not", "(", "(", "in_multi", "or", "in_single", ")", "or", "(", "val", "in", "' \\r\\n\\t'", "and", "strip_space", ")", ")", ":", "new_str", ".", "append", "(", "val", ")", "new_str", ".", "append", "(", "string", "[", "index", 
":", "]", ")", "return", "''", ".", "join", "(", "new_str", ")" ]
Removes whitespace from json strings, returning the string
[ "Removes", "whitespace", "from", "json", "strings", "returning", "the", "string" ]
f6ea435c302560ba19985b5d4ce2c97e2f321508
https://github.com/stevepeak/inquiry/blob/f6ea435c302560ba19985b5d4ce2c97e2f321508/inquiry/helpers.py#L110-L152
249,621
alexlovelltroy/django-classy-mail
classy_mail/mixins/base.py
resolve_template
def resolve_template(template): "Accepts a template object, path-to-template or list of paths" if isinstance(template, (list, tuple)): return loader.select_template(template) elif isinstance(template, basestring): try: return loader.get_template(template) except TemplateDoesNotExist: return None else: return template
python
def resolve_template(template): "Accepts a template object, path-to-template or list of paths" if isinstance(template, (list, tuple)): return loader.select_template(template) elif isinstance(template, basestring): try: return loader.get_template(template) except TemplateDoesNotExist: return None else: return template
[ "def", "resolve_template", "(", "template", ")", ":", "if", "isinstance", "(", "template", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "loader", ".", "select_template", "(", "template", ")", "elif", "isinstance", "(", "template", ",", "basestring", ")", ":", "try", ":", "return", "loader", ".", "get_template", "(", "template", ")", "except", "TemplateDoesNotExist", ":", "return", "None", "else", ":", "return", "template" ]
Accepts a template object, path-to-template or list of paths
[ "Accepts", "a", "template", "object", "path", "-", "to", "-", "template", "or", "list", "of", "paths" ]
1f225555bce44d8dbc4c695a4b7ffc71ac500168
https://github.com/alexlovelltroy/django-classy-mail/blob/1f225555bce44d8dbc4c695a4b7ffc71ac500168/classy_mail/mixins/base.py#L6-L16
249,622
brentpayne/kennyg
kennyg/element.py
DateValue.value
def value(self, value, *args, **kwargs): """ Takes a string value and returns the Date based on the format """ from datetime import datetime value = self.obj.value(value, *args, **kwargs) try: rv = datetime.strptime(value, self.format) except ValueError as _: # noqa rv = None return rv
python
def value(self, value, *args, **kwargs): """ Takes a string value and returns the Date based on the format """ from datetime import datetime value = self.obj.value(value, *args, **kwargs) try: rv = datetime.strptime(value, self.format) except ValueError as _: # noqa rv = None return rv
[ "def", "value", "(", "self", ",", "value", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "datetime", "import", "datetime", "value", "=", "self", ".", "obj", ".", "value", "(", "value", ",", "*", "args", ",", "*", "*", "kwargs", ")", "try", ":", "rv", "=", "datetime", ".", "strptime", "(", "value", ",", "self", ".", "format", ")", "except", "ValueError", "as", "_", ":", "# noqa", "rv", "=", "None", "return", "rv" ]
Takes a string value and returns the Date based on the format
[ "Takes", "a", "string", "value", "and", "returns", "the", "Date", "based", "on", "the", "format" ]
c688dd6d270bb7dcdcce7f08c54eafb1bf3232f2
https://github.com/brentpayne/kennyg/blob/c688dd6d270bb7dcdcce7f08c54eafb1bf3232f2/kennyg/element.py#L123-L133
249,623
klmitch/framer
framer/transport.py
FramerAdaptor._interpret_framer
def _interpret_framer(self, args, kwargs): """ Interprets positional and keyword arguments related to framers. :param args: A tuple of positional arguments. The first such argument will be interpreted as a framer object, and the second will be interpreted as a framer state. :param kwargs: A dictionary of keyword arguments. The ``send`` and ``recv`` keyword arguments are interpreted as send and receive framers, respectively, and the ``send_state`` and ``recv_state`` keyword arguments are interpreted as states for those framers. :returns: An instance of ``FramerElement``, which may be pushed onto the framer stack. """ # Cannot specify both positional and keyword arguments, but # must provide one or the other if not args and not kwargs: raise exc.InvalidFramerSpecification( "No framers specified") elif args and kwargs: raise exc.InvalidFramerSpecification( "Cannot mix positional and keyword framer specifications") # Start with the current send and receive framers send = self._send_framer recv = self._recv_framer send_state = self._send_state recv_state = self._recv_state # Now, is it positional style? if args: send = args[0] recv = args[0] # Do we have a state? if len(args) > 1: send_state = args[1] recv_state = args[1] else: # Allocate one state = framers.FramerState() # Initialize it send.initialize_state(state) send_state = state recv_state = state else: # OK, it's keyword style; do we have a send framer? if 'send' in kwargs: send = kwargs['send'] # Do we have a send state? if 'send_state' in kwargs: send_state = kwargs['send_state'] else: # Allocate one and initialize it send_state = framers.FramerState() send.initialize_state(send_state) # How about a receive framer? if 'recv' in kwargs: recv = kwargs['recv'] # Do we have a recv state? 
if 'recv_state' in kwargs: recv_state = kwargs['recv_state'] else: # Allocate one and initialize it recv_state = framers.FramerState() recv.initialize_state(recv_state) # Create and return a FramerElement return FramerElement(send, recv, send_state, recv_state)
python
def _interpret_framer(self, args, kwargs): """ Interprets positional and keyword arguments related to framers. :param args: A tuple of positional arguments. The first such argument will be interpreted as a framer object, and the second will be interpreted as a framer state. :param kwargs: A dictionary of keyword arguments. The ``send`` and ``recv`` keyword arguments are interpreted as send and receive framers, respectively, and the ``send_state`` and ``recv_state`` keyword arguments are interpreted as states for those framers. :returns: An instance of ``FramerElement``, which may be pushed onto the framer stack. """ # Cannot specify both positional and keyword arguments, but # must provide one or the other if not args and not kwargs: raise exc.InvalidFramerSpecification( "No framers specified") elif args and kwargs: raise exc.InvalidFramerSpecification( "Cannot mix positional and keyword framer specifications") # Start with the current send and receive framers send = self._send_framer recv = self._recv_framer send_state = self._send_state recv_state = self._recv_state # Now, is it positional style? if args: send = args[0] recv = args[0] # Do we have a state? if len(args) > 1: send_state = args[1] recv_state = args[1] else: # Allocate one state = framers.FramerState() # Initialize it send.initialize_state(state) send_state = state recv_state = state else: # OK, it's keyword style; do we have a send framer? if 'send' in kwargs: send = kwargs['send'] # Do we have a send state? if 'send_state' in kwargs: send_state = kwargs['send_state'] else: # Allocate one and initialize it send_state = framers.FramerState() send.initialize_state(send_state) # How about a receive framer? if 'recv' in kwargs: recv = kwargs['recv'] # Do we have a recv state? 
if 'recv_state' in kwargs: recv_state = kwargs['recv_state'] else: # Allocate one and initialize it recv_state = framers.FramerState() recv.initialize_state(recv_state) # Create and return a FramerElement return FramerElement(send, recv, send_state, recv_state)
[ "def", "_interpret_framer", "(", "self", ",", "args", ",", "kwargs", ")", ":", "# Cannot specify both positional and keyword arguments, but", "# must provide one or the other", "if", "not", "args", "and", "not", "kwargs", ":", "raise", "exc", ".", "InvalidFramerSpecification", "(", "\"No framers specified\"", ")", "elif", "args", "and", "kwargs", ":", "raise", "exc", ".", "InvalidFramerSpecification", "(", "\"Cannot mix positional and keyword framer specifications\"", ")", "# Start with the current send and receive framers", "send", "=", "self", ".", "_send_framer", "recv", "=", "self", ".", "_recv_framer", "send_state", "=", "self", ".", "_send_state", "recv_state", "=", "self", ".", "_recv_state", "# Now, is it positional style?", "if", "args", ":", "send", "=", "args", "[", "0", "]", "recv", "=", "args", "[", "0", "]", "# Do we have a state?", "if", "len", "(", "args", ")", ">", "1", ":", "send_state", "=", "args", "[", "1", "]", "recv_state", "=", "args", "[", "1", "]", "else", ":", "# Allocate one", "state", "=", "framers", ".", "FramerState", "(", ")", "# Initialize it", "send", ".", "initialize_state", "(", "state", ")", "send_state", "=", "state", "recv_state", "=", "state", "else", ":", "# OK, it's keyword style; do we have a send framer?", "if", "'send'", "in", "kwargs", ":", "send", "=", "kwargs", "[", "'send'", "]", "# Do we have a send state?", "if", "'send_state'", "in", "kwargs", ":", "send_state", "=", "kwargs", "[", "'send_state'", "]", "else", ":", "# Allocate one and initialize it", "send_state", "=", "framers", ".", "FramerState", "(", ")", "send", ".", "initialize_state", "(", "send_state", ")", "# How about a receive framer?", "if", "'recv'", "in", "kwargs", ":", "recv", "=", "kwargs", "[", "'recv'", "]", "# Do we have a recv state?", "if", "'recv_state'", "in", "kwargs", ":", "recv_state", "=", "kwargs", "[", "'recv_state'", "]", "else", ":", "# Allocate one and initialize it", "recv_state", "=", "framers", ".", "FramerState", "(", ")", 
"recv", ".", "initialize_state", "(", "recv_state", ")", "# Create and return a FramerElement", "return", "FramerElement", "(", "send", ",", "recv", ",", "send_state", ",", "recv_state", ")" ]
Interprets positional and keyword arguments related to framers. :param args: A tuple of positional arguments. The first such argument will be interpreted as a framer object, and the second will be interpreted as a framer state. :param kwargs: A dictionary of keyword arguments. The ``send`` and ``recv`` keyword arguments are interpreted as send and receive framers, respectively, and the ``send_state`` and ``recv_state`` keyword arguments are interpreted as states for those framers. :returns: An instance of ``FramerElement``, which may be pushed onto the framer stack.
[ "Interprets", "positional", "and", "keyword", "arguments", "related", "to", "framers", "." ]
bd34cee9737793dab61d1d8973930b64bd08acb4
https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/transport.py#L131-L210
249,624
klmitch/framer
framer/transport.py
FramerAdaptor.connection_made
def connection_made(self, transport): """ Called by the underlying transport when a connection is made. :param transport: The transport representing the connection. """ # Save the underlying transport self._transport = transport # Call connection_made() on the client protocol, passing # ourself as the transport self._client.connection_made(self)
python
def connection_made(self, transport): """ Called by the underlying transport when a connection is made. :param transport: The transport representing the connection. """ # Save the underlying transport self._transport = transport # Call connection_made() on the client protocol, passing # ourself as the transport self._client.connection_made(self)
[ "def", "connection_made", "(", "self", ",", "transport", ")", ":", "# Save the underlying transport", "self", ".", "_transport", "=", "transport", "# Call connection_made() on the client protocol, passing", "# ourself as the transport", "self", ".", "_client", ".", "connection_made", "(", "self", ")" ]
Called by the underlying transport when a connection is made. :param transport: The transport representing the connection.
[ "Called", "by", "the", "underlying", "transport", "when", "a", "connection", "is", "made", "." ]
bd34cee9737793dab61d1d8973930b64bd08acb4
https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/transport.py#L212-L224
249,625
klmitch/framer
framer/transport.py
FramerAdaptor.data_received
def data_received(self, data): """ Called by the underlying transport when data is received. :param data: The data received on the connection. """ # First, add the data to the receive buffer self._recv_buf += data # Now, pass all frames we can find to the client protocol while self._recv_buf and not self._recv_paused: try: # Extract one frame frame = self._recv_framer.to_frame(self._recv_buf, self._recv_state) except exc.NoFrames: # There's data in the buffer, but no complete frames break # Now call the client protocol's frame_received() method self._client.frame_received(frame)
python
def data_received(self, data): """ Called by the underlying transport when data is received. :param data: The data received on the connection. """ # First, add the data to the receive buffer self._recv_buf += data # Now, pass all frames we can find to the client protocol while self._recv_buf and not self._recv_paused: try: # Extract one frame frame = self._recv_framer.to_frame(self._recv_buf, self._recv_state) except exc.NoFrames: # There's data in the buffer, but no complete frames break # Now call the client protocol's frame_received() method self._client.frame_received(frame)
[ "def", "data_received", "(", "self", ",", "data", ")", ":", "# First, add the data to the receive buffer", "self", ".", "_recv_buf", "+=", "data", "# Now, pass all frames we can find to the client protocol", "while", "self", ".", "_recv_buf", "and", "not", "self", ".", "_recv_paused", ":", "try", ":", "# Extract one frame", "frame", "=", "self", ".", "_recv_framer", ".", "to_frame", "(", "self", ".", "_recv_buf", ",", "self", ".", "_recv_state", ")", "except", "exc", ".", "NoFrames", ":", "# There's data in the buffer, but no complete frames", "break", "# Now call the client protocol's frame_received() method", "self", ".", "_client", ".", "frame_received", "(", "frame", ")" ]
Called by the underlying transport when data is received. :param data: The data received on the connection.
[ "Called", "by", "the", "underlying", "transport", "when", "data", "is", "received", "." ]
bd34cee9737793dab61d1d8973930b64bd08acb4
https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/transport.py#L257-L278
249,626
klmitch/framer
framer/transport.py
FramerAdaptor.get_extra_info
def get_extra_info(self, name, default=None): """ Called by the client protocol to return optional transport information. Information requests not recognized by the ``FramerProtocol`` are passed on to the underlying transport. The values of ``name`` recognized directly by ``FramerProtocol`` are: =============== ============================================ Value Description =============== ============================================ send_framer The active framer for the send direction. send_state The state for the send framer. recv_framer The active framer for the receive direction. recv_state The state for the receive framer. recv_buf The current receive buffer. recv_paused ``True`` if reading is paused. client_protocol The client ``FramedProtocol``. transport The underlying transport. =============== ============================================ :param name: A string representing the piece of transport-specific information to get. :param default: The value to return if the information doesn't exist. :returns: The requested data. """ # Handle data we know about if name in self._handlers: return self._handlers[name](self) # Call get_extra_info() on the transport return self._transport.get_extra_info(name, default=default)
python
def get_extra_info(self, name, default=None): """ Called by the client protocol to return optional transport information. Information requests not recognized by the ``FramerProtocol`` are passed on to the underlying transport. The values of ``name`` recognized directly by ``FramerProtocol`` are: =============== ============================================ Value Description =============== ============================================ send_framer The active framer for the send direction. send_state The state for the send framer. recv_framer The active framer for the receive direction. recv_state The state for the receive framer. recv_buf The current receive buffer. recv_paused ``True`` if reading is paused. client_protocol The client ``FramedProtocol``. transport The underlying transport. =============== ============================================ :param name: A string representing the piece of transport-specific information to get. :param default: The value to return if the information doesn't exist. :returns: The requested data. """ # Handle data we know about if name in self._handlers: return self._handlers[name](self) # Call get_extra_info() on the transport return self._transport.get_extra_info(name, default=default)
[ "def", "get_extra_info", "(", "self", ",", "name", ",", "default", "=", "None", ")", ":", "# Handle data we know about", "if", "name", "in", "self", ".", "_handlers", ":", "return", "self", ".", "_handlers", "[", "name", "]", "(", "self", ")", "# Call get_extra_info() on the transport", "return", "self", ".", "_transport", ".", "get_extra_info", "(", "name", ",", "default", "=", "default", ")" ]
Called by the client protocol to return optional transport information. Information requests not recognized by the ``FramerProtocol`` are passed on to the underlying transport. The values of ``name`` recognized directly by ``FramerProtocol`` are: =============== ============================================ Value Description =============== ============================================ send_framer The active framer for the send direction. send_state The state for the send framer. recv_framer The active framer for the receive direction. recv_state The state for the receive framer. recv_buf The current receive buffer. recv_paused ``True`` if reading is paused. client_protocol The client ``FramedProtocol``. transport The underlying transport. =============== ============================================ :param name: A string representing the piece of transport-specific information to get. :param default: The value to return if the information doesn't exist. :returns: The requested data.
[ "Called", "by", "the", "client", "protocol", "to", "return", "optional", "transport", "information", ".", "Information", "requests", "not", "recognized", "by", "the", "FramerProtocol", "are", "passed", "on", "to", "the", "underlying", "transport", "." ]
bd34cee9737793dab61d1d8973930b64bd08acb4
https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/transport.py#L307-L342
249,627
klmitch/framer
framer/transport.py
FramerAdaptor.set_write_buffer_limits
def set_write_buffer_limits(self, high=None, low=None): """ Called by the client protocol to set the high- and low-water limits for write flow control. These two values control when call the protocol's ``pause_writing()`` and ``resume_writing()`` methods are called. :param high: The high-water limit. Must be a non-negative integer greater than or equal to ``low``, if both are specified. :param low: The low-water limit. Must be a non-negative integer less than or equal to ``high``, if both are specified. If only ``high`` is specified, defaults to an implementation-specific value less than or equal to ``high``. """ # Call set_write_buffer_limits() on the transport self._transport.set_write_buffer_limits(high=high, low=low)
python
def set_write_buffer_limits(self, high=None, low=None): """ Called by the client protocol to set the high- and low-water limits for write flow control. These two values control when call the protocol's ``pause_writing()`` and ``resume_writing()`` methods are called. :param high: The high-water limit. Must be a non-negative integer greater than or equal to ``low``, if both are specified. :param low: The low-water limit. Must be a non-negative integer less than or equal to ``high``, if both are specified. If only ``high`` is specified, defaults to an implementation-specific value less than or equal to ``high``. """ # Call set_write_buffer_limits() on the transport self._transport.set_write_buffer_limits(high=high, low=low)
[ "def", "set_write_buffer_limits", "(", "self", ",", "high", "=", "None", ",", "low", "=", "None", ")", ":", "# Call set_write_buffer_limits() on the transport", "self", ".", "_transport", ".", "set_write_buffer_limits", "(", "high", "=", "high", ",", "low", "=", "low", ")" ]
Called by the client protocol to set the high- and low-water limits for write flow control. These two values control when call the protocol's ``pause_writing()`` and ``resume_writing()`` methods are called. :param high: The high-water limit. Must be a non-negative integer greater than or equal to ``low``, if both are specified. :param low: The low-water limit. Must be a non-negative integer less than or equal to ``high``, if both are specified. If only ``high`` is specified, defaults to an implementation-specific value less than or equal to ``high``.
[ "Called", "by", "the", "client", "protocol", "to", "set", "the", "high", "-", "and", "low", "-", "water", "limits", "for", "write", "flow", "control", "." ]
bd34cee9737793dab61d1d8973930b64bd08acb4
https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/transport.py#L413-L433
249,628
klmitch/framer
framer/transport.py
FramerAdaptor.send_frame
def send_frame(self, frame): """ Called by the client protocol to send a frame to the remote peer. This method does not block; it buffers the data and arranges for it to be sent out asynchronously. :param frame: The frame to send to the peer. Must be in the format expected by the currently active send framer. """ # Convert the frame to bytes and write them to the connection data = self._send_framer.to_bytes(frame, self._send_state) self._transport.write(data)
python
def send_frame(self, frame): """ Called by the client protocol to send a frame to the remote peer. This method does not block; it buffers the data and arranges for it to be sent out asynchronously. :param frame: The frame to send to the peer. Must be in the format expected by the currently active send framer. """ # Convert the frame to bytes and write them to the connection data = self._send_framer.to_bytes(frame, self._send_state) self._transport.write(data)
[ "def", "send_frame", "(", "self", ",", "frame", ")", ":", "# Convert the frame to bytes and write them to the connection", "data", "=", "self", ".", "_send_framer", ".", "to_bytes", "(", "frame", ",", "self", ".", "_send_state", ")", "self", ".", "_transport", ".", "write", "(", "data", ")" ]
Called by the client protocol to send a frame to the remote peer. This method does not block; it buffers the data and arranges for it to be sent out asynchronously. :param frame: The frame to send to the peer. Must be in the format expected by the currently active send framer.
[ "Called", "by", "the", "client", "protocol", "to", "send", "a", "frame", "to", "the", "remote", "peer", ".", "This", "method", "does", "not", "block", ";", "it", "buffers", "the", "data", "and", "arranges", "for", "it", "to", "be", "sent", "out", "asynchronously", "." ]
bd34cee9737793dab61d1d8973930b64bd08acb4
https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/transport.py#L447-L460
249,629
exekias/droplet
droplet/web/menus.py
menu
def menu(): """ Return global menu composed from all modules menu. This method will compose the global menu by calling menu() function for module, it should be located under module_path.menu module """ root = MenuItem('') for mod in droplet.modules(): if mod.installed: module_path = mod.__class__.__module__.rsplit('.', 1)[0] menu = import_module(module_path + '.menu') if menu: menu.menu(root) return root
python
def menu(): """ Return global menu composed from all modules menu. This method will compose the global menu by calling menu() function for module, it should be located under module_path.menu module """ root = MenuItem('') for mod in droplet.modules(): if mod.installed: module_path = mod.__class__.__module__.rsplit('.', 1)[0] menu = import_module(module_path + '.menu') if menu: menu.menu(root) return root
[ "def", "menu", "(", ")", ":", "root", "=", "MenuItem", "(", "''", ")", "for", "mod", "in", "droplet", ".", "modules", "(", ")", ":", "if", "mod", ".", "installed", ":", "module_path", "=", "mod", ".", "__class__", ".", "__module__", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", "menu", "=", "import_module", "(", "module_path", "+", "'.menu'", ")", "if", "menu", ":", "menu", ".", "menu", "(", "root", ")", "return", "root" ]
Return global menu composed from all modules menu. This method will compose the global menu by calling menu() function for module, it should be located under module_path.menu module
[ "Return", "global", "menu", "composed", "from", "all", "modules", "menu", "." ]
aeac573a2c1c4b774e99d5414a1c79b1bb734941
https://github.com/exekias/droplet/blob/aeac573a2c1c4b774e99d5414a1c79b1bb734941/droplet/web/menus.py#L56-L72
249,630
exekias/droplet
droplet/web/menus.py
MenuItem.append
def append(self, item): """ Add the given item as children """ if self.url: raise TypeError('Menu items with URL cannot have childrens') # Look for already present common node if not item.is_leaf(): for current_item in self.items: if item.name == current_item.name: for children in item.items: current_item.append(children) return # First insertion self.items.append(item)
python
def append(self, item): """ Add the given item as children """ if self.url: raise TypeError('Menu items with URL cannot have childrens') # Look for already present common node if not item.is_leaf(): for current_item in self.items: if item.name == current_item.name: for children in item.items: current_item.append(children) return # First insertion self.items.append(item)
[ "def", "append", "(", "self", ",", "item", ")", ":", "if", "self", ".", "url", ":", "raise", "TypeError", "(", "'Menu items with URL cannot have childrens'", ")", "# Look for already present common node", "if", "not", "item", ".", "is_leaf", "(", ")", ":", "for", "current_item", "in", "self", ".", "items", ":", "if", "item", ".", "name", "==", "current_item", ".", "name", ":", "for", "children", "in", "item", ".", "items", ":", "current_item", ".", "append", "(", "children", ")", "return", "# First insertion", "self", ".", "items", ".", "append", "(", "item", ")" ]
Add the given item as children
[ "Add", "the", "given", "item", "as", "children" ]
aeac573a2c1c4b774e99d5414a1c79b1bb734941
https://github.com/exekias/droplet/blob/aeac573a2c1c4b774e99d5414a1c79b1bb734941/droplet/web/menus.py#L34-L50
249,631
ardydedase/pycouchbase
pycouchbase/document.py
Document.doc_id
def doc_id(self): """Returns the couchbase document's id, object property. :returns: The document id (that is created from :attr:'doc_type' and :attr:'__key_field__' value, or auto-hashed document id at first saving). :rtype: unicode """ if self.id: return '%s_%s' % (self.doc_type, self.id.lower()) return self._hashed_key
python
def doc_id(self): """Returns the couchbase document's id, object property. :returns: The document id (that is created from :attr:'doc_type' and :attr:'__key_field__' value, or auto-hashed document id at first saving). :rtype: unicode """ if self.id: return '%s_%s' % (self.doc_type, self.id.lower()) return self._hashed_key
[ "def", "doc_id", "(", "self", ")", ":", "if", "self", ".", "id", ":", "return", "'%s_%s'", "%", "(", "self", ".", "doc_type", ",", "self", ".", "id", ".", "lower", "(", ")", ")", "return", "self", ".", "_hashed_key" ]
Returns the couchbase document's id, object property. :returns: The document id (that is created from :attr:'doc_type' and :attr:'__key_field__' value, or auto-hashed document id at first saving). :rtype: unicode
[ "Returns", "the", "couchbase", "document", "s", "id", "object", "property", "." ]
6f010b4d2ef41aead2366878d0cf0b1284c0db0e
https://github.com/ardydedase/pycouchbase/blob/6f010b4d2ef41aead2366878d0cf0b1284c0db0e/pycouchbase/document.py#L97-L107
249,632
ardydedase/pycouchbase
pycouchbase/document.py
Document.touch
def touch(self, expiration): """Updates the current document's expiration value. :param expiration: Expiration in seconds for the document to be removed by couchbase server, defaults to 0 - will never expire. :type expiration: int :returns: Response from CouchbaseClient. :rtype: unicode :raises: :exc:'cbwrapper.errors.DoesNotExist' or :exc:'couchbase.exception.TemporaryFailError' """ if not self.cas_value or not self.doc_id: raise self.DoesNotExist(self) return self.bucket.touch(self.doc_id, expiration)
python
def touch(self, expiration): """Updates the current document's expiration value. :param expiration: Expiration in seconds for the document to be removed by couchbase server, defaults to 0 - will never expire. :type expiration: int :returns: Response from CouchbaseClient. :rtype: unicode :raises: :exc:'cbwrapper.errors.DoesNotExist' or :exc:'couchbase.exception.TemporaryFailError' """ if not self.cas_value or not self.doc_id: raise self.DoesNotExist(self) return self.bucket.touch(self.doc_id, expiration)
[ "def", "touch", "(", "self", ",", "expiration", ")", ":", "if", "not", "self", ".", "cas_value", "or", "not", "self", ".", "doc_id", ":", "raise", "self", ".", "DoesNotExist", "(", "self", ")", "return", "self", ".", "bucket", ".", "touch", "(", "self", ".", "doc_id", ",", "expiration", ")" ]
Updates the current document's expiration value. :param expiration: Expiration in seconds for the document to be removed by couchbase server, defaults to 0 - will never expire. :type expiration: int :returns: Response from CouchbaseClient. :rtype: unicode :raises: :exc:'cbwrapper.errors.DoesNotExist' or :exc:'couchbase.exception.TemporaryFailError'
[ "Updates", "the", "current", "document", "s", "expiration", "value", "." ]
6f010b4d2ef41aead2366878d0cf0b1284c0db0e
https://github.com/ardydedase/pycouchbase/blob/6f010b4d2ef41aead2366878d0cf0b1284c0db0e/pycouchbase/document.py#L245-L258
249,633
vecnet/vecnet.simulation
vecnet/simulation/sim_status.py
get_description
def get_description(status_code): """ Get the description for a status code. """ description = _descriptions.get(status_code) if description is None: description = 'code = %s (no description)' % str(status_code) return description
python
def get_description(status_code): """ Get the description for a status code. """ description = _descriptions.get(status_code) if description is None: description = 'code = %s (no description)' % str(status_code) return description
[ "def", "get_description", "(", "status_code", ")", ":", "description", "=", "_descriptions", ".", "get", "(", "status_code", ")", "if", "description", "is", "None", ":", "description", "=", "'code = %s (no description)'", "%", "str", "(", "status_code", ")", "return", "description" ]
Get the description for a status code.
[ "Get", "the", "description", "for", "a", "status", "code", "." ]
3a4b3df7b12418c6fa8a7d9cd49656a1c031fc0e
https://github.com/vecnet/vecnet.simulation/blob/3a4b3df7b12418c6fa8a7d9cd49656a1c031fc0e/vecnet/simulation/sim_status.py#L47-L54
249,634
emilssolmanis/tapes
tapes/meta.py
metered_meta
def metered_meta(metrics, base=type): """Creates a metaclass that will add the specified metrics at a path parametrized on the dynamic class name. Prime use case is for base classes if all subclasses need separate metrics and / or the metrics need to be used in base class methods, e.g., Tornado's ``RequestHandler`` like:: import tapes import tornado import abc registry = tapes.Registry() class MyCommonBaseHandler(tornado.web.RequestHandler): __metaclass__ = metered_meta([ ('latency', 'my.http.endpoints.{}.latency', registry.timer) ], base=abc.ABCMeta) @tornado.gen.coroutine def get(self, *args, **kwargs): with self.latency.time(): yield self.get_impl(*args, **kwargs) @abc.abstractmethod def get_impl(self, *args, **kwargs): pass class MyImplHandler(MyCommonBaseHandler): @tornado.gen.coroutine def get_impl(self, *args, **kwargs): self.finish({'stuff': 'something'}) class MyOtherImplHandler(MyCommonBaseHandler): @tornado.gen.coroutine def get_impl(self, *args, **kwargs): self.finish({'other stuff': 'more of something'}) This would produce two different relevant metrics, - ``my.http.endpoints.MyImplHandler.latency`` - ``my.http.endpoints.MyOtherImplHandler.latency`` and, as an unfortunate side effect of adding it in the base class, a ``my.http.endpoints.MyCommonBaseHandler.latency`` too. :param metrics: list of (attr_name, metrics_path_template, metrics_factory) :param base: optional meta base if other than `type` :return: a metaclass that populates the class with the needed metrics at paths based on the dynamic class name """ class _MeteredMeta(base): def __new__(meta, name, bases, dict_): new_dict = dict(**dict_) for attr_name, template, factory in metrics: new_dict[attr_name] = factory(template.format(name)) return super(_MeteredMeta, meta).__new__(meta, name, bases, new_dict) return _MeteredMeta
python
def metered_meta(metrics, base=type): """Creates a metaclass that will add the specified metrics at a path parametrized on the dynamic class name. Prime use case is for base classes if all subclasses need separate metrics and / or the metrics need to be used in base class methods, e.g., Tornado's ``RequestHandler`` like:: import tapes import tornado import abc registry = tapes.Registry() class MyCommonBaseHandler(tornado.web.RequestHandler): __metaclass__ = metered_meta([ ('latency', 'my.http.endpoints.{}.latency', registry.timer) ], base=abc.ABCMeta) @tornado.gen.coroutine def get(self, *args, **kwargs): with self.latency.time(): yield self.get_impl(*args, **kwargs) @abc.abstractmethod def get_impl(self, *args, **kwargs): pass class MyImplHandler(MyCommonBaseHandler): @tornado.gen.coroutine def get_impl(self, *args, **kwargs): self.finish({'stuff': 'something'}) class MyOtherImplHandler(MyCommonBaseHandler): @tornado.gen.coroutine def get_impl(self, *args, **kwargs): self.finish({'other stuff': 'more of something'}) This would produce two different relevant metrics, - ``my.http.endpoints.MyImplHandler.latency`` - ``my.http.endpoints.MyOtherImplHandler.latency`` and, as an unfortunate side effect of adding it in the base class, a ``my.http.endpoints.MyCommonBaseHandler.latency`` too. :param metrics: list of (attr_name, metrics_path_template, metrics_factory) :param base: optional meta base if other than `type` :return: a metaclass that populates the class with the needed metrics at paths based on the dynamic class name """ class _MeteredMeta(base): def __new__(meta, name, bases, dict_): new_dict = dict(**dict_) for attr_name, template, factory in metrics: new_dict[attr_name] = factory(template.format(name)) return super(_MeteredMeta, meta).__new__(meta, name, bases, new_dict) return _MeteredMeta
[ "def", "metered_meta", "(", "metrics", ",", "base", "=", "type", ")", ":", "class", "_MeteredMeta", "(", "base", ")", ":", "def", "__new__", "(", "meta", ",", "name", ",", "bases", ",", "dict_", ")", ":", "new_dict", "=", "dict", "(", "*", "*", "dict_", ")", "for", "attr_name", ",", "template", ",", "factory", "in", "metrics", ":", "new_dict", "[", "attr_name", "]", "=", "factory", "(", "template", ".", "format", "(", "name", ")", ")", "return", "super", "(", "_MeteredMeta", ",", "meta", ")", ".", "__new__", "(", "meta", ",", "name", ",", "bases", ",", "new_dict", ")", "return", "_MeteredMeta" ]
Creates a metaclass that will add the specified metrics at a path parametrized on the dynamic class name. Prime use case is for base classes if all subclasses need separate metrics and / or the metrics need to be used in base class methods, e.g., Tornado's ``RequestHandler`` like:: import tapes import tornado import abc registry = tapes.Registry() class MyCommonBaseHandler(tornado.web.RequestHandler): __metaclass__ = metered_meta([ ('latency', 'my.http.endpoints.{}.latency', registry.timer) ], base=abc.ABCMeta) @tornado.gen.coroutine def get(self, *args, **kwargs): with self.latency.time(): yield self.get_impl(*args, **kwargs) @abc.abstractmethod def get_impl(self, *args, **kwargs): pass class MyImplHandler(MyCommonBaseHandler): @tornado.gen.coroutine def get_impl(self, *args, **kwargs): self.finish({'stuff': 'something'}) class MyOtherImplHandler(MyCommonBaseHandler): @tornado.gen.coroutine def get_impl(self, *args, **kwargs): self.finish({'other stuff': 'more of something'}) This would produce two different relevant metrics, - ``my.http.endpoints.MyImplHandler.latency`` - ``my.http.endpoints.MyOtherImplHandler.latency`` and, as an unfortunate side effect of adding it in the base class, a ``my.http.endpoints.MyCommonBaseHandler.latency`` too. :param metrics: list of (attr_name, metrics_path_template, metrics_factory) :param base: optional meta base if other than `type` :return: a metaclass that populates the class with the needed metrics at paths based on the dynamic class name
[ "Creates", "a", "metaclass", "that", "will", "add", "the", "specified", "metrics", "at", "a", "path", "parametrized", "on", "the", "dynamic", "class", "name", "." ]
7797fc9ebcb359cb1ba5085570e3cab5ebcd1d3c
https://github.com/emilssolmanis/tapes/blob/7797fc9ebcb359cb1ba5085570e3cab5ebcd1d3c/tapes/meta.py#L1-L57
249,635
KnowledgeLinks/rdfframework
rdfframework/search/elasticsearchbase.py
EsBase.make_action_list
def make_action_list(self, item_list, **kwargs): ''' Generates a list of actions for sending to Elasticsearch ''' action_list = [] es_index = get2(kwargs, "es_index", self.es_index) action_type = kwargs.get("action_type","index") action_settings = {'_op_type': action_type, '_index': es_index} doc_type = kwargs.get("doc_type", self.doc_type) if not doc_type: doc_type = "unk" id_field = kwargs.get("id_field") for item in item_list: action = get_es_action_item(item, action_settings, doc_type, id_field) action_list.append(action) return action_list
python
def make_action_list(self, item_list, **kwargs): ''' Generates a list of actions for sending to Elasticsearch ''' action_list = [] es_index = get2(kwargs, "es_index", self.es_index) action_type = kwargs.get("action_type","index") action_settings = {'_op_type': action_type, '_index': es_index} doc_type = kwargs.get("doc_type", self.doc_type) if not doc_type: doc_type = "unk" id_field = kwargs.get("id_field") for item in item_list: action = get_es_action_item(item, action_settings, doc_type, id_field) action_list.append(action) return action_list
[ "def", "make_action_list", "(", "self", ",", "item_list", ",", "*", "*", "kwargs", ")", ":", "action_list", "=", "[", "]", "es_index", "=", "get2", "(", "kwargs", ",", "\"es_index\"", ",", "self", ".", "es_index", ")", "action_type", "=", "kwargs", ".", "get", "(", "\"action_type\"", ",", "\"index\"", ")", "action_settings", "=", "{", "'_op_type'", ":", "action_type", ",", "'_index'", ":", "es_index", "}", "doc_type", "=", "kwargs", ".", "get", "(", "\"doc_type\"", ",", "self", ".", "doc_type", ")", "if", "not", "doc_type", ":", "doc_type", "=", "\"unk\"", "id_field", "=", "kwargs", ".", "get", "(", "\"id_field\"", ")", "for", "item", "in", "item_list", ":", "action", "=", "get_es_action_item", "(", "item", ",", "action_settings", ",", "doc_type", ",", "id_field", ")", "action_list", ".", "append", "(", "action", ")", "return", "action_list" ]
Generates a list of actions for sending to Elasticsearch
[ "Generates", "a", "list", "of", "actions", "for", "sending", "to", "Elasticsearch" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/search/elasticsearchbase.py#L45-L63
249,636
KnowledgeLinks/rdfframework
rdfframework/search/elasticsearchbase.py
EsBase.bulk_save
def bulk_save(self, action_list, **kwargs): ''' sends a passed in action_list to elasticsearch ''' lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3])) lg.setLevel(self.log_level) err_log = logging.getLogger("index.errors") es = self.es es_index = get2(kwargs, "es_index", self.es_index) reset_index = kwargs.get("reset_index",self.reset_index) doc_type = kwargs.get("doc_type", self.doc_type) lg.info("Sending %s items to Elasticsearch",len(action_list)) # bulk_stream = helpers.streaming_bulk(es, result = helpers.bulk(es, action_list, chunk_size=400, raise_on_error=False) lg.info("FINISHED sending to Elasticsearch") if result[1]: lg.info("Formating Error results") # action_keys = {item['_id']:i for i, item in enumerate(action_list)} new_result = [] for item in result[1][:5]: for action_item in action_list: if action_item['_id'] == item[list(item)[0]]['_id']: new_result.append((item, action_item,)) break err_log.info("Results for batch '%s'\n(%s,\n%s\n%s)", kwargs.get('batch', "No Batch Number provided"), result[0], json.dumps(new_result, indent=4), json.dumps(result[1])) del new_result lg.info("Finished Error logging") # for success, result in bulk_stream: # lg.debug("\nsuccess: %s \nresult:\n%s", success, pp.pformat(result)) return result
python
def bulk_save(self, action_list, **kwargs): ''' sends a passed in action_list to elasticsearch ''' lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3])) lg.setLevel(self.log_level) err_log = logging.getLogger("index.errors") es = self.es es_index = get2(kwargs, "es_index", self.es_index) reset_index = kwargs.get("reset_index",self.reset_index) doc_type = kwargs.get("doc_type", self.doc_type) lg.info("Sending %s items to Elasticsearch",len(action_list)) # bulk_stream = helpers.streaming_bulk(es, result = helpers.bulk(es, action_list, chunk_size=400, raise_on_error=False) lg.info("FINISHED sending to Elasticsearch") if result[1]: lg.info("Formating Error results") # action_keys = {item['_id']:i for i, item in enumerate(action_list)} new_result = [] for item in result[1][:5]: for action_item in action_list: if action_item['_id'] == item[list(item)[0]]['_id']: new_result.append((item, action_item,)) break err_log.info("Results for batch '%s'\n(%s,\n%s\n%s)", kwargs.get('batch', "No Batch Number provided"), result[0], json.dumps(new_result, indent=4), json.dumps(result[1])) del new_result lg.info("Finished Error logging") # for success, result in bulk_stream: # lg.debug("\nsuccess: %s \nresult:\n%s", success, pp.pformat(result)) return result
[ "def", "bulk_save", "(", "self", ",", "action_list", ",", "*", "*", "kwargs", ")", ":", "lg", "=", "logging", ".", "getLogger", "(", "\"%s.%s\"", "%", "(", "self", ".", "ln", ",", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", ")", ")", "lg", ".", "setLevel", "(", "self", ".", "log_level", ")", "err_log", "=", "logging", ".", "getLogger", "(", "\"index.errors\"", ")", "es", "=", "self", ".", "es", "es_index", "=", "get2", "(", "kwargs", ",", "\"es_index\"", ",", "self", ".", "es_index", ")", "reset_index", "=", "kwargs", ".", "get", "(", "\"reset_index\"", ",", "self", ".", "reset_index", ")", "doc_type", "=", "kwargs", ".", "get", "(", "\"doc_type\"", ",", "self", ".", "doc_type", ")", "lg", ".", "info", "(", "\"Sending %s items to Elasticsearch\"", ",", "len", "(", "action_list", ")", ")", "# bulk_stream = helpers.streaming_bulk(es,\r", "result", "=", "helpers", ".", "bulk", "(", "es", ",", "action_list", ",", "chunk_size", "=", "400", ",", "raise_on_error", "=", "False", ")", "lg", ".", "info", "(", "\"FINISHED sending to Elasticsearch\"", ")", "if", "result", "[", "1", "]", ":", "lg", ".", "info", "(", "\"Formating Error results\"", ")", "# action_keys = {item['_id']:i for i, item in enumerate(action_list)}\r", "new_result", "=", "[", "]", "for", "item", "in", "result", "[", "1", "]", "[", ":", "5", "]", ":", "for", "action_item", "in", "action_list", ":", "if", "action_item", "[", "'_id'", "]", "==", "item", "[", "list", "(", "item", ")", "[", "0", "]", "]", "[", "'_id'", "]", ":", "new_result", ".", "append", "(", "(", "item", ",", "action_item", ",", ")", ")", "break", "err_log", ".", "info", "(", "\"Results for batch '%s'\\n(%s,\\n%s\\n%s)\"", ",", "kwargs", ".", "get", "(", "'batch'", ",", "\"No Batch Number provided\"", ")", ",", "result", "[", "0", "]", ",", "json", ".", "dumps", "(", "new_result", ",", "indent", "=", "4", ")", ",", "json", ".", "dumps", "(", "result", "[", "1", "]", ")", ")", "del", "new_result", "lg", ".", "info", 
"(", "\"Finished Error logging\"", ")", "# for success, result in bulk_stream:\r", "# lg.debug(\"\\nsuccess: %s \\nresult:\\n%s\", success, pp.pformat(result))\r", "return", "result" ]
sends a passed in action_list to elasticsearch
[ "sends", "a", "passed", "in", "action_list", "to", "elasticsearch" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/search/elasticsearchbase.py#L65-L101
249,637
KnowledgeLinks/rdfframework
rdfframework/search/elasticsearchbase.py
EsBase.get_list
def get_list(self, method="list", **kwargs): """ returns a key value list of items based on the specfied criteria """ lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3])) lg.setLevel(self.log_level) args = inspect.getargvalues(inspect.currentframe())[3] lg.debug("\n****** Args *****:\n%s", pp.pformat(args)) es = kwargs.get("es",self.es) doc_type = get2(kwargs, "doc_type", self.doc_type) id_field = get2(kwargs, "id_field", "_id") value_fld = kwargs.get("value_fld") fields = kwargs.get("fields") sort_dir = get2(kwargs,"sort_dir", "asc") sort_fields = get2(kwargs,"sort_fields", get2(kwargs, "fields", [value_fld])) size = get2(kwargs,"size",2000) term = get2(kwargs,"term",'').replace("/","//") filter_field = kwargs.get('filter_field') filter_value = kwargs.get('filter_value') dsl = {} # set retutn to only return the fields specified or return the whole # document if not specified if fields is not None: dsl["_source"] = fields elif value_fld is not None: dsl["_source"] = [value_fld] fields = [value_fld] else: fields = [] # set query parameters based on the return method "list" or "search" if sort_dir != "none" and method == "list": dsl["sort"] = [] for fld in sort_fields: if fld is not None: dsl["sort"].append({ fld: sort_dir }) if method == "search": # query in elasticsearch breaks if the is a single open parenthesis # remove a single parenthesis from the search term if "(" in term and ")" not in term: search_term = term.replace("(", "") else: search_term = term size = 5 dsl['query'] = { "bool": { "should": [ { "query_string" : { "analyze_wildcard": { "query": "*%s*" % search_term } } }, { "query_string" : { "query": "*%s*" % search_term, "analyzer": "default", "analyze_wildcard": True, "fields": fields, "boost": 10 } } ] } } else: pass if filter_value: dsl['filter'] = { "term": { filter_field: filter_value } } lg.info("\n-------- size: %s\ndsl:\n%s", size, json.dumps(dsl,indent=4)) result = es.search(index=self.es_index, size=size, doc_type=doc_type, 
body=dsl) if kwargs.get("calc"): result = self._calc_result(result, kwargs['calc']) lg.debug(pp.pformat(result)) return result
python
def get_list(self, method="list", **kwargs): """ returns a key value list of items based on the specfied criteria """ lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3])) lg.setLevel(self.log_level) args = inspect.getargvalues(inspect.currentframe())[3] lg.debug("\n****** Args *****:\n%s", pp.pformat(args)) es = kwargs.get("es",self.es) doc_type = get2(kwargs, "doc_type", self.doc_type) id_field = get2(kwargs, "id_field", "_id") value_fld = kwargs.get("value_fld") fields = kwargs.get("fields") sort_dir = get2(kwargs,"sort_dir", "asc") sort_fields = get2(kwargs,"sort_fields", get2(kwargs, "fields", [value_fld])) size = get2(kwargs,"size",2000) term = get2(kwargs,"term",'').replace("/","//") filter_field = kwargs.get('filter_field') filter_value = kwargs.get('filter_value') dsl = {} # set retutn to only return the fields specified or return the whole # document if not specified if fields is not None: dsl["_source"] = fields elif value_fld is not None: dsl["_source"] = [value_fld] fields = [value_fld] else: fields = [] # set query parameters based on the return method "list" or "search" if sort_dir != "none" and method == "list": dsl["sort"] = [] for fld in sort_fields: if fld is not None: dsl["sort"].append({ fld: sort_dir }) if method == "search": # query in elasticsearch breaks if the is a single open parenthesis # remove a single parenthesis from the search term if "(" in term and ")" not in term: search_term = term.replace("(", "") else: search_term = term size = 5 dsl['query'] = { "bool": { "should": [ { "query_string" : { "analyze_wildcard": { "query": "*%s*" % search_term } } }, { "query_string" : { "query": "*%s*" % search_term, "analyzer": "default", "analyze_wildcard": True, "fields": fields, "boost": 10 } } ] } } else: pass if filter_value: dsl['filter'] = { "term": { filter_field: filter_value } } lg.info("\n-------- size: %s\ndsl:\n%s", size, json.dumps(dsl,indent=4)) result = es.search(index=self.es_index, size=size, doc_type=doc_type, 
body=dsl) if kwargs.get("calc"): result = self._calc_result(result, kwargs['calc']) lg.debug(pp.pformat(result)) return result
[ "def", "get_list", "(", "self", ",", "method", "=", "\"list\"", ",", "*", "*", "kwargs", ")", ":", "lg", "=", "logging", ".", "getLogger", "(", "\"%s.%s\"", "%", "(", "self", ".", "ln", ",", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", ")", ")", "lg", ".", "setLevel", "(", "self", ".", "log_level", ")", "args", "=", "inspect", ".", "getargvalues", "(", "inspect", ".", "currentframe", "(", ")", ")", "[", "3", "]", "lg", ".", "debug", "(", "\"\\n****** Args *****:\\n%s\"", ",", "pp", ".", "pformat", "(", "args", ")", ")", "es", "=", "kwargs", ".", "get", "(", "\"es\"", ",", "self", ".", "es", ")", "doc_type", "=", "get2", "(", "kwargs", ",", "\"doc_type\"", ",", "self", ".", "doc_type", ")", "id_field", "=", "get2", "(", "kwargs", ",", "\"id_field\"", ",", "\"_id\"", ")", "value_fld", "=", "kwargs", ".", "get", "(", "\"value_fld\"", ")", "fields", "=", "kwargs", ".", "get", "(", "\"fields\"", ")", "sort_dir", "=", "get2", "(", "kwargs", ",", "\"sort_dir\"", ",", "\"asc\"", ")", "sort_fields", "=", "get2", "(", "kwargs", ",", "\"sort_fields\"", ",", "get2", "(", "kwargs", ",", "\"fields\"", ",", "[", "value_fld", "]", ")", ")", "size", "=", "get2", "(", "kwargs", ",", "\"size\"", ",", "2000", ")", "term", "=", "get2", "(", "kwargs", ",", "\"term\"", ",", "''", ")", ".", "replace", "(", "\"/\"", ",", "\"//\"", ")", "filter_field", "=", "kwargs", ".", "get", "(", "'filter_field'", ")", "filter_value", "=", "kwargs", ".", "get", "(", "'filter_value'", ")", "dsl", "=", "{", "}", "# set retutn to only return the fields specified or return the whole\r", "# document if not specified\r", "if", "fields", "is", "not", "None", ":", "dsl", "[", "\"_source\"", "]", "=", "fields", "elif", "value_fld", "is", "not", "None", ":", "dsl", "[", "\"_source\"", "]", "=", "[", "value_fld", "]", "fields", "=", "[", "value_fld", "]", "else", ":", "fields", "=", "[", "]", "# set query parameters based on the return method \"list\" or \"search\"\r", "if", "sort_dir", "!=", 
"\"none\"", "and", "method", "==", "\"list\"", ":", "dsl", "[", "\"sort\"", "]", "=", "[", "]", "for", "fld", "in", "sort_fields", ":", "if", "fld", "is", "not", "None", ":", "dsl", "[", "\"sort\"", "]", ".", "append", "(", "{", "fld", ":", "sort_dir", "}", ")", "if", "method", "==", "\"search\"", ":", "# query in elasticsearch breaks if the is a single open parenthesis\r", "# remove a single parenthesis from the search term\r", "if", "\"(\"", "in", "term", "and", "\")\"", "not", "in", "term", ":", "search_term", "=", "term", ".", "replace", "(", "\"(\"", ",", "\"\"", ")", "else", ":", "search_term", "=", "term", "size", "=", "5", "dsl", "[", "'query'", "]", "=", "{", "\"bool\"", ":", "{", "\"should\"", ":", "[", "{", "\"query_string\"", ":", "{", "\"analyze_wildcard\"", ":", "{", "\"query\"", ":", "\"*%s*\"", "%", "search_term", "}", "}", "}", ",", "{", "\"query_string\"", ":", "{", "\"query\"", ":", "\"*%s*\"", "%", "search_term", ",", "\"analyzer\"", ":", "\"default\"", ",", "\"analyze_wildcard\"", ":", "True", ",", "\"fields\"", ":", "fields", ",", "\"boost\"", ":", "10", "}", "}", "]", "}", "}", "else", ":", "pass", "if", "filter_value", ":", "dsl", "[", "'filter'", "]", "=", "{", "\"term\"", ":", "{", "filter_field", ":", "filter_value", "}", "}", "lg", ".", "info", "(", "\"\\n-------- size: %s\\ndsl:\\n%s\"", ",", "size", ",", "json", ".", "dumps", "(", "dsl", ",", "indent", "=", "4", ")", ")", "result", "=", "es", ".", "search", "(", "index", "=", "self", ".", "es_index", ",", "size", "=", "size", ",", "doc_type", "=", "doc_type", ",", "body", "=", "dsl", ")", "if", "kwargs", ".", "get", "(", "\"calc\"", ")", ":", "result", "=", "self", ".", "_calc_result", "(", "result", ",", "kwargs", "[", "'calc'", "]", ")", "lg", ".", "debug", "(", "pp", ".", "pformat", "(", "result", ")", ")", "return", "result" ]
returns a key value list of items based on the specfied criteria
[ "returns", "a", "key", "value", "list", "of", "items", "based", "on", "the", "specfied", "criteria" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/search/elasticsearchbase.py#L227-L311
249,638
cirruscluster/cirruscluster
cirruscluster/cluster/ec2cluster.py
Ec2Cluster.authorize_role
def authorize_role(self, role, protocol, from_port, to_port, cidr_ip): """ Authorize access to machines in a given role from a given network. """ if (protocol != 'tcp' and protocol != 'udp'): raise RuntimeError('error: expected protocol to be tcp or udp '\ 'but got %s' % (protocol)) self._check_role_name(role) role_group_name = self._group_name_for_role(role) # Revoke first to avoid InvalidPermission.Duplicate error self.ec2.revoke_security_group(role_group_name, ip_protocol=protocol, from_port=from_port, to_port=to_port, cidr_ip=cidr_ip) self.ec2.authorize_security_group(role_group_name, ip_protocol=protocol, from_port=from_port, to_port=to_port, cidr_ip=cidr_ip)
python
def authorize_role(self, role, protocol, from_port, to_port, cidr_ip): """ Authorize access to machines in a given role from a given network. """ if (protocol != 'tcp' and protocol != 'udp'): raise RuntimeError('error: expected protocol to be tcp or udp '\ 'but got %s' % (protocol)) self._check_role_name(role) role_group_name = self._group_name_for_role(role) # Revoke first to avoid InvalidPermission.Duplicate error self.ec2.revoke_security_group(role_group_name, ip_protocol=protocol, from_port=from_port, to_port=to_port, cidr_ip=cidr_ip) self.ec2.authorize_security_group(role_group_name, ip_protocol=protocol, from_port=from_port, to_port=to_port, cidr_ip=cidr_ip)
[ "def", "authorize_role", "(", "self", ",", "role", ",", "protocol", ",", "from_port", ",", "to_port", ",", "cidr_ip", ")", ":", "if", "(", "protocol", "!=", "'tcp'", "and", "protocol", "!=", "'udp'", ")", ":", "raise", "RuntimeError", "(", "'error: expected protocol to be tcp or udp '", "'but got %s'", "%", "(", "protocol", ")", ")", "self", ".", "_check_role_name", "(", "role", ")", "role_group_name", "=", "self", ".", "_group_name_for_role", "(", "role", ")", "# Revoke first to avoid InvalidPermission.Duplicate error", "self", ".", "ec2", ".", "revoke_security_group", "(", "role_group_name", ",", "ip_protocol", "=", "protocol", ",", "from_port", "=", "from_port", ",", "to_port", "=", "to_port", ",", "cidr_ip", "=", "cidr_ip", ")", "self", ".", "ec2", ".", "authorize_security_group", "(", "role_group_name", ",", "ip_protocol", "=", "protocol", ",", "from_port", "=", "from_port", ",", "to_port", "=", "to_port", ",", "cidr_ip", "=", "cidr_ip", ")" ]
Authorize access to machines in a given role from a given network.
[ "Authorize", "access", "to", "machines", "in", "a", "given", "role", "from", "a", "given", "network", "." ]
977409929dd81322d886425cdced10608117d5d7
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/cluster/ec2cluster.py#L83-L102
249,639
cirruscluster/cirruscluster
cirruscluster/cluster/ec2cluster.py
Ec2Cluster.check_running
def check_running(self, role, number): """ Check that a certain number of instances in a role are running. """ instances = self.get_instances_in_role(role, "running") if len(instances) != number: print "Expected %s instances in role %s, but was %s %s" % \ (number, role, len(instances), instances) return False else: return instances
python
def check_running(self, role, number): """ Check that a certain number of instances in a role are running. """ instances = self.get_instances_in_role(role, "running") if len(instances) != number: print "Expected %s instances in role %s, but was %s %s" % \ (number, role, len(instances), instances) return False else: return instances
[ "def", "check_running", "(", "self", ",", "role", ",", "number", ")", ":", "instances", "=", "self", ".", "get_instances_in_role", "(", "role", ",", "\"running\"", ")", "if", "len", "(", "instances", ")", "!=", "number", ":", "print", "\"Expected %s instances in role %s, but was %s %s\"", "%", "(", "number", ",", "role", ",", "len", "(", "instances", ")", ",", "instances", ")", "return", "False", "else", ":", "return", "instances" ]
Check that a certain number of instances in a role are running.
[ "Check", "that", "a", "certain", "number", "of", "instances", "in", "a", "role", "are", "running", "." ]
977409929dd81322d886425cdced10608117d5d7
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/cluster/ec2cluster.py#L104-L114
249,640
cirruscluster/cirruscluster
cirruscluster/cluster/ec2cluster.py
Ec2Cluster.get_instances_in_role
def get_instances_in_role(self, role, state_filter=None): """ Get all the instances in a role, filtered by state. @param role: the name of the role @param state_filter: the state that the instance should be in (e.g. "running"), or None for all states """ self._check_role_name(role) instances = [] for instance in self._get_instances(self._group_name_for_role(role), state_filter): instances.append(Instance(instance.id, instance.dns_name, instance.private_dns_name, instance.private_ip_address)) return instances
python
def get_instances_in_role(self, role, state_filter=None): """ Get all the instances in a role, filtered by state. @param role: the name of the role @param state_filter: the state that the instance should be in (e.g. "running"), or None for all states """ self._check_role_name(role) instances = [] for instance in self._get_instances(self._group_name_for_role(role), state_filter): instances.append(Instance(instance.id, instance.dns_name, instance.private_dns_name, instance.private_ip_address)) return instances
[ "def", "get_instances_in_role", "(", "self", ",", "role", ",", "state_filter", "=", "None", ")", ":", "self", ".", "_check_role_name", "(", "role", ")", "instances", "=", "[", "]", "for", "instance", "in", "self", ".", "_get_instances", "(", "self", ".", "_group_name_for_role", "(", "role", ")", ",", "state_filter", ")", ":", "instances", ".", "append", "(", "Instance", "(", "instance", ".", "id", ",", "instance", ".", "dns_name", ",", "instance", ".", "private_dns_name", ",", "instance", ".", "private_ip_address", ")", ")", "return", "instances" ]
Get all the instances in a role, filtered by state. @param role: the name of the role @param state_filter: the state that the instance should be in (e.g. "running"), or None for all states
[ "Get", "all", "the", "instances", "in", "a", "role", "filtered", "by", "state", "." ]
977409929dd81322d886425cdced10608117d5d7
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/cluster/ec2cluster.py#L116-L133
249,641
cirruscluster/cirruscluster
cirruscluster/cluster/ec2cluster.py
Ec2Cluster.get_instances
def get_instances(self, state_filter=None): """ Get all the instances filtered by state. @param state_filter: the state that the instance should be in (e.g. "running"), or None for all states """ instances = [] for instance in self._get_instances(self._get_cluster_group_name(), state_filter): instances.append(Instance(instance.id, instance.dns_name, instance.private_dns_name, instance.private_ip_address)) return instances
python
def get_instances(self, state_filter=None): """ Get all the instances filtered by state. @param state_filter: the state that the instance should be in (e.g. "running"), or None for all states """ instances = [] for instance in self._get_instances(self._get_cluster_group_name(), state_filter): instances.append(Instance(instance.id, instance.dns_name, instance.private_dns_name, instance.private_ip_address)) return instances
[ "def", "get_instances", "(", "self", ",", "state_filter", "=", "None", ")", ":", "instances", "=", "[", "]", "for", "instance", "in", "self", ".", "_get_instances", "(", "self", ".", "_get_cluster_group_name", "(", ")", ",", "state_filter", ")", ":", "instances", ".", "append", "(", "Instance", "(", "instance", ".", "id", ",", "instance", ".", "dns_name", ",", "instance", ".", "private_dns_name", ",", "instance", ".", "private_ip_address", ")", ")", "return", "instances" ]
Get all the instances filtered by state. @param state_filter: the state that the instance should be in (e.g. "running"), or None for all states
[ "Get", "all", "the", "instances", "filtered", "by", "state", "." ]
977409929dd81322d886425cdced10608117d5d7
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/cluster/ec2cluster.py#L135-L149
249,642
cirruscluster/cirruscluster
cirruscluster/cluster/ec2cluster.py
Ec2Cluster.print_status
def print_status(self, roles=None, state_filter="running"): """ Print the status of instances in the given roles, filtered by state. """ if not roles: for instance in self._get_instances(self._get_cluster_group_name(), state_filter): self._print_instance("", instance) else: for role in roles: for instance in self._get_instances(self._group_name_for_role(role), state_filter): self._print_instance(role, instance)
python
def print_status(self, roles=None, state_filter="running"): """ Print the status of instances in the given roles, filtered by state. """ if not roles: for instance in self._get_instances(self._get_cluster_group_name(), state_filter): self._print_instance("", instance) else: for role in roles: for instance in self._get_instances(self._group_name_for_role(role), state_filter): self._print_instance(role, instance)
[ "def", "print_status", "(", "self", ",", "roles", "=", "None", ",", "state_filter", "=", "\"running\"", ")", ":", "if", "not", "roles", ":", "for", "instance", "in", "self", ".", "_get_instances", "(", "self", ".", "_get_cluster_group_name", "(", ")", ",", "state_filter", ")", ":", "self", ".", "_print_instance", "(", "\"\"", ",", "instance", ")", "else", ":", "for", "role", "in", "roles", ":", "for", "instance", "in", "self", ".", "_get_instances", "(", "self", ".", "_group_name_for_role", "(", "role", ")", ",", "state_filter", ")", ":", "self", ".", "_print_instance", "(", "role", ",", "instance", ")" ]
Print the status of instances in the given roles, filtered by state.
[ "Print", "the", "status", "of", "instances", "in", "the", "given", "roles", "filtered", "by", "state", "." ]
977409929dd81322d886425cdced10608117d5d7
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/cluster/ec2cluster.py#L151-L163
249,643
cirruscluster/cirruscluster
cirruscluster/cluster/ec2cluster.py
Ec2Cluster.CreateBlockDeviceMap
def CreateBlockDeviceMap(self, image_id, instance_type): """ If you launch without specifying a manual device block mapping, you may not get all the ephemeral devices available to the given instance type. This will build one that ensures all available ephemeral devices are mapped. """ # get the block device mapping stored with the image image = self.ec2.get_image(image_id) block_device_map = image.block_device_mapping assert(block_device_map) # update it to include the ephemeral devices # max is 4... is it an error for instances with fewer than 4 ? # see: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ # InstanceStorage.html#StorageOnInstanceTypes ephemeral_device_names = ['/dev/sdb', '/dev/sdc', '/dev/sdd', '/dev/sde'] for i, device_name in enumerate(ephemeral_device_names): name = 'ephemeral%d' % (i) bdt = blockdevicemapping.BlockDeviceType(ephemeral_name = name) block_device_map[device_name] = bdt return block_device_map
python
def CreateBlockDeviceMap(self, image_id, instance_type): """ If you launch without specifying a manual device block mapping, you may not get all the ephemeral devices available to the given instance type. This will build one that ensures all available ephemeral devices are mapped. """ # get the block device mapping stored with the image image = self.ec2.get_image(image_id) block_device_map = image.block_device_mapping assert(block_device_map) # update it to include the ephemeral devices # max is 4... is it an error for instances with fewer than 4 ? # see: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ # InstanceStorage.html#StorageOnInstanceTypes ephemeral_device_names = ['/dev/sdb', '/dev/sdc', '/dev/sdd', '/dev/sde'] for i, device_name in enumerate(ephemeral_device_names): name = 'ephemeral%d' % (i) bdt = blockdevicemapping.BlockDeviceType(ephemeral_name = name) block_device_map[device_name] = bdt return block_device_map
[ "def", "CreateBlockDeviceMap", "(", "self", ",", "image_id", ",", "instance_type", ")", ":", "# get the block device mapping stored with the image", "image", "=", "self", ".", "ec2", ".", "get_image", "(", "image_id", ")", "block_device_map", "=", "image", ".", "block_device_mapping", "assert", "(", "block_device_map", ")", "# update it to include the ephemeral devices ", "# max is 4... is it an error for instances with fewer than 4 ? ", "# see: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/", "# InstanceStorage.html#StorageOnInstanceTypes", "ephemeral_device_names", "=", "[", "'/dev/sdb'", ",", "'/dev/sdc'", ",", "'/dev/sdd'", ",", "'/dev/sde'", "]", "for", "i", ",", "device_name", "in", "enumerate", "(", "ephemeral_device_names", ")", ":", "name", "=", "'ephemeral%d'", "%", "(", "i", ")", "bdt", "=", "blockdevicemapping", ".", "BlockDeviceType", "(", "ephemeral_name", "=", "name", ")", "block_device_map", "[", "device_name", "]", "=", "bdt", "return", "block_device_map" ]
If you launch without specifying a manual device block mapping, you may not get all the ephemeral devices available to the given instance type. This will build one that ensures all available ephemeral devices are mapped.
[ "If", "you", "launch", "without", "specifying", "a", "manual", "device", "block", "mapping", "you", "may", "not", "get", "all", "the", "ephemeral", "devices", "available", "to", "the", "given", "instance", "type", ".", "This", "will", "build", "one", "that", "ensures", "all", "available", "ephemeral", "devices", "are", "mapped", "." ]
977409929dd81322d886425cdced10608117d5d7
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/cluster/ec2cluster.py#L213-L233
249,644
cirruscluster/cirruscluster
cirruscluster/cluster/ec2cluster.py
Ec2Cluster._create_security_groups
def _create_security_groups(self, role): """ Create the security groups for a given role, including a group for the cluster if it doesn't exist. """ self._check_role_name(role) security_group_names = self._get_all_group_names() cluster_group_name = self._get_cluster_group_name() if not cluster_group_name in security_group_names: self.ec2.create_security_group(cluster_group_name, "Cluster (%s)" % (self._name)) self.ec2.authorize_security_group(cluster_group_name, cluster_group_name) # Allow SSH from anywhere self.ec2.authorize_security_group(cluster_group_name, ip_protocol="tcp", from_port=22, to_port=22, cidr_ip="0.0.0.0/0") role_group_name = self._group_name_for_role(role) if not role_group_name in security_group_names: self.ec2.create_security_group(role_group_name, "Role %s (%s)" % (role, self._name)) return
python
def _create_security_groups(self, role): """ Create the security groups for a given role, including a group for the cluster if it doesn't exist. """ self._check_role_name(role) security_group_names = self._get_all_group_names() cluster_group_name = self._get_cluster_group_name() if not cluster_group_name in security_group_names: self.ec2.create_security_group(cluster_group_name, "Cluster (%s)" % (self._name)) self.ec2.authorize_security_group(cluster_group_name, cluster_group_name) # Allow SSH from anywhere self.ec2.authorize_security_group(cluster_group_name, ip_protocol="tcp", from_port=22, to_port=22, cidr_ip="0.0.0.0/0") role_group_name = self._group_name_for_role(role) if not role_group_name in security_group_names: self.ec2.create_security_group(role_group_name, "Role %s (%s)" % (role, self._name)) return
[ "def", "_create_security_groups", "(", "self", ",", "role", ")", ":", "self", ".", "_check_role_name", "(", "role", ")", "security_group_names", "=", "self", ".", "_get_all_group_names", "(", ")", "cluster_group_name", "=", "self", ".", "_get_cluster_group_name", "(", ")", "if", "not", "cluster_group_name", "in", "security_group_names", ":", "self", ".", "ec2", ".", "create_security_group", "(", "cluster_group_name", ",", "\"Cluster (%s)\"", "%", "(", "self", ".", "_name", ")", ")", "self", ".", "ec2", ".", "authorize_security_group", "(", "cluster_group_name", ",", "cluster_group_name", ")", "# Allow SSH from anywhere", "self", ".", "ec2", ".", "authorize_security_group", "(", "cluster_group_name", ",", "ip_protocol", "=", "\"tcp\"", ",", "from_port", "=", "22", ",", "to_port", "=", "22", ",", "cidr_ip", "=", "\"0.0.0.0/0\"", ")", "role_group_name", "=", "self", ".", "_group_name_for_role", "(", "role", ")", "if", "not", "role_group_name", "in", "security_group_names", ":", "self", ".", "ec2", ".", "create_security_group", "(", "role_group_name", ",", "\"Role %s (%s)\"", "%", "(", "role", ",", "self", ".", "_name", ")", ")", "return" ]
Create the security groups for a given role, including a group for the cluster if it doesn't exist.
[ "Create", "the", "security", "groups", "for", "a", "given", "role", "including", "a", "group", "for", "the", "cluster", "if", "it", "doesn", "t", "exist", "." ]
977409929dd81322d886425cdced10608117d5d7
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/cluster/ec2cluster.py#L468-L492
249,645
cirruscluster/cirruscluster
cirruscluster/cluster/ec2cluster.py
Ec2Cluster._delete_security_groups
def _delete_security_groups(self): """ Delete the security groups for each role in the cluster, and the group for the cluster. """ group_names = self._get_all_group_names_for_cluster() for group in group_names: self.ec2.delete_security_group(group)
python
def _delete_security_groups(self): """ Delete the security groups for each role in the cluster, and the group for the cluster. """ group_names = self._get_all_group_names_for_cluster() for group in group_names: self.ec2.delete_security_group(group)
[ "def", "_delete_security_groups", "(", "self", ")", ":", "group_names", "=", "self", ".", "_get_all_group_names_for_cluster", "(", ")", "for", "group", "in", "group_names", ":", "self", ".", "ec2", ".", "delete_security_group", "(", "group", ")" ]
Delete the security groups for each role in the cluster, and the group for the cluster.
[ "Delete", "the", "security", "groups", "for", "each", "role", "in", "the", "cluster", "and", "the", "group", "for", "the", "cluster", "." ]
977409929dd81322d886425cdced10608117d5d7
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/cluster/ec2cluster.py#L494-L501
249,646
b3j0f/conf
b3j0f/conf/parser/core.py
serialize
def serialize(expr): """Serialize input expr into a parsable value. :rtype: str""" result = None if isinstance(expr, string_types): result = expr elif expr is not None: result = '=py:{0}'.format(expr) return result
python
def serialize(expr): """Serialize input expr into a parsable value. :rtype: str""" result = None if isinstance(expr, string_types): result = expr elif expr is not None: result = '=py:{0}'.format(expr) return result
[ "def", "serialize", "(", "expr", ")", ":", "result", "=", "None", "if", "isinstance", "(", "expr", ",", "string_types", ")", ":", "result", "=", "expr", "elif", "expr", "is", "not", "None", ":", "result", "=", "'=py:{0}'", ".", "format", "(", "expr", ")", "return", "result" ]
Serialize input expr into a parsable value. :rtype: str
[ "Serialize", "input", "expr", "into", "a", "parsable", "value", "." ]
18dd6d5d6560f9b202793739e2330a2181163511
https://github.com/b3j0f/conf/blob/18dd6d5d6560f9b202793739e2330a2181163511/b3j0f/conf/parser/core.py#L198-L211
249,647
b3j0f/conf
b3j0f/conf/parser/core.py
parse
def parse( svalue, conf=None, configurable=None, ptype=None, scope=DEFAULT_SCOPE, safe=DEFAULT_SAFE, besteffort=DEFAULT_BESTEFFORT ): """Parser which delegates parsing to expression or format parser.""" result = None if ptype is None: ptype = object compilation = REGEX_EXPR.match(svalue) _scope = {} if scope is None else scope.copy() if compilation: lang, expr = compilation.group('lang', 'expr') result = _exprparser( expr=expr, lang=lang, conf=conf, configurable=configurable, scope=_scope, safe=safe, besteffort=besteffort ) else: result = _strparser( svalue=svalue, conf=conf, configurable=configurable, scope=_scope, safe=safe, besteffort=besteffort ) # try to cast value in ptype if not isinstance(result, ptype): try: result = ptype(result) except TypeError: result = result return result
python
def parse( svalue, conf=None, configurable=None, ptype=None, scope=DEFAULT_SCOPE, safe=DEFAULT_SAFE, besteffort=DEFAULT_BESTEFFORT ): """Parser which delegates parsing to expression or format parser.""" result = None if ptype is None: ptype = object compilation = REGEX_EXPR.match(svalue) _scope = {} if scope is None else scope.copy() if compilation: lang, expr = compilation.group('lang', 'expr') result = _exprparser( expr=expr, lang=lang, conf=conf, configurable=configurable, scope=_scope, safe=safe, besteffort=besteffort ) else: result = _strparser( svalue=svalue, conf=conf, configurable=configurable, scope=_scope, safe=safe, besteffort=besteffort ) # try to cast value in ptype if not isinstance(result, ptype): try: result = ptype(result) except TypeError: result = result return result
[ "def", "parse", "(", "svalue", ",", "conf", "=", "None", ",", "configurable", "=", "None", ",", "ptype", "=", "None", ",", "scope", "=", "DEFAULT_SCOPE", ",", "safe", "=", "DEFAULT_SAFE", ",", "besteffort", "=", "DEFAULT_BESTEFFORT", ")", ":", "result", "=", "None", "if", "ptype", "is", "None", ":", "ptype", "=", "object", "compilation", "=", "REGEX_EXPR", ".", "match", "(", "svalue", ")", "_scope", "=", "{", "}", "if", "scope", "is", "None", "else", "scope", ".", "copy", "(", ")", "if", "compilation", ":", "lang", ",", "expr", "=", "compilation", ".", "group", "(", "'lang'", ",", "'expr'", ")", "result", "=", "_exprparser", "(", "expr", "=", "expr", ",", "lang", "=", "lang", ",", "conf", "=", "conf", ",", "configurable", "=", "configurable", ",", "scope", "=", "_scope", ",", "safe", "=", "safe", ",", "besteffort", "=", "besteffort", ")", "else", ":", "result", "=", "_strparser", "(", "svalue", "=", "svalue", ",", "conf", "=", "conf", ",", "configurable", "=", "configurable", ",", "scope", "=", "_scope", ",", "safe", "=", "safe", ",", "besteffort", "=", "besteffort", ")", "# try to cast value in ptype", "if", "not", "isinstance", "(", "result", ",", "ptype", ")", ":", "try", ":", "result", "=", "ptype", "(", "result", ")", "except", "TypeError", ":", "result", "=", "result", "return", "result" ]
Parser which delegates parsing to expression or format parser.
[ "Parser", "which", "delegates", "parsing", "to", "expression", "or", "format", "parser", "." ]
18dd6d5d6560f9b202793739e2330a2181163511
https://github.com/b3j0f/conf/blob/18dd6d5d6560f9b202793739e2330a2181163511/b3j0f/conf/parser/core.py#L214-L252
249,648
b3j0f/conf
b3j0f/conf/parser/core.py
_exprparser
def _exprparser( expr, scope, lang=None, conf=None, configurable=None, safe=DEFAULT_SAFE, besteffort=DEFAULT_BESTEFFORT, tostr=False ): """In charge of parsing an expression and return a python object.""" if scope is None: scope = {} scope.update({ 'configurable': configurable, 'conf': conf }) expr = REGEX_EXPR_R.sub( _refrepl( configurable=configurable, conf=conf, safe=safe, scope=scope, besteffort=besteffort ), expr ) result = resolve( expr=expr, name=lang, safe=safe, scope=scope, tostr=tostr, besteffort=besteffort ) return result
python
def _exprparser( expr, scope, lang=None, conf=None, configurable=None, safe=DEFAULT_SAFE, besteffort=DEFAULT_BESTEFFORT, tostr=False ): """In charge of parsing an expression and return a python object.""" if scope is None: scope = {} scope.update({ 'configurable': configurable, 'conf': conf }) expr = REGEX_EXPR_R.sub( _refrepl( configurable=configurable, conf=conf, safe=safe, scope=scope, besteffort=besteffort ), expr ) result = resolve( expr=expr, name=lang, safe=safe, scope=scope, tostr=tostr, besteffort=besteffort ) return result
[ "def", "_exprparser", "(", "expr", ",", "scope", ",", "lang", "=", "None", ",", "conf", "=", "None", ",", "configurable", "=", "None", ",", "safe", "=", "DEFAULT_SAFE", ",", "besteffort", "=", "DEFAULT_BESTEFFORT", ",", "tostr", "=", "False", ")", ":", "if", "scope", "is", "None", ":", "scope", "=", "{", "}", "scope", ".", "update", "(", "{", "'configurable'", ":", "configurable", ",", "'conf'", ":", "conf", "}", ")", "expr", "=", "REGEX_EXPR_R", ".", "sub", "(", "_refrepl", "(", "configurable", "=", "configurable", ",", "conf", "=", "conf", ",", "safe", "=", "safe", ",", "scope", "=", "scope", ",", "besteffort", "=", "besteffort", ")", ",", "expr", ")", "result", "=", "resolve", "(", "expr", "=", "expr", ",", "name", "=", "lang", ",", "safe", "=", "safe", ",", "scope", "=", "scope", ",", "tostr", "=", "tostr", ",", "besteffort", "=", "besteffort", ")", "return", "result" ]
In charge of parsing an expression and return a python object.
[ "In", "charge", "of", "parsing", "an", "expression", "and", "return", "a", "python", "object", "." ]
18dd6d5d6560f9b202793739e2330a2181163511
https://github.com/b3j0f/conf/blob/18dd6d5d6560f9b202793739e2330a2181163511/b3j0f/conf/parser/core.py#L255-L281
249,649
b3j0f/conf
b3j0f/conf/parser/core.py
_ref
def _ref( pname, conf=None, configurable=None, cname=None, path=None, history=0 ): """Resolve a parameter value. :param Configuration conf: configuration to use. :param str pname: parameter name. :param Configurable configurable: configurable. :param str cname: category name. :param str path: conf path. :param int history: parameter history research. :return: parameter. :raises: ParserError if conf and configurable are None. """ result = None if configurable is not None: kwargs = {} if conf is not None: kwargs['conf'] = conf if path is not None: kwargs['paths'] = path if conf is None: conf = configurable.getconf(**kwargs) if conf is None: raise ParserError( 'Wrong ref parameters. Conf and configurable are both None.' ) result = conf.param(pname=pname, cname=cname, history=history) return result
python
def _ref( pname, conf=None, configurable=None, cname=None, path=None, history=0 ): """Resolve a parameter value. :param Configuration conf: configuration to use. :param str pname: parameter name. :param Configurable configurable: configurable. :param str cname: category name. :param str path: conf path. :param int history: parameter history research. :return: parameter. :raises: ParserError if conf and configurable are None. """ result = None if configurable is not None: kwargs = {} if conf is not None: kwargs['conf'] = conf if path is not None: kwargs['paths'] = path if conf is None: conf = configurable.getconf(**kwargs) if conf is None: raise ParserError( 'Wrong ref parameters. Conf and configurable are both None.' ) result = conf.param(pname=pname, cname=cname, history=history) return result
[ "def", "_ref", "(", "pname", ",", "conf", "=", "None", ",", "configurable", "=", "None", ",", "cname", "=", "None", ",", "path", "=", "None", ",", "history", "=", "0", ")", ":", "result", "=", "None", "if", "configurable", "is", "not", "None", ":", "kwargs", "=", "{", "}", "if", "conf", "is", "not", "None", ":", "kwargs", "[", "'conf'", "]", "=", "conf", "if", "path", "is", "not", "None", ":", "kwargs", "[", "'paths'", "]", "=", "path", "if", "conf", "is", "None", ":", "conf", "=", "configurable", ".", "getconf", "(", "*", "*", "kwargs", ")", "if", "conf", "is", "None", ":", "raise", "ParserError", "(", "'Wrong ref parameters. Conf and configurable are both None.'", ")", "result", "=", "conf", ".", "param", "(", "pname", "=", "pname", ",", "cname", "=", "cname", ",", "history", "=", "history", ")", "return", "result" ]
Resolve a parameter value. :param Configuration conf: configuration to use. :param str pname: parameter name. :param Configurable configurable: configurable. :param str cname: category name. :param str path: conf path. :param int history: parameter history research. :return: parameter. :raises: ParserError if conf and configurable are None.
[ "Resolve", "a", "parameter", "value", "." ]
18dd6d5d6560f9b202793739e2330a2181163511
https://github.com/b3j0f/conf/blob/18dd6d5d6560f9b202793739e2330a2181163511/b3j0f/conf/parser/core.py#L385-L422
249,650
dev-platypus/platyutil
python/platyutil/system.py
mount
def mount(dev, mountpoint, flags='', log=None): '''Mount the given dev to the given mountpoint by using the given flags''' ensureDirectory(mountpoint) systemCall('mount %s %s %s' % (flags, dev, mountpoint), log=log)
python
def mount(dev, mountpoint, flags='', log=None): '''Mount the given dev to the given mountpoint by using the given flags''' ensureDirectory(mountpoint) systemCall('mount %s %s %s' % (flags, dev, mountpoint), log=log)
[ "def", "mount", "(", "dev", ",", "mountpoint", ",", "flags", "=", "''", ",", "log", "=", "None", ")", ":", "ensureDirectory", "(", "mountpoint", ")", "systemCall", "(", "'mount %s %s %s'", "%", "(", "flags", ",", "dev", ",", "mountpoint", ")", ",", "log", "=", "log", ")" ]
Mount the given dev to the given mountpoint by using the given flags
[ "Mount", "the", "given", "dev", "to", "the", "given", "mountpoint", "by", "using", "the", "given", "flags" ]
5f3dadbdc2445e71755fb09d6020641c77d13c47
https://github.com/dev-platypus/platyutil/blob/5f3dadbdc2445e71755fb09d6020641c77d13c47/python/platyutil/system.py#L37-L41
249,651
dev-platypus/platyutil
python/platyutil/system.py
systemCall
def systemCall(cmd, sh=True, log=None): '''Fancy magic version of os.system''' if log is None: log = logging log.debug('System call [sh:%s]: %s' \ % (sh, cmd)) out = [] proc = None poller = None outBuf = [''] errBuf = [''] def pollOutput(): ''' Read, log and store output (if any) from processes pipes. ''' removeChars = '\r\n' # collect fds with new output fds = [entry[0] for entry in poller.poll()] if proc.stdout.fileno() in fds: while True: try: tmp = proc.stdout.read(100) except IOError: break outBuf[0] += tmp while '\n' in outBuf[0]: line, _, outBuf[0] = outBuf[0].partition('\n') log.debug(line) out.append(line + '\n') if not tmp: break if proc.stderr.fileno() in fds: while True: try: tmp = proc.stderr.read(100) except IOError: break errBuf[0] += tmp while '\n' in errBuf[0]: line, _, errBuf[0] = errBuf[0].partition('\n') log.warning(line) if not tmp: break while True: if proc is None: # create and start process proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=sh) # create poll select poller = select.poll() flags = fcntl.fcntl(proc.stdout, fcntl.F_GETFL) fcntl.fcntl(proc.stdout, fcntl.F_SETFL, flags| os.O_NONBLOCK) flags = fcntl.fcntl(proc.stderr, fcntl.F_GETFL) fcntl.fcntl(proc.stderr, fcntl.F_SETFL, flags| os.O_NONBLOCK) # register pipes to polling poller.register(proc.stdout, select.POLLIN) poller.register(proc.stderr, select.POLLIN) pollOutput() if proc.poll() is not None: # proc finished break # poll once after the process ended to collect all the missing output pollOutput() # check return code if proc.returncode != 0: raise RuntimeError( CalledProcessError(proc.returncode, cmd, ''.join(out)) ) return ''.join(out)
python
def systemCall(cmd, sh=True, log=None): '''Fancy magic version of os.system''' if log is None: log = logging log.debug('System call [sh:%s]: %s' \ % (sh, cmd)) out = [] proc = None poller = None outBuf = [''] errBuf = [''] def pollOutput(): ''' Read, log and store output (if any) from processes pipes. ''' removeChars = '\r\n' # collect fds with new output fds = [entry[0] for entry in poller.poll()] if proc.stdout.fileno() in fds: while True: try: tmp = proc.stdout.read(100) except IOError: break outBuf[0] += tmp while '\n' in outBuf[0]: line, _, outBuf[0] = outBuf[0].partition('\n') log.debug(line) out.append(line + '\n') if not tmp: break if proc.stderr.fileno() in fds: while True: try: tmp = proc.stderr.read(100) except IOError: break errBuf[0] += tmp while '\n' in errBuf[0]: line, _, errBuf[0] = errBuf[0].partition('\n') log.warning(line) if not tmp: break while True: if proc is None: # create and start process proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=sh) # create poll select poller = select.poll() flags = fcntl.fcntl(proc.stdout, fcntl.F_GETFL) fcntl.fcntl(proc.stdout, fcntl.F_SETFL, flags| os.O_NONBLOCK) flags = fcntl.fcntl(proc.stderr, fcntl.F_GETFL) fcntl.fcntl(proc.stderr, fcntl.F_SETFL, flags| os.O_NONBLOCK) # register pipes to polling poller.register(proc.stdout, select.POLLIN) poller.register(proc.stderr, select.POLLIN) pollOutput() if proc.poll() is not None: # proc finished break # poll once after the process ended to collect all the missing output pollOutput() # check return code if proc.returncode != 0: raise RuntimeError( CalledProcessError(proc.returncode, cmd, ''.join(out)) ) return ''.join(out)
[ "def", "systemCall", "(", "cmd", ",", "sh", "=", "True", ",", "log", "=", "None", ")", ":", "if", "log", "is", "None", ":", "log", "=", "logging", "log", ".", "debug", "(", "'System call [sh:%s]: %s'", "%", "(", "sh", ",", "cmd", ")", ")", "out", "=", "[", "]", "proc", "=", "None", "poller", "=", "None", "outBuf", "=", "[", "''", "]", "errBuf", "=", "[", "''", "]", "def", "pollOutput", "(", ")", ":", "'''\n Read, log and store output (if any) from processes pipes.\n '''", "removeChars", "=", "'\\r\\n'", "# collect fds with new output", "fds", "=", "[", "entry", "[", "0", "]", "for", "entry", "in", "poller", ".", "poll", "(", ")", "]", "if", "proc", ".", "stdout", ".", "fileno", "(", ")", "in", "fds", ":", "while", "True", ":", "try", ":", "tmp", "=", "proc", ".", "stdout", ".", "read", "(", "100", ")", "except", "IOError", ":", "break", "outBuf", "[", "0", "]", "+=", "tmp", "while", "'\\n'", "in", "outBuf", "[", "0", "]", ":", "line", ",", "_", ",", "outBuf", "[", "0", "]", "=", "outBuf", "[", "0", "]", ".", "partition", "(", "'\\n'", ")", "log", ".", "debug", "(", "line", ")", "out", ".", "append", "(", "line", "+", "'\\n'", ")", "if", "not", "tmp", ":", "break", "if", "proc", ".", "stderr", ".", "fileno", "(", ")", "in", "fds", ":", "while", "True", ":", "try", ":", "tmp", "=", "proc", ".", "stderr", ".", "read", "(", "100", ")", "except", "IOError", ":", "break", "errBuf", "[", "0", "]", "+=", "tmp", "while", "'\\n'", "in", "errBuf", "[", "0", "]", ":", "line", ",", "_", ",", "errBuf", "[", "0", "]", "=", "errBuf", "[", "0", "]", ".", "partition", "(", "'\\n'", ")", "log", ".", "warning", "(", "line", ")", "if", "not", "tmp", ":", "break", "while", "True", ":", "if", "proc", "is", "None", ":", "# create and start process", "proc", "=", "Popen", "(", "cmd", ",", "stdin", "=", "PIPE", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ",", "shell", "=", "sh", ")", "# create poll select", "poller", "=", "select", ".", "poll", "(", ")", "flags", "=", "fcntl", 
".", "fcntl", "(", "proc", ".", "stdout", ",", "fcntl", ".", "F_GETFL", ")", "fcntl", ".", "fcntl", "(", "proc", ".", "stdout", ",", "fcntl", ".", "F_SETFL", ",", "flags", "|", "os", ".", "O_NONBLOCK", ")", "flags", "=", "fcntl", ".", "fcntl", "(", "proc", ".", "stderr", ",", "fcntl", ".", "F_GETFL", ")", "fcntl", ".", "fcntl", "(", "proc", ".", "stderr", ",", "fcntl", ".", "F_SETFL", ",", "flags", "|", "os", ".", "O_NONBLOCK", ")", "# register pipes to polling", "poller", ".", "register", "(", "proc", ".", "stdout", ",", "select", ".", "POLLIN", ")", "poller", ".", "register", "(", "proc", ".", "stderr", ",", "select", ".", "POLLIN", ")", "pollOutput", "(", ")", "if", "proc", ".", "poll", "(", ")", "is", "not", "None", ":", "# proc finished", "break", "# poll once after the process ended to collect all the missing output", "pollOutput", "(", ")", "# check return code", "if", "proc", ".", "returncode", "!=", "0", ":", "raise", "RuntimeError", "(", "CalledProcessError", "(", "proc", ".", "returncode", ",", "cmd", ",", "''", ".", "join", "(", "out", ")", ")", ")", "return", "''", ".", "join", "(", "out", ")" ]
Fancy magic version of os.system
[ "Fancy", "magic", "version", "of", "os", ".", "system" ]
5f3dadbdc2445e71755fb09d6020641c77d13c47
https://github.com/dev-platypus/platyutil/blob/5f3dadbdc2445e71755fb09d6020641c77d13c47/python/platyutil/system.py#L50-L135
249,652
dev-platypus/platyutil
python/platyutil/system.py
chrootedSystemCall
def chrootedSystemCall(chrootDir, cmd, sh=True, mountPseudoFs=True, log=None): '''Chrooted version of systemCall. Manages necessary pseudo filesystems.''' if log is None: log = conduct.app.log # determine mount points for pseudo fs proc = path.join(chrootDir, 'proc') sys = path.join(chrootDir, 'sys') dev = path.join(chrootDir, 'dev') devpts = path.join(chrootDir, 'dev', 'pts') # mount pseudo fs if mountPseudoFs: mount('proc', proc, '-t proc') mount('/sys', sys, '--rbind') mount('/dev', dev, '--rbind') try: # exec chrooted cmd log.debug('Execute chrooted command ...') cmd = 'chroot %s %s' % (chrootDir, cmd) return systemCall(cmd, sh, log) finally: # umount if pseudo fs was mounted if mountPseudoFs: # handle devpts if path.exists(devpts): umount(devpts, '-lf') # lazy is ok for pseudo fs umount(dev, '-lf') umount(sys, '-lf') umount(proc, '-lf')
python
def chrootedSystemCall(chrootDir, cmd, sh=True, mountPseudoFs=True, log=None): '''Chrooted version of systemCall. Manages necessary pseudo filesystems.''' if log is None: log = conduct.app.log # determine mount points for pseudo fs proc = path.join(chrootDir, 'proc') sys = path.join(chrootDir, 'sys') dev = path.join(chrootDir, 'dev') devpts = path.join(chrootDir, 'dev', 'pts') # mount pseudo fs if mountPseudoFs: mount('proc', proc, '-t proc') mount('/sys', sys, '--rbind') mount('/dev', dev, '--rbind') try: # exec chrooted cmd log.debug('Execute chrooted command ...') cmd = 'chroot %s %s' % (chrootDir, cmd) return systemCall(cmd, sh, log) finally: # umount if pseudo fs was mounted if mountPseudoFs: # handle devpts if path.exists(devpts): umount(devpts, '-lf') # lazy is ok for pseudo fs umount(dev, '-lf') umount(sys, '-lf') umount(proc, '-lf')
[ "def", "chrootedSystemCall", "(", "chrootDir", ",", "cmd", ",", "sh", "=", "True", ",", "mountPseudoFs", "=", "True", ",", "log", "=", "None", ")", ":", "if", "log", "is", "None", ":", "log", "=", "conduct", ".", "app", ".", "log", "# determine mount points for pseudo fs", "proc", "=", "path", ".", "join", "(", "chrootDir", ",", "'proc'", ")", "sys", "=", "path", ".", "join", "(", "chrootDir", ",", "'sys'", ")", "dev", "=", "path", ".", "join", "(", "chrootDir", ",", "'dev'", ")", "devpts", "=", "path", ".", "join", "(", "chrootDir", ",", "'dev'", ",", "'pts'", ")", "# mount pseudo fs", "if", "mountPseudoFs", ":", "mount", "(", "'proc'", ",", "proc", ",", "'-t proc'", ")", "mount", "(", "'/sys'", ",", "sys", ",", "'--rbind'", ")", "mount", "(", "'/dev'", ",", "dev", ",", "'--rbind'", ")", "try", ":", "# exec chrooted cmd", "log", ".", "debug", "(", "'Execute chrooted command ...'", ")", "cmd", "=", "'chroot %s %s'", "%", "(", "chrootDir", ",", "cmd", ")", "return", "systemCall", "(", "cmd", ",", "sh", ",", "log", ")", "finally", ":", "# umount if pseudo fs was mounted", "if", "mountPseudoFs", ":", "# handle devpts", "if", "path", ".", "exists", "(", "devpts", ")", ":", "umount", "(", "devpts", ",", "'-lf'", ")", "# lazy is ok for pseudo fs", "umount", "(", "dev", ",", "'-lf'", ")", "umount", "(", "sys", ",", "'-lf'", ")", "umount", "(", "proc", ",", "'-lf'", ")" ]
Chrooted version of systemCall. Manages necessary pseudo filesystems.
[ "Chrooted", "version", "of", "systemCall", ".", "Manages", "necessary", "pseudo", "filesystems", "." ]
5f3dadbdc2445e71755fb09d6020641c77d13c47
https://github.com/dev-platypus/platyutil/blob/5f3dadbdc2445e71755fb09d6020641c77d13c47/python/platyutil/system.py#L138-L169
249,653
cogniteev/docido-python-sdk
docido_sdk/core.py
ExtensionPoint.extensions
def extensions(self, component): """Return a list of components that declare to implement the extension point interface. """ classes = ComponentMeta._registry.get(self.interface, ()) components = [component.compmgr[cls] for cls in classes] return [c for c in components if c]
python
def extensions(self, component): """Return a list of components that declare to implement the extension point interface. """ classes = ComponentMeta._registry.get(self.interface, ()) components = [component.compmgr[cls] for cls in classes] return [c for c in components if c]
[ "def", "extensions", "(", "self", ",", "component", ")", ":", "classes", "=", "ComponentMeta", ".", "_registry", ".", "get", "(", "self", ".", "interface", ",", "(", ")", ")", "components", "=", "[", "component", ".", "compmgr", "[", "cls", "]", "for", "cls", "in", "classes", "]", "return", "[", "c", "for", "c", "in", "components", "if", "c", "]" ]
Return a list of components that declare to implement the extension point interface.
[ "Return", "a", "list", "of", "components", "that", "declare", "to", "implement", "the", "extension", "point", "interface", "." ]
58ecb6c6f5757fd40c0601657ab18368da7ddf33
https://github.com/cogniteev/docido-python-sdk/blob/58ecb6c6f5757fd40c0601657ab18368da7ddf33/docido_sdk/core.py#L30-L36
249,654
cogniteev/docido-python-sdk
docido_sdk/core.py
Component.implements
def implements(*interfaces): """Can be used in the class definition of `Component` subclasses to declare the extension points that are extended. """ import sys frame = sys._getframe(1) locals_ = frame.f_locals # Some sanity checks msg = 'implements() can only be used in a class definition' assert locals_ is not frame.f_globals and '__module__' in locals_, msg locals_.setdefault('_implements', []).extend(interfaces)
python
def implements(*interfaces): """Can be used in the class definition of `Component` subclasses to declare the extension points that are extended. """ import sys frame = sys._getframe(1) locals_ = frame.f_locals # Some sanity checks msg = 'implements() can only be used in a class definition' assert locals_ is not frame.f_globals and '__module__' in locals_, msg locals_.setdefault('_implements', []).extend(interfaces)
[ "def", "implements", "(", "*", "interfaces", ")", ":", "import", "sys", "frame", "=", "sys", ".", "_getframe", "(", "1", ")", "locals_", "=", "frame", ".", "f_locals", "# Some sanity checks", "msg", "=", "'implements() can only be used in a class definition'", "assert", "locals_", "is", "not", "frame", ".", "f_globals", "and", "'__module__'", "in", "locals_", ",", "msg", "locals_", ".", "setdefault", "(", "'_implements'", ",", "[", "]", ")", ".", "extend", "(", "interfaces", ")" ]
Can be used in the class definition of `Component` subclasses to declare the extension points that are extended.
[ "Can", "be", "used", "in", "the", "class", "definition", "of", "Component", "subclasses", "to", "declare", "the", "extension", "points", "that", "are", "extended", "." ]
58ecb6c6f5757fd40c0601657ab18368da7ddf33
https://github.com/cogniteev/docido-python-sdk/blob/58ecb6c6f5757fd40c0601657ab18368da7ddf33/docido_sdk/core.py#L144-L157
249,655
cogniteev/docido-python-sdk
docido_sdk/core.py
ComponentManager.is_enabled
def is_enabled(self, cls): """Return whether the given component class is enabled.""" if cls not in self.enabled: self.enabled[cls] = self.is_component_enabled(cls) return self.enabled[cls]
python
def is_enabled(self, cls): """Return whether the given component class is enabled.""" if cls not in self.enabled: self.enabled[cls] = self.is_component_enabled(cls) return self.enabled[cls]
[ "def", "is_enabled", "(", "self", ",", "cls", ")", ":", "if", "cls", "not", "in", "self", ".", "enabled", ":", "self", ".", "enabled", "[", "cls", "]", "=", "self", ".", "is_component_enabled", "(", "cls", ")", "return", "self", ".", "enabled", "[", "cls", "]" ]
Return whether the given component class is enabled.
[ "Return", "whether", "the", "given", "component", "class", "is", "enabled", "." ]
58ecb6c6f5757fd40c0601657ab18368da7ddf33
https://github.com/cogniteev/docido-python-sdk/blob/58ecb6c6f5757fd40c0601657ab18368da7ddf33/docido_sdk/core.py#L206-L210
249,656
cogniteev/docido-python-sdk
docido_sdk/core.py
ComponentManager.disable_component
def disable_component(self, component): """Force a component to be disabled. :param component: can be a class or an instance. """ if not isinstance(component, type): component = component.__class__ self.enabled[component] = False self.components[component] = None
python
def disable_component(self, component): """Force a component to be disabled. :param component: can be a class or an instance. """ if not isinstance(component, type): component = component.__class__ self.enabled[component] = False self.components[component] = None
[ "def", "disable_component", "(", "self", ",", "component", ")", ":", "if", "not", "isinstance", "(", "component", ",", "type", ")", ":", "component", "=", "component", ".", "__class__", "self", ".", "enabled", "[", "component", "]", "=", "False", "self", ".", "components", "[", "component", "]", "=", "None" ]
Force a component to be disabled. :param component: can be a class or an instance.
[ "Force", "a", "component", "to", "be", "disabled", "." ]
58ecb6c6f5757fd40c0601657ab18368da7ddf33
https://github.com/cogniteev/docido-python-sdk/blob/58ecb6c6f5757fd40c0601657ab18368da7ddf33/docido_sdk/core.py#L212-L220
249,657
btrevizan/pystrct
pystrct/pystrct.py
StructFile.size
def size(self): """Calculate and return the file size in bytes.""" old = self.__file.tell() # old position self.__file.seek(0, 2) # end of file n_bytes = self.__file.tell() # file size in bytes self.__file.seek(old) # back to old position return n_bytes
python
def size(self): """Calculate and return the file size in bytes.""" old = self.__file.tell() # old position self.__file.seek(0, 2) # end of file n_bytes = self.__file.tell() # file size in bytes self.__file.seek(old) # back to old position return n_bytes
[ "def", "size", "(", "self", ")", ":", "old", "=", "self", ".", "__file", ".", "tell", "(", ")", "# old position", "self", ".", "__file", ".", "seek", "(", "0", ",", "2", ")", "# end of file", "n_bytes", "=", "self", ".", "__file", ".", "tell", "(", ")", "# file size in bytes", "self", ".", "__file", ".", "seek", "(", "old", ")", "# back to old position", "return", "n_bytes" ]
Calculate and return the file size in bytes.
[ "Calculate", "and", "return", "the", "file", "size", "in", "bytes", "." ]
80e7edaacfbcb191a26ac449f049bbce878c67a3
https://github.com/btrevizan/pystrct/blob/80e7edaacfbcb191a26ac449f049bbce878c67a3/pystrct/pystrct.py#L33-L42
249,658
btrevizan/pystrct
pystrct/pystrct.py
StructFile.prev
def prev(self, n=1): """Get the previous n data from file. Keyword argument: n -- number of structs to be retrieved (default 1) Must be greater than 0. Return: A data in the format of obj_fmt, if n = 1. A list of structs, otherwise. """ # Current position - #data i = abs(self.tell - n) # Get the next n data starting from i return self.get(i, n)
python
def prev(self, n=1): """Get the previous n data from file. Keyword argument: n -- number of structs to be retrieved (default 1) Must be greater than 0. Return: A data in the format of obj_fmt, if n = 1. A list of structs, otherwise. """ # Current position - #data i = abs(self.tell - n) # Get the next n data starting from i return self.get(i, n)
[ "def", "prev", "(", "self", ",", "n", "=", "1", ")", ":", "# Current position - #data", "i", "=", "abs", "(", "self", ".", "tell", "-", "n", ")", "# Get the next n data starting from i", "return", "self", ".", "get", "(", "i", ",", "n", ")" ]
Get the previous n data from file. Keyword argument: n -- number of structs to be retrieved (default 1) Must be greater than 0. Return: A data in the format of obj_fmt, if n = 1. A list of structs, otherwise.
[ "Get", "the", "previous", "n", "data", "from", "file", "." ]
80e7edaacfbcb191a26ac449f049bbce878c67a3
https://github.com/btrevizan/pystrct/blob/80e7edaacfbcb191a26ac449f049bbce878c67a3/pystrct/pystrct.py#L70-L85
249,659
btrevizan/pystrct
pystrct/pystrct.py
StructFile.get
def get(self, i, n=1): """Get the n data starting from the ith. Keyword argument: i -- position in file n -- number of structs to be retrieved (default 1) Must be greater than 0. Return: A data in the format of obj_fmt, if n = 1. A list of structs, otherwise. *This method changes file.tell value. """ # If there is nothing to get... if self.size == 0: return None if n < 1: return [] # Current byte position - (n * data_size) offset = i * self.__strct.size # Set file pointer to -(#data) self.__file.seek(offset) # Unpack raw data to struct data = map(lambda x: self.unpack(x), self.raw(n)) data = map(lambda x: x if len(x) > 1 else x[0], data) data = list(data) # If n is 1, return a single unpacked data. # Otherwise, return a list of unpacked data return data[0] if n == 1 else data
python
def get(self, i, n=1): """Get the n data starting from the ith. Keyword argument: i -- position in file n -- number of structs to be retrieved (default 1) Must be greater than 0. Return: A data in the format of obj_fmt, if n = 1. A list of structs, otherwise. *This method changes file.tell value. """ # If there is nothing to get... if self.size == 0: return None if n < 1: return [] # Current byte position - (n * data_size) offset = i * self.__strct.size # Set file pointer to -(#data) self.__file.seek(offset) # Unpack raw data to struct data = map(lambda x: self.unpack(x), self.raw(n)) data = map(lambda x: x if len(x) > 1 else x[0], data) data = list(data) # If n is 1, return a single unpacked data. # Otherwise, return a list of unpacked data return data[0] if n == 1 else data
[ "def", "get", "(", "self", ",", "i", ",", "n", "=", "1", ")", ":", "# If there is nothing to get...", "if", "self", ".", "size", "==", "0", ":", "return", "None", "if", "n", "<", "1", ":", "return", "[", "]", "# Current byte position - (n * data_size)", "offset", "=", "i", "*", "self", ".", "__strct", ".", "size", "# Set file pointer to -(#data)", "self", ".", "__file", ".", "seek", "(", "offset", ")", "# Unpack raw data to struct", "data", "=", "map", "(", "lambda", "x", ":", "self", ".", "unpack", "(", "x", ")", ",", "self", ".", "raw", "(", "n", ")", ")", "data", "=", "map", "(", "lambda", "x", ":", "x", "if", "len", "(", "x", ")", ">", "1", "else", "x", "[", "0", "]", ",", "data", ")", "data", "=", "list", "(", "data", ")", "# If n is 1, return a single unpacked data.", "# Otherwise, return a list of unpacked data", "return", "data", "[", "0", "]", "if", "n", "==", "1", "else", "data" ]
Get the n data starting from the ith. Keyword argument: i -- position in file n -- number of structs to be retrieved (default 1) Must be greater than 0. Return: A data in the format of obj_fmt, if n = 1. A list of structs, otherwise. *This method changes file.tell value.
[ "Get", "the", "n", "data", "starting", "from", "the", "ith", "." ]
80e7edaacfbcb191a26ac449f049bbce878c67a3
https://github.com/btrevizan/pystrct/blob/80e7edaacfbcb191a26ac449f049bbce878c67a3/pystrct/pystrct.py#L87-L121
249,660
btrevizan/pystrct
pystrct/pystrct.py
StructFile.last
def last(self): """Get the last object in file.""" # End of file self.__file.seek(0, 2) # Get the last struct data = self.get(self.length - 1) return data
python
def last(self): """Get the last object in file.""" # End of file self.__file.seek(0, 2) # Get the last struct data = self.get(self.length - 1) return data
[ "def", "last", "(", "self", ")", ":", "# End of file", "self", ".", "__file", ".", "seek", "(", "0", ",", "2", ")", "# Get the last struct", "data", "=", "self", ".", "get", "(", "self", ".", "length", "-", "1", ")", "return", "data" ]
Get the last object in file.
[ "Get", "the", "last", "object", "in", "file", "." ]
80e7edaacfbcb191a26ac449f049bbce878c67a3
https://github.com/btrevizan/pystrct/blob/80e7edaacfbcb191a26ac449f049bbce878c67a3/pystrct/pystrct.py#L123-L131
249,661
btrevizan/pystrct
pystrct/pystrct.py
StructFile.append
def append(self, value): """Write the value into the file. Keyword arguments: value -- value to be writen (tuple) """ # Pack value data = self.pack(value) # End of file self.__file.seek(0, 2) # Write packed value self.__file.write(data)
python
def append(self, value): """Write the value into the file. Keyword arguments: value -- value to be writen (tuple) """ # Pack value data = self.pack(value) # End of file self.__file.seek(0, 2) # Write packed value self.__file.write(data)
[ "def", "append", "(", "self", ",", "value", ")", ":", "# Pack value", "data", "=", "self", ".", "pack", "(", "value", ")", "# End of file", "self", ".", "__file", ".", "seek", "(", "0", ",", "2", ")", "# Write packed value", "self", ".", "__file", ".", "write", "(", "data", ")" ]
Write the value into the file. Keyword arguments: value -- value to be writen (tuple)
[ "Write", "the", "value", "into", "the", "file", "." ]
80e7edaacfbcb191a26ac449f049bbce878c67a3
https://github.com/btrevizan/pystrct/blob/80e7edaacfbcb191a26ac449f049bbce878c67a3/pystrct/pystrct.py#L175-L188
249,662
btrevizan/pystrct
pystrct/pystrct.py
StructFile.write
def write(self, i, value): """Write value in ith position in file. Keyword arguments: i -- position in file value -- value to be packed (tuple) """ # Current byte position - (n * data_size) offset = i * self.__strct.size # Set file pointer to -(#data) self.__file.seek(offset) # Pack value data = self.pack(value) # Write packed value self.__file.write(data)
python
def write(self, i, value): """Write value in ith position in file. Keyword arguments: i -- position in file value -- value to be packed (tuple) """ # Current byte position - (n * data_size) offset = i * self.__strct.size # Set file pointer to -(#data) self.__file.seek(offset) # Pack value data = self.pack(value) # Write packed value self.__file.write(data)
[ "def", "write", "(", "self", ",", "i", ",", "value", ")", ":", "# Current byte position - (n * data_size)", "offset", "=", "i", "*", "self", ".", "__strct", ".", "size", "# Set file pointer to -(#data)", "self", ".", "__file", ".", "seek", "(", "offset", ")", "# Pack value", "data", "=", "self", ".", "pack", "(", "value", ")", "# Write packed value", "self", ".", "__file", ".", "write", "(", "data", ")" ]
Write value in ith position in file. Keyword arguments: i -- position in file value -- value to be packed (tuple)
[ "Write", "value", "in", "ith", "position", "in", "file", "." ]
80e7edaacfbcb191a26ac449f049bbce878c67a3
https://github.com/btrevizan/pystrct/blob/80e7edaacfbcb191a26ac449f049bbce878c67a3/pystrct/pystrct.py#L190-L207
249,663
radjkarl/appBase
appbase/Session.py
Session._inspectArguments
def _inspectArguments(self, args): """inspect the command-line-args and give them to appBase""" if args: self.exec_path = PathStr(args[0]) else: self.exec_path = None session_name = None args = args[1:] openSession = False for arg in args: if arg in ('-h', '--help'): self._showHelp() elif arg in ('-d', '--debug'): print('RUNNGING IN DEBUG-MODE') self.opts['debugMode'] = True elif arg in ('-l', '--log'): print('CREATE LOG') self.opts['createLog'] = True elif arg in ('-s', '--server'): self.opts['server'] = True elif arg in ('-o', '--open'): openSession = True elif openSession: session_name = arg else: print("Argument '%s' not known." % arg) return self._showHelp() return session_name
python
def _inspectArguments(self, args): """inspect the command-line-args and give them to appBase""" if args: self.exec_path = PathStr(args[0]) else: self.exec_path = None session_name = None args = args[1:] openSession = False for arg in args: if arg in ('-h', '--help'): self._showHelp() elif arg in ('-d', '--debug'): print('RUNNGING IN DEBUG-MODE') self.opts['debugMode'] = True elif arg in ('-l', '--log'): print('CREATE LOG') self.opts['createLog'] = True elif arg in ('-s', '--server'): self.opts['server'] = True elif arg in ('-o', '--open'): openSession = True elif openSession: session_name = arg else: print("Argument '%s' not known." % arg) return self._showHelp() return session_name
[ "def", "_inspectArguments", "(", "self", ",", "args", ")", ":", "if", "args", ":", "self", ".", "exec_path", "=", "PathStr", "(", "args", "[", "0", "]", ")", "else", ":", "self", ".", "exec_path", "=", "None", "session_name", "=", "None", "args", "=", "args", "[", "1", ":", "]", "openSession", "=", "False", "for", "arg", "in", "args", ":", "if", "arg", "in", "(", "'-h'", ",", "'--help'", ")", ":", "self", ".", "_showHelp", "(", ")", "elif", "arg", "in", "(", "'-d'", ",", "'--debug'", ")", ":", "print", "(", "'RUNNGING IN DEBUG-MODE'", ")", "self", ".", "opts", "[", "'debugMode'", "]", "=", "True", "elif", "arg", "in", "(", "'-l'", ",", "'--log'", ")", ":", "print", "(", "'CREATE LOG'", ")", "self", ".", "opts", "[", "'createLog'", "]", "=", "True", "elif", "arg", "in", "(", "'-s'", ",", "'--server'", ")", ":", "self", ".", "opts", "[", "'server'", "]", "=", "True", "elif", "arg", "in", "(", "'-o'", ",", "'--open'", ")", ":", "openSession", "=", "True", "elif", "openSession", ":", "session_name", "=", "arg", "else", ":", "print", "(", "\"Argument '%s' not known.\"", "%", "arg", ")", "return", "self", ".", "_showHelp", "(", ")", "return", "session_name" ]
inspect the command-line-args and give them to appBase
[ "inspect", "the", "command", "-", "line", "-", "args", "and", "give", "them", "to", "appBase" ]
72b514e6dee7c083f01a2d0b2cc93d46df55bdcb
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L327-L356
249,664
radjkarl/appBase
appbase/Session.py
Session.save
def save(self): """save the current session override, if session was saved earlier""" if self.path: self._saveState(self.path) else: self.saveAs()
python
def save(self): """save the current session override, if session was saved earlier""" if self.path: self._saveState(self.path) else: self.saveAs()
[ "def", "save", "(", "self", ")", ":", "if", "self", ".", "path", ":", "self", ".", "_saveState", "(", "self", ".", "path", ")", "else", ":", "self", ".", "saveAs", "(", ")" ]
save the current session override, if session was saved earlier
[ "save", "the", "current", "session", "override", "if", "session", "was", "saved", "earlier" ]
72b514e6dee7c083f01a2d0b2cc93d46df55bdcb
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L368-L374
249,665
radjkarl/appBase
appbase/Session.py
Session.open
def open(self): """open a session to define in a dialog in an extra window""" filename = self.dialogs.getOpenFileName(filter="*.%s" % self.FTYPE) if filename: self.new(filename)
python
def open(self): """open a session to define in a dialog in an extra window""" filename = self.dialogs.getOpenFileName(filter="*.%s" % self.FTYPE) if filename: self.new(filename)
[ "def", "open", "(", "self", ")", ":", "filename", "=", "self", ".", "dialogs", ".", "getOpenFileName", "(", "filter", "=", "\"*.%s\"", "%", "self", ".", "FTYPE", ")", "if", "filename", ":", "self", ".", "new", "(", "filename", ")" ]
open a session to define in a dialog in an extra window
[ "open", "a", "session", "to", "define", "in", "a", "dialog", "in", "an", "extra", "window" ]
72b514e6dee7c083f01a2d0b2cc93d46df55bdcb
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L397-L401
249,666
radjkarl/appBase
appbase/Session.py
Session.new
def new(self, filename=None): """start a session an independent process""" path = (self.exec_path,) if self.exec_path.filetype() in ('py', 'pyw', 'pyz', self.FTYPE): # get the absolute path to the python-executable p = find_executable("python") path = (p, 'python') + path else: # if run in frozen env (.exe): # first arg if execpath of the next session: path += (self.exec_path,) if filename: path += ('-o', filename) os.spawnl(os.P_NOWAIT, *path)
python
def new(self, filename=None): """start a session an independent process""" path = (self.exec_path,) if self.exec_path.filetype() in ('py', 'pyw', 'pyz', self.FTYPE): # get the absolute path to the python-executable p = find_executable("python") path = (p, 'python') + path else: # if run in frozen env (.exe): # first arg if execpath of the next session: path += (self.exec_path,) if filename: path += ('-o', filename) os.spawnl(os.P_NOWAIT, *path)
[ "def", "new", "(", "self", ",", "filename", "=", "None", ")", ":", "path", "=", "(", "self", ".", "exec_path", ",", ")", "if", "self", ".", "exec_path", ".", "filetype", "(", ")", "in", "(", "'py'", ",", "'pyw'", ",", "'pyz'", ",", "self", ".", "FTYPE", ")", ":", "# get the absolute path to the python-executable\r", "p", "=", "find_executable", "(", "\"python\"", ")", "path", "=", "(", "p", ",", "'python'", ")", "+", "path", "else", ":", "# if run in frozen env (.exe):\r", "# first arg if execpath of the next session:\r", "path", "+=", "(", "self", ".", "exec_path", ",", ")", "if", "filename", ":", "path", "+=", "(", "'-o'", ",", "filename", ")", "os", ".", "spawnl", "(", "os", ".", "P_NOWAIT", ",", "*", "path", ")" ]
start a session an independent process
[ "start", "a", "session", "an", "independent", "process" ]
72b514e6dee7c083f01a2d0b2cc93d46df55bdcb
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L403-L416
249,667
radjkarl/appBase
appbase/Session.py
Session._saveState
def _saveState(self, path): """save current state and add a new state""" self.addSession() # next session self._save(str(self.n_sessions), path)
python
def _saveState(self, path): """save current state and add a new state""" self.addSession() # next session self._save(str(self.n_sessions), path)
[ "def", "_saveState", "(", "self", ",", "path", ")", ":", "self", ".", "addSession", "(", ")", "# next session\r", "self", ".", "_save", "(", "str", "(", "self", ".", "n_sessions", ")", ",", "path", ")" ]
save current state and add a new state
[ "save", "current", "state", "and", "add", "a", "new", "state" ]
72b514e6dee7c083f01a2d0b2cc93d46df55bdcb
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L456-L459
249,668
radjkarl/appBase
appbase/Session.py
Session._autoSave
def _autoSave(self): """save state into 'autosave' """ a = 'autoSave' path = self.path if not path: path = self.dir.join('%s.%s' % (a, self.FTYPE)) self._createdAutosaveFile = path self.tmp_dir_save_session = self.tmp_dir_session.join(a).mkdir() self._save(a, path)
python
def _autoSave(self): """save state into 'autosave' """ a = 'autoSave' path = self.path if not path: path = self.dir.join('%s.%s' % (a, self.FTYPE)) self._createdAutosaveFile = path self.tmp_dir_save_session = self.tmp_dir_session.join(a).mkdir() self._save(a, path)
[ "def", "_autoSave", "(", "self", ")", ":", "a", "=", "'autoSave'", "path", "=", "self", ".", "path", "if", "not", "path", ":", "path", "=", "self", ".", "dir", ".", "join", "(", "'%s.%s'", "%", "(", "a", ",", "self", ".", "FTYPE", ")", ")", "self", ".", "_createdAutosaveFile", "=", "path", "self", ".", "tmp_dir_save_session", "=", "self", ".", "tmp_dir_session", ".", "join", "(", "a", ")", ".", "mkdir", "(", ")", "self", ".", "_save", "(", "a", ",", "path", ")" ]
save state into 'autosave'
[ "save", "state", "into", "autosave" ]
72b514e6dee7c083f01a2d0b2cc93d46df55bdcb
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L461-L469
249,669
radjkarl/appBase
appbase/Session.py
Session.blockingSave
def blockingSave(self, path): """ saved session to file - returns after finish only called by interactiveTutorial-save at the moment """ self.tmp_dir_save_session = self.tmp_dir_session.join('block').mkdir() state = {'session': dict(self.opts), 'dialogs': self.dialogs.saveState()} self.saveThread.prepare('0', path, self.tmp_dir_session, state) self.sigSave.emit(self) self.saveThread.run()
python
def blockingSave(self, path): """ saved session to file - returns after finish only called by interactiveTutorial-save at the moment """ self.tmp_dir_save_session = self.tmp_dir_session.join('block').mkdir() state = {'session': dict(self.opts), 'dialogs': self.dialogs.saveState()} self.saveThread.prepare('0', path, self.tmp_dir_session, state) self.sigSave.emit(self) self.saveThread.run()
[ "def", "blockingSave", "(", "self", ",", "path", ")", ":", "self", ".", "tmp_dir_save_session", "=", "self", ".", "tmp_dir_session", ".", "join", "(", "'block'", ")", ".", "mkdir", "(", ")", "state", "=", "{", "'session'", ":", "dict", "(", "self", ".", "opts", ")", ",", "'dialogs'", ":", "self", ".", "dialogs", ".", "saveState", "(", ")", "}", "self", ".", "saveThread", ".", "prepare", "(", "'0'", ",", "path", ",", "self", ".", "tmp_dir_session", ",", "state", ")", "self", ".", "sigSave", ".", "emit", "(", "self", ")", "self", ".", "saveThread", ".", "run", "(", ")" ]
saved session to file - returns after finish only called by interactiveTutorial-save at the moment
[ "saved", "session", "to", "file", "-", "returns", "after", "finish", "only", "called", "by", "interactiveTutorial", "-", "save", "at", "the", "moment" ]
72b514e6dee7c083f01a2d0b2cc93d46df55bdcb
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L471-L481
249,670
radjkarl/appBase
appbase/Session.py
Session._save
def _save(self, stateName, path): """save into 'stateName' to pyz-path""" print('saving...') state = {'session': dict(self.opts), 'dialogs': self.dialogs.saveState()} self.sigSave.emit(state) self.saveThread.prepare(stateName, path, self.tmp_dir_session, state) self.saveThread.start() self.current_session = stateName r = self.opts['recent sessions'] try: # is this session already exists: remove it r.pop(r.index(path)) except ValueError: pass # add this session at the beginning r.insert(0, path)
python
def _save(self, stateName, path): """save into 'stateName' to pyz-path""" print('saving...') state = {'session': dict(self.opts), 'dialogs': self.dialogs.saveState()} self.sigSave.emit(state) self.saveThread.prepare(stateName, path, self.tmp_dir_session, state) self.saveThread.start() self.current_session = stateName r = self.opts['recent sessions'] try: # is this session already exists: remove it r.pop(r.index(path)) except ValueError: pass # add this session at the beginning r.insert(0, path)
[ "def", "_save", "(", "self", ",", "stateName", ",", "path", ")", ":", "print", "(", "'saving...'", ")", "state", "=", "{", "'session'", ":", "dict", "(", "self", ".", "opts", ")", ",", "'dialogs'", ":", "self", ".", "dialogs", ".", "saveState", "(", ")", "}", "self", ".", "sigSave", ".", "emit", "(", "state", ")", "self", ".", "saveThread", ".", "prepare", "(", "stateName", ",", "path", ",", "self", ".", "tmp_dir_session", ",", "state", ")", "self", ".", "saveThread", ".", "start", "(", ")", "self", ".", "current_session", "=", "stateName", "r", "=", "self", ".", "opts", "[", "'recent sessions'", "]", "try", ":", "# is this session already exists: remove it\r", "r", ".", "pop", "(", "r", ".", "index", "(", "path", ")", ")", "except", "ValueError", ":", "pass", "# add this session at the beginning\r", "r", ".", "insert", "(", "0", ",", "path", ")" ]
save into 'stateName' to pyz-path
[ "save", "into", "stateName", "to", "pyz", "-", "path" ]
72b514e6dee7c083f01a2d0b2cc93d46df55bdcb
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L483-L503
249,671
radjkarl/appBase
appbase/Session.py
_SaveThread._recusiveReplaceArrayWithPlaceholder
def _recusiveReplaceArrayWithPlaceholder(self, state): """ replace all numpy.array within the state dict with a placeholder this allows to save the arrays extra using numpy.save_compressed """ arrays = {} def recursive(state): for key, val in state.items(): if isinstance(val, dict): recursive(val) else: if isinstance(val, np.ndarray): name = 'arr_%i' % recursive.c arrays[name] = val state[key] = name recursive.c += 1 recursive.c = 0 recursive(state) return arrays
python
def _recusiveReplaceArrayWithPlaceholder(self, state): """ replace all numpy.array within the state dict with a placeholder this allows to save the arrays extra using numpy.save_compressed """ arrays = {} def recursive(state): for key, val in state.items(): if isinstance(val, dict): recursive(val) else: if isinstance(val, np.ndarray): name = 'arr_%i' % recursive.c arrays[name] = val state[key] = name recursive.c += 1 recursive.c = 0 recursive(state) return arrays
[ "def", "_recusiveReplaceArrayWithPlaceholder", "(", "self", ",", "state", ")", ":", "arrays", "=", "{", "}", "def", "recursive", "(", "state", ")", ":", "for", "key", ",", "val", "in", "state", ".", "items", "(", ")", ":", "if", "isinstance", "(", "val", ",", "dict", ")", ":", "recursive", "(", "val", ")", "else", ":", "if", "isinstance", "(", "val", ",", "np", ".", "ndarray", ")", ":", "name", "=", "'arr_%i'", "%", "recursive", ".", "c", "arrays", "[", "name", "]", "=", "val", "state", "[", "key", "]", "=", "name", "recursive", ".", "c", "+=", "1", "recursive", ".", "c", "=", "0", "recursive", "(", "state", ")", "return", "arrays" ]
replace all numpy.array within the state dict with a placeholder this allows to save the arrays extra using numpy.save_compressed
[ "replace", "all", "numpy", ".", "array", "within", "the", "state", "dict", "with", "a", "placeholder", "this", "allows", "to", "save", "the", "arrays", "extra", "using", "numpy", ".", "save_compressed" ]
72b514e6dee7c083f01a2d0b2cc93d46df55bdcb
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/Session.py#L516-L536
249,672
eeshangarg/nukefilewalker
nukefilewalker/indexer.py
FileIndexer.start_indexing
def start_indexing(self): """ Read all the files and tokenize their text into words and accumulate all the words in a list. """ for filepath in self.filepaths: with open(filepath) as fp: blob = fp.read() self.words.extend(self.tokenize(blob))
python
def start_indexing(self): """ Read all the files and tokenize their text into words and accumulate all the words in a list. """ for filepath in self.filepaths: with open(filepath) as fp: blob = fp.read() self.words.extend(self.tokenize(blob))
[ "def", "start_indexing", "(", "self", ")", ":", "for", "filepath", "in", "self", ".", "filepaths", ":", "with", "open", "(", "filepath", ")", "as", "fp", ":", "blob", "=", "fp", ".", "read", "(", ")", "self", ".", "words", ".", "extend", "(", "self", ".", "tokenize", "(", "blob", ")", ")" ]
Read all the files and tokenize their text into words and accumulate all the words in a list.
[ "Read", "all", "the", "files", "and", "tokenize", "their", "text", "into", "words", "and", "accumulate", "all", "the", "words", "in", "a", "list", "." ]
8538d8b70a95691b8d2d01248cd69eae18558d11
https://github.com/eeshangarg/nukefilewalker/blob/8538d8b70a95691b8d2d01248cd69eae18558d11/nukefilewalker/indexer.py#L61-L69
249,673
pudo/mqlparser
mqlparser/util.py
parse_name
def parse_name(name): """ Split a query name into field name, operator and whether it is inverted. """ inverted, op = False, OP_EQ if name is not None: for op_ in (OP_NIN, OP_IN, OP_NOT, OP_LIKE): if name.endswith(op_): op = op_ name = name[:len(name) - len(op)] break if name.startswith('!'): inverted = True name = name[1:] return name, inverted, op
python
def parse_name(name): """ Split a query name into field name, operator and whether it is inverted. """ inverted, op = False, OP_EQ if name is not None: for op_ in (OP_NIN, OP_IN, OP_NOT, OP_LIKE): if name.endswith(op_): op = op_ name = name[:len(name) - len(op)] break if name.startswith('!'): inverted = True name = name[1:] return name, inverted, op
[ "def", "parse_name", "(", "name", ")", ":", "inverted", ",", "op", "=", "False", ",", "OP_EQ", "if", "name", "is", "not", "None", ":", "for", "op_", "in", "(", "OP_NIN", ",", "OP_IN", ",", "OP_NOT", ",", "OP_LIKE", ")", ":", "if", "name", ".", "endswith", "(", "op_", ")", ":", "op", "=", "op_", "name", "=", "name", "[", ":", "len", "(", "name", ")", "-", "len", "(", "op", ")", "]", "break", "if", "name", ".", "startswith", "(", "'!'", ")", ":", "inverted", "=", "True", "name", "=", "name", "[", "1", ":", "]", "return", "name", ",", "inverted", ",", "op" ]
Split a query name into field name, operator and whether it is inverted.
[ "Split", "a", "query", "name", "into", "field", "name", "operator", "and", "whether", "it", "is", "inverted", "." ]
80f2e8c837a31ff1f5d0b8fb89dba9f3b2fef4bf
https://github.com/pudo/mqlparser/blob/80f2e8c837a31ff1f5d0b8fb89dba9f3b2fef4bf/mqlparser/util.py#L9-L22
249,674
ECESeniorDesign/lazy_record
lazy_record/__init__.py
connect_db
def connect_db(database_name=":memory:"): """ Connect lazy_record to the database at the path specified in +database_name+. """ db = repo.Repo.connect_db(database_name) base.Repo.db = db query.Repo.db = db
python
def connect_db(database_name=":memory:"): """ Connect lazy_record to the database at the path specified in +database_name+. """ db = repo.Repo.connect_db(database_name) base.Repo.db = db query.Repo.db = db
[ "def", "connect_db", "(", "database_name", "=", "\":memory:\"", ")", ":", "db", "=", "repo", ".", "Repo", ".", "connect_db", "(", "database_name", ")", "base", ".", "Repo", ".", "db", "=", "db", "query", ".", "Repo", ".", "db", "=", "db" ]
Connect lazy_record to the database at the path specified in +database_name+.
[ "Connect", "lazy_record", "to", "the", "database", "at", "the", "path", "specified", "in", "+", "database_name", "+", "." ]
929d3cc7c2538b0f792365c0d2b0e0d41084c2dd
https://github.com/ECESeniorDesign/lazy_record/blob/929d3cc7c2538b0f792365c0d2b0e0d41084c2dd/lazy_record/__init__.py#L10-L17
249,675
ECESeniorDesign/lazy_record
lazy_record/__init__.py
close_db
def close_db(): """ Close the connection to the database opened in `connect_db` """ db = repo.Repo.db if db is not None: db.close() repo.Repo.db = None base.Repo.db = None query.Repo.db = None
python
def close_db(): """ Close the connection to the database opened in `connect_db` """ db = repo.Repo.db if db is not None: db.close() repo.Repo.db = None base.Repo.db = None query.Repo.db = None
[ "def", "close_db", "(", ")", ":", "db", "=", "repo", ".", "Repo", ".", "db", "if", "db", "is", "not", "None", ":", "db", ".", "close", "(", ")", "repo", ".", "Repo", ".", "db", "=", "None", "base", ".", "Repo", ".", "db", "=", "None", "query", ".", "Repo", ".", "db", "=", "None" ]
Close the connection to the database opened in `connect_db`
[ "Close", "the", "connection", "to", "the", "database", "opened", "in", "connect_db" ]
929d3cc7c2538b0f792365c0d2b0e0d41084c2dd
https://github.com/ECESeniorDesign/lazy_record/blob/929d3cc7c2538b0f792365c0d2b0e0d41084c2dd/lazy_record/__init__.py#L20-L29
249,676
ECESeniorDesign/lazy_record
lazy_record/__init__.py
load_schema
def load_schema(schema): """ Load a schema file with path +schema+ into the database. Assumes that there exists an active database connection. """ with repo.Repo.db: repo.Repo.db.executescript(schema)
python
def load_schema(schema): """ Load a schema file with path +schema+ into the database. Assumes that there exists an active database connection. """ with repo.Repo.db: repo.Repo.db.executescript(schema)
[ "def", "load_schema", "(", "schema", ")", ":", "with", "repo", ".", "Repo", ".", "db", ":", "repo", ".", "Repo", ".", "db", ".", "executescript", "(", "schema", ")" ]
Load a schema file with path +schema+ into the database. Assumes that there exists an active database connection.
[ "Load", "a", "schema", "file", "with", "path", "+", "schema", "+", "into", "the", "database", ".", "Assumes", "that", "there", "exists", "an", "active", "database", "connection", "." ]
929d3cc7c2538b0f792365c0d2b0e0d41084c2dd
https://github.com/ECESeniorDesign/lazy_record/blob/929d3cc7c2538b0f792365c0d2b0e0d41084c2dd/lazy_record/__init__.py#L31-L37
249,677
mverleg/package_versions
package_versions/versions.py
VersionRange.update_values
def update_values(self, min=None, max=None, conflict='warning'): """ Update the boundaries, handling possible conflicts. :param conflict: What to do in case of failure: 'silent', 'warning' or 'error'. """ conflict_txt = None if min is not None: if min > self.min: if min > self.max: self.max = self.highest self.prefer_highest = False conflict_txt = 'Minimum {0:s} conflicts with maximum {1:s}; minimum is higher so it takes precedence, but lower values in range are not preferred.'.format('{0:d}.{1:d}'.format(*self.to_tup(min)), '{0:d}.{1:d}'.format(*self.to_tup(self.max))) self.min = min if max is not None: if max < self.max: if max >= self.min: self.max = max else: self.prefer_highest = False conflict_txt = 'Maximum {0:s} conflicts with minimum {1:s}; minimum is higher so takes it precedence, but lower values in range are now preferred.'.format('{0:d}.{1:d}'.format(*self.to_tup(max)), '{0:d}.{1:d}'.format(*self.to_tup(self.min))) if conflict_txt: version_problem_notify(conflict_txt, conflict=conflict)
python
def update_values(self, min=None, max=None, conflict='warning'): """ Update the boundaries, handling possible conflicts. :param conflict: What to do in case of failure: 'silent', 'warning' or 'error'. """ conflict_txt = None if min is not None: if min > self.min: if min > self.max: self.max = self.highest self.prefer_highest = False conflict_txt = 'Minimum {0:s} conflicts with maximum {1:s}; minimum is higher so it takes precedence, but lower values in range are not preferred.'.format('{0:d}.{1:d}'.format(*self.to_tup(min)), '{0:d}.{1:d}'.format(*self.to_tup(self.max))) self.min = min if max is not None: if max < self.max: if max >= self.min: self.max = max else: self.prefer_highest = False conflict_txt = 'Maximum {0:s} conflicts with minimum {1:s}; minimum is higher so takes it precedence, but lower values in range are now preferred.'.format('{0:d}.{1:d}'.format(*self.to_tup(max)), '{0:d}.{1:d}'.format(*self.to_tup(self.min))) if conflict_txt: version_problem_notify(conflict_txt, conflict=conflict)
[ "def", "update_values", "(", "self", ",", "min", "=", "None", ",", "max", "=", "None", ",", "conflict", "=", "'warning'", ")", ":", "conflict_txt", "=", "None", "if", "min", "is", "not", "None", ":", "if", "min", ">", "self", ".", "min", ":", "if", "min", ">", "self", ".", "max", ":", "self", ".", "max", "=", "self", ".", "highest", "self", ".", "prefer_highest", "=", "False", "conflict_txt", "=", "'Minimum {0:s} conflicts with maximum {1:s}; minimum is higher so it takes precedence, but lower values in range are not preferred.'", ".", "format", "(", "'{0:d}.{1:d}'", ".", "format", "(", "*", "self", ".", "to_tup", "(", "min", ")", ")", ",", "'{0:d}.{1:d}'", ".", "format", "(", "*", "self", ".", "to_tup", "(", "self", ".", "max", ")", ")", ")", "self", ".", "min", "=", "min", "if", "max", "is", "not", "None", ":", "if", "max", "<", "self", ".", "max", ":", "if", "max", ">=", "self", ".", "min", ":", "self", ".", "max", "=", "max", "else", ":", "self", ".", "prefer_highest", "=", "False", "conflict_txt", "=", "'Maximum {0:s} conflicts with minimum {1:s}; minimum is higher so takes it precedence, but lower values in range are now preferred.'", ".", "format", "(", "'{0:d}.{1:d}'", ".", "format", "(", "*", "self", ".", "to_tup", "(", "max", ")", ")", ",", "'{0:d}.{1:d}'", ".", "format", "(", "*", "self", ".", "to_tup", "(", "self", ".", "min", ")", ")", ")", "if", "conflict_txt", ":", "version_problem_notify", "(", "conflict_txt", ",", "conflict", "=", "conflict", ")" ]
Update the boundaries, handling possible conflicts. :param conflict: What to do in case of failure: 'silent', 'warning' or 'error'.
[ "Update", "the", "boundaries", "handling", "possible", "conflicts", "." ]
187c6ff709199e6abc2765d64e0851bca5906088
https://github.com/mverleg/package_versions/blob/187c6ff709199e6abc2765d64e0851bca5906088/package_versions/versions.py#L70-L92
249,678
mverleg/package_versions
package_versions/versions.py
VersionRange.add_selection
def add_selection(self, selection, conflict = 'warning'): """ Restrict the range given a selection string :param selection: A single selection (without comma), like '>=1.3'. :param conflict: What to do in case of failure: 'silent', 'warning' or 'error'. """ selection = selection.replace(' ', '').replace('=.', '=0.') if not selection: return if selection.count(',') or selection.count('_'): raise VersionFormatError(('Version string "{0:s}" is incorrect. Perhaps you\'re trying to add a combined one; ' + 'you should use add_selections for that').format(selection)) if selection.count('.') > 1: raise VersionFormatError(('Version string "{0:s}" is incorrect. Perhaps it contains a version longer than 2 numbers ' + '(e.g. "3.14)" which is intentionally not supported. Version numbers beyond the second are for bugfixes only.').format(selection)) regex = r'^([><=]=?)(\d+|\*)(?:\.(\d*|\*))?$' found = findall(regex, selection) if not found: raise VersionFormatError('Version string "{0:s}" not properly formatted according to "{1:s}".'.format(selection, regex)) operation, majorstr, minorstr = found[0] if majorstr == '*': return major = int(majorstr) if minorstr == '*': self.update_values(conflict=conflict, min = self.to_nr(major, 0), max = self.to_nr(major + 1, 0) - 1, ) return exclusive = int(not operation.endswith('=')) major_only = int(not minorstr) nr = self.to_nr(major, int(minorstr or 0)) if operation.startswith('='): self.update_values(conflict=conflict, min = nr, max = nr + major_only * self.limit - major_only, ) elif operation.startswith('<'): self.update_values(conflict=conflict, max = nr - exclusive + (not exclusive) * (major_only * self.limit - major_only), ) elif operation.startswith('>'): self.update_values(conflict=conflict, min = nr + exclusive + exclusive * (major_only * self.limit - major_only), ) else: raise VersionFormatError('Version (in)equality operator "{0:s}" not recognized. ' + 'Full operation "{1:s}"'.format(operation, selection))
python
def add_selection(self, selection, conflict = 'warning'): """ Restrict the range given a selection string :param selection: A single selection (without comma), like '>=1.3'. :param conflict: What to do in case of failure: 'silent', 'warning' or 'error'. """ selection = selection.replace(' ', '').replace('=.', '=0.') if not selection: return if selection.count(',') or selection.count('_'): raise VersionFormatError(('Version string "{0:s}" is incorrect. Perhaps you\'re trying to add a combined one; ' + 'you should use add_selections for that').format(selection)) if selection.count('.') > 1: raise VersionFormatError(('Version string "{0:s}" is incorrect. Perhaps it contains a version longer than 2 numbers ' + '(e.g. "3.14)" which is intentionally not supported. Version numbers beyond the second are for bugfixes only.').format(selection)) regex = r'^([><=]=?)(\d+|\*)(?:\.(\d*|\*))?$' found = findall(regex, selection) if not found: raise VersionFormatError('Version string "{0:s}" not properly formatted according to "{1:s}".'.format(selection, regex)) operation, majorstr, minorstr = found[0] if majorstr == '*': return major = int(majorstr) if minorstr == '*': self.update_values(conflict=conflict, min = self.to_nr(major, 0), max = self.to_nr(major + 1, 0) - 1, ) return exclusive = int(not operation.endswith('=')) major_only = int(not minorstr) nr = self.to_nr(major, int(minorstr or 0)) if operation.startswith('='): self.update_values(conflict=conflict, min = nr, max = nr + major_only * self.limit - major_only, ) elif operation.startswith('<'): self.update_values(conflict=conflict, max = nr - exclusive + (not exclusive) * (major_only * self.limit - major_only), ) elif operation.startswith('>'): self.update_values(conflict=conflict, min = nr + exclusive + exclusive * (major_only * self.limit - major_only), ) else: raise VersionFormatError('Version (in)equality operator "{0:s}" not recognized. ' + 'Full operation "{1:s}"'.format(operation, selection))
[ "def", "add_selection", "(", "self", ",", "selection", ",", "conflict", "=", "'warning'", ")", ":", "selection", "=", "selection", ".", "replace", "(", "' '", ",", "''", ")", ".", "replace", "(", "'=.'", ",", "'=0.'", ")", "if", "not", "selection", ":", "return", "if", "selection", ".", "count", "(", "','", ")", "or", "selection", ".", "count", "(", "'_'", ")", ":", "raise", "VersionFormatError", "(", "(", "'Version string \"{0:s}\" is incorrect. Perhaps you\\'re trying to add a combined one; '", "+", "'you should use add_selections for that'", ")", ".", "format", "(", "selection", ")", ")", "if", "selection", ".", "count", "(", "'.'", ")", ">", "1", ":", "raise", "VersionFormatError", "(", "(", "'Version string \"{0:s}\" is incorrect. Perhaps it contains a version longer than 2 numbers '", "+", "'(e.g. \"3.14)\" which is intentionally not supported. Version numbers beyond the second are for bugfixes only.'", ")", ".", "format", "(", "selection", ")", ")", "regex", "=", "r'^([><=]=?)(\\d+|\\*)(?:\\.(\\d*|\\*))?$'", "found", "=", "findall", "(", "regex", ",", "selection", ")", "if", "not", "found", ":", "raise", "VersionFormatError", "(", "'Version string \"{0:s}\" not properly formatted according to \"{1:s}\".'", ".", "format", "(", "selection", ",", "regex", ")", ")", "operation", ",", "majorstr", ",", "minorstr", "=", "found", "[", "0", "]", "if", "majorstr", "==", "'*'", ":", "return", "major", "=", "int", "(", "majorstr", ")", "if", "minorstr", "==", "'*'", ":", "self", ".", "update_values", "(", "conflict", "=", "conflict", ",", "min", "=", "self", ".", "to_nr", "(", "major", ",", "0", ")", ",", "max", "=", "self", ".", "to_nr", "(", "major", "+", "1", ",", "0", ")", "-", "1", ",", ")", "return", "exclusive", "=", "int", "(", "not", "operation", ".", "endswith", "(", "'='", ")", ")", "major_only", "=", "int", "(", "not", "minorstr", ")", "nr", "=", "self", ".", "to_nr", "(", "major", ",", "int", "(", "minorstr", "or", "0", ")", ")", "if", "operation", ".", 
"startswith", "(", "'='", ")", ":", "self", ".", "update_values", "(", "conflict", "=", "conflict", ",", "min", "=", "nr", ",", "max", "=", "nr", "+", "major_only", "*", "self", ".", "limit", "-", "major_only", ",", ")", "elif", "operation", ".", "startswith", "(", "'<'", ")", ":", "self", ".", "update_values", "(", "conflict", "=", "conflict", ",", "max", "=", "nr", "-", "exclusive", "+", "(", "not", "exclusive", ")", "*", "(", "major_only", "*", "self", ".", "limit", "-", "major_only", ")", ",", ")", "elif", "operation", ".", "startswith", "(", "'>'", ")", ":", "self", ".", "update_values", "(", "conflict", "=", "conflict", ",", "min", "=", "nr", "+", "exclusive", "+", "exclusive", "*", "(", "major_only", "*", "self", ".", "limit", "-", "major_only", ")", ",", ")", "else", ":", "raise", "VersionFormatError", "(", "'Version (in)equality operator \"{0:s}\" not recognized. '", "+", "'Full operation \"{1:s}\"'", ".", "format", "(", "operation", ",", "selection", ")", ")" ]
Restrict the range given a selection string :param selection: A single selection (without comma), like '>=1.3'. :param conflict: What to do in case of failure: 'silent', 'warning' or 'error'.
[ "Restrict", "the", "range", "given", "a", "selection", "string" ]
187c6ff709199e6abc2765d64e0851bca5906088
https://github.com/mverleg/package_versions/blob/187c6ff709199e6abc2765d64e0851bca5906088/package_versions/versions.py#L101-L149
249,679
mverleg/package_versions
package_versions/versions.py
VersionRange.choose
def choose(self, versions, conflict='silent'): """ Choose the highest version in the range. :param versions: Iterable of available versions. """ assert conflict in ('silent', 'warning', 'error') if not versions: raise VersionRangeMismatch('No versions to choose from') version_map = {} for version in versions: version_map[version] = str2nr(version, mx=self.limit) top_version, top_nr = None, 0 """ Try to find the highest value in range. """ for version, nr in version_map.items(): if nr >= top_nr: if self.min <= nr <= self.max: top_version, top_nr = version, nr if top_version: return top_version """ We need to look outside the range, so maybe give a warning. """ version_problem_notify('No matching version found for range "{0:s}" from options "{1:s}"; other options might be considered.'.format( str(self), '/'.join(str(v) for v in versions)), conflict=conflict) """ Failing the above, try to find the lowest value above the range. """ top_nr = self.highest for version, nr in version_map.items(): if nr < top_nr: if nr >= self.max: top_version, top_nr = version, nr if top_version: return top_version """ Failing the above two, try to highest value below the range (so just the highest). """ top_nr = 0 for version, nr in version_map.items(): if nr > top_nr: top_version, top_nr = version, nr if top_version: return top_version
python
def choose(self, versions, conflict='silent'): """ Choose the highest version in the range. :param versions: Iterable of available versions. """ assert conflict in ('silent', 'warning', 'error') if not versions: raise VersionRangeMismatch('No versions to choose from') version_map = {} for version in versions: version_map[version] = str2nr(version, mx=self.limit) top_version, top_nr = None, 0 """ Try to find the highest value in range. """ for version, nr in version_map.items(): if nr >= top_nr: if self.min <= nr <= self.max: top_version, top_nr = version, nr if top_version: return top_version """ We need to look outside the range, so maybe give a warning. """ version_problem_notify('No matching version found for range "{0:s}" from options "{1:s}"; other options might be considered.'.format( str(self), '/'.join(str(v) for v in versions)), conflict=conflict) """ Failing the above, try to find the lowest value above the range. """ top_nr = self.highest for version, nr in version_map.items(): if nr < top_nr: if nr >= self.max: top_version, top_nr = version, nr if top_version: return top_version """ Failing the above two, try to highest value below the range (so just the highest). """ top_nr = 0 for version, nr in version_map.items(): if nr > top_nr: top_version, top_nr = version, nr if top_version: return top_version
[ "def", "choose", "(", "self", ",", "versions", ",", "conflict", "=", "'silent'", ")", ":", "assert", "conflict", "in", "(", "'silent'", ",", "'warning'", ",", "'error'", ")", "if", "not", "versions", ":", "raise", "VersionRangeMismatch", "(", "'No versions to choose from'", ")", "version_map", "=", "{", "}", "for", "version", "in", "versions", ":", "version_map", "[", "version", "]", "=", "str2nr", "(", "version", ",", "mx", "=", "self", ".", "limit", ")", "top_version", ",", "top_nr", "=", "None", ",", "0", "\"\"\" Try to find the highest value in range. \"\"\"", "for", "version", ",", "nr", "in", "version_map", ".", "items", "(", ")", ":", "if", "nr", ">=", "top_nr", ":", "if", "self", ".", "min", "<=", "nr", "<=", "self", ".", "max", ":", "top_version", ",", "top_nr", "=", "version", ",", "nr", "if", "top_version", ":", "return", "top_version", "\"\"\" We need to look outside the range, so maybe give a warning. \"\"\"", "version_problem_notify", "(", "'No matching version found for range \"{0:s}\" from options \"{1:s}\"; other options might be considered.'", ".", "format", "(", "str", "(", "self", ")", ",", "'/'", ".", "join", "(", "str", "(", "v", ")", "for", "v", "in", "versions", ")", ")", ",", "conflict", "=", "conflict", ")", "\"\"\" Failing the above, try to find the lowest value above the range. \"\"\"", "top_nr", "=", "self", ".", "highest", "for", "version", ",", "nr", "in", "version_map", ".", "items", "(", ")", ":", "if", "nr", "<", "top_nr", ":", "if", "nr", ">=", "self", ".", "max", ":", "top_version", ",", "top_nr", "=", "version", ",", "nr", "if", "top_version", ":", "return", "top_version", "\"\"\" Failing the above two, try to highest value below the range (so just the highest). \"\"\"", "top_nr", "=", "0", "for", "version", ",", "nr", "in", "version_map", ".", "items", "(", ")", ":", "if", "nr", ">", "top_nr", ":", "top_version", ",", "top_nr", "=", "version", ",", "nr", "if", "top_version", ":", "return", "top_version" ]
Choose the highest version in the range. :param versions: Iterable of available versions.
[ "Choose", "the", "highest", "version", "in", "the", "range", "." ]
187c6ff709199e6abc2765d64e0851bca5906088
https://github.com/mverleg/package_versions/blob/187c6ff709199e6abc2765d64e0851bca5906088/package_versions/versions.py#L151-L188
249,680
OpenGov/python_data_wrap
datawrap/listwrap.py
is_slice_or_dim_range_request
def is_slice_or_dim_range_request(key, depth=0): ''' Checks if a particular key is a slice, DimensionRange or list of those types ''' # Slice, DimensionRange, or list of those elements return (is_slice_or_dim_range(key) or # Don't check more than the first depth (depth == 0 and non_str_len_no_throw(key) > 0 and all(is_slice_or_dim_range_request(subkey, depth+1) for subkey in key)))
python
def is_slice_or_dim_range_request(key, depth=0): ''' Checks if a particular key is a slice, DimensionRange or list of those types ''' # Slice, DimensionRange, or list of those elements return (is_slice_or_dim_range(key) or # Don't check more than the first depth (depth == 0 and non_str_len_no_throw(key) > 0 and all(is_slice_or_dim_range_request(subkey, depth+1) for subkey in key)))
[ "def", "is_slice_or_dim_range_request", "(", "key", ",", "depth", "=", "0", ")", ":", "# Slice, DimensionRange, or list of those elements", "return", "(", "is_slice_or_dim_range", "(", "key", ")", "or", "# Don't check more than the first depth", "(", "depth", "==", "0", "and", "non_str_len_no_throw", "(", "key", ")", ">", "0", "and", "all", "(", "is_slice_or_dim_range_request", "(", "subkey", ",", "depth", "+", "1", ")", "for", "subkey", "in", "key", ")", ")", ")" ]
Checks if a particular key is a slice, DimensionRange or list of those types
[ "Checks", "if", "a", "particular", "key", "is", "a", "slice", "DimensionRange", "or", "list", "of", "those", "types" ]
7de38bb30d7a500adc336a4a7999528d753e5600
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/listwrap.py#L52-L61
249,681
OpenGov/python_data_wrap
datawrap/listwrap.py
get_restricted_index
def get_restricted_index(index, length, length_index_allowed=True): ''' Converts negative indices to positive ones and indices above length to length or length-1 depending on lengthAllowed. ''' if index and index >= length: index = length if length_index_allowed else length-1 return get_non_negative_index(index, length)
python
def get_restricted_index(index, length, length_index_allowed=True): ''' Converts negative indices to positive ones and indices above length to length or length-1 depending on lengthAllowed. ''' if index and index >= length: index = length if length_index_allowed else length-1 return get_non_negative_index(index, length)
[ "def", "get_restricted_index", "(", "index", ",", "length", ",", "length_index_allowed", "=", "True", ")", ":", "if", "index", "and", "index", ">=", "length", ":", "index", "=", "length", "if", "length_index_allowed", "else", "length", "-", "1", "return", "get_non_negative_index", "(", "index", ",", "length", ")" ]
Converts negative indices to positive ones and indices above length to length or length-1 depending on lengthAllowed.
[ "Converts", "negative", "indices", "to", "positive", "ones", "and", "indices", "above", "length", "to", "length", "or", "length", "-", "1", "depending", "on", "lengthAllowed", "." ]
7de38bb30d7a500adc336a4a7999528d753e5600
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/listwrap.py#L63-L70
249,682
OpenGov/python_data_wrap
datawrap/listwrap.py
DimensionRange.slice_on_length
def slice_on_length(self, data_len, *addSlices): ''' Returns a slice representing the dimension range restrictions applied to a list of length data_len. If addSlices contains additional slice requirements, they are processed in the order they are given. ''' if len(self.ordered_ranges) + len(addSlices) == 0: return slice(None,None,None) ranges = self.ordered_ranges if len(addSlices) > 0: ranges = ranges + DimensionRange(*addSlices).ordered_ranges return self._combine_lists_of_ranges_on_length(data_len, *ranges)
python
def slice_on_length(self, data_len, *addSlices): ''' Returns a slice representing the dimension range restrictions applied to a list of length data_len. If addSlices contains additional slice requirements, they are processed in the order they are given. ''' if len(self.ordered_ranges) + len(addSlices) == 0: return slice(None,None,None) ranges = self.ordered_ranges if len(addSlices) > 0: ranges = ranges + DimensionRange(*addSlices).ordered_ranges return self._combine_lists_of_ranges_on_length(data_len, *ranges)
[ "def", "slice_on_length", "(", "self", ",", "data_len", ",", "*", "addSlices", ")", ":", "if", "len", "(", "self", ".", "ordered_ranges", ")", "+", "len", "(", "addSlices", ")", "==", "0", ":", "return", "slice", "(", "None", ",", "None", ",", "None", ")", "ranges", "=", "self", ".", "ordered_ranges", "if", "len", "(", "addSlices", ")", ">", "0", ":", "ranges", "=", "ranges", "+", "DimensionRange", "(", "*", "addSlices", ")", ".", "ordered_ranges", "return", "self", ".", "_combine_lists_of_ranges_on_length", "(", "data_len", ",", "*", "ranges", ")" ]
Returns a slice representing the dimension range restrictions applied to a list of length data_len. If addSlices contains additional slice requirements, they are processed in the order they are given.
[ "Returns", "a", "slice", "representing", "the", "dimension", "range", "restrictions", "applied", "to", "a", "list", "of", "length", "data_len", "." ]
7de38bb30d7a500adc336a4a7999528d753e5600
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/listwrap.py#L142-L155
249,683
OpenGov/python_data_wrap
datawrap/listwrap.py
DimensionRange._combine_ranges_on_length
def _combine_ranges_on_length(self, data_len, first, second): ''' Combines a first range with a second range, where the second range is considered within the scope of the first. ''' first = get_true_slice(first, data_len) second = get_true_slice(second, data_len) final_start, final_step, final_stop = (None, None, None) # Get our start if first.start == None and second.start == None: final_start = None else: final_start = (first.start if first.start else 0)+(second.start if second.start else 0) # Get our stop if second.stop == None: final_stop = first.stop elif first.stop == None: final_stop = (first.start if first.start else 0) + second.stop else: final_stop = min(first.stop, (first.start if first.start else 0) + second.stop) # Get our step if first.step == None and second.step == None: final_step = None else: final_step = (first.step if first.step else 1)*(second.step if second.step else 1) # If we have a start above our stop, set them to be equal if final_start > final_stop: final_start = final_stop return slice(final_start, final_stop, final_step)
python
def _combine_ranges_on_length(self, data_len, first, second): ''' Combines a first range with a second range, where the second range is considered within the scope of the first. ''' first = get_true_slice(first, data_len) second = get_true_slice(second, data_len) final_start, final_step, final_stop = (None, None, None) # Get our start if first.start == None and second.start == None: final_start = None else: final_start = (first.start if first.start else 0)+(second.start if second.start else 0) # Get our stop if second.stop == None: final_stop = first.stop elif first.stop == None: final_stop = (first.start if first.start else 0) + second.stop else: final_stop = min(first.stop, (first.start if first.start else 0) + second.stop) # Get our step if first.step == None and second.step == None: final_step = None else: final_step = (first.step if first.step else 1)*(second.step if second.step else 1) # If we have a start above our stop, set them to be equal if final_start > final_stop: final_start = final_stop return slice(final_start, final_stop, final_step)
[ "def", "_combine_ranges_on_length", "(", "self", ",", "data_len", ",", "first", ",", "second", ")", ":", "first", "=", "get_true_slice", "(", "first", ",", "data_len", ")", "second", "=", "get_true_slice", "(", "second", ",", "data_len", ")", "final_start", ",", "final_step", ",", "final_stop", "=", "(", "None", ",", "None", ",", "None", ")", "# Get our start", "if", "first", ".", "start", "==", "None", "and", "second", ".", "start", "==", "None", ":", "final_start", "=", "None", "else", ":", "final_start", "=", "(", "first", ".", "start", "if", "first", ".", "start", "else", "0", ")", "+", "(", "second", ".", "start", "if", "second", ".", "start", "else", "0", ")", "# Get our stop", "if", "second", ".", "stop", "==", "None", ":", "final_stop", "=", "first", ".", "stop", "elif", "first", ".", "stop", "==", "None", ":", "final_stop", "=", "(", "first", ".", "start", "if", "first", ".", "start", "else", "0", ")", "+", "second", ".", "stop", "else", ":", "final_stop", "=", "min", "(", "first", ".", "stop", ",", "(", "first", ".", "start", "if", "first", ".", "start", "else", "0", ")", "+", "second", ".", "stop", ")", "# Get our step", "if", "first", ".", "step", "==", "None", "and", "second", ".", "step", "==", "None", ":", "final_step", "=", "None", "else", ":", "final_step", "=", "(", "first", ".", "step", "if", "first", ".", "step", "else", "1", ")", "*", "(", "second", ".", "step", "if", "second", ".", "step", "else", "1", ")", "# If we have a start above our stop, set them to be equal", "if", "final_start", ">", "final_stop", ":", "final_start", "=", "final_stop", "return", "slice", "(", "final_start", ",", "final_stop", ",", "final_step", ")" ]
Combines a first range with a second range, where the second range is considered within the scope of the first.
[ "Combines", "a", "first", "range", "with", "a", "second", "range", "where", "the", "second", "range", "is", "considered", "within", "the", "scope", "of", "the", "first", "." ]
7de38bb30d7a500adc336a4a7999528d753e5600
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/listwrap.py#L157-L190
249,684
OpenGov/python_data_wrap
datawrap/listwrap.py
DimensionRange._combine_lists_of_ranges_on_length
def _combine_lists_of_ranges_on_length(self, data_len, first, *range_list): ''' Combines an arbitrary length list of ranges into a single slice. ''' current_range = first for next_range in range_list: current_range = self._combine_ranges_on_length(data_len, current_range, next_range) return current_range
python
def _combine_lists_of_ranges_on_length(self, data_len, first, *range_list): ''' Combines an arbitrary length list of ranges into a single slice. ''' current_range = first for next_range in range_list: current_range = self._combine_ranges_on_length(data_len, current_range, next_range) return current_range
[ "def", "_combine_lists_of_ranges_on_length", "(", "self", ",", "data_len", ",", "first", ",", "*", "range_list", ")", ":", "current_range", "=", "first", "for", "next_range", "in", "range_list", ":", "current_range", "=", "self", ".", "_combine_ranges_on_length", "(", "data_len", ",", "current_range", ",", "next_range", ")", "return", "current_range" ]
Combines an arbitrary length list of ranges into a single slice.
[ "Combines", "an", "arbitrary", "length", "list", "of", "ranges", "into", "a", "single", "slice", "." ]
7de38bb30d7a500adc336a4a7999528d753e5600
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/listwrap.py#L192-L199
249,685
OpenGov/python_data_wrap
datawrap/listwrap.py
FixedListSubset._get_single_depth
def _get_single_depth(self, multi_index): ''' Helper method for determining how many single index entries there are in a particular multi-index ''' single_depth = 0 for subind in multi_index: if is_slice_or_dim_range(subind): break single_depth += 1 return single_depth
python
def _get_single_depth(self, multi_index): ''' Helper method for determining how many single index entries there are in a particular multi-index ''' single_depth = 0 for subind in multi_index: if is_slice_or_dim_range(subind): break single_depth += 1 return single_depth
[ "def", "_get_single_depth", "(", "self", ",", "multi_index", ")", ":", "single_depth", "=", "0", "for", "subind", "in", "multi_index", ":", "if", "is_slice_or_dim_range", "(", "subind", ")", ":", "break", "single_depth", "+=", "1", "return", "single_depth" ]
Helper method for determining how many single index entries there are in a particular multi-index
[ "Helper", "method", "for", "determining", "how", "many", "single", "index", "entries", "there", "are", "in", "a", "particular", "multi", "-", "index" ]
7de38bb30d7a500adc336a4a7999528d753e5600
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/listwrap.py#L354-L364
249,686
OpenGov/python_data_wrap
datawrap/listwrap.py
ZeroList._generate_splice
def _generate_splice(self, slice_ind): ''' Creates a splice size version of the ZeroList ''' step_size = slice_ind.step if slice_ind.step else 1 # Check for each of the four possible scenarios if slice_ind.start != None: if slice_ind.stop != None: newListLen = ((get_non_negative_index(slice_ind.stop, self._length) - get_non_negative_index(slice_ind.start, self._length)) // step_size) else: newListLen = ((self._length - get_non_negative_index(slice_ind.start, self._length)) // step_size) else: if slice_ind.stop != None: newListLen = ((get_non_negative_index(slice_ind.stop, self._length)) // step_size) else: newListLen = (self._length // step_size) return ZeroList(newListLen)
python
def _generate_splice(self, slice_ind): ''' Creates a splice size version of the ZeroList ''' step_size = slice_ind.step if slice_ind.step else 1 # Check for each of the four possible scenarios if slice_ind.start != None: if slice_ind.stop != None: newListLen = ((get_non_negative_index(slice_ind.stop, self._length) - get_non_negative_index(slice_ind.start, self._length)) // step_size) else: newListLen = ((self._length - get_non_negative_index(slice_ind.start, self._length)) // step_size) else: if slice_ind.stop != None: newListLen = ((get_non_negative_index(slice_ind.stop, self._length)) // step_size) else: newListLen = (self._length // step_size) return ZeroList(newListLen)
[ "def", "_generate_splice", "(", "self", ",", "slice_ind", ")", ":", "step_size", "=", "slice_ind", ".", "step", "if", "slice_ind", ".", "step", "else", "1", "# Check for each of the four possible scenarios", "if", "slice_ind", ".", "start", "!=", "None", ":", "if", "slice_ind", ".", "stop", "!=", "None", ":", "newListLen", "=", "(", "(", "get_non_negative_index", "(", "slice_ind", ".", "stop", ",", "self", ".", "_length", ")", "-", "get_non_negative_index", "(", "slice_ind", ".", "start", ",", "self", ".", "_length", ")", ")", "//", "step_size", ")", "else", ":", "newListLen", "=", "(", "(", "self", ".", "_length", "-", "get_non_negative_index", "(", "slice_ind", ".", "start", ",", "self", ".", "_length", ")", ")", "//", "step_size", ")", "else", ":", "if", "slice_ind", ".", "stop", "!=", "None", ":", "newListLen", "=", "(", "(", "get_non_negative_index", "(", "slice_ind", ".", "stop", ",", "self", ".", "_length", ")", ")", "//", "step_size", ")", "else", ":", "newListLen", "=", "(", "self", ".", "_length", "//", "step_size", ")", "return", "ZeroList", "(", "newListLen", ")" ]
Creates a splice size version of the ZeroList
[ "Creates", "a", "splice", "size", "version", "of", "the", "ZeroList" ]
7de38bb30d7a500adc336a4a7999528d753e5600
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/listwrap.py#L547-L566
249,687
minhhoit/yacms
yacms/core/models.py
SiteRelated.save
def save(self, update_site=False, *args, **kwargs): """ Set the site to the current site when the record is first created, or the ``update_site`` argument is explicitly set to ``True``. """ if update_site or (self.id is None and self.site_id is None): self.site_id = current_site_id() super(SiteRelated, self).save(*args, **kwargs)
python
def save(self, update_site=False, *args, **kwargs): """ Set the site to the current site when the record is first created, or the ``update_site`` argument is explicitly set to ``True``. """ if update_site or (self.id is None and self.site_id is None): self.site_id = current_site_id() super(SiteRelated, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "update_site", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "update_site", "or", "(", "self", ".", "id", "is", "None", "and", "self", ".", "site_id", "is", "None", ")", ":", "self", ".", "site_id", "=", "current_site_id", "(", ")", "super", "(", "SiteRelated", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Set the site to the current site when the record is first created, or the ``update_site`` argument is explicitly set to ``True``.
[ "Set", "the", "site", "to", "the", "current", "site", "when", "the", "record", "is", "first", "created", "or", "the", "update_site", "argument", "is", "explicitly", "set", "to", "True", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/models.py#L51-L59
249,688
minhhoit/yacms
yacms/core/models.py
MetaData.save
def save(self, *args, **kwargs): """ Set the description field on save. """ if self.gen_description: self.description = strip_tags(self.description_from_content()) super(MetaData, self).save(*args, **kwargs)
python
def save(self, *args, **kwargs): """ Set the description field on save. """ if self.gen_description: self.description = strip_tags(self.description_from_content()) super(MetaData, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "gen_description", ":", "self", ".", "description", "=", "strip_tags", "(", "self", ".", "description_from_content", "(", ")", ")", "super", "(", "MetaData", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Set the description field on save.
[ "Set", "the", "description", "field", "on", "save", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/models.py#L137-L143
249,689
minhhoit/yacms
yacms/core/models.py
MetaData.description_from_content
def description_from_content(self): """ Returns the first block or sentence of the first content-like field. """ description = "" # Use the first RichTextField, or TextField if none found. for field_type in (RichTextField, models.TextField): if not description: for field in self._meta.fields: if (isinstance(field, field_type) and field.name != "description"): description = getattr(self, field.name) if description: from yacms.core.templatetags.yacms_tags \ import richtext_filters description = richtext_filters(description) break # Fall back to the title if description couldn't be determined. if not description: description = str(self) # Strip everything after the first block or sentence. ends = ("</p>", "<br />", "<br/>", "<br>", "</ul>", "\n", ". ", "! ", "? ") for end in ends: pos = description.lower().find(end) if pos > -1: description = TagCloser(description[:pos]).html break else: description = truncatewords_html(description, 100) try: description = unicode(description) except NameError: pass # Python 3. return description
python
def description_from_content(self): """ Returns the first block or sentence of the first content-like field. """ description = "" # Use the first RichTextField, or TextField if none found. for field_type in (RichTextField, models.TextField): if not description: for field in self._meta.fields: if (isinstance(field, field_type) and field.name != "description"): description = getattr(self, field.name) if description: from yacms.core.templatetags.yacms_tags \ import richtext_filters description = richtext_filters(description) break # Fall back to the title if description couldn't be determined. if not description: description = str(self) # Strip everything after the first block or sentence. ends = ("</p>", "<br />", "<br/>", "<br>", "</ul>", "\n", ". ", "! ", "? ") for end in ends: pos = description.lower().find(end) if pos > -1: description = TagCloser(description[:pos]).html break else: description = truncatewords_html(description, 100) try: description = unicode(description) except NameError: pass # Python 3. return description
[ "def", "description_from_content", "(", "self", ")", ":", "description", "=", "\"\"", "# Use the first RichTextField, or TextField if none found.", "for", "field_type", "in", "(", "RichTextField", ",", "models", ".", "TextField", ")", ":", "if", "not", "description", ":", "for", "field", "in", "self", ".", "_meta", ".", "fields", ":", "if", "(", "isinstance", "(", "field", ",", "field_type", ")", "and", "field", ".", "name", "!=", "\"description\"", ")", ":", "description", "=", "getattr", "(", "self", ",", "field", ".", "name", ")", "if", "description", ":", "from", "yacms", ".", "core", ".", "templatetags", ".", "yacms_tags", "import", "richtext_filters", "description", "=", "richtext_filters", "(", "description", ")", "break", "# Fall back to the title if description couldn't be determined.", "if", "not", "description", ":", "description", "=", "str", "(", "self", ")", "# Strip everything after the first block or sentence.", "ends", "=", "(", "\"</p>\"", ",", "\"<br />\"", ",", "\"<br/>\"", ",", "\"<br>\"", ",", "\"</ul>\"", ",", "\"\\n\"", ",", "\". \"", ",", "\"! \"", ",", "\"? \"", ")", "for", "end", "in", "ends", ":", "pos", "=", "description", ".", "lower", "(", ")", ".", "find", "(", "end", ")", "if", "pos", ">", "-", "1", ":", "description", "=", "TagCloser", "(", "description", "[", ":", "pos", "]", ")", ".", "html", "break", "else", ":", "description", "=", "truncatewords_html", "(", "description", ",", "100", ")", "try", ":", "description", "=", "unicode", "(", "description", ")", "except", "NameError", ":", "pass", "# Python 3.", "return", "description" ]
Returns the first block or sentence of the first content-like field.
[ "Returns", "the", "first", "block", "or", "sentence", "of", "the", "first", "content", "-", "like", "field", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/models.py#L152-L187
249,690
minhhoit/yacms
yacms/core/models.py
Displayable.save
def save(self, *args, **kwargs): """ Set default for ``publish_date``. We can't use ``auto_now_add`` on the field as it will be blank when a blog post is created from the quick blog form in the admin dashboard. """ if self.publish_date is None: self.publish_date = now() super(Displayable, self).save(*args, **kwargs)
python
def save(self, *args, **kwargs): """ Set default for ``publish_date``. We can't use ``auto_now_add`` on the field as it will be blank when a blog post is created from the quick blog form in the admin dashboard. """ if self.publish_date is None: self.publish_date = now() super(Displayable, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "publish_date", "is", "None", ":", "self", ".", "publish_date", "=", "now", "(", ")", "super", "(", "Displayable", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Set default for ``publish_date``. We can't use ``auto_now_add`` on the field as it will be blank when a blog post is created from the quick blog form in the admin dashboard.
[ "Set", "default", "for", "publish_date", ".", "We", "can", "t", "use", "auto_now_add", "on", "the", "field", "as", "it", "will", "be", "blank", "when", "a", "blog", "post", "is", "created", "from", "the", "quick", "blog", "form", "in", "the", "admin", "dashboard", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/models.py#L245-L253
249,691
minhhoit/yacms
yacms/core/models.py
Displayable.set_short_url
def set_short_url(self): """ Generates the ``short_url`` attribute if the model does not already have one. Used by the ``set_short_url_for`` template tag and ``TweetableAdmin``. If no sharing service is defined (bitly is the one implemented, but others could be by overriding ``generate_short_url``), the ``SHORT_URL_UNSET`` marker gets stored in the DB. In this case, ``short_url`` is temporarily (eg not persisted) set to host + ``get_absolute_url`` - this is so that we don't permanently store ``get_absolute_url``, since it may change over time. """ if self.short_url == SHORT_URL_UNSET: self.short_url = self.get_absolute_url_with_host() elif not self.short_url: self.short_url = self.generate_short_url() self.save()
python
def set_short_url(self): """ Generates the ``short_url`` attribute if the model does not already have one. Used by the ``set_short_url_for`` template tag and ``TweetableAdmin``. If no sharing service is defined (bitly is the one implemented, but others could be by overriding ``generate_short_url``), the ``SHORT_URL_UNSET`` marker gets stored in the DB. In this case, ``short_url`` is temporarily (eg not persisted) set to host + ``get_absolute_url`` - this is so that we don't permanently store ``get_absolute_url``, since it may change over time. """ if self.short_url == SHORT_URL_UNSET: self.short_url = self.get_absolute_url_with_host() elif not self.short_url: self.short_url = self.generate_short_url() self.save()
[ "def", "set_short_url", "(", "self", ")", ":", "if", "self", ".", "short_url", "==", "SHORT_URL_UNSET", ":", "self", ".", "short_url", "=", "self", ".", "get_absolute_url_with_host", "(", ")", "elif", "not", "self", ".", "short_url", ":", "self", ".", "short_url", "=", "self", ".", "generate_short_url", "(", ")", "self", ".", "save", "(", ")" ]
Generates the ``short_url`` attribute if the model does not already have one. Used by the ``set_short_url_for`` template tag and ``TweetableAdmin``. If no sharing service is defined (bitly is the one implemented, but others could be by overriding ``generate_short_url``), the ``SHORT_URL_UNSET`` marker gets stored in the DB. In this case, ``short_url`` is temporarily (eg not persisted) set to host + ``get_absolute_url`` - this is so that we don't permanently store ``get_absolute_url``, since it may change over time.
[ "Generates", "the", "short_url", "attribute", "if", "the", "model", "does", "not", "already", "have", "one", ".", "Used", "by", "the", "set_short_url_for", "template", "tag", "and", "TweetableAdmin", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/models.py#L289-L307
249,692
minhhoit/yacms
yacms/core/models.py
Displayable.generate_short_url
def generate_short_url(self): """ Returns a new short URL generated using bit.ly if credentials for the service have been specified. """ from yacms.conf import settings if settings.BITLY_ACCESS_TOKEN: url = "https://api-ssl.bit.ly/v3/shorten?%s" % urlencode({ "access_token": settings.BITLY_ACCESS_TOKEN, "uri": self.get_absolute_url_with_host(), }) response = loads(urlopen(url).read().decode("utf-8")) if response["status_code"] == 200: return response["data"]["url"] return SHORT_URL_UNSET
python
def generate_short_url(self): """ Returns a new short URL generated using bit.ly if credentials for the service have been specified. """ from yacms.conf import settings if settings.BITLY_ACCESS_TOKEN: url = "https://api-ssl.bit.ly/v3/shorten?%s" % urlencode({ "access_token": settings.BITLY_ACCESS_TOKEN, "uri": self.get_absolute_url_with_host(), }) response = loads(urlopen(url).read().decode("utf-8")) if response["status_code"] == 200: return response["data"]["url"] return SHORT_URL_UNSET
[ "def", "generate_short_url", "(", "self", ")", ":", "from", "yacms", ".", "conf", "import", "settings", "if", "settings", ".", "BITLY_ACCESS_TOKEN", ":", "url", "=", "\"https://api-ssl.bit.ly/v3/shorten?%s\"", "%", "urlencode", "(", "{", "\"access_token\"", ":", "settings", ".", "BITLY_ACCESS_TOKEN", ",", "\"uri\"", ":", "self", ".", "get_absolute_url_with_host", "(", ")", ",", "}", ")", "response", "=", "loads", "(", "urlopen", "(", "url", ")", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", ")", "if", "response", "[", "\"status_code\"", "]", "==", "200", ":", "return", "response", "[", "\"data\"", "]", "[", "\"url\"", "]", "return", "SHORT_URL_UNSET" ]
Returns a new short URL generated using bit.ly if credentials for the service have been specified.
[ "Returns", "a", "new", "short", "URL", "generated", "using", "bit", ".", "ly", "if", "credentials", "for", "the", "service", "have", "been", "specified", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/models.py#L309-L323
249,693
minhhoit/yacms
yacms/core/models.py
Displayable._get_next_or_previous_by_publish_date
def _get_next_or_previous_by_publish_date(self, is_next, **kwargs): """ Retrieves next or previous object by publish date. We implement our own version instead of Django's so we can hook into the published manager and concrete subclasses. """ arg = "publish_date__gt" if is_next else "publish_date__lt" order = "publish_date" if is_next else "-publish_date" lookup = {arg: self.publish_date} concrete_model = base_concrete_model(Displayable, self) try: queryset = concrete_model.objects.published except AttributeError: queryset = concrete_model.objects.all try: return queryset(**kwargs).filter(**lookup).order_by(order)[0] except IndexError: pass
python
def _get_next_or_previous_by_publish_date(self, is_next, **kwargs): """ Retrieves next or previous object by publish date. We implement our own version instead of Django's so we can hook into the published manager and concrete subclasses. """ arg = "publish_date__gt" if is_next else "publish_date__lt" order = "publish_date" if is_next else "-publish_date" lookup = {arg: self.publish_date} concrete_model = base_concrete_model(Displayable, self) try: queryset = concrete_model.objects.published except AttributeError: queryset = concrete_model.objects.all try: return queryset(**kwargs).filter(**lookup).order_by(order)[0] except IndexError: pass
[ "def", "_get_next_or_previous_by_publish_date", "(", "self", ",", "is_next", ",", "*", "*", "kwargs", ")", ":", "arg", "=", "\"publish_date__gt\"", "if", "is_next", "else", "\"publish_date__lt\"", "order", "=", "\"publish_date\"", "if", "is_next", "else", "\"-publish_date\"", "lookup", "=", "{", "arg", ":", "self", ".", "publish_date", "}", "concrete_model", "=", "base_concrete_model", "(", "Displayable", ",", "self", ")", "try", ":", "queryset", "=", "concrete_model", ".", "objects", ".", "published", "except", "AttributeError", ":", "queryset", "=", "concrete_model", ".", "objects", ".", "all", "try", ":", "return", "queryset", "(", "*", "*", "kwargs", ")", ".", "filter", "(", "*", "*", "lookup", ")", ".", "order_by", "(", "order", ")", "[", "0", "]", "except", "IndexError", ":", "pass" ]
Retrieves next or previous object by publish date. We implement our own version instead of Django's so we can hook into the published manager and concrete subclasses.
[ "Retrieves", "next", "or", "previous", "object", "by", "publish", "date", ".", "We", "implement", "our", "own", "version", "instead", "of", "Django", "s", "so", "we", "can", "hook", "into", "the", "published", "manager", "and", "concrete", "subclasses", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/models.py#L325-L342
249,694
minhhoit/yacms
yacms/core/models.py
Orderable.with_respect_to
def with_respect_to(self): """ Returns a dict to use as a filter for ordering operations containing the original ``Meta.order_with_respect_to`` value if provided. If the field is a Generic Relation, the dict returned contains names and values for looking up the relation's ``ct_field`` and ``fk_field`` attributes. """ try: name = self.order_with_respect_to value = getattr(self, name) except AttributeError: # No ``order_with_respect_to`` specified on the model. return {} # Support for generic relations. field = getattr(self.__class__, name) if isinstance(field, GenericForeignKey): names = (field.ct_field, field.fk_field) return dict([(n, getattr(self, n)) for n in names]) return {name: value}
python
def with_respect_to(self): """ Returns a dict to use as a filter for ordering operations containing the original ``Meta.order_with_respect_to`` value if provided. If the field is a Generic Relation, the dict returned contains names and values for looking up the relation's ``ct_field`` and ``fk_field`` attributes. """ try: name = self.order_with_respect_to value = getattr(self, name) except AttributeError: # No ``order_with_respect_to`` specified on the model. return {} # Support for generic relations. field = getattr(self.__class__, name) if isinstance(field, GenericForeignKey): names = (field.ct_field, field.fk_field) return dict([(n, getattr(self, n)) for n in names]) return {name: value}
[ "def", "with_respect_to", "(", "self", ")", ":", "try", ":", "name", "=", "self", ".", "order_with_respect_to", "value", "=", "getattr", "(", "self", ",", "name", ")", "except", "AttributeError", ":", "# No ``order_with_respect_to`` specified on the model.", "return", "{", "}", "# Support for generic relations.", "field", "=", "getattr", "(", "self", ".", "__class__", ",", "name", ")", "if", "isinstance", "(", "field", ",", "GenericForeignKey", ")", ":", "names", "=", "(", "field", ".", "ct_field", ",", "field", ".", "fk_field", ")", "return", "dict", "(", "[", "(", "n", ",", "getattr", "(", "self", ",", "n", ")", ")", "for", "n", "in", "names", "]", ")", "return", "{", "name", ":", "value", "}" ]
Returns a dict to use as a filter for ordering operations containing the original ``Meta.order_with_respect_to`` value if provided. If the field is a Generic Relation, the dict returned contains names and values for looking up the relation's ``ct_field`` and ``fk_field`` attributes.
[ "Returns", "a", "dict", "to", "use", "as", "a", "filter", "for", "ordering", "operations", "containing", "the", "original", "Meta", ".", "order_with_respect_to", "value", "if", "provided", ".", "If", "the", "field", "is", "a", "Generic", "Relation", "the", "dict", "returned", "contains", "names", "and", "values", "for", "looking", "up", "the", "relation", "s", "ct_field", "and", "fk_field", "attributes", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/models.py#L408-L427
249,695
minhhoit/yacms
yacms/core/models.py
Orderable.save
def save(self, *args, **kwargs): """ Set the initial ordering value. """ if self._order is None: lookup = self.with_respect_to() lookup["_order__isnull"] = False concrete_model = base_concrete_model(Orderable, self) self._order = concrete_model.objects.filter(**lookup).count() super(Orderable, self).save(*args, **kwargs)
python
def save(self, *args, **kwargs): """ Set the initial ordering value. """ if self._order is None: lookup = self.with_respect_to() lookup["_order__isnull"] = False concrete_model = base_concrete_model(Orderable, self) self._order = concrete_model.objects.filter(**lookup).count() super(Orderable, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_order", "is", "None", ":", "lookup", "=", "self", ".", "with_respect_to", "(", ")", "lookup", "[", "\"_order__isnull\"", "]", "=", "False", "concrete_model", "=", "base_concrete_model", "(", "Orderable", ",", "self", ")", "self", ".", "_order", "=", "concrete_model", ".", "objects", ".", "filter", "(", "*", "*", "lookup", ")", ".", "count", "(", ")", "super", "(", "Orderable", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Set the initial ordering value.
[ "Set", "the", "initial", "ordering", "value", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/models.py#L429-L438
249,696
minhhoit/yacms
yacms/core/models.py
Orderable.delete
def delete(self, *args, **kwargs): """ Update the ordering values for siblings. """ lookup = self.with_respect_to() lookup["_order__gte"] = self._order concrete_model = base_concrete_model(Orderable, self) after = concrete_model.objects.filter(**lookup) after.update(_order=models.F("_order") - 1) super(Orderable, self).delete(*args, **kwargs)
python
def delete(self, *args, **kwargs): """ Update the ordering values for siblings. """ lookup = self.with_respect_to() lookup["_order__gte"] = self._order concrete_model = base_concrete_model(Orderable, self) after = concrete_model.objects.filter(**lookup) after.update(_order=models.F("_order") - 1) super(Orderable, self).delete(*args, **kwargs)
[ "def", "delete", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lookup", "=", "self", ".", "with_respect_to", "(", ")", "lookup", "[", "\"_order__gte\"", "]", "=", "self", ".", "_order", "concrete_model", "=", "base_concrete_model", "(", "Orderable", ",", "self", ")", "after", "=", "concrete_model", ".", "objects", ".", "filter", "(", "*", "*", "lookup", ")", "after", ".", "update", "(", "_order", "=", "models", ".", "F", "(", "\"_order\"", ")", "-", "1", ")", "super", "(", "Orderable", ",", "self", ")", ".", "delete", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Update the ordering values for siblings.
[ "Update", "the", "ordering", "values", "for", "siblings", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/models.py#L440-L449
249,697
minhhoit/yacms
yacms/core/models.py
Orderable._get_next_or_previous_by_order
def _get_next_or_previous_by_order(self, is_next, **kwargs): """ Retrieves next or previous object by order. We implement our own version instead of Django's so we can hook into the published manager, concrete subclasses and our custom ``with_respect_to`` method. """ lookup = self.with_respect_to() lookup["_order"] = self._order + (1 if is_next else -1) concrete_model = base_concrete_model(Orderable, self) try: queryset = concrete_model.objects.published except AttributeError: queryset = concrete_model.objects.filter try: return queryset(**kwargs).get(**lookup) except concrete_model.DoesNotExist: pass
python
def _get_next_or_previous_by_order(self, is_next, **kwargs): """ Retrieves next or previous object by order. We implement our own version instead of Django's so we can hook into the published manager, concrete subclasses and our custom ``with_respect_to`` method. """ lookup = self.with_respect_to() lookup["_order"] = self._order + (1 if is_next else -1) concrete_model = base_concrete_model(Orderable, self) try: queryset = concrete_model.objects.published except AttributeError: queryset = concrete_model.objects.filter try: return queryset(**kwargs).get(**lookup) except concrete_model.DoesNotExist: pass
[ "def", "_get_next_or_previous_by_order", "(", "self", ",", "is_next", ",", "*", "*", "kwargs", ")", ":", "lookup", "=", "self", ".", "with_respect_to", "(", ")", "lookup", "[", "\"_order\"", "]", "=", "self", ".", "_order", "+", "(", "1", "if", "is_next", "else", "-", "1", ")", "concrete_model", "=", "base_concrete_model", "(", "Orderable", ",", "self", ")", "try", ":", "queryset", "=", "concrete_model", ".", "objects", ".", "published", "except", "AttributeError", ":", "queryset", "=", "concrete_model", ".", "objects", ".", "filter", "try", ":", "return", "queryset", "(", "*", "*", "kwargs", ")", ".", "get", "(", "*", "*", "lookup", ")", "except", "concrete_model", ".", "DoesNotExist", ":", "pass" ]
Retrieves next or previous object by order. We implement our own version instead of Django's so we can hook into the published manager, concrete subclasses and our custom ``with_respect_to`` method.
[ "Retrieves", "next", "or", "previous", "object", "by", "order", ".", "We", "implement", "our", "own", "version", "instead", "of", "Django", "s", "so", "we", "can", "hook", "into", "the", "published", "manager", "concrete", "subclasses", "and", "our", "custom", "with_respect_to", "method", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/models.py#L451-L468
249,698
minhhoit/yacms
yacms/core/models.py
Ownable.is_editable
def is_editable(self, request): """ Restrict in-line editing to the objects's owner and superusers. """ return request.user.is_superuser or request.user.id == self.user_id
python
def is_editable(self, request): """ Restrict in-line editing to the objects's owner and superusers. """ return request.user.is_superuser or request.user.id == self.user_id
[ "def", "is_editable", "(", "self", ",", "request", ")", ":", "return", "request", ".", "user", ".", "is_superuser", "or", "request", ".", "user", ".", "id", "==", "self", ".", "user_id" ]
Restrict in-line editing to the objects's owner and superusers.
[ "Restrict", "in", "-", "line", "editing", "to", "the", "objects", "s", "owner", "and", "superusers", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/models.py#L494-L498
249,699
minhhoit/yacms
yacms/core/models.py
ContentTyped.get_content_models
def get_content_models(cls): """ Return all subclasses of the concrete model. """ concrete_model = base_concrete_model(ContentTyped, cls) return [m for m in apps.get_models() if m is not concrete_model and issubclass(m, concrete_model)]
python
def get_content_models(cls): """ Return all subclasses of the concrete model. """ concrete_model = base_concrete_model(ContentTyped, cls) return [m for m in apps.get_models() if m is not concrete_model and issubclass(m, concrete_model)]
[ "def", "get_content_models", "(", "cls", ")", ":", "concrete_model", "=", "base_concrete_model", "(", "ContentTyped", ",", "cls", ")", "return", "[", "m", "for", "m", "in", "apps", ".", "get_models", "(", ")", "if", "m", "is", "not", "concrete_model", "and", "issubclass", "(", "m", ",", "concrete_model", ")", "]" ]
Return all subclasses of the concrete model.
[ "Return", "all", "subclasses", "of", "the", "concrete", "model", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/models.py#L526-L530