Dataset schema (column, type, and value-length statistics from the source dump):

repo              string (lengths 7-54)
path              string (lengths 4-192)
url               string (lengths 87-284)
code              string (lengths 78-104k)
code_tokens       list
docstring         string (lengths 1-46.9k)
docstring_tokens  list
language          categorical (1 distinct value)
partition         categorical (3 distinct values)
rflamary/POT
ot/utils.py
https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/ot/utils.py#L32-L36
def toc(message='Elapsed time : {} s'): """ Python implementation of Matlab toc() function """ t = time.time() print(message.format(t - __time_tic_toc)) return t - __time_tic_toc
[ "def", "toc", "(", "message", "=", "'Elapsed time : {} s'", ")", ":", "t", "=", "time", ".", "time", "(", ")", "print", "(", "message", ".", "format", "(", "t", "-", "__time_tic_toc", ")", ")", "return", "t", "-", "__time_tic_toc" ]
Python implementation of Matlab toc() function
[ "Python", "implementation", "of", "Matlab", "toc", "()", "function" ]
python
train
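The `toc()` above relies on a module-level `__time_tic_toc` that a companion `tic()` (not included in this record) is expected to set. As a minimal, self-contained sketch of the same tic/toc pattern, assuming nothing about the POT implementation beyond what the record shows:

```python
import time

_time_tic_toc = time.time()  # start time; refreshed by tic()

def tic():
    """Start the timer (Matlab-style tic)."""
    global _time_tic_toc
    _time_tic_toc = time.time()

def toc(message='Elapsed time : {} s'):
    """Print and return the seconds elapsed since the last tic()."""
    elapsed = time.time() - _time_tic_toc
    print(message.format(elapsed))
    return elapsed

tic()
sum(range(10 ** 6))
toc()  # prints something like "Elapsed time : 0.03 s"
```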
nerdvegas/rez
src/rez/vendor/pyparsing/pyparsing.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pyparsing/pyparsing.py#L617-L627
def col (loc,strg): """Returns current column within a string, counting newlines as line separators. The first column is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information on parsing strings containing C{<TAB>}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. """ return (loc<len(strg) and strg[loc] == '\n') and 1 or loc - strg.rfind("\n", 0, loc)
[ "def", "col", "(", "loc", ",", "strg", ")", ":", "return", "(", "loc", "<", "len", "(", "strg", ")", "and", "strg", "[", "loc", "]", "==", "'\\n'", ")", "and", "1", "or", "loc", "-", "strg", ".", "rfind", "(", "\"\\n\"", ",", "0", ",", "loc", ")" ]
Returns current column within a string, counting newlines as line separators. The first column is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information on parsing strings containing C{<TAB>}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string.
[ "Returns", "current", "column", "within", "a", "string", "counting", "newlines", "as", "line", "separators", ".", "The", "first", "column", "is", "number", "1", ".", "Note", ":", "the", "default", "parsing", "behavior", "is", "to", "expand", "tabs", "in", "the", "input", "string", "before", "starting", "the", "parsing", "process", ".", "See", "L", "{", "I", "{", "ParserElement", ".", "parseString", "}", "<ParserElement", ".", "parseString", ">", "}", "for", "more", "information", "on", "parsing", "strings", "containing", "C", "{", "<TAB", ">", "}", "s", "and", "suggested", "methods", "to", "maintain", "a", "consistent", "view", "of", "the", "parsed", "string", "the", "parse", "location", "and", "line", "and", "column", "positions", "within", "the", "parsed", "string", "." ]
python
train
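The `col()` helper above uses the old `and ... or ...` short-circuit idiom. A small standalone sketch of the same computation, written as a ternary for readability (an illustration, not a change to the vendored pyparsing source):

```python
def col(loc, strg):
    # 1-based column of position `loc` in `strg`; newlines separate lines.
    return 1 if (loc < len(strg) and strg[loc] == '\n') else loc - strg.rfind("\n", 0, loc)

text = "abc\ndef"
print(col(0, text))  # 1 -> 'a' is the first column of line 1
print(col(5, text))  # 2 -> 'e' is the second column of line 2
```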
BlueBrain/hpcbench
hpcbench/driver/base.py
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/driver/base.py#L136-L150
def call_decorator(cls, func): """class function that MUST be specified as decorator to the `__call__` method overridden by sub-classes. """ @wraps(func) def _wrap(self, *args, **kwargs): try: return func(self, *args, **kwargs) except Exception: self.logger.exception('While executing benchmark') if not (self.catch_child_exception or False): raise return _wrap
[ "def", "call_decorator", "(", "cls", ",", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "_wrap", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "Exception", ":", "self", ".", "logger", ".", "exception", "(", "'While executing benchmark'", ")", "if", "not", "(", "self", ".", "catch_child_exception", "or", "False", ")", ":", "raise", "return", "_wrap" ]
class function that MUST be specified as decorator to the `__call__` method overridden by sub-classes.
[ "class", "function", "that", "MUST", "be", "specified", "as", "decorator", "to", "the", "__call__", "method", "overridden", "by", "sub", "-", "classes", "." ]
python
train
dfm/casjobs
casjobs.py
https://github.com/dfm/casjobs/blob/1cc3f5511cc254d776082909221787e3c037ac16/casjobs.py#L350-L362
def list_tables(self): """ Lists the tables in mydb. ## Returns * `tables` (list): A list of strings with all the table names from mydb. """ q = 'SELECT Distinct TABLE_NAME FROM information_schema.TABLES' res = self.quick(q, context='MYDB', task_name='listtables', system=True) # the first line is a header and the last is always empty # also, the table names have " as the first and last characters return [l[1:-1]for l in res.split('\n')[1:-1]]
[ "def", "list_tables", "(", "self", ")", ":", "q", "=", "'SELECT Distinct TABLE_NAME FROM information_schema.TABLES'", "res", "=", "self", ".", "quick", "(", "q", ",", "context", "=", "'MYDB'", ",", "task_name", "=", "'listtables'", ",", "system", "=", "True", ")", "# the first line is a header and the last is always empty", "# also, the table names have \" as the first and last characters", "return", "[", "l", "[", "1", ":", "-", "1", "]", "for", "l", "in", "res", ".", "split", "(", "'\\n'", ")", "[", "1", ":", "-", "1", "]", "]" ]
Lists the tables in mydb. ## Returns * `tables` (list): A list of strings with all the table names from mydb.
[ "Lists", "the", "tables", "in", "mydb", "." ]
python
train
CZ-NIC/yangson
yangson/instance.py
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/instance.py#L524-L527
def qual_name(self) -> QualName: """Return the receiver's qualified name.""" p, s, loc = self._key.partition(":") return (loc, p) if s else (p, self.namespace)
[ "def", "qual_name", "(", "self", ")", "->", "QualName", ":", "p", ",", "s", ",", "loc", "=", "self", ".", "_key", ".", "partition", "(", "\":\"", ")", "return", "(", "loc", ",", "p", ")", "if", "s", "else", "(", "p", ",", "self", ".", "namespace", ")" ]
Return the receiver's qualified name.
[ "Return", "the", "receiver", "s", "qualified", "name", "." ]
python
train
apache/incubator-mxnet
example/rcnn/symimdb/imdb.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/rcnn/symimdb/imdb.py#L83-L98
def append_flipped_images(self): """Only flip boxes coordinates, images will be flipped when loading into network""" logger.info('%s append flipped images to roidb' % self._name) roidb_flipped = [] for roi_rec in self._roidb: boxes = roi_rec['boxes'].copy() oldx1 = boxes[:, 0].copy() oldx2 = boxes[:, 2].copy() boxes[:, 0] = roi_rec['width'] - oldx2 - 1 boxes[:, 2] = roi_rec['width'] - oldx1 - 1 assert (boxes[:, 2] >= boxes[:, 0]).all() roi_rec_flipped = roi_rec.copy() roi_rec_flipped['boxes'] = boxes roi_rec_flipped['flipped'] = True roidb_flipped.append(roi_rec_flipped) self._roidb.extend(roidb_flipped)
[ "def", "append_flipped_images", "(", "self", ")", ":", "logger", ".", "info", "(", "'%s append flipped images to roidb'", "%", "self", ".", "_name", ")", "roidb_flipped", "=", "[", "]", "for", "roi_rec", "in", "self", ".", "_roidb", ":", "boxes", "=", "roi_rec", "[", "'boxes'", "]", ".", "copy", "(", ")", "oldx1", "=", "boxes", "[", ":", ",", "0", "]", ".", "copy", "(", ")", "oldx2", "=", "boxes", "[", ":", ",", "2", "]", ".", "copy", "(", ")", "boxes", "[", ":", ",", "0", "]", "=", "roi_rec", "[", "'width'", "]", "-", "oldx2", "-", "1", "boxes", "[", ":", ",", "2", "]", "=", "roi_rec", "[", "'width'", "]", "-", "oldx1", "-", "1", "assert", "(", "boxes", "[", ":", ",", "2", "]", ">=", "boxes", "[", ":", ",", "0", "]", ")", ".", "all", "(", ")", "roi_rec_flipped", "=", "roi_rec", ".", "copy", "(", ")", "roi_rec_flipped", "[", "'boxes'", "]", "=", "boxes", "roi_rec_flipped", "[", "'flipped'", "]", "=", "True", "roidb_flipped", ".", "append", "(", "roi_rec_flipped", ")", "self", ".", "_roidb", ".", "extend", "(", "roidb_flipped", ")" ]
Only flip boxes coordinates, images will be flipped when loading into network
[ "Only", "flip", "boxes", "coordinates", "images", "will", "be", "flipped", "when", "loading", "into", "network" ]
python
train
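The core of `append_flipped_images` above is the horizontal mirroring of `[x1, y1, x2, y2]` boxes against the image width. A minimal NumPy sketch of just that step (an illustrative helper, not part of the MXNet example code):

```python
import numpy as np

def flip_boxes(boxes, width):
    """Mirror [x1, y1, x2, y2] boxes horizontally for an image of the given width."""
    boxes = boxes.copy()
    oldx1 = boxes[:, 0].copy()
    oldx2 = boxes[:, 2].copy()
    boxes[:, 0] = width - oldx2 - 1
    boxes[:, 2] = width - oldx1 - 1
    assert (boxes[:, 2] >= boxes[:, 0]).all()
    return boxes

boxes = np.array([[10, 5, 30, 40]])
print(flip_boxes(boxes, width=100))  # [[69  5 89 40]]
```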
20c/xbahn
xbahn/message.py
https://github.com/20c/xbahn/blob/afb27b0576841338a366d7cac0200a782bd84be6/xbahn/message.py#L71-L79
def export(self, contentType): """ Export message to specified contentType via munge contentType <str> - eg. "json", "yaml" """ cls = munge.get_codec(contentType) codec = cls() return codec.dumps(self.__dict__())
[ "def", "export", "(", "self", ",", "contentType", ")", ":", "cls", "=", "munge", ".", "get_codec", "(", "contentType", ")", "codec", "=", "cls", "(", ")", "return", "codec", ".", "dumps", "(", "self", ".", "__dict__", "(", ")", ")" ]
Export message to specified contentType via munge contentType <str> - eg. "json", "yaml"
[ "Export", "message", "to", "specified", "contentType", "via", "munge" ]
python
train
quantopian/zipline
zipline/pipeline/graph.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/graph.py#L498-L503
def maybe_specialize(term, domain): """Specialize a term if it's loadable. """ if isinstance(term, LoadableTerm): return term.specialize(domain) return term
[ "def", "maybe_specialize", "(", "term", ",", "domain", ")", ":", "if", "isinstance", "(", "term", ",", "LoadableTerm", ")", ":", "return", "term", ".", "specialize", "(", "domain", ")", "return", "term" ]
Specialize a term if it's loadable.
[ "Specialize", "a", "term", "if", "it", "s", "loadable", "." ]
python
train
OSSOS/MOP
src/jjk/preproc/wcsutil.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/jjk/preproc/wcsutil.py#L344-L350
def copy(self,deep=yes): """ Makes a (deep)copy of this object for use by other objects. """ if deep: return copy.deepcopy(self) else: return copy.copy(self)
[ "def", "copy", "(", "self", ",", "deep", "=", "yes", ")", ":", "if", "deep", ":", "return", "copy", ".", "deepcopy", "(", "self", ")", "else", ":", "return", "copy", ".", "copy", "(", "self", ")" ]
Makes a (deep)copy of this object for use by other objects.
[ "Makes", "a", "(", "deep", ")", "copy", "of", "this", "object", "for", "use", "by", "other", "objects", "." ]
python
train
Chilipp/model-organization
model_organization/config.py
https://github.com/Chilipp/model-organization/blob/694d1219c7ed7e1b2b17153afa11bdc21169bca2/model_organization/config.py#L447-L471
def save(self): """Save the experiment configuration This method stores the configuration of each of the experiments in a file ``'<project-dir>/.project/<experiment>.yml'``, where ``'<project-dir>'`` corresponds to the project directory of the specific ``'<experiment>'``. Furthermore it dumps all experiments to the :attr:`exp_file` configuration file. """ for exp, d in dict(self).items(): if isinstance(d, dict): project_path = self.projects[d['project']]['root'] d = self.rel_paths(copy.deepcopy(d)) fname = osp.join(project_path, '.project', exp + '.yml') if not osp.exists(osp.dirname(fname)): os.makedirs(osp.dirname(fname)) safe_dump(d, fname, default_flow_style=False) exp_file = self.exp_file # to be 100% sure we do not write to the file from multiple processes lock = fasteners.InterProcessLock(exp_file + '.lck') lock.acquire() safe_dump(OrderedDict((exp, val if isinstance(val, Archive) else None) for exp, val in self.items()), exp_file, default_flow_style=False) lock.release()
[ "def", "save", "(", "self", ")", ":", "for", "exp", ",", "d", "in", "dict", "(", "self", ")", ".", "items", "(", ")", ":", "if", "isinstance", "(", "d", ",", "dict", ")", ":", "project_path", "=", "self", ".", "projects", "[", "d", "[", "'project'", "]", "]", "[", "'root'", "]", "d", "=", "self", ".", "rel_paths", "(", "copy", ".", "deepcopy", "(", "d", ")", ")", "fname", "=", "osp", ".", "join", "(", "project_path", ",", "'.project'", ",", "exp", "+", "'.yml'", ")", "if", "not", "osp", ".", "exists", "(", "osp", ".", "dirname", "(", "fname", ")", ")", ":", "os", ".", "makedirs", "(", "osp", ".", "dirname", "(", "fname", ")", ")", "safe_dump", "(", "d", ",", "fname", ",", "default_flow_style", "=", "False", ")", "exp_file", "=", "self", ".", "exp_file", "# to be 100% sure we do not write to the file from multiple processes", "lock", "=", "fasteners", ".", "InterProcessLock", "(", "exp_file", "+", "'.lck'", ")", "lock", ".", "acquire", "(", ")", "safe_dump", "(", "OrderedDict", "(", "(", "exp", ",", "val", "if", "isinstance", "(", "val", ",", "Archive", ")", "else", "None", ")", "for", "exp", ",", "val", "in", "self", ".", "items", "(", ")", ")", ",", "exp_file", ",", "default_flow_style", "=", "False", ")", "lock", ".", "release", "(", ")" ]
Save the experiment configuration This method stores the configuration of each of the experiments in a file ``'<project-dir>/.project/<experiment>.yml'``, where ``'<project-dir>'`` corresponds to the project directory of the specific ``'<experiment>'``. Furthermore it dumps all experiments to the :attr:`exp_file` configuration file.
[ "Save", "the", "experiment", "configuration" ]
python
train
jaraco/jaraco.itertools
jaraco/itertools.py
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L589-L605
def remove_duplicates(iterable, key=None): """ Given an iterable with items that may come in as sequential duplicates, remove those duplicates. Unlike unique_justseen, this function does not remove triplicates. >>> ' '.join(remove_duplicates('abcaabbccaaabbbcccbcbc')) 'a b c a b c a a b b c c b c b c' >>> ' '.join(remove_duplicates('aaaabbbbb')) 'a a b b b' """ return itertools.chain.from_iterable(six.moves.map( every_other, six.moves.map( operator.itemgetter(1), itertools.groupby(iterable, key) )))
[ "def", "remove_duplicates", "(", "iterable", ",", "key", "=", "None", ")", ":", "return", "itertools", ".", "chain", ".", "from_iterable", "(", "six", ".", "moves", ".", "map", "(", "every_other", ",", "six", ".", "moves", ".", "map", "(", "operator", ".", "itemgetter", "(", "1", ")", ",", "itertools", ".", "groupby", "(", "iterable", ",", "key", ")", ")", ")", ")" ]
Given an iterable with items that may come in as sequential duplicates, remove those duplicates. Unlike unique_justseen, this function does not remove triplicates. >>> ' '.join(remove_duplicates('abcaabbccaaabbbcccbcbc')) 'a b c a b c a a b b c c b c b c' >>> ' '.join(remove_duplicates('aaaabbbbb')) 'a a b b b'
[ "Given", "an", "iterable", "with", "items", "that", "may", "come", "in", "as", "sequential", "duplicates", "remove", "those", "duplicates", "." ]
python
test
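The `remove_duplicates` record above depends on an `every_other` helper that is not shown. A hedged, standalone sketch that reproduces the documented behaviour on Python 3 (dropping the `six` shim), assuming `every_other` yields every second element of its input:

```python
import itertools
import operator

def every_other(iterable):
    """Yield elements 0, 2, 4, ... of the iterable (assumed helper, not in the record)."""
    return itertools.islice(iterable, 0, None, 2)

def remove_duplicates(iterable, key=None):
    """Thin out each run of equal items: 'aaaa' -> 'aa', 'aaa' -> 'aa', singletons stay."""
    return itertools.chain.from_iterable(
        map(every_other, map(operator.itemgetter(1), itertools.groupby(iterable, key)))
    )

print(' '.join(remove_duplicates('aaaabbbbb')))  # a a b b b
```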
molmod/molmod
molmod/molecules.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/molecules.py#L191-L202
def chemical_formula(self): """the chemical formula of the molecule""" counts = {} for number in self.numbers: counts[number] = counts.get(number, 0)+1 items = [] for number, count in sorted(counts.items(), reverse=True): if count == 1: items.append(periodic[number].symbol) else: items.append("%s%i" % (periodic[number].symbol, count)) return "".join(items)
[ "def", "chemical_formula", "(", "self", ")", ":", "counts", "=", "{", "}", "for", "number", "in", "self", ".", "numbers", ":", "counts", "[", "number", "]", "=", "counts", ".", "get", "(", "number", ",", "0", ")", "+", "1", "items", "=", "[", "]", "for", "number", ",", "count", "in", "sorted", "(", "counts", ".", "items", "(", ")", ",", "reverse", "=", "True", ")", ":", "if", "count", "==", "1", ":", "items", ".", "append", "(", "periodic", "[", "number", "]", ".", "symbol", ")", "else", ":", "items", ".", "append", "(", "\"%s%i\"", "%", "(", "periodic", "[", "number", "]", ".", "symbol", ",", "count", ")", ")", "return", "\"\"", ".", "join", "(", "items", ")" ]
the chemical formula of the molecule
[ "the", "chemical", "formula", "of", "the", "molecule" ]
python
train
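The `chemical_formula` property above counts atomic numbers and looks symbols up in molmod's `periodic` table. A self-contained sketch of the same counting and formatting, with a tiny hard-coded symbol table standing in for `periodic` (illustrative only):

```python
from collections import Counter

SYMBOLS = {1: "H", 6: "C", 8: "O"}  # minimal stand-in for molmod's periodic table

def chemical_formula(numbers):
    """Format a formula from atomic numbers, sorted by descending atomic number."""
    counts = Counter(numbers)
    items = []
    for number, count in sorted(counts.items(), reverse=True):
        symbol = SYMBOLS[number]
        items.append(symbol if count == 1 else "%s%i" % (symbol, count))
    return "".join(items)

print(chemical_formula([8, 1, 1]))  # OH2  (water)
print(chemical_formula([6, 8, 8]))  # O2C  (carbon dioxide)
```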
jmbhughes/suvi-trainer
scripts/make_movie_frames.py
https://github.com/jmbhughes/suvi-trainer/blob/3d89894a4a037286221974c7eb5634d229b4f5d4/scripts/make_movie_frames.py#L61-L74
def main(): """ process the main task """ args = get_args() args.start = date_parser.parse(args.start) args.end = date_parser.parse(args.end) args.step = timedelta(args.step) config = Config(args.config) times = [args.start + i * args.step for i in range(int((args.end - args.start) / args.step))] for i, time in enumerate(times): make_plot(time, config, args.step)
[ "def", "main", "(", ")", ":", "args", "=", "get_args", "(", ")", "args", ".", "start", "=", "date_parser", ".", "parse", "(", "args", ".", "start", ")", "args", ".", "end", "=", "date_parser", ".", "parse", "(", "args", ".", "end", ")", "args", ".", "step", "=", "timedelta", "(", "args", ".", "step", ")", "config", "=", "Config", "(", "args", ".", "config", ")", "times", "=", "[", "args", ".", "start", "+", "i", "*", "args", ".", "step", "for", "i", "in", "range", "(", "int", "(", "(", "args", ".", "end", "-", "args", ".", "start", ")", "/", "args", ".", "step", ")", ")", "]", "for", "i", ",", "time", "in", "enumerate", "(", "times", ")", ":", "make_plot", "(", "time", ",", "config", ",", "args", ".", "step", ")" ]
process the main task
[ "process", "the", "main", "task" ]
python
train
saltstack/salt
salt/modules/mac_desktop.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_desktop.py#L111-L132
def say(*words): ''' Say some words. words The words to execute the say command with. CLI Example: .. code-block:: bash salt '*' desktop.say <word0> <word1> ... <wordN> ''' cmd = 'say {0}'.format(' '.join(words)) call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return True
[ "def", "say", "(", "*", "words", ")", ":", "cmd", "=", "'say {0}'", ".", "format", "(", "' '", ".", "join", "(", "words", ")", ")", "call", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "output_loglevel", "=", "'debug'", ",", "python_shell", "=", "False", ")", "_check_cmd", "(", "call", ")", "return", "True" ]
Say some words. words The words to execute the say command with. CLI Example: .. code-block:: bash salt '*' desktop.say <word0> <word1> ... <wordN>
[ "Say", "some", "words", "." ]
python
train
quodlibet/mutagen
mutagen/aac.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/aac.py#L132-L141
def frequency(self): """0 means unknown""" assert self.parsed_frames, "no frame parsed yet" f_index = self._fixed_header_key[4] try: return _FREQS[f_index] except IndexError: return 0
[ "def", "frequency", "(", "self", ")", ":", "assert", "self", ".", "parsed_frames", ",", "\"no frame parsed yet\"", "f_index", "=", "self", ".", "_fixed_header_key", "[", "4", "]", "try", ":", "return", "_FREQS", "[", "f_index", "]", "except", "IndexError", ":", "return", "0" ]
0 means unknown
[ "0", "means", "unknown" ]
python
train
cokelaer/spectrum
src/spectrum/arma.py
https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/src/spectrum/arma.py#L130-L221
def arma_estimate(X, P, Q, lag): """Autoregressive and moving average estimators. This function provides an estimate of the autoregressive parameters, the moving average parameters, and the driving white noise variance of an ARMA(P,Q) for a complex or real data sequence. The parameters are estimated using three steps: * Estimate the AR parameters from the original data based on a least squares modified Yule-Walker technique, * Produce a residual time sequence by filtering the original data with a filter based on the AR parameters, * Estimate the MA parameters from the residual time sequence. :param array X: Array of data samples (length N) :param int P: Desired number of AR parameters :param int Q: Desired number of MA parameters :param int lag: Maximum lag to use for autocorrelation estimates :return: * A - Array of complex P AR parameter estimates * B - Array of complex Q MA parameter estimates * RHO - White noise variance estimate .. note:: * lag must be >= Q (MA order) **dependencies**: * :meth:`spectrum.correlation.CORRELATION` * :meth:`spectrum.covar.arcovar` * :meth:`spectrum.arma.ma` .. plot:: :width: 80% :include-source: from spectrum import arma_estimate, arma2psd, marple_data import pylab a,b, rho = arma_estimate(marple_data, 15, 15, 30) psd = arma2psd(A=a, B=b, rho=rho, sides='centerdc', norm=True) pylab.plot(10 * pylab.log10(psd)) pylab.ylim([-50,0]) :reference: [Marple]_ """ R = CORRELATION(X, maxlags=lag, norm='unbiased') R0 = R[0] #C Estimate the AR parameters (no error weighting is used). #C Number of equation errors is M-Q . MPQ = lag - Q + P N = len(X) Y = np.zeros(N-P, dtype=complex) for K in range(0, MPQ): KPQ = K + Q - P+1 if KPQ < 0: Y[K] = R[-KPQ].conjugate() if KPQ == 0: Y[K] = R0 if KPQ > 0: Y[K] = R[KPQ] # The resize is very important for the normalissation. Y.resize(lag) if P <= 4: res = arcovar_marple(Y.copy(), P) #! Eq. (10.12) ar_params = res[0] else: res = arcovar(Y.copy(), P) #! Eq. (10.12) ar_params = res[0] # the .copy is used to prevent a reference somewhere. this is a bug # to be tracked down. Y.resize(N-P) #C Filter the original time series for k in range(P, N): SUM = X[k] #SUM += sum([ar_params[j]*X[k-j-1] for j in range(0,P)]) for j in range(0, P): SUM = SUM + ar_params[j] * X[k-j-1] #! Eq. (10.17) Y[k-P] = SUM # Estimate the MA parameters (a "long" AR of order at least 2*IQ #C is suggested) #Y.resize(N-P) ma_params, rho = ma(Y, Q, 2*Q) #! Eq. (10.3) return ar_params, ma_params, rho
[ "def", "arma_estimate", "(", "X", ",", "P", ",", "Q", ",", "lag", ")", ":", "R", "=", "CORRELATION", "(", "X", ",", "maxlags", "=", "lag", ",", "norm", "=", "'unbiased'", ")", "R0", "=", "R", "[", "0", "]", "#C Estimate the AR parameters (no error weighting is used).", "#C Number of equation errors is M-Q .", "MPQ", "=", "lag", "-", "Q", "+", "P", "N", "=", "len", "(", "X", ")", "Y", "=", "np", ".", "zeros", "(", "N", "-", "P", ",", "dtype", "=", "complex", ")", "for", "K", "in", "range", "(", "0", ",", "MPQ", ")", ":", "KPQ", "=", "K", "+", "Q", "-", "P", "+", "1", "if", "KPQ", "<", "0", ":", "Y", "[", "K", "]", "=", "R", "[", "-", "KPQ", "]", ".", "conjugate", "(", ")", "if", "KPQ", "==", "0", ":", "Y", "[", "K", "]", "=", "R0", "if", "KPQ", ">", "0", ":", "Y", "[", "K", "]", "=", "R", "[", "KPQ", "]", "# The resize is very important for the normalissation.", "Y", ".", "resize", "(", "lag", ")", "if", "P", "<=", "4", ":", "res", "=", "arcovar_marple", "(", "Y", ".", "copy", "(", ")", ",", "P", ")", "#! Eq. (10.12)", "ar_params", "=", "res", "[", "0", "]", "else", ":", "res", "=", "arcovar", "(", "Y", ".", "copy", "(", ")", ",", "P", ")", "#! Eq. (10.12)", "ar_params", "=", "res", "[", "0", "]", "# the .copy is used to prevent a reference somewhere. this is a bug", "# to be tracked down.", "Y", ".", "resize", "(", "N", "-", "P", ")", "#C Filter the original time series", "for", "k", "in", "range", "(", "P", ",", "N", ")", ":", "SUM", "=", "X", "[", "k", "]", "#SUM += sum([ar_params[j]*X[k-j-1] for j in range(0,P)])", "for", "j", "in", "range", "(", "0", ",", "P", ")", ":", "SUM", "=", "SUM", "+", "ar_params", "[", "j", "]", "*", "X", "[", "k", "-", "j", "-", "1", "]", "#! Eq. (10.17)", "Y", "[", "k", "-", "P", "]", "=", "SUM", "# Estimate the MA parameters (a \"long\" AR of order at least 2*IQ", "#C is suggested)", "#Y.resize(N-P)", "ma_params", ",", "rho", "=", "ma", "(", "Y", ",", "Q", ",", "2", "*", "Q", ")", "#! Eq. (10.3)", "return", "ar_params", ",", "ma_params", ",", "rho" ]
Autoregressive and moving average estimators. This function provides an estimate of the autoregressive parameters, the moving average parameters, and the driving white noise variance of an ARMA(P,Q) for a complex or real data sequence. The parameters are estimated using three steps: * Estimate the AR parameters from the original data based on a least squares modified Yule-Walker technique, * Produce a residual time sequence by filtering the original data with a filter based on the AR parameters, * Estimate the MA parameters from the residual time sequence. :param array X: Array of data samples (length N) :param int P: Desired number of AR parameters :param int Q: Desired number of MA parameters :param int lag: Maximum lag to use for autocorrelation estimates :return: * A - Array of complex P AR parameter estimates * B - Array of complex Q MA parameter estimates * RHO - White noise variance estimate .. note:: * lag must be >= Q (MA order) **dependencies**: * :meth:`spectrum.correlation.CORRELATION` * :meth:`spectrum.covar.arcovar` * :meth:`spectrum.arma.ma` .. plot:: :width: 80% :include-source: from spectrum import arma_estimate, arma2psd, marple_data import pylab a,b, rho = arma_estimate(marple_data, 15, 15, 30) psd = arma2psd(A=a, B=b, rho=rho, sides='centerdc', norm=True) pylab.plot(10 * pylab.log10(psd)) pylab.ylim([-50,0]) :reference: [Marple]_
[ "Autoregressive", "and", "moving", "average", "estimators", "." ]
python
valid
yyuu/botornado
boto/pyami/installers/ubuntu/installer.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/pyami/installers/ubuntu/installer.py#L34-L49
def add_cron(self, name, command, minute="*", hour="*", mday="*", month="*", wday="*", who="root", env=None): """ Write a file to /etc/cron.d to schedule a command env is a dict containing environment variables you want to set in the file name will be used as the name of the file """ if minute == 'random': minute = str(random.randrange(60)) if hour == 'random': hour = str(random.randrange(24)) fp = open('/etc/cron.d/%s' % name, "w") if env: for key, value in env.items(): fp.write('%s=%s\n' % (key, value)) fp.write('%s %s %s %s %s %s %s\n' % (minute, hour, mday, month, wday, who, command)) fp.close()
[ "def", "add_cron", "(", "self", ",", "name", ",", "command", ",", "minute", "=", "\"*\"", ",", "hour", "=", "\"*\"", ",", "mday", "=", "\"*\"", ",", "month", "=", "\"*\"", ",", "wday", "=", "\"*\"", ",", "who", "=", "\"root\"", ",", "env", "=", "None", ")", ":", "if", "minute", "==", "'random'", ":", "minute", "=", "str", "(", "random", ".", "randrange", "(", "60", ")", ")", "if", "hour", "==", "'random'", ":", "hour", "=", "str", "(", "random", ".", "randrange", "(", "24", ")", ")", "fp", "=", "open", "(", "'/etc/cron.d/%s'", "%", "name", ",", "\"w\"", ")", "if", "env", ":", "for", "key", ",", "value", "in", "env", ".", "items", "(", ")", ":", "fp", ".", "write", "(", "'%s=%s\\n'", "%", "(", "key", ",", "value", ")", ")", "fp", ".", "write", "(", "'%s %s %s %s %s %s %s\\n'", "%", "(", "minute", ",", "hour", ",", "mday", ",", "month", ",", "wday", ",", "who", ",", "command", ")", ")", "fp", ".", "close", "(", ")" ]
Write a file to /etc/cron.d to schedule a command env is a dict containing environment variables you want to set in the file name will be used as the name of the file
[ "Write", "a", "file", "to", "/", "etc", "/", "cron", ".", "d", "to", "schedule", "a", "command", "env", "is", "a", "dict", "containing", "environment", "variables", "you", "want", "to", "set", "in", "the", "file", "name", "will", "be", "used", "as", "the", "name", "of", "the", "file" ]
python
train
bennylope/django-organizations
organizations/base.py
https://github.com/bennylope/django-organizations/blob/85f753a8f7a8f0f31636c9209fb69e7030a5c79a/organizations/base.py#L190-L230
def update_org_invite(cls, module): """ Adds the links to the organization and to the organization user """ try: cls.module_registry[module]["OrgInviteModel"]._meta.get_field("invited_by") except FieldDoesNotExist: cls.module_registry[module]["OrgInviteModel"].add_to_class( "invited_by", models.ForeignKey( USER_MODEL, related_name="%(app_label)s_%(class)s_sent_invitations", on_delete=models.CASCADE, ), ) try: cls.module_registry[module]["OrgInviteModel"]._meta.get_field("invitee") except FieldDoesNotExist: cls.module_registry[module]["OrgInviteModel"].add_to_class( "invitee", models.ForeignKey( USER_MODEL, null=True, blank=True, related_name="%(app_label)s_%(class)s_invitations", on_delete=models.CASCADE, ), ) try: cls.module_registry[module]["OrgInviteModel"]._meta.get_field( "organization" ) except FieldDoesNotExist: cls.module_registry[module]["OrgInviteModel"].add_to_class( "organization", models.ForeignKey( cls.module_registry[module]["OrgModel"], related_name="organization_invites", on_delete=models.CASCADE, ), )
[ "def", "update_org_invite", "(", "cls", ",", "module", ")", ":", "try", ":", "cls", ".", "module_registry", "[", "module", "]", "[", "\"OrgInviteModel\"", "]", ".", "_meta", ".", "get_field", "(", "\"invited_by\"", ")", "except", "FieldDoesNotExist", ":", "cls", ".", "module_registry", "[", "module", "]", "[", "\"OrgInviteModel\"", "]", ".", "add_to_class", "(", "\"invited_by\"", ",", "models", ".", "ForeignKey", "(", "USER_MODEL", ",", "related_name", "=", "\"%(app_label)s_%(class)s_sent_invitations\"", ",", "on_delete", "=", "models", ".", "CASCADE", ",", ")", ",", ")", "try", ":", "cls", ".", "module_registry", "[", "module", "]", "[", "\"OrgInviteModel\"", "]", ".", "_meta", ".", "get_field", "(", "\"invitee\"", ")", "except", "FieldDoesNotExist", ":", "cls", ".", "module_registry", "[", "module", "]", "[", "\"OrgInviteModel\"", "]", ".", "add_to_class", "(", "\"invitee\"", ",", "models", ".", "ForeignKey", "(", "USER_MODEL", ",", "null", "=", "True", ",", "blank", "=", "True", ",", "related_name", "=", "\"%(app_label)s_%(class)s_invitations\"", ",", "on_delete", "=", "models", ".", "CASCADE", ",", ")", ",", ")", "try", ":", "cls", ".", "module_registry", "[", "module", "]", "[", "\"OrgInviteModel\"", "]", ".", "_meta", ".", "get_field", "(", "\"organization\"", ")", "except", "FieldDoesNotExist", ":", "cls", ".", "module_registry", "[", "module", "]", "[", "\"OrgInviteModel\"", "]", ".", "add_to_class", "(", "\"organization\"", ",", "models", ".", "ForeignKey", "(", "cls", ".", "module_registry", "[", "module", "]", "[", "\"OrgModel\"", "]", ",", "related_name", "=", "\"organization_invites\"", ",", "on_delete", "=", "models", ".", "CASCADE", ",", ")", ",", ")" ]
Adds the links to the organization and to the organization user
[ "Adds", "the", "links", "to", "the", "organization", "and", "to", "the", "organization", "user" ]
python
train
rdireen/spherepy
spherepy/spherepy.py
https://github.com/rdireen/spherepy/blob/241521401d4d76851d4a1a564a365cfab8e98496/spherepy/spherepy.py#L271-L294
def angular_power_spectrum(self): """Returns the angular power spectrum for the set of coefficients. That is, we compute n c_n = sum cnm * conj( cnm ) m=-n Returns: power_spectrum (numpy.array, dtype=double) spectrum as a function of n. """ # Added this routine as a result of my discussions with Ajinkya Nene #https://github.com/anene list_of_modes = self._reshape_m_vecs() Nmodes = len(list_of_modes) angular_power = np.zeros( Nmodes, dtype = np.double) for n in range(0, Nmodes): mode = np.array( list_of_modes[n], dtype = np.complex128 ) angular_power[n] = np.sum( np.abs(mode) ** 2 ) return angular_power
[ "def", "angular_power_spectrum", "(", "self", ")", ":", "# Added this routine as a result of my discussions with Ajinkya Nene\t \r", "#https://github.com/anene\r", "list_of_modes", "=", "self", ".", "_reshape_m_vecs", "(", ")", "Nmodes", "=", "len", "(", "list_of_modes", ")", "angular_power", "=", "np", ".", "zeros", "(", "Nmodes", ",", "dtype", "=", "np", ".", "double", ")", "for", "n", "in", "range", "(", "0", ",", "Nmodes", ")", ":", "mode", "=", "np", ".", "array", "(", "list_of_modes", "[", "n", "]", ",", "dtype", "=", "np", ".", "complex128", ")", "angular_power", "[", "n", "]", "=", "np", ".", "sum", "(", "np", ".", "abs", "(", "mode", ")", "**", "2", ")", "return", "angular_power" ]
Returns the angular power spectrum for the set of coefficients. That is, we compute n c_n = sum cnm * conj( cnm ) m=-n Returns: power_spectrum (numpy.array, dtype=double) spectrum as a function of n.
[ "Returns", "the", "angular", "power", "spectrum", "for", "the", "set", "of", "coefficients", ".", "That", "is", "we", "compute", "n", "c_n", "=", "sum", "cnm", "*", "conj", "(", "cnm", ")", "m", "=", "-", "n", "Returns", ":", "power_spectrum", "(", "numpy", ".", "array", "dtype", "=", "double", ")", "spectrum", "as", "a", "function", "of", "n", "." ]
python
train
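The `angular_power_spectrum` method above computes, for each degree n, the sum over m of |c_nm|^2. A small NumPy sketch of that computation over a plain list of per-degree coefficient arrays (the list layout is assumed; spherepy reshapes its internal coefficient vector first):

```python
import numpy as np

def angular_power_spectrum(modes_by_degree):
    """c_n = sum over m of |c_nm|**2 for each degree n."""
    power = np.zeros(len(modes_by_degree), dtype=np.double)
    for n, mode in enumerate(modes_by_degree):
        mode = np.asarray(mode, dtype=np.complex128)
        power[n] = np.sum(np.abs(mode) ** 2)
    return power

# degree 0 has one coefficient, degree 1 has three (m = -1, 0, 1)
print(angular_power_spectrum([[1 + 0j], [0.5j, 1.0, -0.5j]]))  # [1.  1.5]
```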
BernardFW/bernard
src/bernard/platforms/facebook/platform.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L1004-L1022
def _message_from_sr(self, token: Text, payload: Any) \ -> Optional[BaseMessage]: """ Tries to verify the signed request """ page = self.settings() secret = page['app_secret'] try: sr_data = SignedRequest.parse(token, secret) except (TypeError, ValueError, SignedRequestError) as e: return return self._make_fake_message( sr_data['psid'], page['page_id'], payload, )
[ "def", "_message_from_sr", "(", "self", ",", "token", ":", "Text", ",", "payload", ":", "Any", ")", "->", "Optional", "[", "BaseMessage", "]", ":", "page", "=", "self", ".", "settings", "(", ")", "secret", "=", "page", "[", "'app_secret'", "]", "try", ":", "sr_data", "=", "SignedRequest", ".", "parse", "(", "token", ",", "secret", ")", "except", "(", "TypeError", ",", "ValueError", ",", "SignedRequestError", ")", "as", "e", ":", "return", "return", "self", ".", "_make_fake_message", "(", "sr_data", "[", "'psid'", "]", ",", "page", "[", "'page_id'", "]", ",", "payload", ",", ")" ]
Tries to verify the signed request
[ "Tries", "to", "verify", "the", "signed", "request" ]
python
train
acrazing/dbapi
dbapi/Group.py
https://github.com/acrazing/dbapi/blob/8c1f85cb1a051daf7be1fc97a62c4499983e9898/dbapi/Group.py#L255-L263
def list_user_topics(self, start=0): """ 发表的话题 :param start: 翻页 :return: 带下一页的列表 """ xml = self.api.xml(API_GROUP_LIST_USER_PUBLISHED_TOPICS % self.api.user_alias, params={'start': start}) return build_list_result(self._parse_topic_table(xml, 'title,comment,created,group'), xml)
[ "def", "list_user_topics", "(", "self", ",", "start", "=", "0", ")", ":", "xml", "=", "self", ".", "api", ".", "xml", "(", "API_GROUP_LIST_USER_PUBLISHED_TOPICS", "%", "self", ".", "api", ".", "user_alias", ",", "params", "=", "{", "'start'", ":", "start", "}", ")", "return", "build_list_result", "(", "self", ".", "_parse_topic_table", "(", "xml", ",", "'title,comment,created,group'", ")", ",", "xml", ")" ]
发表的话题 :param start: 翻页 :return: 带下一页的列表
[ "发表的话题", ":", "param", "start", ":", "翻页", ":", "return", ":", "带下一页的列表" ]
python
train
juju/charm-helpers
charmhelpers/core/host.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/host.py#L669-L681
def check_hash(path, checksum, hash_type='md5'): """Validate a file using a cryptographic checksum. :param str checksum: Value of the checksum used to validate the file. :param str hash_type: Hash algorithm used to generate `checksum`. Can be any hash algorithm supported by :mod:`hashlib`, such as md5, sha1, sha256, sha512, etc. :raises ChecksumError: If the file fails the checksum """ actual_checksum = file_hash(path, hash_type) if checksum != actual_checksum: raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
[ "def", "check_hash", "(", "path", ",", "checksum", ",", "hash_type", "=", "'md5'", ")", ":", "actual_checksum", "=", "file_hash", "(", "path", ",", "hash_type", ")", "if", "checksum", "!=", "actual_checksum", ":", "raise", "ChecksumError", "(", "\"'%s' != '%s'\"", "%", "(", "checksum", ",", "actual_checksum", ")", ")" ]
Validate a file using a cryptographic checksum. :param str checksum: Value of the checksum used to validate the file. :param str hash_type: Hash algorithm used to generate `checksum`. Can be any hash algorithm supported by :mod:`hashlib`, such as md5, sha1, sha256, sha512, etc. :raises ChecksumError: If the file fails the checksum
[ "Validate", "a", "file", "using", "a", "cryptographic", "checksum", "." ]
python
train
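`check_hash` above delegates to a `file_hash` helper that the record does not include. A plausible, self-contained sketch of the pair using `hashlib` (the `file_hash` body and the use of `ValueError` in place of the library's `ChecksumError` are assumptions, not the charm-helpers implementation):

```python
import hashlib

def file_hash(path, hash_type='md5'):
    """Hex digest of a file, read in chunks so memory use stays flat."""
    digest = hashlib.new(hash_type)
    with open(path, 'rb') as source:
        for chunk in iter(lambda: source.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()

def check_hash(path, checksum, hash_type='md5'):
    """Raise if the file's digest does not match the expected checksum."""
    actual_checksum = file_hash(path, hash_type)
    if checksum != actual_checksum:
        raise ValueError("'%s' != '%s'" % (checksum, actual_checksum))
```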
Contraz/demosys-py
demosys/project/base.py
https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/project/base.py#L174-L182
def reload_programs(self): """ Reload all shader programs with the reloadable flag set """ print("Reloading programs:") for name, program in self._programs.items(): if getattr(program, 'program', None): print(" - {}".format(program.meta.label)) program.program = resources.programs.load(program.meta)
[ "def", "reload_programs", "(", "self", ")", ":", "print", "(", "\"Reloading programs:\"", ")", "for", "name", ",", "program", "in", "self", ".", "_programs", ".", "items", "(", ")", ":", "if", "getattr", "(", "program", ",", "'program'", ",", "None", ")", ":", "print", "(", "\" - {}\"", ".", "format", "(", "program", ".", "meta", ".", "label", ")", ")", "program", ".", "program", "=", "resources", ".", "programs", ".", "load", "(", "program", ".", "meta", ")" ]
Reload all shader programs with the reloadable flag set
[ "Reload", "all", "shader", "programs", "with", "the", "reloadable", "flag", "set" ]
python
valid
Delgan/loguru
loguru/_logger.py
https://github.com/Delgan/loguru/blob/6571879c37904e3a18567e694d70651c6886b860/loguru/_logger.py#L1013-L1079
def opt(self, *, exception=None, record=False, lazy=False, ansi=False, raw=False, depth=0): r"""Parametrize a logging call to slightly change generated log message. Parameters ---------- exception : |bool|, |tuple| or |Exception|, optional If it does not evaluate as ``False``, the passed exception is formatted and added to the log message. It could be an |Exception| object or a ``(type, value, traceback)`` tuple, otherwise the exception information is retrieved from |sys.exc_info|. record : |bool|, optional If ``True``, the record dict contextualizing the logging call can be used to format the message by using ``{record[key]}`` in the log message. lazy : |bool|, optional If ``True``, the logging call attribute to format the message should be functions which will be called only if the level is high enough. This can be used to avoid expensive functions if not necessary. ansi : |bool|, optional If ``True``, logged message will be colorized according to the markups it possibly contains. raw : |bool|, optional If ``True``, the formatting of each sink will be bypassed and the message will be send as is. depth : |int|, optional Specify which stacktrace should be used to contextualize the logged message. This is useful while using the logger from inside a wrapped function to retrieve worthwhile information. Returns ------- :class:`~Logger` A logger wrapping the core logger, but transforming logged message adequately before sending. Examples -------- >>> try: ... 1 / 0 ... except ZeroDivisionError: ... logger.opt(exception=True).debug("Exception logged with debug level:") ... [18:10:02] DEBUG in '<module>' - Exception logged with debug level: Traceback (most recent call last, catch point marked): > File "<stdin>", line 2, in <module> ZeroDivisionError: division by zero >>> logger.opt(record=True).info("Current line is: {record[line]}") [18:10:33] INFO in '<module>' - Current line is: 1 >>> logger.opt(lazy=True).debug("If sink <= DEBUG: {x}", x=lambda: math.factorial(2**5)) [18:11:19] DEBUG in '<module>' - If sink <= DEBUG: 263130836933693530167218012160000000 >>> logger.opt(ansi=True).warning("We got a <red>BIG</red> problem") [18:11:30] WARNING in '<module>' - We got a BIG problem >>> logger.opt(raw=True).debug("No formatting\n") No formatting >>> def wrapped(): ... logger.opt(depth=1).info("Get parent context") ... >>> def func(): ... wrapped() ... >>> func() [18:11:54] DEBUG in 'func' - Get parent context """ return Logger(self._extra, exception, record, lazy, ansi, raw, depth)
[ "def", "opt", "(", "self", ",", "*", ",", "exception", "=", "None", ",", "record", "=", "False", ",", "lazy", "=", "False", ",", "ansi", "=", "False", ",", "raw", "=", "False", ",", "depth", "=", "0", ")", ":", "return", "Logger", "(", "self", ".", "_extra", ",", "exception", ",", "record", ",", "lazy", ",", "ansi", ",", "raw", ",", "depth", ")" ]
r"""Parametrize a logging call to slightly change generated log message. Parameters ---------- exception : |bool|, |tuple| or |Exception|, optional If it does not evaluate as ``False``, the passed exception is formatted and added to the log message. It could be an |Exception| object or a ``(type, value, traceback)`` tuple, otherwise the exception information is retrieved from |sys.exc_info|. record : |bool|, optional If ``True``, the record dict contextualizing the logging call can be used to format the message by using ``{record[key]}`` in the log message. lazy : |bool|, optional If ``True``, the logging call attribute to format the message should be functions which will be called only if the level is high enough. This can be used to avoid expensive functions if not necessary. ansi : |bool|, optional If ``True``, logged message will be colorized according to the markups it possibly contains. raw : |bool|, optional If ``True``, the formatting of each sink will be bypassed and the message will be send as is. depth : |int|, optional Specify which stacktrace should be used to contextualize the logged message. This is useful while using the logger from inside a wrapped function to retrieve worthwhile information. Returns ------- :class:`~Logger` A logger wrapping the core logger, but transforming logged message adequately before sending. Examples -------- >>> try: ... 1 / 0 ... except ZeroDivisionError: ... logger.opt(exception=True).debug("Exception logged with debug level:") ... [18:10:02] DEBUG in '<module>' - Exception logged with debug level: Traceback (most recent call last, catch point marked): > File "<stdin>", line 2, in <module> ZeroDivisionError: division by zero >>> logger.opt(record=True).info("Current line is: {record[line]}") [18:10:33] INFO in '<module>' - Current line is: 1 >>> logger.opt(lazy=True).debug("If sink <= DEBUG: {x}", x=lambda: math.factorial(2**5)) [18:11:19] DEBUG in '<module>' - If sink <= DEBUG: 263130836933693530167218012160000000 >>> logger.opt(ansi=True).warning("We got a <red>BIG</red> problem") [18:11:30] WARNING in '<module>' - We got a BIG problem >>> logger.opt(raw=True).debug("No formatting\n") No formatting >>> def wrapped(): ... logger.opt(depth=1).info("Get parent context") ... >>> def func(): ... wrapped() ... >>> func() [18:11:54] DEBUG in 'func' - Get parent context
[ "r", "Parametrize", "a", "logging", "call", "to", "slightly", "change", "generated", "log", "message", "." ]
python
train
inveniosoftware/invenio-pidrelations
invenio_pidrelations/serializers/schemas.py
https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/serializers/schemas.py#L104-L109
def dump_type(self, obj): """Dump the text name of the relation.""" if not isinstance(obj.relation_type, RelationType): return resolve_relation_type_config(obj.relation_type).name else: return obj.relation_type.name
[ "def", "dump_type", "(", "self", ",", "obj", ")", ":", "if", "not", "isinstance", "(", "obj", ".", "relation_type", ",", "RelationType", ")", ":", "return", "resolve_relation_type_config", "(", "obj", ".", "relation_type", ")", ".", "name", "else", ":", "return", "obj", ".", "relation_type", ".", "name" ]
Dump the text name of the relation.
[ "Dump", "the", "text", "name", "of", "the", "relation", "." ]
python
train
ianmiell/shutit
shutit_class.py
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L718-L754
def send_until(self, send, regexps, not_there=False, shutit_pexpect_child=None, cadence=5, retries=100, echo=None, note=None, debug_command=None, pause_point_on_fail=True, nonewline=False, loglevel=logging.INFO): """Send string on a regular cadence until a string is either seen, or the timeout is triggered. @param send: See send() @param regexps: List of regexps to wait for. @param not_there: If True, wait until this a regexp is not seen in the output. If False wait until a regexp is seen in the output (default) @param shutit_pexpect_child: See send() @param echo: See send() @param note: See send() """ shutit_global.shutit_global_object.yield_to_draw() shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child) return shutit_pexpect_session.send_until(send, regexps, not_there=not_there, cadence=cadence, retries=retries, echo=echo, note=note, loglevel=loglevel, debug_command=debug_command, nonewline=nonewline, pause_point_on_fail=pause_point_on_fail)
[ "def", "send_until", "(", "self", ",", "send", ",", "regexps", ",", "not_there", "=", "False", ",", "shutit_pexpect_child", "=", "None", ",", "cadence", "=", "5", ",", "retries", "=", "100", ",", "echo", "=", "None", ",", "note", "=", "None", ",", "debug_command", "=", "None", ",", "pause_point_on_fail", "=", "True", ",", "nonewline", "=", "False", ",", "loglevel", "=", "logging", ".", "INFO", ")", ":", "shutit_global", ".", "shutit_global_object", ".", "yield_to_draw", "(", ")", "shutit_pexpect_child", "=", "shutit_pexpect_child", "or", "self", ".", "get_current_shutit_pexpect_session", "(", ")", ".", "pexpect_child", "shutit_pexpect_session", "=", "self", ".", "get_shutit_pexpect_session_from_child", "(", "shutit_pexpect_child", ")", "return", "shutit_pexpect_session", ".", "send_until", "(", "send", ",", "regexps", ",", "not_there", "=", "not_there", ",", "cadence", "=", "cadence", ",", "retries", "=", "retries", ",", "echo", "=", "echo", ",", "note", "=", "note", ",", "loglevel", "=", "loglevel", ",", "debug_command", "=", "debug_command", ",", "nonewline", "=", "nonewline", ",", "pause_point_on_fail", "=", "pause_point_on_fail", ")" ]
Send string on a regular cadence until a string is either seen, or the timeout is triggered. @param send: See send() @param regexps: List of regexps to wait for. @param not_there: If True, wait until this a regexp is not seen in the output. If False wait until a regexp is seen in the output (default) @param shutit_pexpect_child: See send() @param echo: See send() @param note: See send()
[ "Send", "string", "on", "a", "regular", "cadence", "until", "a", "string", "is", "either", "seen", "or", "the", "timeout", "is", "triggered", "." ]
python
train
Roastero/freshroastsr700
freshroastsr700/__init__.py
https://github.com/Roastero/freshroastsr700/blob/49cf4961444c0f56d051d5ac5088ace480b54f02/freshroastsr700/__init__.py#L259-L264
def heat_setting(self, value): """Verifies that the heat setting is between 0 and 3.""" if value not in range(0, 4): raise exceptions.RoasterValueError self._heat_setting.value = value
[ "def", "heat_setting", "(", "self", ",", "value", ")", ":", "if", "value", "not", "in", "range", "(", "0", ",", "4", ")", ":", "raise", "exceptions", ".", "RoasterValueError", "self", ".", "_heat_setting", ".", "value", "=", "value" ]
Verifies that the heat setting is between 0 and 3.
[ "Verifies", "that", "the", "heat", "setting", "is", "between", "0", "and", "3", "." ]
python
train
saltstack/salt
salt/modules/solarisipspkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solarisipspkg.py#L136-L155
def upgrade_available(name, **kwargs): ''' Check if there is an upgrade available for a certain package Accepts full or partial FMRI. Returns all matches found. CLI Example: .. code-block:: bash salt '*' pkg.upgrade_available apache-22 ''' version = None cmd = ['pkg', 'list', '-Huv', name] lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: return {} ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) return ret
[ "def", "upgrade_available", "(", "name", ",", "*", "*", "kwargs", ")", ":", "version", "=", "None", "cmd", "=", "[", "'pkg'", ",", "'list'", ",", "'-Huv'", ",", "name", "]", "lines", "=", "__salt__", "[", "'cmd.run_stdout'", "]", "(", "cmd", ")", ".", "splitlines", "(", ")", "if", "not", "lines", ":", "return", "{", "}", "ret", "=", "{", "}", "for", "line", "in", "lines", ":", "ret", "[", "_ips_get_pkgname", "(", "line", ")", "]", "=", "_ips_get_pkgversion", "(", "line", ")", "return", "ret" ]
Check if there is an upgrade available for a certain package Accepts full or partial FMRI. Returns all matches found. CLI Example: .. code-block:: bash salt '*' pkg.upgrade_available apache-22
[ "Check", "if", "there", "is", "an", "upgrade", "available", "for", "a", "certain", "package", "Accepts", "full", "or", "partial", "FMRI", ".", "Returns", "all", "matches", "found", "." ]
python
train
arviz-devs/arviz
arviz/plots/compareplot.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/plots/compareplot.py#L7-L171
def plot_compare( comp_df, insample_dev=True, plot_standard_error=True, plot_ic_diff=True, figsize=None, textsize=None, plot_kwargs=None, ax=None, ): """ Summary plot for model comparison. This plot is in the style of the one used in the book Statistical Rethinking (Chapter 6) by Richard McElreath. Notes ----- Defaults to comparing Widely Accepted Information Criterion (WAIC) if present in comp_df column, otherwise compares Leave-one-out (loo) Parameters ---------- comp_df: pd.DataFrame Result of the `az.compare()` method insample_dev : bool, optional Plot in-sample deviance, that is the value of the information criteria without the penalization given by the effective number of parameters (pIC). Defaults to True plot_standard_error : bool, optional Plot the standard error of the information criteria estimate. Defaults to True plot_ic_diff : bool, optional Plot standard error of the difference in information criteria between each model and the top-ranked model. Defaults to True figsize : tuple, optional If None, size is (6, num of models) inches textsize: float Text size scaling factor for labels, titles and lines. If None it will be autoscaled based on figsize. plot_kwargs : dict, optional Optional arguments for plot elements. Currently accepts 'color_ic', 'marker_ic', 'color_insample_dev', 'marker_insample_dev', 'color_dse', 'marker_dse', 'ls_min_ic' 'color_ls_min_ic', 'fontsize' ax : axes, optional Matplotlib axes Returns ------- ax : matplotlib axes Examples -------- Show default compare plot .. plot:: :context: close-figs >>> import arviz as az >>> model_compare = az.compare({'Centered 8 schools': az.load_arviz_data('centered_eight'), >>> 'Non-centered 8 schools': az.load_arviz_data('non_centered_eight')}) >>> az.plot_compare(model_compare) Plot standard error and information criteria difference only .. 
plot:: :context: close-figs >>> az.plot_compare(model_compare, insample_dev=False) """ if figsize is None: figsize = (6, len(comp_df)) figsize, ax_labelsize, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize, 1, 1) if ax is None: _, ax = plt.subplots(figsize=figsize, constrained_layout=True) if plot_kwargs is None: plot_kwargs = {} yticks_pos, step = np.linspace(0, -1, (comp_df.shape[0] * 2) - 1, retstep=True) yticks_pos[1::2] = yticks_pos[1::2] + step / 2 yticks_labels = [""] * len(yticks_pos) _information_criterion = ["waic", "loo"] for information_criterion in _information_criterion: if information_criterion in comp_df.columns: break else: raise ValueError( "comp_df must contain one of the following" " information criterion: {}".format(_information_criterion) ) if plot_ic_diff: yticks_labels[0] = comp_df.index[0] yticks_labels[2::2] = comp_df.index[1:] ax.set_yticks(yticks_pos) ax.errorbar( x=comp_df[information_criterion].iloc[1:], y=yticks_pos[1::2], xerr=comp_df.dse[1:], color=plot_kwargs.get("color_dse", "grey"), fmt=plot_kwargs.get("marker_dse", "^"), mew=linewidth, elinewidth=linewidth, ) else: yticks_labels = comp_df.index ax.set_yticks(yticks_pos[::2]) if plot_standard_error: ax.errorbar( x=comp_df[information_criterion], y=yticks_pos[::2], xerr=comp_df.se, color=plot_kwargs.get("color_ic", "k"), fmt=plot_kwargs.get("marker_ic", "o"), mfc="None", mew=linewidth, lw=linewidth, ) else: ax.plot( comp_df[information_criterion], yticks_pos[::2], color=plot_kwargs.get("color_ic", "k"), marker=plot_kwargs.get("marker_ic", "o"), mfc="None", mew=linewidth, lw=0, ) if insample_dev: ax.plot( comp_df[information_criterion] - (2 * comp_df["p_" + information_criterion]), yticks_pos[::2], color=plot_kwargs.get("color_insample_dev", "k"), marker=plot_kwargs.get("marker_insample_dev", "o"), mew=linewidth, lw=0, ) ax.axvline( comp_df[information_criterion].iloc[0], ls=plot_kwargs.get("ls_min_ic", "--"), color=plot_kwargs.get("color_ls_min_ic", "grey"), lw=linewidth, ) scale_col = information_criterion + "_scale" if scale_col in comp_df: scale = comp_df[scale_col].iloc[0].capitalize() else: scale = "Deviance" ax.set_xlabel(scale, fontsize=ax_labelsize) ax.set_yticklabels(yticks_labels) ax.set_ylim(-1 + step, 0 - step) ax.tick_params(labelsize=xt_labelsize) return ax
[ "def", "plot_compare", "(", "comp_df", ",", "insample_dev", "=", "True", ",", "plot_standard_error", "=", "True", ",", "plot_ic_diff", "=", "True", ",", "figsize", "=", "None", ",", "textsize", "=", "None", ",", "plot_kwargs", "=", "None", ",", "ax", "=", "None", ",", ")", ":", "if", "figsize", "is", "None", ":", "figsize", "=", "(", "6", ",", "len", "(", "comp_df", ")", ")", "figsize", ",", "ax_labelsize", ",", "_", ",", "xt_labelsize", ",", "linewidth", ",", "_", "=", "_scale_fig_size", "(", "figsize", ",", "textsize", ",", "1", ",", "1", ")", "if", "ax", "is", "None", ":", "_", ",", "ax", "=", "plt", ".", "subplots", "(", "figsize", "=", "figsize", ",", "constrained_layout", "=", "True", ")", "if", "plot_kwargs", "is", "None", ":", "plot_kwargs", "=", "{", "}", "yticks_pos", ",", "step", "=", "np", ".", "linspace", "(", "0", ",", "-", "1", ",", "(", "comp_df", ".", "shape", "[", "0", "]", "*", "2", ")", "-", "1", ",", "retstep", "=", "True", ")", "yticks_pos", "[", "1", ":", ":", "2", "]", "=", "yticks_pos", "[", "1", ":", ":", "2", "]", "+", "step", "/", "2", "yticks_labels", "=", "[", "\"\"", "]", "*", "len", "(", "yticks_pos", ")", "_information_criterion", "=", "[", "\"waic\"", ",", "\"loo\"", "]", "for", "information_criterion", "in", "_information_criterion", ":", "if", "information_criterion", "in", "comp_df", ".", "columns", ":", "break", "else", ":", "raise", "ValueError", "(", "\"comp_df must contain one of the following\"", "\" information criterion: {}\"", ".", "format", "(", "_information_criterion", ")", ")", "if", "plot_ic_diff", ":", "yticks_labels", "[", "0", "]", "=", "comp_df", ".", "index", "[", "0", "]", "yticks_labels", "[", "2", ":", ":", "2", "]", "=", "comp_df", ".", "index", "[", "1", ":", "]", "ax", ".", "set_yticks", "(", "yticks_pos", ")", "ax", ".", "errorbar", "(", "x", "=", "comp_df", "[", "information_criterion", "]", ".", "iloc", "[", "1", ":", "]", ",", "y", "=", "yticks_pos", "[", "1", ":", ":", "2", "]", ",", "xerr", "=", "comp_df", ".", "dse", "[", "1", ":", "]", ",", "color", "=", "plot_kwargs", ".", "get", "(", "\"color_dse\"", ",", "\"grey\"", ")", ",", "fmt", "=", "plot_kwargs", ".", "get", "(", "\"marker_dse\"", ",", "\"^\"", ")", ",", "mew", "=", "linewidth", ",", "elinewidth", "=", "linewidth", ",", ")", "else", ":", "yticks_labels", "=", "comp_df", ".", "index", "ax", ".", "set_yticks", "(", "yticks_pos", "[", ":", ":", "2", "]", ")", "if", "plot_standard_error", ":", "ax", ".", "errorbar", "(", "x", "=", "comp_df", "[", "information_criterion", "]", ",", "y", "=", "yticks_pos", "[", ":", ":", "2", "]", ",", "xerr", "=", "comp_df", ".", "se", ",", "color", "=", "plot_kwargs", ".", "get", "(", "\"color_ic\"", ",", "\"k\"", ")", ",", "fmt", "=", "plot_kwargs", ".", "get", "(", "\"marker_ic\"", ",", "\"o\"", ")", ",", "mfc", "=", "\"None\"", ",", "mew", "=", "linewidth", ",", "lw", "=", "linewidth", ",", ")", "else", ":", "ax", ".", "plot", "(", "comp_df", "[", "information_criterion", "]", ",", "yticks_pos", "[", ":", ":", "2", "]", ",", "color", "=", "plot_kwargs", ".", "get", "(", "\"color_ic\"", ",", "\"k\"", ")", ",", "marker", "=", "plot_kwargs", ".", "get", "(", "\"marker_ic\"", ",", "\"o\"", ")", ",", "mfc", "=", "\"None\"", ",", "mew", "=", "linewidth", ",", "lw", "=", "0", ",", ")", "if", "insample_dev", ":", "ax", ".", "plot", "(", "comp_df", "[", "information_criterion", "]", "-", "(", "2", "*", "comp_df", "[", "\"p_\"", "+", "information_criterion", "]", ")", ",", "yticks_pos", "[", ":", ":", "2", "]", ",", "color", "=", 
"plot_kwargs", ".", "get", "(", "\"color_insample_dev\"", ",", "\"k\"", ")", ",", "marker", "=", "plot_kwargs", ".", "get", "(", "\"marker_insample_dev\"", ",", "\"o\"", ")", ",", "mew", "=", "linewidth", ",", "lw", "=", "0", ",", ")", "ax", ".", "axvline", "(", "comp_df", "[", "information_criterion", "]", ".", "iloc", "[", "0", "]", ",", "ls", "=", "plot_kwargs", ".", "get", "(", "\"ls_min_ic\"", ",", "\"--\"", ")", ",", "color", "=", "plot_kwargs", ".", "get", "(", "\"color_ls_min_ic\"", ",", "\"grey\"", ")", ",", "lw", "=", "linewidth", ",", ")", "scale_col", "=", "information_criterion", "+", "\"_scale\"", "if", "scale_col", "in", "comp_df", ":", "scale", "=", "comp_df", "[", "scale_col", "]", ".", "iloc", "[", "0", "]", ".", "capitalize", "(", ")", "else", ":", "scale", "=", "\"Deviance\"", "ax", ".", "set_xlabel", "(", "scale", ",", "fontsize", "=", "ax_labelsize", ")", "ax", ".", "set_yticklabels", "(", "yticks_labels", ")", "ax", ".", "set_ylim", "(", "-", "1", "+", "step", ",", "0", "-", "step", ")", "ax", ".", "tick_params", "(", "labelsize", "=", "xt_labelsize", ")", "return", "ax" ]
Summary plot for model comparison. This plot is in the style of the one used in the book Statistical Rethinking (Chapter 6) by Richard McElreath. Notes ----- Defaults to comparing Widely Accepted Information Criterion (WAIC) if present in comp_df column, otherwise compares Leave-one-out (loo) Parameters ---------- comp_df: pd.DataFrame Result of the `az.compare()` method insample_dev : bool, optional Plot in-sample deviance, that is the value of the information criteria without the penalization given by the effective number of parameters (pIC). Defaults to True plot_standard_error : bool, optional Plot the standard error of the information criteria estimate. Defaults to True plot_ic_diff : bool, optional Plot standard error of the difference in information criteria between each model and the top-ranked model. Defaults to True figsize : tuple, optional If None, size is (6, num of models) inches textsize: float Text size scaling factor for labels, titles and lines. If None it will be autoscaled based on figsize. plot_kwargs : dict, optional Optional arguments for plot elements. Currently accepts 'color_ic', 'marker_ic', 'color_insample_dev', 'marker_insample_dev', 'color_dse', 'marker_dse', 'ls_min_ic' 'color_ls_min_ic', 'fontsize' ax : axes, optional Matplotlib axes Returns ------- ax : matplotlib axes Examples -------- Show default compare plot .. plot:: :context: close-figs >>> import arviz as az >>> model_compare = az.compare({'Centered 8 schools': az.load_arviz_data('centered_eight'), >>> 'Non-centered 8 schools': az.load_arviz_data('non_centered_eight')}) >>> az.plot_compare(model_compare) Plot standard error and information criteria difference only .. plot:: :context: close-figs >>> az.plot_compare(model_compare, insample_dev=False)
[ "Summary", "plot", "for", "model", "comparison", ".", "This", "plot", "is", "in", "the", "style", "of", "the", "one", "used", "in", "the", "book", "Statistical", "Rethinking", "(", "Chapter", "6", ")", "by", "Richard", "McElreath", ".", "Notes", "-----", "Defaults", "to", "comparing", "Widely", "Accepted", "Information", "Criterion", "(", "WAIC", ")", "if", "present", "in", "comp_df", "column", "otherwise", "compares", "Leave", "-", "one", "-", "out", "(", "loo", ")", "Parameters", "----------", "comp_df", ":", "pd", ".", "DataFrame", "Result", "of", "the", "az", ".", "compare", "()", "method", "insample_dev", ":", "bool", "optional", "Plot", "in", "-", "sample", "deviance", "that", "is", "the", "value", "of", "the", "information", "criteria", "without", "the", "penalization", "given", "by", "the", "effective", "number", "of", "parameters", "(", "pIC", ")", ".", "Defaults", "to", "True", "plot_standard_error", ":", "bool", "optional", "Plot", "the", "standard", "error", "of", "the", "information", "criteria", "estimate", ".", "Defaults", "to", "True", "plot_ic_diff", ":", "bool", "optional", "Plot", "standard", "error", "of", "the", "difference", "in", "information", "criteria", "between", "each", "model", "and", "the", "top", "-", "ranked", "model", ".", "Defaults", "to", "True", "figsize", ":", "tuple", "optional", "If", "None", "size", "is", "(", "6", "num", "of", "models", ")", "inches", "textsize", ":", "float", "Text", "size", "scaling", "factor", "for", "labels", "titles", "and", "lines", ".", "If", "None", "it", "will", "be", "autoscaled", "based", "on", "figsize", ".", "plot_kwargs", ":", "dict", "optional", "Optional", "arguments", "for", "plot", "elements", ".", "Currently", "accepts", "color_ic", "marker_ic", "color_insample_dev", "marker_insample_dev", "color_dse", "marker_dse", "ls_min_ic", "color_ls_min_ic", "fontsize", "ax", ":", "axes", "optional", "Matplotlib", "axes", "Returns", "-------", "ax", ":", "matplotlib", "axes", "Examples", "--------", "Show", "default", "compare", "plot", "..", "plot", "::", ":", "context", ":", "close", "-", "figs", ">>>", "import", "arviz", "as", "az", ">>>", "model_compare", "=", "az", ".", "compare", "(", "{", "Centered", "8", "schools", ":", "az", ".", "load_arviz_data", "(", "centered_eight", ")", ">>>", "Non", "-", "centered", "8", "schools", ":", "az", ".", "load_arviz_data", "(", "non_centered_eight", ")", "}", ")", ">>>", "az", ".", "plot_compare", "(", "model_compare", ")", "Plot", "standard", "error", "and", "information", "criteria", "difference", "only", "..", "plot", "::", ":", "context", ":", "close", "-", "figs", ">>>", "az", ".", "plot_compare", "(", "model_compare", "insample_dev", "=", "False", ")" ]
python
train
boriel/zxbasic
arch/zx48k/translator.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/translator.py#L217-L225
def norm_attr(self): """ Normalize attr state """ if not self.HAS_ATTR: return self.HAS_ATTR = False self.emit('call', 'COPY_ATTR', 0) backend.REQUIRES.add('copy_attr.asm')
[ "def", "norm_attr", "(", "self", ")", ":", "if", "not", "self", ".", "HAS_ATTR", ":", "return", "self", ".", "HAS_ATTR", "=", "False", "self", ".", "emit", "(", "'call'", ",", "'COPY_ATTR'", ",", "0", ")", "backend", ".", "REQUIRES", ".", "add", "(", "'copy_attr.asm'", ")" ]
Normalize attr state
[ "Normalize", "attr", "state" ]
python
train
CodyKochmann/stricttuple
stricttuple/__init__.py
https://github.com/CodyKochmann/stricttuple/blob/072cbd6f7b90f3f666dc0f2c10ab6056d86dfc72/stricttuple/__init__.py#L40-L55
def shorten(string, max_length=80, trailing_chars=3): ''' trims the 'string' argument down to 'max_length' to make previews to long string values ''' assert type(string).__name__ in {'str', 'unicode'}, 'shorten needs string to be a string, not {}'.format(type(string)) assert type(max_length) == int, 'shorten needs max_length to be an int, not {}'.format(type(max_length)) assert type(trailing_chars) == int, 'shorten needs trailing_chars to be an int, not {}'.format(type(trailing_chars)) assert max_length > 0, 'shorten needs max_length to be positive, not {}'.format(max_length) assert trailing_chars >= 0, 'shorten needs trailing_chars to be greater than or equal to 0, not {}'.format(trailing_chars) return ( string ) if len(string) <= max_length else ( '{before:}...{after:}'.format( before=string[:max_length-(trailing_chars+3)], after=string[-trailing_chars:] if trailing_chars>0 else '' ) )
[ "def", "shorten", "(", "string", ",", "max_length", "=", "80", ",", "trailing_chars", "=", "3", ")", ":", "assert", "type", "(", "string", ")", ".", "__name__", "in", "{", "'str'", ",", "'unicode'", "}", ",", "'shorten needs string to be a string, not {}'", ".", "format", "(", "type", "(", "string", ")", ")", "assert", "type", "(", "max_length", ")", "==", "int", ",", "'shorten needs max_length to be an int, not {}'", ".", "format", "(", "type", "(", "max_length", ")", ")", "assert", "type", "(", "trailing_chars", ")", "==", "int", ",", "'shorten needs trailing_chars to be an int, not {}'", ".", "format", "(", "type", "(", "trailing_chars", ")", ")", "assert", "max_length", ">", "0", ",", "'shorten needs max_length to be positive, not {}'", ".", "format", "(", "max_length", ")", "assert", "trailing_chars", ">=", "0", ",", "'shorten needs trailing_chars to be greater than or equal to 0, not {}'", ".", "format", "(", "trailing_chars", ")", "return", "(", "string", ")", "if", "len", "(", "string", ")", "<=", "max_length", "else", "(", "'{before:}...{after:}'", ".", "format", "(", "before", "=", "string", "[", ":", "max_length", "-", "(", "trailing_chars", "+", "3", ")", "]", ",", "after", "=", "string", "[", "-", "trailing_chars", ":", "]", "if", "trailing_chars", ">", "0", "else", "''", ")", ")" ]
trims the 'string' argument down to 'max_length' to make previews of long string values
[ "trims", "the", "string", "argument", "down", "to", "max_length", "to", "make", "previews", "of", "long", "string", "values" ]
python
train
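A minimal usage sketch of the shorten() helper above, assuming the stricttuple package is importable; the sample string and lengths are illustrative only.

from stricttuple import shorten

preview = shorten("abcdefghijklmnopqrstuvwxyz", max_length=10, trailing_chars=3)
# keeps the first 4 characters, inserts '...', then keeps the last 3 characters
print(preview)  # -> 'abcd...xyz' (exactly 10 characters)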
bfrog/whizzer
whizzer/protocol.py
https://github.com/bfrog/whizzer/blob/a1e43084b3ac8c1f3fb4ada081777cdbf791fd77/whizzer/protocol.py#L27-L31
def make_connection(self, transport, address): """Called externally when the transport is ready.""" self.connected = True self.transport = transport self.connection_made(address)
[ "def", "make_connection", "(", "self", ",", "transport", ",", "address", ")", ":", "self", ".", "connected", "=", "True", "self", ".", "transport", "=", "transport", "self", ".", "connection_made", "(", "address", ")" ]
Called externally when the transport is ready.
[ "Called", "externally", "when", "the", "transport", "is", "ready", "." ]
python
train
peerplays-network/python-peerplays
peerplays/cli/account.py
https://github.com/peerplays-network/python-peerplays/blob/188f04238e7e21d5f73e9b01099eea44289ef6b7/peerplays/cli/account.py#L56-L63
def disallow(ctx, foreign_account, permission, threshold, account): """ Remove a key/account from an account's permission """ pprint( ctx.peerplays.disallow( foreign_account, account=account, permission=permission, threshold=threshold ) )
[ "def", "disallow", "(", "ctx", ",", "foreign_account", ",", "permission", ",", "threshold", ",", "account", ")", ":", "pprint", "(", "ctx", ".", "peerplays", ".", "disallow", "(", "foreign_account", ",", "account", "=", "account", ",", "permission", "=", "permission", ",", "threshold", "=", "threshold", ")", ")" ]
Remove a key/account from an account's permission
[ "Remove", "a", "key", "/", "account", "from", "an", "account", "s", "permission" ]
python
train
openp2pdesign/makerlabs
makerlabs/fablabs_io.py
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/fablabs_io.py#L82-L172
def get_labs(format): """Gets Fab Lab data from fablabs.io.""" fablabs_json = data_from_fablabs_io(fablabs_io_labs_api_url_v0) fablabs = {} # Load all the FabLabs for i in fablabs_json["labs"]: current_lab = FabLab() current_lab.name = i["name"] current_lab.address_1 = i["address_1"] current_lab.address_2 = i["address_2"] current_lab.address_notes = i["address_notes"] current_lab.avatar = i["avatar_url"] current_lab.blurb = i["blurb"] current_lab.capabilities = i["capabilities"] if i["city"].isupper(): i["city"] = i["city"].title() current_lab.city = i["city"] current_lab.country_code = i["country_code"] current_lab.county = i["county"] current_lab.description = i["description"] current_lab.email = i["email"] current_lab.id = i["id"] current_lab.phone = i["phone"] current_lab.postal_code = i["postal_code"] current_lab.slug = i["slug"] current_lab.url = i["url"] current_lab.continent = country_alpha2_to_continent_code(i["country_code"].upper()) current_country = pycountry.countries.get(alpha_2=i["country_code"].upper()) current_lab.country_code = current_country.alpha_3 current_lab.country = current_country.name # Check coordinates if i["longitude"] is not None: current_lab.longitude = i["longitude"] else: current_lab.longitude = 0.0 if i["latitude"] is not None: current_lab.latitude = i["latitude"] else: current_lab.latitude = 0.0 # Find Facebook and Twitter links, add also the other ones current_lab.links = {"facebook": "", "twitter": ""} for link in i["links"]: if "facebook" in link["url"]: current_lab.links["facebook"] = link["url"] elif "twitter" in link["url"]: current_lab.links["twitter"] = link["url"] else: current_lab.links[link["id"]] = link["url"] # Add the lab to the list fablabs[i["slug"]] = current_lab # Return a dictiornary / json if format.lower() == "dict" or format.lower() == "json": output = {} for j in fablabs: output[j] = fablabs[j].__dict__ # Return a geojson elif format.lower() == "geojson" or format.lower() == "geo": labs_list = [] for l in fablabs: single = fablabs[l].__dict__ single_lab = Feature( type="Feature", geometry=Point((single["latitude"], single["longitude"])), properties=single) labs_list.append(single_lab) output = dumps(FeatureCollection(labs_list)) # Return a Pandas DataFrame elif format.lower() == "pandas" or format.lower() == "dataframe": output = {} for j in fablabs: output[j] = fablabs[j].__dict__ # Transform the dict into a Pandas DataFrame output = pd.DataFrame.from_dict(output) output = output.transpose() # Return an object elif format.lower() == "object" or format.lower() == "obj": output = fablabs # Default: return an oject else: output = fablabs # Return a proper json if format.lower() == "json": output = json.dumps(output) return output
[ "def", "get_labs", "(", "format", ")", ":", "fablabs_json", "=", "data_from_fablabs_io", "(", "fablabs_io_labs_api_url_v0", ")", "fablabs", "=", "{", "}", "# Load all the FabLabs", "for", "i", "in", "fablabs_json", "[", "\"labs\"", "]", ":", "current_lab", "=", "FabLab", "(", ")", "current_lab", ".", "name", "=", "i", "[", "\"name\"", "]", "current_lab", ".", "address_1", "=", "i", "[", "\"address_1\"", "]", "current_lab", ".", "address_2", "=", "i", "[", "\"address_2\"", "]", "current_lab", ".", "address_notes", "=", "i", "[", "\"address_notes\"", "]", "current_lab", ".", "avatar", "=", "i", "[", "\"avatar_url\"", "]", "current_lab", ".", "blurb", "=", "i", "[", "\"blurb\"", "]", "current_lab", ".", "capabilities", "=", "i", "[", "\"capabilities\"", "]", "if", "i", "[", "\"city\"", "]", ".", "isupper", "(", ")", ":", "i", "[", "\"city\"", "]", "=", "i", "[", "\"city\"", "]", ".", "title", "(", ")", "current_lab", ".", "city", "=", "i", "[", "\"city\"", "]", "current_lab", ".", "country_code", "=", "i", "[", "\"country_code\"", "]", "current_lab", ".", "county", "=", "i", "[", "\"county\"", "]", "current_lab", ".", "description", "=", "i", "[", "\"description\"", "]", "current_lab", ".", "email", "=", "i", "[", "\"email\"", "]", "current_lab", ".", "id", "=", "i", "[", "\"id\"", "]", "current_lab", ".", "phone", "=", "i", "[", "\"phone\"", "]", "current_lab", ".", "postal_code", "=", "i", "[", "\"postal_code\"", "]", "current_lab", ".", "slug", "=", "i", "[", "\"slug\"", "]", "current_lab", ".", "url", "=", "i", "[", "\"url\"", "]", "current_lab", ".", "continent", "=", "country_alpha2_to_continent_code", "(", "i", "[", "\"country_code\"", "]", ".", "upper", "(", ")", ")", "current_country", "=", "pycountry", ".", "countries", ".", "get", "(", "alpha_2", "=", "i", "[", "\"country_code\"", "]", ".", "upper", "(", ")", ")", "current_lab", ".", "country_code", "=", "current_country", ".", "alpha_3", "current_lab", ".", "country", "=", "current_country", ".", "name", "# Check coordinates", "if", "i", "[", "\"longitude\"", "]", "is", "not", "None", ":", "current_lab", ".", "longitude", "=", "i", "[", "\"longitude\"", "]", "else", ":", "current_lab", ".", "longitude", "=", "0.0", "if", "i", "[", "\"latitude\"", "]", "is", "not", "None", ":", "current_lab", ".", "latitude", "=", "i", "[", "\"latitude\"", "]", "else", ":", "current_lab", ".", "latitude", "=", "0.0", "# Find Facebook and Twitter links, add also the other ones", "current_lab", ".", "links", "=", "{", "\"facebook\"", ":", "\"\"", ",", "\"twitter\"", ":", "\"\"", "}", "for", "link", "in", "i", "[", "\"links\"", "]", ":", "if", "\"facebook\"", "in", "link", "[", "\"url\"", "]", ":", "current_lab", ".", "links", "[", "\"facebook\"", "]", "=", "link", "[", "\"url\"", "]", "elif", "\"twitter\"", "in", "link", "[", "\"url\"", "]", ":", "current_lab", ".", "links", "[", "\"twitter\"", "]", "=", "link", "[", "\"url\"", "]", "else", ":", "current_lab", ".", "links", "[", "link", "[", "\"id\"", "]", "]", "=", "link", "[", "\"url\"", "]", "# Add the lab to the list", "fablabs", "[", "i", "[", "\"slug\"", "]", "]", "=", "current_lab", "# Return a dictiornary / json", "if", "format", ".", "lower", "(", ")", "==", "\"dict\"", "or", "format", ".", "lower", "(", ")", "==", "\"json\"", ":", "output", "=", "{", "}", "for", "j", "in", "fablabs", ":", "output", "[", "j", "]", "=", "fablabs", "[", "j", "]", ".", "__dict__", "# Return a geojson", "elif", "format", ".", "lower", "(", ")", "==", "\"geojson\"", "or", "format", ".", "lower", "(", ")", "==", 
"\"geo\"", ":", "labs_list", "=", "[", "]", "for", "l", "in", "fablabs", ":", "single", "=", "fablabs", "[", "l", "]", ".", "__dict__", "single_lab", "=", "Feature", "(", "type", "=", "\"Feature\"", ",", "geometry", "=", "Point", "(", "(", "single", "[", "\"latitude\"", "]", ",", "single", "[", "\"longitude\"", "]", ")", ")", ",", "properties", "=", "single", ")", "labs_list", ".", "append", "(", "single_lab", ")", "output", "=", "dumps", "(", "FeatureCollection", "(", "labs_list", ")", ")", "# Return a Pandas DataFrame", "elif", "format", ".", "lower", "(", ")", "==", "\"pandas\"", "or", "format", ".", "lower", "(", ")", "==", "\"dataframe\"", ":", "output", "=", "{", "}", "for", "j", "in", "fablabs", ":", "output", "[", "j", "]", "=", "fablabs", "[", "j", "]", ".", "__dict__", "# Transform the dict into a Pandas DataFrame", "output", "=", "pd", ".", "DataFrame", ".", "from_dict", "(", "output", ")", "output", "=", "output", ".", "transpose", "(", ")", "# Return an object", "elif", "format", ".", "lower", "(", ")", "==", "\"object\"", "or", "format", ".", "lower", "(", ")", "==", "\"obj\"", ":", "output", "=", "fablabs", "# Default: return an oject", "else", ":", "output", "=", "fablabs", "# Return a proper json", "if", "format", ".", "lower", "(", ")", "==", "\"json\"", ":", "output", "=", "json", ".", "dumps", "(", "output", ")", "return", "output" ]
Gets Fab Lab data from fablabs.io.
[ "Gets", "Fab", "Lab", "data", "from", "fablabs", ".", "io", "." ]
python
train
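A hedged usage sketch for get_labs() above; it assumes the makerlabs package is installed and that fablabs.io is reachable over the network, since the function fetches live data.

from makerlabs import fablabs_io

# 'pandas' returns a DataFrame; 'dict', 'json', 'geojson' and 'object' are the
# other formats handled by the function above
labs = fablabs_io.get_labs(format="pandas")
print(labs.head())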
yyuu/botornado
boto/sqs/connection.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/sqs/connection.py#L339-L354
def remove_permission(self, queue, label): """ Remove a permission from a queue. :type queue: :class:`boto.sqs.queue.Queue` :param queue: The queue object :type label: str or unicode :param label: The unique label associated with the permission being removed. :rtype: bool :return: True if successful, False otherwise. """ params = {'Label': label} return self.get_status('RemovePermission', params, queue.id)
[ "def", "remove_permission", "(", "self", ",", "queue", ",", "label", ")", ":", "params", "=", "{", "'Label'", ":", "label", "}", "return", "self", ".", "get_status", "(", "'RemovePermission'", ",", "params", ",", "queue", ".", "id", ")" ]
Remove a permission from a queue. :type queue: :class:`boto.sqs.queue.Queue` :param queue: The queue object :type label: str or unicode :param label: The unique label associated with the permission being removed. :rtype: bool :return: True if successful, False otherwise.
[ "Remove", "a", "permission", "from", "a", "queue", "." ]
python
train
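A hedged usage sketch for remove_permission() above, written against the classic boto-style SQS API that this vendored module mirrors; the queue name, permission label and credentials setup are placeholders.

import boto

conn = boto.connect_sqs()                     # relies on AWS credentials from the environment
queue = conn.get_queue('my-example-queue')    # hypothetical queue name
if queue is not None:
    removed = conn.remove_permission(queue, 'my-permission-label')
    print(removed)  # True on success, False otherwise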
thisfred/val
val/_val.py
https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L380-L387
def _validated(self, values): """Validate if the values are validated one by one in order.""" if self.length != len(values): raise NotValid( "%r does not have exactly %d values. (Got %d.)" % ( values, self.length, len(values))) return type(self.schemas)( self.schemas[i].validate(v) for i, v in enumerate(values))
[ "def", "_validated", "(", "self", ",", "values", ")", ":", "if", "self", ".", "length", "!=", "len", "(", "values", ")", ":", "raise", "NotValid", "(", "\"%r does not have exactly %d values. (Got %d.)\"", "%", "(", "values", ",", "self", ".", "length", ",", "len", "(", "values", ")", ")", ")", "return", "type", "(", "self", ".", "schemas", ")", "(", "self", ".", "schemas", "[", "i", "]", ".", "validate", "(", "v", ")", "for", "i", ",", "v", "in", "enumerate", "(", "values", ")", ")" ]
Validate if the values are validated one by one in order.
[ "Validate", "if", "the", "values", "are", "validated", "one", "by", "one", "in", "order", "." ]
python
train
FutunnOpen/futuquant
futuquant/examples/TinyQuant/TinyQuantBase.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/examples/TinyQuant/TinyQuantBase.py#L179-L187
def boll(self, n, dev, array=False): """Bollinger Bands""" mid = self.sma(n, array) std = self.std(n, array) up = mid + std * dev down = mid - std * dev return up, down
[ "def", "boll", "(", "self", ",", "n", ",", "dev", ",", "array", "=", "False", ")", ":", "mid", "=", "self", ".", "sma", "(", "n", ",", "array", ")", "std", "=", "self", ".", "std", "(", "n", ",", "array", ")", "up", "=", "mid", "+", "std", "*", "dev", "down", "=", "mid", "-", "std", "*", "dev", "return", "up", ",", "down" ]
Bollinger Bands
[ "Bollinger", "Bands" ]
python
train
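Since the method above delegates to self.sma()/self.std() over the bar manager's history, here is a standalone numpy sketch of the same Bollinger Band arithmetic; the closing prices, window and deviation factor are made-up values.

import numpy as np

closes = np.array([10.0, 10.5, 10.2, 10.8, 11.0, 10.9, 11.2, 11.1, 11.4, 11.3])
n, dev = 5, 2
mid = closes[-n:].mean()                  # middle band: n-period simple moving average
std = closes[-n:].std()                   # n-period standard deviation
up, down = mid + std * dev, mid - std * dev
print(down, mid, up)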
ngmarchant/oasis
oasis/oasis.py
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/oasis.py#L350-L368
def _sample_item(self, **kwargs): """Sample an item from the pool according to the instrumental distribution """ t = self.t_ # Update instrumental distribution self._calc_inst_pmf() if self.record_inst_hist: inst_pmf = self._inst_pmf[:,t] else: inst_pmf = self._inst_pmf # Sample label and record weight loc, stratum_idx = self.strata.sample(pmf = inst_pmf) weight = self.strata.weights_[stratum_idx]/inst_pmf[stratum_idx] return loc, weight, {'stratum': stratum_idx}
[ "def", "_sample_item", "(", "self", ",", "*", "*", "kwargs", ")", ":", "t", "=", "self", ".", "t_", "# Update instrumental distribution", "self", ".", "_calc_inst_pmf", "(", ")", "if", "self", ".", "record_inst_hist", ":", "inst_pmf", "=", "self", ".", "_inst_pmf", "[", ":", ",", "t", "]", "else", ":", "inst_pmf", "=", "self", ".", "_inst_pmf", "# Sample label and record weight", "loc", ",", "stratum_idx", "=", "self", ".", "strata", ".", "sample", "(", "pmf", "=", "inst_pmf", ")", "weight", "=", "self", ".", "strata", ".", "weights_", "[", "stratum_idx", "]", "/", "inst_pmf", "[", "stratum_idx", "]", "return", "loc", ",", "weight", ",", "{", "'stratum'", ":", "stratum_idx", "}" ]
Sample an item from the pool according to the instrumental distribution
[ "Sample", "an", "item", "from", "the", "pool", "according", "to", "the", "instrumental", "distribution" ]
python
train
tanghaibao/jcvi
jcvi/formats/agp.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/agp.py#L269-L275
def getNorthSouthClone(self, i): """ Returns the adjacent clone name from both sides. """ north = self.getAdjacentClone(i, south=False) south = self.getAdjacentClone(i) return north, south
[ "def", "getNorthSouthClone", "(", "self", ",", "i", ")", ":", "north", "=", "self", ".", "getAdjacentClone", "(", "i", ",", "south", "=", "False", ")", "south", "=", "self", ".", "getAdjacentClone", "(", "i", ")", "return", "north", ",", "south" ]
Returns the adjacent clone name from both sides.
[ "Returns", "the", "adjacent", "clone", "name", "from", "both", "sides", "." ]
python
train
lmjohns3/theanets
theanets/feedforward.py
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/feedforward.py#L155-L178
def decode(self, z, layer=None, **kwargs): '''Decode an encoded dataset by computing the output layer activation. Parameters ---------- z : ndarray A matrix containing encoded data from this autoencoder. layer : int or str or :class:`Layer <layers.Layer>`, optional The index or name of the hidden layer that was used to encode `z`. Returns ------- decoded : ndarray The decoded dataset. ''' key = self._find_output(layer) if key not in self._functions: regs = regularizers.from_kwargs(self, **kwargs) outputs, updates = self.build_graph(regs) self._functions[key] = theano.function( [outputs[key]], [outputs[self.layers[-1].output_name]], updates=updates) return self._functions[key](z)[0]
[ "def", "decode", "(", "self", ",", "z", ",", "layer", "=", "None", ",", "*", "*", "kwargs", ")", ":", "key", "=", "self", ".", "_find_output", "(", "layer", ")", "if", "key", "not", "in", "self", ".", "_functions", ":", "regs", "=", "regularizers", ".", "from_kwargs", "(", "self", ",", "*", "*", "kwargs", ")", "outputs", ",", "updates", "=", "self", ".", "build_graph", "(", "regs", ")", "self", ".", "_functions", "[", "key", "]", "=", "theano", ".", "function", "(", "[", "outputs", "[", "key", "]", "]", ",", "[", "outputs", "[", "self", ".", "layers", "[", "-", "1", "]", ".", "output_name", "]", "]", ",", "updates", "=", "updates", ")", "return", "self", ".", "_functions", "[", "key", "]", "(", "z", ")", "[", "0", "]" ]
Decode an encoded dataset by computing the output layer activation. Parameters ---------- z : ndarray A matrix containing encoded data from this autoencoder. layer : int or str or :class:`Layer <layers.Layer>`, optional The index or name of the hidden layer that was used to encode `z`. Returns ------- decoded : ndarray The decoded dataset.
[ "Decode", "an", "encoded", "dataset", "by", "computing", "the", "output", "layer", "activation", "." ]
python
test
pyroscope/pyrocore
docs/examples/rt-down-stats.py
https://github.com/pyroscope/pyrocore/blob/89ad01346a570943d20311a0b488440975876612/docs/examples/rt-down-stats.py#L18-L21
def disk_free(path): """Return free bytes on partition holding `path`.""" stats = os.statvfs(path) return stats.f_bavail * stats.f_frsize
[ "def", "disk_free", "(", "path", ")", ":", "stats", "=", "os", ".", "statvfs", "(", "path", ")", "return", "stats", ".", "f_bavail", "*", "stats", ".", "f_frsize" ]
Return free bytes on partition holding `path`.
[ "Return", "free", "bytes", "on", "partition", "holding", "path", "." ]
python
train
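A self-contained sketch of the same statvfs-based check; os.statvfs is POSIX-only and the '/' path is just an example mount point.

import os

def disk_free(path):
    """Return free bytes on the partition holding `path`."""
    stats = os.statvfs(path)
    return stats.f_bavail * stats.f_frsize

print("%.1f GiB free" % (disk_free("/") / (1024 ** 3)))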
coleifer/peewee
examples/analytics/reports.py
https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/examples/analytics/reports.py#L99-L108
def top_referrers(self, domain_only=True): """ What domains send us the most traffic? """ referrer = self._referrer_clause(domain_only) return (self.get_query() .select(referrer, fn.Count(PageView.id)) .group_by(referrer) .order_by(fn.Count(PageView.id).desc()) .tuples())
[ "def", "top_referrers", "(", "self", ",", "domain_only", "=", "True", ")", ":", "referrer", "=", "self", ".", "_referrer_clause", "(", "domain_only", ")", "return", "(", "self", ".", "get_query", "(", ")", ".", "select", "(", "referrer", ",", "fn", ".", "Count", "(", "PageView", ".", "id", ")", ")", ".", "group_by", "(", "referrer", ")", ".", "order_by", "(", "fn", ".", "Count", "(", "PageView", ".", "id", ")", ".", "desc", "(", ")", ")", ".", "tuples", "(", ")", ")" ]
What domains send us the most traffic?
[ "What", "domains", "send", "us", "the", "most", "traffic?" ]
python
train
eonpatapon/contrail-api-cli
contrail_api_cli/schema.py
https://github.com/eonpatapon/contrail-api-cli/blob/1571bf523fa054f3d6bf83dba43a224fea173a73/contrail_api_cli/schema.py#L103-L113
def create_schema_from_xsd_directory(directory, version): """Create and fill the schema from a directory which contains xsd files. It calls fill_schema_from_xsd_file for each xsd file found. """ schema = Schema(version) for f in _get_xsd_from_directory(directory): logger.info("Loading schema %s" % f) fill_schema_from_xsd_file(f, schema) return schema
[ "def", "create_schema_from_xsd_directory", "(", "directory", ",", "version", ")", ":", "schema", "=", "Schema", "(", "version", ")", "for", "f", "in", "_get_xsd_from_directory", "(", "directory", ")", ":", "logger", ".", "info", "(", "\"Loading schema %s\"", "%", "f", ")", "fill_schema_from_xsd_file", "(", "f", ",", "schema", ")", "return", "schema" ]
Create and fill the schema from a directory which contains xsd files. It calls fill_schema_from_xsd_file for each xsd file found.
[ "Create", "and", "fill", "the", "schema", "from", "a", "directory", "which", "contains", "xsd", "files", ".", "It", "calls", "fill_schema_from_xsd_file", "for", "each", "xsd", "file", "found", "." ]
python
train
Pipoline/rocket-python
rocketchat/api.py
https://github.com/Pipoline/rocket-python/blob/643ece8a9db106922e019984a859ca04283262ff/rocketchat/api.py#L172-L178
def get_users(self, **kwargs): """ Gets all of the users in the system and their information :param kwargs: :return: """ return GetUsers(settings=self.settings, **kwargs).call(**kwargs)
[ "def", "get_users", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "GetUsers", "(", "settings", "=", "self", ".", "settings", ",", "*", "*", "kwargs", ")", ".", "call", "(", "*", "*", "kwargs", ")" ]
Gets all of the users in the system and their information :param kwargs: :return:
[ "Gets", "all", "of", "the", "users", "in", "the", "system", "and", "their", "information", ":", "param", "kwargs", ":", ":", "return", ":" ]
python
train
tsroten/dragonmapper
dragonmapper/transcriptions.py
https://github.com/tsroten/dragonmapper/blob/68eaf43c32725f4b4923c01284cfc0112079e8ab/dragonmapper/transcriptions.py#L83-L94
def _parse_numbered_syllable(unparsed_syllable): """Return the syllable and tone of a numbered Pinyin syllable.""" tone_number = unparsed_syllable[-1] if not tone_number.isdigit(): syllable, tone = unparsed_syllable, '5' elif tone_number == '0': syllable, tone = unparsed_syllable[:-1], '5' elif tone_number in '12345': syllable, tone = unparsed_syllable[:-1], tone_number else: raise ValueError("Invalid syllable: %s" % unparsed_syllable) return syllable, tone
[ "def", "_parse_numbered_syllable", "(", "unparsed_syllable", ")", ":", "tone_number", "=", "unparsed_syllable", "[", "-", "1", "]", "if", "not", "tone_number", ".", "isdigit", "(", ")", ":", "syllable", ",", "tone", "=", "unparsed_syllable", ",", "'5'", "elif", "tone_number", "==", "'0'", ":", "syllable", ",", "tone", "=", "unparsed_syllable", "[", ":", "-", "1", "]", ",", "'5'", "elif", "tone_number", "in", "'12345'", ":", "syllable", ",", "tone", "=", "unparsed_syllable", "[", ":", "-", "1", "]", ",", "tone_number", "else", ":", "raise", "ValueError", "(", "\"Invalid syllable: %s\"", "%", "unparsed_syllable", ")", "return", "syllable", ",", "tone" ]
Return the syllable and tone of a numbered Pinyin syllable.
[ "Return", "the", "syllable", "and", "tone", "of", "a", "numbered", "Pinyin", "syllable", "." ]
python
train
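An illustrative check of the parsing rules above; importing the leading-underscore helper directly is done only for demonstration and assumes the dragonmapper package is installed.

from dragonmapper.transcriptions import _parse_numbered_syllable

assert _parse_numbered_syllable("ma3") == ("ma", "3")   # explicit tone number
assert _parse_numbered_syllable("ma0") == ("ma", "5")   # tone 0 normalized to neutral tone 5
assert _parse_numbered_syllable("ma") == ("ma", "5")    # missing tone defaults to 5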
senaite/senaite.core
bika/lims/controlpanel/auditlog.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/controlpanel/auditlog.py#L119-L191
def folderitem(self, obj, item, index): """Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item """ # We are using the existing logic from the auditview logview = api.get_view("auditlog", context=obj, request=self.request) # get the last snapshot snapshot = get_last_snapshot(obj) # get the metadata of the last snapshot metadata = get_snapshot_metadata(snapshot) title = obj.Title() url = obj.absolute_url() auditlog_url = "{}/@@auditlog".format(url) # Title item["title"] = title # Link the title to the auditlog of the object item["replace"]["title"] = get_link(auditlog_url, value=title) # Version version = get_snapshot_version(obj, snapshot) item["version"] = version # Modification Date m_date = metadata.get("modified") item["modified"] = logview.to_localized_time(m_date) # Actor actor = metadata.get("actor") item["actor"] = actor # Fullname properties = api.get_user_properties(actor) item["fullname"] = properties.get("fullname", actor) # Roles roles = metadata.get("roles", []) item["roles"] = ", ".join(roles) # Remote Address remote_address = metadata.get("remote_address") item["remote_address"] = remote_address # Action action = metadata.get("action") item["action"] = logview.translate_state(action) # Review State review_state = metadata.get("review_state") item["review_state"] = logview.translate_state(review_state) # get the previous snapshot prev_snapshot = get_snapshot_by_version(obj, version-1) if prev_snapshot: prev_metadata = get_snapshot_metadata(prev_snapshot) prev_review_state = prev_metadata.get("review_state") if prev_review_state != review_state: item["replace"]["review_state"] = "{} &rarr; {}".format( logview.translate_state(prev_review_state), logview.translate_state(review_state)) # Rendered Diff diff = compare_snapshots(snapshot, prev_snapshot) item["diff"] = logview.render_diff(diff) return item
[ "def", "folderitem", "(", "self", ",", "obj", ",", "item", ",", "index", ")", ":", "# We are using the existing logic from the auditview", "logview", "=", "api", ".", "get_view", "(", "\"auditlog\"", ",", "context", "=", "obj", ",", "request", "=", "self", ".", "request", ")", "# get the last snapshot", "snapshot", "=", "get_last_snapshot", "(", "obj", ")", "# get the metadata of the last snapshot", "metadata", "=", "get_snapshot_metadata", "(", "snapshot", ")", "title", "=", "obj", ".", "Title", "(", ")", "url", "=", "obj", ".", "absolute_url", "(", ")", "auditlog_url", "=", "\"{}/@@auditlog\"", ".", "format", "(", "url", ")", "# Title", "item", "[", "\"title\"", "]", "=", "title", "# Link the title to the auditlog of the object", "item", "[", "\"replace\"", "]", "[", "\"title\"", "]", "=", "get_link", "(", "auditlog_url", ",", "value", "=", "title", ")", "# Version", "version", "=", "get_snapshot_version", "(", "obj", ",", "snapshot", ")", "item", "[", "\"version\"", "]", "=", "version", "# Modification Date", "m_date", "=", "metadata", ".", "get", "(", "\"modified\"", ")", "item", "[", "\"modified\"", "]", "=", "logview", ".", "to_localized_time", "(", "m_date", ")", "# Actor", "actor", "=", "metadata", ".", "get", "(", "\"actor\"", ")", "item", "[", "\"actor\"", "]", "=", "actor", "# Fullname", "properties", "=", "api", ".", "get_user_properties", "(", "actor", ")", "item", "[", "\"fullname\"", "]", "=", "properties", ".", "get", "(", "\"fullname\"", ",", "actor", ")", "# Roles", "roles", "=", "metadata", ".", "get", "(", "\"roles\"", ",", "[", "]", ")", "item", "[", "\"roles\"", "]", "=", "\", \"", ".", "join", "(", "roles", ")", "# Remote Address", "remote_address", "=", "metadata", ".", "get", "(", "\"remote_address\"", ")", "item", "[", "\"remote_address\"", "]", "=", "remote_address", "# Action", "action", "=", "metadata", ".", "get", "(", "\"action\"", ")", "item", "[", "\"action\"", "]", "=", "logview", ".", "translate_state", "(", "action", ")", "# Review State", "review_state", "=", "metadata", ".", "get", "(", "\"review_state\"", ")", "item", "[", "\"review_state\"", "]", "=", "logview", ".", "translate_state", "(", "review_state", ")", "# get the previous snapshot", "prev_snapshot", "=", "get_snapshot_by_version", "(", "obj", ",", "version", "-", "1", ")", "if", "prev_snapshot", ":", "prev_metadata", "=", "get_snapshot_metadata", "(", "prev_snapshot", ")", "prev_review_state", "=", "prev_metadata", ".", "get", "(", "\"review_state\"", ")", "if", "prev_review_state", "!=", "review_state", ":", "item", "[", "\"replace\"", "]", "[", "\"review_state\"", "]", "=", "\"{} &rarr; {}\"", ".", "format", "(", "logview", ".", "translate_state", "(", "prev_review_state", ")", ",", "logview", ".", "translate_state", "(", "review_state", ")", ")", "# Rendered Diff", "diff", "=", "compare_snapshots", "(", "snapshot", ",", "prev_snapshot", ")", "item", "[", "\"diff\"", "]", "=", "logview", ".", "render_diff", "(", "diff", ")", "return", "item" ]
Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item
[ "Service", "triggered", "each", "time", "an", "item", "is", "iterated", "in", "folderitems", ".", "The", "use", "of", "this", "service", "prevents", "the", "extra", "-", "loops", "in", "child", "objects", ".", ":", "obj", ":", "the", "instance", "of", "the", "class", "to", "be", "foldered", ":", "item", ":", "dict", "containing", "the", "properties", "of", "the", "object", "to", "be", "used", "by", "the", "template", ":", "index", ":", "current", "index", "of", "the", "item" ]
python
train
google/grr
grr/server/grr_response_server/flow_base.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flow_base.py#L361-L382
def SaveResourceUsage(self, status): """Method to tally resources.""" user_cpu = status.cpu_time_used.user_cpu_time system_cpu = status.cpu_time_used.system_cpu_time self.rdf_flow.cpu_time_used.user_cpu_time += user_cpu self.rdf_flow.cpu_time_used.system_cpu_time += system_cpu self.rdf_flow.network_bytes_sent += status.network_bytes_sent if self.rdf_flow.cpu_limit: user_cpu_total = self.rdf_flow.cpu_time_used.user_cpu_time system_cpu_total = self.rdf_flow.cpu_time_used.system_cpu_time if self.rdf_flow.cpu_limit < (user_cpu_total + system_cpu_total): # We have exceeded our limit, stop this flow. raise flow.FlowError("CPU limit exceeded for {} {}.".format( self.rdf_flow.flow_class_name, self.rdf_flow.flow_id)) if (self.rdf_flow.network_bytes_limit and self.rdf_flow.network_bytes_limit < self.rdf_flow.network_bytes_sent): # We have exceeded our byte limit, stop this flow. raise flow.FlowError("Network bytes limit exceeded {} {}.".format( self.rdf_flow.flow_class_name, self.rdf_flow.flow_id))
[ "def", "SaveResourceUsage", "(", "self", ",", "status", ")", ":", "user_cpu", "=", "status", ".", "cpu_time_used", ".", "user_cpu_time", "system_cpu", "=", "status", ".", "cpu_time_used", ".", "system_cpu_time", "self", ".", "rdf_flow", ".", "cpu_time_used", ".", "user_cpu_time", "+=", "user_cpu", "self", ".", "rdf_flow", ".", "cpu_time_used", ".", "system_cpu_time", "+=", "system_cpu", "self", ".", "rdf_flow", ".", "network_bytes_sent", "+=", "status", ".", "network_bytes_sent", "if", "self", ".", "rdf_flow", ".", "cpu_limit", ":", "user_cpu_total", "=", "self", ".", "rdf_flow", ".", "cpu_time_used", ".", "user_cpu_time", "system_cpu_total", "=", "self", ".", "rdf_flow", ".", "cpu_time_used", ".", "system_cpu_time", "if", "self", ".", "rdf_flow", ".", "cpu_limit", "<", "(", "user_cpu_total", "+", "system_cpu_total", ")", ":", "# We have exceeded our limit, stop this flow.", "raise", "flow", ".", "FlowError", "(", "\"CPU limit exceeded for {} {}.\"", ".", "format", "(", "self", ".", "rdf_flow", ".", "flow_class_name", ",", "self", ".", "rdf_flow", ".", "flow_id", ")", ")", "if", "(", "self", ".", "rdf_flow", ".", "network_bytes_limit", "and", "self", ".", "rdf_flow", ".", "network_bytes_limit", "<", "self", ".", "rdf_flow", ".", "network_bytes_sent", ")", ":", "# We have exceeded our byte limit, stop this flow.", "raise", "flow", ".", "FlowError", "(", "\"Network bytes limit exceeded {} {}.\"", ".", "format", "(", "self", ".", "rdf_flow", ".", "flow_class_name", ",", "self", ".", "rdf_flow", ".", "flow_id", ")", ")" ]
Method to tally resources.
[ "Method", "to", "tally", "resources", "." ]
python
train
ga4gh/ga4gh-server
ga4gh/server/cli/repomanager.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/cli/repomanager.py#L324-L340
def addPhenotypeAssociationSet(self): """ Adds a new phenotype association set to this repo. """ self._openRepo() name = self._args.name if name is None: name = getNameFromPath(self._args.dirPath) dataset = self._repo.getDatasetByName(self._args.datasetName) phenotypeAssociationSet = \ genotype_phenotype.RdfPhenotypeAssociationSet( dataset, name, self._args.dirPath) phenotypeAssociationSet.setAttributes( json.loads(self._args.attributes)) self._updateRepo( self._repo.insertPhenotypeAssociationSet, phenotypeAssociationSet)
[ "def", "addPhenotypeAssociationSet", "(", "self", ")", ":", "self", ".", "_openRepo", "(", ")", "name", "=", "self", ".", "_args", ".", "name", "if", "name", "is", "None", ":", "name", "=", "getNameFromPath", "(", "self", ".", "_args", ".", "dirPath", ")", "dataset", "=", "self", ".", "_repo", ".", "getDatasetByName", "(", "self", ".", "_args", ".", "datasetName", ")", "phenotypeAssociationSet", "=", "genotype_phenotype", ".", "RdfPhenotypeAssociationSet", "(", "dataset", ",", "name", ",", "self", ".", "_args", ".", "dirPath", ")", "phenotypeAssociationSet", ".", "setAttributes", "(", "json", ".", "loads", "(", "self", ".", "_args", ".", "attributes", ")", ")", "self", ".", "_updateRepo", "(", "self", ".", "_repo", ".", "insertPhenotypeAssociationSet", ",", "phenotypeAssociationSet", ")" ]
Adds a new phenotype association set to this repo.
[ "Adds", "a", "new", "phenotype", "association", "set", "to", "this", "repo", "." ]
python
train
mopidy/mopidy-spotify
mopidy_spotify/translator.py
https://github.com/mopidy/mopidy-spotify/blob/77a293088e63a7b4b77bf9409ce57cb14048d18c/mopidy_spotify/translator.py#L205-L225
def sp_search_query(query): """Translate a Mopidy search query to a Spotify search query""" result = [] for (field, values) in query.items(): field = SEARCH_FIELD_MAP.get(field, field) if field is None: continue for value in values: if field == 'year': value = _transform_year(value) if value is not None: result.append('%s:%d' % (field, value)) elif field == 'any': result.append('"%s"' % value) else: result.append('%s:"%s"' % (field, value)) return ' '.join(result)
[ "def", "sp_search_query", "(", "query", ")", ":", "result", "=", "[", "]", "for", "(", "field", ",", "values", ")", "in", "query", ".", "items", "(", ")", ":", "field", "=", "SEARCH_FIELD_MAP", ".", "get", "(", "field", ",", "field", ")", "if", "field", "is", "None", ":", "continue", "for", "value", "in", "values", ":", "if", "field", "==", "'year'", ":", "value", "=", "_transform_year", "(", "value", ")", "if", "value", "is", "not", "None", ":", "result", ".", "append", "(", "'%s:%d'", "%", "(", "field", ",", "value", ")", ")", "elif", "field", "==", "'any'", ":", "result", ".", "append", "(", "'\"%s\"'", "%", "value", ")", "else", ":", "result", ".", "append", "(", "'%s:\"%s\"'", "%", "(", "field", ",", "value", ")", ")", "return", "' '", ".", "join", "(", "result", ")" ]
Translate a Mopidy search query to a Spotify search query
[ "Translate", "a", "Mopidy", "search", "query", "to", "a", "Spotify", "search", "query" ]
python
test
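A hedged usage sketch for sp_search_query() above; the exact translation of field names depends on SEARCH_FIELD_MAP and _transform_year, which are defined elsewhere in the module, so no output is asserted here.

from mopidy_spotify.translator import sp_search_query

query = {'any': ['dancing queen'], 'year': ['1976']}   # Mopidy-style query dict
print(sp_search_query(query))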
sivy/pystatsd
pystatsd/statsd.py
https://github.com/sivy/pystatsd/blob/69e362654c37df28582b12b964901334326620a7/pystatsd/statsd.py#L32-L39
def timing_since(self, stat, start, sample_rate=1): """ Log timing information as the number of microseconds since the provided time float >>> start = time.time() >>> # do stuff >>> statsd_client.timing_since('some.time', start) """ self.timing(stat, int((time.time() - start) * 1000000), sample_rate)
[ "def", "timing_since", "(", "self", ",", "stat", ",", "start", ",", "sample_rate", "=", "1", ")", ":", "self", ".", "timing", "(", "stat", ",", "int", "(", "(", "time", ".", "time", "(", ")", "-", "start", ")", "*", "1000000", ")", ",", "sample_rate", ")" ]
Log timing information as the number of microseconds since the provided time float >>> start = time.time() >>> # do stuff >>> statsd_client.timing_since('some.time', start)
[ "Log", "timing", "information", "as", "the", "number", "of", "microseconds", "since", "the", "provided", "time", "float", ">>>", "start", "=", "time", ".", "time", "()", ">>>", "#", "do", "stuff", ">>>", "statsd_client", ".", "timing_since", "(", "some", ".", "time", "start", ")" ]
python
train
wonambi-python/wonambi
wonambi/scroll_data.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/scroll_data.py#L243-L251
def closeEvent(self, event): """save the name of the last open dataset.""" max_dataset_history = self.value('max_dataset_history') keep_recent_datasets(max_dataset_history, self.info) settings.setValue('window/geometry', self.saveGeometry()) settings.setValue('window/state', self.saveState()) event.accept()
[ "def", "closeEvent", "(", "self", ",", "event", ")", ":", "max_dataset_history", "=", "self", ".", "value", "(", "'max_dataset_history'", ")", "keep_recent_datasets", "(", "max_dataset_history", ",", "self", ".", "info", ")", "settings", ".", "setValue", "(", "'window/geometry'", ",", "self", ".", "saveGeometry", "(", ")", ")", "settings", ".", "setValue", "(", "'window/state'", ",", "self", ".", "saveState", "(", ")", ")", "event", ".", "accept", "(", ")" ]
save the name of the last open dataset.
[ "save", "the", "name", "of", "the", "last", "open", "dataset", "." ]
python
train
sigvaldm/frmt
frmt.py
https://github.com/sigvaldm/frmt/blob/d077af06c83a7a0533ca2218be55ce086df274b7/frmt.py#L28-L49
def format_fit(text, width=None, align='<', suffix="..."): """ Fits a piece of text to ``width`` characters by truncating too long text and padding too short text with spaces. Defaults to terminal width. Truncation is indicated by a customizable suffix. ``align`` specifies the alignment of the contents if it is padded, and can be: * ``<`` - Left aligned (default) * ``^`` - Centered * ``>`` - Right aligned """ if width==None: width = get_terminal_size().columns if len(text)>width: if len(suffix)>width: return suffix[len(suffix)-width:] else: return text[:width-len(suffix)]+suffix else: return "{{:{}{{w}}}}".format(align).format(text,w=width)
[ "def", "format_fit", "(", "text", ",", "width", "=", "None", ",", "align", "=", "'<'", ",", "suffix", "=", "\"...\"", ")", ":", "if", "width", "==", "None", ":", "width", "=", "get_terminal_size", "(", ")", ".", "columns", "if", "len", "(", "text", ")", ">", "width", ":", "if", "len", "(", "suffix", ")", ">", "width", ":", "return", "suffix", "[", "len", "(", "suffix", ")", "-", "width", ":", "]", "else", ":", "return", "text", "[", ":", "width", "-", "len", "(", "suffix", ")", "]", "+", "suffix", "else", ":", "return", "\"{{:{}{{w}}}}\"", ".", "format", "(", "align", ")", ".", "format", "(", "text", ",", "w", "=", "width", ")" ]
Fits a piece of text to ``width`` characters by truncating too long text and padding too short text with spaces. Defaults to terminal width. Truncation is indicated by a customizable suffix. ``align`` specifies the alignment of the contents if it is padded, and can be: * ``<`` - Left aligned (default) * ``^`` - Centered * ``>`` - Right aligned
[ "Fits", "a", "piece", "of", "text", "to", "width", "characters", "by", "truncating", "too", "long", "text", "and", "padding", "too", "short", "text", "with", "spaces", ".", "Defaults", "to", "terminal", "width", ".", "Truncation", "is", "indicated", "by", "a", "customizable", "suffix", ".", "align", "specifies", "the", "alignment", "of", "the", "contents", "if", "it", "is", "padded", "and", "can", "be", ":" ]
python
train
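A minimal usage sketch of format_fit() above, assuming the frmt module is importable; widths are passed explicitly so the terminal size is not consulted.

from frmt import format_fit

print(format_fit("hello world", width=8))     # -> 'hello...' (truncated, suffix appended)
print(format_fit("hi", width=6, align='>'))   # -> '    hi'   (right-aligned within 6 columns)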
ArabellaTech/django-basic-cms
basic_cms/http.py
https://github.com/ArabellaTech/django-basic-cms/blob/863f3c6098606f663994930cd8e7723ad0c07caf/basic_cms/http.py#L54-L78
def pages_view(view): """ Make sure the decorated view gets the essential pages variables. """ def pages_view_decorator(request, *args, **kwargs): # if the current page is already there if(kwargs.get('current_page', False) or kwargs.get('pages_navigation', False)): return view(request, *args, **kwargs) path = kwargs.pop('path', None) lang = kwargs.pop('lang', None) if path: from basic_cms.views import details response = details(request, path=path, lang=lang, only_context=True, delegation=False) context = response extra_context_var = kwargs.pop('extra_context_var', None) if extra_context_var: kwargs.update({extra_context_var: context}) else: kwargs.update(context) return view(request, *args, **kwargs) return pages_view_decorator
[ "def", "pages_view", "(", "view", ")", ":", "def", "pages_view_decorator", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# if the current page is already there", "if", "(", "kwargs", ".", "get", "(", "'current_page'", ",", "False", ")", "or", "kwargs", ".", "get", "(", "'pages_navigation'", ",", "False", ")", ")", ":", "return", "view", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "path", "=", "kwargs", ".", "pop", "(", "'path'", ",", "None", ")", "lang", "=", "kwargs", ".", "pop", "(", "'lang'", ",", "None", ")", "if", "path", ":", "from", "basic_cms", ".", "views", "import", "details", "response", "=", "details", "(", "request", ",", "path", "=", "path", ",", "lang", "=", "lang", ",", "only_context", "=", "True", ",", "delegation", "=", "False", ")", "context", "=", "response", "extra_context_var", "=", "kwargs", ".", "pop", "(", "'extra_context_var'", ",", "None", ")", "if", "extra_context_var", ":", "kwargs", ".", "update", "(", "{", "extra_context_var", ":", "context", "}", ")", "else", ":", "kwargs", ".", "update", "(", "context", ")", "return", "view", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "pages_view_decorator" ]
Make sure the decorated view gets the essential pages variables.
[ "Make", "sure", "the", "decorated", "view", "gets", "the", "essential", "pages", "variables", "." ]
python
train
saltstack/salt
salt/modules/x509.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/x509.py#L183-L206
def _get_csr_extensions(csr): ''' Returns a list of dicts containing the name, value and critical value of any extension contained in a csr object. ''' ret = OrderedDict() csrtempfile = tempfile.NamedTemporaryFile() csrtempfile.write(csr.as_pem()) csrtempfile.flush() csryaml = _parse_openssl_req(csrtempfile.name) csrtempfile.close() if csryaml and 'Requested Extensions' in csryaml['Certificate Request']['Data']: csrexts = csryaml['Certificate Request']['Data']['Requested Extensions'] if not csrexts: return ret for short_name, long_name in six.iteritems(EXT_NAME_MAPPINGS): if long_name in csrexts: csrexts[short_name] = csrexts[long_name] del csrexts[long_name] ret = csrexts return ret
[ "def", "_get_csr_extensions", "(", "csr", ")", ":", "ret", "=", "OrderedDict", "(", ")", "csrtempfile", "=", "tempfile", ".", "NamedTemporaryFile", "(", ")", "csrtempfile", ".", "write", "(", "csr", ".", "as_pem", "(", ")", ")", "csrtempfile", ".", "flush", "(", ")", "csryaml", "=", "_parse_openssl_req", "(", "csrtempfile", ".", "name", ")", "csrtempfile", ".", "close", "(", ")", "if", "csryaml", "and", "'Requested Extensions'", "in", "csryaml", "[", "'Certificate Request'", "]", "[", "'Data'", "]", ":", "csrexts", "=", "csryaml", "[", "'Certificate Request'", "]", "[", "'Data'", "]", "[", "'Requested Extensions'", "]", "if", "not", "csrexts", ":", "return", "ret", "for", "short_name", ",", "long_name", "in", "six", ".", "iteritems", "(", "EXT_NAME_MAPPINGS", ")", ":", "if", "long_name", "in", "csrexts", ":", "csrexts", "[", "short_name", "]", "=", "csrexts", "[", "long_name", "]", "del", "csrexts", "[", "long_name", "]", "ret", "=", "csrexts", "return", "ret" ]
Returns a list of dicts containing the name, value and critical value of any extension contained in a csr object.
[ "Returns", "a", "list", "of", "dicts", "containing", "the", "name", "value", "and", "critical", "value", "of", "any", "extension", "contained", "in", "a", "csr", "object", "." ]
python
train
scnerd/miniutils
miniutils/progress_bar.py
https://github.com/scnerd/miniutils/blob/fe927e26afc5877416dead28dabdf6604387f42c/miniutils/progress_bar.py#L23-L36
def progbar(iterable, *a, verbose=True, **kw): """Prints a progress bar as the iterable is iterated over :param iterable: The iterator to iterate over :param a: Arguments to get passed to tqdm (or tqdm_notebook, if in a Jupyter notebook) :param verbose: Whether or not to print the progress bar at all :param kw: Keyword arguments to get passed to tqdm :return: The iterable that will report a progress bar """ iterable = range(iterable) if isinstance(iterable, int) else iterable if verbose: return _tqdm(iterable, *a, **kw) else: return iterable
[ "def", "progbar", "(", "iterable", ",", "*", "a", ",", "verbose", "=", "True", ",", "*", "*", "kw", ")", ":", "iterable", "=", "range", "(", "iterable", ")", "if", "isinstance", "(", "iterable", ",", "int", ")", "else", "iterable", "if", "verbose", ":", "return", "_tqdm", "(", "iterable", ",", "*", "a", ",", "*", "*", "kw", ")", "else", ":", "return", "iterable" ]
Prints a progress bar as the iterable is iterated over :param iterable: The iterator to iterate over :param a: Arguments to get passed to tqdm (or tqdm_notebook, if in a Jupyter notebook) :param verbose: Whether or not to print the progress bar at all :param kw: Keyword arguments to get passed to tqdm :return: The iterable that will report a progress bar
[ "Prints", "a", "progress", "bar", "as", "the", "iterable", "is", "iterated", "over" ]
python
train
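A hedged usage sketch for progbar() above; it assumes the miniutils package (and tqdm) is installed. Passing an int is expanded to a range, and verbose=False returns the iterable untouched.

from miniutils.progress_bar import progbar

total = 0
for i in progbar(100):                           # shows a tqdm progress bar over range(100)
    total += i
print(total)                                     # 4950

for item in progbar([1, 2, 3], verbose=False):   # no bar is printed in this case
    pass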
pandas-dev/pandas
pandas/plotting/_misc.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_misc.py#L270-L356
def andrews_curves(frame, class_column, ax=None, samples=200, color=None, colormap=None, **kwds): """ Generate a matplotlib plot of Andrews curves, for visualising clusters of multivariate data. Andrews curves have the functional form: f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) + x_4 sin(2t) + x_5 cos(2t) + ... Where x coefficients correspond to the values of each dimension and t is linearly spaced between -pi and +pi. Each row of frame then corresponds to a single curve. Parameters ---------- frame : DataFrame Data to be plotted, preferably normalized to (0.0, 1.0) class_column : Name of the column containing class names ax : matplotlib axes object, default None samples : Number of points to plot in each curve color : list or tuple, optional Colors to use for the different classes colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. kwds : keywords Options to pass to matplotlib plotting method Returns ------- class:`matplotlip.axis.Axes` """ from math import sqrt, pi import matplotlib.pyplot as plt def function(amplitudes): def f(t): x1 = amplitudes[0] result = x1 / sqrt(2.0) # Take the rest of the coefficients and resize them # appropriately. Take a copy of amplitudes as otherwise numpy # deletes the element from amplitudes itself. coeffs = np.delete(np.copy(amplitudes), 0) coeffs.resize(int((coeffs.size + 1) / 2), 2) # Generate the harmonics and arguments for the sin and cos # functions. harmonics = np.arange(0, coeffs.shape[0]) + 1 trig_args = np.outer(harmonics, t) result += np.sum(coeffs[:, 0, np.newaxis] * np.sin(trig_args) + coeffs[:, 1, np.newaxis] * np.cos(trig_args), axis=0) return result return f n = len(frame) class_col = frame[class_column] classes = frame[class_column].drop_duplicates() df = frame.drop(class_column, axis=1) t = np.linspace(-pi, pi, samples) used_legends = set() color_values = _get_standard_colors(num_colors=len(classes), colormap=colormap, color_type='random', color=color) colors = dict(zip(classes, color_values)) if ax is None: ax = plt.gca(xlim=(-pi, pi)) for i in range(n): row = df.iloc[i].values f = function(row) y = f(t) kls = class_col.iat[i] label = pprint_thing(kls) if label not in used_legends: used_legends.add(label) ax.plot(t, y, color=colors[kls], label=label, **kwds) else: ax.plot(t, y, color=colors[kls], **kwds) ax.legend(loc='upper right') ax.grid() return ax
[ "def", "andrews_curves", "(", "frame", ",", "class_column", ",", "ax", "=", "None", ",", "samples", "=", "200", ",", "color", "=", "None", ",", "colormap", "=", "None", ",", "*", "*", "kwds", ")", ":", "from", "math", "import", "sqrt", ",", "pi", "import", "matplotlib", ".", "pyplot", "as", "plt", "def", "function", "(", "amplitudes", ")", ":", "def", "f", "(", "t", ")", ":", "x1", "=", "amplitudes", "[", "0", "]", "result", "=", "x1", "/", "sqrt", "(", "2.0", ")", "# Take the rest of the coefficients and resize them", "# appropriately. Take a copy of amplitudes as otherwise numpy", "# deletes the element from amplitudes itself.", "coeffs", "=", "np", ".", "delete", "(", "np", ".", "copy", "(", "amplitudes", ")", ",", "0", ")", "coeffs", ".", "resize", "(", "int", "(", "(", "coeffs", ".", "size", "+", "1", ")", "/", "2", ")", ",", "2", ")", "# Generate the harmonics and arguments for the sin and cos", "# functions.", "harmonics", "=", "np", ".", "arange", "(", "0", ",", "coeffs", ".", "shape", "[", "0", "]", ")", "+", "1", "trig_args", "=", "np", ".", "outer", "(", "harmonics", ",", "t", ")", "result", "+=", "np", ".", "sum", "(", "coeffs", "[", ":", ",", "0", ",", "np", ".", "newaxis", "]", "*", "np", ".", "sin", "(", "trig_args", ")", "+", "coeffs", "[", ":", ",", "1", ",", "np", ".", "newaxis", "]", "*", "np", ".", "cos", "(", "trig_args", ")", ",", "axis", "=", "0", ")", "return", "result", "return", "f", "n", "=", "len", "(", "frame", ")", "class_col", "=", "frame", "[", "class_column", "]", "classes", "=", "frame", "[", "class_column", "]", ".", "drop_duplicates", "(", ")", "df", "=", "frame", ".", "drop", "(", "class_column", ",", "axis", "=", "1", ")", "t", "=", "np", ".", "linspace", "(", "-", "pi", ",", "pi", ",", "samples", ")", "used_legends", "=", "set", "(", ")", "color_values", "=", "_get_standard_colors", "(", "num_colors", "=", "len", "(", "classes", ")", ",", "colormap", "=", "colormap", ",", "color_type", "=", "'random'", ",", "color", "=", "color", ")", "colors", "=", "dict", "(", "zip", "(", "classes", ",", "color_values", ")", ")", "if", "ax", "is", "None", ":", "ax", "=", "plt", ".", "gca", "(", "xlim", "=", "(", "-", "pi", ",", "pi", ")", ")", "for", "i", "in", "range", "(", "n", ")", ":", "row", "=", "df", ".", "iloc", "[", "i", "]", ".", "values", "f", "=", "function", "(", "row", ")", "y", "=", "f", "(", "t", ")", "kls", "=", "class_col", ".", "iat", "[", "i", "]", "label", "=", "pprint_thing", "(", "kls", ")", "if", "label", "not", "in", "used_legends", ":", "used_legends", ".", "add", "(", "label", ")", "ax", ".", "plot", "(", "t", ",", "y", ",", "color", "=", "colors", "[", "kls", "]", ",", "label", "=", "label", ",", "*", "*", "kwds", ")", "else", ":", "ax", ".", "plot", "(", "t", ",", "y", ",", "color", "=", "colors", "[", "kls", "]", ",", "*", "*", "kwds", ")", "ax", ".", "legend", "(", "loc", "=", "'upper right'", ")", "ax", ".", "grid", "(", ")", "return", "ax" ]
Generate a matplotlib plot of Andrews curves, for visualising clusters
of multivariate data.

Andrews curves have the functional form:

f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +
       x_4 sin(2t) + x_5 cos(2t) + ...

Where x coefficients correspond to the values of each dimension and t is
linearly spaced between -pi and +pi. Each row of frame then corresponds to
a single curve.

Parameters
----------
frame : DataFrame
    Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color : list or tuple, optional
    Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
    Colormap to select colors from. If string, load colormap with that
    name from matplotlib.
kwds : keywords
    Options to pass to matplotlib plotting method

Returns
-------
:class:`matplotlib.axes.Axes`
[ "Generate", "a", "matplotlib", "plot", "of", "Andrews", "curves", "for", "visualising", "clusters", "of", "multivariate", "data", "." ]
python
train
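A minimal usage sketch for the andrews_curves helper above; the iris.csv file and its 'Name' class column are assumptions for illustration, not taken from the record.

import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import andrews_curves

# Assumed input: a CSV whose 'Name' column holds the class labels.
df = pd.read_csv("iris.csv")
ax = andrews_curves(df, "Name", samples=100, colormap="winter")
ax.set_title("Andrews curves")
plt.show()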
cbclab/MOT
mot/optimize/__init__.py
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/optimize/__init__.py#L245-L296
def _minimize_powell(func, x0, cl_runtime_info, lower_bounds, upper_bounds, constraints_func=None, data=None, options=None): """ Options: patience (int): Used to set the maximum number of iterations to patience*(number_of_parameters+1) reset_method (str): one of 'EXTRAPOLATED_POINT' or 'RESET_TO_IDENTITY' lower case or upper case. patience_line_search (int): the patience of the searching algorithm. Defaults to the same patience as for the Powell algorithm itself. """ options = options or {} nmr_problems = x0.shape[0] nmr_parameters = x0.shape[1] penalty_data, penalty_func = _get_penalty_function(nmr_parameters, constraints_func) eval_func = SimpleCLFunction.from_string(''' double evaluate(local mot_float_type* x, void* data){ double penalty = _mle_penalty( x, ((_powell_eval_func_data*)data)->data, ((_powell_eval_func_data*)data)->lower_bounds, ((_powell_eval_func_data*)data)->upper_bounds, ''' + str(options.get('penalty_weight', 1e30)) + ''', ((_powell_eval_func_data*)data)->penalty_data ); double func_val = ''' + func.get_cl_function_name() + '''(x, ((_powell_eval_func_data*)data)->data, 0); if(isnan(func_val)){ return INFINITY; } return func_val + penalty; } ''', dependencies=[func, penalty_func]) optimizer_func = Powell(eval_func, nmr_parameters, **_clean_options('Powell', options)) kernel_data = {'model_parameters': Array(x0, ctype='mot_float_type', mode='rw'), 'data': Struct({'data': data, 'lower_bounds': lower_bounds, 'upper_bounds': upper_bounds, 'penalty_data': penalty_data}, '_powell_eval_func_data')} kernel_data.update(optimizer_func.get_kernel_data()) return_code = optimizer_func.evaluate( kernel_data, nmr_problems, use_local_reduction=all(env.is_gpu for env in cl_runtime_info.cl_environments), cl_runtime_info=cl_runtime_info) return OptimizeResults({'x': kernel_data['model_parameters'].get_data(), 'status': return_code})
[ "def", "_minimize_powell", "(", "func", ",", "x0", ",", "cl_runtime_info", ",", "lower_bounds", ",", "upper_bounds", ",", "constraints_func", "=", "None", ",", "data", "=", "None", ",", "options", "=", "None", ")", ":", "options", "=", "options", "or", "{", "}", "nmr_problems", "=", "x0", ".", "shape", "[", "0", "]", "nmr_parameters", "=", "x0", ".", "shape", "[", "1", "]", "penalty_data", ",", "penalty_func", "=", "_get_penalty_function", "(", "nmr_parameters", ",", "constraints_func", ")", "eval_func", "=", "SimpleCLFunction", ".", "from_string", "(", "'''\n double evaluate(local mot_float_type* x, void* data){\n double penalty = _mle_penalty(\n x, \n ((_powell_eval_func_data*)data)->data,\n ((_powell_eval_func_data*)data)->lower_bounds,\n ((_powell_eval_func_data*)data)->upper_bounds,\n '''", "+", "str", "(", "options", ".", "get", "(", "'penalty_weight'", ",", "1e30", ")", ")", "+", "''', \n ((_powell_eval_func_data*)data)->penalty_data\n );\n \n double func_val = '''", "+", "func", ".", "get_cl_function_name", "(", ")", "+", "'''(x, ((_powell_eval_func_data*)data)->data, 0);\n \n if(isnan(func_val)){\n return INFINITY;\n }\n \n return func_val + penalty;\n }\n '''", ",", "dependencies", "=", "[", "func", ",", "penalty_func", "]", ")", "optimizer_func", "=", "Powell", "(", "eval_func", ",", "nmr_parameters", ",", "*", "*", "_clean_options", "(", "'Powell'", ",", "options", ")", ")", "kernel_data", "=", "{", "'model_parameters'", ":", "Array", "(", "x0", ",", "ctype", "=", "'mot_float_type'", ",", "mode", "=", "'rw'", ")", ",", "'data'", ":", "Struct", "(", "{", "'data'", ":", "data", ",", "'lower_bounds'", ":", "lower_bounds", ",", "'upper_bounds'", ":", "upper_bounds", ",", "'penalty_data'", ":", "penalty_data", "}", ",", "'_powell_eval_func_data'", ")", "}", "kernel_data", ".", "update", "(", "optimizer_func", ".", "get_kernel_data", "(", ")", ")", "return_code", "=", "optimizer_func", ".", "evaluate", "(", "kernel_data", ",", "nmr_problems", ",", "use_local_reduction", "=", "all", "(", "env", ".", "is_gpu", "for", "env", "in", "cl_runtime_info", ".", "cl_environments", ")", ",", "cl_runtime_info", "=", "cl_runtime_info", ")", "return", "OptimizeResults", "(", "{", "'x'", ":", "kernel_data", "[", "'model_parameters'", "]", ".", "get_data", "(", ")", ",", "'status'", ":", "return_code", "}", ")" ]
Options:
    patience (int): Used to set the maximum number of iterations to patience*(number_of_parameters+1)
    reset_method (str): one of 'EXTRAPOLATED_POINT' or 'RESET_TO_IDENTITY' (lower or upper case).
    patience_line_search (int): the patience of the line search. Defaults to the same patience
        as for the Powell algorithm itself.
[ "Options", ":", "patience", "(", "int", ")", ":", "Used", "to", "set", "the", "maximum", "number", "of", "iterations", "to", "patience", "*", "(", "number_of_parameters", "+", "1", ")", "reset_method", "(", "str", ")", ":", "one", "of", "EXTRAPOLATED_POINT", "or", "RESET_TO_IDENTITY", "lower", "case", "or", "upper", "case", ".", "patience_line_search", "(", "int", ")", ":", "the", "patience", "of", "the", "searching", "algorithm", ".", "Defaults", "to", "the", "same", "patience", "as", "for", "the", "Powell", "algorithm", "itself", "." ]
python
train
NASA-AMMOS/AIT-Core
ait/core/cfg.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/cfg.py#L362-L388
def get (self, name, default=None): """Returns the attribute value *AitConfig.name* or *default* if name does not exist. The name may be a series of attributes separated periods. For example, "foo.bar.baz". In that case, lookups are attempted in the following order until one succeeeds: 1. AitConfig['foo.bar.baz'], and 2. AitConfig.foo.bar.baz 3. (If both fail, return *default*) """ if name in self: return self[name] config = self parts = name.split('.') heads = parts[:-1] tail = parts[-1] for part in heads: if part in config and type(config[part]) is AitConfig: config = config[part] else: return default return config[tail] if tail in config else default
[ "def", "get", "(", "self", ",", "name", ",", "default", "=", "None", ")", ":", "if", "name", "in", "self", ":", "return", "self", "[", "name", "]", "config", "=", "self", "parts", "=", "name", ".", "split", "(", "'.'", ")", "heads", "=", "parts", "[", ":", "-", "1", "]", "tail", "=", "parts", "[", "-", "1", "]", "for", "part", "in", "heads", ":", "if", "part", "in", "config", "and", "type", "(", "config", "[", "part", "]", ")", "is", "AitConfig", ":", "config", "=", "config", "[", "part", "]", "else", ":", "return", "default", "return", "config", "[", "tail", "]", "if", "tail", "in", "config", "else", "default" ]
Returns the attribute value *AitConfig.name* or *default*
if name does not exist.

The name may be a series of attributes separated by periods.  For
example, "foo.bar.baz".  In that case, lookups are attempted in the
following order until one succeeds:

    1.  AitConfig['foo.bar.baz'], and
    2.  AitConfig.foo.bar.baz
    3.  (If both fail, return *default*)
[ "Returns", "the", "attribute", "value", "*", "AitConfig", ".", "name", "*", "or", "*", "default", "*", "if", "name", "does", "not", "exist", "." ]
python
train
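A standalone sketch of the dotted-name lookup order described in the docstring above; `cfg` is a plain nested dict standing in for an AitConfig instance.

def dotted_get(cfg, name, default=None):
    # 1. Try the literal key, e.g. cfg['foo.bar.baz'].
    if name in cfg:
        return cfg[name]
    # 2. Otherwise walk the parts: cfg['foo']['bar'], then look up 'baz'.
    node = cfg
    *heads, tail = name.split('.')
    for part in heads:
        if isinstance(node, dict) and part in node:
            node = node[part]
        else:
            return default          # 3. fall back to the default
    return node[tail] if isinstance(node, dict) and tail in node else default

cfg = {'foo': {'bar': {'baz': 42}}}
assert dotted_get(cfg, 'foo.bar.baz') == 42
assert dotted_get(cfg, 'foo.missing', 'n/a') == 'n/a'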
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor_ext.py#L139-L152
def show_system_monitor_output_switch_status_port_status_port_area(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_system_monitor = ET.Element("show_system_monitor") config = show_system_monitor output = ET.SubElement(show_system_monitor, "output") switch_status = ET.SubElement(output, "switch-status") port_status = ET.SubElement(switch_status, "port-status") port_area = ET.SubElement(port_status, "port-area") port_area.text = kwargs.pop('port_area') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "show_system_monitor_output_switch_status_port_status_port_area", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_system_monitor", "=", "ET", ".", "Element", "(", "\"show_system_monitor\"", ")", "config", "=", "show_system_monitor", "output", "=", "ET", ".", "SubElement", "(", "show_system_monitor", ",", "\"output\"", ")", "switch_status", "=", "ET", ".", "SubElement", "(", "output", ",", "\"switch-status\"", ")", "port_status", "=", "ET", ".", "SubElement", "(", "switch_status", ",", "\"port-status\"", ")", "port_area", "=", "ET", ".", "SubElement", "(", "port_status", ",", "\"port-area\"", ")", "port_area", ".", "text", "=", "kwargs", ".", "pop", "(", "'port_area'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
paddycarey/dweepy
dweepy/streaming.py
https://github.com/paddycarey/dweepy/blob/1eb69de4a20c929c57be2a21e2aa39ae9a0ae298/dweepy/streaming.py#L57-L77
def listen_for_dweets_from(thing_name, timeout=900, key=None, session=None): """Create a real-time subscription to dweets """ url = BASE_URL + '/listen/for/dweets/from/{0}'.format(thing_name) session = session or requests.Session() if key is not None: params = {'key': key} else: params = None start = datetime.datetime.utcnow() while True: request = requests.Request("GET", url, params=params).prepare() resp = session.send(request, stream=True, timeout=timeout) try: for x in _listen_for_dweets_from_response(resp): yield x _check_stream_timeout(start, timeout) except (ChunkedEncodingError, requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout): pass _check_stream_timeout(start, timeout)
[ "def", "listen_for_dweets_from", "(", "thing_name", ",", "timeout", "=", "900", ",", "key", "=", "None", ",", "session", "=", "None", ")", ":", "url", "=", "BASE_URL", "+", "'/listen/for/dweets/from/{0}'", ".", "format", "(", "thing_name", ")", "session", "=", "session", "or", "requests", ".", "Session", "(", ")", "if", "key", "is", "not", "None", ":", "params", "=", "{", "'key'", ":", "key", "}", "else", ":", "params", "=", "None", "start", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "while", "True", ":", "request", "=", "requests", ".", "Request", "(", "\"GET\"", ",", "url", ",", "params", "=", "params", ")", ".", "prepare", "(", ")", "resp", "=", "session", ".", "send", "(", "request", ",", "stream", "=", "True", ",", "timeout", "=", "timeout", ")", "try", ":", "for", "x", "in", "_listen_for_dweets_from_response", "(", "resp", ")", ":", "yield", "x", "_check_stream_timeout", "(", "start", ",", "timeout", ")", "except", "(", "ChunkedEncodingError", ",", "requests", ".", "exceptions", ".", "ConnectionError", ",", "requests", ".", "exceptions", ".", "ReadTimeout", ")", ":", "pass", "_check_stream_timeout", "(", "start", ",", "timeout", ")" ]
Create a real-time subscription to dweets
[ "Create", "a", "real", "-", "time", "subscription", "to", "dweets" ]
python
train
dusktreader/py-buzz
buzz/__init__.py
https://github.com/dusktreader/py-buzz/blob/f2fd97abe158a1688188647992a5be6531058ec3/buzz/__init__.py#L75-L133
def handle_errors( cls, message, *format_args, re_raise=True, exception_class=Exception, do_finally=None, do_except=None, do_else=None, **format_kwds ): """ provides a context manager that will intercept exceptions and repackage them as Buzz instances with a message attached: .. code-block:: python with Buzz.handle_errors("It didn't work"): some_code_that_might_raise_an_exception() :param: message: The message to attach to the raised Buzz :param: format_args: Format arguments. Follows str.format conv. :param: format_kwds: Format keyword args. Follows str.format conv. :param: re_raise: If true, the re-packaged exception will be raised :param: exception_class: Limits the class of exceptions that will be re-packaged as a Buzz exception. Any other exception types will not be caught and re-packaged. Defaults to Exception (will handle all exceptions) :param: do_finally: A function that should always be called at the end of the block. Should take no parameters :param: do_except: A function that should be called only if there was an exception. Should take the raised exception as its first parameter, the final message for the exception that will be raised as its second, and the traceback as its third :param: do_else: A function taht should be called only if there were no exceptions encountered """ try: yield except exception_class as err: try: final_message = cls.reformat_exception( message, err, *format_args, **format_kwds ) except Exception as msg_err: raise cls( "Failed while formatting message: {}".format(repr(msg_err)) ) trace = cls.get_traceback() if do_except is not None: do_except(err, final_message, trace) if re_raise: raise cls(final_message).with_traceback(trace) else: if do_else is not None: do_else() finally: if do_finally is not None: do_finally()
[ "def", "handle_errors", "(", "cls", ",", "message", ",", "*", "format_args", ",", "re_raise", "=", "True", ",", "exception_class", "=", "Exception", ",", "do_finally", "=", "None", ",", "do_except", "=", "None", ",", "do_else", "=", "None", ",", "*", "*", "format_kwds", ")", ":", "try", ":", "yield", "except", "exception_class", "as", "err", ":", "try", ":", "final_message", "=", "cls", ".", "reformat_exception", "(", "message", ",", "err", ",", "*", "format_args", ",", "*", "*", "format_kwds", ")", "except", "Exception", "as", "msg_err", ":", "raise", "cls", "(", "\"Failed while formatting message: {}\"", ".", "format", "(", "repr", "(", "msg_err", ")", ")", ")", "trace", "=", "cls", ".", "get_traceback", "(", ")", "if", "do_except", "is", "not", "None", ":", "do_except", "(", "err", ",", "final_message", ",", "trace", ")", "if", "re_raise", ":", "raise", "cls", "(", "final_message", ")", ".", "with_traceback", "(", "trace", ")", "else", ":", "if", "do_else", "is", "not", "None", ":", "do_else", "(", ")", "finally", ":", "if", "do_finally", "is", "not", "None", ":", "do_finally", "(", ")" ]
provides a context manager that will intercept exceptions and repackage
them as Buzz instances with a message attached:

.. code-block:: python

    with Buzz.handle_errors("It didn't work"):
        some_code_that_might_raise_an_exception()

:param: message:         The message to attach to the raised Buzz
:param: format_args:     Format arguments. Follows str.format conv.
:param: format_kwds:     Format keyword args. Follows str.format conv.
:param: re_raise:        If true, the re-packaged exception will be raised
:param: exception_class: Limits the class of exceptions that will be
                         re-packaged as a Buzz exception. Any other
                         exception types will not be caught and
                         re-packaged. Defaults to Exception (will handle
                         all exceptions)
:param: do_finally:      A function that should always be called at the
                         end of the block. Should take no parameters
:param: do_except:       A function that should be called only if there
                         was an exception. Should take the raised
                         exception as its first parameter, the final
                         message for the exception that will be raised as
                         its second, and the traceback as its third
:param: do_else:         A function that should be called only if there
                         were no exceptions encountered
[ "provides", "a", "context", "manager", "that", "will", "intercept", "exceptions", "and", "repackage", "them", "as", "Buzz", "instances", "with", "a", "message", "attached", ":" ]
python
train
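A hedged usage sketch for the handle_errors context manager above; the UploadError subclass and the failing upload call are invented for illustration.

from buzz import Buzz

class UploadError(Buzz):
    pass

def upload(path):
    raise IOError("disk full")          # stand-in for a call that may fail

try:
    with UploadError.handle_errors("could not upload {}", "report.csv"):
        upload("report.csv")
except UploadError as err:
    # The repackaged message combines our text with the original IOError.
    print(err)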
pantsbuild/pants
src/python/pants/pantsd/process_manager.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/pantsd/process_manager.py#L550-L563
def fingerprint(self): """The fingerprint of the current process. This can either read the current fingerprint from the running process's psutil.Process.cmdline (if the managed process supports that) or from the `ProcessManager` metadata. :returns: The fingerprint of the running process as read from the process table, ProcessManager metadata or `None`. :rtype: string """ return ( self.parse_fingerprint(self.cmdline) or self.read_metadata_by_name(self.name, self.FINGERPRINT_KEY) )
[ "def", "fingerprint", "(", "self", ")", ":", "return", "(", "self", ".", "parse_fingerprint", "(", "self", ".", "cmdline", ")", "or", "self", ".", "read_metadata_by_name", "(", "self", ".", "name", ",", "self", ".", "FINGERPRINT_KEY", ")", ")" ]
The fingerprint of the current process. This can either read the current fingerprint from the running process's psutil.Process.cmdline (if the managed process supports that) or from the `ProcessManager` metadata. :returns: The fingerprint of the running process as read from the process table, ProcessManager metadata or `None`. :rtype: string
[ "The", "fingerprint", "of", "the", "current", "process", "." ]
python
train
ray-project/ray
python/ray/tune/examples/genetic_example.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/examples/genetic_example.py#L16-L27
def michalewicz_function(config, reporter): """f(x) = -sum{sin(xi) * [sin(i*xi^2 / pi)]^(2m)}""" import numpy as np x = np.array( [config["x1"], config["x2"], config["x3"], config["x4"], config["x5"]]) sin_x = np.sin(x) z = (np.arange(1, 6) / np.pi * (x * x)) sin_z = np.power(np.sin(z), 20) # let m = 20 y = np.dot(sin_x, sin_z) # Negate y since we want to minimize y value reporter(timesteps_total=1, neg_mean_loss=-y)
[ "def", "michalewicz_function", "(", "config", ",", "reporter", ")", ":", "import", "numpy", "as", "np", "x", "=", "np", ".", "array", "(", "[", "config", "[", "\"x1\"", "]", ",", "config", "[", "\"x2\"", "]", ",", "config", "[", "\"x3\"", "]", ",", "config", "[", "\"x4\"", "]", ",", "config", "[", "\"x5\"", "]", "]", ")", "sin_x", "=", "np", ".", "sin", "(", "x", ")", "z", "=", "(", "np", ".", "arange", "(", "1", ",", "6", ")", "/", "np", ".", "pi", "*", "(", "x", "*", "x", ")", ")", "sin_z", "=", "np", ".", "power", "(", "np", ".", "sin", "(", "z", ")", ",", "20", ")", "# let m = 20", "y", "=", "np", ".", "dot", "(", "sin_x", ",", "sin_z", ")", "# Negate y since we want to minimize y value", "reporter", "(", "timesteps_total", "=", "1", ",", "neg_mean_loss", "=", "-", "y", ")" ]
f(x) = -sum{sin(xi) * [sin(i*xi^2 / pi)]^(2m)}
[ "f", "(", "x", ")", "=", "-", "sum", "{", "sin", "(", "xi", ")", "*", "[", "sin", "(", "i", "*", "xi^2", "/", "pi", ")", "]", "^", "(", "2m", ")", "}" ]
python
train
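A standalone NumPy check of the formula in the docstring above, independent of Ray Tune; the sample point is arbitrary, and m=10 is chosen so the exponent 2m matches the power of 20 used in the code.

import numpy as np

def michalewicz(x, m=10):
    i = np.arange(1, len(x) + 1)
    return -np.sum(np.sin(x) * np.sin(i * x ** 2 / np.pi) ** (2 * m))

x = np.array([2.20, 1.57, 1.28, 1.92, 1.72])   # close to the known 5-D minimiser
print(michalewicz(x))                           # roughly -4.68, near the optimum of about -4.69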
davidrpugh/pyCollocation
pycollocation/solvers/solvers.py
https://github.com/davidrpugh/pyCollocation/blob/9376f3488a992dc416cfd2a4dbb396d094927569/pycollocation/solvers/solvers.py#L145-L163
def _construct_approximation(self, basis_kwargs, coefs_list): """ Construct a collection of derivatives and functions that approximate the solution to the boundary value problem. Parameters ---------- basis_kwargs : dict(str: ) coefs_list : list(numpy.ndarray) Returns ------- basis_derivs : list(function) basis_funcs : list(function) """ derivs = self._construct_derivatives(coefs_list, **basis_kwargs) funcs = self._construct_functions(coefs_list, **basis_kwargs) return derivs, funcs
[ "def", "_construct_approximation", "(", "self", ",", "basis_kwargs", ",", "coefs_list", ")", ":", "derivs", "=", "self", ".", "_construct_derivatives", "(", "coefs_list", ",", "*", "*", "basis_kwargs", ")", "funcs", "=", "self", ".", "_construct_functions", "(", "coefs_list", ",", "*", "*", "basis_kwargs", ")", "return", "derivs", ",", "funcs" ]
Construct a collection of derivatives and functions that approximate the solution to the boundary value problem. Parameters ---------- basis_kwargs : dict(str: ) coefs_list : list(numpy.ndarray) Returns ------- basis_derivs : list(function) basis_funcs : list(function)
[ "Construct", "a", "collection", "of", "derivatives", "and", "functions", "that", "approximate", "the", "solution", "to", "the", "boundary", "value", "problem", "." ]
python
train
rbuffat/pyepw
pyepw/epw.py
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L5725-L5752
def month(self, value=None): """Corresponds to IDD Field `month` Args: value (int): value for IDD Field `month` value >= 1 value <= 12 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = int(value) except ValueError: raise ValueError('value {} need to be of type int ' 'for field `month`'.format(value)) if value < 1: raise ValueError('value need to be greater or equal 1 ' 'for field `month`') if value > 12: raise ValueError('value need to be smaller 12 ' 'for field `month`') self._month = value
[ "def", "month", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "int", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type int '", "'for field `month`'", ".", "format", "(", "value", ")", ")", "if", "value", "<", "1", ":", "raise", "ValueError", "(", "'value need to be greater or equal 1 '", "'for field `month`'", ")", "if", "value", ">", "12", ":", "raise", "ValueError", "(", "'value need to be smaller 12 '", "'for field `month`'", ")", "self", ".", "_month", "=", "value" ]
Corresponds to IDD Field `month` Args: value (int): value for IDD Field `month` value >= 1 value <= 12 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "month" ]
python
train
ldo/dbussy
dbussy.py
https://github.com/ldo/dbussy/blob/59e4fbe8b8111ceead884e50d1973901a0a2d240/dbussy.py#L3970-L3977
def new_signal(celf, path, iface, name) : "creates a new DBUS.MESSAGE_TYPE_SIGNAL message." result = dbus.dbus_message_new_signal(path.encode(), iface.encode(), name.encode()) if result == None : raise CallFailed("dbus_message_new_signal") #end if return \ celf(result)
[ "def", "new_signal", "(", "celf", ",", "path", ",", "iface", ",", "name", ")", ":", "result", "=", "dbus", ".", "dbus_message_new_signal", "(", "path", ".", "encode", "(", ")", ",", "iface", ".", "encode", "(", ")", ",", "name", ".", "encode", "(", ")", ")", "if", "result", "==", "None", ":", "raise", "CallFailed", "(", "\"dbus_message_new_signal\"", ")", "#end if", "return", "celf", "(", "result", ")" ]
creates a new DBUS.MESSAGE_TYPE_SIGNAL message.
[ "creates", "a", "new", "DBUS", ".", "MESSAGE_TYPE_SIGNAL", "message", "." ]
python
train
mapbox/mapbox-cli-py
mapboxcli/scripts/datasets.py
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/datasets.py#L13-L24
def datasets(ctx): """Read and write GeoJSON from Mapbox-hosted datasets All endpoints require authentication. An access token with appropriate dataset scopes is required, see `mapbox --help`. Note that this API is currently a limited-access beta. """ access_token = (ctx.obj and ctx.obj.get('access_token')) or None service = mapbox.Datasets(access_token=access_token) ctx.obj['service'] = service
[ "def", "datasets", "(", "ctx", ")", ":", "access_token", "=", "(", "ctx", ".", "obj", "and", "ctx", ".", "obj", ".", "get", "(", "'access_token'", ")", ")", "or", "None", "service", "=", "mapbox", ".", "Datasets", "(", "access_token", "=", "access_token", ")", "ctx", ".", "obj", "[", "'service'", "]", "=", "service" ]
Read and write GeoJSON from Mapbox-hosted datasets All endpoints require authentication. An access token with appropriate dataset scopes is required, see `mapbox --help`. Note that this API is currently a limited-access beta.
[ "Read", "and", "write", "GeoJSON", "from", "Mapbox", "-", "hosted", "datasets" ]
python
train
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/bson/__init__.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/bson/__init__.py#L118-L121
def _get_float(data, position, dummy0, dummy1, dummy2): """Decode a BSON double to python float.""" end = position + 8 return _UNPACK_FLOAT(data[position:end])[0], end
[ "def", "_get_float", "(", "data", ",", "position", ",", "dummy0", ",", "dummy1", ",", "dummy2", ")", ":", "end", "=", "position", "+", "8", "return", "_UNPACK_FLOAT", "(", "data", "[", "position", ":", "end", "]", ")", "[", "0", "]", ",", "end" ]
Decode a BSON double to python float.
[ "Decode", "a", "BSON", "double", "to", "python", "float", "." ]
python
train
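An independent illustration of what _get_float decodes: a BSON double is eight bytes of little-endian IEEE 754, which the '<d' struct format unpacks.

import struct

buf = struct.pack("<d", 3.5)             # bytes a BSON encoder would emit for 3.5
value, = struct.unpack_from("<d", buf, 0)
end = 0 + 8                              # the decoder returns (value, position + 8)
print(value, end)                        # 3.5 8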
dnanexus/dx-toolkit
src/python/dxpy/api.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L1473-L1479
def workflow_remove_types(object_id, input_params={}, always_retry=True, **kwargs): """ Invokes the /workflow-xxxx/removeTypes API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Types#API-method%3A-%2Fclass-xxxx%2FremoveTypes """ return DXHTTPRequest('/%s/removeTypes' % object_id, input_params, always_retry=always_retry, **kwargs)
[ "def", "workflow_remove_types", "(", "object_id", ",", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/%s/removeTypes'", "%", "object_id", ",", "input_params", ",", "always_retry", "=", "always_retry", ",", "*", "*", "kwargs", ")" ]
Invokes the /workflow-xxxx/removeTypes API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Types#API-method%3A-%2Fclass-xxxx%2FremoveTypes
[ "Invokes", "the", "/", "workflow", "-", "xxxx", "/", "removeTypes", "API", "method", "." ]
python
train
cds-astro/mocpy
mocpy/interval_set.py
https://github.com/cds-astro/mocpy/blob/09472cabe537f6bfdb049eeea64d3ea57b391c21/mocpy/interval_set.py#L264-L301
def from_nuniq_interval_set(cls, nuniq_is): """ Convert an IntervalSet containing NUNIQ intervals to an IntervalSet representing HEALPix cells following the NESTED numbering scheme. Parameters ---------- nuniq_is : `IntervalSet` IntervalSet object storing HEALPix cells as [ipix + 4*4^(order), ipix+1 + 4*4^(order)[ intervals. Returns ------- interval : `IntervalSet` IntervalSet object storing HEALPix cells as [ipix*4^(29-order), (ipix+1)*4^(29-order)[ intervals. """ nested_is = IntervalSet() # Appending a list is faster than appending a numpy array # For these algorithms we append a list and create the interval set from the finished list rtmp = [] last_order = 0 intervals = nuniq_is._intervals diff_order = IntervalSet.HPY_MAX_ORDER shift_order = 2 * diff_order for interval in intervals: for j in range(interval[0], interval[1]): order, i_pix = uniq2orderipix(j) if order != last_order: nested_is = nested_is.union(IntervalSet(np.asarray(rtmp))) rtmp = [] last_order = order diff_order = IntervalSet.HPY_MAX_ORDER - order shift_order = 2 * diff_order rtmp.append((i_pix << shift_order, (i_pix + 1) << shift_order)) nested_is = nested_is.union(IntervalSet(np.asarray(rtmp))) return nested_is
[ "def", "from_nuniq_interval_set", "(", "cls", ",", "nuniq_is", ")", ":", "nested_is", "=", "IntervalSet", "(", ")", "# Appending a list is faster than appending a numpy array", "# For these algorithms we append a list and create the interval set from the finished list", "rtmp", "=", "[", "]", "last_order", "=", "0", "intervals", "=", "nuniq_is", ".", "_intervals", "diff_order", "=", "IntervalSet", ".", "HPY_MAX_ORDER", "shift_order", "=", "2", "*", "diff_order", "for", "interval", "in", "intervals", ":", "for", "j", "in", "range", "(", "interval", "[", "0", "]", ",", "interval", "[", "1", "]", ")", ":", "order", ",", "i_pix", "=", "uniq2orderipix", "(", "j", ")", "if", "order", "!=", "last_order", ":", "nested_is", "=", "nested_is", ".", "union", "(", "IntervalSet", "(", "np", ".", "asarray", "(", "rtmp", ")", ")", ")", "rtmp", "=", "[", "]", "last_order", "=", "order", "diff_order", "=", "IntervalSet", ".", "HPY_MAX_ORDER", "-", "order", "shift_order", "=", "2", "*", "diff_order", "rtmp", ".", "append", "(", "(", "i_pix", "<<", "shift_order", ",", "(", "i_pix", "+", "1", ")", "<<", "shift_order", ")", ")", "nested_is", "=", "nested_is", ".", "union", "(", "IntervalSet", "(", "np", ".", "asarray", "(", "rtmp", ")", ")", ")", "return", "nested_is" ]
Convert an IntervalSet containing NUNIQ intervals to an IntervalSet representing HEALPix cells following the NESTED numbering scheme. Parameters ---------- nuniq_is : `IntervalSet` IntervalSet object storing HEALPix cells as [ipix + 4*4^(order), ipix+1 + 4*4^(order)[ intervals. Returns ------- interval : `IntervalSet` IntervalSet object storing HEALPix cells as [ipix*4^(29-order), (ipix+1)*4^(29-order)[ intervals.
[ "Convert", "an", "IntervalSet", "containing", "NUNIQ", "intervals", "to", "an", "IntervalSet", "representing", "HEALPix", "cells", "following", "the", "NESTED", "numbering", "scheme", "." ]
python
train
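A back-of-the-envelope sketch of the NUNIQ relation the loop above relies on (uniq = 4*4^order + ipix); this helper mirrors what uniq2orderipix is assumed to compute, and the shift uses HPY_MAX_ORDER = 29 from the docstring.

HPY_MAX_ORDER = 29

def uniq2orderipix(uniq):
    # uniq lies in [4*4**order, 16*4**order), so the bit length pins down the order.
    order = (uniq.bit_length() - 3) // 2
    ipix = uniq - 4 * 4 ** order
    return order, ipix

order, ipix = uniq2orderipix(4 * 4 ** 3 + 10)      # cell 10 at order 3
shift = 2 * (HPY_MAX_ORDER - order)
print(order, ipix, (ipix << shift, (ipix + 1) << shift))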
wummel/dosage
scripts/order-symlinks.py
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/scripts/order-symlinks.py#L23-L32
def prepare_output(d): """Clean pre-existing links in output directory.""" outDir = os.path.join(d, 'inorder') if not os.path.exists(outDir): os.mkdir(outDir) for f in os.listdir(outDir): f = os.path.join(outDir, f) if os.path.islink(f): os.remove(f) return outDir
[ "def", "prepare_output", "(", "d", ")", ":", "outDir", "=", "os", ".", "path", ".", "join", "(", "d", ",", "'inorder'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "outDir", ")", ":", "os", ".", "mkdir", "(", "outDir", ")", "for", "f", "in", "os", ".", "listdir", "(", "outDir", ")", ":", "f", "=", "os", ".", "path", ".", "join", "(", "outDir", ",", "f", ")", "if", "os", ".", "path", ".", "islink", "(", "f", ")", ":", "os", ".", "remove", "(", "f", ")", "return", "outDir" ]
Clean pre-existing links in output directory.
[ "Clean", "pre", "-", "existing", "links", "in", "output", "directory", "." ]
python
train
datastax/python-driver
cassandra/policies.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/policies.py#L543-L561
def make_query_plan(self, working_keyspace=None, query=None): """ Defers to the child policy's :meth:`.LoadBalancingPolicy.make_query_plan` and filters the results. Note that this filtering may break desirable properties of the wrapped policy in some cases. For instance, imagine if you configure this policy to filter out ``host2``, and to wrap a round-robin policy that rotates through three hosts in the order ``host1, host2, host3``, ``host2, host3, host1``, ``host3, host1, host2``, repeating. This policy will yield ``host1, host3``, ``host3, host1``, ``host3, host1``, disproportionately favoring ``host3``. """ child_qp = self._child_policy.make_query_plan( working_keyspace=working_keyspace, query=query ) for host in child_qp: if self.predicate(host): yield host
[ "def", "make_query_plan", "(", "self", ",", "working_keyspace", "=", "None", ",", "query", "=", "None", ")", ":", "child_qp", "=", "self", ".", "_child_policy", ".", "make_query_plan", "(", "working_keyspace", "=", "working_keyspace", ",", "query", "=", "query", ")", "for", "host", "in", "child_qp", ":", "if", "self", ".", "predicate", "(", "host", ")", ":", "yield", "host" ]
Defers to the child policy's :meth:`.LoadBalancingPolicy.make_query_plan` and filters the results. Note that this filtering may break desirable properties of the wrapped policy in some cases. For instance, imagine if you configure this policy to filter out ``host2``, and to wrap a round-robin policy that rotates through three hosts in the order ``host1, host2, host3``, ``host2, host3, host1``, ``host3, host1, host2``, repeating. This policy will yield ``host1, host3``, ``host3, host1``, ``host3, host1``, disproportionately favoring ``host3``.
[ "Defers", "to", "the", "child", "policy", "s", ":", "meth", ":", ".", "LoadBalancingPolicy", ".", "make_query_plan", "and", "filters", "the", "results", "." ]
python
train
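A pure-Python sketch of the skew described in the docstring above; the host names and the toy round-robin generator are stand-ins, not the real driver classes.

from itertools import islice

def round_robin(hosts):
    start = 0
    while True:
        yield [hosts[(start + i) % len(hosts)] for i in range(len(hosts))]
        start += 1

plans = round_robin(["host1", "host2", "host3"])
for plan in islice(plans, 3):
    print([h for h in plan if h != "host2"])   # filter out host2
# ['host1', 'host3'] / ['host3', 'host1'] / ['host3', 'host1'] -- host3 is favored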
DLR-RM/RAFCON
source/rafcon/core/states/state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/state.py#L326-L356
def get_default_input_values_for_state(self, state): """ Computes the default input values for a state :param State state: the state to get the default input values for """ from rafcon.core.states.library_state import LibraryState result_dict = {} for input_port_key, value in state.input_data_ports.items(): if isinstance(state, LibraryState): if state.use_runtime_value_input_data_ports[input_port_key]: default = state.input_data_port_runtime_values[input_port_key] else: default = value.default_value else: default = value.default_value # if the user sets the default value to a string starting with $, try to retrieve the value # from the global variable manager if isinstance(default, string_types) and len(default) > 0 and default[0] == '$': from rafcon.core.singleton import global_variable_manager as gvm var_name = default[1:] if not gvm.variable_exist(var_name): logger.error("The global variable '{0}' does not exist".format(var_name)) global_value = None else: global_value = gvm.get_variable(var_name) result_dict[value.name] = global_value else: # set input to its default value result_dict[value.name] = copy.copy(default) return result_dict
[ "def", "get_default_input_values_for_state", "(", "self", ",", "state", ")", ":", "from", "rafcon", ".", "core", ".", "states", ".", "library_state", "import", "LibraryState", "result_dict", "=", "{", "}", "for", "input_port_key", ",", "value", "in", "state", ".", "input_data_ports", ".", "items", "(", ")", ":", "if", "isinstance", "(", "state", ",", "LibraryState", ")", ":", "if", "state", ".", "use_runtime_value_input_data_ports", "[", "input_port_key", "]", ":", "default", "=", "state", ".", "input_data_port_runtime_values", "[", "input_port_key", "]", "else", ":", "default", "=", "value", ".", "default_value", "else", ":", "default", "=", "value", ".", "default_value", "# if the user sets the default value to a string starting with $, try to retrieve the value", "# from the global variable manager", "if", "isinstance", "(", "default", ",", "string_types", ")", "and", "len", "(", "default", ")", ">", "0", "and", "default", "[", "0", "]", "==", "'$'", ":", "from", "rafcon", ".", "core", ".", "singleton", "import", "global_variable_manager", "as", "gvm", "var_name", "=", "default", "[", "1", ":", "]", "if", "not", "gvm", ".", "variable_exist", "(", "var_name", ")", ":", "logger", ".", "error", "(", "\"The global variable '{0}' does not exist\"", ".", "format", "(", "var_name", ")", ")", "global_value", "=", "None", "else", ":", "global_value", "=", "gvm", ".", "get_variable", "(", "var_name", ")", "result_dict", "[", "value", ".", "name", "]", "=", "global_value", "else", ":", "# set input to its default value", "result_dict", "[", "value", ".", "name", "]", "=", "copy", ".", "copy", "(", "default", ")", "return", "result_dict" ]
Computes the default input values for a state :param State state: the state to get the default input values for
[ "Computes", "the", "default", "input", "values", "for", "a", "state" ]
python
train
tamasgal/km3pipe
km3pipe/io/root.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/root.py#L70-L91
def interpol_hist2d(h2d, oversamp_factor=10): """Sample the interpolator of a root 2d hist. Root's hist2d has a weird internal interpolation routine, also using neighbouring bins. """ from rootpy import ROOTError xlim = h2d.bins(axis=0) ylim = h2d.bins(axis=1) xn = h2d.nbins(0) yn = h2d.nbins(1) x = np.linspace(xlim[0], xlim[1], xn * oversamp_factor) y = np.linspace(ylim[0], ylim[1], yn * oversamp_factor) mat = np.zeros((xn, yn)) for xi in range(xn): for yi in range(yn): try: mat[xi, yi] = h2d.interpolate(x[xi], y[yi]) except ROOTError: continue return mat, x, y
[ "def", "interpol_hist2d", "(", "h2d", ",", "oversamp_factor", "=", "10", ")", ":", "from", "rootpy", "import", "ROOTError", "xlim", "=", "h2d", ".", "bins", "(", "axis", "=", "0", ")", "ylim", "=", "h2d", ".", "bins", "(", "axis", "=", "1", ")", "xn", "=", "h2d", ".", "nbins", "(", "0", ")", "yn", "=", "h2d", ".", "nbins", "(", "1", ")", "x", "=", "np", ".", "linspace", "(", "xlim", "[", "0", "]", ",", "xlim", "[", "1", "]", ",", "xn", "*", "oversamp_factor", ")", "y", "=", "np", ".", "linspace", "(", "ylim", "[", "0", "]", ",", "ylim", "[", "1", "]", ",", "yn", "*", "oversamp_factor", ")", "mat", "=", "np", ".", "zeros", "(", "(", "xn", ",", "yn", ")", ")", "for", "xi", "in", "range", "(", "xn", ")", ":", "for", "yi", "in", "range", "(", "yn", ")", ":", "try", ":", "mat", "[", "xi", ",", "yi", "]", "=", "h2d", ".", "interpolate", "(", "x", "[", "xi", "]", ",", "y", "[", "yi", "]", ")", "except", "ROOTError", ":", "continue", "return", "mat", ",", "x", ",", "y" ]
Sample the interpolator of a root 2d hist. Root's hist2d has a weird internal interpolation routine, also using neighbouring bins.
[ "Sample", "the", "interpolator", "of", "a", "root", "2d", "hist", "." ]
python
train
loli/medpy
medpy/features/intensity.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/intensity.py#L343-L371
def median(image, size = 5, voxelspacing = None, mask = slice(None)): """ Computes the multi-dimensional median filter and returns the resulting values per voxel. Optionally a binary mask can be supplied to select the voxels for which the feature should be extracted. Parameters ---------- image : array_like or list/tuple of array_like A single image or a list/tuple of images (for multi-spectral case). size : number or sequence of numbers Size of the structuring element. Can be given given for each axis as a sequence, or as a single number, in which case it is equal for all axes. Note that the voxel spacing of the image is taken into account, the given values are treated as mm. voxelspacing : sequence of floats The side-length of each voxel. mask : array_like A binary mask for the image. Returns ------- median : ndarray Multi-dimesnional median filtered version of the input images. """ return _extract_feature(_extract_median, image, mask, size = size, voxelspacing = voxelspacing)
[ "def", "median", "(", "image", ",", "size", "=", "5", ",", "voxelspacing", "=", "None", ",", "mask", "=", "slice", "(", "None", ")", ")", ":", "return", "_extract_feature", "(", "_extract_median", ",", "image", ",", "mask", ",", "size", "=", "size", ",", "voxelspacing", "=", "voxelspacing", ")" ]
Computes the multi-dimensional median filter and returns the resulting values per
voxel.

Optionally a binary mask can be supplied to select the voxels for which the
feature should be extracted.

Parameters
----------
image : array_like or list/tuple of array_like
    A single image or a list/tuple of images (for multi-spectral case).
size : number or sequence of numbers
    Size of the structuring element. Can be given for each axis as a sequence,
    or as a single number, in which case it is equal for all axes. Note that
    the voxel spacing of the image is taken into account, the given values are
    treated as mm.
voxelspacing : sequence of floats
    The side-length of each voxel.
mask : array_like
    A binary mask for the image.

Returns
-------
median : ndarray
    Multi-dimensional median filtered version of the input images.
[ "Computes", "the", "multi", "-", "dimensional", "median", "filter", "and", "returns", "the", "resulting", "values", "per", "voxel", ".", "Optionally", "a", "binary", "mask", "can", "be", "supplied", "to", "select", "the", "voxels", "for", "which", "the", "feature", "should", "be", "extracted", ".", "Parameters", "----------", "image", ":", "array_like", "or", "list", "/", "tuple", "of", "array_like", "A", "single", "image", "or", "a", "list", "/", "tuple", "of", "images", "(", "for", "multi", "-", "spectral", "case", ")", ".", "size", ":", "number", "or", "sequence", "of", "numbers", "Size", "of", "the", "structuring", "element", ".", "Can", "be", "given", "given", "for", "each", "axis", "as", "a", "sequence", "or", "as", "a", "single", "number", "in", "which", "case", "it", "is", "equal", "for", "all", "axes", ".", "Note", "that", "the", "voxel", "spacing", "of", "the", "image", "is", "taken", "into", "account", "the", "given", "values", "are", "treated", "as", "mm", ".", "voxelspacing", ":", "sequence", "of", "floats", "The", "side", "-", "length", "of", "each", "voxel", ".", "mask", ":", "array_like", "A", "binary", "mask", "for", "the", "image", ".", "Returns", "-------", "median", ":", "ndarray", "Multi", "-", "dimesnional", "median", "filtered", "version", "of", "the", "input", "images", "." ]
python
train
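A hedged usage sketch for the median feature extractor above; the random volume and voxel spacing are placeholders for a real image.

import numpy as np
from medpy.features.intensity import median

image = np.random.rand(32, 32, 16)                   # stand-in for a 3-D image
feats = median(image, size=3, voxelspacing=(1.0, 1.0, 2.5))
print(feats.shape)                                    # one filtered value per voxel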
Skyscanner/skyscanner-python-sdk
skyscanner/skyscanner.py
https://github.com/Skyscanner/skyscanner-python-sdk/blob/26ce4a563f538a689f2a29063f3604731703ddac/skyscanner/skyscanner.py#L102-L132
def get_additional_params(self, **params): """ Filter to get the additional params needed for polling """ # TODO: Move these params to their own vertical if needed. polling_params = [ 'locationschema', 'carrierschema', 'sorttype', 'sortorder', 'originairports', 'destinationairports', 'stops', 'outbounddeparttime', 'outbounddepartstarttime', 'outbounddepartendtime', 'inbounddeparttime', 'inbounddepartstarttime', 'inbounddepartendtime', 'duration', 'includecarriers', 'excludecarriers' ] additional_params = dict( (key, value) for key, value in params.items() if key in polling_params ) return additional_params
[ "def", "get_additional_params", "(", "self", ",", "*", "*", "params", ")", ":", "# TODO: Move these params to their own vertical if needed.", "polling_params", "=", "[", "'locationschema'", ",", "'carrierschema'", ",", "'sorttype'", ",", "'sortorder'", ",", "'originairports'", ",", "'destinationairports'", ",", "'stops'", ",", "'outbounddeparttime'", ",", "'outbounddepartstarttime'", ",", "'outbounddepartendtime'", ",", "'inbounddeparttime'", ",", "'inbounddepartstarttime'", ",", "'inbounddepartendtime'", ",", "'duration'", ",", "'includecarriers'", ",", "'excludecarriers'", "]", "additional_params", "=", "dict", "(", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "params", ".", "items", "(", ")", "if", "key", "in", "polling_params", ")", "return", "additional_params" ]
Filter to get the additional params needed for polling
[ "Filter", "to", "get", "the", "additional", "params", "needed", "for", "polling" ]
python
train
linkedin/luminol
src/luminol/__init__.py
https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/__init__.py#L32-L43
def _analyze_root_causes(self): """ Conduct root cause analysis. The first metric of the list is taken as the root cause right now. """ causes = {} for a in self.anomalies: try: causes[a] = self.correlations[a][0] except IndexError: raise exceptions.InvalidDataFormat('luminol.luminol: dict correlations contains empty list.') self.causes = causes
[ "def", "_analyze_root_causes", "(", "self", ")", ":", "causes", "=", "{", "}", "for", "a", "in", "self", ".", "anomalies", ":", "try", ":", "causes", "[", "a", "]", "=", "self", ".", "correlations", "[", "a", "]", "[", "0", "]", "except", "IndexError", ":", "raise", "exceptions", ".", "InvalidDataFormat", "(", "'luminol.luminol: dict correlations contains empty list.'", ")", "self", ".", "causes", "=", "causes" ]
Conduct root cause analysis. The first metric of the list is taken as the root cause right now.
[ "Conduct", "root", "cause", "analysis", ".", "The", "first", "metric", "of", "the", "list", "is", "taken", "as", "the", "root", "cause", "right", "now", "." ]
python
train
spyder-ide/spyder
spyder/utils/programs.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/programs.py#L478-L484
def is_python_interpreter_valid_name(filename): """Check that the python interpreter file has a valid name.""" pattern = r'.*python(\d\.?\d*)?(w)?(.exe)?$' if re.match(pattern, filename, flags=re.I) is None: return False else: return True
[ "def", "is_python_interpreter_valid_name", "(", "filename", ")", ":", "pattern", "=", "r'.*python(\\d\\.?\\d*)?(w)?(.exe)?$'", "if", "re", ".", "match", "(", "pattern", ",", "filename", ",", "flags", "=", "re", ".", "I", ")", "is", "None", ":", "return", "False", "else", ":", "return", "True" ]
Check that the python interpreter file has a valid name.
[ "Check", "that", "the", "python", "interpreter", "file", "has", "a", "valid", "name", "." ]
python
train
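A quick demonstration of which file names the pattern above accepts; the sample names are arbitrary.

import re

pattern = r'.*python(\d\.?\d*)?(w)?(.exe)?$'
for name in ("python", "python3.7", "pythonw.exe", r"C:\Python36\python.exe", "notebook"):
    print(name, bool(re.match(pattern, name, flags=re.I)))
# Names ending in 'python' plus an optional version, 'w', or '.exe' match; 'notebook' does not.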
abilian/abilian-core
abilian/services/security/service.py
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/services/security/service.py#L81-L130
def query_pa_no_flush(session, permission, role, obj): """Query for a :class:`PermissionAssignment` using `session` without any `flush()`. It works by looking in session `new`, `dirty` and `deleted`, and issuing a query with no autoflush. .. note:: This function is used by `add_permission` and `delete_permission` to allow to add/remove the same assignment twice without issuing any flush. Since :class:`Entity` creates its initial permissions in during :sqlalchemy:`sqlalchemy.orm.events.SessionEvents.after_attach`, it might be problematic to issue a flush when entity is not yet ready to be flushed (missing required attributes for example). """ to_visit = [session.deleted, session.dirty, session.new] with session.no_autoflush: # no_autoflush is required to visit PERMISSIONS_ATTR without emitting a # flush() if obj: to_visit.append(getattr(obj, PERMISSIONS_ATTR)) permissions = ( p for p in chain(*to_visit) if isinstance(p, PermissionAssignment) ) for instance in permissions: if ( instance.permission == permission and instance.role == role and instance.object == obj ): return instance # Last chance: perform a filtered query. If obj is not None, sometimes # getattr(obj, PERMISSIONS_ATTR) has objects not present in session # not in this query (maybe in a parent session transaction `new`?). if obj is not None and obj.id is None: obj = None return ( session.query(PermissionAssignment) .filter( PermissionAssignment.permission == permission, PermissionAssignment.role == role, PermissionAssignment.object == obj, ) .first() )
[ "def", "query_pa_no_flush", "(", "session", ",", "permission", ",", "role", ",", "obj", ")", ":", "to_visit", "=", "[", "session", ".", "deleted", ",", "session", ".", "dirty", ",", "session", ".", "new", "]", "with", "session", ".", "no_autoflush", ":", "# no_autoflush is required to visit PERMISSIONS_ATTR without emitting a", "# flush()", "if", "obj", ":", "to_visit", ".", "append", "(", "getattr", "(", "obj", ",", "PERMISSIONS_ATTR", ")", ")", "permissions", "=", "(", "p", "for", "p", "in", "chain", "(", "*", "to_visit", ")", "if", "isinstance", "(", "p", ",", "PermissionAssignment", ")", ")", "for", "instance", "in", "permissions", ":", "if", "(", "instance", ".", "permission", "==", "permission", "and", "instance", ".", "role", "==", "role", "and", "instance", ".", "object", "==", "obj", ")", ":", "return", "instance", "# Last chance: perform a filtered query. If obj is not None, sometimes", "# getattr(obj, PERMISSIONS_ATTR) has objects not present in session", "# not in this query (maybe in a parent session transaction `new`?).", "if", "obj", "is", "not", "None", "and", "obj", ".", "id", "is", "None", ":", "obj", "=", "None", "return", "(", "session", ".", "query", "(", "PermissionAssignment", ")", ".", "filter", "(", "PermissionAssignment", ".", "permission", "==", "permission", ",", "PermissionAssignment", ".", "role", "==", "role", ",", "PermissionAssignment", ".", "object", "==", "obj", ",", ")", ".", "first", "(", ")", ")" ]
Query for a :class:`PermissionAssignment` using `session` without any
`flush()`.

It works by looking in session `new`, `dirty` and `deleted`, and issuing a
query with no autoflush.

.. note::

    This function is used by `add_permission` and `delete_permission` to
    allow adding/removing the same assignment twice without issuing any
    flush. Since :class:`Entity` creates its initial permissions during
    :sqlalchemy:`sqlalchemy.orm.events.SessionEvents.after_attach`, it might
    be problematic to issue a flush when the entity is not yet ready to be
    flushed (missing required attributes, for example).
[ "Query", "for", "a", ":", "class", ":", "PermissionAssignment", "using", "session", "without", "any", "flush", "()", "." ]
python
train
baszoetekouw/janus-py
sr/sr.py
https://github.com/baszoetekouw/janus-py/blob/4f2034436eef010ec8d77e168f6198123b5eb226/sr/sr.py#L150-L155
def list_eids(self): """ Returns a list of all known eids """ entities = self.list() return sorted([int(eid) for eid in entities])
[ "def", "list_eids", "(", "self", ")", ":", "entities", "=", "self", ".", "list", "(", ")", "return", "sorted", "(", "[", "int", "(", "eid", ")", "for", "eid", "in", "entities", "]", ")" ]
Returns a list of all known eids
[ "Returns", "a", "list", "of", "all", "known", "eids" ]
python
train
iotaledger/iota.lib.py
iota/transaction/creation.py
https://github.com/iotaledger/iota.lib.py/blob/97cdd1e241498446b46157b79b2a1ea2ec6d387a/iota/transaction/creation.py#L315-L384
def finalize(self): # type: () -> None """ Finalizes the bundle, preparing it to be attached to the Tangle. """ if self.hash: raise RuntimeError('Bundle is already finalized.') if not self: raise ValueError('Bundle has no transactions.') # Quick validation. balance = self.balance if balance < 0: if self.change_address: self.add_transaction(ProposedTransaction( address=self.change_address, value=-balance, tag=self.tag, )) else: raise ValueError( 'Bundle has unspent inputs (balance: {balance}); ' 'use ``send_unspent_inputs_to`` to create ' 'change transaction.'.format( balance=balance, ), ) elif balance > 0: raise ValueError( 'Inputs are insufficient to cover bundle spend ' '(balance: {balance}).'.format( balance=balance, ), ) # Generate bundle hash. while True: sponge = Kerl() last_index = len(self) - 1 for i, txn in enumerate(self): txn.current_index = i txn.last_index = last_index sponge.absorb(txn.get_signature_validation_trytes().as_trits()) bundle_hash_trits = [0] * HASH_LENGTH sponge.squeeze(bundle_hash_trits) bundle_hash = BundleHash.from_trits(bundle_hash_trits) # Check that we generated a secure bundle hash. # https://github.com/iotaledger/iota.lib.py/issues/84 if any(13 in part for part in normalize(bundle_hash)): # Increment the legacy tag and try again. tail_transaction = ( self.tail_transaction ) # type: ProposedTransaction tail_transaction.increment_legacy_tag() else: break # Copy bundle hash to individual transactions. for txn in self: txn.bundle_hash = bundle_hash # Initialize signature/message fragment. txn.signature_message_fragment = Fragment(txn.message or b'')
[ "def", "finalize", "(", "self", ")", ":", "# type: () -> None", "if", "self", ".", "hash", ":", "raise", "RuntimeError", "(", "'Bundle is already finalized.'", ")", "if", "not", "self", ":", "raise", "ValueError", "(", "'Bundle has no transactions.'", ")", "# Quick validation.", "balance", "=", "self", ".", "balance", "if", "balance", "<", "0", ":", "if", "self", ".", "change_address", ":", "self", ".", "add_transaction", "(", "ProposedTransaction", "(", "address", "=", "self", ".", "change_address", ",", "value", "=", "-", "balance", ",", "tag", "=", "self", ".", "tag", ",", ")", ")", "else", ":", "raise", "ValueError", "(", "'Bundle has unspent inputs (balance: {balance}); '", "'use ``send_unspent_inputs_to`` to create '", "'change transaction.'", ".", "format", "(", "balance", "=", "balance", ",", ")", ",", ")", "elif", "balance", ">", "0", ":", "raise", "ValueError", "(", "'Inputs are insufficient to cover bundle spend '", "'(balance: {balance}).'", ".", "format", "(", "balance", "=", "balance", ",", ")", ",", ")", "# Generate bundle hash.", "while", "True", ":", "sponge", "=", "Kerl", "(", ")", "last_index", "=", "len", "(", "self", ")", "-", "1", "for", "i", ",", "txn", "in", "enumerate", "(", "self", ")", ":", "txn", ".", "current_index", "=", "i", "txn", ".", "last_index", "=", "last_index", "sponge", ".", "absorb", "(", "txn", ".", "get_signature_validation_trytes", "(", ")", ".", "as_trits", "(", ")", ")", "bundle_hash_trits", "=", "[", "0", "]", "*", "HASH_LENGTH", "sponge", ".", "squeeze", "(", "bundle_hash_trits", ")", "bundle_hash", "=", "BundleHash", ".", "from_trits", "(", "bundle_hash_trits", ")", "# Check that we generated a secure bundle hash.", "# https://github.com/iotaledger/iota.lib.py/issues/84", "if", "any", "(", "13", "in", "part", "for", "part", "in", "normalize", "(", "bundle_hash", ")", ")", ":", "# Increment the legacy tag and try again.", "tail_transaction", "=", "(", "self", ".", "tail_transaction", ")", "# type: ProposedTransaction", "tail_transaction", ".", "increment_legacy_tag", "(", ")", "else", ":", "break", "# Copy bundle hash to individual transactions.", "for", "txn", "in", "self", ":", "txn", ".", "bundle_hash", "=", "bundle_hash", "# Initialize signature/message fragment.", "txn", ".", "signature_message_fragment", "=", "Fragment", "(", "txn", ".", "message", "or", "b''", ")" ]
Finalizes the bundle, preparing it to be attached to the Tangle.
[ "Finalizes", "the", "bundle", "preparing", "it", "to", "be", "attached", "to", "the", "Tangle", "." ]
python
test
rpcope1/PythonConfluenceAPI
PythonConfluenceAPI/api.py
https://github.com/rpcope1/PythonConfluenceAPI/blob/b7f0ca2a390f964715fdf3a60b5b0c5ef7116d40/PythonConfluenceAPI/api.py#L23-L52
def all_of(api_call, *args, **kwargs): """ Generator that iterates over all results of an API call that requires limit/start pagination. If the `limit` keyword argument is set, it is used to stop the generator after the given number of result items. >>> for i, v in enumerate(all_of(api.get_content)): >>> v = bunchify(v) >>> print('\t'.join((str(i), v.type, v.id, v.status, v.title))) :param api_call: Confluence API call (method). :param args: Positional arguments of the call. :param kwargs: Keyword arguments of the call. """ kwargs = kwargs.copy() pos, outer_limit = 0, kwargs.get('limit', 0) or sys.maxsize while True: response = api_call(*args, **kwargs) for item in response.get('results', []): pos += 1 if pos > outer_limit: return yield item ##print((pos, response['start'], response['limit'])) if response.get('_links', {}).get('next', None): kwargs['start'] = response['start'] + response['size'] kwargs['limit'] = response['limit'] else: return
[ "def", "all_of", "(", "api_call", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "kwargs", ".", "copy", "(", ")", "pos", ",", "outer_limit", "=", "0", ",", "kwargs", ".", "get", "(", "'limit'", ",", "0", ")", "or", "sys", ".", "maxsize", "while", "True", ":", "response", "=", "api_call", "(", "*", "args", ",", "*", "*", "kwargs", ")", "for", "item", "in", "response", ".", "get", "(", "'results'", ",", "[", "]", ")", ":", "pos", "+=", "1", "if", "pos", ">", "outer_limit", ":", "return", "yield", "item", "##print((pos, response['start'], response['limit']))", "if", "response", ".", "get", "(", "'_links'", ",", "{", "}", ")", ".", "get", "(", "'next'", ",", "None", ")", ":", "kwargs", "[", "'start'", "]", "=", "response", "[", "'start'", "]", "+", "response", "[", "'size'", "]", "kwargs", "[", "'limit'", "]", "=", "response", "[", "'limit'", "]", "else", ":", "return" ]
Generator that iterates over all results of an API call that requires limit/start pagination. If the `limit` keyword argument is set, it is used to stop the generator after the given number of result items. >>> for i, v in enumerate(all_of(api.get_content)): >>> v = bunchify(v) >>> print('\t'.join((str(i), v.type, v.id, v.status, v.title))) :param api_call: Confluence API call (method). :param args: Positional arguments of the call. :param kwargs: Keyword arguments of the call.
[ "Generator", "that", "iterates", "over", "all", "results", "of", "an", "API", "call", "that", "requires", "limit", "/", "start", "pagination", "." ]
python
train
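A small sketch that drives the paginator above with a stub API call, so the limit/start handling can be exercised without a live Confluence server; the import path follows this record's path field, and the stub's page shape (results, start, size, limit, _links) mirrors what the function reads from each response.

    from PythonConfluenceAPI.api import all_of

    def fake_get_content(limit=2, start=0):
        # Stub that mimics Confluence's limit/start pagination over five items.
        items = [{'id': str(i)} for i in range(5)]
        page = items[start:start + limit]
        links = {'next': 'yes'} if start + limit < len(items) else {}
        return {'results': page, 'start': start, 'size': len(page),
                'limit': limit, '_links': links}

    print([item['id'] for item in all_of(fake_get_content)])
    # -> ['0', '1', '2', '3', '4'] across three paginated calls;
    # all_of(fake_get_content, limit=3) would stop after three items.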
gwpy/gwpy
gwpy/timeseries/core.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/core.py#L267-L310
def read(cls, source, *args, **kwargs): """Read data into a `TimeSeries` Arguments and keywords depend on the output format, see the online documentation for full details for each format, the parameters below are common to most formats. Parameters ---------- source : `str`, `list` Source of data, any of the following: - `str` path of single data file, - `str` path of LAL-format cache file, - `list` of paths. name : `str`, `~gwpy.detector.Channel` the name of the channel to read, or a `Channel` object. start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional GPS start time of required data, defaults to start of data found; any input parseable by `~gwpy.time.to_gps` is fine end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional GPS end time of required data, defaults to end of data found; any input parseable by `~gwpy.time.to_gps` is fine format : `str`, optional source format identifier. If not given, the format will be detected if possible. See below for list of acceptable formats. nproc : `int`, optional number of parallel processes to use, serial process by default. pad : `float`, optional value with which to fill gaps in the source data, by default gaps will result in a `ValueError`. Notes -----""" from .io.core import read as timeseries_reader return timeseries_reader(cls, source, *args, **kwargs)
[ "def", "read", "(", "cls", ",", "source", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", ".", "io", ".", "core", "import", "read", "as", "timeseries_reader", "return", "timeseries_reader", "(", "cls", ",", "source", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Read data into a `TimeSeries` Arguments and keywords depend on the output format, see the online documentation for full details for each format, the parameters below are common to most formats. Parameters ---------- source : `str`, `list` Source of data, any of the following: - `str` path of single data file, - `str` path of LAL-format cache file, - `list` of paths. name : `str`, `~gwpy.detector.Channel` the name of the channel to read, or a `Channel` object. start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional GPS start time of required data, defaults to start of data found; any input parseable by `~gwpy.time.to_gps` is fine end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional GPS end time of required data, defaults to end of data found; any input parseable by `~gwpy.time.to_gps` is fine format : `str`, optional source format identifier. If not given, the format will be detected if possible. See below for list of acceptable formats. nproc : `int`, optional number of parallel processes to use, serial process by default. pad : `float`, optional value with which to fill gaps in the source data, by default gaps will result in a `ValueError`. Notes -----
[ "Read", "data", "into", "a", "TimeSeries" ]
python
train
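A hedged usage sketch for the reader documented above; the frame file name and channel name are placeholders, and it assumes a local GWF file is available, but the keyword arguments follow the parameters listed in the record's docstring.

    from gwpy.timeseries import TimeSeries

    # Read one channel from a local frame file, padding any gaps with zeros.
    data = TimeSeries.read('data.gwf', 'L1:GDS-CALIB_STRAIN',   # placeholder file/channel
                           start=1126259446, end=1126259478, pad=0.0)
    print(data.sample_rate, data.duration)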
apache/incubator-mxnet
python/mxnet/ndarray/ndarray.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2475-L2505
def array(source_array, ctx=None, dtype=None): """Creates an array from any object exposing the array interface. Parameters ---------- source_array : array_like An object exposing the array interface, an object whose `__array__` method returns an array, or any (nested) sequence. ctx : Context, optional Device context (default is the current default context). dtype : str or numpy.dtype, optional The data type of the output array. The default dtype is ``source_array.dtype`` if `source_array` is an `NDArray`, `float32` otherwise. Returns ------- NDArray An `NDArray` with the same contents as the `source_array`. """ if isinstance(source_array, NDArray): dtype = source_array.dtype if dtype is None else dtype else: dtype = mx_real_t if dtype is None else dtype if not isinstance(source_array, np.ndarray): try: source_array = np.array(source_array, dtype=dtype) except: raise TypeError('source_array must be array like object') arr = empty(source_array.shape, ctx, dtype) arr[:] = source_array return arr
[ "def", "array", "(", "source_array", ",", "ctx", "=", "None", ",", "dtype", "=", "None", ")", ":", "if", "isinstance", "(", "source_array", ",", "NDArray", ")", ":", "dtype", "=", "source_array", ".", "dtype", "if", "dtype", "is", "None", "else", "dtype", "else", ":", "dtype", "=", "mx_real_t", "if", "dtype", "is", "None", "else", "dtype", "if", "not", "isinstance", "(", "source_array", ",", "np", ".", "ndarray", ")", ":", "try", ":", "source_array", "=", "np", ".", "array", "(", "source_array", ",", "dtype", "=", "dtype", ")", "except", ":", "raise", "TypeError", "(", "'source_array must be array like object'", ")", "arr", "=", "empty", "(", "source_array", ".", "shape", ",", "ctx", ",", "dtype", ")", "arr", "[", ":", "]", "=", "source_array", "return", "arr" ]
Creates an array from any object exposing the array interface. Parameters ---------- source_array : array_like An object exposing the array interface, an object whose `__array__` method returns an array, or any (nested) sequence. ctx : Context, optional Device context (default is the current default context). dtype : str or numpy.dtype, optional The data type of the output array. The default dtype is ``source_array.dtype`` if `source_array` is an `NDArray`, `float32` otherwise. Returns ------- NDArray An `NDArray` with the same contents as the `source_array`.
[ "Creates", "an", "array", "from", "any", "object", "exposing", "the", "array", "interface", "." ]
python
train
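A short self-contained check of the constructor documented above; it only assumes MXNet and NumPy are installed.

    import numpy as np
    import mxnet as mx

    a = mx.nd.array([[1, 2], [3, 4]])                # default dtype is float32 for non-NDArray input
    b = mx.nd.array(np.ones((2, 3)), dtype='int32')  # dtype set explicitly
    print(a.dtype, b.dtype)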
andreikop/qutepart
qutepart/indenter/base.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/indenter/base.py#L132-L161
def findBracketBackward(self, block, column, bracket): """Search for a needle and return (block, column) Raise ValueError, if not found NOTE this method ignores comments """ if bracket in ('(', ')'): opening = '(' closing = ')' elif bracket in ('[', ']'): opening = '[' closing = ']' elif bracket in ('{', '}'): opening = '{' closing = '}' else: raise AssertionError('Invalid bracket "%s"' % bracket) depth = 1 for foundBlock, foundColumn, char in self.iterateCharsBackwardFrom(block, column): if not self._qpart.isComment(foundBlock.blockNumber(), foundColumn): if char == opening: depth = depth - 1 elif char == closing: depth = depth + 1 if depth == 0: return foundBlock, foundColumn else: raise ValueError('Not found')
[ "def", "findBracketBackward", "(", "self", ",", "block", ",", "column", ",", "bracket", ")", ":", "if", "bracket", "in", "(", "'('", ",", "')'", ")", ":", "opening", "=", "'('", "closing", "=", "')'", "elif", "bracket", "in", "(", "'['", ",", "']'", ")", ":", "opening", "=", "'['", "closing", "=", "']'", "elif", "bracket", "in", "(", "'{'", ",", "'}'", ")", ":", "opening", "=", "'{'", "closing", "=", "'}'", "else", ":", "raise", "AssertionError", "(", "'Invalid bracket \"%s\"'", "%", "bracket", ")", "depth", "=", "1", "for", "foundBlock", ",", "foundColumn", ",", "char", "in", "self", ".", "iterateCharsBackwardFrom", "(", "block", ",", "column", ")", ":", "if", "not", "self", ".", "_qpart", ".", "isComment", "(", "foundBlock", ".", "blockNumber", "(", ")", ",", "foundColumn", ")", ":", "if", "char", "==", "opening", ":", "depth", "=", "depth", "-", "1", "elif", "char", "==", "closing", ":", "depth", "=", "depth", "+", "1", "if", "depth", "==", "0", ":", "return", "foundBlock", ",", "foundColumn", "else", ":", "raise", "ValueError", "(", "'Not found'", ")" ]
Search for a needle and return (block, column) Raise ValueError, if not found NOTE this method ignores comments
[ "Search", "for", "a", "needle", "and", "return", "(", "block", "column", ")", "Raise", "ValueError", "if", "not", "found" ]
python
train
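Calling the method above needs a live Qutepart document, so here is a standalone sketch of the same backward depth-counting scan on a plain string; it illustrates the idea only and is not the qutepart API.

    def find_opening_bracket(text, pos, opening='(', closing=')'):
        # Scan left from the closing bracket at `pos` and return the index
        # of the matching opening bracket, mirroring the depth counter above.
        depth = 1
        for i in range(pos - 1, -1, -1):
            if text[i] == opening:
                depth -= 1
            elif text[i] == closing:
                depth += 1
            if depth == 0:
                return i
        raise ValueError('Not found')

    print(find_opening_bracket('f(a, g(b), c)', 12))   # -> 1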
google/grr
grr/client/grr_response_client/client_actions/tempfiles.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/tempfiles.py#L272-L346
def Run(self, args): """Delete all the GRR temp files in path. If path is a directory, look in the top level for filenames beginning with Client.tempfile_prefix, and delete them. If path is a regular file and starts with Client.tempfile_prefix delete it. Args: args: pathspec pointing to directory containing temp files to be deleted, or a single file to be deleted. Returns: deleted: array of filename strings that were deleted Raises: ErrorBadPath: if path doesn't exist or is not a regular file or directory """ allowed_temp_dirs = [ GetTempDirForRoot(root) for root in config.CONFIG["Client.tempdir_roots"] ] if args.path: # Normalize the path, so DeleteGRRTempFile can correctly check if # it is within Client.tempdir. path = utils.NormalizePath(args.path) if platform.system() == "Windows": # TODO: On non-Windows systems `CanonicalPathToLocalPath` # is equivalent to `SmartStr`, so it does nothing except for breaking # the types. However, a lot of code actually depends on this behaviour # so we cannot easily change it. As a workaround for now we simply do # not call it on Linux and macOS but ideally we should get rid of this # `SmartStr` call and not branch here. path = client_utils.CanonicalPathToLocalPath(path) paths = [path] else: paths = allowed_temp_dirs deleted = [] errors = [] for path in paths: if os.path.isdir(path): for filename in os.listdir(path): abs_filename = os.path.join(path, filename) try: DeleteGRRTempFile(abs_filename) deleted.append(abs_filename) except Exception as e: # pylint: disable=broad-except # The error we are most likely to get is ErrorNotTempFile but # especially on Windows there might be locking issues that raise # various WindowsErrors so we just catch them all and continue # deleting all other temp files in this directory. errors.append(e) elif os.path.isfile(path): DeleteGRRTempFile(path) deleted = [path] elif path not in allowed_temp_dirs: if not os.path.exists(path): raise ErrorBadPath("File %s does not exist" % path) else: raise ErrorBadPath("Not a regular file or directory: %s" % path) reply = "" if deleted: reply = "Deleted: %s." % deleted else: reply = "Nothing deleted." if errors: reply += "\n%s" % errors self.SendReply(rdf_client.LogMessage(data=reply))
[ "def", "Run", "(", "self", ",", "args", ")", ":", "allowed_temp_dirs", "=", "[", "GetTempDirForRoot", "(", "root", ")", "for", "root", "in", "config", ".", "CONFIG", "[", "\"Client.tempdir_roots\"", "]", "]", "if", "args", ".", "path", ":", "# Normalize the path, so DeleteGRRTempFile can correctly check if", "# it is within Client.tempdir.", "path", "=", "utils", ".", "NormalizePath", "(", "args", ".", "path", ")", "if", "platform", ".", "system", "(", ")", "==", "\"Windows\"", ":", "# TODO: On non-Windows systems `CanonicalPathToLocalPath`", "# is equivalent to `SmartStr`, so it does nothing except for breaking", "# the types. However, a lot of code actually depends on this behaviour", "# so we cannot easily change it. As a workaround for now we simply do", "# not call it on Linux and macOS but ideally we should get rid of this", "# `SmartStr` call and not branch here.", "path", "=", "client_utils", ".", "CanonicalPathToLocalPath", "(", "path", ")", "paths", "=", "[", "path", "]", "else", ":", "paths", "=", "allowed_temp_dirs", "deleted", "=", "[", "]", "errors", "=", "[", "]", "for", "path", "in", "paths", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "for", "filename", "in", "os", ".", "listdir", "(", "path", ")", ":", "abs_filename", "=", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", "try", ":", "DeleteGRRTempFile", "(", "abs_filename", ")", "deleted", ".", "append", "(", "abs_filename", ")", "except", "Exception", "as", "e", ":", "# pylint: disable=broad-except", "# The error we are most likely to get is ErrorNotTempFile but", "# especially on Windows there might be locking issues that raise", "# various WindowsErrors so we just catch them all and continue", "# deleting all other temp files in this directory.", "errors", ".", "append", "(", "e", ")", "elif", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "DeleteGRRTempFile", "(", "path", ")", "deleted", "=", "[", "path", "]", "elif", "path", "not", "in", "allowed_temp_dirs", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise", "ErrorBadPath", "(", "\"File %s does not exist\"", "%", "path", ")", "else", ":", "raise", "ErrorBadPath", "(", "\"Not a regular file or directory: %s\"", "%", "path", ")", "reply", "=", "\"\"", "if", "deleted", ":", "reply", "=", "\"Deleted: %s.\"", "%", "deleted", "else", ":", "reply", "=", "\"Nothing deleted.\"", "if", "errors", ":", "reply", "+=", "\"\\n%s\"", "%", "errors", "self", ".", "SendReply", "(", "rdf_client", ".", "LogMessage", "(", "data", "=", "reply", ")", ")" ]
Delete all the GRR temp files in path. If path is a directory, look in the top level for filenames beginning with Client.tempfile_prefix, and delete them. If path is a regular file and starts with Client.tempfile_prefix delete it. Args: args: pathspec pointing to directory containing temp files to be deleted, or a single file to be deleted. Returns: deleted: array of filename strings that were deleted Raises: ErrorBadPath: if path doesn't exist or is not a regular file or directory
[ "Delete", "all", "the", "GRR", "temp", "files", "in", "path", "." ]
python
train
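The client action above is tied to GRR's config and pathspec types, so the following is a simplified standalone sketch of the behaviour it describes (top-level scan, prefix check, per-file error collection); the 'tmp_grr_' prefix stands in for Client.tempfile_prefix and is an assumption.

    import os
    import tempfile

    def delete_prefixed_files(directory, prefix='tmp_grr_'):
        # Delete top-level files in `directory` whose names start with `prefix`,
        # collecting per-file errors instead of aborting on the first failure.
        deleted, errors = [], []
        for name in os.listdir(directory):
            path = os.path.join(directory, name)
            if name.startswith(prefix) and os.path.isfile(path):
                try:
                    os.remove(path)
                    deleted.append(path)
                except OSError as err:        # e.g. a locked file on Windows
                    errors.append(err)
        return deleted, errors

    workdir = tempfile.mkdtemp()
    open(os.path.join(workdir, 'tmp_grr_example'), 'w').close()
    print(delete_prefixed_files(workdir))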
spry-group/python-vultr
vultr/v1_server.py
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L14-L22
def bandwidth(self, subid, params=None): ''' /v1/server/bandwidth GET - account Get the bandwidth used by a virtual machine Link: https://www.vultr.com/api/#server_bandwidth ''' params = update_params(params, {'SUBID': subid}) return self.request('/v1/server/bandwidth', params, 'GET')
[ "def", "bandwidth", "(", "self", ",", "subid", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'SUBID'", ":", "subid", "}", ")", "return", "self", ".", "request", "(", "'/v1/server/bandwidth'", ",", "params", ",", "'GET'", ")" ]
/v1/server/bandwidth GET - account Get the bandwidth used by a virtual machine Link: https://www.vultr.com/api/#server_bandwidth
[ "/", "v1", "/", "server", "/", "bandwidth", "GET", "-", "account", "Get", "the", "bandwidth", "used", "by", "a", "virtual", "machine" ]
python
train
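A possible call pattern for the endpoint above, assuming the aggregate client exposes this v1_server module as a `server` attribute; the API key and SUBID are placeholders and the attribute name is an assumption, not confirmed by the record.

    from vultr import Vultr

    api = Vultr('EXAMPLE_API_KEY')           # placeholder API key
    usage = api.server.bandwidth('576965')   # placeholder SUBID
    print(usage)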
icometrix/dicom2nifti
dicom2nifti/convert_philips.py
https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L75-L95
def _is_multiframe_diffusion_imaging(dicom_input): """ Use this function to detect if a dicom series is a philips multiframe dti dataset NOTE: We already assue this is a 4D dataset as input """ header = dicom_input[0] if "PerFrameFunctionalGroupsSequence" not in header: return False # check if there is diffusion info in the frame found_diffusion = False diffusion_tag = Tag(0x0018, 0x9117) for frame in header.PerFrameFunctionalGroupsSequence: if diffusion_tag in frame: found_diffusion = True break if not found_diffusion: return False return True
[ "def", "_is_multiframe_diffusion_imaging", "(", "dicom_input", ")", ":", "header", "=", "dicom_input", "[", "0", "]", "if", "\"PerFrameFunctionalGroupsSequence\"", "not", "in", "header", ":", "return", "False", "# check if there is diffusion info in the frame", "found_diffusion", "=", "False", "diffusion_tag", "=", "Tag", "(", "0x0018", ",", "0x9117", ")", "for", "frame", "in", "header", ".", "PerFrameFunctionalGroupsSequence", ":", "if", "diffusion_tag", "in", "frame", ":", "found_diffusion", "=", "True", "break", "if", "not", "found_diffusion", ":", "return", "False", "return", "True" ]
Use this function to detect if a dicom series is a philips multiframe dti dataset NOTE: We already assume this is a 4D dataset as input
[ "Use", "this", "function", "to", "detect", "if", "a", "dicom", "series", "is", "a", "philips", "multiframe", "dti", "dataset", "NOTE", ":", "We", "already", "assue", "this", "is", "a", "4D", "dataset", "as", "input" ]
python
train
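A synthetic illustration of the tag check above, built with pydicom datasets; real input would be the list of datasets read from a Philips multiframe series, so the hand-built header here only shows the expected shape, and importing the leading-underscore helper is for demonstration only.

    from pydicom.dataset import Dataset
    from pydicom.sequence import Sequence

    from dicom2nifti.convert_philips import _is_multiframe_diffusion_imaging

    frame = Dataset()
    frame.MRDiffusionSequence = Sequence([Dataset()])     # tag (0018, 9117)
    header = Dataset()
    header.PerFrameFunctionalGroupsSequence = Sequence([frame])

    print(_is_multiframe_diffusion_imaging([header]))      # -> True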
apache/incubator-heron
heron/instance/src/python/basics/bolt_instance.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/instance/src/python/basics/bolt_instance.py#L247-L269
def ack(self, tup): """Indicate that processing of a Tuple has succeeded It is compatible with StreamParse API. """ if not isinstance(tup, HeronTuple): Log.error("Only HeronTuple type is supported in ack()") return if self.acking_enabled: ack_tuple = tuple_pb2.AckTuple() ack_tuple.ackedtuple = int(tup.id) tuple_size_in_bytes = 0 for rt in tup.roots: to_add = ack_tuple.roots.add() to_add.CopyFrom(rt) tuple_size_in_bytes += rt.ByteSize() super(BoltInstance, self).admit_control_tuple(ack_tuple, tuple_size_in_bytes, True) process_latency_ns = (time.time() - tup.creation_time) * system_constants.SEC_TO_NS self.pplan_helper.context.invoke_hook_bolt_ack(tup, process_latency_ns) self.bolt_metrics.acked_tuple(tup.stream, tup.component, process_latency_ns)
[ "def", "ack", "(", "self", ",", "tup", ")", ":", "if", "not", "isinstance", "(", "tup", ",", "HeronTuple", ")", ":", "Log", ".", "error", "(", "\"Only HeronTuple type is supported in ack()\"", ")", "return", "if", "self", ".", "acking_enabled", ":", "ack_tuple", "=", "tuple_pb2", ".", "AckTuple", "(", ")", "ack_tuple", ".", "ackedtuple", "=", "int", "(", "tup", ".", "id", ")", "tuple_size_in_bytes", "=", "0", "for", "rt", "in", "tup", ".", "roots", ":", "to_add", "=", "ack_tuple", ".", "roots", ".", "add", "(", ")", "to_add", ".", "CopyFrom", "(", "rt", ")", "tuple_size_in_bytes", "+=", "rt", ".", "ByteSize", "(", ")", "super", "(", "BoltInstance", ",", "self", ")", ".", "admit_control_tuple", "(", "ack_tuple", ",", "tuple_size_in_bytes", ",", "True", ")", "process_latency_ns", "=", "(", "time", ".", "time", "(", ")", "-", "tup", ".", "creation_time", ")", "*", "system_constants", ".", "SEC_TO_NS", "self", ".", "pplan_helper", ".", "context", ".", "invoke_hook_bolt_ack", "(", "tup", ",", "process_latency_ns", ")", "self", ".", "bolt_metrics", ".", "acked_tuple", "(", "tup", ".", "stream", ",", "tup", ".", "component", ",", "process_latency_ns", ")" ]
Indicate that processing of a Tuple has succeeded It is compatible with StreamParse API.
[ "Indicate", "that", "processing", "of", "a", "Tuple", "has", "succeeded" ]
python
valid
CitrineInformatics/pypif-sdk
pypif_sdk/func/calculate_funcs.py
https://github.com/CitrineInformatics/pypif-sdk/blob/8b01d10d9a1426d5eef12e4b2f31c4657aa0fe59/pypif_sdk/func/calculate_funcs.py#L277-L287
def _add_atomic_percents_(elemental_array): """ Adds ideal atomic percents to a emperical compositional element array generated using _create_emprical_compositional_array_() :param elemental_array: an array of dictionaries containing information about the elements in the system :return: the elemental_array with the atomic percent of each element added """ n_atoms = _calculate_n_atoms_(elemental_array) for e in elemental_array: e["atomic_percent"] = e["occurances"] / n_atoms * 100 return elemental_array
[ "def", "_add_atomic_percents_", "(", "elemental_array", ")", ":", "n_atoms", "=", "_calculate_n_atoms_", "(", "elemental_array", ")", "for", "e", "in", "elemental_array", ":", "e", "[", "\"atomic_percent\"", "]", "=", "e", "[", "\"occurances\"", "]", "/", "n_atoms", "*", "100", "return", "elemental_array" ]
Adds ideal atomic percents to an empirical compositional element array generated using _create_emprical_compositional_array_() :param elemental_array: an array of dictionaries containing information about the elements in the system :return: the elemental_array with the atomic percent of each element added
[ "Adds", "ideal", "atomic", "percents", "to", "a", "emperical", "compositional", "element", "array", "generated", "using", "_create_emprical_compositional_array_", "()" ]
python
train
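The same arithmetic written inline, since the helper functions referenced above are private to the module; the 'occurances' key spelling follows the source, and Python 3 division is assumed.

    # Water-like composition: two H and one O per formula unit.
    elemental_array = [
        {'symbol': 'H', 'occurances': 2},
        {'symbol': 'O', 'occurances': 1},
    ]
    n_atoms = sum(e['occurances'] for e in elemental_array)
    for e in elemental_array:
        e['atomic_percent'] = e['occurances'] / n_atoms * 100
    print(elemental_array)   # H about 66.67 %, O about 33.33 %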
LasLabs/python-five9
five9/models/base_model.py
https://github.com/LasLabs/python-five9/blob/ef53160d6658604524a2577391280d2b4501a7ce/five9/models/base_model.py#L206-L213
def __check_field(self, key): """Raises a KeyError if the field doesn't exist.""" if not self._props.get(key): raise KeyError( 'The field "%s" does not exist on "%s"' % ( key, self.__class__.__name__, ), )
[ "def", "__check_field", "(", "self", ",", "key", ")", ":", "if", "not", "self", ".", "_props", ".", "get", "(", "key", ")", ":", "raise", "KeyError", "(", "'The field \"%s\" does not exist on \"%s\"'", "%", "(", "key", ",", "self", ".", "__class__", ".", "__name__", ",", ")", ",", ")" ]
Raises a KeyError if the field doesn't exist.
[ "Raises", "a", "KeyError", "if", "the", "field", "doesn", "t", "exist", "." ]
python
train
crypto101/arthur
arthur/util.py
https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/util.py#L19-L40
def tee(self): """ Produces a new deferred and returns it. If this C{MultiDeferred} has not been fired (callbacked or errbacked) yet, the deferred will not have been fired yet either, but will be fired when and if this C{MultiDeferred} gets fired in the future. If this C{MultiDeferred} has been fired, returns a deferred synchronously fired with the same result. @return: A deferred that will fire with whatever this object is fired with. @rtype: L{Deferred} """ if self._result is not _NO_RESULT: if not self._isFailure: return succeed(self._result) else: return fail(self._result) d = Deferred() self._deferreds.append(d) return d
[ "def", "tee", "(", "self", ")", ":", "if", "self", ".", "_result", "is", "not", "_NO_RESULT", ":", "if", "not", "self", ".", "_isFailure", ":", "return", "succeed", "(", "self", ".", "_result", ")", "else", ":", "return", "fail", "(", "self", ".", "_result", ")", "d", "=", "Deferred", "(", ")", "self", ".", "_deferreds", ".", "append", "(", "d", ")", "return", "d" ]
Produces a new deferred and returns it. If this C{MultiDeferred} has not been fired (callbacked or errbacked) yet, the deferred will not have been fired yet either, but will be fired when and if this C{MultiDeferred} gets fired in the future. If this C{MultiDeferred} has been fired, returns a deferred synchronously fired with the same result. @return: A deferred that will fire with whatever this object is fired with. @rtype: L{Deferred}
[ "Produces", "a", "new", "deferred", "and", "returns", "it", ".", "If", "this", "C", "{", "MultiDeferred", "}", "has", "not", "been", "fired", "(", "callbacked", "or", "errbacked", ")", "yet", "the", "deferred", "will", "not", "have", "been", "fired", "yet", "either", "but", "will", "be", "fired", "when", "and", "if", "this", "C", "{", "MultiDeferred", "}", "gets", "fired", "in", "the", "future", ".", "If", "this", "C", "{", "MultiDeferred", "}", "has", "been", "fired", "returns", "a", "deferred", "synchronously", "fired", "with", "the", "same", "result", "." ]
python
train
czielinski/portfolioopt
portfolioopt/portfolioopt.py
https://github.com/czielinski/portfolioopt/blob/96ac25daab0c0dbc8933330a92ff31fb898112f2/portfolioopt/portfolioopt.py#L183-L246
def tangency_portfolio(cov_mat, exp_rets, allow_short=False): """ Computes a tangency portfolio, i.e. a maximum Sharpe ratio portfolio. Note: As the Sharpe ratio is not invariant with respect to leverage, it is not possible to construct non-trivial market neutral tangency portfolios. This is because for a positive initial Sharpe ratio the sharpe grows unbound with increasing leverage. Parameters ---------- cov_mat: pandas.DataFrame Covariance matrix of asset returns. exp_rets: pandas.Series Expected asset returns (often historical returns). allow_short: bool, optional If 'False' construct a long-only portfolio. If 'True' allow shorting, i.e. negative weights. Returns ------- weights: pandas.Series Optimal asset weights. """ if not isinstance(cov_mat, pd.DataFrame): raise ValueError("Covariance matrix is not a DataFrame") if not isinstance(exp_rets, pd.Series): raise ValueError("Expected returns is not a Series") if not cov_mat.index.equals(exp_rets.index): raise ValueError("Indices do not match") n = len(cov_mat) P = opt.matrix(cov_mat.values) q = opt.matrix(0.0, (n, 1)) # Constraints Gx <= h if not allow_short: # exp_rets*x >= 1 and x >= 0 G = opt.matrix(np.vstack((-exp_rets.values, -np.identity(n)))) h = opt.matrix(np.vstack((-1.0, np.zeros((n, 1))))) else: # exp_rets*x >= 1 G = opt.matrix(-exp_rets.values).T h = opt.matrix(-1.0) # Solve optsolvers.options['show_progress'] = False sol = optsolvers.qp(P, q, G, h) if sol['status'] != 'optimal': warnings.warn("Convergence problem") # Put weights into a labeled series weights = pd.Series(sol['x'], index=cov_mat.index) # Rescale weights, so that sum(weights) = 1 weights /= weights.sum() return weights
[ "def", "tangency_portfolio", "(", "cov_mat", ",", "exp_rets", ",", "allow_short", "=", "False", ")", ":", "if", "not", "isinstance", "(", "cov_mat", ",", "pd", ".", "DataFrame", ")", ":", "raise", "ValueError", "(", "\"Covariance matrix is not a DataFrame\"", ")", "if", "not", "isinstance", "(", "exp_rets", ",", "pd", ".", "Series", ")", ":", "raise", "ValueError", "(", "\"Expected returns is not a Series\"", ")", "if", "not", "cov_mat", ".", "index", ".", "equals", "(", "exp_rets", ".", "index", ")", ":", "raise", "ValueError", "(", "\"Indices do not match\"", ")", "n", "=", "len", "(", "cov_mat", ")", "P", "=", "opt", ".", "matrix", "(", "cov_mat", ".", "values", ")", "q", "=", "opt", ".", "matrix", "(", "0.0", ",", "(", "n", ",", "1", ")", ")", "# Constraints Gx <= h", "if", "not", "allow_short", ":", "# exp_rets*x >= 1 and x >= 0", "G", "=", "opt", ".", "matrix", "(", "np", ".", "vstack", "(", "(", "-", "exp_rets", ".", "values", ",", "-", "np", ".", "identity", "(", "n", ")", ")", ")", ")", "h", "=", "opt", ".", "matrix", "(", "np", ".", "vstack", "(", "(", "-", "1.0", ",", "np", ".", "zeros", "(", "(", "n", ",", "1", ")", ")", ")", ")", ")", "else", ":", "# exp_rets*x >= 1", "G", "=", "opt", ".", "matrix", "(", "-", "exp_rets", ".", "values", ")", ".", "T", "h", "=", "opt", ".", "matrix", "(", "-", "1.0", ")", "# Solve", "optsolvers", ".", "options", "[", "'show_progress'", "]", "=", "False", "sol", "=", "optsolvers", ".", "qp", "(", "P", ",", "q", ",", "G", ",", "h", ")", "if", "sol", "[", "'status'", "]", "!=", "'optimal'", ":", "warnings", ".", "warn", "(", "\"Convergence problem\"", ")", "# Put weights into a labeled series", "weights", "=", "pd", ".", "Series", "(", "sol", "[", "'x'", "]", ",", "index", "=", "cov_mat", ".", "index", ")", "# Rescale weights, so that sum(weights) = 1", "weights", "/=", "weights", ".", "sum", "(", ")", "return", "weights" ]
Computes a tangency portfolio, i.e. a maximum Sharpe ratio portfolio. Note: As the Sharpe ratio is not invariant with respect to leverage, it is not possible to construct non-trivial market neutral tangency portfolios. This is because for a positive initial Sharpe ratio the sharpe grows unbound with increasing leverage. Parameters ---------- cov_mat: pandas.DataFrame Covariance matrix of asset returns. exp_rets: pandas.Series Expected asset returns (often historical returns). allow_short: bool, optional If 'False' construct a long-only portfolio. If 'True' allow shorting, i.e. negative weights. Returns ------- weights: pandas.Series Optimal asset weights.
[ "Computes", "a", "tangency", "portfolio", "i", ".", "e", ".", "a", "maximum", "Sharpe", "ratio", "portfolio", ".", "Note", ":", "As", "the", "Sharpe", "ratio", "is", "not", "invariant", "with", "respect", "to", "leverage", "it", "is", "not", "possible", "to", "construct", "non", "-", "trivial", "market", "neutral", "tangency", "portfolios", ".", "This", "is", "because", "for", "a", "positive", "initial", "Sharpe", "ratio", "the", "sharpe", "grows", "unbound", "with", "increasing", "leverage", ".", "Parameters", "----------", "cov_mat", ":", "pandas", ".", "DataFrame", "Covariance", "matrix", "of", "asset", "returns", ".", "exp_rets", ":", "pandas", ".", "Series", "Expected", "asset", "returns", "(", "often", "historical", "returns", ")", ".", "allow_short", ":", "bool", "optional", "If", "False", "construct", "a", "long", "-", "only", "portfolio", ".", "If", "True", "allow", "shorting", "i", ".", "e", ".", "negative", "weights", "." ]
python
train
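A small end-to-end sketch of the optimizer above on made-up data; it assumes portfolioopt and its cvxopt dependency are installed, the import path follows this record's path field, and the covariance matrix and expected returns are invented for illustration.

    import numpy as np
    import pandas as pd

    from portfolioopt.portfolioopt import tangency_portfolio

    assets = ['A', 'B', 'C']
    cov_mat = pd.DataFrame([[0.04, 0.01, 0.00],
                            [0.01, 0.09, 0.02],
                            [0.00, 0.02, 0.16]],
                           index=assets, columns=assets)
    exp_rets = pd.Series([0.05, 0.08, 0.12], index=assets)

    weights = tangency_portfolio(cov_mat, exp_rets, allow_short=False)
    print(weights)   # long-only weights, rescaled to sum to 1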
jldbc/pybaseball
pybaseball/retrosheet.py
https://github.com/jldbc/pybaseball/blob/085ea26bfd1b5f5926d79d4fac985c88278115f2/pybaseball/retrosheet.py#L125-L133
def wild_card_logs(): """ Pull Retrosheet Wild Card Game Logs """ file_name = 'GLWC.TXT' z = get_zip_file(wild_card_url) data = pd.read_csv(z.open(file_name), header=None, sep=',', quotechar='"') data.columns = gamelog_columns return data
[ "def", "wild_card_logs", "(", ")", ":", "file_name", "=", "'GLWC.TXT'", "z", "=", "get_zip_file", "(", "wild_card_url", ")", "data", "=", "pd", ".", "read_csv", "(", "z", ".", "open", "(", "file_name", ")", ",", "header", "=", "None", ",", "sep", "=", "','", ",", "quotechar", "=", "'\"'", ")", "data", ".", "columns", "=", "gamelog_columns", "return", "data" ]
Pull Retrosheet Wild Card Game Logs
[ "Pull", "Retrosheet", "Wild", "Card", "Game", "Logs" ]
python
train
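A one-call usage sketch for the function above; it assumes network access, since the game logs are downloaded from Retrosheet on each call, and the import path follows this record's path field.

    from pybaseball.retrosheet import wild_card_logs

    logs = wild_card_logs()   # downloads and parses GLWC.TXT into a DataFrame
    print(logs.shape)
    print(logs.head())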
CalebBell/fluids
fluids/particle_size_distribution.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/particle_size_distribution.py#L1985-L2007
def vssa(self): r'''The volume-specific surface area of a particle size distribution. Note this uses the diameters provided by the method `Dis`. .. math:: \text{VSSA} = \sum_i \text{fraction}_i \frac{SA_i}{V_i} Returns ------- VSSA : float The volume-specific surface area of the distribution, [m^2/m^3] References ---------- .. [1] ISO 9276-2:2014 - Representation of Results of Particle Size Analysis - Part 2: Calculation of Average Particle Sizes/Diameters and Moments from Particle Size Distributions. ''' ds = self.Dis Vs = [pi/6*di**3 for di in ds] SAs = [pi*di**2 for di in ds] SASs = [SA/V for SA, V in zip(SAs, Vs)] VSSA = sum([fi*SASi for fi, SASi in zip(self.fractions, SASs)]) return VSSA
[ "def", "vssa", "(", "self", ")", ":", "ds", "=", "self", ".", "Dis", "Vs", "=", "[", "pi", "/", "6", "*", "di", "**", "3", "for", "di", "in", "ds", "]", "SAs", "=", "[", "pi", "*", "di", "**", "2", "for", "di", "in", "ds", "]", "SASs", "=", "[", "SA", "/", "V", "for", "SA", ",", "V", "in", "zip", "(", "SAs", ",", "Vs", ")", "]", "VSSA", "=", "sum", "(", "[", "fi", "*", "SASi", "for", "fi", ",", "SASi", "in", "zip", "(", "self", ".", "fractions", ",", "SASs", ")", "]", ")", "return", "VSSA" ]
r'''The volume-specific surface area of a particle size distribution. Note this uses the diameters provided by the method `Dis`. .. math:: \text{VSSA} = \sum_i \text{fraction}_i \frac{SA_i}{V_i} Returns ------- VSSA : float The volume-specific surface area of the distribution, [m^2/m^3] References ---------- .. [1] ISO 9276-2:2014 - Representation of Results of Particle Size Analysis - Part 2: Calculation of Average Particle Sizes/Diameters and Moments from Particle Size Distributions.
[ "r", "The", "volume", "-", "specific", "surface", "area", "of", "a", "particle", "size", "distribution", ".", "Note", "this", "uses", "the", "diameters", "provided", "by", "the", "method", "Dis", ".", "..", "math", "::", "\\", "text", "{", "VSSA", "}", "=", "\\", "sum_i", "\\", "text", "{", "fraction", "}", "_i", "\\", "frac", "{", "SA_i", "}", "{", "V_i", "}", "Returns", "-------", "VSSA", ":", "float", "The", "volume", "-", "specific", "surface", "area", "of", "the", "distribution", "[", "m^2", "/", "m^3", "]" ]
python
train
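The formula above rendered as a standalone computation, since the class supplies `Dis` and `fractions` internally; the two bin diameters and fractions below are invented for illustration and this is not a call into the fluids API.

    from math import pi

    Dis = [1e-6, 5e-6]        # representative bin diameters, m
    fractions = [0.6, 0.4]    # fraction of particles in each bin

    SASs = [(pi * d**2) / (pi / 6 * d**3) for d in Dis]   # per-size SA/V, equals 6/d
    VSSA = sum(f * s for f, s in zip(fractions, SASs))
    print(VSSA)               # volume-specific surface area, m^2/m^3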
tanghaibao/jcvi
jcvi/compara/reconstruct.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/reconstruct.py#L111-L186
def adjgraph(args): """ %prog adjgraph adjacency.txt subgraph.txt Construct adjacency graph for graphviz. The file may look like sample below. The lines with numbers are chromosomes with gene order information. genome 0 chr 0 -1 -13 -16 3 4 -6126 -5 17 -6 7 18 5357 8 -5358 5359 -9 -10 -11 5362 5360 chr 1 138 6133 -5387 144 -6132 -139 140 141 146 -147 6134 145 -170 -142 -143 """ import pygraphviz as pgv from jcvi.utils.iter import pairwise from jcvi.formats.base import SetFile p = OptionParser(adjgraph.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) infile, subgraph = args subgraph = SetFile(subgraph) subgraph = set(x.strip("-") for x in subgraph) G = pgv.AGraph(strict=False) # allow multi-edge SG = pgv.AGraph(strict=False) palette = ("green", "magenta", "tomato", "peachpuff") fp = open(infile) genome_id = -1 key = 0 for row in fp: if row.strip() == "": continue atoms = row.split() tag = atoms[0] if tag in ("ChrNumber", "chr"): continue if tag == "genome": genome_id += 1 gcolor = palette[genome_id] continue nodeseq = [] for p in atoms: np = p.strip("-") nodeL, nodeR = np + "L", np + "R" if p[0] == "-": # negative strand nodeseq += [nodeR, nodeL] else: nodeseq += [nodeL, nodeR] for a, b in pairwise(nodeseq): G.add_edge(a, b, key, color=gcolor) key += 1 na, nb = a[:-1], b[:-1] if na not in subgraph and nb not in subgraph: continue SG.add_edge(a, b, key, color=gcolor) G.graph_attr.update(dpi="300") fw = open("graph.dot", "w") G.write(fw) fw.close() fw = open("subgraph.dot", "w") SG.write(fw) fw.close()
[ "def", "adjgraph", "(", "args", ")", ":", "import", "pygraphviz", "as", "pgv", "from", "jcvi", ".", "utils", ".", "iter", "import", "pairwise", "from", "jcvi", ".", "formats", ".", "base", "import", "SetFile", "p", "=", "OptionParser", "(", "adjgraph", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "infile", ",", "subgraph", "=", "args", "subgraph", "=", "SetFile", "(", "subgraph", ")", "subgraph", "=", "set", "(", "x", ".", "strip", "(", "\"-\"", ")", "for", "x", "in", "subgraph", ")", "G", "=", "pgv", ".", "AGraph", "(", "strict", "=", "False", ")", "# allow multi-edge", "SG", "=", "pgv", ".", "AGraph", "(", "strict", "=", "False", ")", "palette", "=", "(", "\"green\"", ",", "\"magenta\"", ",", "\"tomato\"", ",", "\"peachpuff\"", ")", "fp", "=", "open", "(", "infile", ")", "genome_id", "=", "-", "1", "key", "=", "0", "for", "row", "in", "fp", ":", "if", "row", ".", "strip", "(", ")", "==", "\"\"", ":", "continue", "atoms", "=", "row", ".", "split", "(", ")", "tag", "=", "atoms", "[", "0", "]", "if", "tag", "in", "(", "\"ChrNumber\"", ",", "\"chr\"", ")", ":", "continue", "if", "tag", "==", "\"genome\"", ":", "genome_id", "+=", "1", "gcolor", "=", "palette", "[", "genome_id", "]", "continue", "nodeseq", "=", "[", "]", "for", "p", "in", "atoms", ":", "np", "=", "p", ".", "strip", "(", "\"-\"", ")", "nodeL", ",", "nodeR", "=", "np", "+", "\"L\"", ",", "np", "+", "\"R\"", "if", "p", "[", "0", "]", "==", "\"-\"", ":", "# negative strand", "nodeseq", "+=", "[", "nodeR", ",", "nodeL", "]", "else", ":", "nodeseq", "+=", "[", "nodeL", ",", "nodeR", "]", "for", "a", ",", "b", "in", "pairwise", "(", "nodeseq", ")", ":", "G", ".", "add_edge", "(", "a", ",", "b", ",", "key", ",", "color", "=", "gcolor", ")", "key", "+=", "1", "na", ",", "nb", "=", "a", "[", ":", "-", "1", "]", ",", "b", "[", ":", "-", "1", "]", "if", "na", "not", "in", "subgraph", "and", "nb", "not", "in", "subgraph", ":", "continue", "SG", ".", "add_edge", "(", "a", ",", "b", ",", "key", ",", "color", "=", "gcolor", ")", "G", ".", "graph_attr", ".", "update", "(", "dpi", "=", "\"300\"", ")", "fw", "=", "open", "(", "\"graph.dot\"", ",", "\"w\"", ")", "G", ".", "write", "(", "fw", ")", "fw", ".", "close", "(", ")", "fw", "=", "open", "(", "\"subgraph.dot\"", ",", "\"w\"", ")", "SG", ".", "write", "(", "fw", ")", "fw", ".", "close", "(", ")" ]
%prog adjgraph adjacency.txt subgraph.txt Construct adjacency graph for graphviz. The file may look like sample below. The lines with numbers are chromosomes with gene order information. genome 0 chr 0 -1 -13 -16 3 4 -6126 -5 17 -6 7 18 5357 8 -5358 5359 -9 -10 -11 5362 5360 chr 1 138 6133 -5387 144 -6132 -139 140 141 146 -147 6134 145 -170 -142 -143
[ "%prog", "adjgraph", "adjacency", ".", "txt", "subgraph", ".", "txt" ]
python
train
thieman/dagobah
dagobah/core/core.py
https://github.com/thieman/dagobah/blob/e624180c2291034960302c9e0b818b65b5a7ee11/dagobah/core/core.py#L888-L893
def local_not_complete(self): """ Returns True if task is local and not completed""" if self.process and self.process.poll() is None: self._timeout_check() return True return False
[ "def", "local_not_complete", "(", "self", ")", ":", "if", "self", ".", "process", "and", "self", ".", "process", ".", "poll", "(", ")", "is", "None", ":", "self", ".", "_timeout_check", "(", ")", "return", "True", "return", "False" ]
Returns True if task is local and not completed
[ "Returns", "True", "if", "task", "is", "local", "and", "not", "completed" ]
python
train
peercoin/peercoin_rpc
peercoin_rpc/peercoin_rpc.py
https://github.com/peercoin/peercoin_rpc/blob/6edd854c7fd607ad9f6f4d5eb8b8b7c7fd8c16cc/peercoin_rpc/peercoin_rpc.py#L191-L194
def sendmany(self, recv_dict, account="", comment=""): """send outgoing tx to many addresses, input is dict of addr:coins, returns txid""" # {"addr1":#coin,"addr2":#coin,"addr3":#coin...} return self.req("sendmany", [account, recv_dict, comment])
[ "def", "sendmany", "(", "self", ",", "recv_dict", ",", "account", "=", "\"\"", ",", "comment", "=", "\"\"", ")", ":", "# {\"addr1\":#coin,\"addr2\":#coin,\"addr3\":#coin...}", "return", "self", ".", "req", "(", "\"sendmany\"", ",", "[", "account", ",", "recv_dict", ",", "comment", "]", ")" ]
send outgoing tx to many addresses, input is dict of addr:coins, returns txid
[ "send", "outgoing", "tx", "to", "many", "addresses", "input", "is", "dict", "of", "addr", ":", "coins", "returns", "txid" ]
python
train
noahbenson/neuropythy
neuropythy/mri/images.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/mri/images.py#L120-L129
def create(self, arr, meta_data={}, **kwargs): ''' itype.create(dataobj) yields an image of the given image type itype that represents the given data object dataobj. itype.create(dataobj, meta_data) uses the given meta/header data to create the image. Any number of keyword arguments may also be appended to the call; these are merged into the meta_data argument. ''' return self.to_image(arr, hdat=pimms.merge(meta_data, kwargs))
[ "def", "create", "(", "self", ",", "arr", ",", "meta_data", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "to_image", "(", "arr", ",", "hdat", "=", "pimms", ".", "merge", "(", "meta_data", ",", "kwargs", ")", ")" ]
itype.create(dataobj) yields an image of the given image type itype that represents the given data object dataobj. itype.create(dataobj, meta_data) uses the given meta/header data to create the image. Any number of keyword arguments may also be appended to the call; these are merged into the meta_data argument.
[ "itype", ".", "create", "(", "dataobj", ")", "yields", "an", "image", "of", "the", "given", "image", "type", "itype", "that", "represents", "the", "given", "data", "object", "dataobj", ".", "itype", ".", "create", "(", "dataobj", "meta_data", ")", "uses", "the", "given", "meta", "/", "header", "data", "to", "create", "the", "image", "." ]
python
train
jepegit/cellpy
cellpy/readers/core.py
https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/readers/core.py#L98-L103
def get_raw(self): """Get a list with information about the file. The returned list contains name, size, last_modified and location. """ return [self.name, self.size, self.last_modified, self.location]
[ "def", "get_raw", "(", "self", ")", ":", "return", "[", "self", ".", "name", ",", "self", ".", "size", ",", "self", ".", "last_modified", ",", "self", ".", "location", "]" ]
Get a list with information about the file. The returned list contains name, size, last_modified and location.
[ "Get", "a", "list", "with", "information", "about", "the", "file", "." ]
python
train