Dataset schema (column, dtype, observed range):

  repo              string    length 7 - 55
  path              string    length 4 - 223
  url               string    length 87 - 315
  code              string    length 75 - 104k
  code_tokens       list
  docstring         string    length 1 - 46.9k
  docstring_tokens  list
  language          string    1 distinct value (python)
  partition         string    3 distinct values
  avg_line_len      float64   7.91 - 980
camsci/meteor-pi
src/pythonModules/meteorpi_client/meteorpi_client/__init__.py
https://github.com/camsci/meteor-pi/blob/7b01527650bd1b2b76d6f364e8122e25b8812c8d/src/pythonModules/meteorpi_client/meteorpi_client/__init__.py#L190-L196
def _augment_observation_files(self, e): """ Augment all the file records in an event :internal: """ e.file_records = [self._augment_file(f) for f in e.file_records] return e
[ "def", "_augment_observation_files", "(", "self", ",", "e", ")", ":", "e", ".", "file_records", "=", "[", "self", ".", "_augment_file", "(", "f", ")", "for", "f", "in", "e", ".", "file_records", "]", "return", "e" ]
Augment all the file records in an event :internal:
[ "Augment", "all", "the", "file", "records", "in", "an", "event", ":", "internal", ":" ]
python
train
30.857143
Godley/MuseParse
MuseParse/classes/ObjectHierarchy/TreeClasses/MeasureNode.py
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/ObjectHierarchy/TreeClasses/MeasureNode.py#L247-L284
def Backup(self, duration=0): ''' method to use when a backup tag is encountered in musicXML. Moves back in the bar by <duration> :param duration: :return: ''' total = 0 duration_total = duration * 4 children = self.GetChildrenIndexes() notes = 0 for voice in children: v = self.GetChild(voice) indexes = v.GetChildrenIndexes() if len(indexes) > 1: indexes.reverse() for index in indexes: notes += 1 note = v.GetChild(index) if hasattr(note, "duration"): total += note.duration if total >= duration_total: break gap = [ v.GetChild(i).duration for i in range( 0, self.index - notes) if hasattr( v.GetChild(i), "duration")] previous = 0 for item in gap: if item == previous: self.gap -= previous item = item / 2 self.gap += item previous = item #self.gap = sum([]) self.index -= notes
[ "def", "Backup", "(", "self", ",", "duration", "=", "0", ")", ":", "total", "=", "0", "duration_total", "=", "duration", "*", "4", "children", "=", "self", ".", "GetChildrenIndexes", "(", ")", "notes", "=", "0", "for", "voice", "in", "children", ":", "v", "=", "self", ".", "GetChild", "(", "voice", ")", "indexes", "=", "v", ".", "GetChildrenIndexes", "(", ")", "if", "len", "(", "indexes", ")", ">", "1", ":", "indexes", ".", "reverse", "(", ")", "for", "index", "in", "indexes", ":", "notes", "+=", "1", "note", "=", "v", ".", "GetChild", "(", "index", ")", "if", "hasattr", "(", "note", ",", "\"duration\"", ")", ":", "total", "+=", "note", ".", "duration", "if", "total", ">=", "duration_total", ":", "break", "gap", "=", "[", "v", ".", "GetChild", "(", "i", ")", ".", "duration", "for", "i", "in", "range", "(", "0", ",", "self", ".", "index", "-", "notes", ")", "if", "hasattr", "(", "v", ".", "GetChild", "(", "i", ")", ",", "\"duration\"", ")", "]", "previous", "=", "0", "for", "item", "in", "gap", ":", "if", "item", "==", "previous", ":", "self", ".", "gap", "-=", "previous", "item", "=", "item", "/", "2", "self", ".", "gap", "+=", "item", "previous", "=", "item", "#self.gap = sum([])", "self", ".", "index", "-=", "notes" ]
method to use when a backup tag is encountered in musicXML. Moves back in the bar by <duration> :param duration: :return:
[ "method", "to", "use", "when", "a", "backup", "tag", "is", "encountered", "in", "musicXML", ".", "Moves", "back", "in", "the", "bar", "by", "<duration", ">", ":", "param", "duration", ":", ":", "return", ":" ]
python
train
33.184211
versae/qbe
django_qbe/utils.py
https://github.com/versae/qbe/blob/be8b28de5bc67cf527ae5bcb183bffe5a91a41db/django_qbe/utils.py#L444-L447
def pickle_encode(session_dict): "Returns the given session dictionary pickled and encoded as a string." pickled = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL) return base64.encodestring(pickled + get_query_hash(pickled).encode())
[ "def", "pickle_encode", "(", "session_dict", ")", ":", "pickled", "=", "pickle", ".", "dumps", "(", "session_dict", ",", "pickle", ".", "HIGHEST_PROTOCOL", ")", "return", "base64", ".", "encodestring", "(", "pickled", "+", "get_query_hash", "(", "pickled", ")", ".", "encode", "(", ")", ")" ]
Returns the given session dictionary pickled and encoded as a string.
[ "Returns", "the", "given", "session", "dictionary", "pickled", "and", "encoded", "as", "a", "string", "." ]
python
train
61.5
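base64.encodestring(), used in the versae/qbe record above, was deprecated in Python 3.1 and removed in 3.9. A minimal sketch of the same pickle-then-base64 pattern on a modern interpreter, with a hypothetical stand-in for the repo's get_query_hash() helper (which this record does not show):

    import base64
    import hashlib
    import pickle

    def pickle_encode(session_dict, key=b"secret"):
        # Hypothetical integrity digest standing in for get_query_hash().
        pickled = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL)
        digest = hashlib.sha1(key + pickled).hexdigest().encode()
        # base64.encodestring() is gone since Python 3.9; encodebytes() replaces it.
        return base64.encodebytes(pickled + digest)

Whatever the digest, unpickling untrusted input remains unsafe; the hash only guards against accidental corruption.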
combust/mleap
python/mleap/sklearn/preprocessing/data.py
https://github.com/combust/mleap/blob/dc6b79db03ec27a0ba08b289842551e73d517ab3/python/mleap/sklearn/preprocessing/data.py#L1004-L1027
def transform(self, y): """ Transform features per specified math function. :param y: :return: """ if isinstance(y, pd.DataFrame): x = y.ix[:,0] y = y.ix[:,1] else: x = y[:,0] y = y[:,1] if self.transform_type == 'add': return pd.DataFrame(np.add(x, y)) elif self.transform_type == 'sub': return pd.DataFrame(np.subtract(x, y)) elif self.transform_type == 'mul': return pd.DataFrame(np.multiply(x, y)) elif self.transform_type == 'div': return pd.DataFrame(np.divide(x, y)) elif self.transform_type == 'rem': return pd.DataFrame(np.remainder(x, y)) elif self.transform_type == 'pow': return pd.DataFrame(x**y)
[ "def", "transform", "(", "self", ",", "y", ")", ":", "if", "isinstance", "(", "y", ",", "pd", ".", "DataFrame", ")", ":", "x", "=", "y", ".", "ix", "[", ":", ",", "0", "]", "y", "=", "y", ".", "ix", "[", ":", ",", "1", "]", "else", ":", "x", "=", "y", "[", ":", ",", "0", "]", "y", "=", "y", "[", ":", ",", "1", "]", "if", "self", ".", "transform_type", "==", "'add'", ":", "return", "pd", ".", "DataFrame", "(", "np", ".", "add", "(", "x", ",", "y", ")", ")", "elif", "self", ".", "transform_type", "==", "'sub'", ":", "return", "pd", ".", "DataFrame", "(", "np", ".", "subtract", "(", "x", ",", "y", ")", ")", "elif", "self", ".", "transform_type", "==", "'mul'", ":", "return", "pd", ".", "DataFrame", "(", "np", ".", "multiply", "(", "x", ",", "y", ")", ")", "elif", "self", ".", "transform_type", "==", "'div'", ":", "return", "pd", ".", "DataFrame", "(", "np", ".", "divide", "(", "x", ",", "y", ")", ")", "elif", "self", ".", "transform_type", "==", "'rem'", ":", "return", "pd", ".", "DataFrame", "(", "np", ".", "remainder", "(", "x", ",", "y", ")", ")", "elif", "self", ".", "transform_type", "==", "'pow'", ":", "return", "pd", ".", "DataFrame", "(", "x", "**", "y", ")" ]
Transform features per specified math function. :param y: :return:
[ "Transform", "features", "per", "specified", "math", "function", ".", ":", "param", "y", ":", ":", "return", ":" ]
python
train
33.75
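DataFrame.ix, used in the combust/mleap record above, was deprecated in pandas 0.20 and removed in pandas 1.0. A hedged sketch of just the 'add' branch using positional .iloc indexing (illustrative only, not the mleap API):

    import numpy as np
    import pandas as pd

    def transform_add(df):
        # Positional column selection; .ix no longer exists in pandas >= 1.0.
        x, y = df.iloc[:, 0], df.iloc[:, 1]
        return pd.DataFrame(np.add(x, y))

    print(transform_add(pd.DataFrame({"a": [1, 2], "b": [3, 4]})))  # rows: 4, 6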
delfick/harpoon
harpoon/collector.py
https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/collector.py#L110-L116
def find_harpoon_options(self, configuration, args_dict): """Return us all the harpoon options""" d = lambda r: {} if r in (None, "", NotSpecified) else r return MergedOptions.using( dict(d(configuration.get('harpoon')).items()) , dict(d(args_dict.get("harpoon")).items()) ).as_dict()
[ "def", "find_harpoon_options", "(", "self", ",", "configuration", ",", "args_dict", ")", ":", "d", "=", "lambda", "r", ":", "{", "}", "if", "r", "in", "(", "None", ",", "\"\"", ",", "NotSpecified", ")", "else", "r", "return", "MergedOptions", ".", "using", "(", "dict", "(", "d", "(", "configuration", ".", "get", "(", "'harpoon'", ")", ")", ".", "items", "(", ")", ")", ",", "dict", "(", "d", "(", "args_dict", ".", "get", "(", "\"harpoon\"", ")", ")", ".", "items", "(", ")", ")", ")", ".", "as_dict", "(", ")" ]
Return us all the harpoon options
[ "Return", "us", "all", "the", "harpoon", "options" ]
python
train
48.571429
mikicz/arca
arca/backend/base.py
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/base.py#L92-L97
def hash_file_contents(requirements_option: RequirementsOptions, path: Path) -> str: """ Returns a SHA256 hash of the contents of ``path`` combined with the Arca version. """ return hashlib.sha256(path.read_bytes() + bytes( requirements_option.name + arca.__version__, "utf-8" )).hexdigest()
[ "def", "hash_file_contents", "(", "requirements_option", ":", "RequirementsOptions", ",", "path", ":", "Path", ")", "->", "str", ":", "return", "hashlib", ".", "sha256", "(", "path", ".", "read_bytes", "(", ")", "+", "bytes", "(", "requirements_option", ".", "name", "+", "arca", ".", "__version__", ",", "\"utf-8\"", ")", ")", ".", "hexdigest", "(", ")" ]
Returns a SHA256 hash of the contents of ``path`` combined with the Arca version.
[ "Returns", "a", "SHA256", "hash", "of", "the", "contents", "of", "path", "combined", "with", "the", "Arca", "version", "." ]
python
train
55
AndresMWeber/Nomenclate
nomenclate/core/nameparser.py
https://github.com/AndresMWeber/Nomenclate/blob/e6d6fc28beac042bad588e56fbe77531d2de6b6f/nomenclate/core/nameparser.py#L390-L421
def is_valid_camel(cls, input_string, strcmp=None, ignore=''): """ Checks to see if an input string is valid for use in camel casing This assumes that all lowercase strings are not valid camel case situations and no camel string can just be a capitalized word. Took ideas from here: http://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python :param input_string: str, input word :param strcmp: str, force detection on a substring just in case its undetectable (e.g. part of a section of text that's all lowercase) :param ignore: str, what kind of string to ignore in the regex search :return: bool, whether it is valid or not """ # clear any non chars from the string if not input_string: return False input_string = ''.join([c for c in input_string if c.isalpha()]) matches = cls._get_regex_search(input_string, cls.REGEX_CAMEL.format(SEP=cls.REGEX_SEPARATORS), match_index=0, ignore=ignore) if matches or input_string == strcmp: if strcmp: index = input_string.find(strcmp) - 1 is_camel = strcmp[0].isupper() and input_string[index].islower() is_input = strcmp == input_string is_start = index + 1 == 0 return is_camel or is_input or is_start return True elif len(input_string) == 1: return True return False
[ "def", "is_valid_camel", "(", "cls", ",", "input_string", ",", "strcmp", "=", "None", ",", "ignore", "=", "''", ")", ":", "# clear any non chars from the string", "if", "not", "input_string", ":", "return", "False", "input_string", "=", "''", ".", "join", "(", "[", "c", "for", "c", "in", "input_string", "if", "c", ".", "isalpha", "(", ")", "]", ")", "matches", "=", "cls", ".", "_get_regex_search", "(", "input_string", ",", "cls", ".", "REGEX_CAMEL", ".", "format", "(", "SEP", "=", "cls", ".", "REGEX_SEPARATORS", ")", ",", "match_index", "=", "0", ",", "ignore", "=", "ignore", ")", "if", "matches", "or", "input_string", "==", "strcmp", ":", "if", "strcmp", ":", "index", "=", "input_string", ".", "find", "(", "strcmp", ")", "-", "1", "is_camel", "=", "strcmp", "[", "0", "]", ".", "isupper", "(", ")", "and", "input_string", "[", "index", "]", ".", "islower", "(", ")", "is_input", "=", "strcmp", "==", "input_string", "is_start", "=", "index", "+", "1", "==", "0", "return", "is_camel", "or", "is_input", "or", "is_start", "return", "True", "elif", "len", "(", "input_string", ")", "==", "1", ":", "return", "True", "return", "False" ]
Checks to see if an input string is valid for use in camel casing This assumes that all lowercase strings are not valid camel case situations and no camel string can just be a capitalized word. Took ideas from here: http://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python :param input_string: str, input word :param strcmp: str, force detection on a substring just in case its undetectable (e.g. part of a section of text that's all lowercase) :param ignore: str, what kind of string to ignore in the regex search :return: bool, whether it is valid or not
[ "Checks", "to", "see", "if", "an", "input", "string", "is", "valid", "for", "use", "in", "camel", "casing", "This", "assumes", "that", "all", "lowercase", "strings", "are", "not", "valid", "camel", "case", "situations", "and", "no", "camel", "string", "can", "just", "be", "a", "capitalized", "word", ".", "Took", "ideas", "from", "here", ":", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "29916065", "/", "how", "-", "to", "-", "do", "-", "camelcase", "-", "split", "-", "in", "-", "python" ]
python
train
50.59375
ssalentin/plip
plip/modules/supplemental.py
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/supplemental.py#L274-L289
def ring_is_planar(ring, r_atoms): """Given a set of ring atoms, check if the ring is sufficiently planar to be considered aromatic""" normals = [] for a in r_atoms: adj = pybel.ob.OBAtomAtomIter(a.OBAtom) # Check for neighboring atoms in the ring n_coords = [pybel.Atom(neigh).coords for neigh in adj if ring.IsMember(neigh)] vec1, vec2 = vector(a.coords, n_coords[0]), vector(a.coords, n_coords[1]) normals.append(np.cross(vec1, vec2)) # Given all normals of ring atoms and their neighbors, the angle between any has to be 5.0 deg or less for n1, n2 in itertools.product(normals, repeat=2): arom_angle = vecangle(n1, n2) if all([arom_angle > config.AROMATIC_PLANARITY, arom_angle < 180.0 - config.AROMATIC_PLANARITY]): return False return True
[ "def", "ring_is_planar", "(", "ring", ",", "r_atoms", ")", ":", "normals", "=", "[", "]", "for", "a", "in", "r_atoms", ":", "adj", "=", "pybel", ".", "ob", ".", "OBAtomAtomIter", "(", "a", ".", "OBAtom", ")", "# Check for neighboring atoms in the ring", "n_coords", "=", "[", "pybel", ".", "Atom", "(", "neigh", ")", ".", "coords", "for", "neigh", "in", "adj", "if", "ring", ".", "IsMember", "(", "neigh", ")", "]", "vec1", ",", "vec2", "=", "vector", "(", "a", ".", "coords", ",", "n_coords", "[", "0", "]", ")", ",", "vector", "(", "a", ".", "coords", ",", "n_coords", "[", "1", "]", ")", "normals", ".", "append", "(", "np", ".", "cross", "(", "vec1", ",", "vec2", ")", ")", "# Given all normals of ring atoms and their neighbors, the angle between any has to be 5.0 deg or less", "for", "n1", ",", "n2", "in", "itertools", ".", "product", "(", "normals", ",", "repeat", "=", "2", ")", ":", "arom_angle", "=", "vecangle", "(", "n1", ",", "n2", ")", "if", "all", "(", "[", "arom_angle", ">", "config", ".", "AROMATIC_PLANARITY", ",", "arom_angle", "<", "180.0", "-", "config", ".", "AROMATIC_PLANARITY", "]", ")", ":", "return", "False", "return", "True" ]
Given a set of ring atoms, check if the ring is sufficiently planar to be considered aromatic
[ "Given", "a", "set", "of", "ring", "atoms", "check", "if", "the", "ring", "is", "sufficiently", "planar", "to", "be", "considered", "aromatic" ]
python
train
51.625
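The vector() and vecangle() helpers called by ring_is_planar above are not part of this record; a plausible NumPy reading of them, assuming vecangle returns degrees as the 5.0-degree comment implies:

    import numpy as np

    def vector(p1, p2):
        # Difference vector between two 3D coordinates.
        return np.array(p2) - np.array(p1)

    def vecangle(v1, v2):
        # Angle between two vectors in degrees, clipped for numerical safety.
        cosang = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
        return np.degrees(np.arccos(np.clip(cosang, -1.0, 1.0)))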
cni/MRS
MRS/analysis.py
https://github.com/cni/MRS/blob/16098b3cf4830780efd787fee9efa46513850283/MRS/analysis.py#L99-L218
def coil_combine(data, w_idx=[1,2,3], coil_dim=2, sampling_rate=5000.): """ Combine data across coils based on the amplitude of the water peak, according to: .. math:: X = \sum_{i}{w_i S_i} Where X is the resulting combined signal, $S_i$ are the individual coil signals and $w_i$ are calculated as: .. math:: w_i = mean(S_i) / var (S_i) following [Hall2013]_. In addition, we apply a phase-correction, so that all the phases of the signals from each coil are 0 Parameters ---------- data : float array The data as it comes from the scanner, with shape (transients, echos, coils, time points) w_idx : list The indices to the non-water-suppressed transients. Per default we take the 2nd-4th transients. We dump the first one, because it seems to be quite different than the rest of them... coil_dim : int The dimension on which the coils are represented. Default: 2 sampling rate : float The sampling rate in Hz. Default : 5000. References ---------- .. [Hall2013] Emma L. Hall, Mary C. Stephenson, Darren Price, Peter G. Morris (2013). Methodology for improved detection of low concentration metabolites in MRS: Optimised combination of signals from multi-element coil arrays. Neuroimage 86: 35-42. .. [Wald1997] Wald, L. and Wright, S. (1997). Theory and application of array coils in MR spectroscopy. NMR in Biomedicine, 10: 394-410. .. [Keeler2005] Keeler, J (2005). Understanding NMR spectroscopy, second edition. Wiley (West Sussex, UK). """ w_data, w_supp_data = separate_signals(data, w_idx) fft_w = np.fft.fftshift(fft.fft(w_data)) fft_w_supp = np.fft.fftshift(fft.fft(w_supp_data)) freqs_w = np.linspace(-sampling_rate/2.0, sampling_rate/2.0, w_data.shape[-1]) # To determine phase and amplitude, fit a Lorentzian line-shape to each # coils data in each trial: # No bounds except for on the phase: bounds = [(None,None), (0,None), (0,None), (-np.pi, np.pi), (None,None), (None, None)] n_params = len(bounds) params = np.zeros(fft_w.shape[:-1] + (n_params,)) # Let's fit a Lorentzian line-shape to each one of these: for repeat in range(w_data.shape[0]): for echo in range(w_data.shape[1]): for coil in range(w_data.shape[2]): sig = fft_w[repeat, echo, coil] # Use the private function to do this: params[repeat, echo, coil] = _do_lorentzian_fit(freqs_w, sig, bounds) # The area parameter stands for the magnitude: area_w = params[..., 1] # In each coil, we derive S/(N^2): s = np.mean(area_w.reshape(-1, area_w.shape[-1]), 0) n = np.var(area_w.reshape(-1, area_w.shape[-1]), 0) amp_weight = s/n # Normalize to sum to 1: amp_weight = amp_weight / np.sum(amp_weight) # Next, we make sure that all the coils have the same phase. We will use # the phase of the Lorentzian to align the phases: phase_param = params[..., 3] zero_phi_w = np.mean(phase_param.reshape(-1, phase_param.shape[-1]),0) # This recalculates the weight with the phase alignment (see page 397 in # Wald paper): weight = amp_weight * np.exp(-1j * zero_phi_w) # Multiply each one of the signals by its coil-weights and average across # coils: na = np.newaxis # Short-hand # Collapse across coils for the combination in both the water weighted_w_data = np.mean(np.fft.ifft(np.fft.fftshift( weight[na, na, :, na] * fft_w)), coil_dim) weighted_w_supp_data = np.mean(np.fft.ifft(np.fft.fftshift( weight[na, na, : ,na] * fft_w_supp)) , coil_dim) # Normalize each series by the sqrt(rms): def normalize_this(x): return x * (x.shape[-1] / (np.sum(np.abs(x)))) weighted_w_data = normalize_this(weighted_w_data) weighted_w_supp_data = normalize_this(weighted_w_supp_data) # Squeeze in case that some extraneous dimensions were introduced (can # happen for SV data, for example) return weighted_w_data.squeeze(), weighted_w_supp_data.squeeze()
[ "def", "coil_combine", "(", "data", ",", "w_idx", "=", "[", "1", ",", "2", ",", "3", "]", ",", "coil_dim", "=", "2", ",", "sampling_rate", "=", "5000.", ")", ":", "w_data", ",", "w_supp_data", "=", "separate_signals", "(", "data", ",", "w_idx", ")", "fft_w", "=", "np", ".", "fft", ".", "fftshift", "(", "fft", ".", "fft", "(", "w_data", ")", ")", "fft_w_supp", "=", "np", ".", "fft", ".", "fftshift", "(", "fft", ".", "fft", "(", "w_supp_data", ")", ")", "freqs_w", "=", "np", ".", "linspace", "(", "-", "sampling_rate", "/", "2.0", ",", "sampling_rate", "/", "2.0", ",", "w_data", ".", "shape", "[", "-", "1", "]", ")", "# To determine phase and amplitude, fit a Lorentzian line-shape to each", "# coils data in each trial: ", "# No bounds except for on the phase:", "bounds", "=", "[", "(", "None", ",", "None", ")", ",", "(", "0", ",", "None", ")", ",", "(", "0", ",", "None", ")", ",", "(", "-", "np", ".", "pi", ",", "np", ".", "pi", ")", ",", "(", "None", ",", "None", ")", ",", "(", "None", ",", "None", ")", "]", "n_params", "=", "len", "(", "bounds", ")", "params", "=", "np", ".", "zeros", "(", "fft_w", ".", "shape", "[", ":", "-", "1", "]", "+", "(", "n_params", ",", ")", ")", "# Let's fit a Lorentzian line-shape to each one of these:", "for", "repeat", "in", "range", "(", "w_data", ".", "shape", "[", "0", "]", ")", ":", "for", "echo", "in", "range", "(", "w_data", ".", "shape", "[", "1", "]", ")", ":", "for", "coil", "in", "range", "(", "w_data", ".", "shape", "[", "2", "]", ")", ":", "sig", "=", "fft_w", "[", "repeat", ",", "echo", ",", "coil", "]", "# Use the private function to do this:", "params", "[", "repeat", ",", "echo", ",", "coil", "]", "=", "_do_lorentzian_fit", "(", "freqs_w", ",", "sig", ",", "bounds", ")", "# The area parameter stands for the magnitude:", "area_w", "=", "params", "[", "...", ",", "1", "]", "# In each coil, we derive S/(N^2):", "s", "=", "np", ".", "mean", "(", "area_w", ".", "reshape", "(", "-", "1", ",", "area_w", ".", "shape", "[", "-", "1", "]", ")", ",", "0", ")", "n", "=", "np", ".", "var", "(", "area_w", ".", "reshape", "(", "-", "1", ",", "area_w", ".", "shape", "[", "-", "1", "]", ")", ",", "0", ")", "amp_weight", "=", "s", "/", "n", "# Normalize to sum to 1: ", "amp_weight", "=", "amp_weight", "/", "np", ".", "sum", "(", "amp_weight", ")", "# Next, we make sure that all the coils have the same phase. 
We will use", "# the phase of the Lorentzian to align the phases:", "phase_param", "=", "params", "[", "...", ",", "3", "]", "zero_phi_w", "=", "np", ".", "mean", "(", "phase_param", ".", "reshape", "(", "-", "1", ",", "phase_param", ".", "shape", "[", "-", "1", "]", ")", ",", "0", ")", "# This recalculates the weight with the phase alignment (see page 397 in", "# Wald paper):", "weight", "=", "amp_weight", "*", "np", ".", "exp", "(", "-", "1j", "*", "zero_phi_w", ")", "# Multiply each one of the signals by its coil-weights and average across", "# coils:", "na", "=", "np", ".", "newaxis", "# Short-hand", "# Collapse across coils for the combination in both the water ", "weighted_w_data", "=", "np", ".", "mean", "(", "np", ".", "fft", ".", "ifft", "(", "np", ".", "fft", ".", "fftshift", "(", "weight", "[", "na", ",", "na", ",", ":", ",", "na", "]", "*", "fft_w", ")", ")", ",", "coil_dim", ")", "weighted_w_supp_data", "=", "np", ".", "mean", "(", "np", ".", "fft", ".", "ifft", "(", "np", ".", "fft", ".", "fftshift", "(", "weight", "[", "na", ",", "na", ",", ":", ",", "na", "]", "*", "fft_w_supp", ")", ")", ",", "coil_dim", ")", "# Normalize each series by the sqrt(rms):", "def", "normalize_this", "(", "x", ")", ":", "return", "x", "*", "(", "x", ".", "shape", "[", "-", "1", "]", "/", "(", "np", ".", "sum", "(", "np", ".", "abs", "(", "x", ")", ")", ")", ")", "weighted_w_data", "=", "normalize_this", "(", "weighted_w_data", ")", "weighted_w_supp_data", "=", "normalize_this", "(", "weighted_w_supp_data", ")", "# Squeeze in case that some extraneous dimensions were introduced (can", "# happen for SV data, for example)", "return", "weighted_w_data", ".", "squeeze", "(", ")", ",", "weighted_w_supp_data", ".", "squeeze", "(", ")" ]
Combine data across coils based on the amplitude of the water peak, according to: .. math:: X = \sum_{i}{w_i S_i} Where X is the resulting combined signal, $S_i$ are the individual coil signals and $w_i$ are calculated as: .. math:: w_i = mean(S_i) / var (S_i) following [Hall2013]_. In addition, we apply a phase-correction, so that all the phases of the signals from each coil are 0 Parameters ---------- data : float array The data as it comes from the scanner, with shape (transients, echos, coils, time points) w_idx : list The indices to the non-water-suppressed transients. Per default we take the 2nd-4th transients. We dump the first one, because it seems to be quite different than the rest of them... coil_dim : int The dimension on which the coils are represented. Default: 2 sampling rate : float The sampling rate in Hz. Default : 5000. References ---------- .. [Hall2013] Emma L. Hall, Mary C. Stephenson, Darren Price, Peter G. Morris (2013). Methodology for improved detection of low concentration metabolites in MRS: Optimised combination of signals from multi-element coil arrays. Neuroimage 86: 35-42. .. [Wald1997] Wald, L. and Wright, S. (1997). Theory and application of array coils in MR spectroscopy. NMR in Biomedicine, 10: 394-410. .. [Keeler2005] Keeler, J (2005). Understanding NMR spectroscopy, second edition. Wiley (West Sussex, UK).
[ "Combine", "data", "across", "coils", "based", "on", "the", "amplitude", "of", "the", "water", "peak", "according", "to", ":" ]
python
train
35.566667
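A toy worked example of the weighting formula in the cni/MRS docstring above, w_i = mean(S_i) / var(S_i) normalized to sum to 1, on made-up per-coil water-peak areas:

    import numpy as np

    area_w = np.array([[10.2, 5.1, 8.0],   # made-up (transient x coil) peak areas,
                       [ 9.8, 4.9, 8.3]])  # for illustration only
    s = area_w.mean(axis=0)                # mean signal per coil
    n = area_w.var(axis=0)                 # variance per coil
    w = (s / n) / np.sum(s / n)            # normalized weights, sum to 1
    print(w.round(3))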
googleads/googleads-python-lib
googleads/ad_manager.py
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/googleads/ad_manager.py#L365-L380
def GetSOAPHeaders(self, create_method): """Returns the SOAP headers required for request authorization. Args: create_method: The SOAP library specific method used to instantiate SOAP objects. Returns: A SOAP object containing the headers. """ header = create_method(self._SOAP_HEADER_CLASS) header.networkCode = self._ad_manager_client.network_code header.applicationName = ''.join([ self._ad_manager_client.application_name, googleads.common.GenerateLibSig(self._PRODUCT_SIG)]) return header
[ "def", "GetSOAPHeaders", "(", "self", ",", "create_method", ")", ":", "header", "=", "create_method", "(", "self", ".", "_SOAP_HEADER_CLASS", ")", "header", ".", "networkCode", "=", "self", ".", "_ad_manager_client", ".", "network_code", "header", ".", "applicationName", "=", "''", ".", "join", "(", "[", "self", ".", "_ad_manager_client", ".", "application_name", ",", "googleads", ".", "common", ".", "GenerateLibSig", "(", "self", ".", "_PRODUCT_SIG", ")", "]", ")", "return", "header" ]
Returns the SOAP headers required for request authorization. Args: create_method: The SOAP library specific method used to instantiate SOAP objects. Returns: A SOAP object containing the headers.
[ "Returns", "the", "SOAP", "headers", "required", "for", "request", "authorization", "." ]
python
train
34.125
rsalmei/clearly
clearly/event_core/events.py
https://github.com/rsalmei/clearly/blob/fd784843d13f0fed28fc192565bec3668f1363f4/clearly/event_core/events.py#L27-L34
def immutable_worker(worker, state, pre_state, created): """Converts to an immutable slots class to handle internally.""" # noinspection PyUnresolvedReferences,PyProtectedMember return WorkerData._make(chain( (getattr(worker, f) for f in WORKER_OWN_FIELDS), (state, pre_state, created), (worker.heartbeats[-1] if worker.heartbeats else None,), ))
[ "def", "immutable_worker", "(", "worker", ",", "state", ",", "pre_state", ",", "created", ")", ":", "# noinspection PyUnresolvedReferences,PyProtectedMember", "return", "WorkerData", ".", "_make", "(", "chain", "(", "(", "getattr", "(", "worker", ",", "f", ")", "for", "f", "in", "WORKER_OWN_FIELDS", ")", ",", "(", "state", ",", "pre_state", ",", "created", ")", ",", "(", "worker", ".", "heartbeats", "[", "-", "1", "]", "if", "worker", ".", "heartbeats", "else", "None", ",", ")", ",", ")", ")" ]
Converts to an immutable slots class to handle internally.
[ "Converts", "to", "an", "immutable", "slots", "class", "to", "handle", "internally", "." ]
python
train
47.375
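The WorkerData._make(chain(...)) idiom in the rsalmei/clearly record above is standard namedtuple machinery; a self-contained illustration:

    from collections import namedtuple
    from itertools import chain

    Point = namedtuple("Point", "x y z")
    p = Point._make(chain((1, 2), (3,)))  # _make builds the tuple from any iterable
    print(p)  # Point(x=1, y=2, z=3)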
saltstack/salt
salt/runners/asam.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/asam.py#L77-L121
def _get_asam_configuration(driver_url=''): ''' Return the configuration read from the master configuration file or directory ''' asam_config = __opts__['asam'] if 'asam' in __opts__ else None if asam_config: try: for asam_server, service_config in six.iteritems(asam_config): username = service_config.get('username', None) password = service_config.get('password', None) protocol = service_config.get('protocol', 'https') port = service_config.get('port', 3451) if not username or not password: log.error( 'Username or Password has not been specified in the ' 'master configuration for %s', asam_server ) return False ret = { 'platform_edit_url': "{0}://{1}:{2}/config/PlatformEdit.html".format(protocol, asam_server, port), 'platform_config_url': "{0}://{1}:{2}/config/PlatformConfig.html".format(protocol, asam_server, port), 'platformset_edit_url': "{0}://{1}:{2}/config/PlatformSetEdit.html".format(protocol, asam_server, port), 'platformset_config_url': "{0}://{1}:{2}/config/PlatformSetConfig.html".format(protocol, asam_server, port), 'username': username, 'password': password } if (not driver_url) or (driver_url == asam_server): return ret except Exception as exc: log.error('Exception encountered: %s', exc) return False if driver_url: log.error( 'Configuration for %s has not been specified in the master ' 'configuration', driver_url ) return False return False
[ "def", "_get_asam_configuration", "(", "driver_url", "=", "''", ")", ":", "asam_config", "=", "__opts__", "[", "'asam'", "]", "if", "'asam'", "in", "__opts__", "else", "None", "if", "asam_config", ":", "try", ":", "for", "asam_server", ",", "service_config", "in", "six", ".", "iteritems", "(", "asam_config", ")", ":", "username", "=", "service_config", ".", "get", "(", "'username'", ",", "None", ")", "password", "=", "service_config", ".", "get", "(", "'password'", ",", "None", ")", "protocol", "=", "service_config", ".", "get", "(", "'protocol'", ",", "'https'", ")", "port", "=", "service_config", ".", "get", "(", "'port'", ",", "3451", ")", "if", "not", "username", "or", "not", "password", ":", "log", ".", "error", "(", "'Username or Password has not been specified in the '", "'master configuration for %s'", ",", "asam_server", ")", "return", "False", "ret", "=", "{", "'platform_edit_url'", ":", "\"{0}://{1}:{2}/config/PlatformEdit.html\"", ".", "format", "(", "protocol", ",", "asam_server", ",", "port", ")", ",", "'platform_config_url'", ":", "\"{0}://{1}:{2}/config/PlatformConfig.html\"", ".", "format", "(", "protocol", ",", "asam_server", ",", "port", ")", ",", "'platformset_edit_url'", ":", "\"{0}://{1}:{2}/config/PlatformSetEdit.html\"", ".", "format", "(", "protocol", ",", "asam_server", ",", "port", ")", ",", "'platformset_config_url'", ":", "\"{0}://{1}:{2}/config/PlatformSetConfig.html\"", ".", "format", "(", "protocol", ",", "asam_server", ",", "port", ")", ",", "'username'", ":", "username", ",", "'password'", ":", "password", "}", "if", "(", "not", "driver_url", ")", "or", "(", "driver_url", "==", "asam_server", ")", ":", "return", "ret", "except", "Exception", "as", "exc", ":", "log", ".", "error", "(", "'Exception encountered: %s'", ",", "exc", ")", "return", "False", "if", "driver_url", ":", "log", ".", "error", "(", "'Configuration for %s has not been specified in the master '", "'configuration'", ",", "driver_url", ")", "return", "False", "return", "False" ]
Return the configuration read from the master configuration file or directory
[ "Return", "the", "configuration", "read", "from", "the", "master", "configuration", "file", "or", "directory" ]
python
train
41.555556
Sean1708/HipPy
hippy/parser.py
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L71-L77
def _increment(self, n=1): """Move forward n tokens in the stream.""" if self._cur_position >= self.num_tokens-1: self._cur_position = self.num_tokens - 1 self._finished = True else: self._cur_position += n
[ "def", "_increment", "(", "self", ",", "n", "=", "1", ")", ":", "if", "self", ".", "_cur_position", ">=", "self", ".", "num_tokens", "-", "1", ":", "self", ".", "_cur_positon", "=", "self", ".", "num_tokens", "-", "1", "self", ".", "_finished", "=", "True", "else", ":", "self", ".", "_cur_position", "+=", "n" ]
Move forward n tokens in the stream.
[ "Move", "forward", "n", "tokens", "in", "the", "stream", "." ]
python
train
37
datastax/python-driver
cassandra/cqlengine/query.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cqlengine/query.py#L1118-L1126
def _get_result_constructor(self): """ Returns a function that will be used to instantiate query results """ if not self._values_list: # we want models return self.model._construct_instance elif self._flat_values_list: # the user has requested flattened list (1 value per row) key = self._only_fields[0] return lambda row: row[key] else: return lambda row: [row[f] for f in self._only_fields]
[ "def", "_get_result_constructor", "(", "self", ")", ":", "if", "not", "self", ".", "_values_list", ":", "# we want models", "return", "self", ".", "model", ".", "_construct_instance", "elif", "self", ".", "_flat_values_list", ":", "# the user has requested flattened list (1 value per row)", "key", "=", "self", ".", "_only_fields", "[", "0", "]", "return", "lambda", "row", ":", "row", "[", "key", "]", "else", ":", "return", "lambda", "row", ":", "[", "row", "[", "f", "]", "for", "f", "in", "self", ".", "_only_fields", "]" ]
Returns a function that will be used to instantiate query results
[ "Returns", "a", "function", "that", "will", "be", "used", "to", "instantiate", "query", "results" ]
python
train
51.777778
bcbio/bcbio-nextgen
bcbio/utils.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L267-L277
def save_diskspace(fname, reason, config): """Overwrite a file in place with a short message to save disk. This keeps files as a sanity check on processes working, but saves disk by replacing them with a short message. """ if config["algorithm"].get("save_diskspace", False): for ext in ["", ".bai"]: if os.path.exists(fname + ext): with open(fname + ext, "w") as out_handle: out_handle.write("File removed to save disk space: %s" % reason)
[ "def", "save_diskspace", "(", "fname", ",", "reason", ",", "config", ")", ":", "if", "config", "[", "\"algorithm\"", "]", ".", "get", "(", "\"save_diskspace\"", ",", "False", ")", ":", "for", "ext", "in", "[", "\"\"", ",", "\".bai\"", "]", ":", "if", "os", ".", "path", ".", "exists", "(", "fname", "+", "ext", ")", ":", "with", "open", "(", "fname", "+", "ext", ",", "\"w\"", ")", "as", "out_handle", ":", "out_handle", ".", "write", "(", "\"File removed to save disk space: %s\"", "%", "reason", ")" ]
Overwrite a file in place with a short message to save disk. This keeps files as a sanity check on processes working, but saves disk by replacing them with a short message.
[ "Overwrite", "a", "file", "in", "place", "with", "a", "short", "message", "to", "save", "disk", "." ]
python
train
46.090909
celery/cell
cell/actors.py
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L398-L420
def cast(self, method, args={}, declare=None, retry=None, retry_policy=None, type=None, exchange=None, **props): """Send message to actor. Discarding replies.""" retry = self.retry if retry is None else retry body = {'class': self.name, 'method': method, 'args': args} _retry_policy = self.retry_policy if retry_policy: # merge default and custom policies. _retry_policy = dict(_retry_policy, **retry_policy) if type and type not in self.types: raise ValueError('Unsupported type: {0}'.format(type)) elif not type: type = ACTOR_TYPE.DIRECT props.setdefault('routing_key', self.routing_key) props.setdefault('serializer', self.serializer) exchange = exchange or self.type_to_exchange[type]() declare = (maybe_list(declare) or []) + [exchange] with producers[self._connection].acquire(block=True) as producer: return producer.publish(body, exchange=exchange, declare=declare, retry=retry, retry_policy=retry_policy, **props)
[ "def", "cast", "(", "self", ",", "method", ",", "args", "=", "{", "}", ",", "declare", "=", "None", ",", "retry", "=", "None", ",", "retry_policy", "=", "None", ",", "type", "=", "None", ",", "exchange", "=", "None", ",", "*", "*", "props", ")", ":", "retry", "=", "self", ".", "retry", "if", "retry", "is", "None", "else", "retry", "body", "=", "{", "'class'", ":", "self", ".", "name", ",", "'method'", ":", "method", ",", "'args'", ":", "args", "}", "_retry_policy", "=", "self", ".", "retry_policy", "if", "retry_policy", ":", "# merge default and custom policies.", "_retry_policy", "=", "dict", "(", "_retry_policy", ",", "*", "*", "retry_policy", ")", "if", "type", "and", "type", "not", "in", "self", ".", "types", ":", "raise", "ValueError", "(", "'Unsupported type: {0}'", ".", "format", "(", "type", ")", ")", "elif", "not", "type", ":", "type", "=", "ACTOR_TYPE", ".", "DIRECT", "props", ".", "setdefault", "(", "'routing_key'", ",", "self", ".", "routing_key", ")", "props", ".", "setdefault", "(", "'serializer'", ",", "self", ".", "serializer", ")", "exchange", "=", "exchange", "or", "self", ".", "type_to_exchange", "[", "type", "]", "(", ")", "declare", "=", "(", "maybe_list", "(", "declare", ")", "or", "[", "]", ")", "+", "[", "exchange", "]", "with", "producers", "[", "self", ".", "_connection", "]", ".", "acquire", "(", "block", "=", "True", ")", "as", "producer", ":", "return", "producer", ".", "publish", "(", "body", ",", "exchange", "=", "exchange", ",", "declare", "=", "declare", ",", "retry", "=", "retry", ",", "retry_policy", "=", "retry_policy", ",", "*", "*", "props", ")" ]
Send message to actor. Discarding replies.
[ "Send", "message", "to", "actor", ".", "Discarding", "replies", "." ]
python
train
49.304348
fhamborg/news-please
newsplease/helper_classes/parse_crawler.py
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/parse_crawler.py#L27-L46
def pass_to_pipeline_if_article( self, response, source_domain, original_url, rss_title=None ): """ Responsible for passing a NewscrawlerItem to the pipeline if the response contains an article. :param obj response: the scrapy response to work on :param str source_domain: the response's domain as set for the crawler :param str original_url: the url set in the json file :param str rss_title: the title extracted by an rssCrawler :return NewscrawlerItem: NewscrawlerItem to pass to the pipeline """ if self.helper.heuristics.is_article(response, original_url): return self.pass_to_pipeline( response, source_domain, rss_title=rss_title)
[ "def", "pass_to_pipeline_if_article", "(", "self", ",", "response", ",", "source_domain", ",", "original_url", ",", "rss_title", "=", "None", ")", ":", "if", "self", ".", "helper", ".", "heuristics", ".", "is_article", "(", "response", ",", "original_url", ")", ":", "return", "self", ".", "pass_to_pipeline", "(", "response", ",", "source_domain", ",", "rss_title", "=", "None", ")" ]
Responsible for passing a NewscrawlerItem to the pipeline if the response contains an article. :param obj response: the scrapy response to work on :param str source_domain: the response's domain as set for the crawler :param str original_url: the url set in the json file :param str rss_title: the title extracted by an rssCrawler :return NewscrawlerItem: NewscrawlerItem to pass to the pipeline
[ "Responsible", "for", "passing", "a", "NewscrawlerItem", "to", "the", "pipeline", "if", "the", "response", "contains", "an", "article", "." ]
python
train
39.3
obulpathi/cdn-fastly-python
fastly/__init__.py
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L702-L706
def update_service(self, service_id, **kwargs): """Update a service.""" body = self._formdata(kwargs, FastlyService.FIELDS) content = self._fetch("/service/%s" % service_id, method="PUT", body=body) return FastlyService(self, content)
[ "def", "update_service", "(", "self", ",", "service_id", ",", "*", "*", "kwargs", ")", ":", "body", "=", "self", ".", "_formdata", "(", "kwargs", ",", "FastlyService", ".", "FIELDS", ")", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s\"", "%", "service_id", ",", "method", "=", "\"PUT\"", ",", "body", "=", "body", ")", "return", "FastlyService", "(", "self", ",", "content", ")" ]
Update a service.
[ "Update", "a", "service", "." ]
python
train
47.6
batiste/django-page-cms
pages/models.py
https://github.com/batiste/django-page-cms/blob/3c72111eb7c3997a63c462c1776ffd8ce8c50a5d/pages/models.py#L321-L332
def get_template_name(self): """ Get the template name of this page if defined or if a closer parent has a defined template or :data:`pages.settings.PAGE_DEFAULT_TEMPLATE` otherwise. """ template = self.get_template() page_templates = settings.get_page_templates() for t in page_templates: if t[0] == template: return t[1] return template
[ "def", "get_template_name", "(", "self", ")", ":", "template", "=", "self", ".", "get_template", "(", ")", "page_templates", "=", "settings", ".", "get_page_templates", "(", ")", "for", "t", "in", "page_templates", ":", "if", "t", "[", "0", "]", "==", "template", ":", "return", "t", "[", "1", "]", "return", "template" ]
Get the template name of this page if defined or if a closer parent has a defined template or :data:`pages.settings.PAGE_DEFAULT_TEMPLATE` otherwise.
[ "Get", "the", "template", "name", "of", "this", "page", "if", "defined", "or", "if", "a", "closer", "parent", "has", "a", "defined", "template", "or", ":", "data", ":", "pages", ".", "settings", ".", "PAGE_DEFAULT_TEMPLATE", "otherwise", "." ]
python
train
35.583333
dustinmm80/healthy
pylint_runner.py
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/pylint_runner.py#L22-L44
def score(package_path): """ Runs pylint on a package and returns a score Lower score is better :param package_path: path of the package to score :return: number of score """ python_files = find_files(package_path, '*.py') total_counter = Counter() for python_file in python_files: output = run_pylint(python_file) counter = parse_pylint_output(output) total_counter += counter score_value = 0 for count, stat in enumerate(total_counter): score_value += SCORING_VALUES[stat] * count return score_value / 5
[ "def", "score", "(", "package_path", ")", ":", "python_files", "=", "find_files", "(", "package_path", ",", "'*.py'", ")", "total_counter", "=", "Counter", "(", ")", "for", "python_file", "in", "python_files", ":", "output", "=", "run_pylint", "(", "python_file", ")", "counter", "=", "parse_pylint_output", "(", "output", ")", "total_counter", "+=", "counter", "score_value", "=", "0", "for", "count", ",", "stat", "in", "enumerate", "(", "total_counter", ")", ":", "score_value", "+=", "SCORING_VALUES", "[", "stat", "]", "*", "count", "return", "score_value", "/", "5" ]
Runs pylint on a package and returns a score Lower score is better :param package_path: path of the package to score :return: number of score
[ "Runs", "pylint", "on", "a", "package", "and", "returns", "a", "score", "Lower", "score", "is", "better" ]
python
train
24.73913
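In the dustinmm80/healthy record above, enumerate(total_counter) pairs each stat with a positional index, so the index rather than the tally is multiplied into the score; the Counter's own counts are presumably what was meant. A hedged sketch of that reading, with hypothetical weights:

    from collections import Counter

    SCORING_VALUES = {"error": 5, "warning": 2, "convention": 1}  # hypothetical weights

    def score_counter(total_counter: Counter) -> float:
        # items() yields (stat, count); enumerate() would yield (index, stat).
        return sum(SCORING_VALUES[stat] * count
                   for stat, count in total_counter.items()) / 5

    print(score_counter(Counter(error=1, warning=3)))  # (5*1 + 2*3) / 5 = 2.2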
moonso/vcftoolbox
vcftoolbox/add_variant_information.py
https://github.com/moonso/vcftoolbox/blob/438fb1d85a83812c389774b94802eb5921c89e3a/vcftoolbox/add_variant_information.py#L19-L79
def replace_vcf_info(keyword, annotation, variant_line=None, variant_dict=None): """Replace the information of a info field of a vcf variant line or a variant dict. Arguments: variant_line (str): A vcf formatted variant line variant_dict (dict): A variant dictionary keyword (str): The info field key annotation (str): If the annotation is a key, value pair this is the string that represents the value Returns: variant_line (str): A annotated variant line """ new_info = '{0}={1}'.format(keyword, annotation) logger.debug("Replacing the variant information {0}".format(new_info)) fixed_variant = None new_info_list = [] if variant_line: logger.debug("Adding information to a variant line") splitted_variant = variant_line.rstrip('\n').split('\t') logger.debug("Adding information to splitted variant line") old_info = splitted_variant[7] if old_info == '.': new_info_string = new_info else: splitted_info_string = old_info.split(';') for info in splitted_info_string: splitted_info_entry = info.split('=') if splitted_info_entry[0] == keyword: new_info_list.append(new_info) else: new_info_list.append(info) new_info_string = ';'.join(new_info_list) splitted_variant[7] = new_info_string fixed_variant = '\t'.join(splitted_variant) elif variant_dict: logger.debug("Adding information to a variant dict") old_info = variant_dict['INFO'] if old_info == '.': variant_dict['INFO'] = new_info else: for info in old_info.split(';'): splitted_info_entry = info.split('=') if splitted_info_entry[0] == keyword: new_info_list.append(new_info) else: new_info_list.append(info) new_info_string = ';'.join(new_info_list) variant_dict['INFO'] = new_info_string fixed_variant = variant_dict return fixed_variant
[ "def", "replace_vcf_info", "(", "keyword", ",", "annotation", ",", "variant_line", "=", "None", ",", "variant_dict", "=", "None", ")", ":", "new_info", "=", "'{0}={1}'", ".", "format", "(", "keyword", ",", "annotation", ")", "logger", ".", "debug", "(", "\"Replacing the variant information {0}\"", ".", "format", "(", "new_info", ")", ")", "fixed_variant", "=", "None", "new_info_list", "=", "[", "]", "if", "variant_line", ":", "logger", ".", "debug", "(", "\"Adding information to a variant line\"", ")", "splitted_variant", "=", "variant_line", ".", "rstrip", "(", "'\\n'", ")", ".", "split", "(", "'\\t'", ")", "logger", ".", "debug", "(", "\"Adding information to splitted variant line\"", ")", "old_info", "=", "splitted_variant", "[", "7", "]", "if", "old_info", "==", "'.'", ":", "new_info_string", "=", "new_info", "else", ":", "splitted_info_string", "=", "old_info", ".", "split", "(", "';'", ")", "for", "info", "in", "splitted_info_string", ":", "splitted_info_entry", "=", "info", ".", "split", "(", "'='", ")", "if", "splitted_info_entry", "[", "0", "]", "==", "keyword", ":", "new_info_list", ".", "append", "(", "new_info", ")", "else", ":", "new_info_list", ".", "append", "(", "info", ")", "new_info_string", "=", "';'", ".", "join", "(", "new_info_list", ")", "splitted_variant", "[", "7", "]", "=", "new_info_string", "fixed_variant", "=", "'\\t'", ".", "join", "(", "splitted_variant", ")", "elif", "variant_dict", ":", "logger", ".", "debug", "(", "\"Adding information to a variant dict\"", ")", "old_info", "=", "variant_dict", "[", "'INFO'", "]", "if", "old_info", "==", "'.'", ":", "variant_dict", "[", "'INFO'", "]", "=", "new_info", "else", ":", "for", "info", "in", "old_info", ".", "split", "(", "';'", ")", ":", "splitted_info_entry", "=", "info", ".", "split", "(", "'='", ")", "if", "splitted_info_entry", "[", "0", "]", "==", "keyword", ":", "new_info_list", ".", "append", "(", "new_info", ")", "else", ":", "new_info_list", ".", "append", "(", "info", ")", "new_info_string", "=", "';'", ".", "join", "(", "new_info_list", ")", "variant_dict", "[", "'INFO'", "]", "=", "new_info_string", "fixed_variant", "=", "variant_dict", "return", "fixed_variant" ]
Replace the information of a info field of a vcf variant line or a variant dict. Arguments: variant_line (str): A vcf formatted variant line variant_dict (dict): A variant dictionary keyword (str): The info field key annotation (str): If the annotation is a key, value pair this is the string that represents the value Returns: variant_line (str): A annotated variant line
[ "Replace", "the", "information", "of", "a", "info", "field", "of", "a", "vcf", "variant", "line", "or", "a", "variant", "dict", ".", "Arguments", ":", "variant_line", "(", "str", ")", ":", "A", "vcf", "formatted", "variant", "line", "variant_dict", "(", "dict", ")", ":", "A", "variant", "dictionary", "keyword", "(", "str", ")", ":", "The", "info", "field", "key", "annotation", "(", "str", ")", ":", "If", "the", "annotation", "is", "a", "key", "value", "pair", "this", "is", "the", "string", "that", "represents", "the", "value", "Returns", ":", "variant_line", "(", "str", ")", ":", "A", "annotated", "variant", "line" ]
python
train
36.065574
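A usage illustration for replace_vcf_info above, rewriting one key of the INFO column (the eighth tab-separated field) in a made-up VCF line; it assumes the record's function and its module-level logger are in scope:

    line = "1\t100\t.\tA\tT\t30\tPASS\tDP=10;AF=0.5"
    print(replace_vcf_info("AF", "0.25", variant_line=line))
    # -> 1\t100\t.\tA\tT\t30\tPASS\tDP=10;AF=0.25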
openregister/openregister-python
openregister/entry.py
https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/entry.py#L50-L54
def primitive(self, primitive): """Entry from Python primitive.""" self.entry_number = primitive['entry-number'] self.item_hash = primitive['item-hash'] self.timestamp = primitive['timestamp']
[ "def", "primitive", "(", "self", ",", "primitive", ")", ":", "self", ".", "entry_number", "=", "primitive", "[", "'entry-number'", "]", "self", ".", "item_hash", "=", "primitive", "[", "'item-hash'", "]", "self", ".", "timestamp", "=", "primitive", "[", "'timestamp'", "]" ]
Entry from Python primitive.
[ "Entry", "from", "Python", "primitive", "." ]
python
train
44
gwastro/pycbc
pycbc/workflow/configuration.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/configuration.py#L542-L587
def interpolate_exe(self, testString): """ Replace testString with a path to an executable based on the format. If this looks like ${which:lalapps_tmpltbank} it will return the equivalent of which(lalapps_tmpltbank) Otherwise it will return an unchanged string. Parameters ----------- testString : string The input string Returns -------- newString : string The output string. """ # First check if any interpolation is needed and abort if not testString = testString.strip() if not (testString.startswith('${') and testString.endswith('}')): return testString # This may not be an exe interpolation, so even if it has ${XXX} form # I may not have to do anything newString = testString # Strip the ${ and } testString = testString[2:-1] testList = testString.split(':') # Maybe we can add a few different possibilities for substitution if len(testList) == 2: if testList[0] == 'which': newString = distutils.spawn.find_executable(testList[1]) if not newString: errmsg = "Cannot find exe %s in your path " %(testList[1]) errmsg += "and you specified ${which:%s}." %(testList[1]) raise ValueError(errmsg) return newString
[ "def", "interpolate_exe", "(", "self", ",", "testString", ")", ":", "# First check if any interpolation is needed and abort if not", "testString", "=", "testString", ".", "strip", "(", ")", "if", "not", "(", "testString", ".", "startswith", "(", "'${'", ")", "and", "testString", ".", "endswith", "(", "'}'", ")", ")", ":", "return", "testString", "# This may not be an exe interpolation, so even if it has ${XXX} form", "# I may not have to do anything", "newString", "=", "testString", "# Strip the ${ and }", "testString", "=", "testString", "[", "2", ":", "-", "1", "]", "testList", "=", "testString", ".", "split", "(", "':'", ")", "# Maybe we can add a few different possibilities for substitution", "if", "len", "(", "testList", ")", "==", "2", ":", "if", "testList", "[", "0", "]", "==", "'which'", ":", "newString", "=", "distutils", ".", "spawn", ".", "find_executable", "(", "testList", "[", "1", "]", ")", "if", "not", "newString", ":", "errmsg", "=", "\"Cannot find exe %s in your path \"", "%", "(", "testList", "[", "1", "]", ")", "errmsg", "+=", "\"and you specified ${which:%s}.\"", "%", "(", "testList", "[", "1", "]", ")", "raise", "ValueError", "(", "errmsg", ")", "return", "newString" ]
Replace testString with a path to an executable based on the format. If this looks like ${which:lalapps_tmpltbank} it will return the equivalent of which(lalapps_tmpltbank) Otherwise it will return an unchanged string. Parameters ----------- testString : string The input string Returns -------- newString : string The output string.
[ "Replace", "testString", "with", "a", "path", "to", "an", "executable", "based", "on", "the", "format", "." ]
python
train
30.956522
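distutils.spawn.find_executable, used in the gwastro/pycbc record above, left the stdlib with distutils in Python 3.12; shutil.which is the usual replacement. A hedged sketch of the same ${which:exe} substitution:

    import shutil

    def interpolate_exe(test_string):
        # Only ${which:name} forms are substituted; anything else passes through.
        test_string = test_string.strip()
        if not (test_string.startswith("${") and test_string.endswith("}")):
            return test_string
        parts = test_string[2:-1].split(":")
        if len(parts) == 2 and parts[0] == "which":
            path = shutil.which(parts[1])
            if not path:
                raise ValueError("Cannot find exe %s in your path "
                                 "and you specified ${which:%s}." % (parts[1], parts[1]))
            return path
        return test_string

    print(interpolate_exe("${which:python3}"))  # e.g. /usr/bin/python3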
qba73/circleclient
circleclient/circleclient.py
https://github.com/qba73/circleclient/blob/8bf5b093e416c899cc39e43a770c17a5466487b0/circleclient/circleclient.py#L37-L44
def client_get(self, url, **kwargs): """Send GET request with given url.""" response = requests.get(self.make_url(url), headers=self.headers) if not response.ok: raise Exception( '{status}: {reason}.\nCircleCI Status NOT OK'.format( status=response.status_code, reason=response.reason)) return response.json()
[ "def", "client_get", "(", "self", ",", "url", ",", "*", "*", "kwargs", ")", ":", "response", "=", "requests", ".", "get", "(", "self", ".", "make_url", "(", "url", ")", ",", "headers", "=", "self", ".", "headers", ")", "if", "not", "response", ".", "ok", ":", "raise", "Exception", "(", "'{status}: {reason}.\\nCircleCI Status NOT OK'", ".", "format", "(", "status", "=", "response", ".", "status_code", ",", "reason", "=", "response", ".", "reason", ")", ")", "return", "response", ".", "json", "(", ")" ]
Send GET request with given url.
[ "Send", "GET", "request", "with", "given", "url", "." ]
python
train
47.75
saltstack/salt
salt/states/mount.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/mount.py#L1019-L1185
def fstab_present(name, fs_file, fs_vfstype, fs_mntops='defaults', fs_freq=0, fs_passno=0, mount_by=None, config='/etc/fstab', mount=True, match_on='auto'): ''' Makes sure that an fstab mount point is present. name The name of block device. Can be any valid fs_spec value. fs_file Mount point (target) for the filesystem. fs_vfstype The type of the filesystem (e.g. ext4, xfs, btrfs, ...) fs_mntops The mount options associated with the filesystem. Default is ``defaults``. fs_freq Field is used by dump to determine which fs need to be dumped. Default is ``0`` fs_passno Field is used by fsck to determine the order in which filesystem checks are done at boot time. Default is ``0`` mount_by Select the final value for fs_spec. Can be [``None``, ``device``, ``label``, ``uuid``, ``partlabel``, ``partuuid``]. If ``None``, the value for fs_spec will be the parameter ``name``; otherwise it will search for the correct value based on the device name. For example, for ``uuid``, the value for fs_spec will be of type 'UUID=xxx' instead of the device name set in ``name``. config Place where the fstab file lives. Default is ``/etc/fstab`` mount Set if the mount should be mounted immediately. Default is ``True`` match_on A name or list of fstab properties on which this state should be applied. Default is ``auto``, a special value indicating to guess based on fstype. In general, ``auto`` matches on name for recognized special devices and device otherwise. ''' ret = { 'name': name, 'result': False, 'changes': {}, 'comment': [], } # Adjust fs_mntops based on the OS if fs_mntops == 'defaults': if __grains__['os'] in ['MacOS', 'Darwin']: fs_mntops = 'noowners' elif __grains__['os'] == 'AIX': fs_mntops = '' # Adjust the config file based on the OS if config == '/etc/fstab': if __grains__['os'] in ['MacOS', 'Darwin']: config = '/etc/auto_salt' elif __grains__['os'] == 'AIX': config = '/etc/filesystems' if not fs_file == '/': fs_file = fs_file.rstrip('/') fs_spec = _convert_to(name, mount_by) # Validate that the device is valid after the conversion if not fs_spec: msg = 'Device {} cannot be converted to {}' ret['comment'].append(msg.format(name, mount_by)) return ret if __opts__['test']: if __grains__['os'] in ['MacOS', 'Darwin']: out = __salt__['mount.set_automaster'](name=fs_file, device=fs_spec, fstype=fs_vfstype, opts=fs_mntops, config=config, test=True) elif __grains__['os'] == 'AIX': out = __salt__['mount.set_filesystems'](name=fs_file, device=fs_spec, fstype=fs_vfstype, opts=fs_mntops, mount=mount, config=config, test=True, match_on=match_on) else: out = __salt__['mount.set_fstab'](name=fs_file, device=fs_spec, fstype=fs_vfstype, opts=fs_mntops, dump=fs_freq, pass_num=fs_passno, config=config, test=True, match_on=match_on) ret['result'] = None if out == 'present': msg = '{} entry is already in {}.' ret['comment'].append(msg.format(fs_file, config)) elif out == 'new': msg = '{} entry will be written in {}.' ret['comment'].append(msg.format(fs_file, config)) elif out == 'change': msg = '{} entry will be updated in {}.' ret['comment'].append(msg.format(fs_file, config)) else: ret['result'] = False msg = '{} entry cannot be created in {}: {}.' ret['comment'].append(msg.format(fs_file, config, out)) return ret if __grains__['os'] in ['MacOS', 'Darwin']: out = __salt__['mount.set_automaster'](name=fs_file, device=fs_spec, fstype=fs_vfstype, opts=fs_mntops, config=config) elif __grains__['os'] == 'AIX': out = __salt__['mount.set_filesystems'](name=fs_file, device=fs_spec, fstype=fs_vfstype, opts=fs_mntops, mount=mount, config=config, match_on=match_on) else: out = __salt__['mount.set_fstab'](name=fs_file, device=fs_spec, fstype=fs_vfstype, opts=fs_mntops, dump=fs_freq, pass_num=fs_passno, config=config, match_on=match_on) ret['result'] = True if out == 'present': msg = '{} entry was already in {}.' ret['comment'].append(msg.format(fs_file, config)) elif out == 'new': ret['changes']['persist'] = out msg = '{} entry added in {}.' ret['comment'].append(msg.format(fs_file, config)) elif out == 'change': ret['changes']['persist'] = out msg = '{} entry updated in {}.' ret['comment'].append(msg.format(fs_file, config)) else: ret['result'] = False msg = '{} entry cannot be changed in {}: {}.' ret['comment'].append(msg.format(fs_file, config, out)) return ret
[ "def", "fstab_present", "(", "name", ",", "fs_file", ",", "fs_vfstype", ",", "fs_mntops", "=", "'defaults'", ",", "fs_freq", "=", "0", ",", "fs_passno", "=", "0", ",", "mount_by", "=", "None", ",", "config", "=", "'/etc/fstab'", ",", "mount", "=", "True", ",", "match_on", "=", "'auto'", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "[", "]", ",", "}", "# Adjust fs_mntops based on the OS", "if", "fs_mntops", "==", "'defaults'", ":", "if", "__grains__", "[", "'os'", "]", "in", "[", "'MacOS'", ",", "'Darwin'", "]", ":", "fs_mntops", "=", "'noowners'", "elif", "__grains__", "[", "'os'", "]", "==", "'AIX'", ":", "fs_mntops", "=", "''", "# Adjust the config file based on the OS", "if", "config", "==", "'/etc/fstab'", ":", "if", "__grains__", "[", "'os'", "]", "in", "[", "'MacOS'", ",", "'Darwin'", "]", ":", "config", "=", "'/etc/auto_salt'", "elif", "__grains__", "[", "'os'", "]", "==", "'AIX'", ":", "config", "=", "'/etc/filesystems'", "if", "not", "fs_file", "==", "'/'", ":", "fs_file", "=", "fs_file", ".", "rstrip", "(", "'/'", ")", "fs_spec", "=", "_convert_to", "(", "name", ",", "mount_by", ")", "# Validate that the device is valid after the conversion", "if", "not", "fs_spec", ":", "msg", "=", "'Device {} cannot be converted to {}'", "ret", "[", "'comment'", "]", ".", "append", "(", "msg", ".", "format", "(", "name", ",", "mount_by", ")", ")", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "if", "__grains__", "[", "'os'", "]", "in", "[", "'MacOS'", ",", "'Darwin'", "]", ":", "out", "=", "__salt__", "[", "'mount.set_automaster'", "]", "(", "name", "=", "fs_file", ",", "device", "=", "fs_spec", ",", "fstype", "=", "fs_vfstype", ",", "opts", "=", "fs_mntops", ",", "config", "=", "config", ",", "test", "=", "True", ")", "elif", "__grains__", "[", "'os'", "]", "==", "'AIX'", ":", "out", "=", "__salt__", "[", "'mount.set_filesystems'", "]", "(", "name", "=", "fs_file", ",", "device", "=", "fs_spec", ",", "fstype", "=", "fs_vfstype", ",", "opts", "=", "fs_mntops", ",", "mount", "=", "mount", ",", "config", "=", "config", ",", "test", "=", "True", ",", "match_on", "=", "match_on", ")", "else", ":", "out", "=", "__salt__", "[", "'mount.set_fstab'", "]", "(", "name", "=", "fs_file", ",", "device", "=", "fs_spec", ",", "fstype", "=", "fs_vfstype", ",", "opts", "=", "fs_mntops", ",", "dump", "=", "fs_freq", ",", "pass_num", "=", "fs_passno", ",", "config", "=", "config", ",", "test", "=", "True", ",", "match_on", "=", "match_on", ")", "ret", "[", "'result'", "]", "=", "None", "if", "out", "==", "'present'", ":", "msg", "=", "'{} entry is already in {}.'", "ret", "[", "'comment'", "]", ".", "append", "(", "msg", ".", "format", "(", "fs_file", ",", "config", ")", ")", "elif", "out", "==", "'new'", ":", "msg", "=", "'{} entry will be written in {}.'", "ret", "[", "'comment'", "]", ".", "append", "(", "msg", ".", "format", "(", "fs_file", ",", "config", ")", ")", "elif", "out", "==", "'change'", ":", "msg", "=", "'{} entry will be updated in {}.'", "ret", "[", "'comment'", "]", ".", "append", "(", "msg", ".", "format", "(", "fs_file", ",", "config", ")", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "msg", "=", "'{} entry cannot be created in {}: {}.'", "ret", "[", "'comment'", "]", ".", "append", "(", "msg", ".", "format", "(", "fs_file", ",", "config", ",", "out", ")", ")", "return", "ret", "if", "__grains__", "[", "'os'", "]", "in", "[", "'MacOS'", ",", "'Darwin'", "]", ":", "out", 
"=", "__salt__", "[", "'mount.set_automaster'", "]", "(", "name", "=", "fs_file", ",", "device", "=", "fs_spec", ",", "fstype", "=", "fs_vfstype", ",", "opts", "=", "fs_mntops", ",", "config", "=", "config", ")", "elif", "__grains__", "[", "'os'", "]", "==", "'AIX'", ":", "out", "=", "__salt__", "[", "'mount.set_filesystems'", "]", "(", "name", "=", "fs_file", ",", "device", "=", "fs_spec", ",", "fstype", "=", "fs_vfstype", ",", "opts", "=", "fs_mntops", ",", "mount", "=", "mount", ",", "config", "=", "config", ",", "match_on", "=", "match_on", ")", "else", ":", "out", "=", "__salt__", "[", "'mount.set_fstab'", "]", "(", "name", "=", "fs_file", ",", "device", "=", "fs_spec", ",", "fstype", "=", "fs_vfstype", ",", "opts", "=", "fs_mntops", ",", "dump", "=", "fs_freq", ",", "pass_num", "=", "fs_passno", ",", "config", "=", "config", ",", "match_on", "=", "match_on", ")", "ret", "[", "'result'", "]", "=", "True", "if", "out", "==", "'present'", ":", "msg", "=", "'{} entry was already in {}.'", "ret", "[", "'comment'", "]", ".", "append", "(", "msg", ".", "format", "(", "fs_file", ",", "config", ")", ")", "elif", "out", "==", "'new'", ":", "ret", "[", "'changes'", "]", "[", "'persist'", "]", "=", "out", "msg", "=", "'{} entry added in {}.'", "ret", "[", "'comment'", "]", ".", "append", "(", "msg", ".", "format", "(", "fs_file", ",", "config", ")", ")", "elif", "out", "==", "'change'", ":", "ret", "[", "'changes'", "]", "[", "'persist'", "]", "=", "out", "msg", "=", "'{} entry updated in {}.'", "ret", "[", "'comment'", "]", ".", "append", "(", "msg", ".", "format", "(", "fs_file", ",", "config", ")", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "msg", "=", "'{} entry cannot be changed in {}: {}.'", "ret", "[", "'comment'", "]", ".", "append", "(", "msg", ".", "format", "(", "fs_file", ",", "config", ",", "out", ")", ")", "return", "ret" ]
Makes sure that a fstab mount point is present.

name
    The name of the block device. Can be any valid fs_spec value.

fs_file
    Mount point (target) for the filesystem.

fs_vfstype
    The type of the filesystem (e.g. ext4, xfs, btrfs, ...)

fs_mntops
    The mount options associated with the filesystem. Default is ``defaults``.

fs_freq
    Field used by dump to determine which filesystems need to be dumped.
    Default is ``0``.

fs_passno
    Field used by fsck to determine the order in which filesystem checks are
    done at boot time. Default is ``0``.

mount_by
    Select the final value for fs_spec. Can be [``None``, ``device``,
    ``label``, ``uuid``, ``partlabel``, ``partuuid``]. If ``None``, the value
    for fs_spec will be the parameter ``name``; otherwise the correct value
    will be looked up based on the device name. For example, for ``uuid``,
    the value for fs_spec will be of type 'UUID=xxx' instead of the device
    name set in ``name``.

config
    Place where the fstab file lives. Default is ``/etc/fstab``.

mount
    Set whether the filesystem should be mounted immediately. Default is
    ``True``.

match_on
    A name or list of fstab properties on which this state should be applied.
    Default is ``auto``, a special value indicating to guess based on fstype.
    In general, ``auto`` matches on name for recognized special devices and
    device otherwise.
[ "Makes", "sure", "that", "a", "fstab", "mount", "point", "is", "pressent", "." ]
python
train
40.335329
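A minimal usage sketch of the return contract documented above: a Salt state hands back a dict with name/result/changes/comment keys, which callers then render. The sample values are illustrative, not captured from a real run, and the state itself is not invoked here because it needs Salt's __salt__/__grains__/__opts__ dunders at runtime.

def summarize_state_return(ret):
    # result is True (applied), False (failed) or None (dry/test run)
    status = {True: 'OK', False: 'FAILED', None: 'TEST'}[ret['result']]
    lines = ['{}: {}'.format(ret['name'], status)]
    lines.extend('  comment: {}'.format(c) for c in ret['comment'])
    if ret['changes']:
        lines.append('  changes: {!r}'.format(ret['changes']))
    return '\n'.join(lines)

sample = {                                   # shape mirrors the 'new' branch above
    'name': '/dev/disk/by-uuid/0000-0000',   # hypothetical device
    'result': True,
    'changes': {'persist': 'new'},
    'comment': ['/mnt/data entry added in /etc/fstab.'],
}
print(summarize_state_return(sample))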
mar10/wsgidav
wsgidav/request_server.py
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/request_server.py#L371-L484
def do_PROPPATCH(self, environ, start_response): """Handle PROPPATCH request to set or remove a property. @see http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH """ path = environ["PATH_INFO"] res = self._davProvider.get_resource_inst(path, environ) # Only accept Depth: 0 (but assume this, if omitted) environ.setdefault("HTTP_DEPTH", "0") if environ["HTTP_DEPTH"] != "0": self._fail(HTTP_BAD_REQUEST, "Depth must be '0'.") if res is None: self._fail(HTTP_NOT_FOUND) self._evaluate_if_headers(res, environ) self._check_write_permission(res, "0", environ) # Parse request requestEL = util.parse_xml_body(environ) if requestEL.tag != "{DAV:}propertyupdate": self._fail(HTTP_BAD_REQUEST) # Create a list of update request tuples: (name, value) propupdatelist = [] for ppnode in requestEL: propupdatemethod = None if ppnode.tag == "{DAV:}remove": propupdatemethod = "remove" elif ppnode.tag == "{DAV:}set": propupdatemethod = "set" else: self._fail( HTTP_BAD_REQUEST, "Unknown tag (expected 'set' or 'remove')." ) for propnode in ppnode: if propnode.tag != "{DAV:}prop": self._fail(HTTP_BAD_REQUEST, "Unknown tag (expected 'prop').") for propertynode in propnode: propvalue = None if propupdatemethod == "remove": propvalue = None # Mark as 'remove' if len(propertynode) > 0: # 14.23: All the XML elements in a 'prop' XML # element inside of a 'remove' XML element MUST be # empty self._fail( HTTP_BAD_REQUEST, "prop element must be empty for 'remove'.", ) else: propvalue = propertynode propupdatelist.append((propertynode.tag, propvalue)) # Apply updates in SIMULATION MODE and create a result list (name, # result) successflag = True writeresultlist = [] for (name, propvalue) in propupdatelist: try: res.set_property_value(name, propvalue, dry_run=True) except Exception as e: writeresult = as_DAVError(e) else: writeresult = "200 OK" writeresultlist.append((name, writeresult)) successflag = successflag and writeresult == "200 OK" # Generate response list of 2-tuples (name, value) # <value> is None on success, or an instance of DAVError propResponseList = [] responsedescription = [] if not successflag: # If dry run failed: convert all OK to FAILED_DEPENDENCY. for (name, result) in writeresultlist: if result == "200 OK": result = DAVError(HTTP_FAILED_DEPENDENCY) elif isinstance(result, DAVError): responsedescription.append(result.get_user_info()) propResponseList.append((name, result)) else: # Dry-run succeeded: set properties again, this time in 'real' mode # In theory, there should be no exceptions thrown here, but this is # real live... for (name, propvalue) in propupdatelist: try: res.set_property_value(name, propvalue, dry_run=False) # Set value to None, so the response xml contains empty tags propResponseList.append((name, None)) except Exception as e: e = as_DAVError(e) propResponseList.append((name, e)) responsedescription.append(e.get_user_info()) # Generate response XML multistatusEL = xml_tools.make_multistatus_el() href = res.get_href() util.add_property_response(multistatusEL, href, propResponseList) if responsedescription: etree.SubElement( multistatusEL, "{DAV:}responsedescription" ).text = "\n".join(responsedescription) # Send response return util.send_multi_status_response(environ, start_response, multistatusEL)
[ "def", "do_PROPPATCH", "(", "self", ",", "environ", ",", "start_response", ")", ":", "path", "=", "environ", "[", "\"PATH_INFO\"", "]", "res", "=", "self", ".", "_davProvider", ".", "get_resource_inst", "(", "path", ",", "environ", ")", "# Only accept Depth: 0 (but assume this, if omitted)", "environ", ".", "setdefault", "(", "\"HTTP_DEPTH\"", ",", "\"0\"", ")", "if", "environ", "[", "\"HTTP_DEPTH\"", "]", "!=", "\"0\"", ":", "self", ".", "_fail", "(", "HTTP_BAD_REQUEST", ",", "\"Depth must be '0'.\"", ")", "if", "res", "is", "None", ":", "self", ".", "_fail", "(", "HTTP_NOT_FOUND", ")", "self", ".", "_evaluate_if_headers", "(", "res", ",", "environ", ")", "self", ".", "_check_write_permission", "(", "res", ",", "\"0\"", ",", "environ", ")", "# Parse request", "requestEL", "=", "util", ".", "parse_xml_body", "(", "environ", ")", "if", "requestEL", ".", "tag", "!=", "\"{DAV:}propertyupdate\"", ":", "self", ".", "_fail", "(", "HTTP_BAD_REQUEST", ")", "# Create a list of update request tuples: (name, value)", "propupdatelist", "=", "[", "]", "for", "ppnode", "in", "requestEL", ":", "propupdatemethod", "=", "None", "if", "ppnode", ".", "tag", "==", "\"{DAV:}remove\"", ":", "propupdatemethod", "=", "\"remove\"", "elif", "ppnode", ".", "tag", "==", "\"{DAV:}set\"", ":", "propupdatemethod", "=", "\"set\"", "else", ":", "self", ".", "_fail", "(", "HTTP_BAD_REQUEST", ",", "\"Unknown tag (expected 'set' or 'remove').\"", ")", "for", "propnode", "in", "ppnode", ":", "if", "propnode", ".", "tag", "!=", "\"{DAV:}prop\"", ":", "self", ".", "_fail", "(", "HTTP_BAD_REQUEST", ",", "\"Unknown tag (expected 'prop').\"", ")", "for", "propertynode", "in", "propnode", ":", "propvalue", "=", "None", "if", "propupdatemethod", "==", "\"remove\"", ":", "propvalue", "=", "None", "# Mark as 'remove'", "if", "len", "(", "propertynode", ")", ">", "0", ":", "# 14.23: All the XML elements in a 'prop' XML", "# element inside of a 'remove' XML element MUST be", "# empty", "self", ".", "_fail", "(", "HTTP_BAD_REQUEST", ",", "\"prop element must be empty for 'remove'.\"", ",", ")", "else", ":", "propvalue", "=", "propertynode", "propupdatelist", ".", "append", "(", "(", "propertynode", ".", "tag", ",", "propvalue", ")", ")", "# Apply updates in SIMULATION MODE and create a result list (name,", "# result)", "successflag", "=", "True", "writeresultlist", "=", "[", "]", "for", "(", "name", ",", "propvalue", ")", "in", "propupdatelist", ":", "try", ":", "res", ".", "set_property_value", "(", "name", ",", "propvalue", ",", "dry_run", "=", "True", ")", "except", "Exception", "as", "e", ":", "writeresult", "=", "as_DAVError", "(", "e", ")", "else", ":", "writeresult", "=", "\"200 OK\"", "writeresultlist", ".", "append", "(", "(", "name", ",", "writeresult", ")", ")", "successflag", "=", "successflag", "and", "writeresult", "==", "\"200 OK\"", "# Generate response list of 2-tuples (name, value)", "# <value> is None on success, or an instance of DAVError", "propResponseList", "=", "[", "]", "responsedescription", "=", "[", "]", "if", "not", "successflag", ":", "# If dry run failed: convert all OK to FAILED_DEPENDENCY.", "for", "(", "name", ",", "result", ")", "in", "writeresultlist", ":", "if", "result", "==", "\"200 OK\"", ":", "result", "=", "DAVError", "(", "HTTP_FAILED_DEPENDENCY", ")", "elif", "isinstance", "(", "result", ",", "DAVError", ")", ":", "responsedescription", ".", "append", "(", "result", ".", "get_user_info", "(", ")", ")", "propResponseList", ".", "append", "(", "(", "name", ",", "result", ")", ")", "else", ":", 
"# Dry-run succeeded: set properties again, this time in 'real' mode", "# In theory, there should be no exceptions thrown here, but this is", "# real live...", "for", "(", "name", ",", "propvalue", ")", "in", "propupdatelist", ":", "try", ":", "res", ".", "set_property_value", "(", "name", ",", "propvalue", ",", "dry_run", "=", "False", ")", "# Set value to None, so the response xml contains empty tags", "propResponseList", ".", "append", "(", "(", "name", ",", "None", ")", ")", "except", "Exception", "as", "e", ":", "e", "=", "as_DAVError", "(", "e", ")", "propResponseList", ".", "append", "(", "(", "name", ",", "e", ")", ")", "responsedescription", ".", "append", "(", "e", ".", "get_user_info", "(", ")", ")", "# Generate response XML", "multistatusEL", "=", "xml_tools", ".", "make_multistatus_el", "(", ")", "href", "=", "res", ".", "get_href", "(", ")", "util", ".", "add_property_response", "(", "multistatusEL", ",", "href", ",", "propResponseList", ")", "if", "responsedescription", ":", "etree", ".", "SubElement", "(", "multistatusEL", ",", "\"{DAV:}responsedescription\"", ")", ".", "text", "=", "\"\\n\"", ".", "join", "(", "responsedescription", ")", "# Send response", "return", "util", ".", "send_multi_status_response", "(", "environ", ",", "start_response", ",", "multistatusEL", ")" ]
Handle PROPPATCH request to set or remove a property. @see http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH
[ "Handle", "PROPPATCH", "request", "to", "set", "or", "remove", "a", "property", "." ]
python
valid
39.45614
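A sketch of the kind of PROPPATCH body do_PROPPATCH() accepts: a DAV: propertyupdate root holding set/remove elements, each wrapping a prop element. Parsed here with the standard library for illustration; wsgidav itself goes through util.parse_xml_body(). The Z: namespace and property names are invented.

import xml.etree.ElementTree as etree

body = b'''<?xml version="1.0" encoding="utf-8"?>
<D:propertyupdate xmlns:D="DAV:" xmlns:Z="urn:example:custom">
  <D:set>
    <D:prop><Z:color>red</Z:color></D:prop>
  </D:set>
  <D:remove>
    <D:prop><Z:obsolete/></D:prop>
  </D:remove>
</D:propertyupdate>'''

root = etree.fromstring(body)
assert root.tag == '{DAV:}propertyupdate'
for ppnode in root:
    assert ppnode.tag in ('{DAV:}set', '{DAV:}remove')
    for propnode in ppnode:
        assert propnode.tag == '{DAV:}prop'
print('well-formed propertyupdate with', len(root), 'operations')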
python-openxml/python-docx
docx/package.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/package.py#L80-L88
def _add_image_part(self, image): """ Return an |ImagePart| instance newly created from image and appended to the collection. """ partname = self._next_image_partname(image.ext) image_part = ImagePart.from_image(image, partname) self.append(image_part) return image_part
[ "def", "_add_image_part", "(", "self", ",", "image", ")", ":", "partname", "=", "self", ".", "_next_image_partname", "(", "image", ".", "ext", ")", "image_part", "=", "ImagePart", ".", "from_image", "(", "image", ",", "partname", ")", "self", ".", "append", "(", "image_part", ")", "return", "image_part" ]
Return an |ImagePart| instance newly created from image and appended to the collection.
[ "Return", "an", "|ImagePart|", "instance", "newly", "created", "from", "image", "and", "appended", "to", "the", "collection", "." ]
python
train
36.222222
CiscoUcs/UcsPythonSDK
src/UcsSdk/utils/helper.py
https://github.com/CiscoUcs/UcsPythonSDK/blob/bf6b07d6abeacb922c92b198352eda4eb9e4629b/src/UcsSdk/utils/helper.py#L44-L58
def generate_ucsm_handle(hostname, username, password):
    """ Creates UCS Manager handle object and establishes a session
    with UCS Manager.
    :param hostname: UCS Manager hostname or IP-address
    :param username: Username to login to UCS Manager
    :param password: Login user password
    :raises UcsConnectionError: In case of error.
    """
    ucs_handle = UcsHandle()
    try:
        success = ucs_handle.Login(hostname, username, password)
    except UcsException as e:
        # %-formatting with a named conversion needs a mapping, not a bare value
        print("Cisco client exception %(msg)s" % {'msg': e.message})
        raise exception.UcsConnectionError(message=e.message)
    return success, ucs_handle
[ "def", "generate_ucsm_handle", "(", "hostname", ",", "username", ",", "password", ")", ":", "ucs_handle", "=", "UcsHandle", "(", ")", "try", ":", "success", "=", "ucs_handle", ".", "Login", "(", "hostname", ",", "username", ",", "password", ")", "except", "UcsException", "as", "e", ":", "print", "(", "\"Cisco client exception %(msg)s\"", "%", "(", "e", ".", "message", ")", ")", "raise", "exception", ".", "UcsConnectionError", "(", "message", "=", "e", ".", "message", ")", "return", "success", ",", "ucs_handle" ]
Creates UCS Manager handle object and establishes a session with UCS Manager.

:param hostname: UCS Manager hostname or IP-address
:param username: Username to login to UCS Manager
:param password: Login user password
:raises UcsConnectionError: In case of error.
[ "Creates", "UCS", "Manager", "handle", "object", "and", "establishes", "a", "session", "with", "UCS", "Manager", ".", ":", "param", "hostname", ":", "UCS", "Manager", "hostname", "or", "IP", "-", "address", ":", "param", "username", ":", "Username", "to", "login", "to", "UCS", "Manager", ":", "param", "password", ":", "Login", "user", "password", ":", "raises", "UcsConnectionError", ":", "In", "case", "of", "error", "." ]
python
train
42.6
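A hedged usage sketch for the helper above; the hostname and credentials are placeholders, and the import paths assume the package layout shown in this record's path field.

from UcsSdk.utils.helper import generate_ucsm_handle
from UcsSdk.utils import exception

try:
    success, handle = generate_ucsm_handle('ucsm.example.com', 'admin', 'secret')
    if success:
        handle.Logout()   # release the UCS Manager session when done
except exception.UcsConnectionError as err:
    print('could not establish a UCS Manager session: %s' % err)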
pymc-devs/pymc
pymc/StepMethods.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1297-L1335
def recursive_cov(self, cov, length, mean, chain, scaling=1, epsilon=0): r"""Compute the covariance recursively. Return the new covariance and the new mean. .. math:: C_k & = \frac{1}{k-1} (\sum_{i=1}^k x_i x_i^T - k\bar{x_k}\bar{x_k}^T) C_n & = \frac{1}{n-1} (\sum_{i=1}^k x_i x_i^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T) & = \frac{1}{n-1} ((k-1)C_k + k\bar{x_k}\bar{x_k}^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T) :Parameters: - cov : matrix Previous covariance matrix. - length : int Length of chain used to compute the previous covariance. - mean : array Previous mean. - chain : array Sample used to update covariance. - scaling : float Scaling parameter - epsilon : float Set to a small value to avoid singular matrices. """ n = length + len(chain) k = length new_mean = self.recursive_mean(mean, length, chain) t0 = k * np.outer(mean, mean) t1 = np.dot(chain.T, chain) t2 = n * np.outer(new_mean, new_mean) t3 = epsilon * np.eye(cov.shape[0]) new_cov = ( k - 1) / ( n - 1.) * cov + scaling / ( n - 1.) * ( t0 + t1 - t2 + t3) return new_cov, new_mean
[ "def", "recursive_cov", "(", "self", ",", "cov", ",", "length", ",", "mean", ",", "chain", ",", "scaling", "=", "1", ",", "epsilon", "=", "0", ")", ":", "n", "=", "length", "+", "len", "(", "chain", ")", "k", "=", "length", "new_mean", "=", "self", ".", "recursive_mean", "(", "mean", ",", "length", ",", "chain", ")", "t0", "=", "k", "*", "np", ".", "outer", "(", "mean", ",", "mean", ")", "t1", "=", "np", ".", "dot", "(", "chain", ".", "T", ",", "chain", ")", "t2", "=", "n", "*", "np", ".", "outer", "(", "new_mean", ",", "new_mean", ")", "t3", "=", "epsilon", "*", "np", ".", "eye", "(", "cov", ".", "shape", "[", "0", "]", ")", "new_cov", "=", "(", "k", "-", "1", ")", "/", "(", "n", "-", "1.", ")", "*", "cov", "+", "scaling", "/", "(", "n", "-", "1.", ")", "*", "(", "t0", "+", "t1", "-", "t2", "+", "t3", ")", "return", "new_cov", ",", "new_mean" ]
r"""Compute the covariance recursively. Return the new covariance and the new mean. .. math:: C_k & = \frac{1}{k-1} (\sum_{i=1}^k x_i x_i^T - k\bar{x_k}\bar{x_k}^T) C_n & = \frac{1}{n-1} (\sum_{i=1}^k x_i x_i^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T) & = \frac{1}{n-1} ((k-1)C_k + k\bar{x_k}\bar{x_k}^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T) :Parameters: - cov : matrix Previous covariance matrix. - length : int Length of chain used to compute the previous covariance. - mean : array Previous mean. - chain : array Sample used to update covariance. - scaling : float Scaling parameter - epsilon : float Set to a small value to avoid singular matrices.
[ "r", "Compute", "the", "covariance", "recursively", "." ]
python
train
37.051282
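The recursion is easy to sanity-check numerically: with scaling=1 and epsilon=0 the update must reproduce the ordinary (ddof=1) sample covariance of the concatenated data. A standalone numpy sketch that reimplements the update rather than instantiating the step method:

import numpy as np

rng = np.random.RandomState(0)
data = rng.randn(50, 3)
k = 30
old, chain = data[:k], data[k:]

cov_k = np.cov(old, rowvar=False)      # C_k, ddof=1
mean_k = old.mean(axis=0)

n = len(data)
mean_n = (k * mean_k + chain.sum(axis=0)) / n      # recursive mean
cov_n = ((k - 1) * cov_k
         + k * np.outer(mean_k, mean_k)
         + chain.T.dot(chain)
         - n * np.outer(mean_n, mean_n)) / (n - 1.)

assert np.allclose(cov_n, np.cov(data, rowvar=False))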
inasafe/inasafe
safe/gui/tools/wizard/step_fc35_explayer_from_canvas.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_fc35_explayer_from_canvas.py#L38-L56
def get_next_step(self): """Find the proper step when user clicks the Next button. :returns: The step to be switched to :rtype: WizardStep instance or None """ if self.parent.is_selected_layer_keywordless: # insert keyword creation thread here self.parent.parent_step = self self.parent.existing_keywords = None self.parent.set_mode_label_to_keywords_creation() new_step = self.parent.step_kw_purpose else: if layers_intersect(self.parent.hazard_layer, self.parent.exposure_layer): new_step = self.parent.step_fc_agglayer_origin else: new_step = self.parent.step_fc_disjoint_layers return new_step
[ "def", "get_next_step", "(", "self", ")", ":", "if", "self", ".", "parent", ".", "is_selected_layer_keywordless", ":", "# insert keyword creation thread here", "self", ".", "parent", ".", "parent_step", "=", "self", "self", ".", "parent", ".", "existing_keywords", "=", "None", "self", ".", "parent", ".", "set_mode_label_to_keywords_creation", "(", ")", "new_step", "=", "self", ".", "parent", ".", "step_kw_purpose", "else", ":", "if", "layers_intersect", "(", "self", ".", "parent", ".", "hazard_layer", ",", "self", ".", "parent", ".", "exposure_layer", ")", ":", "new_step", "=", "self", ".", "parent", ".", "step_fc_agglayer_origin", "else", ":", "new_step", "=", "self", ".", "parent", ".", "step_fc_disjoint_layers", "return", "new_step" ]
Find the proper step when user clicks the Next button.

:returns: The step to be switched to
:rtype: WizardStep instance or None
[ "Find", "the", "proper", "step", "when", "user", "clicks", "the", "Next", "button", "." ]
python
train
41.263158
cloudnull/turbolift
turbolift/methods/__init__.py
https://github.com/cloudnull/turbolift/blob/da33034e88959226529ce762e2895e6f6356c448/turbolift/methods/__init__.py#L646-L655
def printer(self, message, color_level='info'): """Print Messages and Log it. :param message: item to print to screen """ if self.job_args.get('colorized'): print(cloud_utils.return_colorized(msg=message, color=color_level)) else: print(message)
[ "def", "printer", "(", "self", ",", "message", ",", "color_level", "=", "'info'", ")", ":", "if", "self", ".", "job_args", ".", "get", "(", "'colorized'", ")", ":", "print", "(", "cloud_utils", ".", "return_colorized", "(", "msg", "=", "message", ",", "color", "=", "color_level", ")", ")", "else", ":", "print", "(", "message", ")" ]
Print messages and log them.

:param message: item to print to screen
[ "Print", "Messages", "and", "Log", "it", "." ]
python
train
30.2
esheldon/fitsio
fitsio/hdu/table.py
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L2019-L2037
def _get_col_dimstr(tdim, is_string=False): """ not for variable length """ dimstr = '' if tdim is None: dimstr = 'array[bad TDIM]' else: if is_string: if len(tdim) > 1: dimstr = [str(d) for d in tdim[1:]] else: if len(tdim) > 1 or tdim[0] > 1: dimstr = [str(d) for d in tdim] if dimstr != '': dimstr = ','.join(dimstr) dimstr = 'array[%s]' % dimstr return dimstr
[ "def", "_get_col_dimstr", "(", "tdim", ",", "is_string", "=", "False", ")", ":", "dimstr", "=", "''", "if", "tdim", "is", "None", ":", "dimstr", "=", "'array[bad TDIM]'", "else", ":", "if", "is_string", ":", "if", "len", "(", "tdim", ")", ">", "1", ":", "dimstr", "=", "[", "str", "(", "d", ")", "for", "d", "in", "tdim", "[", "1", ":", "]", "]", "else", ":", "if", "len", "(", "tdim", ")", ">", "1", "or", "tdim", "[", "0", "]", ">", "1", ":", "dimstr", "=", "[", "str", "(", "d", ")", "for", "d", "in", "tdim", "]", "if", "dimstr", "!=", "''", ":", "dimstr", "=", "','", ".", "join", "(", "dimstr", ")", "dimstr", "=", "'array[%s]'", "%", "dimstr", "return", "dimstr" ]
Not for variable-length columns.
[ "not", "for", "variable", "length" ]
python
train
25.578947
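The branches above are easiest to read off from examples; a small sketch (the function is module-private, so importing it this way is for illustration only):

from fitsio.hdu.table import _get_col_dimstr

assert _get_col_dimstr(None) == 'array[bad TDIM]'
assert _get_col_dimstr([3]) == 'array[3]'          # 1-d column with 3 elements
assert _get_col_dimstr([1]) == ''                  # scalar column: no dim string
assert _get_col_dimstr([5, 3]) == 'array[5,3]'
# For strings the first TDIM entry is the string width, so it is dropped:
assert _get_col_dimstr([10, 3], is_string=True) == 'array[3]'
assert _get_col_dimstr([10], is_string=True) == ''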
saltstack/salt
salt/cloud/clouds/gce.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gce.py#L1952-L1970
def show_disk(name=None, kwargs=None, call=None): # pylint: disable=W0613 ''' Show the details of an existing disk. CLI Example: .. code-block:: bash salt-cloud -a show_disk myinstance disk_name=mydisk salt-cloud -f show_disk gce disk_name=mydisk ''' if not kwargs or 'disk_name' not in kwargs: log.error( 'Must specify disk_name.' ) return False conn = get_conn() return _expand_disk(conn.ex_get_volume(kwargs['disk_name']))
[ "def", "show_disk", "(", "name", "=", "None", ",", "kwargs", "=", "None", ",", "call", "=", "None", ")", ":", "# pylint: disable=W0613", "if", "not", "kwargs", "or", "'disk_name'", "not", "in", "kwargs", ":", "log", ".", "error", "(", "'Must specify disk_name.'", ")", "return", "False", "conn", "=", "get_conn", "(", ")", "return", "_expand_disk", "(", "conn", ".", "ex_get_volume", "(", "kwargs", "[", "'disk_name'", "]", ")", ")" ]
Show the details of an existing disk.

CLI Example:

.. code-block:: bash

    salt-cloud -a show_disk myinstance disk_name=mydisk
    salt-cloud -f show_disk gce disk_name=mydisk
[ "Show", "the", "details", "of", "an", "existing", "disk", "." ]
python
train
26.105263
napalm-automation/napalm-ios
napalm_ios/ios.py
https://github.com/napalm-automation/napalm-ios/blob/7bbbc6a4d9f70a5b8cf32b7c7072a7ab437ddb81/napalm_ios/ios.py#L1520-L1581
def get_environment(self): """ Get environment facts. power and fan are currently not implemented cpu is using 1-minute average cpu hard-coded to cpu0 (i.e. only a single CPU) """ environment = {} cpu_cmd = 'show proc cpu' mem_cmd = 'show memory statistics' temp_cmd = 'show env temperature status' output = self._send_command(cpu_cmd) environment.setdefault('cpu', {}) environment['cpu'][0] = {} environment['cpu'][0]['%usage'] = 0.0 for line in output.splitlines(): if 'CPU utilization' in line: # CPU utilization for five seconds: 2%/0%; one minute: 2%; five minutes: 1% cpu_regex = r'^.*one minute: (\d+)%; five.*$' match = re.search(cpu_regex, line) environment['cpu'][0]['%usage'] = float(match.group(1)) break output = self._send_command(mem_cmd) for line in output.splitlines(): if 'Processor' in line: _, _, _, proc_used_mem, proc_free_mem = line.split()[:5] elif 'I/O' in line or 'io' in line: _, _, _, io_used_mem, io_free_mem = line.split()[:5] used_mem = int(proc_used_mem) + int(io_used_mem) free_mem = int(proc_free_mem) + int(io_free_mem) environment.setdefault('memory', {}) environment['memory']['used_ram'] = used_mem environment['memory']['available_ram'] = free_mem environment.setdefault('temperature', {}) # The 'show env temperature status' is not ubiquitous in Cisco IOS output = self._send_command(temp_cmd) if '% Invalid' not in output: for line in output.splitlines(): if 'System Temperature Value' in line: system_temp = float(line.split(':')[1].split()[0]) elif 'Yellow Threshold' in line: system_temp_alert = float(line.split(':')[1].split()[0]) elif 'Red Threshold' in line: system_temp_crit = float(line.split(':')[1].split()[0]) env_value = {'is_alert': system_temp >= system_temp_alert, 'is_critical': system_temp >= system_temp_crit, 'temperature': system_temp} environment['temperature']['system'] = env_value else: env_value = {'is_alert': False, 'is_critical': False, 'temperature': -1.0} environment['temperature']['invalid'] = env_value # Initialize 'power' and 'fan' to default values (not implemented) environment.setdefault('power', {}) environment['power']['invalid'] = {'status': True, 'output': -1.0, 'capacity': -1.0} environment.setdefault('fans', {}) environment['fans']['invalid'] = {'status': True} return environment
[ "def", "get_environment", "(", "self", ")", ":", "environment", "=", "{", "}", "cpu_cmd", "=", "'show proc cpu'", "mem_cmd", "=", "'show memory statistics'", "temp_cmd", "=", "'show env temperature status'", "output", "=", "self", ".", "_send_command", "(", "cpu_cmd", ")", "environment", ".", "setdefault", "(", "'cpu'", ",", "{", "}", ")", "environment", "[", "'cpu'", "]", "[", "0", "]", "=", "{", "}", "environment", "[", "'cpu'", "]", "[", "0", "]", "[", "'%usage'", "]", "=", "0.0", "for", "line", "in", "output", ".", "splitlines", "(", ")", ":", "if", "'CPU utilization'", "in", "line", ":", "# CPU utilization for five seconds: 2%/0%; one minute: 2%; five minutes: 1%", "cpu_regex", "=", "r'^.*one minute: (\\d+)%; five.*$'", "match", "=", "re", ".", "search", "(", "cpu_regex", ",", "line", ")", "environment", "[", "'cpu'", "]", "[", "0", "]", "[", "'%usage'", "]", "=", "float", "(", "match", ".", "group", "(", "1", ")", ")", "break", "output", "=", "self", ".", "_send_command", "(", "mem_cmd", ")", "for", "line", "in", "output", ".", "splitlines", "(", ")", ":", "if", "'Processor'", "in", "line", ":", "_", ",", "_", ",", "_", ",", "proc_used_mem", ",", "proc_free_mem", "=", "line", ".", "split", "(", ")", "[", ":", "5", "]", "elif", "'I/O'", "in", "line", "or", "'io'", "in", "line", ":", "_", ",", "_", ",", "_", ",", "io_used_mem", ",", "io_free_mem", "=", "line", ".", "split", "(", ")", "[", ":", "5", "]", "used_mem", "=", "int", "(", "proc_used_mem", ")", "+", "int", "(", "io_used_mem", ")", "free_mem", "=", "int", "(", "proc_free_mem", ")", "+", "int", "(", "io_free_mem", ")", "environment", ".", "setdefault", "(", "'memory'", ",", "{", "}", ")", "environment", "[", "'memory'", "]", "[", "'used_ram'", "]", "=", "used_mem", "environment", "[", "'memory'", "]", "[", "'available_ram'", "]", "=", "free_mem", "environment", ".", "setdefault", "(", "'temperature'", ",", "{", "}", ")", "# The 'show env temperature status' is not ubiquitous in Cisco IOS", "output", "=", "self", ".", "_send_command", "(", "temp_cmd", ")", "if", "'% Invalid'", "not", "in", "output", ":", "for", "line", "in", "output", ".", "splitlines", "(", ")", ":", "if", "'System Temperature Value'", "in", "line", ":", "system_temp", "=", "float", "(", "line", ".", "split", "(", "':'", ")", "[", "1", "]", ".", "split", "(", ")", "[", "0", "]", ")", "elif", "'Yellow Threshold'", "in", "line", ":", "system_temp_alert", "=", "float", "(", "line", ".", "split", "(", "':'", ")", "[", "1", "]", ".", "split", "(", ")", "[", "0", "]", ")", "elif", "'Red Threshold'", "in", "line", ":", "system_temp_crit", "=", "float", "(", "line", ".", "split", "(", "':'", ")", "[", "1", "]", ".", "split", "(", ")", "[", "0", "]", ")", "env_value", "=", "{", "'is_alert'", ":", "system_temp", ">=", "system_temp_alert", ",", "'is_critical'", ":", "system_temp", ">=", "system_temp_crit", ",", "'temperature'", ":", "system_temp", "}", "environment", "[", "'temperature'", "]", "[", "'system'", "]", "=", "env_value", "else", ":", "env_value", "=", "{", "'is_alert'", ":", "False", ",", "'is_critical'", ":", "False", ",", "'temperature'", ":", "-", "1.0", "}", "environment", "[", "'temperature'", "]", "[", "'invalid'", "]", "=", "env_value", "# Initialize 'power' and 'fan' to default values (not implemented)", "environment", ".", "setdefault", "(", "'power'", ",", "{", "}", ")", "environment", "[", "'power'", "]", "[", "'invalid'", "]", "=", "{", "'status'", ":", "True", ",", "'output'", ":", "-", "1.0", ",", "'capacity'", ":", "-", "1.0", "}", 
"environment", ".", "setdefault", "(", "'fans'", ",", "{", "}", ")", "environment", "[", "'fans'", "]", "[", "'invalid'", "]", "=", "{", "'status'", ":", "True", "}", "return", "environment" ]
Get environment facts.

power and fan are currently not implemented
cpu is using 1-minute average
cpu hard-coded to cpu0 (i.e. only a single CPU)
[ "Get", "environment", "facts", "." ]
python
train
45.467742
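A small sketch of the CPU parsing step: the one-minute figure is pulled out with the regex used above, applied to the sample line quoted in the code comment.

import re

line = 'CPU utilization for five seconds: 2%/0%; one minute: 2%; five minutes: 1%'
match = re.search(r'^.*one minute: (\d+)%; five.*$', line)
assert match is not None and float(match.group(1)) == 2.0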
ns1/ns1-python
ns1/__init__.py
https://github.com/ns1/ns1-python/blob/f3e1d90a3b76a1bd18f855f2c622a8a49d4b585e/ns1/__init__.py#L302-L310
def loadScopeGroupbyID(self, id, callback=None, errback=None): """ Load an existing Scope Group by ID into a high level Scope Group object :param int id: id of an existing ScopeGroup """ import ns1.ipam scope_group = ns1.ipam.Scopegroup(self.config, id=id) return scope_group.load(callback=callback, errback=errback)
[ "def", "loadScopeGroupbyID", "(", "self", ",", "id", ",", "callback", "=", "None", ",", "errback", "=", "None", ")", ":", "import", "ns1", ".", "ipam", "scope_group", "=", "ns1", ".", "ipam", ".", "Scopegroup", "(", "self", ".", "config", ",", "id", "=", "id", ")", "return", "scope_group", ".", "load", "(", "callback", "=", "callback", ",", "errback", "=", "errback", ")" ]
Load an existing Scope Group by ID into a high level Scope Group object

:param int id: id of an existing ScopeGroup
[ "Load", "an", "existing", "Scope", "Group", "by", "ID", "into", "a", "high", "level", "Scope", "Group", "object" ]
python
train
40.555556
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L258-L288
def mean(self, values, axis=0, weights=None, dtype=None): """compute the mean over each group Parameters ---------- values : array_like, [keys, ...] values to take average of per group axis : int, optional alternative reduction axis for values weights : ndarray, [keys, ...], optional weight to use for each value dtype : output dtype Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ values = np.asarray(values) if weights is None: result = self.reduce(values, axis=axis, dtype=dtype) shape = [1] * values.ndim shape[axis] = self.groups weights = self.count.reshape(shape) else: weights = np.asarray(weights) result = self.reduce(values * weights, axis=axis, dtype=dtype) weights = self.reduce(weights, axis=axis, dtype=dtype) return self.unique, result / weights
[ "def", "mean", "(", "self", ",", "values", ",", "axis", "=", "0", ",", "weights", "=", "None", ",", "dtype", "=", "None", ")", ":", "values", "=", "np", ".", "asarray", "(", "values", ")", "if", "weights", "is", "None", ":", "result", "=", "self", ".", "reduce", "(", "values", ",", "axis", "=", "axis", ",", "dtype", "=", "dtype", ")", "shape", "=", "[", "1", "]", "*", "values", ".", "ndim", "shape", "[", "axis", "]", "=", "self", ".", "groups", "weights", "=", "self", ".", "count", ".", "reshape", "(", "shape", ")", "else", ":", "weights", "=", "np", ".", "asarray", "(", "weights", ")", "result", "=", "self", ".", "reduce", "(", "values", "*", "weights", ",", "axis", "=", "axis", ",", "dtype", "=", "dtype", ")", "weights", "=", "self", ".", "reduce", "(", "weights", ",", "axis", "=", "axis", ",", "dtype", "=", "dtype", ")", "return", "self", ".", "unique", ",", "result", "/", "weights" ]
compute the mean over each group

Parameters
----------
values : array_like, [keys, ...]
    values to take average of per group
axis : int, optional
    alternative reduction axis for values
weights : ndarray, [keys, ...], optional
    weight to use for each value
dtype : output dtype

Returns
-------
unique: ndarray, [groups]
    unique keys
reduced : ndarray, [groups, ...]
    value array, reduced over groups
[ "compute", "the", "mean", "over", "each", "group" ]
python
train
34.903226
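A usage sketch via the package's group_by entry point (assuming the conventional import numpy_indexed as npi style); the weighted case reproduces sum(values * weights) / sum(weights) per group, as in the reduction above.

import numpy as np
import numpy_indexed as npi

keys = np.array([0, 0, 1, 1, 1])
values = np.array([1.0, 3.0, 2.0, 4.0, 6.0])

unique, means = npi.group_by(keys).mean(values)
assert np.allclose(means, [2.0, 4.0])              # plain per-group means

weights = np.array([1.0, 1.0, 1.0, 1.0, 2.0])
unique, wmeans = npi.group_by(keys).mean(values, weights=weights)
assert np.allclose(wmeans, [2.0, (2.0 + 4.0 + 12.0) / 4.0])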
openstack/python-saharaclient
saharaclient/api/job_binary_internals.py
https://github.com/openstack/python-saharaclient/blob/c53831d686d9e94187ce5dfdbfa43883b792280e/saharaclient/api/job_binary_internals.py#L29-L36
def create(self, name, data): """Create a Job Binary Internal. :param str data: raw data of script text """ return self._update('/job-binary-internals/%s' % urlparse.quote(name.encode('utf-8')), data, 'job_binary_internal', dump_json=False)
[ "def", "create", "(", "self", ",", "name", ",", "data", ")", ":", "return", "self", ".", "_update", "(", "'/job-binary-internals/%s'", "%", "urlparse", ".", "quote", "(", "name", ".", "encode", "(", "'utf-8'", ")", ")", ",", "data", ",", "'job_binary_internal'", ",", "dump_json", "=", "False", ")" ]
Create a Job Binary Internal.

:param str data: raw data of script text
[ "Create", "a", "Job", "Binary", "Internal", "." ]
python
train
40.25
GibbsConsulting/django-plotly-dash
django_plotly_dash/views.py
https://github.com/GibbsConsulting/django-plotly-dash/blob/773ed081fc2ea3cc7607590322a14686a7a79bc5/django_plotly_dash/views.py#L62-L102
def update(request, ident, stateless=False, **kwargs): 'Generate update json response' dash_app, app = DashApp.locate_item(ident, stateless) request_body = json.loads(request.body.decode('utf-8')) if app.use_dash_dispatch(): # Force call through dash view_func = app.locate_endpoint_function('dash-update-component') import flask with app.test_request_context(): # Fudge request object # pylint: disable=protected-access flask.request._cached_json = (request_body, flask.request._cached_json[True]) resp = view_func() else: # Use direct dispatch with extra arguments in the argMap app_state = request.session.get("django_plotly_dash", dict()) arg_map = {'dash_app_id': ident, 'dash_app': dash_app, 'user': request.user, 'session_state': app_state} resp = app.dispatch_with_args(request_body, arg_map) request.session['django_plotly_dash'] = app_state dash_app.handle_current_state() # Special for ws-driven edge case if str(resp) == 'EDGECASEEXIT': return HttpResponse("") # Change in returned value type try: rdata = resp.data rtype = resp.mimetype except: rdata = resp rtype = "application/json" return HttpResponse(rdata, content_type=rtype)
[ "def", "update", "(", "request", ",", "ident", ",", "stateless", "=", "False", ",", "*", "*", "kwargs", ")", ":", "dash_app", ",", "app", "=", "DashApp", ".", "locate_item", "(", "ident", ",", "stateless", ")", "request_body", "=", "json", ".", "loads", "(", "request", ".", "body", ".", "decode", "(", "'utf-8'", ")", ")", "if", "app", ".", "use_dash_dispatch", "(", ")", ":", "# Force call through dash", "view_func", "=", "app", ".", "locate_endpoint_function", "(", "'dash-update-component'", ")", "import", "flask", "with", "app", ".", "test_request_context", "(", ")", ":", "# Fudge request object", "# pylint: disable=protected-access", "flask", ".", "request", ".", "_cached_json", "=", "(", "request_body", ",", "flask", ".", "request", ".", "_cached_json", "[", "True", "]", ")", "resp", "=", "view_func", "(", ")", "else", ":", "# Use direct dispatch with extra arguments in the argMap", "app_state", "=", "request", ".", "session", ".", "get", "(", "\"django_plotly_dash\"", ",", "dict", "(", ")", ")", "arg_map", "=", "{", "'dash_app_id'", ":", "ident", ",", "'dash_app'", ":", "dash_app", ",", "'user'", ":", "request", ".", "user", ",", "'session_state'", ":", "app_state", "}", "resp", "=", "app", ".", "dispatch_with_args", "(", "request_body", ",", "arg_map", ")", "request", ".", "session", "[", "'django_plotly_dash'", "]", "=", "app_state", "dash_app", ".", "handle_current_state", "(", ")", "# Special for ws-driven edge case", "if", "str", "(", "resp", ")", "==", "'EDGECASEEXIT'", ":", "return", "HttpResponse", "(", "\"\"", ")", "# Change in returned value type", "try", ":", "rdata", "=", "resp", ".", "data", "rtype", "=", "resp", ".", "mimetype", "except", ":", "rdata", "=", "resp", "rtype", "=", "\"application/json\"", "return", "HttpResponse", "(", "rdata", ",", "content_type", "=", "rtype", ")" ]
Generate update json response
[ "Generate", "update", "json", "response" ]
python
train
34.219512
ECRL/ecabc
ecabc/abc.py
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L268-L284
def processes(self, processes): '''Set the number of concurrent processes the ABC will utilize for fitness function evaluation; if <= 1, single process is used Args: processes (int): number of concurrent processes ''' if self._processes > 1: self._pool.close() self._pool.join() self._pool = multiprocessing.Pool(processes) else: self._pool = None self._logger.log('debug', 'Number of processes set to {}'.format( processes ))
[ "def", "processes", "(", "self", ",", "processes", ")", ":", "if", "self", ".", "_processes", ">", "1", ":", "self", ".", "_pool", ".", "close", "(", ")", "self", ".", "_pool", ".", "join", "(", ")", "self", ".", "_pool", "=", "multiprocessing", ".", "Pool", "(", "processes", ")", "else", ":", "self", ".", "_pool", "=", "None", "self", ".", "_logger", ".", "log", "(", "'debug'", ",", "'Number of processes set to {}'", ".", "format", "(", "processes", ")", ")" ]
Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used

Args:
    processes (int): number of concurrent processes
[ "Set", "the", "number", "of", "concurrent", "processes", "the", "ABC", "will", "utilize", "for", "fitness", "function", "evaluation", ";", "if", "<", "=", "1", "single", "process", "is", "used" ]
python
train
32.235294
hannes-brt/hebel
hebel/config.py
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/config.py#L103-L133
def load_path(path, overrides=None, **kwargs): """ Convenience function for loading a YAML configuration from a file. Parameters ---------- path : str The path to the file to load on disk. overrides : dict, optional A dictionary containing overrides to apply. The location of the override is specified in the key as a dot-delimited path to the desired parameter, e.g. "model.corruptor.corruption_level". Returns ------- graph : dict or object The dictionary or object (if the top-level element specified an Python object to instantiate). Notes ----- Other keyword arguments are passed on to `yaml.load`. """ f = open(path, 'r') content = ''.join(f.readlines()) f.close() if not isinstance(content, str): raise AssertionError("Expected content to be of type str but it is "+str(type(content))) return load(content, **kwargs)
[ "def", "load_path", "(", "path", ",", "overrides", "=", "None", ",", "*", "*", "kwargs", ")", ":", "f", "=", "open", "(", "path", ",", "'r'", ")", "content", "=", "''", ".", "join", "(", "f", ".", "readlines", "(", ")", ")", "f", ".", "close", "(", ")", "if", "not", "isinstance", "(", "content", ",", "str", ")", ":", "raise", "AssertionError", "(", "\"Expected content to be of type str but it is \"", "+", "str", "(", "type", "(", "content", ")", ")", ")", "return", "load", "(", "content", ",", "*", "*", "kwargs", ")" ]
Convenience function for loading a YAML configuration from a file.

Parameters
----------
path : str
    The path to the file to load on disk.
overrides : dict, optional
    A dictionary containing overrides to apply. The location of
    the override is specified in the key as a dot-delimited path
    to the desired parameter, e.g. "model.corruptor.corruption_level".

Returns
-------
graph : dict or object
    The dictionary or object (if the top-level element specified a
    Python object to instantiate).

Notes
-----
Other keyword arguments are passed on to `yaml.load`.
[ "Convenience", "function", "for", "loading", "a", "YAML", "configuration", "from", "a", "file", "." ]
python
train
29.870968
google/mobly
mobly/controllers/android_device_lib/sl4a_client.py
https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/android_device_lib/sl4a_client.py#L104-L123
def stop_app(self): """Overrides superclass.""" try: if self._conn: # Be polite; let the dest know we're shutting down. try: self.closeSl4aSession() except: self.log.exception('Failed to gracefully shut down %s.', self.app_name) # Close the socket connection. self.disconnect() self.stop_event_dispatcher() # Terminate the app self._adb.shell('am force-stop com.googlecode.android_scripting') finally: # Always clean up the adb port self.clear_host_port()
[ "def", "stop_app", "(", "self", ")", ":", "try", ":", "if", "self", ".", "_conn", ":", "# Be polite; let the dest know we're shutting down.", "try", ":", "self", ".", "closeSl4aSession", "(", ")", "except", ":", "self", ".", "log", ".", "exception", "(", "'Failed to gracefully shut down %s.'", ",", "self", ".", "app_name", ")", "# Close the socket connection.", "self", ".", "disconnect", "(", ")", "self", ".", "stop_event_dispatcher", "(", ")", "# Terminate the app", "self", ".", "_adb", ".", "shell", "(", "'am force-stop com.googlecode.android_scripting'", ")", "finally", ":", "# Always clean up the adb port", "self", ".", "clear_host_port", "(", ")" ]
Overrides superclass.
[ "Overrides", "superclass", "." ]
python
train
34.85
pybel/pybel
src/pybel/manager/lookup_manager.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/manager/lookup_manager.py#L60-L62
def get_evidence_by_hash(self, evidence_hash: str) -> Optional[Evidence]: """Look up an evidence by its hash.""" return self.session.query(Evidence).filter(Evidence.sha512 == evidence_hash).one_or_none()
[ "def", "get_evidence_by_hash", "(", "self", ",", "evidence_hash", ":", "str", ")", "->", "Optional", "[", "Evidence", "]", ":", "return", "self", ".", "session", ".", "query", "(", "Evidence", ")", ".", "filter", "(", "Evidence", ".", "sha512", "==", "evidence_hash", ")", ".", "one_or_none", "(", ")" ]
Look up an evidence by its hash.
[ "Look", "up", "an", "evidence", "by", "its", "hash", "." ]
python
train
72.333333
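A generic sketch of the same one_or_none() lookup pattern against a throwaway in-memory table. The model below and the way the digest is computed are illustrative stand-ins, not pybel's actual Evidence model or hashing scheme.

import hashlib

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Evidence(Base):
    __tablename__ = 'evidence'
    id = Column(Integer, primary_key=True)
    text = Column(String)
    sha512 = Column(String(128), index=True)

engine = create_engine('sqlite://')              # in-memory database
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

digest = hashlib.sha512(b'Example sentence.').hexdigest()
session.add(Evidence(text='Example sentence.', sha512=digest))
session.commit()

hit = session.query(Evidence).filter(Evidence.sha512 == digest).one_or_none()
assert hit is not None and hit.text == 'Example sentence.'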
pkgw/pwkit
pwkit/numutil.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/numutil.py#L283-L348
def reduce_data_frame (df, chunk_slicers, avg_cols=(), uavg_cols=(), minmax_cols=(), nchunk_colname='nchunk', uncert_prefix='u', min_points_per_chunk=3): """"Reduce" a DataFrame by collapsing rows in grouped chunks. Returns another DataFrame with similar columns but fewer rows. Arguments: df The input :class:`pandas.DataFrame`. chunk_slicers An iterable that returns values that are used to slice *df* with its :meth:`pandas.DataFrame.iloc` indexer. An example value might be the generator returned from :func:`slice_evenly_with_gaps`. avg_cols An iterable of names of columns that are to be reduced by taking the mean. uavg_cols An iterable of names of columns that are to be reduced by taking a weighted mean. minmax_cols An iterable of names of columns that are to be reduced by reporting minimum and maximum values. nchunk_colname The name of a column to create reporting the number of rows contributing to each chunk. uncert_prefix The column name prefix for locating uncertainty estimates. By default, the uncertainty on the column ``"temp"`` is given in the column ``"utemp"``. min_points_per_chunk Require at least this many rows in each chunk. Smaller chunks are discarded. Returns a new :class:`pandas.DataFrame`. """ subds = [df.iloc[idx] for idx in chunk_slicers] subds = [sd for sd in subds if sd.shape[0] >= min_points_per_chunk] chunked = df.__class__ ({nchunk_colname: np.zeros (len (subds), dtype=np.int)}) # Some future-proofing: allow possibility of different ways of mapping # from a column giving a value to a column giving its uncertainty. uncert_col_name = lambda c: uncert_prefix + c for i, subd in enumerate (subds): label = chunked.index[i] chunked.loc[label,nchunk_colname] = subd.shape[0] for col in avg_cols: chunked.loc[label,col] = subd[col].mean () for col in uavg_cols: ucol = uncert_col_name (col) v, u = weighted_mean (subd[col], subd[ucol]) chunked.loc[label,col] = v chunked.loc[label,ucol] = u for col in minmax_cols: chunked.loc[label, 'min_'+col] = subd[col].min () chunked.loc[label, 'max_'+col] = subd[col].max () return chunked
[ "def", "reduce_data_frame", "(", "df", ",", "chunk_slicers", ",", "avg_cols", "=", "(", ")", ",", "uavg_cols", "=", "(", ")", ",", "minmax_cols", "=", "(", ")", ",", "nchunk_colname", "=", "'nchunk'", ",", "uncert_prefix", "=", "'u'", ",", "min_points_per_chunk", "=", "3", ")", ":", "subds", "=", "[", "df", ".", "iloc", "[", "idx", "]", "for", "idx", "in", "chunk_slicers", "]", "subds", "=", "[", "sd", "for", "sd", "in", "subds", "if", "sd", ".", "shape", "[", "0", "]", ">=", "min_points_per_chunk", "]", "chunked", "=", "df", ".", "__class__", "(", "{", "nchunk_colname", ":", "np", ".", "zeros", "(", "len", "(", "subds", ")", ",", "dtype", "=", "np", ".", "int", ")", "}", ")", "# Some future-proofing: allow possibility of different ways of mapping", "# from a column giving a value to a column giving its uncertainty.", "uncert_col_name", "=", "lambda", "c", ":", "uncert_prefix", "+", "c", "for", "i", ",", "subd", "in", "enumerate", "(", "subds", ")", ":", "label", "=", "chunked", ".", "index", "[", "i", "]", "chunked", ".", "loc", "[", "label", ",", "nchunk_colname", "]", "=", "subd", ".", "shape", "[", "0", "]", "for", "col", "in", "avg_cols", ":", "chunked", ".", "loc", "[", "label", ",", "col", "]", "=", "subd", "[", "col", "]", ".", "mean", "(", ")", "for", "col", "in", "uavg_cols", ":", "ucol", "=", "uncert_col_name", "(", "col", ")", "v", ",", "u", "=", "weighted_mean", "(", "subd", "[", "col", "]", ",", "subd", "[", "ucol", "]", ")", "chunked", ".", "loc", "[", "label", ",", "col", "]", "=", "v", "chunked", ".", "loc", "[", "label", ",", "ucol", "]", "=", "u", "for", "col", "in", "minmax_cols", ":", "chunked", ".", "loc", "[", "label", ",", "'min_'", "+", "col", "]", "=", "subd", "[", "col", "]", ".", "min", "(", ")", "chunked", ".", "loc", "[", "label", ",", "'max_'", "+", "col", "]", "=", "subd", "[", "col", "]", ".", "max", "(", ")", "return", "chunked" ]
Reduce" a DataFrame by collapsing rows in grouped chunks. Returns another DataFrame with similar columns but fewer rows. Arguments: df The input :class:`pandas.DataFrame`. chunk_slicers An iterable that returns values that are used to slice *df* with its :meth:`pandas.DataFrame.iloc` indexer. An example value might be the generator returned from :func:`slice_evenly_with_gaps`. avg_cols An iterable of names of columns that are to be reduced by taking the mean. uavg_cols An iterable of names of columns that are to be reduced by taking a weighted mean. minmax_cols An iterable of names of columns that are to be reduced by reporting minimum and maximum values. nchunk_colname The name of a column to create reporting the number of rows contributing to each chunk. uncert_prefix The column name prefix for locating uncertainty estimates. By default, the uncertainty on the column ``"temp"`` is given in the column ``"utemp"``. min_points_per_chunk Require at least this many rows in each chunk. Smaller chunks are discarded. Returns a new :class:`pandas.DataFrame`.
[ "Reduce", "a", "DataFrame", "by", "collapsing", "rows", "in", "grouped", "chunks", ".", "Returns", "another", "DataFrame", "with", "similar", "columns", "but", "fewer", "rows", "." ]
python
train
37.106061
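A usage sketch: chunk_slicers may be any iterable of objects accepted by DataFrame.iloc, so plain slices work for evenly sized chunks. Column names and values are invented; 'utemp' supplies the uncertainties for the weighted average of 'temp' via the default uncert_prefix.

import numpy as np
import pandas as pd
from pwkit.numutil import reduce_data_frame

df = pd.DataFrame({
    'mjd': np.linspace(0., 1., 6),
    'temp': [1., 2., 3., 10., 11., 12.],
    'utemp': [0.1] * 6,                  # per-point uncertainty on 'temp'
})
chunked = reduce_data_frame(
    df,
    [slice(0, 3), slice(3, 6)],          # two chunks of three rows each
    uavg_cols=['temp'],
    minmax_cols=['mjd'],
)
print(chunked[['nchunk', 'temp', 'utemp', 'min_mjd', 'max_mjd']])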
edibledinos/pwnypack
pwnypack/flow.py
https://github.com/edibledinos/pwnypack/blob/e0a5a8e6ef3f4f1f7e1b91ee379711f4a49cb0e6/pwnypack/flow.py#L665-L687
def invoke_ssh_shell(cls, *args, **kwargs): """invoke_ssh(arguments..., pty=False, echo=False) Star a new shell on a remote server. It first calls :meth:`Flow.connect_ssh` using all positional and keyword arguments, then calls :meth:`SSHClient.invoke_shell` with the pty / echo options. Args: arguments...: The options for the SSH connection. pty(bool): Request a pseudo-terminal from the server. echo(bool): Whether to echo read/written data to stdout by default. Returns: :class:`Flow`: A Flow instance initialised with the SSH channel. """ pty = kwargs.pop('pty', True) echo = kwargs.pop('echo', False) client = cls.connect_ssh(*args, **kwargs) f = client.invoke_shell(pty=pty, echo=echo) f.client = client return f
[ "def", "invoke_ssh_shell", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "pty", "=", "kwargs", ".", "pop", "(", "'pty'", ",", "True", ")", "echo", "=", "kwargs", ".", "pop", "(", "'echo'", ",", "False", ")", "client", "=", "cls", ".", "connect_ssh", "(", "*", "args", ",", "*", "*", "kwargs", ")", "f", "=", "client", ".", "invoke_shell", "(", "pty", "=", "pty", ",", "echo", "=", "echo", ")", "f", ".", "client", "=", "client", "return", "f" ]
invoke_ssh_shell(arguments..., pty=True, echo=False)

Start a new shell on a remote server. It first calls
:meth:`Flow.connect_ssh` using all positional and keyword arguments,
then calls :meth:`SSHClient.invoke_shell` with the pty / echo options.

Args:
    arguments...: The options for the SSH connection.
    pty(bool): Request a pseudo-terminal from the server.
    echo(bool): Whether to echo read/written data to stdout by default.

Returns:
    :class:`Flow`: A Flow instance initialised with the SSH channel.
[ "invoke_ssh", "(", "arguments", "...", "pty", "=", "False", "echo", "=", "False", ")" ]
python
train
37.391304
pantsbuild/pants
src/python/pants/pantsd/pants_daemon.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/pantsd/pants_daemon.py#L435-L443
def post_fork_child(self): """Post-fork() child callback for ProcessManager.daemon_spawn().""" entry_point = '{}:launch'.format(__name__) exec_env = combined_dict(os.environ, dict(PANTS_ENTRYPOINT=entry_point)) # Pass all of sys.argv so that we can proxy arg flags e.g. `-ldebug`. cmd = [sys.executable] + sys.argv self._logger.debug('cmd is: PANTS_ENTRYPOINT={} {}'.format(entry_point, ' '.join(cmd))) # TODO: Improve error handling on launch failures. os.spawnve(os.P_NOWAIT, sys.executable, cmd, env=exec_env)
[ "def", "post_fork_child", "(", "self", ")", ":", "entry_point", "=", "'{}:launch'", ".", "format", "(", "__name__", ")", "exec_env", "=", "combined_dict", "(", "os", ".", "environ", ",", "dict", "(", "PANTS_ENTRYPOINT", "=", "entry_point", ")", ")", "# Pass all of sys.argv so that we can proxy arg flags e.g. `-ldebug`.", "cmd", "=", "[", "sys", ".", "executable", "]", "+", "sys", ".", "argv", "self", ".", "_logger", ".", "debug", "(", "'cmd is: PANTS_ENTRYPOINT={} {}'", ".", "format", "(", "entry_point", ",", "' '", ".", "join", "(", "cmd", ")", ")", ")", "# TODO: Improve error handling on launch failures.", "os", ".", "spawnve", "(", "os", ".", "P_NOWAIT", ",", "sys", ".", "executable", ",", "cmd", ",", "env", "=", "exec_env", ")" ]
Post-fork() child callback for ProcessManager.daemon_spawn().
[ "Post", "-", "fork", "()", "child", "callback", "for", "ProcessManager", ".", "daemon_spawn", "()", "." ]
python
train
59.555556
JoelBender/bacpypes
py25/bacpypes/tcp.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/tcp.py#L847-L853
def indication(self, pdu): """Message going downstream.""" if _debug: StreamToPacket._debug("indication %r", pdu) # hack it up into chunks for packet in self.packetize(pdu, self.downstreamBuffer): self.request(packet)
[ "def", "indication", "(", "self", ",", "pdu", ")", ":", "if", "_debug", ":", "StreamToPacket", ".", "_debug", "(", "\"indication %r\"", ",", "pdu", ")", "# hack it up into chunks", "for", "packet", "in", "self", ".", "packetize", "(", "pdu", ",", "self", ".", "downstreamBuffer", ")", ":", "self", ".", "request", "(", "packet", ")" ]
Message going downstream.
[ "Message", "going", "downstream", "." ]
python
train
36.571429
linkhub-sdk/popbill.py
popbill/taxinvoiceService.py
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/taxinvoiceService.py#L972-L996
def assignMgtKey(self, CorpNum, MgtKeyType, ItemKey, MgtKey, UserID=None): """ 관리번호할당 args CorpNum : 팝빌회원 사업자번호 MgtKeyType : 세금계산서 유형, SELL-매출, BUY-매입, TRUSTEE-위수탁 ItemKey : 아이템키 (Search API로 조회 가능) MgtKey : 세금계산서에 할당할 파트너 관리 번호 UserID : 팝빌회원 아이디 return 처리결과. consist of code and message raise PopbillException """ if MgtKeyType == None or MgtKeyType == '': raise PopbillException(-99999999, "세금계산서 발행유형이 입력되지 않았습니다.") if ItemKey == None or ItemKey == '': raise PopbillException(-99999999, "아이템키가 입력되지 않았습니다.") if MgtKey == None or MgtKey == '': raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.") postDate = "MgtKey=" + MgtKey return self._httppost('/Taxinvoice/' + ItemKey + '/' + MgtKeyType, postDate, CorpNum, UserID, "", "application/x-www-form-urlencoded; charset=utf-8")
[ "def", "assignMgtKey", "(", "self", ",", "CorpNum", ",", "MgtKeyType", ",", "ItemKey", ",", "MgtKey", ",", "UserID", "=", "None", ")", ":", "if", "MgtKeyType", "==", "None", "or", "MgtKeyType", "==", "''", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"세금계산서 발행유형이 입력되지 않았습니다.\")", "", "if", "ItemKey", "==", "None", "or", "ItemKey", "==", "''", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"아이템키가 입력되지 않았습니다.\")", "", "if", "MgtKey", "==", "None", "or", "MgtKey", "==", "''", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"관리번호가 입력되지 않았습니다.\")", "", "postDate", "=", "\"MgtKey=\"", "+", "MgtKey", "return", "self", ".", "_httppost", "(", "'/Taxinvoice/'", "+", "ItemKey", "+", "'/'", "+", "MgtKeyType", ",", "postDate", ",", "CorpNum", ",", "UserID", ",", "\"\"", ",", "\"application/x-www-form-urlencoded; charset=utf-8\"", ")" ]
Assign a management key to a tax invoice.
    args
        CorpNum : Popbill member's business registration number
        MgtKeyType : tax invoice type, SELL - sales, BUY - purchase, TRUSTEE - consignment
        ItemKey : item key (can be looked up via the Search API)
        MgtKey : partner management key to assign to the tax invoice
        UserID : Popbill member ID
    return
        processing result. consists of code and message
    raise
        PopbillException
[ "관리번호할당", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "MgtKeyType", ":", "세금계산서", "유형", "SELL", "-", "매출", "BUY", "-", "매입", "TRUSTEE", "-", "위수탁", "ItemKey", ":", "아이템키", "(", "Search", "API로", "조회", "가능", ")", "MgtKey", ":", "세금계산서에", "할당할", "파트너", "관리", "번호", "UserID", ":", "팝빌회원", "아이디", "return", "처리결과", ".", "consist", "of", "code", "and", "message", "raise", "PopbillException" ]
python
train
41.16
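A hedged usage sketch: the LinkID/SecretKey, corporate number, item key and management key below are placeholders, the package-level imports follow popbill's usual example style, and the response object is assumed to carry code/message like the service's other operations.

from popbill import TaxinvoiceService, PopbillException

service = TaxinvoiceService('LinkID', 'SecretKey')
try:
    result = service.assignMgtKey('1234567890', 'SELL', '018123456789', 'INV-2018-001')
    print(result.code, result.message)
except PopbillException as err:
    print(err.code, err.message)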
HazyResearch/metal
metal/label_model/label_model.py
https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/label_model/label_model.py#L220-L251
def get_conditional_probs(self, source=None): """Returns the full conditional probabilities table as a numpy array, where row i*(k+1) + ly is the conditional probabilities of source i emmiting label ly (including abstains 0), conditioned on different values of Y, i.e.: c_probs[i*(k+1) + ly, y] = P(\lambda_i = ly | Y = y) Note that this simply involves inferring the kth row by law of total probability and adding in to mu. If `source` is not None, returns only the corresponding block. """ c_probs = np.zeros((self.m * (self.k + 1), self.k)) mu = self.mu.detach().clone().numpy() for i in range(self.m): # si = self.c_data[(i,)]['start_index'] # ei = self.c_data[(i,)]['end_index'] # mu_i = mu[si:ei, :] mu_i = mu[i * self.k : (i + 1) * self.k, :] c_probs[i * (self.k + 1) + 1 : (i + 1) * (self.k + 1), :] = mu_i # The 0th row (corresponding to abstains) is the difference between # the sums of the other rows and one, by law of total prob c_probs[i * (self.k + 1), :] = 1 - mu_i.sum(axis=0) c_probs = np.clip(c_probs, 0.01, 0.99) if source is not None: return c_probs[source * (self.k + 1) : (source + 1) * (self.k + 1)] else: return c_probs
[ "def", "get_conditional_probs", "(", "self", ",", "source", "=", "None", ")", ":", "c_probs", "=", "np", ".", "zeros", "(", "(", "self", ".", "m", "*", "(", "self", ".", "k", "+", "1", ")", ",", "self", ".", "k", ")", ")", "mu", "=", "self", ".", "mu", ".", "detach", "(", ")", ".", "clone", "(", ")", ".", "numpy", "(", ")", "for", "i", "in", "range", "(", "self", ".", "m", ")", ":", "# si = self.c_data[(i,)]['start_index']", "# ei = self.c_data[(i,)]['end_index']", "# mu_i = mu[si:ei, :]", "mu_i", "=", "mu", "[", "i", "*", "self", ".", "k", ":", "(", "i", "+", "1", ")", "*", "self", ".", "k", ",", ":", "]", "c_probs", "[", "i", "*", "(", "self", ".", "k", "+", "1", ")", "+", "1", ":", "(", "i", "+", "1", ")", "*", "(", "self", ".", "k", "+", "1", ")", ",", ":", "]", "=", "mu_i", "# The 0th row (corresponding to abstains) is the difference between", "# the sums of the other rows and one, by law of total prob", "c_probs", "[", "i", "*", "(", "self", ".", "k", "+", "1", ")", ",", ":", "]", "=", "1", "-", "mu_i", ".", "sum", "(", "axis", "=", "0", ")", "c_probs", "=", "np", ".", "clip", "(", "c_probs", ",", "0.01", ",", "0.99", ")", "if", "source", "is", "not", "None", ":", "return", "c_probs", "[", "source", "*", "(", "self", ".", "k", "+", "1", ")", ":", "(", "source", "+", "1", ")", "*", "(", "self", ".", "k", "+", "1", ")", "]", "else", ":", "return", "c_probs" ]
Returns the full conditional probabilities table as a numpy array,
where row i*(k+1) + ly is the conditional probabilities of source i
emitting label ly (including abstains 0), conditioned on different
values of Y, i.e.:

    c_probs[i*(k+1) + ly, y] = P(\lambda_i = ly | Y = y)

Note that this simply involves inferring the kth row by law of total
probability and adding it in to mu.

If `source` is not None, returns only the corresponding block.
[ "Returns", "the", "full", "conditional", "probabilities", "table", "as", "a", "numpy", "array", "where", "row", "i", "*", "(", "k", "+", "1", ")", "+", "ly", "is", "the", "conditional", "probabilities", "of", "source", "i", "emmiting", "label", "ly", "(", "including", "abstains", "0", ")", "conditioned", "on", "different", "values", "of", "Y", "i", ".", "e", ".", ":" ]
python
train
42.59375
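The abstain-row trick in get_conditional_probs is easy to check in isolation. A self-contained numpy sketch with a single labeling source and k = 2 classes (the mu values are made up):

import numpy as np

k = 2
# mu holds P(lambda = 1 | Y) and P(lambda = 2 | Y) for one source,
# shape (k, k): rows are emitted labels 1..k, columns are true Y.
mu = np.array([[0.7, 0.2],
               [0.1, 0.6]])

c_probs = np.zeros((k + 1, k))
c_probs[1:, :] = mu
# Row 0 (abstains) by law of total probability: each column must sum to 1.
c_probs[0, :] = 1 - mu.sum(axis=0)

print(c_probs)            # [[0.2 0.2], [0.7 0.2], [0.1 0.6]]
assert np.allclose(c_probs.sum(axis=0), 1.0)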
pybel/pybel
src/pybel/struct/pipeline/decorators.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/pipeline/decorators.py#L77-L92
def _build_register_function(universe: bool, in_place: bool): # noqa: D202 """Build a decorator function to tag transformation functions. :param universe: Does the first positional argument of this function correspond to a universe graph? :param in_place: Does this function return a new graph, or just modify it in-place? """ def register(func): """Tag a transformation function. :param func: A function :return: The same function, with additional properties added """ return _register_function(func.__name__, func, universe, in_place) return register
[ "def", "_build_register_function", "(", "universe", ":", "bool", ",", "in_place", ":", "bool", ")", ":", "# noqa: D202", "def", "register", "(", "func", ")", ":", "\"\"\"Tag a transformation function.\n\n :param func: A function\n :return: The same function, with additional properties added\n \"\"\"", "return", "_register_function", "(", "func", ".", "__name__", ",", "func", ",", "universe", ",", "in_place", ")", "return", "register" ]
Build a decorator function to tag transformation functions. :param universe: Does the first positional argument of this function correspond to a universe graph? :param in_place: Does this function return a new graph, or just modify it in-place?
[ "Build", "a", "decorator", "function", "to", "tag", "transformation", "functions", "." ]
python
train
37.875
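_build_register_function is a decorator factory; the _register_function it calls is not shown in the record, so this sketch substitutes a hypothetical version that simply tags the function with attributes:

def _register_function(name, func, universe, in_place):
    # Hypothetical stand-in: record the metadata on the function itself.
    func.universe = universe
    func.in_place = in_place
    return func

def _build_register_function(universe: bool, in_place: bool):
    def register(func):
        return _register_function(func.__name__, func, universe, in_place)
    return register

in_place_transformation = _build_register_function(universe=False, in_place=True)

@in_place_transformation
def remove_isolated_nodes(graph):
    """Toy transformation."""

print(remove_isolated_nodes.universe, remove_isolated_nodes.in_place)  # False True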
boriel/zxbasic
zxbparser.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L3056-L3064
def p_expr_usr(p): """ bexpr : USR bexpr %prec UMINUS """ if p[2].type_ == TYPE.string: p[0] = make_builtin(p.lineno(1), 'USR_STR', p[2], type_=TYPE.uinteger) else: p[0] = make_builtin(p.lineno(1), 'USR', make_typecast(TYPE.uinteger, p[2], p.lineno(1)), type_=TYPE.uinteger)
[ "def", "p_expr_usr", "(", "p", ")", ":", "if", "p", "[", "2", "]", ".", "type_", "==", "TYPE", ".", "string", ":", "p", "[", "0", "]", "=", "make_builtin", "(", "p", ".", "lineno", "(", "1", ")", ",", "'USR_STR'", ",", "p", "[", "2", "]", ",", "type_", "=", "TYPE", ".", "uinteger", ")", "else", ":", "p", "[", "0", "]", "=", "make_builtin", "(", "p", ".", "lineno", "(", "1", ")", ",", "'USR'", ",", "make_typecast", "(", "TYPE", ".", "uinteger", ",", "p", "[", "2", "]", ",", "p", ".", "lineno", "(", "1", ")", ")", ",", "type_", "=", "TYPE", ".", "uinteger", ")" ]
bexpr : USR bexpr %prec UMINUS
[ "bexpr", ":", "USR", "bexpr", "%prec", "UMINUS" ]
python
train
39.333333
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L7703-L7719
def WaitHotKeyReleased(hotkey: tuple) -> None: """hotkey: tuple, two ints tuple(modifierKey, key)""" mod = {ModifierKey.Alt: Keys.VK_MENU, ModifierKey.Control: Keys.VK_CONTROL, ModifierKey.Shift: Keys.VK_SHIFT, ModifierKey.Win: Keys.VK_LWIN } while True: time.sleep(0.05) if IsKeyPressed(hotkey[1]): continue for k, v in mod.items(): if k & hotkey[0]: if IsKeyPressed(v): break else: break
[ "def", "WaitHotKeyReleased", "(", "hotkey", ":", "tuple", ")", "->", "None", ":", "mod", "=", "{", "ModifierKey", ".", "Alt", ":", "Keys", ".", "VK_MENU", ",", "ModifierKey", ".", "Control", ":", "Keys", ".", "VK_CONTROL", ",", "ModifierKey", ".", "Shift", ":", "Keys", ".", "VK_SHIFT", ",", "ModifierKey", ".", "Win", ":", "Keys", ".", "VK_LWIN", "}", "while", "True", ":", "time", ".", "sleep", "(", "0.05", ")", "if", "IsKeyPressed", "(", "hotkey", "[", "1", "]", ")", ":", "continue", "for", "k", ",", "v", "in", "mod", ".", "items", "(", ")", ":", "if", "k", "&", "hotkey", "[", "0", "]", ":", "if", "IsKeyPressed", "(", "v", ")", ":", "break", "else", ":", "break" ]
hotkey: tuple, two ints tuple(modifierKey, key)
[ "hotkey", ":", "tuple", "two", "ints", "tuple", "(", "modifierKey", "key", ")" ]
python
valid
32.058824
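The heart of WaitHotKeyReleased is the modifier-mask scan. The real function polls the Windows keyboard state in a loop; this sketch fakes IsKeyPressed with a set and checks a single snapshot, so it runs anywhere:

# Stand-in flag values mirroring the ModifierKey bitmask idea.
ALT, CTRL, SHIFT, WIN = 1, 2, 4, 8
MOD_VKEYS = {ALT: 'VK_MENU', CTRL: 'VK_CONTROL', SHIFT: 'VK_SHIFT', WIN: 'VK_LWIN'}

pressed = {'VK_CONTROL'}          # simulate Ctrl still held down

def is_key_pressed(key):
    return key in pressed

def hotkey_released(hotkey):
    mod_mask, main_key = hotkey
    if is_key_pressed(main_key):
        return False
    for flag, vkey in MOD_VKEYS.items():
        if flag & mod_mask and is_key_pressed(vkey):
            return False          # a required modifier is still down
    return True

print(hotkey_released((CTRL, 'D')))   # False: Ctrl is still pressed
pressed.clear()
print(hotkey_released((CTRL, 'D')))   # True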
projectatomic/osbs-client
osbs/cli/render.py
https://github.com/projectatomic/osbs-client/blob/571fe035dab3a7c02e1dccd5d65ffd75be750458/osbs/cli/render.py#L170-L186
def _separate(self):
        """
        Get the width of the separator for the current column

        :return: int
        """
        if self.total_free_space is None:
            return 0
        else:
            sepa = self.default_column_space
            # we need to distribute remainders
            if self.default_column_space_remainder > 0:
                sepa += 1
                self.default_column_space_remainder -= 1
            logger.debug("remainder: %d, separator: %d", self.default_column_space_remainder,
                         sepa)
            return sepa
[ "def", "_separate", "(", "self", ")", ":", "if", "self", ".", "total_free_space", "is", "None", ":", "return", "0", "else", ":", "sepa", "=", "self", ".", "default_column_space", "# we need to distribute remainders", "if", "self", ".", "default_column_space_remainder", ">", "0", ":", "sepa", "+=", "1", "self", ".", "default_column_space_remainder", "-=", "1", "logger", ".", "debug", "(", "\"remainder: %d, separator: %d\"", ",", "self", ".", "default_column_space_remainder", ",", "sepa", ")", "return", "sepa" ]
Get the width of the separator for the current column

:return: int
[ "get", "a", "width", "of", "separator", "for", "current", "column" ]
python
train
32.882353
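The remainder handling in _separate hands leftover space out one column at a time. A sketch of the same idea outside the class; the attribute names are taken from the record, and the relation of gaps to columns is my assumption:

def separator_widths(columns, total_free_space):
    # Base width per gap plus a remainder spread over the first gaps,
    # mirroring default_column_space / default_column_space_remainder.
    gaps = columns - 1
    base, remainder = divmod(total_free_space, gaps)
    widths = []
    for _ in range(gaps):
        sepa = base
        if remainder > 0:
            sepa += 1
            remainder -= 1
        widths.append(sepa)
    return widths

print(separator_widths(columns=4, total_free_space=10))  # [4, 3, 3]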
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1015-L1018
def p_expression_sla(self, p): 'expression : expression LSHIFTA expression' p[0] = Sll(p[1], p[3], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_expression_sla", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "Sll", "(", "p", "[", "1", "]", ",", "p", "[", "3", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
expression : expression LSHIFTA expression
[ "expression", ":", "expression", "LSHIFTA", "expression" ]
python
train
42
ska-sa/katcp-python
katcp/inspecting_client.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/inspecting_client.py#L956-L959
def _cb_inform_interface_change(self, msg): """Update the sensors and requests available.""" self._logger.debug('cb_inform_interface_change(%s)', msg) self._interface_changed.set()
[ "def", "_cb_inform_interface_change", "(", "self", ",", "msg", ")", ":", "self", ".", "_logger", ".", "debug", "(", "'cb_inform_interface_change(%s)'", ",", "msg", ")", "self", ".", "_interface_changed", ".", "set", "(", ")" ]
Update the sensors and requests available.
[ "Update", "the", "sensors", "and", "requests", "available", "." ]
python
train
50.25
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/status.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/status.py#L120-L155
def to_dict(mapreduce_yaml):
  """Converts a MapReduceYaml file into a JSON-encodable dictionary.

  For use in user-visible UI and internal methods for interfacing with
  user code (like param validation).

  Args:
    mapreduce_yaml: The Python representation of the mapreduce.yaml document.

  Returns:
    A list of configuration dictionaries.
  """
  all_configs = []
  for config in mapreduce_yaml.mapreduce:
    out = {
        "name": config.name,
        "mapper_input_reader": config.mapper.input_reader,
        "mapper_handler": config.mapper.handler,
    }
    if config.mapper.params_validator:
      out["mapper_params_validator"] = config.mapper.params_validator
    if config.mapper.params:
      param_defaults = {}
      for param in config.mapper.params:
        param_defaults[param.name] = param.default or param.value
      out["mapper_params"] = param_defaults
    if config.params:
      param_defaults = {}
      for param in config.params:
        param_defaults[param.name] = param.default or param.value
      out["params"] = param_defaults
    if config.mapper.output_writer:
      out["mapper_output_writer"] = config.mapper.output_writer
    all_configs.append(out)

  return all_configs
[ "def", "to_dict", "(", "mapreduce_yaml", ")", ":", "all_configs", "=", "[", "]", "for", "config", "in", "mapreduce_yaml", ".", "mapreduce", ":", "out", "=", "{", "\"name\"", ":", "config", ".", "name", ",", "\"mapper_input_reader\"", ":", "config", ".", "mapper", ".", "input_reader", ",", "\"mapper_handler\"", ":", "config", ".", "mapper", ".", "handler", ",", "}", "if", "config", ".", "mapper", ".", "params_validator", ":", "out", "[", "\"mapper_params_validator\"", "]", "=", "config", ".", "mapper", ".", "params_validator", "if", "config", ".", "mapper", ".", "params", ":", "param_defaults", "=", "{", "}", "for", "param", "in", "config", ".", "mapper", ".", "params", ":", "param_defaults", "[", "param", ".", "name", "]", "=", "param", ".", "default", "or", "param", ".", "value", "out", "[", "\"mapper_params\"", "]", "=", "param_defaults", "if", "config", ".", "params", ":", "param_defaults", "=", "{", "}", "for", "param", "in", "config", ".", "params", ":", "param_defaults", "[", "param", ".", "name", "]", "=", "param", ".", "default", "or", "param", ".", "value", "out", "[", "\"params\"", "]", "=", "param_defaults", "if", "config", ".", "mapper", ".", "output_writer", ":", "out", "[", "\"mapper_output_writer\"", "]", "=", "config", ".", "mapper", ".", "output_writer", "all_configs", ".", "append", "(", "out", ")", "return", "all_configs" ]
Converts a MapReduceYaml file into a JSON-encodable dictionary.

For use in user-visible UI and internal methods for interfacing with
user code (like param validation).

Args:
  mapreduce_yaml: The Python representation of the mapreduce.yaml document.

Returns:
  A list of configuration dictionaries.
[ "Converts", "a", "MapReduceYaml", "file", "into", "a", "JSON", "-", "encodable", "dictionary", "." ]
python
train
34.861111
fastai/fastai
fastai/metrics.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/metrics.py#L83-L87
def explained_variance(pred:Tensor, targ:Tensor)->Rank0Tensor: "Explained variance between `pred` and `targ`." pred,targ = flatten_check(pred,targ) var_pct = torch.var(targ - pred) / torch.var(targ) return 1 - var_pct
[ "def", "explained_variance", "(", "pred", ":", "Tensor", ",", "targ", ":", "Tensor", ")", "->", "Rank0Tensor", ":", "pred", ",", "targ", "=", "flatten_check", "(", "pred", ",", "targ", ")", "var_pct", "=", "torch", ".", "var", "(", "targ", "-", "pred", ")", "/", "torch", ".", "var", "(", "targ", ")", "return", "1", "-", "var_pct" ]
Explained variance between `pred` and `targ`.
[ "Explained", "variance", "between", "pred", "and", "targ", "." ]
python
train
45.8
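A worked check of the explained_variance formula, re-implemented in numpy for illustration (the record's version operates on flattened torch tensors):

import numpy as np

pred = np.array([2.0, 4.0, 6.0])
targ = np.array([2.0, 5.0, 5.0])

# Variance of the residuals over variance of the targets; the ddof
# convention cancels in the ratio, so numpy's biased and torch's
# unbiased estimators agree here.
ev = 1 - np.var(targ - pred) / np.var(targ)
print(ev)  # 0.666...: two thirds of the target variance is explained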
Erotemic/utool
utool/util_decor.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_decor.py#L351-L367
def tracefunc_xml(func): """ Causes output of function to be printed in an XML style block """ funcname = meta_util_six.get_funcname(func) def wrp_tracefunc2(*args, **kwargs): verbose = kwargs.get('verbose', True) if verbose: print('<%s>' % (funcname,)) with util_print.Indenter(' '): ret = func(*args, **kwargs) if verbose: print('</%s>' % (funcname,)) return ret wrp_tracefunc2_ = ignores_exc_tb(wrp_tracefunc2) wrp_tracefunc2_ = preserve_sig(wrp_tracefunc2_, func) return wrp_tracefunc2_
[ "def", "tracefunc_xml", "(", "func", ")", ":", "funcname", "=", "meta_util_six", ".", "get_funcname", "(", "func", ")", "def", "wrp_tracefunc2", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "verbose", "=", "kwargs", ".", "get", "(", "'verbose'", ",", "True", ")", "if", "verbose", ":", "print", "(", "'<%s>'", "%", "(", "funcname", ",", ")", ")", "with", "util_print", ".", "Indenter", "(", "' '", ")", ":", "ret", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "verbose", ":", "print", "(", "'</%s>'", "%", "(", "funcname", ",", ")", ")", "return", "ret", "wrp_tracefunc2_", "=", "ignores_exc_tb", "(", "wrp_tracefunc2", ")", "wrp_tracefunc2_", "=", "preserve_sig", "(", "wrp_tracefunc2_", ",", "func", ")", "return", "wrp_tracefunc2_" ]
Causes output of function to be printed in an XML style block
[ "Causes", "output", "of", "function", "to", "be", "printed", "in", "an", "XML", "style", "block" ]
python
train
34.411765
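What tracefunc_xml produces is easiest to see with a stripped-down stand-in; this version drops the indentation, verbose flag, and signature preservation that the utool decorators add on top:

def trace_xml(func):
    # Minimal sketch of the same idea: wrap output in <name>...</name>.
    def wrapper(*args, **kwargs):
        print('<%s>' % func.__name__)
        ret = func(*args, **kwargs)
        print('</%s>' % func.__name__)
        return ret
    return wrapper

@trace_xml
def compute():
    print('working')
    return 42

compute()
# <compute>
# working
# </compute>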
lang-uk/tokenize-uk
tokenize_uk/tokenize_uk.py
https://github.com/lang-uk/tokenize-uk/blob/52769b0f43af29d4a5863a7836364b3b9c10dd09/tokenize_uk/tokenize_uk.py#L57-L91
def tokenize_sents(string): """ Tokenize input text to sentences. :param string: Text to tokenize :type string: str or unicode :return: sentences :rtype: list of strings """ string = six.text_type(string) spans = [] for match in re.finditer('[^\s]+', string): spans.append(match) spans_count = len(spans) rez = [] off = 0 for i in range(spans_count): tok = string[spans[i].start():spans[i].end()] if i == spans_count - 1: rez.append(string[off:spans[i].end()]) elif tok[-1] in ['.', '!', '?', '…', '»']: tok1 = tok[re.search('[.!?…»]', tok).start()-1] next_tok = string[spans[i + 1].start():spans[i + 1].end()] if (next_tok[0].isupper() and not tok1.isupper() and not (tok[-1] != '.' or tok1[0] == '(' or tok in ABBRS)): rez.append(string[off:spans[i].end()]) off = spans[i + 1].start() return rez
[ "def", "tokenize_sents", "(", "string", ")", ":", "string", "=", "six", ".", "text_type", "(", "string", ")", "spans", "=", "[", "]", "for", "match", "in", "re", ".", "finditer", "(", "'[^\\s]+'", ",", "string", ")", ":", "spans", ".", "append", "(", "match", ")", "spans_count", "=", "len", "(", "spans", ")", "rez", "=", "[", "]", "off", "=", "0", "for", "i", "in", "range", "(", "spans_count", ")", ":", "tok", "=", "string", "[", "spans", "[", "i", "]", ".", "start", "(", ")", ":", "spans", "[", "i", "]", ".", "end", "(", ")", "]", "if", "i", "==", "spans_count", "-", "1", ":", "rez", ".", "append", "(", "string", "[", "off", ":", "spans", "[", "i", "]", ".", "end", "(", ")", "]", ")", "elif", "tok", "[", "-", "1", "]", "in", "[", "'.'", ",", "'!'", ",", "'?'", ",", "'…', ", "'", "']:", "", "", "tok1", "=", "tok", "[", "re", ".", "search", "(", "'[.!?…»]', t", "o", ").s", "t", "a", "rt()-", "1", "]", "", "", "", "next_tok", "=", "string", "[", "spans", "[", "i", "+", "1", "]", ".", "start", "(", ")", ":", "spans", "[", "i", "+", "1", "]", ".", "end", "(", ")", "]", "if", "(", "next_tok", "[", "0", "]", ".", "isupper", "(", ")", "and", "not", "tok1", ".", "isupper", "(", ")", "and", "not", "(", "tok", "[", "-", "1", "]", "!=", "'.'", "or", "tok1", "[", "0", "]", "==", "'('", "or", "tok", "in", "ABBRS", ")", ")", ":", "rez", ".", "append", "(", "string", "[", "off", ":", "spans", "[", "i", "]", ".", "end", "(", ")", "]", ")", "off", "=", "spans", "[", "i", "+", "1", "]", ".", "start", "(", ")", "return", "rez" ]
Tokenize input text to sentences. :param string: Text to tokenize :type string: str or unicode :return: sentences :rtype: list of strings
[ "Tokenize", "input", "text", "to", "sentences", "." ]
python
train
29.2
etcher-be/emiz
emiz/avwx/translate.py
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/translate.py#L255-L284
def turb_ice(turbice: [str], unit: str = 'ft') -> str:  # type: ignore
    """
    Translate the list of turbulence or icing into a readable sentence

    Ex: Occasional moderate turbulence in clouds from 3000ft to 14000ft
    """
    if not turbice:
        return ''
    # Determine turbulance or icing
    if turbice[0][0] == '5':
        conditions = TURBULANCE_CONDITIONS
    elif turbice[0][0] == '6':
        conditions = ICING_CONDITIONS
    else:
        return ''
    # Create list of split items (type, floor, height)
    split = []
    for item in turbice:
        if len(item) == 6:
            split.append([item[1:2], item[2:5], item[5]])
    # Combine items that cover a layer greater than 9000ft
    for i in reversed(range(len(split) - 1)):
        if split[i][2] == '9' and split[i][0] == split[i + 1][0] \
                and int(split[i + 1][1]) == (int(split[i][1]) + int(split[i][2]) * 10):
            split[i][2] = str(int(split[i][2]) + int(split[i + 1][2]))
            split.pop(i + 1)
    # Return joined, formatted string from split items
    return ', '.join(['{conditions} from {low_alt}{unit} to {high_alt}{unit}'.format(
        conditions=conditions[item[0]],
        low_alt=int(item[1]) * 100,
        high_alt=int(item[1]) * 100 + int(item[2]) * 1000,
        unit=unit) for item in split])
[ "def", "turb_ice", "(", "turbice", ":", "[", "str", "]", ",", "unit", ":", "str", "=", "'ft'", ")", "->", "str", ":", "# type: ignore", "if", "not", "turbice", ":", "return", "''", "# Determine turbulance or icing", "if", "turbice", "[", "0", "]", "[", "0", "]", "==", "'5'", ":", "conditions", "=", "TURBULANCE_CONDITIONS", "elif", "turbice", "[", "0", "]", "[", "0", "]", "==", "'6'", ":", "conditions", "=", "ICING_CONDITIONS", "else", ":", "return", "''", "# Create list of split items (type, floor, height)", "split", "=", "[", "]", "for", "item", "in", "turbice", ":", "if", "len", "(", "item", ")", "==", "6", ":", "split", ".", "append", "(", "[", "item", "[", "1", ":", "2", "]", ",", "item", "[", "2", ":", "5", "]", ",", "item", "[", "5", "]", "]", ")", "# Combine items that cover a layer greater than 9000ft", "for", "i", "in", "reversed", "(", "range", "(", "len", "(", "split", ")", "-", "1", ")", ")", ":", "if", "split", "[", "i", "]", "[", "2", "]", "==", "'9'", "and", "split", "[", "i", "]", "[", "0", "]", "==", "split", "[", "i", "+", "1", "]", "[", "0", "]", "and", "int", "(", "split", "[", "i", "+", "1", "]", "[", "1", "]", ")", "==", "(", "int", "(", "split", "[", "i", "]", "[", "1", "]", ")", "+", "int", "(", "split", "[", "i", "]", "[", "2", "]", ")", "*", "10", ")", ":", "split", "[", "i", "]", "[", "2", "]", "=", "str", "(", "int", "(", "split", "[", "i", "]", "[", "2", "]", ")", "+", "int", "(", "split", "[", "i", "+", "1", "]", "[", "2", "]", ")", ")", "split", ".", "pop", "(", "i", "+", "1", ")", "# Return joined, formatted string from split items", "return", "', '", ".", "join", "(", "[", "'{conditions} from {low_alt}{unit} to {high_alt}{unit}'", ".", "format", "(", "conditions", "=", "conditions", "[", "item", "[", "0", "]", "]", ",", "low_alt", "=", "int", "(", "item", "[", "1", "]", ")", "*", "100", ",", "high_alt", "=", "int", "(", "item", "[", "1", "]", ")", "*", "100", "+", "int", "(", "item", "[", "2", "]", ")", "*", "1000", ",", "unit", "=", "unit", ")", "for", "item", "in", "split", "]", ")" ]
Translate the list of turbulence or icing into a readable sentence

Ex: Occasional moderate turbulence in clouds from 3000ft to 14000ft
[ "Translate", "the", "list", "of", "turbulance", "or", "icing", "into", "a", "readable", "sentence" ]
python
train
42.766667
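The turb_ice codes pack a condition type, a floor in hundreds of feet, and a depth in thousands of feet into six characters. A sketch of just the decode step; the condition wording table below is hypothetical, since the record does not show the real TURBULANCE_CONDITIONS or ICING_CONDITIONS mappings:

CONDITIONS = {'3': 'Moderate turbulence'}  # hypothetical subset

def decode_turb(code, unit='ft'):
    # '530304' -> type '3', floor '030' (*100 ft), depth '4' (*1000 ft)
    cond, floor, depth = code[1:2], code[2:5], code[5]
    low = int(floor) * 100
    high = low + int(depth) * 1000
    return '%s from %d%s to %d%s' % (CONDITIONS[cond], low, unit, high, unit)

print(decode_turb('530304'))  # Moderate turbulence from 3000ft to 7000ft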
abilian/abilian-core
abilian/services/vocabularies/models.py
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/services/vocabularies/models.py#L31-L37
def by_position(self, position): """Like `.get()`, but by position number.""" # don't use .first(), so that MultipleResultsFound can be raised try: return self.filter_by(position=position).one() except sa.orm.exc.NoResultFound: return None
[ "def", "by_position", "(", "self", ",", "position", ")", ":", "# don't use .first(), so that MultipleResultsFound can be raised", "try", ":", "return", "self", ".", "filter_by", "(", "position", "=", "position", ")", ".", "one", "(", ")", "except", "sa", ".", "orm", ".", "exc", ".", "NoResultFound", ":", "return", "None" ]
Like `.get()`, but by position number.
[ "Like", ".", "get", "()", "but", "by", "position", "number", "." ]
python
train
41.285714
O365/python-o365
O365/excel.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/excel.py#L1536-L1548
def get_tables(self): """ Returns a collection of this worksheet tables""" url = self.build_url(self._endpoints.get('get_tables')) response = self.session.get(url) if not response: return [] data = response.json() return [self.table_constructor(parent=self, **{self._cloud_data_key: table}) for table in data.get('value', [])]
[ "def", "get_tables", "(", "self", ")", ":", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'get_tables'", ")", ")", "response", "=", "self", ".", "session", ".", "get", "(", "url", ")", "if", "not", "response", ":", "return", "[", "]", "data", "=", "response", ".", "json", "(", ")", "return", "[", "self", ".", "table_constructor", "(", "parent", "=", "self", ",", "*", "*", "{", "self", ".", "_cloud_data_key", ":", "table", "}", ")", "for", "table", "in", "data", ".", "get", "(", "'value'", ",", "[", "]", ")", "]" ]
Returns a collection of this worksheet tables
[ "Returns", "a", "collection", "of", "this", "worksheet", "tables" ]
python
train
30.307692
cggh/scikit-allel
allel/model/ndarray.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L4618-L4708
def to_vcf(self, path, rename=None, number=None, description=None, fill=None, write_header=True): r"""Write to a variant call format (VCF) file. Parameters ---------- path : string File path. rename : dict, optional Rename these columns in the VCF. number : dict, optional Override the number specified in INFO headers. description : dict, optional Descriptions for the INFO and FILTER headers. fill : dict, optional Fill values used for missing data in the table. write_header : bool, optional If True write VCF header. Examples -------- Setup a variant table to write out:: >>> import allel >>> chrom = [b'chr1', b'chr1', b'chr2', b'chr2', b'chr3'] >>> pos = [2, 6, 3, 8, 1] >>> ids = ['a', 'b', 'c', 'd', 'e'] >>> ref = [b'A', b'C', b'T', b'G', b'N'] >>> alt = [(b'T', b'.'), ... (b'G', b'.'), ... (b'A', b'C'), ... (b'C', b'A'), ... (b'X', b'.')] >>> qual = [1.2, 2.3, 3.4, 4.5, 5.6] >>> filter_qd = [True, True, True, False, False] >>> filter_dp = [True, False, True, False, False] >>> dp = [12, 23, 34, 45, 56] >>> qd = [12.3, 23.4, 34.5, 45.6, 56.7] >>> flg = [True, False, True, False, True] >>> ac = [(1, -1), (3, -1), (5, 6), (7, 8), (9, -1)] >>> xx = [(1.2, 2.3), (3.4, 4.5), (5.6, 6.7), (7.8, 8.9), ... (9.0, 9.9)] >>> columns = [chrom, pos, ids, ref, alt, qual, filter_dp, ... filter_qd, dp, qd, flg, ac, xx] >>> records = list(zip(*columns)) >>> dtype = [('CHROM', 'S4'), ... ('POS', 'u4'), ... ('ID', 'S1'), ... ('REF', 'S1'), ... ('ALT', ('S1', 2)), ... ('qual', 'f4'), ... ('filter_dp', bool), ... ('filter_qd', bool), ... ('dp', int), ... ('qd', float), ... ('flg', bool), ... ('ac', (int, 2)), ... ('xx', (float, 2))] >>> vt = allel.VariantTable(records, dtype=dtype) Now write out to VCF and inspect the result:: >>> rename = {'dp': 'DP', 'qd': 'QD', 'filter_qd': 'QD'} >>> fill = {'ALT': b'.', 'ac': -1} >>> number = {'ac': 'A'} >>> description = {'ac': 'Allele counts', 'filter_dp': 'Low depth'} >>> vt.to_vcf('example.vcf', rename=rename, fill=fill, ... number=number, description=description) >>> print(open('example.vcf').read()) ##fileformat=VCFv4.1 ##fileDate=... ##source=... ##INFO=<ID=DP,Number=1,Type=Integer,Description=""> ##INFO=<ID=QD,Number=1,Type=Float,Description=""> ##INFO=<ID=ac,Number=A,Type=Integer,Description="Allele counts"> ##INFO=<ID=flg,Number=0,Type=Flag,Description=""> ##INFO=<ID=xx,Number=2,Type=Float,Description=""> ##FILTER=<ID=QD,Description=""> ##FILTER=<ID=dp,Description="Low depth"> #CHROM POS ID REF ALT QUAL FILTER INFO chr1 2 a A T 1.2 QD;dp DP=12;QD=12.3;ac=1;flg;xx=... chr1 6 b C G 2.3 QD DP=23;QD=23.4;ac=3;xx=3.4,4.5 chr2 3 c T A,C 3.4 QD;dp DP=34;QD=34.5;ac=5,6;flg;x... chr2 8 d G C,A 4.5 PASS DP=45;QD=45.6;ac=7,8;xx=7... chr3 1 e N X 5.6 PASS DP=56;QD=56.7;ac=9;flg;xx=... """ write_vcf(path, callset=self, rename=rename, number=number, description=description, fill=fill, write_header=write_header)
[ "def", "to_vcf", "(", "self", ",", "path", ",", "rename", "=", "None", ",", "number", "=", "None", ",", "description", "=", "None", ",", "fill", "=", "None", ",", "write_header", "=", "True", ")", ":", "write_vcf", "(", "path", ",", "callset", "=", "self", ",", "rename", "=", "rename", ",", "number", "=", "number", ",", "description", "=", "description", ",", "fill", "=", "fill", ",", "write_header", "=", "write_header", ")" ]
r"""Write to a variant call format (VCF) file. Parameters ---------- path : string File path. rename : dict, optional Rename these columns in the VCF. number : dict, optional Override the number specified in INFO headers. description : dict, optional Descriptions for the INFO and FILTER headers. fill : dict, optional Fill values used for missing data in the table. write_header : bool, optional If True write VCF header. Examples -------- Setup a variant table to write out:: >>> import allel >>> chrom = [b'chr1', b'chr1', b'chr2', b'chr2', b'chr3'] >>> pos = [2, 6, 3, 8, 1] >>> ids = ['a', 'b', 'c', 'd', 'e'] >>> ref = [b'A', b'C', b'T', b'G', b'N'] >>> alt = [(b'T', b'.'), ... (b'G', b'.'), ... (b'A', b'C'), ... (b'C', b'A'), ... (b'X', b'.')] >>> qual = [1.2, 2.3, 3.4, 4.5, 5.6] >>> filter_qd = [True, True, True, False, False] >>> filter_dp = [True, False, True, False, False] >>> dp = [12, 23, 34, 45, 56] >>> qd = [12.3, 23.4, 34.5, 45.6, 56.7] >>> flg = [True, False, True, False, True] >>> ac = [(1, -1), (3, -1), (5, 6), (7, 8), (9, -1)] >>> xx = [(1.2, 2.3), (3.4, 4.5), (5.6, 6.7), (7.8, 8.9), ... (9.0, 9.9)] >>> columns = [chrom, pos, ids, ref, alt, qual, filter_dp, ... filter_qd, dp, qd, flg, ac, xx] >>> records = list(zip(*columns)) >>> dtype = [('CHROM', 'S4'), ... ('POS', 'u4'), ... ('ID', 'S1'), ... ('REF', 'S1'), ... ('ALT', ('S1', 2)), ... ('qual', 'f4'), ... ('filter_dp', bool), ... ('filter_qd', bool), ... ('dp', int), ... ('qd', float), ... ('flg', bool), ... ('ac', (int, 2)), ... ('xx', (float, 2))] >>> vt = allel.VariantTable(records, dtype=dtype) Now write out to VCF and inspect the result:: >>> rename = {'dp': 'DP', 'qd': 'QD', 'filter_qd': 'QD'} >>> fill = {'ALT': b'.', 'ac': -1} >>> number = {'ac': 'A'} >>> description = {'ac': 'Allele counts', 'filter_dp': 'Low depth'} >>> vt.to_vcf('example.vcf', rename=rename, fill=fill, ... number=number, description=description) >>> print(open('example.vcf').read()) ##fileformat=VCFv4.1 ##fileDate=... ##source=... ##INFO=<ID=DP,Number=1,Type=Integer,Description=""> ##INFO=<ID=QD,Number=1,Type=Float,Description=""> ##INFO=<ID=ac,Number=A,Type=Integer,Description="Allele counts"> ##INFO=<ID=flg,Number=0,Type=Flag,Description=""> ##INFO=<ID=xx,Number=2,Type=Float,Description=""> ##FILTER=<ID=QD,Description=""> ##FILTER=<ID=dp,Description="Low depth"> #CHROM POS ID REF ALT QUAL FILTER INFO chr1 2 a A T 1.2 QD;dp DP=12;QD=12.3;ac=1;flg;xx=... chr1 6 b C G 2.3 QD DP=23;QD=23.4;ac=3;xx=3.4,4.5 chr2 3 c T A,C 3.4 QD;dp DP=34;QD=34.5;ac=5,6;flg;x... chr2 8 d G C,A 4.5 PASS DP=45;QD=45.6;ac=7,8;xx=7... chr3 1 e N X 5.6 PASS DP=56;QD=56.7;ac=9;flg;xx=...
[ "r", "Write", "to", "a", "variant", "call", "format", "(", "VCF", ")", "file", "." ]
python
train
42.89011
mapbox/rio-color
rio_color/utils.py
https://github.com/mapbox/rio-color/blob/4e9d7a9348608e66f9381fcdba98c13050e91c83/rio_color/utils.py#L23-L27
def scale_dtype(arr, dtype): """Convert an array from 0..1 to dtype, scaling up linearly """ max_int = np.iinfo(dtype).max return (arr * max_int).astype(dtype)
[ "def", "scale_dtype", "(", "arr", ",", "dtype", ")", ":", "max_int", "=", "np", ".", "iinfo", "(", "dtype", ")", ".", "max", "return", "(", "arr", "*", "max_int", ")", ".", "astype", "(", "dtype", ")" ]
Convert an array from 0..1 to dtype, scaling up linearly
[ "Convert", "an", "array", "from", "0", "..", "1", "to", "dtype", "scaling", "up", "linearly" ]
python
train
34.2
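A quick check of scale_dtype on uint8. Note the truncation toward zero from astype, so 0.5 maps to 127 rather than 128:

import numpy as np

def scale_dtype(arr, dtype):
    max_int = np.iinfo(dtype).max
    return (arr * max_int).astype(dtype)

print(scale_dtype(np.array([0.0, 0.5, 1.0]), np.uint8))  # [  0 127 255]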
Clinical-Genomics/scout
scout/server/blueprints/cases/views.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/cases/views.py#L34-L39
def index(): """Display a list of all user institutes.""" institute_objs = user_institutes(store, current_user) institutes_count = ((institute_obj, store.cases(collaborator=institute_obj['_id']).count()) for institute_obj in institute_objs if institute_obj) return dict(institutes=institutes_count)
[ "def", "index", "(", ")", ":", "institute_objs", "=", "user_institutes", "(", "store", ",", "current_user", ")", "institutes_count", "=", "(", "(", "institute_obj", ",", "store", ".", "cases", "(", "collaborator", "=", "institute_obj", "[", "'_id'", "]", ")", ".", "count", "(", ")", ")", "for", "institute_obj", "in", "institute_objs", "if", "institute_obj", ")", "return", "dict", "(", "institutes", "=", "institutes_count", ")" ]
Display a list of all user institutes.
[ "Display", "a", "list", "of", "all", "user", "institutes", "." ]
python
test
55.5
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L5144-L5172
def conv_elems_1d(x, factor, out_depth=None): """Decrease the length and change the dimensionality. Merge/restore/compress factors positions of dim depth of the input into a single position of dim out_depth. This is basically just a strided convolution without overlap between each strides. The original length has to be divided by factor. Args: x (tf.Tensor): shape [batch_size, length, depth] factor (int): Length compression factor. out_depth (int): Output depth Returns: tf.Tensor: shape [batch_size, length//factor, out_depth] """ out_depth = out_depth or x.get_shape().as_list()[-1] # with tf.control_dependencies( # Dynamic assertion # [tf.assert_equal(tf.shape(x)[1] % factor, 0)]): x = tf.expand_dims(x, 1) # [batch_size, 1, length, depth] x = layers().Conv2D( filters=out_depth, kernel_size=(1, factor), strides=(1, factor), padding="valid", data_format="channels_last", )(x) # [batch_size, 1, length//factor, out_depth] x = tf.squeeze(x, 1) # [batch_size, length//factor, depth] return x
[ "def", "conv_elems_1d", "(", "x", ",", "factor", ",", "out_depth", "=", "None", ")", ":", "out_depth", "=", "out_depth", "or", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "-", "1", "]", "# with tf.control_dependencies( # Dynamic assertion", "# [tf.assert_equal(tf.shape(x)[1] % factor, 0)]):", "x", "=", "tf", ".", "expand_dims", "(", "x", ",", "1", ")", "# [batch_size, 1, length, depth]", "x", "=", "layers", "(", ")", ".", "Conv2D", "(", "filters", "=", "out_depth", ",", "kernel_size", "=", "(", "1", ",", "factor", ")", ",", "strides", "=", "(", "1", ",", "factor", ")", ",", "padding", "=", "\"valid\"", ",", "data_format", "=", "\"channels_last\"", ",", ")", "(", "x", ")", "# [batch_size, 1, length//factor, out_depth]", "x", "=", "tf", ".", "squeeze", "(", "x", ",", "1", ")", "# [batch_size, length//factor, depth]", "return", "x" ]
Decrease the length and change the dimensionality. Merge/restore/compress factors positions of dim depth of the input into a single position of dim out_depth. This is basically just a strided convolution without overlap between each strides. The original length has to be divided by factor. Args: x (tf.Tensor): shape [batch_size, length, depth] factor (int): Length compression factor. out_depth (int): Output depth Returns: tf.Tensor: shape [batch_size, length//factor, out_depth]
[ "Decrease", "the", "length", "and", "change", "the", "dimensionality", "." ]
python
train
36.517241
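A shape-only check of the conv_elems_1d contract. The record's layers() helper is internal to tensor2tensor, so this sketch uses tf.keras directly, which is an assumption; the weights are random, and only the shapes are meaningful:

import tensorflow as tf  # assumes TF2 with eager execution

batch, length, depth, factor, out_depth = 2, 8, 16, 4, 32

x = tf.random.normal([batch, length, depth])
x = tf.expand_dims(x, 1)                        # [2, 1, 8, 16]
x = tf.keras.layers.Conv2D(
    filters=out_depth, kernel_size=(1, factor),
    strides=(1, factor), padding='valid')(x)    # [2, 1, 2, 32]
x = tf.squeeze(x, 1)

print(x.shape)  # (2, 2, 32) == [batch, length // factor, out_depth]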
happyleavesaoc/python-voobly
voobly/__init__.py
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L112-L136
def _make_request(session, url, argument=None, params=None, raw=False): """Make a request to API endpoint.""" if not params: params = {} params['key'] = session.auth.key try: if argument: request_url = '{}{}{}{}'.format(session.auth.base_url, VOOBLY_API_URL, url, argument) else: request_url = '{}{}'.format(VOOBLY_API_URL, url) resp = session.get(request_url, params=params) except RequestException: raise VooblyError('failed to connect') if resp.text == 'bad-key': raise VooblyError('bad api key') elif resp.text == 'too-busy': raise VooblyError('service too busy') elif not resp.text: raise VooblyError('no data returned') if raw: return resp.text try: return tablib.Dataset().load(resp.text).dict except UnsupportedFormat: raise VooblyError('unexpected error {}'.format(resp.text))
[ "def", "_make_request", "(", "session", ",", "url", ",", "argument", "=", "None", ",", "params", "=", "None", ",", "raw", "=", "False", ")", ":", "if", "not", "params", ":", "params", "=", "{", "}", "params", "[", "'key'", "]", "=", "session", ".", "auth", ".", "key", "try", ":", "if", "argument", ":", "request_url", "=", "'{}{}{}{}'", ".", "format", "(", "session", ".", "auth", ".", "base_url", ",", "VOOBLY_API_URL", ",", "url", ",", "argument", ")", "else", ":", "request_url", "=", "'{}{}'", ".", "format", "(", "VOOBLY_API_URL", ",", "url", ")", "resp", "=", "session", ".", "get", "(", "request_url", ",", "params", "=", "params", ")", "except", "RequestException", ":", "raise", "VooblyError", "(", "'failed to connect'", ")", "if", "resp", ".", "text", "==", "'bad-key'", ":", "raise", "VooblyError", "(", "'bad api key'", ")", "elif", "resp", ".", "text", "==", "'too-busy'", ":", "raise", "VooblyError", "(", "'service too busy'", ")", "elif", "not", "resp", ".", "text", ":", "raise", "VooblyError", "(", "'no data returned'", ")", "if", "raw", ":", "return", "resp", ".", "text", "try", ":", "return", "tablib", ".", "Dataset", "(", ")", ".", "load", "(", "resp", ".", "text", ")", ".", "dict", "except", "UnsupportedFormat", ":", "raise", "VooblyError", "(", "'unexpected error {}'", ".", "format", "(", "resp", ".", "text", ")", ")" ]
Make a request to API endpoint.
[ "Make", "a", "request", "to", "API", "endpoint", "." ]
python
train
36.68
delph-in/pydelphin
delphin/repp.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/repp.py#L445-L466
def tokenize(self, s, pattern=None, active=None): """ Rewrite and tokenize the input string *s*. Args: s (str): the input string to process pattern (str, optional): the regular expression pattern on which to split tokens; defaults to `[ \t]+` active (optional): a collection of external module names that may be applied if called Returns: a :class:`~delphin.tokens.YyTokenLattice` containing the tokens and their characterization information """ if pattern is None: if self.tokenize_pattern is None: pattern = r'[ \t]+' else: pattern = self.tokenize_pattern if active is None: active = self.active return self.group.tokenize(s, pattern=pattern, active=active)
[ "def", "tokenize", "(", "self", ",", "s", ",", "pattern", "=", "None", ",", "active", "=", "None", ")", ":", "if", "pattern", "is", "None", ":", "if", "self", ".", "tokenize_pattern", "is", "None", ":", "pattern", "=", "r'[ \\t]+'", "else", ":", "pattern", "=", "self", ".", "tokenize_pattern", "if", "active", "is", "None", ":", "active", "=", "self", ".", "active", "return", "self", ".", "group", ".", "tokenize", "(", "s", ",", "pattern", "=", "pattern", ",", "active", "=", "active", ")" ]
Rewrite and tokenize the input string *s*. Args: s (str): the input string to process pattern (str, optional): the regular expression pattern on which to split tokens; defaults to `[ \t]+` active (optional): a collection of external module names that may be applied if called Returns: a :class:`~delphin.tokens.YyTokenLattice` containing the tokens and their characterization information
[ "Rewrite", "and", "tokenize", "the", "input", "string", "*", "s", "*", "." ]
python
train
39.227273
tensorflow/tensor2tensor
tensor2tensor/models/research/glow_ops.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L548-L603
def conv_block(name, x, mid_channels, dilations=None, activation="relu", dropout=0.0): """2 layer conv block used in the affine coupling layer. Args: name: variable scope. x: 4-D or 5-D Tensor. mid_channels: Output channels of the second layer. dilations: Optional, list of integers. activation: relu or gatu. If relu, the second layer is relu(W*x) If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x) dropout: Dropout probability. Returns: x: 4-D Tensor: Output activations. """ with tf.variable_scope(name, reuse=tf.AUTO_REUSE): x_shape = common_layers.shape_list(x) is_2d = len(x_shape) == 4 num_steps = x_shape[1] if is_2d: first_filter = [3, 3] second_filter = [1, 1] else: # special case when number of steps equal 1 to avoid # padding. if num_steps == 1: first_filter = [1, 3, 3] else: first_filter = [2, 3, 3] second_filter = [1, 1, 1] # Edge Padding + conv2d + actnorm + relu: # [output: 512 channels] x = conv("1_1", x, output_channels=mid_channels, filter_size=first_filter, dilations=dilations) x = tf.nn.relu(x) x = get_dropout(x, rate=dropout) # Padding + conv2d + actnorm + activation. # [input, output: 512 channels] if activation == "relu": x = conv("1_2", x, output_channels=mid_channels, filter_size=second_filter, dilations=dilations) x = tf.nn.relu(x) elif activation == "gatu": # x = tanh(w1*x) * sigm(w2*x) x_tanh = conv("1_tanh", x, output_channels=mid_channels, filter_size=second_filter, dilations=dilations) x_sigm = conv("1_sigm", x, output_channels=mid_channels, filter_size=second_filter, dilations=dilations) x = tf.nn.tanh(x_tanh) * tf.nn.sigmoid(x_sigm) x = get_dropout(x, rate=dropout) return x
[ "def", "conv_block", "(", "name", ",", "x", ",", "mid_channels", ",", "dilations", "=", "None", ",", "activation", "=", "\"relu\"", ",", "dropout", "=", "0.0", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "reuse", "=", "tf", ".", "AUTO_REUSE", ")", ":", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "is_2d", "=", "len", "(", "x_shape", ")", "==", "4", "num_steps", "=", "x_shape", "[", "1", "]", "if", "is_2d", ":", "first_filter", "=", "[", "3", ",", "3", "]", "second_filter", "=", "[", "1", ",", "1", "]", "else", ":", "# special case when number of steps equal 1 to avoid", "# padding.", "if", "num_steps", "==", "1", ":", "first_filter", "=", "[", "1", ",", "3", ",", "3", "]", "else", ":", "first_filter", "=", "[", "2", ",", "3", ",", "3", "]", "second_filter", "=", "[", "1", ",", "1", ",", "1", "]", "# Edge Padding + conv2d + actnorm + relu:", "# [output: 512 channels]", "x", "=", "conv", "(", "\"1_1\"", ",", "x", ",", "output_channels", "=", "mid_channels", ",", "filter_size", "=", "first_filter", ",", "dilations", "=", "dilations", ")", "x", "=", "tf", ".", "nn", ".", "relu", "(", "x", ")", "x", "=", "get_dropout", "(", "x", ",", "rate", "=", "dropout", ")", "# Padding + conv2d + actnorm + activation.", "# [input, output: 512 channels]", "if", "activation", "==", "\"relu\"", ":", "x", "=", "conv", "(", "\"1_2\"", ",", "x", ",", "output_channels", "=", "mid_channels", ",", "filter_size", "=", "second_filter", ",", "dilations", "=", "dilations", ")", "x", "=", "tf", ".", "nn", ".", "relu", "(", "x", ")", "elif", "activation", "==", "\"gatu\"", ":", "# x = tanh(w1*x) * sigm(w2*x)", "x_tanh", "=", "conv", "(", "\"1_tanh\"", ",", "x", ",", "output_channels", "=", "mid_channels", ",", "filter_size", "=", "second_filter", ",", "dilations", "=", "dilations", ")", "x_sigm", "=", "conv", "(", "\"1_sigm\"", ",", "x", ",", "output_channels", "=", "mid_channels", ",", "filter_size", "=", "second_filter", ",", "dilations", "=", "dilations", ")", "x", "=", "tf", ".", "nn", ".", "tanh", "(", "x_tanh", ")", "*", "tf", ".", "nn", ".", "sigmoid", "(", "x_sigm", ")", "x", "=", "get_dropout", "(", "x", ",", "rate", "=", "dropout", ")", "return", "x" ]
2 layer conv block used in the affine coupling layer. Args: name: variable scope. x: 4-D or 5-D Tensor. mid_channels: Output channels of the second layer. dilations: Optional, list of integers. activation: relu or gatu. If relu, the second layer is relu(W*x) If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x) dropout: Dropout probability. Returns: x: 4-D Tensor: Output activations.
[ "2", "layer", "conv", "block", "used", "in", "the", "affine", "coupling", "layer", "." ]
python
train
33.428571
gwastro/pycbc
pycbc/transforms.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/transforms.py#L844-L896
def inverse_transform(self, maps): """ This function transforms from component masses and cartesian spins to mass-weighted spin parameters perpendicular with the angular momentum. Parameters ---------- maps : a mapping object Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values. """ # convert out = {} xi1 = conversions.primary_xi( maps[parameters.mass1], maps[parameters.mass2], maps[parameters.spin1x], maps[parameters.spin1y], maps[parameters.spin2x], maps[parameters.spin2y]) xi2 = conversions.secondary_xi( maps[parameters.mass1], maps[parameters.mass2], maps[parameters.spin1x], maps[parameters.spin1y], maps[parameters.spin2x], maps[parameters.spin2y]) out["phi_a"] = conversions.phi_a( maps[parameters.mass1], maps[parameters.mass2], maps[parameters.spin1x], maps[parameters.spin1y], maps[parameters.spin2x], maps[parameters.spin2y]) out["phi_s"] = conversions.phi_s( maps[parameters.spin1x], maps[parameters.spin1y], maps[parameters.spin2x], maps[parameters.spin2y]) # map parameters from primary/secondary to indices if isinstance(xi1, numpy.ndarray): mass1, mass2 = map(numpy.array, [maps[parameters.mass1], maps[parameters.mass2]]) mask_mass1_gte_mass2 = mass1 >= mass2 mask_mass1_lt_mass2 = mass1 < mass2 out["xi1"] = numpy.concatenate(( xi1[mask_mass1_gte_mass2], xi2[mask_mass1_lt_mass2])) out["xi2"] = numpy.concatenate(( xi1[mask_mass1_gte_mass2], xi2[mask_mass1_lt_mass2])) elif maps["mass1"] > maps["mass2"]: out["xi1"] = xi1 out["xi2"] = xi2 else: out["xi1"] = xi2 out["xi2"] = xi1 return self.format_output(maps, out)
[ "def", "inverse_transform", "(", "self", ",", "maps", ")", ":", "# convert", "out", "=", "{", "}", "xi1", "=", "conversions", ".", "primary_xi", "(", "maps", "[", "parameters", ".", "mass1", "]", ",", "maps", "[", "parameters", ".", "mass2", "]", ",", "maps", "[", "parameters", ".", "spin1x", "]", ",", "maps", "[", "parameters", ".", "spin1y", "]", ",", "maps", "[", "parameters", ".", "spin2x", "]", ",", "maps", "[", "parameters", ".", "spin2y", "]", ")", "xi2", "=", "conversions", ".", "secondary_xi", "(", "maps", "[", "parameters", ".", "mass1", "]", ",", "maps", "[", "parameters", ".", "mass2", "]", ",", "maps", "[", "parameters", ".", "spin1x", "]", ",", "maps", "[", "parameters", ".", "spin1y", "]", ",", "maps", "[", "parameters", ".", "spin2x", "]", ",", "maps", "[", "parameters", ".", "spin2y", "]", ")", "out", "[", "\"phi_a\"", "]", "=", "conversions", ".", "phi_a", "(", "maps", "[", "parameters", ".", "mass1", "]", ",", "maps", "[", "parameters", ".", "mass2", "]", ",", "maps", "[", "parameters", ".", "spin1x", "]", ",", "maps", "[", "parameters", ".", "spin1y", "]", ",", "maps", "[", "parameters", ".", "spin2x", "]", ",", "maps", "[", "parameters", ".", "spin2y", "]", ")", "out", "[", "\"phi_s\"", "]", "=", "conversions", ".", "phi_s", "(", "maps", "[", "parameters", ".", "spin1x", "]", ",", "maps", "[", "parameters", ".", "spin1y", "]", ",", "maps", "[", "parameters", ".", "spin2x", "]", ",", "maps", "[", "parameters", ".", "spin2y", "]", ")", "# map parameters from primary/secondary to indices", "if", "isinstance", "(", "xi1", ",", "numpy", ".", "ndarray", ")", ":", "mass1", ",", "mass2", "=", "map", "(", "numpy", ".", "array", ",", "[", "maps", "[", "parameters", ".", "mass1", "]", ",", "maps", "[", "parameters", ".", "mass2", "]", "]", ")", "mask_mass1_gte_mass2", "=", "mass1", ">=", "mass2", "mask_mass1_lt_mass2", "=", "mass1", "<", "mass2", "out", "[", "\"xi1\"", "]", "=", "numpy", ".", "concatenate", "(", "(", "xi1", "[", "mask_mass1_gte_mass2", "]", ",", "xi2", "[", "mask_mass1_lt_mass2", "]", ")", ")", "out", "[", "\"xi2\"", "]", "=", "numpy", ".", "concatenate", "(", "(", "xi1", "[", "mask_mass1_gte_mass2", "]", ",", "xi2", "[", "mask_mass1_lt_mass2", "]", ")", ")", "elif", "maps", "[", "\"mass1\"", "]", ">", "maps", "[", "\"mass2\"", "]", ":", "out", "[", "\"xi1\"", "]", "=", "xi1", "out", "[", "\"xi2\"", "]", "=", "xi2", "else", ":", "out", "[", "\"xi1\"", "]", "=", "xi2", "out", "[", "\"xi2\"", "]", "=", "xi1", "return", "self", ".", "format_output", "(", "maps", ",", "out", ")" ]
This function transforms from component masses and cartesian spins to mass-weighted spin parameters perpendicular with the angular momentum. Parameters ---------- maps : a mapping object Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values.
[ "This", "function", "transforms", "from", "component", "masses", "and", "cartesian", "spins", "to", "mass", "-", "weighted", "spin", "parameters", "perpendicular", "with", "the", "angular", "momentum", "." ]
python
train
44.754717
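One thing to flag in the inverse_transform record above: the ndarray branch builds out["xi2"] from the same xi1/xi2 slices as out["xi1"], and numpy.concatenate reorders elements relative to the input. If the intent matches the scalar branch (xi1 of the heavier body, xi2 of the lighter one), an order-preserving masked formulation would read as below; this is a guess at the intent, not the library's actual code:

import numpy as np

def map_primary_secondary(mass1, mass2, xi1, xi2):
    # Elementwise, order-preserving version of the suspected intent.
    m = mass1 >= mass2
    return np.where(m, xi1, xi2), np.where(m, xi2, xi1)

out_xi1, out_xi2 = map_primary_secondary(
    np.array([30.0, 10.0]), np.array([20.0, 25.0]),
    np.array([0.1, 0.2]), np.array([0.3, 0.4]))
print(out_xi1, out_xi2)  # [0.1 0.4] [0.3 0.2]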
seomoz/shovel
shovel/runner.py
https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/runner.py#L30-L123
def run(*args): '''Run the normal shovel functionality''' import os import sys import argparse import pkg_resources # First off, read the arguments parser = argparse.ArgumentParser(prog='shovel', description='Rake, for Python') parser.add_argument('method', help='The task to run') parser.add_argument('--verbose', dest='verbose', action='store_true', help='Be extra talkative') parser.add_argument('--dry-run', dest='dryRun', action='store_true', help='Show the args that would be used') ver = pkg_resources.require('shovel')[0].version parser.add_argument('--version', action='version', version='Shovel v %s' % ver, help='print the version of Shovel.') # Parse our arguments if args: clargs, remaining = parser.parse_known_args(args=args) else: # pragma: no cover clargs, remaining = parser.parse_known_args() if clargs.verbose: logger.setLevel(logging.DEBUG) args, kwargs = parse(remaining) # Import all of the files we want shovel = Shovel() # Read in any tasks that have already been defined shovel.extend(Task.clear()) for path in [ os.path.expanduser('~/.shovel.py'), os.path.expanduser('~/.shovel')]: if os.path.exists(path): # pragma: no cover shovel.read(path, os.path.expanduser('~/')) shovel_home = os.environ.get('SHOVEL_HOME') if shovel_home and os.path.exists(shovel_home): shovel.read(shovel_home, shovel_home) for path in ['shovel.py', 'shovel']: if os.path.exists(path): shovel.read(path) # If it's help we're looking for, look no further if clargs.method == 'help': print(help.shovel_help(shovel, *args, **kwargs)) elif clargs.method == 'tasks': tasks = list(v for _, v in shovel.items()) if not tasks: print('No tasks found!') else: names = list(t.fullname for t in tasks) docs = list(t.doc for t in tasks) # The width of the screen width = 80 import shutil try: width, _ = shutil.get_terminal_size(fallback=(0, width)) except AttributeError: pass # Create the format with padding for the longest name, and to # accomodate the screen width format = '%%-%is # %%-%is' % ( max(len(name) for name in names), width) for name, doc in zip(names, docs): print(format % (name, doc)) elif clargs.method: # Try to get the first command provided try: tasks = shovel.tasks(clargs.method) except KeyError: print('Could not find task "%s"' % clargs.method, file=sys.stderr) exit(1) if len(tasks) > 1: print('Specifier "%s" matches multiple tasks:' % clargs.method, file=sys.stderr) for task in tasks: print('\t%s' % task.fullname, file=sys.stderr) exit(2) task = tasks[0] if clargs.dryRun: print(task.dry(*args, **kwargs)) else: task(*args, **kwargs)
[ "def", "run", "(", "*", "args", ")", ":", "import", "os", "import", "sys", "import", "argparse", "import", "pkg_resources", "# First off, read the arguments", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "'shovel'", ",", "description", "=", "'Rake, for Python'", ")", "parser", ".", "add_argument", "(", "'method'", ",", "help", "=", "'The task to run'", ")", "parser", ".", "add_argument", "(", "'--verbose'", ",", "dest", "=", "'verbose'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Be extra talkative'", ")", "parser", ".", "add_argument", "(", "'--dry-run'", ",", "dest", "=", "'dryRun'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Show the args that would be used'", ")", "ver", "=", "pkg_resources", ".", "require", "(", "'shovel'", ")", "[", "0", "]", ".", "version", "parser", ".", "add_argument", "(", "'--version'", ",", "action", "=", "'version'", ",", "version", "=", "'Shovel v %s'", "%", "ver", ",", "help", "=", "'print the version of Shovel.'", ")", "# Parse our arguments", "if", "args", ":", "clargs", ",", "remaining", "=", "parser", ".", "parse_known_args", "(", "args", "=", "args", ")", "else", ":", "# pragma: no cover", "clargs", ",", "remaining", "=", "parser", ".", "parse_known_args", "(", ")", "if", "clargs", ".", "verbose", ":", "logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "args", ",", "kwargs", "=", "parse", "(", "remaining", ")", "# Import all of the files we want", "shovel", "=", "Shovel", "(", ")", "# Read in any tasks that have already been defined", "shovel", ".", "extend", "(", "Task", ".", "clear", "(", ")", ")", "for", "path", "in", "[", "os", ".", "path", ".", "expanduser", "(", "'~/.shovel.py'", ")", ",", "os", ".", "path", ".", "expanduser", "(", "'~/.shovel'", ")", "]", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "# pragma: no cover", "shovel", ".", "read", "(", "path", ",", "os", ".", "path", ".", "expanduser", "(", "'~/'", ")", ")", "shovel_home", "=", "os", ".", "environ", ".", "get", "(", "'SHOVEL_HOME'", ")", "if", "shovel_home", "and", "os", ".", "path", ".", "exists", "(", "shovel_home", ")", ":", "shovel", ".", "read", "(", "shovel_home", ",", "shovel_home", ")", "for", "path", "in", "[", "'shovel.py'", ",", "'shovel'", "]", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "shovel", ".", "read", "(", "path", ")", "# If it's help we're looking for, look no further", "if", "clargs", ".", "method", "==", "'help'", ":", "print", "(", "help", ".", "shovel_help", "(", "shovel", ",", "*", "args", ",", "*", "*", "kwargs", ")", ")", "elif", "clargs", ".", "method", "==", "'tasks'", ":", "tasks", "=", "list", "(", "v", "for", "_", ",", "v", "in", "shovel", ".", "items", "(", ")", ")", "if", "not", "tasks", ":", "print", "(", "'No tasks found!'", ")", "else", ":", "names", "=", "list", "(", "t", ".", "fullname", "for", "t", "in", "tasks", ")", "docs", "=", "list", "(", "t", ".", "doc", "for", "t", "in", "tasks", ")", "# The width of the screen", "width", "=", "80", "import", "shutil", "try", ":", "width", ",", "_", "=", "shutil", ".", "get_terminal_size", "(", "fallback", "=", "(", "0", ",", "width", ")", ")", "except", "AttributeError", ":", "pass", "# Create the format with padding for the longest name, and to", "# accomodate the screen width", "format", "=", "'%%-%is # %%-%is'", "%", "(", "max", "(", "len", "(", "name", ")", "for", "name", "in", "names", ")", ",", "width", ")", "for", "name", ",", "doc", "in", "zip", "(", "names", ",", "docs", ")", ":", 
"print", "(", "format", "%", "(", "name", ",", "doc", ")", ")", "elif", "clargs", ".", "method", ":", "# Try to get the first command provided", "try", ":", "tasks", "=", "shovel", ".", "tasks", "(", "clargs", ".", "method", ")", "except", "KeyError", ":", "print", "(", "'Could not find task \"%s\"'", "%", "clargs", ".", "method", ",", "file", "=", "sys", ".", "stderr", ")", "exit", "(", "1", ")", "if", "len", "(", "tasks", ")", ">", "1", ":", "print", "(", "'Specifier \"%s\" matches multiple tasks:'", "%", "clargs", ".", "method", ",", "file", "=", "sys", ".", "stderr", ")", "for", "task", "in", "tasks", ":", "print", "(", "'\\t%s'", "%", "task", ".", "fullname", ",", "file", "=", "sys", ".", "stderr", ")", "exit", "(", "2", ")", "task", "=", "tasks", "[", "0", "]", "if", "clargs", ".", "dryRun", ":", "print", "(", "task", ".", "dry", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "else", ":", "task", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Run the normal shovel functionality
[ "Run", "the", "normal", "shovel", "functionality" ]
python
train
33.148936
Meseira/subordinate
subordinate/idrangeset.py
https://github.com/Meseira/subordinate/blob/3438df304af3dccc5bd1515231402afa708f1cc3/subordinate/idrangeset.py#L103-L126
def remove(self, first, count): """ Remove a range of count consecutive ids starting at id first from all the ranges in the set. """ # Avoid trivialities if first < 0 or count < 1: return new_range = [] last = first + count - 1 for r in self.__range: if first <= r.last and r.first <= last: # There is an overlap if r.first < first: new_range.append(IdRange(r.first, first-r.first)) if last < r.last: new_range.append(IdRange(last+1, r.last-last)) else: # No overlap, range is kept new_range.append(r) self.__range = new_range
[ "def", "remove", "(", "self", ",", "first", ",", "count", ")", ":", "# Avoid trivialities", "if", "first", "<", "0", "or", "count", "<", "1", ":", "return", "new_range", "=", "[", "]", "last", "=", "first", "+", "count", "-", "1", "for", "r", "in", "self", ".", "__range", ":", "if", "first", "<=", "r", ".", "last", "and", "r", ".", "first", "<=", "last", ":", "# There is an overlap", "if", "r", ".", "first", "<", "first", ":", "new_range", ".", "append", "(", "IdRange", "(", "r", ".", "first", ",", "first", "-", "r", ".", "first", ")", ")", "if", "last", "<", "r", ".", "last", ":", "new_range", ".", "append", "(", "IdRange", "(", "last", "+", "1", ",", "r", ".", "last", "-", "last", ")", ")", "else", ":", "# No overlap, range is kept", "new_range", ".", "append", "(", "r", ")", "self", ".", "__range", "=", "new_range" ]
Remove a range of count consecutive ids starting at id first from all the ranges in the set.
[ "Remove", "a", "range", "of", "count", "consecutive", "ids", "starting", "at", "id", "first", "from", "all", "the", "ranges", "in", "the", "set", "." ]
python
train
30.916667
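The overlap logic in remove is plain interval subtraction. A tuple-based sketch of the same per-range rule, independent of the IdRange class:

def subtract(range_, first, count):
    # range_ is (first, last) inclusive; returns the surviving pieces.
    r_first, r_last = range_
    last = first + count - 1
    if first > r_last or r_first > last:
        return [range_]                # no overlap, keep as-is
    pieces = []
    if r_first < first:
        pieces.append((r_first, first - 1))
    if last < r_last:
        pieces.append((last + 1, r_last))
    return pieces

print(subtract((100, 199), 150, 10))  # [(100, 149), (160, 199)]
print(subtract((100, 199), 90, 200))  # []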
tomekwojcik/envelopes
envelopes/local.py
https://github.com/tomekwojcik/envelopes/blob/8ad190a55d0d8b805b6ae545b896e719467253b7/envelopes/local.py#L301-L311
def _get_current_object(self): """Return the current object. This is useful if you want the real object behind the proxy at a time for performance reasons or because you want to pass the object into a different context. """ if not hasattr(self.__local, '__release_local__'): return self.__local() try: return getattr(self.__local, self.__name__) except AttributeError: raise RuntimeError('no object bound to %s' % self.__name__)
[ "def", "_get_current_object", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ".", "__local", ",", "'__release_local__'", ")", ":", "return", "self", ".", "__local", "(", ")", "try", ":", "return", "getattr", "(", "self", ".", "__local", ",", "self", ".", "__name__", ")", "except", "AttributeError", ":", "raise", "RuntimeError", "(", "'no object bound to %s'", "%", "self", ".", "__name__", ")" ]
Return the current object. This is useful if you want the real object behind the proxy at a time for performance reasons or because you want to pass the object into a different context.
[ "Return", "the", "current", "object", ".", "This", "is", "useful", "if", "you", "want", "the", "real", "object", "behind", "the", "proxy", "at", "a", "time", "for", "performance", "reasons", "or", "because", "you", "want", "to", "pass", "the", "object", "into", "a", "different", "context", "." ]
python
train
46.454545
pbrisk/businessdate
businessdate/basedate.py
https://github.com/pbrisk/businessdate/blob/79a0c5a4e557cbacca82a430403b18413404a9bc/businessdate/basedate.py#L54-L67
def days_in_month(year, month): """ returns number of days for the given year and month :param int year: calendar year :param int month: calendar month :return int: """ eom = _days_per_month[month - 1] if is_leap_year(year) and month == 2: eom += 1 return eom
[ "def", "days_in_month", "(", "year", ",", "month", ")", ":", "eom", "=", "_days_per_month", "[", "month", "-", "1", "]", "if", "is_leap_year", "(", "year", ")", "and", "month", "==", "2", ":", "eom", "+=", "1", "return", "eom" ]
returns number of days for the given year and month :param int year: calendar year :param int month: calendar month :return int:
[ "returns", "number", "of", "days", "for", "the", "given", "year", "and", "month" ]
python
valid
20.928571
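A self-contained sketch with assumed definitions of the two module-level helpers the function relies on, plus sample results:

# Assumed helpers (their exact definitions in businessdate may differ).
_days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

def is_leap_year(year):
    # Gregorian rule: every 4th year, except centuries not divisible by 400.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

# days_in_month(2020, 2) -> 29   (leap year)
# days_in_month(2021, 2) -> 28
# days_in_month(2021, 4) -> 30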
fermiPy/fermipy
fermipy/skymap.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/skymap.py#L351-L382
def get_map_values(self, lons, lats, ibin=None):
        """Return the map values corresponding to a set of coordinates.

        Parameters
        ----------
        lons : array-like
           'Longitudes' (RA or GLON)

        lats : array-like
           'Latitudes' (DEC or GLAT)

        ibin : int or array-like
           Extract data only for a given energy bin. None -> extract data for all bins

        Returns
        -------
        vals : numpy.ndarray((n))
           Values of pixels in the flattened map, np.nan used to flag
           coords outside of map
        """
        pix_idxs = self.get_pixel_indices(lons, lats, ibin)
        idxs = copy.copy(pix_idxs)

        m = np.empty_like(idxs[0], dtype=bool)
        m.fill(True)
        for i, p in enumerate(pix_idxs):
            m &= (pix_idxs[i] >= 0) & (pix_idxs[i] < self._npix[i])
            idxs[i][~m] = 0

        vals = self.counts.T[idxs]
        vals[~m] = np.nan
        return vals
[ "def", "get_map_values", "(", "self", ",", "lons", ",", "lats", ",", "ibin", "=", "None", ")", ":", "pix_idxs", "=", "self", ".", "get_pixel_indices", "(", "lons", ",", "lats", ",", "ibin", ")", "idxs", "=", "copy", ".", "copy", "(", "pix_idxs", ")", "m", "=", "np", ".", "empty_like", "(", "idxs", "[", "0", "]", ",", "dtype", "=", "bool", ")", "m", ".", "fill", "(", "True", ")", "for", "i", ",", "p", "in", "enumerate", "(", "pix_idxs", ")", ":", "m", "&=", "(", "pix_idxs", "[", "i", "]", ">=", "0", ")", "&", "(", "pix_idxs", "[", "i", "]", "<", "self", ".", "_npix", "[", "i", "]", ")", "idxs", "[", "i", "]", "[", "~", "m", "]", "=", "0", "vals", "=", "self", ".", "counts", ".", "T", "[", "idxs", "]", "vals", "[", "~", "m", "]", "=", "np", ".", "nan", "return", "vals" ]
Return the map values corresponding to a set of coordinates.

Parameters
----------
lons : array-like
   'Longitudes' (RA or GLON)

lats : array-like
   'Latitudes' (DEC or GLAT)

ibin : int or array-like
   Extract data only for a given energy bin. None -> extract data for all bins

Returns
-------
vals : numpy.ndarray((n))
   Values of pixels in the flattened map, np.nan used to flag
   coords outside of map
[ "Return", "the", "map", "values", "corresponding", "to", "a", "set", "of", "coordinates", "." ]
python
train
29.625
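The clamp-then-flag idiom used above, shown in isolation as a small NumPy sketch (not fermipy API):

import numpy as np

counts = np.arange(5, dtype=float)  # stand-in for a flattened map
idx = np.array([1, 3, 7])           # 7 falls outside the map
m = (idx >= 0) & (idx < counts.size)
idx = np.where(m, idx, 0)           # clamp invalid indices so lookup is legal
vals = counts[idx]
vals[~m] = np.nan                   # -> array([ 1.,  3., nan])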
stevearc/pyramid_webpack
pyramid_webpack/__init__.py
https://github.com/stevearc/pyramid_webpack/blob/4fcad26271fd6e8c270e19c7943240fea6d8c484/pyramid_webpack/__init__.py#L183-L194
def _add_url(self, chunk): """ Add a 'url' property to a chunk and return it """ if 'url' in chunk: return chunk public_path = chunk.get('publicPath') if public_path: chunk['url'] = public_path else: fullpath = posixpath.join(self.state.static_view_path, chunk['name']) chunk['url'] = self._request.static_url(fullpath) return chunk
[ "def", "_add_url", "(", "self", ",", "chunk", ")", ":", "if", "'url'", "in", "chunk", ":", "return", "chunk", "public_path", "=", "chunk", ".", "get", "(", "'publicPath'", ")", "if", "public_path", ":", "chunk", "[", "'url'", "]", "=", "public_path", "else", ":", "fullpath", "=", "posixpath", ".", "join", "(", "self", ".", "state", ".", "static_view_path", ",", "chunk", "[", "'name'", "]", ")", "chunk", "[", "'url'", "]", "=", "self", ".", "_request", ".", "static_url", "(", "fullpath", ")", "return", "chunk" ]
Add a 'url' property to a chunk and return it
[ "Add", "a", "url", "property", "to", "a", "chunk", "and", "return", "it" ]
python
train
37.916667
kinegratii/borax
borax/utils.py
https://github.com/kinegratii/borax/blob/921649f9277e3f657b6dea5a80e67de9ee5567f6/borax/utils.py#L79-L82
def chunks(iterable, n): """Yield successive n-sized chunks from iterable object. https://stackoverflow.com/a/312464 """ for i in range(0, len(iterable), n): yield iterable[i:i + n]
[ "def", "chunks", "(", "iterable", ",", "n", ")", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "iterable", ")", ",", "n", ")", ":", "yield", "iterable", "[", "i", ":", "i", "+", "n", "]" ]
Yield successive n-sized chunks from iterable object. https://stackoverflow.com/a/312464
[ "Yield", "successive", "n", "-", "sized", "chunks", "from", "iterable", "object", ".", "https", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "312464" ]
python
train
48.5
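Usage sketch, assuming the chunks generator above is in scope; the final chunk may be shorter than n:

data = list(range(7))
print(list(chunks(data, 3)))      # -> [[0, 1, 2], [3, 4, 5], [6]]
print(list(chunks("abcdef", 2)))  # -> ['ab', 'cd', 'ef']; works for any sliceable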
fabioz/PyDev.Debugger
third_party/isort_container/isort/pie_slice.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/isort_container/isort/pie_slice.py#L79-L115
def unmodified_isinstance(*bases):
    """When called in the form MyOverrideClass(unmodified_isinstance(BuiltInClass))
    it allows calls against passed-in built-in instances to pass even if they're not a subclass
    """
    class UnmodifiedIsInstance(type):
        if sys.version_info[0] == 2 and sys.version_info[1] <= 6:

            @classmethod
            def __instancecheck__(cls, instance):
                if cls.__name__ in (str(base.__name__) for base in bases):
                    return isinstance(instance, bases)

                subclass = getattr(instance, '__class__', None)
                subtype = type(instance)
                instance_type = getattr(abc, '_InstanceType', None)
                if not instance_type:
                    class test_object:
                        pass
                    instance_type = type(test_object)
                if subtype is instance_type:
                    subtype = subclass
                if subtype is subclass or subclass is None:
                    return cls.__subclasscheck__(subtype)
                return (cls.__subclasscheck__(subclass) or
                        cls.__subclasscheck__(subtype))
        else:
            @classmethod
            def __instancecheck__(cls, instance):
                if cls.__name__ in (str(base.__name__) for base in bases):
                    return isinstance(instance, bases)
                return type.__instancecheck__(cls, instance)

    return with_metaclass(UnmodifiedIsInstance, *bases)
[ "def", "unmodified_isinstance", "(", "*", "bases", ")", ":", "class", "UnmodifiedIsInstance", "(", "type", ")", ":", "if", "sys", ".", "version_info", "[", "0", "]", "==", "2", "and", "sys", ".", "version_info", "[", "1", "]", "<=", "6", ":", "@", "classmethod", "def", "__instancecheck__", "(", "cls", ",", "instance", ")", ":", "if", "cls", ".", "__name__", "in", "(", "str", "(", "base", ".", "__name__", ")", "for", "base", "in", "bases", ")", ":", "return", "isinstance", "(", "instance", ",", "bases", ")", "subclass", "=", "getattr", "(", "instance", ",", "'__class__'", ",", "None", ")", "subtype", "=", "type", "(", "instance", ")", "instance_type", "=", "getattr", "(", "abc", ",", "'_InstanceType'", ",", "None", ")", "if", "not", "instance_type", ":", "class", "test_object", ":", "pass", "instance_type", "=", "type", "(", "test_object", ")", "if", "subtype", "is", "instance_type", ":", "subtype", "=", "subclass", "if", "subtype", "is", "subclass", "or", "subclass", "is", "None", ":", "return", "cls", ".", "__subclasscheck__", "(", "subtype", ")", "return", "(", "cls", ".", "__subclasscheck__", "(", "subclass", ")", "or", "cls", ".", "__subclasscheck__", "(", "subtype", ")", ")", "else", ":", "@", "classmethod", "def", "__instancecheck__", "(", "cls", ",", "instance", ")", ":", "if", "cls", ".", "__name__", "in", "(", "str", "(", "base", ".", "__name__", ")", "for", "base", "in", "bases", ")", ":", "return", "isinstance", "(", "instance", ",", "bases", ")", "return", "type", ".", "__instancecheck__", "(", "cls", ",", "instance", ")", "return", "with_metaclass", "(", "UnmodifiedIsInstance", ",", "*", "bases", ")" ]
When called in the form MyOverrideClass(unmodified_isinstance(BuiltInClass)) it allows calls against passed-in built-in instances to pass even if they're not a subclass
[ "When", "called", "in", "the", "form" ]
python
train
39.621622
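A sketch of the intended usage: the override class deliberately reuses the built-in's name, so cls.__name__ matches a base name and the metaclass defers to a plain isinstance check against the built-in (with_metaclass is pie_slice's own helper; its availability here is assumed):

import builtins

class str(unmodified_isinstance(builtins.str)):  # name matches the base
    def trim(self):
        return self.strip()

print(isinstance("plain built-in", str))  # -> True, despite not subclassing
print(str(" padded ").trim())             # -> 'padded'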
knipknap/exscript
Exscript/protocols/telnetlib.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/protocols/telnetlib.py#L297-L311
def write(self, buffer): """Write a string to the socket, doubling any IAC characters. Can block if the connection is blocked. May raise socket.error if the connection is closed. """ if type(buffer) == type(0): buffer = chr(buffer) elif not isinstance(buffer, bytes): buffer = buffer.encode(self.encoding) if IAC in buffer: buffer = buffer.replace(IAC, IAC+IAC) self.msg("send %s", repr(buffer)) self.sock.send(buffer)
[ "def", "write", "(", "self", ",", "buffer", ")", ":", "if", "type", "(", "buffer", ")", "==", "type", "(", "0", ")", ":", "buffer", "=", "chr", "(", "buffer", ")", "elif", "not", "isinstance", "(", "buffer", ",", "bytes", ")", ":", "buffer", "=", "buffer", ".", "encode", "(", "self", ".", "encoding", ")", "if", "IAC", "in", "buffer", ":", "buffer", "=", "buffer", ".", "replace", "(", "IAC", ",", "IAC", "+", "IAC", ")", "self", ".", "msg", "(", "\"send %s\"", ",", "repr", "(", "buffer", ")", ")", "self", ".", "sock", ".", "send", "(", "buffer", ")" ]
Write a string to the socket, doubling any IAC characters. Can block if the connection is blocked. May raise socket.error if the connection is closed.
[ "Write", "a", "string", "to", "the", "socket", "doubling", "any", "IAC", "characters", "." ]
python
train
34.333333
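The escaping rule in isolation: a literal 0xFF byte in application data must be doubled so the peer does not read it as a command introducer (a small standalone sketch):

IAC = bytes([255])                 # telnet "Interpret As Command" byte
payload = b"user" + IAC + b"name"
escaped = payload.replace(IAC, IAC + IAC)
print(escaped)                     # -> b'user\xff\xffname'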
RudolfCardinal/pythonlib
cardinal_pythonlib/interval.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/interval.py#L869-L878
def first_interval_starting(self, start: datetime.datetime) -> \ Optional[Interval]: """ Returns our first interval that starts with the ``start`` parameter, or ``None``. """ for i in self.intervals: if i.start == start: return i return None
[ "def", "first_interval_starting", "(", "self", ",", "start", ":", "datetime", ".", "datetime", ")", "->", "Optional", "[", "Interval", "]", ":", "for", "i", "in", "self", ".", "intervals", ":", "if", "i", ".", "start", "==", "start", ":", "return", "i", "return", "None" ]
Returns our first interval that starts with the ``start`` parameter, or ``None``.
[ "Returns", "our", "first", "interval", "that", "starts", "with", "the", "start", "parameter", "or", "None", "." ]
python
train
32
indico/indico-plugins
importer/indico_importer/converter.py
https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/importer/indico_importer/converter.py#L76-L112
def _convert(cls, record): """ Core method of the converter. Converts a single dictionary into another dictionary. """ if not record: return {} converted_dict = {} for field in cls.conversion: key = field[0] if len(field) >= 2 and field[1]: converted_key = field[1] else: converted_key = key if len(field) >= 3 and field[2]: conversion_method = field[2] else: conversion_method = cls.default_conversion_method if len(field) >= 4: converter = field[3] else: converter = None try: value = conversion_method(record[key]) except KeyError: continue if converter: value = converter._convert_internal(value) if converted_key is APPEND: if isinstance(value, list): for v in value: converted_dict.update(v) else: converted_dict.update(value) else: converted_dict[converted_key] = value return converted_dict
[ "def", "_convert", "(", "cls", ",", "record", ")", ":", "if", "not", "record", ":", "return", "{", "}", "converted_dict", "=", "{", "}", "for", "field", "in", "cls", ".", "conversion", ":", "key", "=", "field", "[", "0", "]", "if", "len", "(", "field", ")", ">=", "2", "and", "field", "[", "1", "]", ":", "converted_key", "=", "field", "[", "1", "]", "else", ":", "converted_key", "=", "key", "if", "len", "(", "field", ")", ">=", "3", "and", "field", "[", "2", "]", ":", "conversion_method", "=", "field", "[", "2", "]", "else", ":", "conversion_method", "=", "cls", ".", "default_conversion_method", "if", "len", "(", "field", ")", ">=", "4", ":", "converter", "=", "field", "[", "3", "]", "else", ":", "converter", "=", "None", "try", ":", "value", "=", "conversion_method", "(", "record", "[", "key", "]", ")", "except", "KeyError", ":", "continue", "if", "converter", ":", "value", "=", "converter", ".", "_convert_internal", "(", "value", ")", "if", "converted_key", "is", "APPEND", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "for", "v", "in", "value", ":", "converted_dict", ".", "update", "(", "v", ")", "else", ":", "converted_dict", ".", "update", "(", "value", ")", "else", ":", "converted_dict", "[", "converted_key", "]", "=", "value", "return", "converted_dict" ]
Core method of the converter. Converts a single dictionary into another dictionary.
[ "Core", "method", "of", "the", "converter", ".", "Converts", "a", "single", "dictionary", "into", "another", "dictionary", "." ]
python
train
33.27027
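A sketch of the field-tuple shape the loop consumes; the keys and methods below are illustrative, not taken from the importer plugin:

# Each entry: (source_key, target_key, conversion_method, sub_converter).
# Trailing elements may be omitted or None to fall back to the defaults.
conversion = [
    ('title',),                    # copy under the same key, default method
    ('year', 'publication_year'),  # rename only
    ('name', 'name', str.strip),   # same key, custom transform
    # ('author', APPEND, None, AuthorConverter)  # hypothetical: merge sub-record
]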
StackStorm/pybind
pybind/slxos/v17r_1_01a/interface/ethernet/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/interface/ethernet/__init__.py#L1514-L1535
def _set_link_error_disable(self, v, load=False): """ Setter method for link_error_disable, mapped from YANG variable /interface/ethernet/link_error_disable (container) If this variable is read-only (config: false) in the source YANG file, then _set_link_error_disable is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_link_error_disable() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=link_error_disable.link_error_disable, is_container='container', presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'port link dampening', u'callpoint': u'Pld', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pld', defining_module='brocade-pld', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """link_error_disable must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=link_error_disable.link_error_disable, is_container='container', presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'port link dampening', u'callpoint': u'Pld', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pld', defining_module='brocade-pld', yang_type='container', is_config=True)""", }) self.__link_error_disable = t if hasattr(self, '_set'): self._set()
[ "def", "_set_link_error_disable", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "link_error_disable", ".", "link_error_disable", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"link-error-disable\"", ",", "rest_name", "=", "\"link-error-disable\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'port link dampening'", ",", "u'callpoint'", ":", "u'Pld'", ",", "u'cli-compact-syntax'", ":", "None", ",", "u'cli-sequence-commands'", ":", "None", ",", "u'cli-incomplete-command'", ":", "None", ",", "u'cli-full-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-pld'", ",", "defining_module", "=", "'brocade-pld'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"link_error_disable must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=link_error_disable.link_error_disable, is_container='container', presence=False, yang_name=\"link-error-disable\", rest_name=\"link-error-disable\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'port link dampening', u'callpoint': u'Pld', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pld', defining_module='brocade-pld', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__link_error_disable", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for link_error_disable, mapped from YANG variable /interface/ethernet/link_error_disable (container) If this variable is read-only (config: false) in the source YANG file, then _set_link_error_disable is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_link_error_disable() directly.
[ "Setter", "method", "for", "link_error_disable", "mapped", "from", "YANG", "variable", "/", "interface", "/", "ethernet", "/", "link_error_disable", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_link_error_disable", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_link_error_disable", "()", "directly", "." ]
python
train
89.772727
twoolie/NBT
nbt/region.py
https://github.com/twoolie/NBT/blob/b06dd6cc8117d2788da1d8416e642d58bad45762/nbt/region.py#L404-L408
def _locate_free_sectors(self, ignore_chunk=None): """Return a list of booleans, indicating the free sectors.""" sectors = self._sectors(ignore_chunk=ignore_chunk) # Sectors are considered free, if the value is an empty list. return [not i for i in sectors]
[ "def", "_locate_free_sectors", "(", "self", ",", "ignore_chunk", "=", "None", ")", ":", "sectors", "=", "self", ".", "_sectors", "(", "ignore_chunk", "=", "ignore_chunk", ")", "# Sectors are considered free, if the value is an empty list.", "return", "[", "not", "i", "for", "i", "in", "sectors", "]" ]
Return a list of booleans, indicating the free sectors.
[ "Return", "a", "list", "of", "booleans", "indicating", "the", "free", "sectors", "." ]
python
train
57
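The inversion in isolation: _sectors() maps each sector to the list of chunks occupying it, so an empty list means free (a tiny sketch with illustrative data):

sectors = [[], ["chunk-a"], [], ["chunk-a", "chunk-b"]]
print([not s for s in sectors])  # -> [True, False, True, False]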
numenta/nupic
src/nupic/frameworks/opf/exp_description_helpers.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/exp_description_helpers.py#L264-L284
def handleGetValue(self, topContainer):
    """ This method overrides ValueGetterBase's "pure virtual" method.  It
    returns the referenced value. The derived class is NOT responsible for
    fully resolving the referenced value in the event the value resolves to
    another ValueGetterBase-based instance -- this is handled automatically
    within ValueGetterBase implementation.

    topContainer: The top-level container (dict, tuple, or list [sub-]instance)
                   within whose context the value-getter is applied. If
                   self.__referenceDict is None, then topContainer will be used
                   as the reference dictionary for resolving our dictionary key
                   chain.

    Returns:      The value referenced by this instance (which may be another
                   value-getter instance)
    """
    value = self.__referenceDict if self.__referenceDict is not None else topContainer

    for key in self.__dictKeyChain:
      value = value[key]

    return value
[ "def", "handleGetValue", "(", "self", ",", "topContainer", ")", ":", "value", "=", "self", ".", "__referenceDict", "if", "self", ".", "__referenceDict", "is", "not", "None", "else", "topContainer", "for", "key", "in", "self", ".", "__dictKeyChain", ":", "value", "=", "value", "[", "key", "]", "return", "value" ]
This method overrides ValueGetterBase's "pure virtual" method.  It
returns the referenced value. The derived class is NOT responsible for
fully resolving the referenced value in the event the value resolves to
another ValueGetterBase-based instance -- this is handled automatically
within ValueGetterBase implementation.

topContainer: The top-level container (dict, tuple, or list [sub-]instance)
               within whose context the value-getter is applied. If
               self.__referenceDict is None, then topContainer will be used
               as the reference dictionary for resolving our dictionary key
               chain.

Returns:      The value referenced by this instance (which may be another
               value-getter instance)
[ "This", "method", "overrides", "ValueGetterBase", "s", "pure", "virtual", "method", ".", "It", "returns", "the", "referenced", "value", ".", "The", "derived", "class", "is", "NOT", "responsible", "for", "fully", "resolving", "the", "reference", "d", "value", "in", "the", "event", "the", "value", "resolves", "to", "another", "ValueGetterBase", "-", "based", "instance", "--", "this", "is", "handled", "automatically", "within", "ValueGetterBase", "implementation", "." ]
python
valid
47.428571
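The key-chain walk in isolation, against an illustrative nested container (the actual chain is whatever was stored in __dictKeyChain):

container = {'model': {'params': {'alpha': 0.5}}}
value = container
for key in ('model', 'params', 'alpha'):  # illustrative key chain
    value = value[key]
print(value)  # -> 0.5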
saltstack/salt
salt/states/cyg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cyg.py#L26-L91
def installed(name, cyg_arch='x86_64', mirrors=None): ''' Make sure that a package is installed. name The name of the package to install cyg_arch : x86_64 The cygwin architecture to install the package into. Current options are x86 and x86_64 mirrors : None List of mirrors to check. None will use a default mirror (kernel.org) CLI Example: .. code-block:: yaml rsync: cyg.installed: - mirrors: - http://mirror/without/public/key: "" - http://mirror/with/public/key: http://url/of/public/key ''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} if cyg_arch not in ['x86', 'x86_64']: ret['result'] = False ret['comment'] = 'The \'cyg_arch\' argument must\ be one of \'x86\' or \'x86_64\'' return ret LOG.debug('Installed State: Initial Mirror list: %s', mirrors) if not __salt__['cyg.check_valid_package'](name, cyg_arch=cyg_arch, mirrors=mirrors): ret['result'] = False ret['comment'] = 'Invalid package name.' return ret pkgs = __salt__['cyg.list'](name, cyg_arch) if name in pkgs: ret['result'] = True ret['comment'] = 'Package is already installed.' return ret if __opts__['test']: ret['comment'] = 'The package {0} would\ have been installed'.format(name) return ret if __salt__['cyg.install'](name, cyg_arch=cyg_arch, mirrors=mirrors): ret['result'] = True ret['changes'][name] = 'Installed' ret['comment'] = 'Package was successfully installed' else: ret['result'] = False ret['comment'] = 'Could not install package.' return ret
[ "def", "installed", "(", "name", ",", "cyg_arch", "=", "'x86_64'", ",", "mirrors", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "if", "cyg_arch", "not", "in", "[", "'x86'", ",", "'x86_64'", "]", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'The \\'cyg_arch\\' argument must\\\n be one of \\'x86\\' or \\'x86_64\\''", "return", "ret", "LOG", ".", "debug", "(", "'Installed State: Initial Mirror list: %s'", ",", "mirrors", ")", "if", "not", "__salt__", "[", "'cyg.check_valid_package'", "]", "(", "name", ",", "cyg_arch", "=", "cyg_arch", ",", "mirrors", "=", "mirrors", ")", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Invalid package name.'", "return", "ret", "pkgs", "=", "__salt__", "[", "'cyg.list'", "]", "(", "name", ",", "cyg_arch", ")", "if", "name", "in", "pkgs", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Package is already installed.'", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'The package {0} would\\\n have been installed'", ".", "format", "(", "name", ")", "return", "ret", "if", "__salt__", "[", "'cyg.install'", "]", "(", "name", ",", "cyg_arch", "=", "cyg_arch", ",", "mirrors", "=", "mirrors", ")", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'changes'", "]", "[", "name", "]", "=", "'Installed'", "ret", "[", "'comment'", "]", "=", "'Package was successfully installed'", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Could not install package.'", "return", "ret" ]
Make sure that a package is installed. name The name of the package to install cyg_arch : x86_64 The cygwin architecture to install the package into. Current options are x86 and x86_64 mirrors : None List of mirrors to check. None will use a default mirror (kernel.org) CLI Example: .. code-block:: yaml rsync: cyg.installed: - mirrors: - http://mirror/without/public/key: "" - http://mirror/with/public/key: http://url/of/public/key
[ "Make", "sure", "that", "a", "package", "is", "installed", "." ]
python
train
28.560606
globality-corp/openapi
openapi/base.py
https://github.com/globality-corp/openapi/blob/ee1de8468abeb800e3ad0134952726cdce6b2459/openapi/base.py#L85-L91
def loads(cls, s): """ Load an instance of this class from YAML. """ with closing(StringIO(s)) as fileobj: return cls.load(fileobj)
[ "def", "loads", "(", "cls", ",", "s", ")", ":", "with", "closing", "(", "StringIO", "(", "s", ")", ")", "as", "fileobj", ":", "return", "cls", ".", "load", "(", "fileobj", ")" ]
Load an instance of this class from YAML.
[ "Load", "an", "instance", "of", "this", "class", "from", "YAML", "." ]
python
train
24.285714
python-cmd2/cmd2
cmd2/cmd2.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/cmd2.py#L2019-L2026
def cmd_func(self, command: str) -> Optional[Callable]: """ Get the function for a command :param command: the name of the command """ func_name = self.cmd_func_name(command) if func_name: return getattr(self, func_name)
[ "def", "cmd_func", "(", "self", ",", "command", ":", "str", ")", "->", "Optional", "[", "Callable", "]", ":", "func_name", "=", "self", ".", "cmd_func_name", "(", "command", ")", "if", "func_name", ":", "return", "getattr", "(", "self", ",", "func_name", ")" ]
Get the function for a command :param command: the name of the command
[ "Get", "the", "function", "for", "a", "command", ":", "param", "command", ":", "the", "name", "of", "the", "command" ]
python
train
34.125
spyder-ide/spyder
spyder/utils/vcs.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/vcs.py#L60-L70
def get_vcs_root(path): """Return VCS root directory path Return None if path is not within a supported VCS repository""" previous_path = path while get_vcs_info(path) is None: path = abspardir(path) if path == previous_path: return else: previous_path = path return osp.abspath(path)
[ "def", "get_vcs_root", "(", "path", ")", ":", "previous_path", "=", "path", "while", "get_vcs_info", "(", "path", ")", "is", "None", ":", "path", "=", "abspardir", "(", "path", ")", "if", "path", "==", "previous_path", ":", "return", "else", ":", "previous_path", "=", "path", "return", "osp", ".", "abspath", "(", "path", ")" ]
Return VCS root directory path Return None if path is not within a supported VCS repository
[ "Return", "VCS", "root", "directory", "path", "Return", "None", "if", "path", "is", "not", "within", "a", "supported", "VCS", "repository" ]
python
train
32
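The walk-upward loop in isolation; abspardir is spyder's parent-directory helper, so this sketch substitutes os.path.dirname and a caller-supplied predicate:

import os.path as osp

def walk_up(path, predicate):
    # Climb toward the filesystem root until the predicate matches;
    # dirname() of the root returns the root itself, ending the climb.
    while not predicate(path):
        parent = osp.dirname(path)
        if parent == path:
            return None
        path = parent
    return osp.abspath(path)

# e.g. walk_up('/repo/src/pkg', lambda p: osp.isdir(osp.join(p, '.git')))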
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/client.py#L1377-L1418
def _do_multipart_upload(self, stream, metadata, size, num_retries): """Perform a multipart upload. :type stream: IO[bytes] :param stream: A bytes IO object open for reading. :type metadata: dict :param metadata: The metadata associated with the upload. :type size: int :param size: The number of bytes to be uploaded (which will be read from ``stream``). If not provided, the upload will be concluded once ``stream`` is exhausted (or :data:`None`). :type num_retries: int :param num_retries: Number of upload retries. (Deprecated: This argument will be removed in a future release.) :rtype: :class:`~requests.Response` :returns: The "200 OK" response object returned after the multipart upload request. :raises: :exc:`ValueError` if the ``stream`` has fewer than ``size`` bytes remaining. """ data = stream.read(size) if len(data) < size: msg = _READ_LESS_THAN_SIZE.format(size, len(data)) raise ValueError(msg) headers = _get_upload_headers(self._connection.USER_AGENT) upload_url = _MULTIPART_URL_TEMPLATE.format(project=self.project) upload = MultipartUpload(upload_url, headers=headers) if num_retries is not None: upload._retry_strategy = resumable_media.RetryStrategy( max_retries=num_retries ) response = upload.transmit(self._http, data, metadata, _GENERIC_CONTENT_TYPE) return response
[ "def", "_do_multipart_upload", "(", "self", ",", "stream", ",", "metadata", ",", "size", ",", "num_retries", ")", ":", "data", "=", "stream", ".", "read", "(", "size", ")", "if", "len", "(", "data", ")", "<", "size", ":", "msg", "=", "_READ_LESS_THAN_SIZE", ".", "format", "(", "size", ",", "len", "(", "data", ")", ")", "raise", "ValueError", "(", "msg", ")", "headers", "=", "_get_upload_headers", "(", "self", ".", "_connection", ".", "USER_AGENT", ")", "upload_url", "=", "_MULTIPART_URL_TEMPLATE", ".", "format", "(", "project", "=", "self", ".", "project", ")", "upload", "=", "MultipartUpload", "(", "upload_url", ",", "headers", "=", "headers", ")", "if", "num_retries", "is", "not", "None", ":", "upload", ".", "_retry_strategy", "=", "resumable_media", ".", "RetryStrategy", "(", "max_retries", "=", "num_retries", ")", "response", "=", "upload", ".", "transmit", "(", "self", ".", "_http", ",", "data", ",", "metadata", ",", "_GENERIC_CONTENT_TYPE", ")", "return", "response" ]
Perform a multipart upload. :type stream: IO[bytes] :param stream: A bytes IO object open for reading. :type metadata: dict :param metadata: The metadata associated with the upload. :type size: int :param size: The number of bytes to be uploaded (which will be read from ``stream``). If not provided, the upload will be concluded once ``stream`` is exhausted (or :data:`None`). :type num_retries: int :param num_retries: Number of upload retries. (Deprecated: This argument will be removed in a future release.) :rtype: :class:`~requests.Response` :returns: The "200 OK" response object returned after the multipart upload request. :raises: :exc:`ValueError` if the ``stream`` has fewer than ``size`` bytes remaining.
[ "Perform", "a", "multipart", "upload", "." ]
python
train
38.119048
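The pre-flight size check in isolation: a short read means the stream ended before size bytes were available, which the method turns into a ValueError (a standalone sketch, independent of the BigQuery client):

import io

stream = io.BytesIO(b"abc")
size = 10
data = stream.read(size)
print(len(data) < size)  # -> True: the real method would raise ValueError here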
StellarCN/py-stellar-base
stellar_base/asset.py
https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/asset.py#L61-L72
def to_dict(self): """Generate a dict for this object's attributes. :return: A dict representing an :class:`Asset` """ rv = {'code': self.code} if not self.is_native(): rv['issuer'] = self.issuer rv['type'] = self.type else: rv['type'] = 'native' return rv
[ "def", "to_dict", "(", "self", ")", ":", "rv", "=", "{", "'code'", ":", "self", ".", "code", "}", "if", "not", "self", ".", "is_native", "(", ")", ":", "rv", "[", "'issuer'", "]", "=", "self", ".", "issuer", "rv", "[", "'type'", "]", "=", "self", ".", "type", "else", ":", "rv", "[", "'type'", "]", "=", "'native'", "return", "rv" ]
Generate a dict for this object's attributes. :return: A dict representing an :class:`Asset`
[ "Generate", "a", "dict", "for", "this", "object", "s", "attributes", "." ]
python
train
28.166667
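Illustrative outputs, assuming py-stellar-base's Asset(code, issuer=None) constructor and its convention that a short issued code reports type 'credit_alphanum4' (both assumptions here; the issuer string is a placeholder):

# Asset('XLM').to_dict()
#   -> {'code': 'XLM', 'type': 'native'}
# Asset('USD', 'GA...ISSUER').to_dict()
#   -> {'code': 'USD', 'issuer': 'GA...ISSUER', 'type': 'credit_alphanum4'}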
alephdata/memorious
memorious/cli.py
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/cli.py#L21-L30
def cli(debug, cache, incremental): """Crawler framework for documents and structured scrapers.""" settings.HTTP_CACHE = cache settings.INCREMENTAL = incremental settings.DEBUG = debug if settings.DEBUG: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) init_memorious()
[ "def", "cli", "(", "debug", ",", "cache", ",", "incremental", ")", ":", "settings", ".", "HTTP_CACHE", "=", "cache", "settings", ".", "INCREMENTAL", "=", "incremental", "settings", ".", "DEBUG", "=", "debug", "if", "settings", ".", "DEBUG", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "DEBUG", ")", "else", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ")", "init_memorious", "(", ")" ]
Crawler framework for documents and structured scrapers.
[ "Crawler", "framework", "for", "documents", "and", "structured", "scrapers", "." ]
python
train
34.2
olt/scriptine
scriptine/_path.py
https://github.com/olt/scriptine/blob/f4cfea939f2f3ad352b24c5f6410f79e78723d0e/scriptine/_path.py#L914-L919
def ensure_dir(self, mode=0o777):
        """ Make sure the directory exists, create if necessary. """
        if not self.exists() or not self.isdir():
            os.makedirs(self, mode)
[ "def", "ensure_dir", "(", "self", ",", "mode", "=", "0777", ")", ":", "if", "not", "self", ".", "exists", "(", ")", "or", "not", "self", ".", "isdir", "(", ")", ":", "os", ".", "makedirs", "(", "self", ",", "mode", ")" ]
Make sure the directory exists, create if necessary.
[ "Make", "sure", "the", "directory", "exists", "create", "if", "necessary", "." ]
python
train
33
mk-fg/feedjack
feedjack/fjcache.py
https://github.com/mk-fg/feedjack/blob/3fe65c0f66dc2cfdf45834aaa7235ec9f81b3ca3/feedjack/fjcache.py#L50-L54
def feed_interval_get(feed_id, parameters): 'Get adaptive interval between checks for a feed.' val = cache.get(getkey( T_INTERVAL, key=feed_interval_key(feed_id, parameters) )) return val if isinstance(val, tuple) else (val, None)
[ "def", "feed_interval_get", "(", "feed_id", ",", "parameters", ")", ":", "val", "=", "cache", ".", "get", "(", "getkey", "(", "T_INTERVAL", ",", "key", "=", "feed_interval_key", "(", "feed_id", ",", "parameters", ")", ")", ")", "return", "val", "if", "isinstance", "(", "val", ",", "tuple", ")", "else", "(", "val", ",", "None", ")" ]
Get adaptive interval between checks for a feed.
[ "Get", "adaptive", "interval", "between", "checks", "for", "a", "feed", "." ]
python
train
46.2
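The normalisation at the return, in isolation: a bare cached value and a (value, timestamp) tuple both come back as a tuple, presumably to tolerate entries written before a second element was stored (that motive is an assumption):

def normalize(val):
    return val if isinstance(val, tuple) else (val, None)

print(normalize(300))          # -> (300, None)  bare cached value
print(normalize((300, 17.5)))  # -> (300, 17.5)  already a tuple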
tensorflow/datasets
tensorflow_datasets/image/corruptions.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/corruptions.py#L199-L213
def impulse_noise(x, severity=1): """Impulse noise corruption to images. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Added impulse noise. """ c = [.03, .06, .09, 0.17, 0.27][severity - 1] x = tfds.core.lazy_imports.skimage.util.random_noise( np.array(x) / 255., mode='s&p', amount=c) x_clip = np.clip(x, 0, 1) * 255 return around_and_astype(x_clip)
[ "def", "impulse_noise", "(", "x", ",", "severity", "=", "1", ")", ":", "c", "=", "[", ".03", ",", ".06", ",", ".09", ",", "0.17", ",", "0.27", "]", "[", "severity", "-", "1", "]", "x", "=", "tfds", ".", "core", ".", "lazy_imports", ".", "skimage", ".", "util", ".", "random_noise", "(", "np", ".", "array", "(", "x", ")", "/", "255.", ",", "mode", "=", "'s&p'", ",", "amount", "=", "c", ")", "x_clip", "=", "np", ".", "clip", "(", "x", ",", "0", ",", "1", ")", "*", "255", "return", "around_and_astype", "(", "x_clip", ")" ]
Impulse noise corruption to images. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Added impulse noise.
[ "Impulse", "noise", "corruption", "to", "images", "." ]
python
train
33.866667
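The underlying scikit-image call in isolation (assumes scikit-image is installed; severity 1 maps to amount=0.03 in the table above):

import numpy as np
import skimage.util

img = np.full((8, 8), 0.5)
noisy = skimage.util.random_noise(img, mode='s&p', amount=0.03)
# ~3% of pixels become 0.0 (pepper) or 1.0 (salt); the corruption above
# then clips to [0, 1], rescales to [0, 255] and rounds to uint8.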
srossross/rpmfile
rpmfile/cpiofile.py
https://github.com/srossross/rpmfile/blob/3ab96f211da7b56f5e99d8cc248f714a6e542d31/rpmfile/cpiofile.py#L171-L223
def _open(self, name=None, fileobj=None, mymap=None, block=None): """ The _open function takes some form of file identifier and creates an :py:class:`CpioFile` instance from it. :param :py:class:`str` name: a file name :param :py:class:`file` fileobj: if given, this overrides *name* :param :py:class:`mmap.mmap` mymap: if given, this overrides *fileobj* :param :py:class:`bytes` block: file contents in a block of memory, (if given, this overrides *mymap*) The file to be used can be specified in any of four different forms, (in reverse precedence): #. a file name #. :py:class:`file` object #. :py:mod:`mmap.mmap`, or #. a block of memory """ if block is not None: if not name: name = '<unknown>' self.unpack_from(block) if fileobj: fileobj.close() return self if mymap is not None: block = mymap elif fileobj: try: mymap = mmap.mmap(fileobj.fileno(), 0, mmap.MAP_SHARED, mmap.PROT_READ) # pylint: disable=W0702 except: mymap = 0 block = fileobj.read() elif name: fileobj = io.open(os.path.normpath(os.path.expanduser(name)), 'rb') else: assert False return self._open(name=name, fileobj=fileobj, mymap=mymap, block=block)
[ "def", "_open", "(", "self", ",", "name", "=", "None", ",", "fileobj", "=", "None", ",", "mymap", "=", "None", ",", "block", "=", "None", ")", ":", "if", "block", "is", "not", "None", ":", "if", "not", "name", ":", "name", "=", "'<unknown>'", "self", ".", "unpack_from", "(", "block", ")", "if", "fileobj", ":", "fileobj", ".", "close", "(", ")", "return", "self", "if", "mymap", "is", "not", "None", ":", "block", "=", "mymap", "elif", "fileobj", ":", "try", ":", "mymap", "=", "mmap", ".", "mmap", "(", "fileobj", ".", "fileno", "(", ")", ",", "0", ",", "mmap", ".", "MAP_SHARED", ",", "mmap", ".", "PROT_READ", ")", "# pylint: disable=W0702", "except", ":", "mymap", "=", "0", "block", "=", "fileobj", ".", "read", "(", ")", "elif", "name", ":", "fileobj", "=", "io", ".", "open", "(", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "expanduser", "(", "name", ")", ")", ",", "'rb'", ")", "else", ":", "assert", "False", "return", "self", ".", "_open", "(", "name", "=", "name", ",", "fileobj", "=", "fileobj", ",", "mymap", "=", "mymap", ",", "block", "=", "block", ")" ]
The _open function takes some form of file identifier and creates an :py:class:`CpioFile` instance from it. :param :py:class:`str` name: a file name :param :py:class:`file` fileobj: if given, this overrides *name* :param :py:class:`mmap.mmap` mymap: if given, this overrides *fileobj* :param :py:class:`bytes` block: file contents in a block of memory, (if given, this overrides *mymap*) The file to be used can be specified in any of four different forms, (in reverse precedence): #. a file name #. :py:class:`file` object #. :py:mod:`mmap.mmap`, or #. a block of memory
[ "The", "_open", "function", "takes", "some", "form", "of", "file", "identifier", "and", "creates", "an", ":", "py", ":", "class", ":", "CpioFile", "instance", "from", "it", "." ]
python
train
29.339623
chimera0/accel-brain-code
Automatic-Summarization/pysummarization/vectorizablesentence/lstm_rtrbm.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Automatic-Summarization/pysummarization/vectorizablesentence/lstm_rtrbm.py#L36-L56
def vectorize(self, sentence_list): ''' Args: sentence_list: The list of tokenized sentences. [[`token`, `token`, `token`, ...], [`token`, `token`, `token`, ...], [`token`, `token`, `token`, ...]] Returns: `np.ndarray` of tokens. [vector of token, vector of token, vector of token] ''' test_observed_arr = self.__setup_dataset(sentence_list, self.__token_master_list, self.__seq_len) inferenced_arr = self.__rbm.inference( test_observed_arr, training_count=1, r_batch_size=-1 ) return inferenced_arr
[ "def", "vectorize", "(", "self", ",", "sentence_list", ")", ":", "test_observed_arr", "=", "self", ".", "__setup_dataset", "(", "sentence_list", ",", "self", ".", "__token_master_list", ",", "self", ".", "__seq_len", ")", "inferenced_arr", "=", "self", ".", "__rbm", ".", "inference", "(", "test_observed_arr", ",", "training_count", "=", "1", ",", "r_batch_size", "=", "-", "1", ")", "return", "inferenced_arr" ]
Args: sentence_list: The list of tokenized sentences. [[`token`, `token`, `token`, ...], [`token`, `token`, `token`, ...], [`token`, `token`, `token`, ...]] Returns: `np.ndarray` of tokens. [vector of token, vector of token, vector of token]
[ "Args", ":", "sentence_list", ":", "The", "list", "of", "tokenized", "sentences", ".", "[[", "token", "token", "token", "...", "]", "[", "token", "token", "token", "...", "]", "[", "token", "token", "token", "...", "]]", "Returns", ":", "np", ".", "ndarray", "of", "tokens", ".", "[", "vector", "of", "token", "vector", "of", "token", "vector", "of", "token", "]" ]
python
train
34.095238
mikeywaites/flask-arrested
arrested/mixins.py
https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/mixins.py#L122-L135
def obj(self):
        """Returns the value of :meth:`ObjectMixin.get_object` and sets a private
        property called _obj.  This property ensures the logic around allow_none
        is enforced across Endpoints using the Object interface.

        :raises: :class:`werkzeug.exceptions.BadRequest`
        :returns: The result of :meth:`ObjectMixin.get_object`
        """
        if not getattr(self, '_obj', None):
            self._obj = self.get_object()
            if self._obj is None and not self.allow_none:
                self.return_error(404)

        return self._obj
[ "def", "obj", "(", "self", ")", ":", "if", "not", "getattr", "(", "self", ",", "'_obj'", ",", "None", ")", ":", "self", ".", "_obj", "=", "self", ".", "get_object", "(", ")", "if", "self", ".", "_obj", "is", "None", "and", "not", "self", ".", "allow_none", ":", "self", ".", "return_error", "(", "404", ")", "return", "self", ".", "_obj" ]
Returns the value of :meth:`ObjectMixin.get_object` and sets a private
property called _obj.  This property ensures the logic around allow_none
is enforced across Endpoints using the Object interface.

:raises: :class:`werkzeug.exceptions.BadRequest`
:returns: The result of :meth:`ObjectMixin.get_object`
[ "Returns", "the", "value", "of", ":", "meth", ":", "ObjectMixin", ".", "get_object", "and", "sets", "a", "private", "property", "called", "_obj", ".", "This", "property", "ensures", "the", "logic", "around", "allow_none", "is", "enforced", "across", "Endpoints", "using", "the", "Object", "interface", "." ]
python
train
40.714286
loli/medpy
medpy/filter/label.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/filter/label.py#L101-L132
def relabel_non_zero(label_image, start = 1):
    r"""
    Relabel the regions of a label image.
    Re-processes the labels to make them consecutive, starting from start.
    Keeps all zero (0) labels, as they are considered background.

    Parameters
    ----------
    label_image : array_like
        A nD label map.
    start : integer
        The id of the first label to assign

    Returns
    -------
    relabel_map : ndarray
        The relabelled label map.

    See also
    --------
    relabel
    """
    if start <= 0: raise ArgumentError('The starting value can not be 0 or lower.')

    l = list(scipy.unique(label_image))
    if 0 in l: l.remove(0)

    mapping = dict()
    mapping[0] = 0
    for key, item in zip(l, list(range(start, len(l) + start))):
        mapping[key] = item

    return relabel_map(label_image, mapping)
[ "def", "relabel_non_zero", "(", "label_image", ",", "start", "=", "1", ")", ":", "if", "start", "<=", "0", ":", "raise", "ArgumentError", "(", "'The starting value can not be 0 or lower.'", ")", "l", "=", "list", "(", "scipy", ".", "unique", "(", "label_image", ")", ")", "if", "0", "in", "l", ":", "l", ".", "remove", "(", "0", ")", "mapping", "=", "dict", "(", ")", "mapping", "[", "0", "]", "=", "0", "for", "key", ",", "item", "in", "zip", "(", "l", ",", "list", "(", "range", "(", "start", ",", "len", "(", "l", ")", "+", "start", ")", ")", ")", ":", "mapping", "[", "key", "]", "=", "item", "return", "relabel_map", "(", "label_image", ",", "mapping", ")" ]
r""" Relabel the regions of a label image. Re-processes the labels to make them consecutively and starting from start. Keeps all zero (0) labels, as they are considered background. Parameters ---------- label_image : array_like A nD label map. start : integer The id of the first label to assign Returns ------- relabel_map : ndarray The relabelled label map. See also -------- relabel
[ "r", "Relabel", "the", "regions", "of", "a", "label", "image", ".", "Re", "-", "processes", "the", "labels", "to", "make", "them", "consecutively", "and", "starting", "from", "start", ".", "Keeps", "all", "zero", "(", "0", ")", "labels", "as", "they", "are", "considered", "background", ".", "Parameters", "----------", "label_image", ":", "array_like", "A", "nD", "label", "map", ".", "start", ":", "integer", "The", "id", "of", "the", "first", "label", "to", "assign", "Returns", "-------", "relabel_map", ":", "ndarray", "The", "relabelled", "label", "map", ".", "See", "also", "--------", "relabel" ]
python
train
26.53125
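A worked example of the mapping the function builds (relabel_map is medpy's helper that applies it; the array is illustrative):

import numpy as np

label_image = np.array([0, 3, 7, 3, 0])
# unique non-zero labels: [3, 7]; with start=1 the mapping becomes
#   {0: 0, 3: 1, 7: 2}
# so relabel_map(label_image, mapping) -> array([0, 1, 2, 1, 0])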
emencia/emencia_paste_djangocms_3
emencia_paste_djangocms_3/django_buildout/project/accounts/views.py
https://github.com/emencia/emencia_paste_djangocms_3/blob/29eabbcb17e21996a6e1d99592fc719dc8833b59/emencia_paste_djangocms_3/django_buildout/project/accounts/views.py#L56-L111
def register(self, request, **cleaned_data):
        """
        Given a username, email address and password, register a new
        user account, which will initially be inactive.

        Along with the new ``User`` object, a new
        ``registration.models.RegistrationProfile`` will be created,
        tied to that ``User``, containing the activation key which
        will be used for this account.

        Two emails will be sent. First one to the admin; this email should
        contain an activation link and a summary of the new user's details.
        Second one, to the user, to inform him that his request is pending.

        After the ``User`` and ``RegistrationProfile`` are created and
        the activation email is sent, the signal
        ``registration.signals.user_registered`` will be sent, with
        the new ``User`` as the keyword argument ``user`` and the
        class of this backend as the sender.
        """
        if Site._meta.installed:
            site = Site.objects.get_current()
        else:
            site = RequestSite(request)

        create_user = RegistrationProfile.objects.create_inactive_user
        new_user = create_user(
            cleaned_data['username'],
            cleaned_data['email'],
            cleaned_data['password1'],
            site,
            send_email=False
        )
        new_user.first_name = cleaned_data['first_name']
        new_user.last_name = cleaned_data['last_name']
        new_user.save()

        user_info = UserInfo(
            user=new_user,
            company=cleaned_data['company'],
            function=cleaned_data['function'],
            address=cleaned_data['address'],
            postal_code=cleaned_data['postal_code'],
            city=cleaned_data['city'],
            country=cleaned_data['country'],
            phone=cleaned_data['phone'],
        )
        user_info.save()

        send_activation_email(new_user, site, user_info)
        send_activation_pending_email(new_user, site, user_info)

        signals.user_registered.send(sender=self.__class__,
                                     user=new_user,
                                     request=request)
        return new_user
[ "def", "register", "(", "self", ",", "request", ",", "*", "*", "cleaned_data", ")", ":", "if", "Site", ".", "_meta", ".", "installed", ":", "site", "=", "Site", ".", "objects", ".", "get_current", "(", ")", "else", ":", "site", "=", "RequestSite", "(", "request", ")", "create_user", "=", "RegistrationProfile", ".", "objects", ".", "create_inactive_user", "new_user", "=", "create_user", "(", "cleaned_data", "[", "'username'", "]", ",", "cleaned_data", "[", "'email'", "]", ",", "cleaned_data", "[", "'password1'", "]", ",", "site", ",", "send_email", "=", "False", ")", "new_user", ".", "first_name", "=", "cleaned_data", "[", "'first_name'", "]", "new_user", ".", "last_name", "=", "cleaned_data", "[", "'last_name'", "]", "new_user", ".", "save", "(", ")", "user_info", "=", "UserInfo", "(", "user", "=", "new_user", ",", "company", "=", "cleaned_data", "[", "'company'", "]", ",", "function", "=", "cleaned_data", "[", "'function'", "]", ",", "address", "=", "cleaned_data", "[", "'address'", "]", ",", "postal_code", "=", "cleaned_data", "[", "'postal_code'", "]", ",", "city", "=", "cleaned_data", "[", "'city'", "]", ",", "country", "=", "cleaned_data", "[", "'country'", "]", ",", "phone", "=", "cleaned_data", "[", "'phone'", "]", ",", ")", "user_info", ".", "save", "(", ")", "send_activation_email", "(", "new_user", ",", "site", ",", "user_info", ")", "send_activation_pending_email", "(", "new_user", ",", "site", ",", "user_info", ")", "signals", ".", "user_registered", ".", "send", "(", "sender", "=", "self", ".", "__class__", ",", "user", "=", "new_user", ",", "request", "=", "request", ")", "return", "new_user" ]
Given a username, email address and password, register a new
user account, which will initially be inactive.

Along with the new ``User`` object, a new
``registration.models.RegistrationProfile`` will be created,
tied to that ``User``, containing the activation key which
will be used for this account.

Two emails will be sent. First one to the admin; this email should
contain an activation link and a summary of the new user's details.
Second one, to the user, to inform him that his request is pending.

After the ``User`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``registration.signals.user_registered`` will be sent, with
the new ``User`` as the keyword argument ``user`` and the
class of this backend as the sender.
[ "Given", "a", "username", "email", "address", "and", "password", "register", "a", "new", "user", "account", "which", "will", "initially", "be", "inactive", "." ]
python
train
37.928571
gwpy/gwpy
gwpy/timeseries/io/core.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/core.py#L48-L66
def _join_factory(cls, gap, pad): """Build a joiner for the given cls, and the given padding options """ if issubclass(cls, dict): def _join(data): out = cls() data = list(data) while data: tsd = data.pop(0) out.append(tsd, gap=gap, pad=pad) del tsd return out else: from .. import TimeSeriesBaseList def _join(arrays): list_ = TimeSeriesBaseList(*arrays) return list_.join(pad=pad, gap=gap) return _join
[ "def", "_join_factory", "(", "cls", ",", "gap", ",", "pad", ")", ":", "if", "issubclass", "(", "cls", ",", "dict", ")", ":", "def", "_join", "(", "data", ")", ":", "out", "=", "cls", "(", ")", "data", "=", "list", "(", "data", ")", "while", "data", ":", "tsd", "=", "data", ".", "pop", "(", "0", ")", "out", ".", "append", "(", "tsd", ",", "gap", "=", "gap", ",", "pad", "=", "pad", ")", "del", "tsd", "return", "out", "else", ":", "from", ".", ".", "import", "TimeSeriesBaseList", "def", "_join", "(", "arrays", ")", ":", "list_", "=", "TimeSeriesBaseList", "(", "*", "arrays", ")", "return", "list_", ".", "join", "(", "pad", "=", "pad", ",", "gap", "=", "gap", ")", "return", "_join" ]
Build a joiner for the given cls, and the given padding options
[ "Build", "a", "joiner", "for", "the", "given", "cls", "and", "the", "given", "padding", "options" ]
python
train
29
pysal/giddy
giddy/markov.py
https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L858-L932
def chi2(T1, T2): """ chi-squared test of difference between two transition matrices. Parameters ---------- T1 : array (k, k), matrix of transitions (counts). T2 : array (k, k), matrix of transitions (counts) to use to form the probabilities under the null. Returns ------- : tuple (3 elements). (chi2 value, pvalue, degrees of freedom). Examples -------- >>> import libpysal >>> from giddy.markov import Spatial_Markov, chi2 >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv")) >>> years = list(range(1929, 2010)) >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose() >>> rpci = pci/(pci.mean(axis=0)) >>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read() >>> w.transform='r' >>> sm = Spatial_Markov(rpci, w, fixed=True) >>> T1 = sm.T[0] >>> T1 array([[562., 22., 1., 0.], [ 12., 201., 22., 0.], [ 0., 17., 97., 4.], [ 0., 0., 3., 19.]]) >>> T2 = sm.transitions >>> T2 array([[884., 77., 4., 0.], [ 68., 794., 87., 3.], [ 1., 92., 815., 51.], [ 1., 0., 60., 903.]]) >>> chi2(T1,T2) (23.39728441473295, 0.005363116704861337, 9) Notes ----- Second matrix is used to form the probabilities under the null. Marginal sums from first matrix are distributed across these probabilities under the null. In other words the observed transitions are taken from T1 while the expected transitions are formed as follows .. math:: E_{i,j} = \sum_j T1_{i,j} * T2_{i,j}/\sum_j T2_{i,j} Degrees of freedom corrected for any rows in either T1 or T2 that have zero total transitions. """ rs2 = T2.sum(axis=1) rs1 = T1.sum(axis=1) rs2nz = rs2 > 0 rs1nz = rs1 > 0 dof1 = sum(rs1nz) dof2 = sum(rs2nz) rs2 = rs2 + (rs2 == 0) dof = (dof1 - 1) * (dof2 - 1) p = np.diag(1 / rs2) * np.matrix(T2) E = np.diag(rs1) * np.matrix(p) num = T1 - E num = np.multiply(num, num) E = E + (E == 0) chi2 = num / E chi2 = chi2.sum() pvalue = 1 - stats.chi2.cdf(chi2, dof) return chi2, pvalue, dof
[ "def", "chi2", "(", "T1", ",", "T2", ")", ":", "rs2", "=", "T2", ".", "sum", "(", "axis", "=", "1", ")", "rs1", "=", "T1", ".", "sum", "(", "axis", "=", "1", ")", "rs2nz", "=", "rs2", ">", "0", "rs1nz", "=", "rs1", ">", "0", "dof1", "=", "sum", "(", "rs1nz", ")", "dof2", "=", "sum", "(", "rs2nz", ")", "rs2", "=", "rs2", "+", "(", "rs2", "==", "0", ")", "dof", "=", "(", "dof1", "-", "1", ")", "*", "(", "dof2", "-", "1", ")", "p", "=", "np", ".", "diag", "(", "1", "/", "rs2", ")", "*", "np", ".", "matrix", "(", "T2", ")", "E", "=", "np", ".", "diag", "(", "rs1", ")", "*", "np", ".", "matrix", "(", "p", ")", "num", "=", "T1", "-", "E", "num", "=", "np", ".", "multiply", "(", "num", ",", "num", ")", "E", "=", "E", "+", "(", "E", "==", "0", ")", "chi2", "=", "num", "/", "E", "chi2", "=", "chi2", ".", "sum", "(", ")", "pvalue", "=", "1", "-", "stats", ".", "chi2", ".", "cdf", "(", "chi2", ",", "dof", ")", "return", "chi2", ",", "pvalue", ",", "dof" ]
chi-squared test of difference between two transition matrices. Parameters ---------- T1 : array (k, k), matrix of transitions (counts). T2 : array (k, k), matrix of transitions (counts) to use to form the probabilities under the null. Returns ------- : tuple (3 elements). (chi2 value, pvalue, degrees of freedom). Examples -------- >>> import libpysal >>> from giddy.markov import Spatial_Markov, chi2 >>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv")) >>> years = list(range(1929, 2010)) >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose() >>> rpci = pci/(pci.mean(axis=0)) >>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read() >>> w.transform='r' >>> sm = Spatial_Markov(rpci, w, fixed=True) >>> T1 = sm.T[0] >>> T1 array([[562., 22., 1., 0.], [ 12., 201., 22., 0.], [ 0., 17., 97., 4.], [ 0., 0., 3., 19.]]) >>> T2 = sm.transitions >>> T2 array([[884., 77., 4., 0.], [ 68., 794., 87., 3.], [ 1., 92., 815., 51.], [ 1., 0., 60., 903.]]) >>> chi2(T1,T2) (23.39728441473295, 0.005363116704861337, 9) Notes ----- Second matrix is used to form the probabilities under the null. Marginal sums from first matrix are distributed across these probabilities under the null. In other words the observed transitions are taken from T1 while the expected transitions are formed as follows .. math:: E_{i,j} = \sum_j T1_{i,j} * T2_{i,j}/\sum_j T2_{i,j} Degrees of freedom corrected for any rows in either T1 or T2 that have zero total transitions.
[ "chi", "-", "squared", "test", "of", "difference", "between", "two", "transition", "matrices", "." ]
python
train
29.853333