Dataset columns:

    column             type           lengths / values
    repo               string         7 to 54 chars
    path               string         4 to 192 chars
    url                string         87 to 284 chars
    code               string         78 to 104k chars
    code_tokens        sequence
    docstring          string         1 to 46.9k chars
    docstring_tokens   sequence
    language           stringclasses  1 value
    partition          stringclasses  3 values
mbj4668/pyang
pyang/yang_parser.py
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/yang_parser.py#L51-L78
def skip(self):
    """Skip whitespace and count position"""
    buflen = len(self.buf)
    while True:
        self.buf = self.buf.lstrip()
        if self.buf == '':
            self.readline()
            buflen = len(self.buf)
        else:
            self.offset += (buflen - len(self.buf))
            break
    # do not keep comments in the syntax tree
    if not self.keep_comments:
        # skip line comment
        if self.buf[0] == '/':
            if self.buf[1] == '/':
                self.readline()
                return self.skip()
            # skip block comment
            elif self.buf[1] == '*':
                i = self.buf.find('*/')
                while i == -1:
                    self.readline()
                    i = self.buf.find('*/')
                self.set_buf(i + 2)
                return self.skip()
[ "def", "skip", "(", "self", ")", ":", "buflen", "=", "len", "(", "self", ".", "buf", ")", "while", "True", ":", "self", ".", "buf", "=", "self", ".", "buf", ".", "lstrip", "(", ")", "if", "self", ".", "buf", "==", "''", ":", "self", ".", "readline", "(", ")", "buflen", "=", "len", "(", "self", ".", "buf", ")", "else", ":", "self", ".", "offset", "+=", "(", "buflen", "-", "len", "(", "self", ".", "buf", ")", ")", "break", "# do not keep comments in the syntax tree", "if", "not", "self", ".", "keep_comments", ":", "# skip line comment", "if", "self", ".", "buf", "[", "0", "]", "==", "'/'", ":", "if", "self", ".", "buf", "[", "1", "]", "==", "'/'", ":", "self", ".", "readline", "(", ")", "return", "self", ".", "skip", "(", ")", "# skip block comment", "elif", "self", ".", "buf", "[", "1", "]", "==", "'*'", ":", "i", "=", "self", ".", "buf", ".", "find", "(", "'*/'", ")", "while", "i", "==", "-", "1", ":", "self", ".", "readline", "(", ")", "i", "=", "self", ".", "buf", ".", "find", "(", "'*/'", ")", "self", ".", "set_buf", "(", "i", "+", "2", ")", "return", "self", ".", "skip", "(", ")" ]
Skip whitespace and count position
[ "Skip", "whitespace", "and", "count", "position" ]
python
train
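A minimal runnable sketch of the column bookkeeping skip() relies on: lstrip() removes leading whitespace and the length difference says how many columns were consumed. The buffer string here is hypothetical; pyang's real tokenizer refills self.buf via readline().

# hypothetical one-line buffer standing in for the tokenizer state
buf = "   leaf foo;"
buflen = len(buf)
buf = buf.lstrip()
offset = buflen - len(buf)   # columns consumed by whitespace
assert offset == 3 and buf.startswith("leaf")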
Jajcus/pyxmpp2
pyxmpp2/ext/vcard.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/vcard.py#L1399-L1415
def __make_fn(self):
    """Initialize the mandatory `self.fn` from `self.n`.

    This is a workaround for buggy clients which set only one of them."""
    s = []
    if self.n.prefix:
        s.append(self.n.prefix)
    if self.n.given:
        s.append(self.n.given)
    if self.n.middle:
        s.append(self.n.middle)
    if self.n.family:
        s.append(self.n.family)
    if self.n.suffix:
        s.append(self.n.suffix)
    s = u" ".join(s)
    self.content["FN"] = VCardString("FN", s, empty_ok = True)
[ "def", "__make_fn", "(", "self", ")", ":", "s", "=", "[", "]", "if", "self", ".", "n", ".", "prefix", ":", "s", ".", "append", "(", "self", ".", "n", ".", "prefix", ")", "if", "self", ".", "n", ".", "given", ":", "s", ".", "append", "(", "self", ".", "n", ".", "given", ")", "if", "self", ".", "n", ".", "middle", ":", "s", ".", "append", "(", "self", ".", "n", ".", "middle", ")", "if", "self", ".", "n", ".", "family", ":", "s", ".", "append", "(", "self", ".", "n", ".", "family", ")", "if", "self", ".", "n", ".", "suffix", ":", "s", ".", "append", "(", "self", ".", "n", ".", "suffix", ")", "s", "=", "u\" \"", ".", "join", "(", "s", ")", "self", ".", "content", "[", "\"FN\"", "]", "=", "VCardString", "(", "\"FN\"", ",", "s", ",", "empty_ok", "=", "True", ")" ]
Initialize the mandatory `self.fn` from `self.n`. This is a workaround for buggy clients which set only one of them.
[ "Initialize", "the", "mandatory", "self", ".", "fn", "from", "self", ".", "n", "." ]
python
valid
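A runnable sketch of the same join logic, with hypothetical name components standing in for self.n and the VCardString wrapper omitted: empty or missing parts are skipped, the rest are space-joined.

# hypothetical vCard N components
prefix, given, middle, family, suffix = "Dr.", "John", "", "Smith", None
parts = [p for p in (prefix, given, middle, family, suffix) if p]
fn = u" ".join(parts)
assert fn == "Dr. John Smith"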
3ll3d00d/vibe
backend/src/analyser/common/signal.py
https://github.com/3ll3d00d/vibe/blob/124b029f13ac746723e92cb47e9cb56edd2e54b5/backend/src/analyser/common/signal.py#L92-L120
def spectrum(self, ref=None, segmentLengthMultiplier=1, mode=None, **kwargs):
    """
    analyses the source to generate the linear spectrum.
    :param ref: the reference value for dB purposes.
    :param segmentLengthMultiplier: allow for increased resolution.
    :param mode: cq or none.
    :return:
        f : ndarray
            Array of sample frequencies.
        Pxx : ndarray
            linear spectrum.
    """

    def analysisFunc(x, nperseg, **kwargs):
        f, Pxx_spec = signal.welch(self.samples, self.fs, nperseg=nperseg,
                                   scaling='spectrum', detrend=False, **kwargs)
        Pxx_spec = np.sqrt(Pxx_spec)
        # it seems a 3dB adjustment is required to account for the change in nperseg
        if x > 0:
            Pxx_spec = Pxx_spec / (10 ** ((3 * x) / 20))
        if ref is not None:
            Pxx_spec = librosa.amplitude_to_db(Pxx_spec, ref)
        return f, Pxx_spec

    if mode == 'cq':
        return self._cq(analysisFunc, segmentLengthMultiplier)
    else:
        return analysisFunc(0, self.getSegmentLength() * segmentLengthMultiplier, **kwargs)
[ "def", "spectrum", "(", "self", ",", "ref", "=", "None", ",", "segmentLengthMultiplier", "=", "1", ",", "mode", "=", "None", ",", "*", "*", "kwargs", ")", ":", "def", "analysisFunc", "(", "x", ",", "nperseg", ",", "*", "*", "kwargs", ")", ":", "f", ",", "Pxx_spec", "=", "signal", ".", "welch", "(", "self", ".", "samples", ",", "self", ".", "fs", ",", "nperseg", "=", "nperseg", ",", "scaling", "=", "'spectrum'", ",", "detrend", "=", "False", ",", "*", "*", "kwargs", ")", "Pxx_spec", "=", "np", ".", "sqrt", "(", "Pxx_spec", ")", "# it seems a 3dB adjustment is required to account for the change in nperseg", "if", "x", ">", "0", ":", "Pxx_spec", "=", "Pxx_spec", "/", "(", "10", "**", "(", "(", "3", "*", "x", ")", "/", "20", ")", ")", "if", "ref", "is", "not", "None", ":", "Pxx_spec", "=", "librosa", ".", "amplitude_to_db", "(", "Pxx_spec", ",", "ref", ")", "return", "f", ",", "Pxx_spec", "if", "mode", "==", "'cq'", ":", "return", "self", ".", "_cq", "(", "analysisFunc", ",", "segmentLengthMultiplier", ")", "else", ":", "return", "analysisFunc", "(", "0", ",", "self", ".", "getSegmentLength", "(", ")", "*", "segmentLengthMultiplier", ",", "*", "*", "kwargs", ")" ]
analyses the source to generate the linear spectrum.
:param ref: the reference value for dB purposes.
:param segmentLengthMultiplier: allow for increased resolution.
:param mode: cq or none.
:return:
    f : ndarray
        Array of sample frequencies.
    Pxx : ndarray
        linear spectrum.
[ "analyses", "the", "source", "to", "generate", "the", "linear", "spectrum", ".", ":", "param", "ref", ":", "the", "reference", "value", "for", "dB", "purposes", ".", ":", "param", "segmentLengthMultiplier", ":", "allow", "for", "increased", "resolution", ".", ":", "param", "mode", ":", "cq", "or", "none", ".", ":", "return", ":", "f", ":", "ndarray", "Array", "of", "sample", "frequencies", ".", "Pxx", ":", "ndarray", "linear", "spectrum", "." ]
python
train
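A self-contained sketch of the underlying Welch call, assuming scipy and numpy are available; the 50 Hz test tone, sample rate, and nperseg value are illustrative and not taken from the vibe codebase. scaling='spectrum' returns power per segment, and the square root recovers amplitude, mirroring the Pxx_spec handling above.

import numpy as np
from scipy import signal

fs = 1000                                  # assumed sample rate
t = np.arange(0, 1.0, 1.0 / fs)
x = np.sin(2 * np.pi * 50 * t)             # 50 Hz test tone
f, Pxx = signal.welch(x, fs, nperseg=256, scaling='spectrum', detrend=False)
amplitude = np.sqrt(Pxx)
print(f[np.argmax(amplitude)])             # peak frequency near 50 Hz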
crs4/pydoop
pydoop/hdfs/__init__.py
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L340-L353
def move(src, dest, user=None):
    """
    Move or rename src to dest.
    """
    src_host, src_port, src_path = path.split(src, user)
    dest_host, dest_port, dest_path = path.split(dest, user)
    src_fs = hdfs(src_host, src_port, user)
    dest_fs = hdfs(dest_host, dest_port, user)
    try:
        retval = src_fs.move(src_path, dest_fs, dest_path)
        return retval
    finally:
        src_fs.close()
        dest_fs.close()
[ "def", "move", "(", "src", ",", "dest", ",", "user", "=", "None", ")", ":", "src_host", ",", "src_port", ",", "src_path", "=", "path", ".", "split", "(", "src", ",", "user", ")", "dest_host", ",", "dest_port", ",", "dest_path", "=", "path", ".", "split", "(", "dest", ",", "user", ")", "src_fs", "=", "hdfs", "(", "src_host", ",", "src_port", ",", "user", ")", "dest_fs", "=", "hdfs", "(", "dest_host", ",", "dest_port", ",", "user", ")", "try", ":", "retval", "=", "src_fs", ".", "move", "(", "src_path", ",", "dest_fs", ",", "dest_path", ")", "return", "retval", "finally", ":", "src_fs", ".", "close", "(", ")", "dest_fs", ".", "close", "(", ")" ]
Move or rename src to dest.
[ "Move", "or", "rename", "src", "to", "dest", "." ]
python
train
saltstack/salt
salt/modules/rest_service.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rest_service.py#L128-L169
def status(name, sig=None):
    '''
    Return the status for a service via rest_sample.
    If the name contains globbing, a dict mapping service name to True/False
    values is returned.

    .. versionadded:: 2015.8.0

    .. versionchanged:: 2018.3.0
        The service name can now be a glob (e.g. ``salt*``)

    Args:
        name (str): The name of the service to check
        sig (str): Not implemented

    Returns:
        bool: True if running, False otherwise
        dict: Maps service name to True if running, False otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.status <service name>
    '''
    proxy_fn = 'rest_sample.service_status'
    contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
    if contains_globbing:
        services = fnmatch.filter(get_all(), name)
    else:
        services = [name]
    results = {}
    for service in services:
        resp = __proxy__[proxy_fn](service)
        if resp['comment'] == 'running':
            results[service] = True
        else:
            results[service] = False

    if contains_globbing:
        return results
    return results[name]
[ "def", "status", "(", "name", ",", "sig", "=", "None", ")", ":", "proxy_fn", "=", "'rest_sample.service_status'", "contains_globbing", "=", "bool", "(", "re", ".", "search", "(", "r'\\*|\\?|\\[.+\\]'", ",", "name", ")", ")", "if", "contains_globbing", ":", "services", "=", "fnmatch", ".", "filter", "(", "get_all", "(", ")", ",", "name", ")", "else", ":", "services", "=", "[", "name", "]", "results", "=", "{", "}", "for", "service", "in", "services", ":", "resp", "=", "__proxy__", "[", "proxy_fn", "]", "(", "service", ")", "if", "resp", "[", "'comment'", "]", "==", "'running'", ":", "results", "[", "service", "]", "=", "True", "else", ":", "results", "[", "service", "]", "=", "False", "if", "contains_globbing", ":", "return", "results", "return", "results", "[", "name", "]" ]
Return the status for a service via rest_sample.
If the name contains globbing, a dict mapping service name to True/False
values is returned.

.. versionadded:: 2015.8.0

.. versionchanged:: 2018.3.0
    The service name can now be a glob (e.g. ``salt*``)

Args:
    name (str): The name of the service to check
    sig (str): Not implemented

Returns:
    bool: True if running, False otherwise
    dict: Maps service name to True if running, False otherwise

CLI Example:

.. code-block:: bash

    salt '*' service.status <service name>
[ "Return", "the", "status", "for", "a", "service", "via", "rest_sample", ".", "If", "the", "name", "contains", "globbing", "a", "dict", "mapping", "service", "name", "to", "True", "/", "False", "values", "is", "returned", "." ]
python
train
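A runnable sketch of the glob-detection-plus-fnmatch pattern used above; the service list is a hypothetical stand-in for get_all().

import fnmatch
import re

all_services = ['salt-minion', 'salt-master', 'sshd']   # hypothetical
name = 'salt*'
# same regex as the module: any of *, ?, or [...] marks a glob
contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
services = fnmatch.filter(all_services, name) if contains_globbing else [name]
assert services == ['salt-minion', 'salt-master']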
PyGithub/PyGithub
github/Repository.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Repository.py#L2417-L2429
def get_stats_participation(self):
    """
    :calls: `GET /repos/:owner/:repo/stats/participation <http://developer.github.com/v3/repos/statistics/#get-the-weekly-commit-count-for-the-repo-owner-and-everyone-else>`_
    :rtype: None or :class:`github.StatsParticipation.StatsParticipation`
    """
    headers, data = self._requester.requestJsonAndCheck(
        "GET",
        self.url + "/stats/participation"
    )
    if not data:
        return None
    else:
        return github.StatsParticipation.StatsParticipation(self._requester, headers, data, completed=True)
[ "def", "get_stats_participation", "(", "self", ")", ":", "headers", ",", "data", "=", "self", ".", "_requester", ".", "requestJsonAndCheck", "(", "\"GET\"", ",", "self", ".", "url", "+", "\"/stats/participation\"", ")", "if", "not", "data", ":", "return", "None", "else", ":", "return", "github", ".", "StatsParticipation", ".", "StatsParticipation", "(", "self", ".", "_requester", ",", "headers", ",", "data", ",", "completed", "=", "True", ")" ]
:calls: `GET /repos/:owner/:repo/stats/participation <http://developer.github.com/v3/repos/statistics/#get-the-weekly-commit-count-for-the-repo-owner-and-everyone-else>`_
:rtype: None or :class:`github.StatsParticipation.StatsParticipation`
[ ":", "calls", ":", "GET", "/", "repos", "/", ":", "owner", "/", ":", "repo", "/", "stats", "/", "participation", "<http", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "repos", "/", "statistics", "/", "#get", "-", "the", "-", "weekly", "-", "commit", "-", "count", "-", "for", "-", "the", "-", "repo", "-", "owner", "-", "and", "-", "everyone", "-", "else", ">", "_", ":", "rtype", ":", "None", "or", ":", "class", ":", "github", ".", "StatsParticipation", ".", "StatsParticipation" ]
python
train
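A usage sketch, assuming a valid personal access token; the None return covers GitHub still computing the statistics. stats.all (weekly commit counts) is PyGithub's attribute on StatsParticipation, but treat the exact fields as something to verify against the library docs.

from github import Github

g = Github("<personal-access-token>")      # hypothetical token
repo = g.get_repo("PyGithub/PyGithub")
stats = repo.get_stats_participation()
if stats is None:
    print("stats not ready yet")           # GitHub computes them lazily
else:
    print(stats.all)                       # weekly commit counts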
fossasia/knittingpattern
knittingpattern/Dumper/file.py
https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/Dumper/file.py#L142-L147
def _temporary_file(self, delete):
    """:return: a temporary file where the content is dumped to."""
    file = NamedTemporaryFile("w+", delete=delete, encoding=self.__encoding)
    self._file(file)
    return file
[ "def", "_temporary_file", "(", "self", ",", "delete", ")", ":", "file", "=", "NamedTemporaryFile", "(", "\"w+\"", ",", "delete", "=", "delete", ",", "encoding", "=", "self", ".", "__encoding", ")", "self", ".", "_file", "(", "file", ")", "return", "file" ]
:return: a temporary file where the content is dumped to.
[ ":", "return", ":", "a", "temporary", "file", "where", "the", "content", "is", "dumped", "to", "." ]
python
valid
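A minimal sketch of NamedTemporaryFile with the same mode; delete=False and the utf-8 encoding are assumptions for the demo, standing in for the delete flag and self.__encoding above.

from tempfile import NamedTemporaryFile

# delete=False keeps the file on disk after close
f = NamedTemporaryFile("w+", delete=False, encoding="utf-8")
f.write("dumped content")
f.seek(0)
print(f.name, f.read())
f.close()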
projectshift/shift-schema
shiftschema/translator.py
https://github.com/projectshift/shift-schema/blob/07787b540d3369bb37217ffbfbe629118edaf0eb/shiftschema/translator.py#L51-L89
def get_translations(self, locale):
    """
    Get translation dictionary

    Returns a dictionary for locale or raises an exception if such
    can't be located. If a dictionary for locale was previously loaded
    returns that, otherwise goes through registered locations and merges
    any found custom dictionaries with defaults.

    :param locale: str, locale to load translations
    :return: dict, translations dictionary
    """
    locale = self.normalize_locale(locale)
    if locale in self.translations:
        return self.translations[locale]

    translations = {}
    for path in self.dirs:
        file = os.path.join(path, '{}.py'.format(locale))
        if not os.path.isfile(file):
            continue
        loader = SourceFileLoader(locale, file)
        locale_dict = loader.load_module()
        if not hasattr(locale_dict, 'translations'):
            continue

        language = getattr(locale_dict, 'translations')
        if not translations:
            # first dictionary found becomes the base
            translations = language
        else:
            # merge subsequent dictionaries into what was found so far
            translations = dict(translations.items() | language.items())

    if translations:
        self.translations[locale] = translations
        return translations

    err = 'No translations found for locale [{}]'
    raise NoTranslations(err.format(locale))
[ "def", "get_translations", "(", "self", ",", "locale", ")", ":", "locale", "=", "self", ".", "normalize_locale", "(", "locale", ")", "if", "locale", "in", "self", ".", "translations", ":", "return", "self", ".", "translations", "[", "locale", "]", "translations", "=", "{", "}", "for", "path", "in", "self", ".", "dirs", ":", "file", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'{}.py'", ".", "format", "(", "locale", ")", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "file", ")", ":", "continue", "loader", "=", "SourceFileLoader", "(", "locale", ",", "file", ")", "locale_dict", "=", "loader", ".", "load_module", "(", ")", "if", "not", "hasattr", "(", "locale_dict", ",", "'translations'", ")", ":", "continue", "language", "=", "getattr", "(", "locale_dict", ",", "'translations'", ")", "if", "translations", ":", "translations", "=", "language", "else", ":", "merged", "=", "dict", "(", "translations", ".", "items", "(", ")", "|", "language", ".", "items", "(", ")", ")", "translations", "=", "merged", "if", "translations", ":", "self", ".", "translations", "[", "locale", "]", "=", "translations", "return", "translations", "err", "=", "'No translations found for locale [{}]'", "raise", "NoTranslations", "(", "err", ".", "format", "(", "locale", ")", ")" ]
Get translation dictionary

Returns a dictionary for locale or raises an exception if such
can't be located. If a dictionary for locale was previously loaded
returns that, otherwise goes through registered locations and merges
any found custom dictionaries with defaults.

:param locale: str, locale to load translations
:return: dict, translations dictionary
[ "Get", "translation", "dictionary", "Returns", "a", "dictionary", "for", "locale", "or", "raises", "an", "exception", "if", "such", "can", "t", "be", "located", ".", "If", "a", "dictionary", "for", "locale", "was", "previously", "loaded", "returns", "that", "otherwise", "goes", "through", "registered", "locations", "and", "merges", "any", "found", "custom", "dictionaries", "with", "defaults", "." ]
python
train
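A runnable sketch of the items()-union merge used above, with hypothetical dictionaries. Note that the union is a set, so on a key clash either value may end up winning; that caveat matters when layering custom dictionaries over defaults.

defaults = {'required': 'Required field', 'length': 'Too long'}
custom = {'length': 'Value is too long'}
# union of item tuples; ordering of the resulting set is unspecified
merged = dict(defaults.items() | custom.items())
assert merged.keys() == {'required', 'length'}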
LogicalDash/LiSE
allegedb/allegedb/__init__.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/allegedb/allegedb/__init__.py#L227-L229
def setnode(delta, graph, node, exists):
    """Change a delta to say that a node was created or deleted"""
    delta.setdefault(graph, {}).setdefault('nodes', {})[node] = bool(exists)
[ "def", "setnode", "(", "delta", ",", "graph", ",", "node", ",", "exists", ")", ":", "delta", ".", "setdefault", "(", "graph", ",", "{", "}", ")", ".", "setdefault", "(", "'nodes'", ",", "{", "}", ")", "[", "node", "]", "=", "bool", "(", "exists", ")" ]
Change a delta to say that a node was created or deleted
[ "Change", "a", "delta", "to", "say", "that", "a", "node", "was", "created", "or", "deleted" ]
python
train
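Since setnode is a pure helper, a copy of it can be exercised directly; the graph and node names below are hypothetical.

def setnode(delta, graph, node, exists):
    """Change a delta to say that a node was created or deleted"""
    delta.setdefault(graph, {}).setdefault('nodes', {})[node] = bool(exists)

delta = {}
setnode(delta, 'physical', 'kobold', 1)       # truthy -> created
setnode(delta, 'physical', 'shrubbery', None) # falsy -> deleted
assert delta == {'physical': {'nodes': {'kobold': True, 'shrubbery': False}}}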
ssalentin/plip
plip/modules/detection.py
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/detection.py#L232-L260
def halogen(acceptor, donor):
    """Detect all halogen bonds of the type Y-O...X-C"""
    data = namedtuple('halogenbond', 'acc acc_orig_idx don don_orig_idx distance don_angle acc_angle restype '
                                     'resnr reschain restype_l resnr_l reschain_l donortype acctype sidechain')
    pairings = []
    for acc, don in itertools.product(acceptor, donor):
        dist = euclidean3d(acc.o.coords, don.x.coords)
        if not config.MIN_DIST < dist < config.HALOGEN_DIST_MAX:
            continue
        vec1, vec2 = vector(acc.o.coords, acc.y.coords), vector(acc.o.coords, don.x.coords)
        vec3, vec4 = vector(don.x.coords, acc.o.coords), vector(don.x.coords, don.c.coords)
        acc_angle, don_angle = vecangle(vec1, vec2), vecangle(vec3, vec4)
        is_sidechain_hal = acc.o.OBAtom.GetResidue().GetAtomProperty(acc.o.OBAtom, 8)  # Check if sidechain atom
        if not config.HALOGEN_ACC_ANGLE - config.HALOGEN_ANGLE_DEV < acc_angle \
                < config.HALOGEN_ACC_ANGLE + config.HALOGEN_ANGLE_DEV:
            continue
        if not config.HALOGEN_DON_ANGLE - config.HALOGEN_ANGLE_DEV < don_angle \
                < config.HALOGEN_DON_ANGLE + config.HALOGEN_ANGLE_DEV:
            continue
        restype, reschain, resnr = whichrestype(acc.o), whichchain(acc.o), whichresnumber(acc.o)
        restype_l, reschain_l, resnr_l = whichrestype(don.orig_x), whichchain(don.orig_x), whichresnumber(don.orig_x)
        contact = data(acc=acc, acc_orig_idx=acc.o_orig_idx, don=don, don_orig_idx=don.x_orig_idx,
                       distance=dist, don_angle=don_angle, acc_angle=acc_angle,
                       restype=restype, resnr=resnr, reschain=reschain,
                       restype_l=restype_l, reschain_l=reschain_l, resnr_l=resnr_l,
                       donortype=don.x.OBAtom.GetType(), acctype=acc.o.type, sidechain=is_sidechain_hal)
        pairings.append(contact)
    return filter_contacts(pairings)
[ "def", "halogen", "(", "acceptor", ",", "donor", ")", ":", "data", "=", "namedtuple", "(", "'halogenbond'", ",", "'acc acc_orig_idx don don_orig_idx distance don_angle acc_angle restype '", "'resnr reschain restype_l resnr_l reschain_l donortype acctype sidechain'", ")", "pairings", "=", "[", "]", "for", "acc", ",", "don", "in", "itertools", ".", "product", "(", "acceptor", ",", "donor", ")", ":", "dist", "=", "euclidean3d", "(", "acc", ".", "o", ".", "coords", ",", "don", ".", "x", ".", "coords", ")", "if", "not", "config", ".", "MIN_DIST", "<", "dist", "<", "config", ".", "HALOGEN_DIST_MAX", ":", "continue", "vec1", ",", "vec2", "=", "vector", "(", "acc", ".", "o", ".", "coords", ",", "acc", ".", "y", ".", "coords", ")", ",", "vector", "(", "acc", ".", "o", ".", "coords", ",", "don", ".", "x", ".", "coords", ")", "vec3", ",", "vec4", "=", "vector", "(", "don", ".", "x", ".", "coords", ",", "acc", ".", "o", ".", "coords", ")", ",", "vector", "(", "don", ".", "x", ".", "coords", ",", "don", ".", "c", ".", "coords", ")", "acc_angle", ",", "don_angle", "=", "vecangle", "(", "vec1", ",", "vec2", ")", ",", "vecangle", "(", "vec3", ",", "vec4", ")", "is_sidechain_hal", "=", "acc", ".", "o", ".", "OBAtom", ".", "GetResidue", "(", ")", ".", "GetAtomProperty", "(", "acc", ".", "o", ".", "OBAtom", ",", "8", ")", "# Check if sidechain atom", "if", "not", "config", ".", "HALOGEN_ACC_ANGLE", "-", "config", ".", "HALOGEN_ANGLE_DEV", "<", "acc_angle", "<", "config", ".", "HALOGEN_ACC_ANGLE", "+", "config", ".", "HALOGEN_ANGLE_DEV", ":", "continue", "if", "not", "config", ".", "HALOGEN_DON_ANGLE", "-", "config", ".", "HALOGEN_ANGLE_DEV", "<", "don_angle", "<", "config", ".", "HALOGEN_DON_ANGLE", "+", "config", ".", "HALOGEN_ANGLE_DEV", ":", "continue", "restype", ",", "reschain", ",", "resnr", "=", "whichrestype", "(", "acc", ".", "o", ")", ",", "whichchain", "(", "acc", ".", "o", ")", ",", "whichresnumber", "(", "acc", ".", "o", ")", "restype_l", ",", "reschain_l", ",", "resnr_l", "=", "whichrestype", "(", "don", ".", "orig_x", ")", ",", "whichchain", "(", "don", ".", "orig_x", ")", ",", "whichresnumber", "(", "don", ".", "orig_x", ")", "contact", "=", "data", "(", "acc", "=", "acc", ",", "acc_orig_idx", "=", "acc", ".", "o_orig_idx", ",", "don", "=", "don", ",", "don_orig_idx", "=", "don", ".", "x_orig_idx", ",", "distance", "=", "dist", ",", "don_angle", "=", "don_angle", ",", "acc_angle", "=", "acc_angle", ",", "restype", "=", "restype", ",", "resnr", "=", "resnr", ",", "reschain", "=", "reschain", ",", "restype_l", "=", "restype_l", ",", "reschain_l", "=", "reschain_l", ",", "resnr_l", "=", "resnr_l", ",", "donortype", "=", "don", ".", "x", ".", "OBAtom", ".", "GetType", "(", ")", ",", "acctype", "=", "acc", ".", "o", ".", "type", ",", "sidechain", "=", "is_sidechain_hal", ")", "pairings", ".", "append", "(", "contact", ")", "return", "filter_contacts", "(", "pairings", ")" ]
Detect all halogen bonds of the type Y-O...X-C
[ "Detect", "all", "halogen", "bonds", "of", "the", "type", "Y", "-", "O", "...", "X", "-", "C" ]
python
train
chaoss/grimoirelab-perceval-mozilla
perceval/backends/mozilla/crates.py
https://github.com/chaoss/grimoirelab-perceval-mozilla/blob/4514f8d3d609d3cb79d83c72d51fcc4b4a7daeb4/perceval/backends/mozilla/crates.py#L267-L273
def summary(self):
    """Get Crates.io summary"""
    path = urijoin(CRATES_API_URL, CATEGORY_SUMMARY)
    raw_content = self.fetch(path)
    return raw_content
[ "def", "summary", "(", "self", ")", ":", "path", "=", "urijoin", "(", "CRATES_API_URL", ",", "CATEGORY_SUMMARY", ")", "raw_content", "=", "self", ".", "fetch", "(", "path", ")", "return", "raw_content" ]
Get Crates.io summary
[ "Get", "Crates", ".", "io", "summary" ]
python
test
mitsei/dlkit
dlkit/json_/learning/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/managers.py#L1811-L1828
def get_objective_requisite_session(self, proxy):
    """Gets the session for examining objective requisites.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.learning.ObjectiveRequisiteSession) - an
            ``ObjectiveRequisiteSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_objective_requisite()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_objective_requisite()`` is ``true``.*

    """
    if not self.supports_objective_requisite():
        raise errors.Unimplemented()
    # pylint: disable=no-member
    return sessions.ObjectiveRequisiteSession(proxy=proxy, runtime=self._runtime)
[ "def", "get_objective_requisite_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_objective_requisite", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "ObjectiveRequisiteSession", "(", "proxy", "=", "proxy", ",", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets the session for examining objective requisites.

arg:    proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ObjectiveRequisiteSession) - an
        ``ObjectiveRequisiteSession``
raise:  NullArgument - ``proxy`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  Unimplemented - ``supports_objective_requisite()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_requisite()`` is ``true``.*
[ "Gets", "the", "session", "for", "examining", "objective", "requisites", "." ]
python
train
hawkular/hawkular-client-python
hawkular/metrics.py
https://github.com/hawkular/hawkular-client-python/blob/52371f9ebabbe310efee2a8ff8eb735ccc0654bb/hawkular/metrics.py#L223-L242
def query_metric_definitions(self, metric_type=None, id_filter=None, **tags):
    """
    Query available metric definitions.

    :param metric_type: A MetricType to be queried. If left to None, matches all the MetricTypes
    :param id_filter: Filter the id with regexp if tag filtering is used, otherwise a list of exact metric ids
    :param tags: A dict of tag key/value pairs. Uses Hawkular-Metrics tag query language for syntax
    """
    params = {}

    if id_filter is not None:
        params['id'] = id_filter

    if metric_type is not None:
        params['type'] = MetricType.short(metric_type)

    if len(tags) > 0:
        params['tags'] = self._transform_tags(**tags)

    return self._get(self._get_url(), **params)
[ "def", "query_metric_definitions", "(", "self", ",", "metric_type", "=", "None", ",", "id_filter", "=", "None", ",", "*", "*", "tags", ")", ":", "params", "=", "{", "}", "if", "id_filter", "is", "not", "None", ":", "params", "[", "'id'", "]", "=", "id_filter", "if", "metric_type", "is", "not", "None", ":", "params", "[", "'type'", "]", "=", "MetricType", ".", "short", "(", "metric_type", ")", "if", "len", "(", "tags", ")", ">", "0", ":", "params", "[", "'tags'", "]", "=", "self", ".", "_transform_tags", "(", "*", "*", "tags", ")", "return", "self", ".", "_get", "(", "self", ".", "_get_url", "(", ")", ",", "*", "*", "params", ")" ]
Query available metric definitions.

:param metric_type: A MetricType to be queried. If left to None, matches all
    the MetricTypes
:param id_filter: Filter the id with regexp if tag filtering is used,
    otherwise a list of exact metric ids
:param tags: A dict of tag key/value pairs. Uses Hawkular-Metrics tag query
    language for syntax
[ "Query", "available", "metric", "definitions", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/controller/sqlitedb.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/controller/sqlitedb.py#L181-L187
def _defaults(self, keys=None):
    """create an empty record"""
    d = {}
    keys = self._keys if keys is None else keys
    for key in keys:
        d[key] = None
    return d
[ "def", "_defaults", "(", "self", ",", "keys", "=", "None", ")", ":", "d", "=", "{", "}", "keys", "=", "self", ".", "_keys", "if", "keys", "is", "None", "else", "keys", "for", "key", "in", "keys", ":", "d", "[", "key", "]", "=", "None", "return", "d" ]
create an empty record
[ "create", "an", "empty", "record" ]
python
test
MostAwesomeDude/blackjack
blackjack.py
https://github.com/MostAwesomeDude/blackjack/blob/1346642e353719ab68c0dc3573aa33b688431bf8/blackjack.py#L65-L73
def flip(self):
    """
    Flip colors of a node and its children.
    """
    left = self.left._replace(red=not self.left.red)
    right = self.right._replace(red=not self.right.red)
    top = self._replace(left=left, right=right, red=not self.red)
    return top
[ "def", "flip", "(", "self", ")", ":", "left", "=", "self", ".", "left", ".", "_replace", "(", "red", "=", "not", "self", ".", "left", ".", "red", ")", "right", "=", "self", ".", "right", ".", "_replace", "(", "red", "=", "not", "self", ".", "right", ".", "red", ")", "top", "=", "self", ".", "_replace", "(", "left", "=", "left", ",", "right", "=", "right", ",", "red", "=", "not", "self", ".", "red", ")", "return", "top" ]
Flip colors of a node and its children.
[ "Flip", "colors", "of", "a", "node", "and", "its", "children", "." ]
python
train
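A self-contained sketch of the node type this method implies: the _replace calls suggest a namedtuple with red/left/right fields, so a hypothetical Node subclass can demonstrate the color flip. Flipping assumes both children exist, as it does in the red-black rebalancing where it is used.

from collections import namedtuple

class Node(namedtuple('Node', 'red left right')):
    def flip(self):
        left = self.left._replace(red=not self.left.red)
        right = self.right._replace(red=not self.right.red)
        return self._replace(left=left, right=right, red=not self.red)

# a black node with two red children flips to red with black children
root = Node(False, Node(True, None, None), Node(True, None, None))
top = root.flip()
assert top.red and not top.left.red and not top.right.red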
LudovicRousseau/pyscard
smartcard/ExclusiveTransmitCardConnection.py
https://github.com/LudovicRousseau/pyscard/blob/62e675028086c75656444cc21d563d9f08ebf8e7/smartcard/ExclusiveTransmitCardConnection.py#L63-L83
def unlock(self):
    '''Unlock card with SCardEndTransaction.'''
    component = self.component
    while True:
        if isinstance(
                component,
                smartcard.pcsc.PCSCCardConnection.PCSCCardConnection):
            hresult = SCardEndTransaction(component.hcard, SCARD_LEAVE_CARD)
            if 0 != hresult:
                raise CardConnectionException(
                    'Failed to unlock with SCardEndTransaction: ' +
                    SCardGetErrorMessage(hresult))
            else:
                # print('unlocked')
                pass
            break
        if hasattr(component, 'component'):
            component = component.component
        else:
            break
[ "def", "unlock", "(", "self", ")", ":", "component", "=", "self", ".", "component", "while", "True", ":", "if", "isinstance", "(", "component", ",", "smartcard", ".", "pcsc", ".", "PCSCCardConnection", ".", "PCSCCardConnection", ")", ":", "hresult", "=", "SCardEndTransaction", "(", "component", ".", "hcard", ",", "SCARD_LEAVE_CARD", ")", "if", "0", "!=", "hresult", ":", "raise", "CardConnectionException", "(", "'Failed to unlock with SCardEndTransaction: '", "+", "SCardGetErrorMessage", "(", "hresult", ")", ")", "else", ":", "# print('unlocked')", "pass", "break", "if", "hasattr", "(", "component", ",", "'component'", ")", ":", "component", "=", "component", ".", "component", "else", ":", "break" ]
Unlock card with SCardEndTransaction.
[ "Unlock", "card", "with", "SCardEndTransaction", "." ]
python
train
stain/forgetSQL
lib/forgetSQL.py
https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L429-L437
def save(self):
    """Save to database if anything has changed since last load"""
    if (self._new or
            (self._validID() and self._changed) or
            (self._updated and self._changed > self._updated)):
        # Don't save if we have not loaded existing data!
        self._saveDB()
        return True
    return False
[ "def", "save", "(", "self", ")", ":", "if", "(", "self", ".", "_new", "or", "(", "self", ".", "_validID", "(", ")", "and", "self", ".", "_changed", ")", "or", "(", "self", ".", "_updated", "and", "self", ".", "_changed", ">", "self", ".", "_updated", ")", ")", ":", "# Don't save if we have not loaded existing data!", "self", ".", "_saveDB", "(", ")", "return", "True", "return", "False" ]
Save to database if anything has changed since last load
[ "Save", "to", "database", "if", "anything", "has", "changed", "since", "last", "load" ]
python
train
senaite/senaite.core
bika/lims/content/calculation.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/calculation.py#L396-L411
def _getModuleMember(self, dotted_name, member):
    """Get the member object of a module.

    :param dotted_name: The dotted name of the module, e.g. 'scipy.special'
    :type dotted_name: string
    :param member: The name of the member function, e.g. 'gammaincinv'
    :type member: string
    :returns: member object or None
    """
    try:
        mod = importlib.import_module(dotted_name)
    except ImportError:
        return None
    members = dict(inspect.getmembers(mod))
    return members.get(member)
[ "def", "_getModuleMember", "(", "self", ",", "dotted_name", ",", "member", ")", ":", "try", ":", "mod", "=", "importlib", ".", "import_module", "(", "dotted_name", ")", "except", "ImportError", ":", "return", "None", "members", "=", "dict", "(", "inspect", ".", "getmembers", "(", "mod", ")", ")", "return", "members", ".", "get", "(", "member", ")" ]
Get the member object of a module.

:param dotted_name: The dotted name of the module, e.g. 'scipy.special'
:type dotted_name: string
:param member: The name of the member function, e.g. 'gammaincinv'
:type member: string
:returns: member object or None
[ "Get", "the", "member", "object", "of", "a", "module", "." ]
python
train
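A runnable sketch of the same importlib/inspect lookup, pointed at a module that is guaranteed to be importable.

import importlib
import inspect

mod = importlib.import_module('math')
members = dict(inspect.getmembers(mod))    # name -> object mapping
sqrt = members.get('sqrt')
assert sqrt is not None and sqrt(9.0) == 3.0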
PierreRust/apigpio
apigpio/apigpio.py
https://github.com/PierreRust/apigpio/blob/2b969f40e06219b43a43498d8baf87f5935ceab2/apigpio/apigpio.py#L301-L319
def u2i(uint32):
    """
    Converts a 32 bit unsigned number to signed.

    uint32:= an unsigned 32 bit number

    ...
    print(u2i(4294967272))
    -24
    print(u2i(37))
    37
    ...
    """
    mask = (2 ** 32) - 1
    if uint32 & (1 << 31):
        v = uint32 | ~mask
    else:
        v = uint32 & mask
    return v
[ "def", "u2i", "(", "uint32", ")", ":", "mask", "=", "(", "2", "**", "32", ")", "-", "1", "if", "uint32", "&", "(", "1", "<<", "31", ")", ":", "v", "=", "uint32", "|", "~", "mask", "else", ":", "v", "=", "uint32", "&", "mask", "return", "v" ]
Converts a 32 bit unsigned number to signed.

uint32:= an unsigned 32 bit number

...
print(u2i(4294967272))
-24
print(u2i(37))
37
...
[ "Converts", "a", "32", "bit", "unsigned", "number", "to", "signed", "." ]
python
train
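u2i is a pure function, so a copy of it can be checked directly against the values quoted in its own docstring.

def u2i(uint32):
    """Convert a 32 bit unsigned number to signed."""
    mask = (2 ** 32) - 1
    if uint32 & (1 << 31):        # sign bit set: extend with ones
        return uint32 | ~mask
    return uint32 & mask

assert u2i(4294967272) == -24     # 2**32 - 24 wraps to -24
assert u2i(37) == 37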
HydraChain/hydrachain
hydrachain/examples/native/fungible/fungible_contract.py
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/examples/native/fungible/fungible_contract.py#L156-L163
def issue_funds(ctx, amount='uint256', rtgs_hash='bytes32', returns=STATUS):
    "In the IOU fungible the supply is set by the Issuer, who issues funds."
    # allocate new issue as result of a new cash entry
    ctx.accounts[ctx.msg_sender] += amount
    ctx.issued_amounts[ctx.msg_sender] += amount
    # Store hash(rtgs)
    ctx.Issuance(ctx.msg_sender, rtgs_hash, amount)
    return OK
[ "def", "issue_funds", "(", "ctx", ",", "amount", "=", "'uint256'", ",", "rtgs_hash", "=", "'bytes32'", ",", "returns", "=", "STATUS", ")", ":", "# allocate new issue as result of a new cash entry", "ctx", ".", "accounts", "[", "ctx", ".", "msg_sender", "]", "+=", "amount", "ctx", ".", "issued_amounts", "[", "ctx", ".", "msg_sender", "]", "+=", "amount", "# Store hash(rtgs)", "ctx", ".", "Issuance", "(", "ctx", ".", "msg_sender", ",", "rtgs_hash", ",", "amount", ")", "return", "OK" ]
In the IOU fungible the supply is set by the Issuer, who issues funds.
[ "In", "the", "IOU", "fungible", "the", "supply", "is", "set", "by", "Issuer", "who", "issue", "funds", "." ]
python
test
offu/WeRoBot
werobot/client.py
https://github.com/offu/WeRoBot/blob/fd42109105b03f9acf45ebd9dcabb9d5cff98f3c/werobot/client.py#L751-L772
def send_image_message(self, user_id, media_id, kf_account=None):
    """
    Send an image message.

    :param user_id: User ID; this is the source of the `Message` you received
    :param media_id: Media ID of the image; it can be uploaded via :func:`upload_media`
    :param kf_account: Customer-service account that sends the message; defaults to None, where None means unspecified
    :return: the returned JSON data packet
    """
    data = {
        "touser": user_id,
        "msgtype": "image",
        "image": {
            "media_id": media_id
        }
    }
    if kf_account is not None:
        data['customservice'] = {'kf_account': kf_account}
    return self.post(
        url="https://api.weixin.qq.com/cgi-bin/message/custom/send",
        data=data
    )
[ "def", "send_image_message", "(", "self", ",", "user_id", ",", "media_id", ",", "kf_account", "=", "None", ")", ":", "data", "=", "{", "\"touser\"", ":", "user_id", ",", "\"msgtype\"", ":", "\"image\"", ",", "\"image\"", ":", "{", "\"media_id\"", ":", "media_id", "}", "}", "if", "kf_account", "is", "not", "None", ":", "data", "[", "'customservice'", "]", "=", "{", "'kf_account'", ":", "kf_account", "}", "return", "self", ".", "post", "(", "url", "=", "\"https://api.weixin.qq.com/cgi-bin/message/custom/send\"", ",", "data", "=", "data", ")" ]
Send an image message.

:param user_id: User ID; this is the source of the `Message` you received
:param media_id: Media ID of the image; it can be uploaded via :func:`upload_media`
:param kf_account: Customer-service account that sends the message; defaults to None, where None means unspecified
:return: the returned JSON data packet
[ "发送图片消息。" ]
python
train
saltstack/salt
salt/states/environ.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/environ.py#L34-L184
def setenv(name,
           value,
           false_unsets=False,
           clear_all=False,
           update_minion=False,
           permanent=False):
    '''
    Set the salt process environment variables.

    name
        The environment key to set. Must be a string.

    value
        Either a string or dict. When string, it will be the value
        set for the environment key of 'name' above.
        When a dict, each key/value pair represents an environment
        variable to set.

    false_unsets
        If a key's value is False and false_unsets is True, then the
        key will be removed from the salt process's environment dict
        entirely. If a key's value is False and false_unsets is not
        True, then the key's value will be set to an empty string.
        Default: False

    clear_all
        USE WITH CAUTION! This option can unset environment variables
        needed for salt to function properly.
        If clear_all is True, then any environment variables not
        defined in the environ dict will be deleted.
        Default: False

    update_minion
        If True, apply these environ changes to the main salt-minion
        process. If False, the environ changes will only affect the
        current salt subprocess.
        Default: False

    permanent
        On Windows minions this will set the environment variable in the
        registry so that it is always added as an environment variable
        when applications open. If you want to set the variable to HKLM
        instead of HKCU just pass in "HKLM" for this parameter. On all
        other minion types this will be ignored. Note: This will only
        take effect on applications opened after this has been set.

    Example:

    .. code-block:: yaml

        a_string_env:
           environ.setenv:
             - name: foo
             - value: bar
             - update_minion: True

        a_dict_env:
           environ.setenv:
             - name: does_not_matter
             - value:
                 foo: bar
                 baz: quux
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    environ = {}
    if isinstance(value, six.string_types) or value is False:
        environ[name] = value
    elif isinstance(value, dict):
        environ = value
    else:
        ret['result'] = False
        ret['comment'] = 'Environ value must be string, dict or False'
        return ret

    if clear_all is True:
        # Any keys not in 'environ' dict supplied by user will be unset
        to_unset = [key for key in os.environ if key not in environ]
        for key in to_unset:
            if false_unsets is not True:
                # This key value will change to ''
                ret['changes'].update({key: ''})
            else:
                # We're going to delete the key
                ret['changes'].update({key: None})

    current_environ = dict(os.environ)
    already_set = []
    for key, val in six.iteritems(environ):
        if val is False:
            # We unset this key from the environment if
            # false_unsets is True. Otherwise we want to set
            # the value to ''
            def key_exists():
                if salt.utils.platform.is_windows():
                    permanent_hive = 'HKCU'
                    permanent_key = 'Environment'
                    if permanent == 'HKLM':
                        permanent_hive = 'HKLM'
                        permanent_key = r'SYSTEM\CurrentControlSet\Control\Session Manager\Environment'

                    out = __utils__['reg.read_value'](permanent_hive, permanent_key, _norm_key(key))
                    return out['success'] is True
                else:
                    return False
            if current_environ.get(_norm_key(key), None) is None and not key_exists():
                # The key does not exist in environment
                if false_unsets is not True:
                    # This key will be added with value ''
                    ret['changes'].update({key: ''})
            else:
                # The key exists.
                if false_unsets is not True:
                    # Check to see if the value will change
                    if current_environ.get(_norm_key(key), None) != '':
                        # This key value will change to ''
                        ret['changes'].update({key: ''})
                else:
                    # We're going to delete the key
                    ret['changes'].update({key: None})
        elif current_environ.get(_norm_key(key), '') == val:
            already_set.append(key)
        else:
            ret['changes'].update({key: val})

    if __opts__['test']:
        if ret['changes']:
            ret['comment'] = 'Environ values will be changed'
        else:
            ret['comment'] = 'Environ values are already set with the correct values'
        return ret

    if ret['changes']:
        environ_ret = __salt__['environ.setenv'](environ,
                                                 false_unsets,
                                                 clear_all,
                                                 update_minion,
                                                 permanent)
        if not environ_ret:
            ret['result'] = False
            ret['comment'] = 'Failed to set environ variables'
            return ret
        ret['result'] = True
        ret['changes'] = environ_ret
        ret['comment'] = 'Environ values were set'
    else:
        ret['comment'] = 'Environ values were already set with the correct values'
    return ret
[ "def", "setenv", "(", "name", ",", "value", ",", "false_unsets", "=", "False", ",", "clear_all", "=", "False", ",", "update_minion", "=", "False", ",", "permanent", "=", "False", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "environ", "=", "{", "}", "if", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", "or", "value", "is", "False", ":", "environ", "[", "name", "]", "=", "value", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "environ", "=", "value", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Environ value must be string, dict or False'", "return", "ret", "if", "clear_all", "is", "True", ":", "# Any keys not in 'environ' dict supplied by user will be unset", "to_unset", "=", "[", "key", "for", "key", "in", "os", ".", "environ", "if", "key", "not", "in", "environ", "]", "for", "key", "in", "to_unset", ":", "if", "false_unsets", "is", "not", "True", ":", "# This key value will change to ''", "ret", "[", "'changes'", "]", ".", "update", "(", "{", "key", ":", "''", "}", ")", "else", ":", "# We're going to delete the key", "ret", "[", "'changes'", "]", ".", "update", "(", "{", "key", ":", "None", "}", ")", "current_environ", "=", "dict", "(", "os", ".", "environ", ")", "already_set", "=", "[", "]", "for", "key", ",", "val", "in", "six", ".", "iteritems", "(", "environ", ")", ":", "if", "val", "is", "False", ":", "# We unset this key from the environment if", "# false_unsets is True. Otherwise we want to set", "# the value to ''", "def", "key_exists", "(", ")", ":", "if", "salt", ".", "utils", ".", "platform", ".", "is_windows", "(", ")", ":", "permanent_hive", "=", "'HKCU'", "permanent_key", "=", "'Environment'", "if", "permanent", "==", "'HKLM'", ":", "permanent_hive", "=", "'HKLM'", "permanent_key", "=", "r'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment'", "out", "=", "__utils__", "[", "'reg.read_value'", "]", "(", "permanent_hive", ",", "permanent_key", ",", "_norm_key", "(", "key", ")", ")", "return", "out", "[", "'success'", "]", "is", "True", "else", ":", "return", "False", "if", "current_environ", ".", "get", "(", "_norm_key", "(", "key", ")", ",", "None", ")", "is", "None", "and", "not", "key_exists", "(", ")", ":", "# The key does not exist in environment", "if", "false_unsets", "is", "not", "True", ":", "# This key will be added with value ''", "ret", "[", "'changes'", "]", ".", "update", "(", "{", "key", ":", "''", "}", ")", "else", ":", "# The key exists.", "if", "false_unsets", "is", "not", "True", ":", "# Check to see if the value will change", "if", "current_environ", ".", "get", "(", "_norm_key", "(", "key", ")", ",", "None", ")", "!=", "''", ":", "# This key value will change to ''", "ret", "[", "'changes'", "]", ".", "update", "(", "{", "key", ":", "''", "}", ")", "else", ":", "# We're going to delete the key", "ret", "[", "'changes'", "]", ".", "update", "(", "{", "key", ":", "None", "}", ")", "elif", "current_environ", ".", "get", "(", "_norm_key", "(", "key", ")", ",", "''", ")", "==", "val", ":", "already_set", ".", "append", "(", "key", ")", "else", ":", "ret", "[", "'changes'", "]", ".", "update", "(", "{", "key", ":", "val", "}", ")", "if", "__opts__", "[", "'test'", "]", ":", "if", "ret", "[", "'changes'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Environ values will be changed'", "else", ":", "ret", "[", "'comment'", "]", "=", "'Environ values are already set with 
the correct values'", "return", "ret", "if", "ret", "[", "'changes'", "]", ":", "environ_ret", "=", "__salt__", "[", "'environ.setenv'", "]", "(", "environ", ",", "false_unsets", ",", "clear_all", ",", "update_minion", ",", "permanent", ")", "if", "not", "environ_ret", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to set environ variables'", "return", "ret", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'changes'", "]", "=", "environ_ret", "ret", "[", "'comment'", "]", "=", "'Environ values were set'", "else", ":", "ret", "[", "'comment'", "]", "=", "'Environ values were already set with the correct values'", "return", "ret" ]
Set the salt process environment variables.

name
    The environment key to set. Must be a string.

value
    Either a string or dict. When string, it will be the value set for the
    environment key of 'name' above. When a dict, each key/value pair
    represents an environment variable to set.

false_unsets
    If a key's value is False and false_unsets is True, then the key will be
    removed from the salt process's environment dict entirely. If a key's
    value is False and false_unsets is not True, then the key's value will
    be set to an empty string.
    Default: False

clear_all
    USE WITH CAUTION! This option can unset environment variables needed for
    salt to function properly. If clear_all is True, then any environment
    variables not defined in the environ dict will be deleted.
    Default: False

update_minion
    If True, apply these environ changes to the main salt-minion process.
    If False, the environ changes will only affect the current salt
    subprocess.
    Default: False

permanent
    On Windows minions this will set the environment variable in the
    registry so that it is always added as an environment variable when
    applications open. If you want to set the variable to HKLM instead of
    HKCU just pass in "HKLM" for this parameter. On all other minion types
    this will be ignored. Note: This will only take effect on applications
    opened after this has been set.

Example:

.. code-block:: yaml

    a_string_env:
       environ.setenv:
         - name: foo
         - value: bar
         - update_minion: True

    a_dict_env:
       environ.setenv:
         - name: does_not_matter
         - value:
             foo: bar
             baz: quux
[ "Set", "the", "salt", "process", "environment", "variables", "." ]
python
train
gwpy/gwpy
gwpy/table/io/gwf.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/gwf.py#L45-L63
def _row_from_frevent(frevent, columns, selection):
    """Generate a table row from an FrEvent

    Filtering (``selection``) is done here, rather than in the table reader,
    to enable filtering on columns that aren't being returned.
    """
    # read params
    params = dict(frevent.GetParam())
    params['time'] = float(LIGOTimeGPS(*frevent.GetGTime()))
    params['amplitude'] = frevent.GetAmplitude()
    params['probability'] = frevent.GetProbability()
    params['timeBefore'] = frevent.GetTimeBefore()
    params['timeAfter'] = frevent.GetTimeAfter()
    params['comment'] = frevent.GetComment()
    # filter
    if not all(op_(params[c], t) for c, op_, t in selection):
        return None
    # return event as list
    return [params[c] for c in columns]
[ "def", "_row_from_frevent", "(", "frevent", ",", "columns", ",", "selection", ")", ":", "# read params", "params", "=", "dict", "(", "frevent", ".", "GetParam", "(", ")", ")", "params", "[", "'time'", "]", "=", "float", "(", "LIGOTimeGPS", "(", "*", "frevent", ".", "GetGTime", "(", ")", ")", ")", "params", "[", "'amplitude'", "]", "=", "frevent", ".", "GetAmplitude", "(", ")", "params", "[", "'probability'", "]", "=", "frevent", ".", "GetProbability", "(", ")", "params", "[", "'timeBefore'", "]", "=", "frevent", ".", "GetTimeBefore", "(", ")", "params", "[", "'timeAfter'", "]", "=", "frevent", ".", "GetTimeAfter", "(", ")", "params", "[", "'comment'", "]", "=", "frevent", ".", "GetComment", "(", ")", "# filter", "if", "not", "all", "(", "op_", "(", "params", "[", "c", "]", ",", "t", ")", "for", "c", ",", "op_", ",", "t", "in", "selection", ")", ":", "return", "None", "# return event as list", "return", "[", "params", "[", "c", "]", "for", "c", "in", "columns", "]" ]
Generate a table row from an FrEvent

Filtering (``selection``) is done here, rather than in the table reader,
to enable filtering on columns that aren't being returned.
[ "Generate", "a", "table", "row", "from", "an", "FrEvent" ]
python
train
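A runnable sketch of the selection shape this function expects: (column, operator, threshold) triples tested with all(). The column names and values below are hypothetical.

import operator

selection = [('amplitude', operator.gt, 5.0), ('time', operator.lt, 100.0)]
params = {'amplitude': 8.2, 'time': 42.0, 'comment': 'loud'}
# every condition must hold for the event to be kept
keep = all(op_(params[c], t) for c, op_, t in selection)
assert keep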
Capitains/MyCapytain
MyCapytain/resources/texts/remote/cts.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/texts/remote/cts.py#L119-L132
def getReffs(self, level=1, subreference=None):
    """ Reference available at a given level

    :param level: Depth required. If not set, should retrieve first encountered level (1 based)
    :type level: Int
    :param subreference: Subreference (optional)
    :type subreference: CtsReference
    :rtype: [text_type]
    :returns: List of levels
    """
    if self.depth is not None:
        level += self.depth
    return self.getValidReff(level, subreference)
[ "def", "getReffs", "(", "self", ",", "level", "=", "1", ",", "subreference", "=", "None", ")", ":", "if", "self", ".", "depth", "is", "not", "None", ":", "level", "+=", "self", ".", "depth", "return", "self", ".", "getValidReff", "(", "level", ",", "subreference", ")" ]
Reference available at a given level

:param level: Depth required. If not set, should retrieve first encountered
    level (1 based)
:type level: Int
:param subreference: Subreference (optional)
:type subreference: CtsReference
:rtype: [text_type]
:returns: List of levels
[ "Reference", "available", "at", "a", "given", "level" ]
python
train
secdev/scapy
scapy/utils.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/utils.py#L1151-L1162
def read_block_epb(self, block, size):
    """Enhanced Packet Block"""
    intid, tshigh, tslow, caplen, wirelen = struct.unpack(
        self.endian + "5I",
        block[:20],
    )
    return (block[20:20 + caplen][:size],
            RawPcapNgReader.PacketMetadata(linktype=self.interfaces[intid][0],  # noqa: E501
                                           tsresol=self.interfaces[intid][2],  # noqa: E501
                                           tshigh=tshigh,
                                           tslow=tslow,
                                           wirelen=wirelen))
[ "def", "read_block_epb", "(", "self", ",", "block", ",", "size", ")", ":", "intid", ",", "tshigh", ",", "tslow", ",", "caplen", ",", "wirelen", "=", "struct", ".", "unpack", "(", "self", ".", "endian", "+", "\"5I\"", ",", "block", "[", ":", "20", "]", ",", ")", "return", "(", "block", "[", "20", ":", "20", "+", "caplen", "]", "[", ":", "size", "]", ",", "RawPcapNgReader", ".", "PacketMetadata", "(", "linktype", "=", "self", ".", "interfaces", "[", "intid", "]", "[", "0", "]", ",", "# noqa: E501", "tsresol", "=", "self", ".", "interfaces", "[", "intid", "]", "[", "2", "]", ",", "# noqa: E501", "tshigh", "=", "tshigh", ",", "tslow", "=", "tslow", ",", "wirelen", "=", "wirelen", ")", ")" ]
Enhanced Packet Block
[ "Enhanced", "Packet", "Block" ]
python
train
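A self-contained sketch of the header unpacking, using a hand-built little-endian block; the field values are made up for the demo. The first 20 bytes hold five 32-bit fields, and caplen bounds the payload slice.

import struct

# interface id, timestamp high, timestamp low, captured length, wire length
block = struct.pack("<5I", 0, 1, 2, 4, 60) + b"dataXXXX"
intid, tshigh, tslow, caplen, wirelen = struct.unpack("<5I", block[:20])
payload = block[20:20 + caplen]
assert payload == b"data" and wirelen == 60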
spyder-ide/spyder
spyder/preferences/appearance.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/preferences/appearance.py#L379-L391
def edit_scheme(self):
    """Edit current scheme."""
    dlg = self.scheme_editor_dialog
    dlg.set_scheme(self.current_scheme)

    if dlg.exec_():
        # Update temp scheme to reflect instant edits on the preview
        temporal_color_scheme = dlg.get_edited_color_scheme()
        for key in temporal_color_scheme:
            option = "temp/{0}".format(key)
            value = temporal_color_scheme[key]
            self.set_option(option, value)
        self.update_preview(scheme_name='temp')
[ "def", "edit_scheme", "(", "self", ")", ":", "dlg", "=", "self", ".", "scheme_editor_dialog", "dlg", ".", "set_scheme", "(", "self", ".", "current_scheme", ")", "if", "dlg", ".", "exec_", "(", ")", ":", "# Update temp scheme to reflect instant edits on the preview", "temporal_color_scheme", "=", "dlg", ".", "get_edited_color_scheme", "(", ")", "for", "key", "in", "temporal_color_scheme", ":", "option", "=", "\"temp/{0}\"", ".", "format", "(", "key", ")", "value", "=", "temporal_color_scheme", "[", "key", "]", "self", ".", "set_option", "(", "option", ",", "value", ")", "self", ".", "update_preview", "(", "scheme_name", "=", "'temp'", ")" ]
Edit current scheme.
[ "Edit", "current", "scheme", "." ]
python
train
grycap/RADL
radl/radl.py
https://github.com/grycap/RADL/blob/03ccabb0313a48a5aa0e20c1f7983fddcb95e9cb/radl/radl.py#L1190-L1200
def get(self, aspect):
    """Get a network, system or configure or contextualize with the same id as aspect passed."""
    classification = [(network, self.networks), (system, self.systems),
                      (configure, self.configures)]
    aspect_list = [l for t, l in classification if isinstance(aspect, t)]
    assert len(aspect_list) == 1, "Unexpected aspect for RADL."
    aspect_list = aspect_list[0]

    old_aspect = [a for a in aspect_list if a.getId() == aspect.getId()]
    return old_aspect[0] if old_aspect else None
[ "def", "get", "(", "self", ",", "aspect", ")", ":", "classification", "=", "[", "(", "network", ",", "self", ".", "networks", ")", ",", "(", "system", ",", "self", ".", "systems", ")", ",", "(", "configure", ",", "self", ".", "configures", ")", "]", "aspect_list", "=", "[", "l", "for", "t", ",", "l", "in", "classification", "if", "isinstance", "(", "aspect", ",", "t", ")", "]", "assert", "len", "(", "aspect_list", ")", "==", "1", ",", "\"Unexpected aspect for RADL.\"", "aspect_list", "=", "aspect_list", "[", "0", "]", "old_aspect", "=", "[", "a", "for", "a", "in", "aspect_list", "if", "a", ".", "getId", "(", ")", "==", "aspect", ".", "getId", "(", ")", "]", "return", "old_aspect", "[", "0", "]", "if", "old_aspect", "else", "None" ]
Get a network, system or configure or contextualize with the same id as aspect passed.
[ "Get", "a", "network", "system", "or", "configure", "or", "contextualize", "with", "the", "same", "id", "as", "aspect", "passed", "." ]
python
train
saulpw/visidata
visidata/vdtui.py
https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/vdtui.py#L801-L820
def drawRightStatus(self, scr, vs):
    'Draw right side of status bar. Return length displayed.'
    rightx = self.windowWidth - 1
    ret = 0
    for rstatcolor in self.callHook('rstatus', vs):
        if rstatcolor:
            try:
                rstatus, coloropt = rstatcolor
                rstatus = ' ' + rstatus
                attr = colors.get_color(coloropt).attr
                statuslen = clipdraw(scr, self.windowHeight - 1, rightx, rstatus, attr, rtl=True)
                rightx -= statuslen
                ret += statuslen
            except Exception as e:
                self.exceptionCaught(e)

    if scr:
        curses.doupdate()
    return ret
[ "def", "drawRightStatus", "(", "self", ",", "scr", ",", "vs", ")", ":", "rightx", "=", "self", ".", "windowWidth", "-", "1", "ret", "=", "0", "for", "rstatcolor", "in", "self", ".", "callHook", "(", "'rstatus'", ",", "vs", ")", ":", "if", "rstatcolor", ":", "try", ":", "rstatus", ",", "coloropt", "=", "rstatcolor", "rstatus", "=", "' '", "+", "rstatus", "attr", "=", "colors", ".", "get_color", "(", "coloropt", ")", ".", "attr", "statuslen", "=", "clipdraw", "(", "scr", ",", "self", ".", "windowHeight", "-", "1", ",", "rightx", ",", "rstatus", ",", "attr", ",", "rtl", "=", "True", ")", "rightx", "-=", "statuslen", "ret", "+=", "statuslen", "except", "Exception", "as", "e", ":", "self", ".", "exceptionCaught", "(", "e", ")", "if", "scr", ":", "curses", ".", "doupdate", "(", ")", "return", "ret" ]
Draw right side of status bar. Return length displayed.
[ "Draw", "right", "side", "of", "status", "bar", ".", "Return", "length", "displayed", "." ]
python
train
vaexio/vaex
packages/vaex-core/vaex/dataframe.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L3397-L3401
def tail(self, n=10):
    """Return a shallow copy of a DataFrame with the last n rows."""
    N = len(self)
    # self.cat(i1=max(0, N-n), i2=min(len(self), N))
    return self[max(0, N - n):min(len(self), N)]
[ "def", "tail", "(", "self", ",", "n", "=", "10", ")", ":", "N", "=", "len", "(", "self", ")", "# self.cat(i1=max(0, N-n), i2=min(len(self), N))", "return", "self", "[", "max", "(", "0", ",", "N", "-", "n", ")", ":", "min", "(", "len", "(", "self", ")", ",", "N", ")", "]" ]
Return a shallow copy of a DataFrame with the last n rows.
[ "Return", "a", "shallow", "copy", "a", "DataFrame", "with", "the", "last", "n", "rows", "." ]
python
test
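A sketch of the tail slice arithmetic on a plain list standing in for the DataFrame: the max/min clamp keeps the slice in bounds even when n exceeds the row count.

rows = list(range(25))        # hypothetical stand-in for 25 rows
n = 10
N = len(rows)
tail = rows[max(0, N - n):min(len(rows), N)]
assert tail == list(range(15, 25))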
GeorgeArgyros/symautomata
symautomata/cfgpda.py
https://github.com/GeorgeArgyros/symautomata/blob/f5d66533573b27e155bec3f36b8c00b8e3937cb3/symautomata/cfgpda.py#L39-L78
def _mpda(self, re_grammar, splitstring=0):
    """
    Args:
        re_grammar (list): A list of grammar rules
        splitstring (bool): A boolean for enabling or disabling
            the splitting of symbols using a space
    Returns:
        PDA: The generated PDA
    """
    cnfgrammar = CNFGenerator(re_grammar)
    if not self.alphabet:
        self._extract_alphabet(cnfgrammar)
    cnftopda = CnfPda(self.alphabet)
    productions = {}
    nonterminals = []
    nonterminals.append(cnfgrammar.init_symbol)
    for key in list(cnfgrammar.grammar_nonterminals):
        if key != cnfgrammar.init_symbol:
            nonterminals.append(key)
    for key in list(cnfgrammar.grammar_nonterminals):
        j = 0
        productions[key] = {}
        # print 'testing '+key
        for pair in cnfgrammar.grammar_rules:
            cnf_form = list(pair)
            if cnf_form[0] == key:
                productions[key][j] = {}
                if isinstance(cnf_form[1], type(())):
                    # print list(p[1])
                    productions[key][j]['b0'] = list(cnf_form[1])[0]
                    productions[key][j]['b1'] = list(cnf_form[1])[1]
                else:
                    # print p[1]
                    productions[key][j]['a'] = cnf_form[1]
                j = j + 1
    return cnftopda.initialize(
        nonterminals, productions,
        list(cnfgrammar.grammar_terminals), splitstring)
[ "def", "_mpda", "(", "self", ",", "re_grammar", ",", "splitstring", "=", "0", ")", ":", "cnfgrammar", "=", "CNFGenerator", "(", "re_grammar", ")", "if", "not", "self", ".", "alphabet", ":", "self", ".", "_extract_alphabet", "(", "cnfgrammar", ")", "cnftopda", "=", "CnfPda", "(", "self", ".", "alphabet", ")", "productions", "=", "{", "}", "nonterminals", "=", "[", "]", "nonterminals", ".", "append", "(", "cnfgrammar", ".", "init_symbol", ")", "for", "key", "in", "list", "(", "cnfgrammar", ".", "grammar_nonterminals", ")", ":", "if", "key", "!=", "cnfgrammar", ".", "init_symbol", ":", "nonterminals", ".", "append", "(", "key", ")", "for", "key", "in", "list", "(", "cnfgrammar", ".", "grammar_nonterminals", ")", ":", "j", "=", "0", "productions", "[", "key", "]", "=", "{", "}", "# print 'testing '+key", "for", "pair", "in", "cnfgrammar", ".", "grammar_rules", ":", "cnf_form", "=", "list", "(", "pair", ")", "if", "cnf_form", "[", "0", "]", "==", "key", ":", "productions", "[", "key", "]", "[", "j", "]", "=", "{", "}", "if", "isinstance", "(", "cnf_form", "[", "1", "]", ",", "type", "(", "(", ")", ")", ")", ":", "# print list(p[1])", "productions", "[", "key", "]", "[", "j", "]", "[", "'b0'", "]", "=", "list", "(", "cnf_form", "[", "1", "]", ")", "[", "0", "]", "productions", "[", "key", "]", "[", "j", "]", "[", "'b1'", "]", "=", "list", "(", "cnf_form", "[", "1", "]", ")", "[", "1", "]", "else", ":", "# print p[1]", "productions", "[", "key", "]", "[", "j", "]", "[", "'a'", "]", "=", "cnf_form", "[", "1", "]", "j", "=", "j", "+", "1", "return", "cnftopda", ".", "initialize", "(", "nonterminals", ",", "productions", ",", "list", "(", "cnfgrammar", ".", "grammar_terminals", ")", ",", "splitstring", ")" ]
Args: re_grammar (list): A list of grammar rules splitstring (bool): A boolean for enabling or disabling the splitting of symbols using a space Returns: PDA: The generated PDA
[ "Args", ":", "re_grammar", "(", "list", ")", ":", "A", "list", "of", "grammar", "rules", "splitstring", "(", "bool", ")", ":", "A", "boolean", "for", "enabling", "or", "disabling", "the", "splitting", "of", "symbols", "using", "a", "space", "Returns", ":", "PDA", ":", "The", "generated", "PDA" ]
python
train
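
For reference, the shape of the intermediate 'productions' mapping the loop above builds before calling CnfPda.initialize (inferred from the code, not a public API): rules are keyed by nonterminal and rule index, with binary CNF rules stored under 'b0'/'b1' and terminal rules under 'a'.

productions = {
    'S': {
        0: {'b0': 'A', 'b1': 'B'},   # binary CNF rule   S -> A B
        1: {'a': 'a'},               # terminal CNF rule S -> a
    },
}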
apache/incubator-mxnet
example/rnn/large_word_lm/run_utils.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/rnn/large_word_lm/run_utils.py#L66-L90
def evaluate(mod, data_iter, epoch, log_interval): """ Run evaluation on cpu. """ start = time.time() total_L = 0.0 nbatch = 0 density = 0 mod.set_states(value=0) for batch in data_iter: mod.forward(batch, is_train=False) outputs = mod.get_outputs(merge_multi_context=False) states = outputs[:-1] total_L += outputs[-1][0] mod.set_states(states=states) nbatch += 1 # don't include padding data in the test perplexity density += batch.data[1].mean() if (nbatch + 1) % log_interval == 0: logging.info("Eval batch %d loss : %.7f" % (nbatch, (total_L / density).asscalar())) data_iter.reset() loss = (total_L / density).asscalar() ppl = math.exp(loss) if loss < 100 else 1e37 end = time.time() logging.info('Iter[%d]\t\t CE loss %.7f, ppl %.7f. Eval duration = %.2f seconds ' % \ (epoch, loss, ppl, end - start)) return loss
[ "def", "evaluate", "(", "mod", ",", "data_iter", ",", "epoch", ",", "log_interval", ")", ":", "start", "=", "time", ".", "time", "(", ")", "total_L", "=", "0.0", "nbatch", "=", "0", "density", "=", "0", "mod", ".", "set_states", "(", "value", "=", "0", ")", "for", "batch", "in", "data_iter", ":", "mod", ".", "forward", "(", "batch", ",", "is_train", "=", "False", ")", "outputs", "=", "mod", ".", "get_outputs", "(", "merge_multi_context", "=", "False", ")", "states", "=", "outputs", "[", ":", "-", "1", "]", "total_L", "+=", "outputs", "[", "-", "1", "]", "[", "0", "]", "mod", ".", "set_states", "(", "states", "=", "states", ")", "nbatch", "+=", "1", "# don't include padding data in the test perplexity", "density", "+=", "batch", ".", "data", "[", "1", "]", ".", "mean", "(", ")", "if", "(", "nbatch", "+", "1", ")", "%", "log_interval", "==", "0", ":", "logging", ".", "info", "(", "\"Eval batch %d loss : %.7f\"", "%", "(", "nbatch", ",", "(", "total_L", "/", "density", ")", ".", "asscalar", "(", ")", ")", ")", "data_iter", ".", "reset", "(", ")", "loss", "=", "(", "total_L", "/", "density", ")", ".", "asscalar", "(", ")", "ppl", "=", "math", ".", "exp", "(", "loss", ")", "if", "loss", "<", "100", "else", "1e37", "end", "=", "time", ".", "time", "(", ")", "logging", ".", "info", "(", "'Iter[%d]\\t\\t CE loss %.7f, ppl %.7f. Eval duration = %.2f seconds '", "%", "(", "epoch", ",", "loss", ",", "ppl", ",", "end", "-", "start", ")", ")", "return", "loss" ]
Run evaluation on cpu.
[ "Run", "evaluation", "on", "cpu", "." ]
python
train
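
The reported perplexity is simply the exponential of the density-weighted cross-entropy loss, clamped to a large sentinel to avoid overflow; restated standalone:

import math

def perplexity(ce_loss):
    # Mirrors the clamp above: exp() of a loss near 100 nats would overflow.
    return math.exp(ce_loss) if ce_loss < 100 else 1e37

print(perplexity(math.log(100.0)))   # ~100.0: a CE of ln(100) nats <-> ppl 100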
awslabs/aws-sam-cli
samcli/lib/utils/colors.py
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/lib/utils/colors.py#L61-L64
def _color(self, msg, color): """Internal helper method to add colors to input""" kwargs = {'fg': color} return click.style(msg, **kwargs) if self.colorize else msg
[ "def", "_color", "(", "self", ",", "msg", ",", "color", ")", ":", "kwargs", "=", "{", "'fg'", ":", "color", "}", "return", "click", ".", "style", "(", "msg", ",", "*", "*", "kwargs", ")", "if", "self", ".", "colorize", "else", "msg" ]
Internal helper method to add colors to input
[ "Internal", "helper", "method", "to", "add", "colors", "to", "input" ]
python
train
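
A minimal sketch of the click.style call that _color wraps (click must be installed); the colorize flag simply gates whether the ANSI codes get applied.

import click

print(click.style("Deploy succeeded", fg="green"))   # ANSI-wrapped output
print("Deploy succeeded")                            # the colorize=False path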
GNS3/gns3-server
gns3server/config.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/config.py#L157-L164
def reload(self): """ Reload configuration """ self.read_config() for section in self._override_config: self.set_section_config(section, self._override_config[section])
[ "def", "reload", "(", "self", ")", ":", "self", ".", "read_config", "(", ")", "for", "section", "in", "self", ".", "_override_config", ":", "self", ".", "set_section_config", "(", "section", ",", "self", ".", "_override_config", "[", "section", "]", ")" ]
Reload configuration
[ "Reload", "configuration" ]
python
train
jasonrbriggs/proton
python/proton/xmlutils.py
https://github.com/jasonrbriggs/proton/blob/e734734750797ef0caaa1680379e07b86d7a53e3/python/proton/xmlutils.py#L18-L26
def index(elem): ''' Return the index position of an element in the children of a parent. ''' parent = elem.getparent() for x in range(0, len(parent.getchildren())): if parent.getchildren()[x] == elem: return x return -1
[ "def", "index", "(", "elem", ")", ":", "parent", "=", "elem", ".", "getparent", "(", ")", "for", "x", "in", "range", "(", "0", ",", "len", "(", "parent", ".", "getchildren", "(", ")", ")", ")", ":", "if", "parent", ".", "getchildren", "(", ")", "[", "x", "]", "==", "elem", ":", "return", "x", "return", "-", "1" ]
Return the index position of an element in the children of a parent.
[ "Return", "the", "index", "position", "of", "an", "element", "in", "the", "children", "of", "a", "parent", "." ]
python
train
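
Usage sketch, assuming lxml is installed and the index() function above is in scope.

from lxml import etree

root = etree.fromstring('<ul><li>a</li><li>b</li><li>c</li></ul>')
second = list(root)[1]
print(index(second))   # -> 1, the element's position among its siblings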
johnwmillr/LyricsGenius
lyricsgenius/api.py
https://github.com/johnwmillr/LyricsGenius/blob/e36482f7c42235037f3b9b7013edcd54141124e3/lyricsgenius/api.py#L93-L102
def search_genius_web(self, search_term, per_page=5): """Use the web-version of Genius search""" endpoint = "search/multi?" params = {'per_page': per_page, 'q': search_term} # This endpoint is not part of the API, requires different formatting url = "https://genius.com/api/" + endpoint + urlencode(params) response = requests.get(url, timeout=self.timeout) time.sleep(max(self._SLEEP_MIN, self.sleep_time)) return response.json()['response'] if response else None
[ "def", "search_genius_web", "(", "self", ",", "search_term", ",", "per_page", "=", "5", ")", ":", "endpoint", "=", "\"search/multi?\"", "params", "=", "{", "'per_page'", ":", "per_page", ",", "'q'", ":", "search_term", "}", "# This endpoint is not part of the API, requires different formatting", "url", "=", "\"https://genius.com/api/\"", "+", "endpoint", "+", "urlencode", "(", "params", ")", "response", "=", "requests", ".", "get", "(", "url", ",", "timeout", "=", "self", ".", "timeout", ")", "time", ".", "sleep", "(", "max", "(", "self", ".", "_SLEEP_MIN", ",", "self", ".", "sleep_time", ")", ")", "return", "response", ".", "json", "(", ")", "[", "'response'", "]", "if", "response", "else", "None" ]
Use the web-version of Genius search
[ "Use", "the", "web", "-", "version", "of", "Genius", "search" ]
python
train
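
A hedged usage sketch: instantiating Genius with a client access token is the library's entry point, and both the token and the query below are placeholders.

import lyricsgenius

genius = lyricsgenius.Genius("YOUR_CLIENT_ACCESS_TOKEN")
hits = genius.search_genius_web("Andy Shauf The Party", per_page=5)
# 'hits' is the 'response' payload from genius.com/api/search/multi, or None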
PmagPy/PmagPy
dialogs/grid_frame3.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/grid_frame3.py#L89-L291
def InitUI(self): """ initialize window """ self.main_sizer = wx.BoxSizer(wx.VERTICAL) if self.grid_type in self.contribution.tables: dataframe = self.contribution.tables[self.grid_type] else: dataframe = None self.grid_builder = GridBuilder(self.contribution, self.grid_type, self.panel, parent_type=self.parent_type, reqd_headers=self.reqd_headers, exclude_cols=self.exclude_cols, huge=self.huge) self.grid = self.grid_builder.make_grid() self.grid.InitUI() ## Column management buttons self.add_cols_button = wx.Button(self.panel, label="Add additional columns", name='add_cols_btn', size=(170, 20)) self.Bind(wx.EVT_BUTTON, self.on_add_cols, self.add_cols_button) self.remove_cols_button = wx.Button(self.panel, label="Remove columns", name='remove_cols_btn', size=(170, 20)) self.Bind(wx.EVT_BUTTON, self.on_remove_cols, self.remove_cols_button) ## Row management buttons self.remove_row_button = wx.Button(self.panel, label="Remove last row", name='remove_last_row_btn') self.Bind(wx.EVT_BUTTON, self.on_remove_row, self.remove_row_button) many_rows_box = wx.BoxSizer(wx.HORIZONTAL) self.add_many_rows_button = wx.Button(self.panel, label="Add row(s)", name='add_many_rows_btn') self.rows_spin_ctrl = wx.SpinCtrl(self.panel, value='1', initial=1, name='rows_spin_ctrl') many_rows_box.Add(self.add_many_rows_button, flag=wx.ALIGN_CENTRE) many_rows_box.Add(self.rows_spin_ctrl) self.Bind(wx.EVT_BUTTON, self.on_add_rows, self.add_many_rows_button) self.deleteRowButton = wx.Button(self.panel, id=-1, label='Delete selected row(s)', name='delete_row_btn') self.Bind(wx.EVT_BUTTON, lambda event: self.on_remove_row(event, False), self.deleteRowButton) self.deleteRowButton.Disable() # measurements table should not be able to add new rows # that should be done elsewhere if self.huge: self.add_many_rows_button.Disable() self.rows_spin_ctrl.Disable() self.remove_row_button.Disable() # can't remove cols (seg fault), but can add them #self.add_cols_button.Disable() self.remove_cols_button.Disable() ## Data management buttons self.importButton = wx.Button(self.panel, id=-1, label='Import MagIC-format file', name='import_btn') self.Bind(wx.EVT_BUTTON, self.onImport, self.importButton) self.exitButton = wx.Button(self.panel, id=-1, label='Save and close grid', name='save_and_quit_btn') self.Bind(wx.EVT_BUTTON, self.onSave, self.exitButton) self.cancelButton = wx.Button(self.panel, id=-1, label='Cancel', name='cancel_btn') self.Bind(wx.EVT_BUTTON, self.onCancelButton, self.cancelButton) self.Bind(wx.EVT_CLOSE, self.onCancelButton) ## Input/output buttons self.copyButton = wx.Button(self.panel, id=-1, label="Start copy mode", name="copy_mode_btn") self.Bind(wx.EVT_BUTTON, self.onCopyMode, self.copyButton) self.selectAllButton = wx.Button(self.panel, id=-1, label="Copy all cells", name="select_all_btn") self.Bind(wx.EVT_BUTTON, self.onSelectAll, self.selectAllButton) self.copySelectionButton = wx.Button(self.panel, id=-1, label="Copy selected cells", name="copy_selection_btn") self.Bind(wx.EVT_BUTTON, self.onCopySelection, self.copySelectionButton) self.copySelectionButton.Disable() ## Help message and button # button self.toggle_help_btn = wx.Button(self.panel, id=-1, label="Show help", name='toggle_help_btn') self.Bind(wx.EVT_BUTTON, self.toggle_help, self.toggle_help_btn) # message self.help_msg_boxsizer = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, name='help_msg_boxsizer'), wx.VERTICAL) if self.grid_type == 'measurements': self.default_msg_text = "Edit measurements here.\nIn general, measurements should be imported 
directly into Pmag GUI,\nwhich has protocols for converting many lab formats into the MagIC format.\nIf we are missing your particular lab format, please let us know: https://github.com/PmagPy/PmagPy/issues.\nThis grid is just meant for looking at your measurements and doing small edits.\nCurrently, you can't add/remove rows here. You can add columns and edit cell values." else: self.default_msg_text = 'Edit {} here.\nYou can add or remove both rows and columns, however required columns may not be deleted.\nControlled vocabularies are indicated by **, and will have drop-down-menus.\nSuggested vocabularies are indicated by ^^, and also have drop-down-menus.\nTo edit all values in a column, click the column header.\nYou can cut and paste a block of cells from an Excel-like file.\nJust click the top left cell and use command "v".'.format(self.grid_type) txt = '' if self.grid_type == 'locations': txt = '\n\nNote: you can fill in location start/end latitude/longitude here.\nHowever, if you add sites in step 2, the program will calculate those values automatically,\nbased on site latitudes/logitudes.\nThese values will be written to your upload file.' if self.grid_type == 'samples': txt = "\n\nNote: you can fill in lithology, class, and type for each sample here.\nHowever, if the sample's class, lithology, and type are the same as its parent site,\nthose values will propagate down, and will be written to your sample file automatically." if self.grid_type == 'specimens': txt = "\n\nNote: you can fill in lithology, class, and type for each specimen here.\nHowever, if the specimen's class, lithology, and type are the same as its parent sample,\nthose values will propagate down, and will be written to your specimen file automatically." if self.grid_type == 'ages': txt = "\n\nNote: only ages for which you provide data will be written to your upload file." 
self.default_msg_text += txt self.msg_text = wx.StaticText(self.panel, label=self.default_msg_text, style=wx.TE_CENTER, name='msg text') self.help_msg_boxsizer.Add(self.msg_text) self.help_msg_boxsizer.ShowItems(False) ## Code message and button # button self.toggle_codes_btn = wx.Button(self.panel, id=-1, label="Show method codes", name='toggle_codes_btn') self.Bind(wx.EVT_BUTTON, self.toggle_codes, self.toggle_codes_btn) # message self.code_msg_boxsizer = pw.MethodCodeDemystifier(self.panel, self.contribution.vocab) self.code_msg_boxsizer.ShowItems(False) ## Add content to sizers self.hbox = wx.BoxSizer(wx.HORIZONTAL) col_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Columns', name='manage columns'), wx.VERTICAL) row_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Rows', name='manage rows'), wx.VERTICAL) self.main_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Manage data', name='manage data'), wx.VERTICAL) input_output_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='In/Out', name='manage in out'), wx.VERTICAL) col_btn_vbox.Add(self.add_cols_button, flag=wx.ALL, border=5) col_btn_vbox.Add(self.remove_cols_button, flag=wx.ALL, border=5) row_btn_vbox.Add(many_rows_box, flag=wx.ALL, border=5) row_btn_vbox.Add(self.remove_row_button, flag=wx.ALL, border=5) row_btn_vbox.Add(self.deleteRowButton, flag=wx.ALL, border=5) self.main_btn_vbox.Add(self.importButton, flag=wx.ALL, border=5) self.main_btn_vbox.Add(self.exitButton, flag=wx.ALL, border=5) self.main_btn_vbox.Add(self.cancelButton, flag=wx.ALL, border=5) input_output_vbox.Add(self.copyButton, flag=wx.ALL, border=5) input_output_vbox.Add(self.selectAllButton, flag=wx.ALL, border=5) input_output_vbox.Add(self.copySelectionButton, flag=wx.ALL, border=5) self.hbox.Add(col_btn_vbox) self.hbox.Add(row_btn_vbox) self.hbox.Add(self.main_btn_vbox) self.hbox.Add(input_output_vbox) #self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid) self.grid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid) # self.Bind(wx.EVT_KEY_DOWN, self.on_key_down) self.panel.Bind(wx.EVT_TEXT_PASTE, self.do_fit) # add actual data! self.grid_builder.add_data_to_grid(self.grid, self.grid_type) # fill in some default values self.grid_builder.fill_defaults() # set scrollbars self.grid.set_scrollbars() ## this would be a way to prevent editing ## some cells in age grid. 
## with multiple types of ages, though, ## this doesn't make much sense #if self.grid_type == 'ages': # attr = wx.grid.GridCellAttr() # attr.SetReadOnly(True) # self.grid.SetColAttr(1, attr) self.drop_down_menu = drop_down_menus.Menus(self.grid_type, self.contribution, self.grid) self.grid_box = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, name='grid container'), wx.VERTICAL) self.grid_box.Add(self.grid, 1, flag=wx.ALL|wx.EXPAND, border=5) # final layout, set size self.main_sizer.Add(self.hbox, flag=wx.ALL|wx.ALIGN_CENTER,#|wx.SHAPED, border=20) self.main_sizer.Add(self.toggle_help_btn, 0, flag=wx.BOTTOM|wx.ALIGN_CENTRE,#|wx.SHAPED, border=5) self.main_sizer.Add(self.help_msg_boxsizer, 0, flag=wx.BOTTOM|wx.ALIGN_CENTRE, border=10) self.main_sizer.Add(self.toggle_codes_btn, 0, flag=wx.BOTTOM|wx.ALIGN_CENTRE,#|wx.SHAPED, border=5) self.main_sizer.Add(self.code_msg_boxsizer, 0, flag=wx.BOTTOM|wx.ALIGN_CENTRE,#|wx.SHAPED, border=5) self.main_sizer.Add(self.grid_box, 2, flag=wx.ALL|wx.ALIGN_CENTER|wx.EXPAND, border=10) self.panel.SetSizer(self.main_sizer) panel_sizer = wx.BoxSizer(wx.VERTICAL) panel_sizer.Add(self.panel, 1, wx.EXPAND) self.SetSizer(panel_sizer) panel_sizer.Fit(self) ## this keeps sizing correct if the user resizes the window manually #self.Bind(wx.EVT_SIZE, self.do_fit) # self.Centre() self.Show()
[ "def", "InitUI", "(", "self", ")", ":", "self", ".", "main_sizer", "=", "wx", ".", "BoxSizer", "(", "wx", ".", "VERTICAL", ")", "if", "self", ".", "grid_type", "in", "self", ".", "contribution", ".", "tables", ":", "dataframe", "=", "self", ".", "contribution", ".", "tables", "[", "self", ".", "grid_type", "]", "else", ":", "dataframe", "=", "None", "self", ".", "grid_builder", "=", "GridBuilder", "(", "self", ".", "contribution", ",", "self", ".", "grid_type", ",", "self", ".", "panel", ",", "parent_type", "=", "self", ".", "parent_type", ",", "reqd_headers", "=", "self", ".", "reqd_headers", ",", "exclude_cols", "=", "self", ".", "exclude_cols", ",", "huge", "=", "self", ".", "huge", ")", "self", ".", "grid", "=", "self", ".", "grid_builder", ".", "make_grid", "(", ")", "self", ".", "grid", ".", "InitUI", "(", ")", "## Column management buttons", "self", ".", "add_cols_button", "=", "wx", ".", "Button", "(", "self", ".", "panel", ",", "label", "=", "\"Add additional columns\"", ",", "name", "=", "'add_cols_btn'", ",", "size", "=", "(", "170", ",", "20", ")", ")", "self", ".", "Bind", "(", "wx", ".", "EVT_BUTTON", ",", "self", ".", "on_add_cols", ",", "self", ".", "add_cols_button", ")", "self", ".", "remove_cols_button", "=", "wx", ".", "Button", "(", "self", ".", "panel", ",", "label", "=", "\"Remove columns\"", ",", "name", "=", "'remove_cols_btn'", ",", "size", "=", "(", "170", ",", "20", ")", ")", "self", ".", "Bind", "(", "wx", ".", "EVT_BUTTON", ",", "self", ".", "on_remove_cols", ",", "self", ".", "remove_cols_button", ")", "## Row management buttons", "self", ".", "remove_row_button", "=", "wx", ".", "Button", "(", "self", ".", "panel", ",", "label", "=", "\"Remove last row\"", ",", "name", "=", "'remove_last_row_btn'", ")", "self", ".", "Bind", "(", "wx", ".", "EVT_BUTTON", ",", "self", ".", "on_remove_row", ",", "self", ".", "remove_row_button", ")", "many_rows_box", "=", "wx", ".", "BoxSizer", "(", "wx", ".", "HORIZONTAL", ")", "self", ".", "add_many_rows_button", "=", "wx", ".", "Button", "(", "self", ".", "panel", ",", "label", "=", "\"Add row(s)\"", ",", "name", "=", "'add_many_rows_btn'", ")", "self", ".", "rows_spin_ctrl", "=", "wx", ".", "SpinCtrl", "(", "self", ".", "panel", ",", "value", "=", "'1'", ",", "initial", "=", "1", ",", "name", "=", "'rows_spin_ctrl'", ")", "many_rows_box", ".", "Add", "(", "self", ".", "add_many_rows_button", ",", "flag", "=", "wx", ".", "ALIGN_CENTRE", ")", "many_rows_box", ".", "Add", "(", "self", ".", "rows_spin_ctrl", ")", "self", ".", "Bind", "(", "wx", ".", "EVT_BUTTON", ",", "self", ".", "on_add_rows", ",", "self", ".", "add_many_rows_button", ")", "self", ".", "deleteRowButton", "=", "wx", ".", "Button", "(", "self", ".", "panel", ",", "id", "=", "-", "1", ",", "label", "=", "'Delete selected row(s)'", ",", "name", "=", "'delete_row_btn'", ")", "self", ".", "Bind", "(", "wx", ".", "EVT_BUTTON", ",", "lambda", "event", ":", "self", ".", "on_remove_row", "(", "event", ",", "False", ")", ",", "self", ".", "deleteRowButton", ")", "self", ".", "deleteRowButton", ".", "Disable", "(", ")", "# measurements table should not be able to add new rows", "# that should be done elsewhere", "if", "self", ".", "huge", ":", "self", ".", "add_many_rows_button", ".", "Disable", "(", ")", "self", ".", "rows_spin_ctrl", ".", "Disable", "(", ")", "self", ".", "remove_row_button", ".", "Disable", "(", ")", "# can't remove cols (seg fault), but can add them", "#self.add_cols_button.Disable()", "self", ".", "remove_cols_button", ".", "Disable", "(", ")", 
"## Data management buttons", "self", ".", "importButton", "=", "wx", ".", "Button", "(", "self", ".", "panel", ",", "id", "=", "-", "1", ",", "label", "=", "'Import MagIC-format file'", ",", "name", "=", "'import_btn'", ")", "self", ".", "Bind", "(", "wx", ".", "EVT_BUTTON", ",", "self", ".", "onImport", ",", "self", ".", "importButton", ")", "self", ".", "exitButton", "=", "wx", ".", "Button", "(", "self", ".", "panel", ",", "id", "=", "-", "1", ",", "label", "=", "'Save and close grid'", ",", "name", "=", "'save_and_quit_btn'", ")", "self", ".", "Bind", "(", "wx", ".", "EVT_BUTTON", ",", "self", ".", "onSave", ",", "self", ".", "exitButton", ")", "self", ".", "cancelButton", "=", "wx", ".", "Button", "(", "self", ".", "panel", ",", "id", "=", "-", "1", ",", "label", "=", "'Cancel'", ",", "name", "=", "'cancel_btn'", ")", "self", ".", "Bind", "(", "wx", ".", "EVT_BUTTON", ",", "self", ".", "onCancelButton", ",", "self", ".", "cancelButton", ")", "self", ".", "Bind", "(", "wx", ".", "EVT_CLOSE", ",", "self", ".", "onCancelButton", ")", "## Input/output buttons", "self", ".", "copyButton", "=", "wx", ".", "Button", "(", "self", ".", "panel", ",", "id", "=", "-", "1", ",", "label", "=", "\"Start copy mode\"", ",", "name", "=", "\"copy_mode_btn\"", ")", "self", ".", "Bind", "(", "wx", ".", "EVT_BUTTON", ",", "self", ".", "onCopyMode", ",", "self", ".", "copyButton", ")", "self", ".", "selectAllButton", "=", "wx", ".", "Button", "(", "self", ".", "panel", ",", "id", "=", "-", "1", ",", "label", "=", "\"Copy all cells\"", ",", "name", "=", "\"select_all_btn\"", ")", "self", ".", "Bind", "(", "wx", ".", "EVT_BUTTON", ",", "self", ".", "onSelectAll", ",", "self", ".", "selectAllButton", ")", "self", ".", "copySelectionButton", "=", "wx", ".", "Button", "(", "self", ".", "panel", ",", "id", "=", "-", "1", ",", "label", "=", "\"Copy selected cells\"", ",", "name", "=", "\"copy_selection_btn\"", ")", "self", ".", "Bind", "(", "wx", ".", "EVT_BUTTON", ",", "self", ".", "onCopySelection", ",", "self", ".", "copySelectionButton", ")", "self", ".", "copySelectionButton", ".", "Disable", "(", ")", "## Help message and button", "# button", "self", ".", "toggle_help_btn", "=", "wx", ".", "Button", "(", "self", ".", "panel", ",", "id", "=", "-", "1", ",", "label", "=", "\"Show help\"", ",", "name", "=", "'toggle_help_btn'", ")", "self", ".", "Bind", "(", "wx", ".", "EVT_BUTTON", ",", "self", ".", "toggle_help", ",", "self", ".", "toggle_help_btn", ")", "# message", "self", ".", "help_msg_boxsizer", "=", "wx", ".", "StaticBoxSizer", "(", "wx", ".", "StaticBox", "(", "self", ".", "panel", ",", "-", "1", ",", "name", "=", "'help_msg_boxsizer'", ")", ",", "wx", ".", "VERTICAL", ")", "if", "self", ".", "grid_type", "==", "'measurements'", ":", "self", ".", "default_msg_text", "=", "\"Edit measurements here.\\nIn general, measurements should be imported directly into Pmag GUI,\\nwhich has protocols for converting many lab formats into the MagIC format.\\nIf we are missing your particular lab format, please let us know: https://github.com/PmagPy/PmagPy/issues.\\nThis grid is just meant for looking at your measurements and doing small edits.\\nCurrently, you can't add/remove rows here. 
You can add columns and edit cell values.\"", "else", ":", "self", ".", "default_msg_text", "=", "'Edit {} here.\\nYou can add or remove both rows and columns, however required columns may not be deleted.\\nControlled vocabularies are indicated by **, and will have drop-down-menus.\\nSuggested vocabularies are indicated by ^^, and also have drop-down-menus.\\nTo edit all values in a column, click the column header.\\nYou can cut and paste a block of cells from an Excel-like file.\\nJust click the top left cell and use command \"v\".'", ".", "format", "(", "self", ".", "grid_type", ")", "txt", "=", "''", "if", "self", ".", "grid_type", "==", "'locations'", ":", "txt", "=", "'\\n\\nNote: you can fill in location start/end latitude/longitude here.\\nHowever, if you add sites in step 2, the program will calculate those values automatically,\\nbased on site latitudes/logitudes.\\nThese values will be written to your upload file.'", "if", "self", ".", "grid_type", "==", "'samples'", ":", "txt", "=", "\"\\n\\nNote: you can fill in lithology, class, and type for each sample here.\\nHowever, if the sample's class, lithology, and type are the same as its parent site,\\nthose values will propagate down, and will be written to your sample file automatically.\"", "if", "self", ".", "grid_type", "==", "'specimens'", ":", "txt", "=", "\"\\n\\nNote: you can fill in lithology, class, and type for each specimen here.\\nHowever, if the specimen's class, lithology, and type are the same as its parent sample,\\nthose values will propagate down, and will be written to your specimen file automatically.\"", "if", "self", ".", "grid_type", "==", "'ages'", ":", "txt", "=", "\"\\n\\nNote: only ages for which you provide data will be written to your upload file.\"", "self", ".", "default_msg_text", "+=", "txt", "self", ".", "msg_text", "=", "wx", ".", "StaticText", "(", "self", ".", "panel", ",", "label", "=", "self", ".", "default_msg_text", ",", "style", "=", "wx", ".", "TE_CENTER", ",", "name", "=", "'msg text'", ")", "self", ".", "help_msg_boxsizer", ".", "Add", "(", "self", ".", "msg_text", ")", "self", ".", "help_msg_boxsizer", ".", "ShowItems", "(", "False", ")", "## Code message and button", "# button", "self", ".", "toggle_codes_btn", "=", "wx", ".", "Button", "(", "self", ".", "panel", ",", "id", "=", "-", "1", ",", "label", "=", "\"Show method codes\"", ",", "name", "=", "'toggle_codes_btn'", ")", "self", ".", "Bind", "(", "wx", ".", "EVT_BUTTON", ",", "self", ".", "toggle_codes", ",", "self", ".", "toggle_codes_btn", ")", "# message", "self", ".", "code_msg_boxsizer", "=", "pw", ".", "MethodCodeDemystifier", "(", "self", ".", "panel", ",", "self", ".", "contribution", ".", "vocab", ")", "self", ".", "code_msg_boxsizer", ".", "ShowItems", "(", "False", ")", "## Add content to sizers", "self", ".", "hbox", "=", "wx", ".", "BoxSizer", "(", "wx", ".", "HORIZONTAL", ")", "col_btn_vbox", "=", "wx", ".", "StaticBoxSizer", "(", "wx", ".", "StaticBox", "(", "self", ".", "panel", ",", "-", "1", ",", "label", "=", "'Columns'", ",", "name", "=", "'manage columns'", ")", ",", "wx", ".", "VERTICAL", ")", "row_btn_vbox", "=", "wx", ".", "StaticBoxSizer", "(", "wx", ".", "StaticBox", "(", "self", ".", "panel", ",", "-", "1", ",", "label", "=", "'Rows'", ",", "name", "=", "'manage rows'", ")", ",", "wx", ".", "VERTICAL", ")", "self", ".", "main_btn_vbox", "=", "wx", ".", "StaticBoxSizer", "(", "wx", ".", "StaticBox", "(", "self", ".", "panel", ",", "-", "1", ",", "label", "=", "'Manage data'", ",", "name", "=", "'manage 
data'", ")", ",", "wx", ".", "VERTICAL", ")", "input_output_vbox", "=", "wx", ".", "StaticBoxSizer", "(", "wx", ".", "StaticBox", "(", "self", ".", "panel", ",", "-", "1", ",", "label", "=", "'In/Out'", ",", "name", "=", "'manage in out'", ")", ",", "wx", ".", "VERTICAL", ")", "col_btn_vbox", ".", "Add", "(", "self", ".", "add_cols_button", ",", "flag", "=", "wx", ".", "ALL", ",", "border", "=", "5", ")", "col_btn_vbox", ".", "Add", "(", "self", ".", "remove_cols_button", ",", "flag", "=", "wx", ".", "ALL", ",", "border", "=", "5", ")", "row_btn_vbox", ".", "Add", "(", "many_rows_box", ",", "flag", "=", "wx", ".", "ALL", ",", "border", "=", "5", ")", "row_btn_vbox", ".", "Add", "(", "self", ".", "remove_row_button", ",", "flag", "=", "wx", ".", "ALL", ",", "border", "=", "5", ")", "row_btn_vbox", ".", "Add", "(", "self", ".", "deleteRowButton", ",", "flag", "=", "wx", ".", "ALL", ",", "border", "=", "5", ")", "self", ".", "main_btn_vbox", ".", "Add", "(", "self", ".", "importButton", ",", "flag", "=", "wx", ".", "ALL", ",", "border", "=", "5", ")", "self", ".", "main_btn_vbox", ".", "Add", "(", "self", ".", "exitButton", ",", "flag", "=", "wx", ".", "ALL", ",", "border", "=", "5", ")", "self", ".", "main_btn_vbox", ".", "Add", "(", "self", ".", "cancelButton", ",", "flag", "=", "wx", ".", "ALL", ",", "border", "=", "5", ")", "input_output_vbox", ".", "Add", "(", "self", ".", "copyButton", ",", "flag", "=", "wx", ".", "ALL", ",", "border", "=", "5", ")", "input_output_vbox", ".", "Add", "(", "self", ".", "selectAllButton", ",", "flag", "=", "wx", ".", "ALL", ",", "border", "=", "5", ")", "input_output_vbox", ".", "Add", "(", "self", ".", "copySelectionButton", ",", "flag", "=", "wx", ".", "ALL", ",", "border", "=", "5", ")", "self", ".", "hbox", ".", "Add", "(", "col_btn_vbox", ")", "self", ".", "hbox", ".", "Add", "(", "row_btn_vbox", ")", "self", ".", "hbox", ".", "Add", "(", "self", ".", "main_btn_vbox", ")", "self", ".", "hbox", ".", "Add", "(", "input_output_vbox", ")", "#self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)", "self", ".", "grid", ".", "Bind", "(", "wx", ".", "grid", ".", "EVT_GRID_LABEL_LEFT_CLICK", ",", "self", ".", "onLeftClickLabel", ",", "self", ".", "grid", ")", "#", "self", ".", "Bind", "(", "wx", ".", "EVT_KEY_DOWN", ",", "self", ".", "on_key_down", ")", "self", ".", "panel", ".", "Bind", "(", "wx", ".", "EVT_TEXT_PASTE", ",", "self", ".", "do_fit", ")", "# add actual data!", "self", ".", "grid_builder", ".", "add_data_to_grid", "(", "self", ".", "grid", ",", "self", ".", "grid_type", ")", "# fill in some default values", "self", ".", "grid_builder", ".", "fill_defaults", "(", ")", "# set scrollbars", "self", ".", "grid", ".", "set_scrollbars", "(", ")", "## this would be a way to prevent editing", "## some cells in age grid.", "## with multiple types of ages, though,", "## this doesn't make much sense", "#if self.grid_type == 'ages':", "# attr = wx.grid.GridCellAttr()", "# attr.SetReadOnly(True)", "# self.grid.SetColAttr(1, attr)", "self", ".", "drop_down_menu", "=", "drop_down_menus", ".", "Menus", "(", "self", ".", "grid_type", ",", "self", ".", "contribution", ",", "self", ".", "grid", ")", "self", ".", "grid_box", "=", "wx", ".", "StaticBoxSizer", "(", "wx", ".", "StaticBox", "(", "self", ".", "panel", ",", "-", "1", ",", "name", "=", "'grid container'", ")", ",", "wx", ".", "VERTICAL", ")", "self", ".", "grid_box", ".", "Add", "(", "self", ".", "grid", ",", "1", ",", "flag", "=", "wx", ".", "ALL", "|", "wx", ".", "EXPAND", 
",", "border", "=", "5", ")", "# final layout, set size", "self", ".", "main_sizer", ".", "Add", "(", "self", ".", "hbox", ",", "flag", "=", "wx", ".", "ALL", "|", "wx", ".", "ALIGN_CENTER", ",", "#|wx.SHAPED,", "border", "=", "20", ")", "self", ".", "main_sizer", ".", "Add", "(", "self", ".", "toggle_help_btn", ",", "0", ",", "flag", "=", "wx", ".", "BOTTOM", "|", "wx", ".", "ALIGN_CENTRE", ",", "#|wx.SHAPED,", "border", "=", "5", ")", "self", ".", "main_sizer", ".", "Add", "(", "self", ".", "help_msg_boxsizer", ",", "0", ",", "flag", "=", "wx", ".", "BOTTOM", "|", "wx", ".", "ALIGN_CENTRE", ",", "border", "=", "10", ")", "self", ".", "main_sizer", ".", "Add", "(", "self", ".", "toggle_codes_btn", ",", "0", ",", "flag", "=", "wx", ".", "BOTTOM", "|", "wx", ".", "ALIGN_CENTRE", ",", "#|wx.SHAPED,", "border", "=", "5", ")", "self", ".", "main_sizer", ".", "Add", "(", "self", ".", "code_msg_boxsizer", ",", "0", ",", "flag", "=", "wx", ".", "BOTTOM", "|", "wx", ".", "ALIGN_CENTRE", ",", "#|wx.SHAPED,", "border", "=", "5", ")", "self", ".", "main_sizer", ".", "Add", "(", "self", ".", "grid_box", ",", "2", ",", "flag", "=", "wx", ".", "ALL", "|", "wx", ".", "ALIGN_CENTER", "|", "wx", ".", "EXPAND", ",", "border", "=", "10", ")", "self", ".", "panel", ".", "SetSizer", "(", "self", ".", "main_sizer", ")", "panel_sizer", "=", "wx", ".", "BoxSizer", "(", "wx", ".", "VERTICAL", ")", "panel_sizer", ".", "Add", "(", "self", ".", "panel", ",", "1", ",", "wx", ".", "EXPAND", ")", "self", ".", "SetSizer", "(", "panel_sizer", ")", "panel_sizer", ".", "Fit", "(", "self", ")", "## this keeps sizing correct if the user resizes the window manually", "#self.Bind(wx.EVT_SIZE, self.do_fit)", "# self.Centre()", "self", ".", "Show", "(", ")" ]
initialize window
[ "initialize", "window" ]
python
train
astropy/photutils
photutils/detection/findstars.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/detection/findstars.py#L897-L991
def find_stars(self, data, mask=None): """ Find stars in an astronomical image. Parameters ---------- data : 2D array_like The 2D image array. mask : 2D bool array, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when searching for stars. Returns ------- table : `~astropy.table.Table` or `None` A table of found stars with the following parameters: * ``id``: unique object identification number. * ``xcentroid, ycentroid``: object centroid. * ``sharpness``: object sharpness. * ``roundness1``: object roundness based on symmetry. * ``roundness2``: object roundness based on marginal Gaussian fits. * ``npix``: the total number of pixels in the Gaussian kernel array. * ``sky``: the input ``sky`` parameter. * ``peak``: the peak, sky-subtracted, pixel value of the object. * ``flux``: the object flux calculated as the peak density in the convolved image divided by the detection threshold. This derivation matches that of `DAOFIND`_ if ``sky`` is 0.0. * ``mag``: the object instrumental magnitude calculated as ``-2.5 * log10(flux)``. The derivation matches that of `DAOFIND`_ if ``sky`` is 0.0. `None` is returned if no stars are found. """ star_cutouts = _find_stars(data, self.kernel, self.threshold_eff, mask=mask, exclude_border=self.exclude_border) if star_cutouts is None: warnings.warn('No sources were found.', NoDetectionsWarning) return None self._star_cutouts = star_cutouts star_props = [] for star_cutout in star_cutouts: props = _DAOFind_Properties(star_cutout, self.kernel, self.sky) if np.isnan(props.dx_hx).any() or np.isnan(props.dy_hy).any(): continue if (props.sharpness <= self.sharplo or props.sharpness >= self.sharphi): continue if (props.roundness1 <= self.roundlo or props.roundness1 >= self.roundhi): continue if (props.roundness2 <= self.roundlo or props.roundness2 >= self.roundhi): continue if self.peakmax is not None and props.peak >= self.peakmax: continue star_props.append(props) nstars = len(star_props) if nstars == 0: warnings.warn('Sources were found, but none pass the sharpness ' 'and roundness criteria.', NoDetectionsWarning) return None if self.brightest is not None: fluxes = [props.flux for props in star_props] idx = sorted(np.argsort(fluxes)[-self.brightest:].tolist()) star_props = [star_props[k] for k in idx] nstars = len(star_props) table = Table() table['id'] = np.arange(nstars) + 1 columns = ('xcentroid', 'ycentroid', 'sharpness', 'roundness1', 'roundness2', 'npix', 'sky', 'peak', 'flux', 'mag') for column in columns: table[column] = [getattr(props, column) for props in star_props] return table
[ "def", "find_stars", "(", "self", ",", "data", ",", "mask", "=", "None", ")", ":", "star_cutouts", "=", "_find_stars", "(", "data", ",", "self", ".", "kernel", ",", "self", ".", "threshold_eff", ",", "mask", "=", "mask", ",", "exclude_border", "=", "self", ".", "exclude_border", ")", "if", "star_cutouts", "is", "None", ":", "warnings", ".", "warn", "(", "'No sources were found.'", ",", "NoDetectionsWarning", ")", "return", "None", "self", ".", "_star_cutouts", "=", "star_cutouts", "star_props", "=", "[", "]", "for", "star_cutout", "in", "star_cutouts", ":", "props", "=", "_DAOFind_Properties", "(", "star_cutout", ",", "self", ".", "kernel", ",", "self", ".", "sky", ")", "if", "np", ".", "isnan", "(", "props", ".", "dx_hx", ")", ".", "any", "(", ")", "or", "np", ".", "isnan", "(", "props", ".", "dy_hy", ")", ".", "any", "(", ")", ":", "continue", "if", "(", "props", ".", "sharpness", "<=", "self", ".", "sharplo", "or", "props", ".", "sharpness", ">=", "self", ".", "sharphi", ")", ":", "continue", "if", "(", "props", ".", "roundness1", "<=", "self", ".", "roundlo", "or", "props", ".", "roundness1", ">=", "self", ".", "roundhi", ")", ":", "continue", "if", "(", "props", ".", "roundness2", "<=", "self", ".", "roundlo", "or", "props", ".", "roundness2", ">=", "self", ".", "roundhi", ")", ":", "continue", "if", "self", ".", "peakmax", "is", "not", "None", "and", "props", ".", "peak", ">=", "self", ".", "peakmax", ":", "continue", "star_props", ".", "append", "(", "props", ")", "nstars", "=", "len", "(", "star_props", ")", "if", "nstars", "==", "0", ":", "warnings", ".", "warn", "(", "'Sources were found, but none pass the sharpness '", "'and roundness criteria.'", ",", "NoDetectionsWarning", ")", "return", "None", "if", "self", ".", "brightest", "is", "not", "None", ":", "fluxes", "=", "[", "props", ".", "flux", "for", "props", "in", "star_props", "]", "idx", "=", "sorted", "(", "np", ".", "argsort", "(", "fluxes", ")", "[", "-", "self", ".", "brightest", ":", "]", ".", "tolist", "(", ")", ")", "star_props", "=", "[", "star_props", "[", "k", "]", "for", "k", "in", "idx", "]", "nstars", "=", "len", "(", "star_props", ")", "table", "=", "Table", "(", ")", "table", "[", "'id'", "]", "=", "np", ".", "arange", "(", "nstars", ")", "+", "1", "columns", "=", "(", "'xcentroid'", ",", "'ycentroid'", ",", "'sharpness'", ",", "'roundness1'", ",", "'roundness2'", ",", "'npix'", ",", "'sky'", ",", "'peak'", ",", "'flux'", ",", "'mag'", ")", "for", "column", "in", "columns", ":", "table", "[", "column", "]", "=", "[", "getattr", "(", "props", ",", "column", ")", "for", "props", "in", "star_props", "]", "return", "table" ]
Find stars in an astronomical image. Parameters ---------- data : 2D array_like The 2D image array. mask : 2D bool array, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when searching for stars. Returns ------- table : `~astropy.table.Table` or `None` A table of found stars with the following parameters: * ``id``: unique object identification number. * ``xcentroid, ycentroid``: object centroid. * ``sharpness``: object sharpness. * ``roundness1``: object roundness based on symmetry. * ``roundness2``: object roundness based on marginal Gaussian fits. * ``npix``: the total number of pixels in the Gaussian kernel array. * ``sky``: the input ``sky`` parameter. * ``peak``: the peak, sky-subtracted, pixel value of the object. * ``flux``: the object flux calculated as the peak density in the convolved image divided by the detection threshold. This derivation matches that of `DAOFIND`_ if ``sky`` is 0.0. * ``mag``: the object instrumental magnitude calculated as ``-2.5 * log10(flux)``. The derivation matches that of `DAOFIND`_ if ``sky`` is 0.0. `None` is returned if no stars are found.
[ "Find", "stars", "in", "an", "astronomical", "image", "." ]
python
train
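
Typical use goes through the public DAOStarFinder class, whose call operator delegates to find_stars; the import path below matches recent photutils releases, and the image is synthetic (flat noise plus one Gaussian blob), so treat this as a sketch.

import numpy as np
from photutils.detection import DAOStarFinder

rng = np.random.default_rng(0)
data = rng.normal(0.0, 1.0, (64, 64))            # flat noise background
yy, xx = np.mgrid[0:64, 0:64]
data += 50.0 * np.exp(-((xx - 32)**2 + (yy - 32)**2) / (2 * 2.0**2))  # "star"

finder = DAOStarFinder(threshold=5.0, fwhm=4.7)  # fwhm ~ 2.355 * sigma
table = finder(data)   # astropy Table with xcentroid/ycentroid/flux..., or None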
CI-WATER/gsshapy
gsshapy/orm/msk.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/msk.py#L45-L143
def generateFromWatershedShapefile(self, shapefile_path, cell_size, out_raster_path=None, load_raster_to_db=True): """ Generates a mask from a watershed_shapefile Example:: from gsshapy.orm import ProjectFile, WatershedMaskFile from gsshapy.lib import db_tools as dbt gssha_directory = '/gsshapy/tests/grid_standard/gssha_project' shapefile_path = 'watershed_boundary.shp' # Create Test DB sqlalchemy_url, sql_engine = dbt.init_sqlite_memory() # Create DB Sessions db_session = dbt.create_session(sqlalchemy_url, sql_engine) # Instantiate GSSHAPY object for reading to database project_manager = ProjectFile() # read project file project_manager.readInput(directory=gssha_directory, projectFileName='grid_standard.prj', session=db_session) # generate watershed mask watershed_mask = WatershedMaskFile(session=db_session, project_file=project_manager) watershed_mask.generateFromWatershedShapefile(shapefile_path, x_num_cells=50, y_num_cells=50, ) # write out updated parameters project_manager.writeInput(session=db_session, directory=gssha_directory, name='grid_standard') """ if not self.projectFile: raise ValueError("Must be connected to project file ...") # match elevation grid if exists match_grid = None try: match_grid = self.projectFile.getGrid(use_mask=False) except ValueError: pass # match projection if exists wkt_projection = None try: wkt_projection = self.projectFile.getWkt() except ValueError: pass if out_raster_path is None: out_raster_path = '{0}.{1}'.format(self.projectFile.name, self.extension) # make sure paths are absolute as the working directory changes shapefile_path = os.path.abspath(shapefile_path) # make sure the polygon is valid check_watershed_boundary_geometry(shapefile_path) gr = rasterize_shapefile(shapefile_path, x_cell_size=cell_size, y_cell_size=cell_size, match_grid=match_grid, raster_nodata=0, as_gdal_grid=True, raster_wkt_proj=wkt_projection, convert_to_utm=True) with tmp_chdir(self.projectFile.project_directory): gr.to_grass_ascii(out_raster_path, print_nodata=False) self.filename = out_raster_path # update project file cards self.projectFile.setCard('WATERSHED_MASK', out_raster_path, add_quotes=True) self.projectFile.setCard('GRIDSIZE', str((gr.geotransform[1] - gr.geotransform[-1])/2.0)) self.projectFile.setCard('ROWS', str(gr.y_size)) self.projectFile.setCard('COLS', str(gr.x_size)) # write projection file if does not exist if wkt_projection is None: proj_file = ProjectionFile() proj_file.projection = gr.wkt proj_file.projectFile = self.projectFile proj_path = "{0}_prj.pro".format(os.path.splitext(out_raster_path)[0]) gr.write_prj(proj_path) self.projectFile.setCard('#PROJECTION_FILE', proj_path, add_quotes=True) # read raster into object if load_raster_to_db: self._load_raster_text(out_raster_path)
[ "def", "generateFromWatershedShapefile", "(", "self", ",", "shapefile_path", ",", "cell_size", ",", "out_raster_path", "=", "None", ",", "load_raster_to_db", "=", "True", ")", ":", "if", "not", "self", ".", "projectFile", ":", "raise", "ValueError", "(", "\"Must be connected to project file ...\"", ")", "# match elevation grid if exists", "match_grid", "=", "None", "try", ":", "match_grid", "=", "self", ".", "projectFile", ".", "getGrid", "(", "use_mask", "=", "False", ")", "except", "ValueError", ":", "pass", "# match projection if exists", "wkt_projection", "=", "None", "try", ":", "wkt_projection", "=", "self", ".", "projectFile", ".", "getWkt", "(", ")", "except", "ValueError", ":", "pass", "if", "out_raster_path", "is", "None", ":", "out_raster_path", "=", "'{0}.{1}'", ".", "format", "(", "self", ".", "projectFile", ".", "name", ",", "self", ".", "extension", ")", "# make sure paths are absolute as the working directory changes", "shapefile_path", "=", "os", ".", "path", ".", "abspath", "(", "shapefile_path", ")", "# make sure the polygon is valid", "check_watershed_boundary_geometry", "(", "shapefile_path", ")", "gr", "=", "rasterize_shapefile", "(", "shapefile_path", ",", "x_cell_size", "=", "cell_size", ",", "y_cell_size", "=", "cell_size", ",", "match_grid", "=", "match_grid", ",", "raster_nodata", "=", "0", ",", "as_gdal_grid", "=", "True", ",", "raster_wkt_proj", "=", "wkt_projection", ",", "convert_to_utm", "=", "True", ")", "with", "tmp_chdir", "(", "self", ".", "projectFile", ".", "project_directory", ")", ":", "gr", ".", "to_grass_ascii", "(", "out_raster_path", ",", "print_nodata", "=", "False", ")", "self", ".", "filename", "=", "out_raster_path", "# update project file cards", "self", ".", "projectFile", ".", "setCard", "(", "'WATERSHED_MASK'", ",", "out_raster_path", ",", "add_quotes", "=", "True", ")", "self", ".", "projectFile", ".", "setCard", "(", "'GRIDSIZE'", ",", "str", "(", "(", "gr", ".", "geotransform", "[", "1", "]", "-", "gr", ".", "geotransform", "[", "-", "1", "]", ")", "/", "2.0", ")", ")", "self", ".", "projectFile", ".", "setCard", "(", "'ROWS'", ",", "str", "(", "gr", ".", "y_size", ")", ")", "self", ".", "projectFile", ".", "setCard", "(", "'COLS'", ",", "str", "(", "gr", ".", "x_size", ")", ")", "# write projection file if does not exist", "if", "wkt_projection", "is", "None", ":", "proj_file", "=", "ProjectionFile", "(", ")", "proj_file", ".", "projection", "=", "gr", ".", "wkt", "proj_file", ".", "projectFile", "=", "self", ".", "projectFile", "proj_path", "=", "\"{0}_prj.pro\"", ".", "format", "(", "os", ".", "path", ".", "splitext", "(", "out_raster_path", ")", "[", "0", "]", ")", "gr", ".", "write_prj", "(", "proj_path", ")", "self", ".", "projectFile", ".", "setCard", "(", "'#PROJECTION_FILE'", ",", "proj_path", ",", "add_quotes", "=", "True", ")", "# read raster into object", "if", "load_raster_to_db", ":", "self", ".", "_load_raster_text", "(", "out_raster_path", ")" ]
Generates a mask from a watershed shapefile

        Example::

            from gsshapy.orm import ProjectFile, WatershedMaskFile
            from gsshapy.lib import db_tools as dbt

            gssha_directory = '/gsshapy/tests/grid_standard/gssha_project'
            shapefile_path = 'watershed_boundary.shp'

            # Create Test DB
            sqlalchemy_url, sql_engine = dbt.init_sqlite_memory()

            # Create DB Sessions
            db_session = dbt.create_session(sqlalchemy_url, sql_engine)

            # Instantiate GSSHAPY object for reading to database
            project_manager = ProjectFile()

            # read project file
            project_manager.readInput(directory=gssha_directory,
                                      projectFileName='grid_standard.prj',
                                      session=db_session)

            # generate watershed mask
            watershed_mask = WatershedMaskFile(session=db_session,
                                               project_file=project_manager)
            watershed_mask.generateFromWatershedShapefile(shapefile_path,
                                                          cell_size=50)

            # write out updated parameters
            project_manager.writeInput(session=db_session,
                                       directory=gssha_directory,
                                       name='grid_standard')
[ "Generates", "a", "mask", "from", "a", "watershed_shapefile" ]
python
train
SpectoLabs/hoverpy
hoverpy/hp.py
https://github.com/SpectoLabs/hoverpy/blob/e153ec57f80634019d827d378f184c01fedc5a0e/hoverpy/hp.py#L135-L148
def simulation(self, data=None):
        """
        Gets / Sets the simulation data.

        If no data is passed in, then this method acts as a getter.
        If data is passed in, then this method acts as a setter.

        Keyword arguments:
        data -- the simulation data you wish to set (default None)
        """
        if data:
            return self._session.put(self.__v2() + "/simulation", data=data)
        else:
            return self._session.get(self.__v2() + "/simulation").json()
[ "def", "simulation", "(", "self", ",", "data", "=", "None", ")", ":", "if", "data", ":", "return", "self", ".", "_session", ".", "put", "(", "self", ".", "__v2", "(", ")", "+", "\"/simulation\"", ",", "data", "=", "data", ")", "else", ":", "return", "self", ".", "_session", ".", "get", "(", "self", ".", "__v2", "(", ")", "+", "\"/simulation\"", ")", ".", "json", "(", ")" ]
Gets / Sets the simulation data.

        If no data is passed in, then this method acts as a getter.
        If data is passed in, then this method acts as a setter.

        Keyword arguments:
        data -- the simulation data you wish to set (default None)
[ "Gets", "/", "Sets", "the", "simulation", "data", "." ]
python
train
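
Usage sketch of the getter/setter pair; 'hp' is assumed to be a running instance of the class above, and round-tripping through json keeps the PUT body a string.

import json

def roundtrip_simulation(hp):
    current = hp.simulation()                        # getter: parsed JSON dict
    return hp.simulation(data=json.dumps(current))   # setter: PUT it back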
emory-libraries/eulfedora
eulfedora/syncutil.py
https://github.com/emory-libraries/eulfedora/blob/161826f3fdcdab4007f6fa7dfd9f1ecabc4bcbe4/eulfedora/syncutil.py#L315-L409
def object_data(self): '''Process the archival export and return a buffer with foxml content for ingest into the destination repository. :returns: :class:`io.BytesIO` for ingest, with references to uploaded datastream content or content location urls ''' self.foxml_buffer = io.BytesIO() if self.progress_bar: self.progress_bar.start() previous_section = None while True: try: section = self.get_next_section() except StopIteration: break if section == BINARY_CONTENT_START: self.within_file = True # get datastream info from the end of the section just before this one # (needed to provide size to upload request) dsinfo = self.get_datastream_info(previous_section) if dsinfo: logger.info('Found encoded datastream %(id)s (%(mimetype)s, size %(size)s, %(type)s %(digest)s)', dsinfo) else: # error if datastream info is not found, because either # size or version date is required to handle content raise Exception('Failed to find datastream information for %s from \n%s' \ % (self.obj.pid, previous_section)) if self.xml_only and not \ dsinfo['mimetype'] in ['text/xml', 'application/rdf+xml', 'application/xml']: # possibly other mimetypes also? try: dsid = dsinfo['id'].split('.')[0] except ValueError: # if dsid doesn't include a .# (for versioning), # use the id as is. dsid = dsinfo['id'] if self.url_credentials: # if url credentials are set, parse the base fedora api # url so they can be inserted at the right place parsed_url = urlparse(self.obj.api.base_url) # reassemble base url, adding in credentials base_url = ''.join([parsed_url.scheme, '://', self.url_credentials, parsed_url.netloc, parsed_url.path]) else: base_url = self.obj.api.base_url # versioned datastream dissemination url content_location = '%sobjects/%s/datastreams/%s/content?asOfDateTime=%s' % \ (base_url, self.obj.pid, dsid, dsinfo['created']) else: upload_args = {} if self.progress_bar: def upload_callback(monitor): self.progress_bar.upload = monitor.bytes_read upload_args = {'callback': upload_callback} # use upload id as content location content_location = self.dest_repo.api.upload(self.encoded_datastream(), size=int(dsinfo['size']), **upload_args) self.foxml_buffer.write(force_bytes('<foxml:contentLocation REF="%s" TYPE="URL"/>' \ % content_location)) elif section == BINARY_CONTENT_END: # should not occur here; this section will be processed by # encoded_datastream method self.within_file = False elif self.within_file: # should not occur here; this section will be pulled by # encoded_datastream method # binary content within a file - ignore here # (handled by encoded_datastream method) continue else: # not start or end of binary content, and not # within a file, so yield as is (e.g., datastream tags # between small files) self.foxml_buffer.write(section) previous_section = section return self.foxml_buffer
[ "def", "object_data", "(", "self", ")", ":", "self", ".", "foxml_buffer", "=", "io", ".", "BytesIO", "(", ")", "if", "self", ".", "progress_bar", ":", "self", ".", "progress_bar", ".", "start", "(", ")", "previous_section", "=", "None", "while", "True", ":", "try", ":", "section", "=", "self", ".", "get_next_section", "(", ")", "except", "StopIteration", ":", "break", "if", "section", "==", "BINARY_CONTENT_START", ":", "self", ".", "within_file", "=", "True", "# get datastream info from the end of the section just before this one", "# (needed to provide size to upload request)", "dsinfo", "=", "self", ".", "get_datastream_info", "(", "previous_section", ")", "if", "dsinfo", ":", "logger", ".", "info", "(", "'Found encoded datastream %(id)s (%(mimetype)s, size %(size)s, %(type)s %(digest)s)'", ",", "dsinfo", ")", "else", ":", "# error if datastream info is not found, because either", "# size or version date is required to handle content", "raise", "Exception", "(", "'Failed to find datastream information for %s from \\n%s'", "%", "(", "self", ".", "obj", ".", "pid", ",", "previous_section", ")", ")", "if", "self", ".", "xml_only", "and", "not", "dsinfo", "[", "'mimetype'", "]", "in", "[", "'text/xml'", ",", "'application/rdf+xml'", ",", "'application/xml'", "]", ":", "# possibly other mimetypes also?", "try", ":", "dsid", "=", "dsinfo", "[", "'id'", "]", ".", "split", "(", "'.'", ")", "[", "0", "]", "except", "ValueError", ":", "# if dsid doesn't include a .# (for versioning),", "# use the id as is.", "dsid", "=", "dsinfo", "[", "'id'", "]", "if", "self", ".", "url_credentials", ":", "# if url credentials are set, parse the base fedora api", "# url so they can be inserted at the right place", "parsed_url", "=", "urlparse", "(", "self", ".", "obj", ".", "api", ".", "base_url", ")", "# reassemble base url, adding in credentials", "base_url", "=", "''", ".", "join", "(", "[", "parsed_url", ".", "scheme", ",", "'://'", ",", "self", ".", "url_credentials", ",", "parsed_url", ".", "netloc", ",", "parsed_url", ".", "path", "]", ")", "else", ":", "base_url", "=", "self", ".", "obj", ".", "api", ".", "base_url", "# versioned datastream dissemination url", "content_location", "=", "'%sobjects/%s/datastreams/%s/content?asOfDateTime=%s'", "%", "(", "base_url", ",", "self", ".", "obj", ".", "pid", ",", "dsid", ",", "dsinfo", "[", "'created'", "]", ")", "else", ":", "upload_args", "=", "{", "}", "if", "self", ".", "progress_bar", ":", "def", "upload_callback", "(", "monitor", ")", ":", "self", ".", "progress_bar", ".", "upload", "=", "monitor", ".", "bytes_read", "upload_args", "=", "{", "'callback'", ":", "upload_callback", "}", "# use upload id as content location", "content_location", "=", "self", ".", "dest_repo", ".", "api", ".", "upload", "(", "self", ".", "encoded_datastream", "(", ")", ",", "size", "=", "int", "(", "dsinfo", "[", "'size'", "]", ")", ",", "*", "*", "upload_args", ")", "self", ".", "foxml_buffer", ".", "write", "(", "force_bytes", "(", "'<foxml:contentLocation REF=\"%s\" TYPE=\"URL\"/>'", "%", "content_location", ")", ")", "elif", "section", "==", "BINARY_CONTENT_END", ":", "# should not occur here; this section will be processed by", "# encoded_datastream method", "self", ".", "within_file", "=", "False", "elif", "self", ".", "within_file", ":", "# should not occur here; this section will be pulled by", "# encoded_datastream method", "# binary content within a file - ignore here", "# (handled by encoded_datastream method)", "continue", "else", ":", "# not start or end of binary 
content, and not", "# within a file, so yield as is (e.g., datastream tags", "# between small files)", "self", ".", "foxml_buffer", ".", "write", "(", "section", ")", "previous_section", "=", "section", "return", "self", ".", "foxml_buffer" ]
Process the archival export and return a buffer with foxml content for ingest into the destination repository. :returns: :class:`io.BytesIO` for ingest, with references to uploaded datastream content or content location urls
[ "Process", "the", "archival", "export", "and", "return", "a", "buffer", "with", "foxml", "content", "for", "ingest", "into", "the", "destination", "repository", "." ]
python
train
ellmetha/django-machina
machina/apps/forum_permission/handler.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_permission/handler.py#L125-L142
def can_edit_post(self, post, user): """ Given a forum post, checks whether the user can edit the latter. """ checker = self._get_checker(user) # A user can edit a post if... # they are a superuser # they are the original poster of the forum post # they belong to the forum moderators is_author = self._is_post_author(post, user) can_edit = ( user.is_superuser or ( is_author and checker.has_perm('can_edit_own_posts', post.topic.forum) and not post.topic.is_locked ) or checker.has_perm('can_edit_posts', post.topic.forum) ) return can_edit
[ "def", "can_edit_post", "(", "self", ",", "post", ",", "user", ")", ":", "checker", "=", "self", ".", "_get_checker", "(", "user", ")", "# A user can edit a post if...", "# they are a superuser", "# they are the original poster of the forum post", "# they belong to the forum moderators", "is_author", "=", "self", ".", "_is_post_author", "(", "post", ",", "user", ")", "can_edit", "=", "(", "user", ".", "is_superuser", "or", "(", "is_author", "and", "checker", ".", "has_perm", "(", "'can_edit_own_posts'", ",", "post", ".", "topic", ".", "forum", ")", "and", "not", "post", ".", "topic", ".", "is_locked", ")", "or", "checker", ".", "has_perm", "(", "'can_edit_posts'", ",", "post", ".", "topic", ".", "forum", ")", ")", "return", "can_edit" ]
Given a forum post, checks whether the user can edit the latter.
[ "Given", "a", "forum", "post", "checks", "whether", "the", "user", "can", "edit", "the", "latter", "." ]
python
train
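A minimal usage sketch for the permission check above, assuming django-machina's PermissionHandler (the class this method lives on in the path shown) and a `post`/`request` pair coming from an ordinary forum view; the names are illustrative, not taken from the record.

from machina.apps.forum_permission.handler import PermissionHandler

handler = PermissionHandler()
if handler.can_edit_post(post, request.user):
    ...  # safe to show the edit form: superuser, moderator, or author of an unlocked topic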
fastmonkeys/pontus
pontus/_compat.py
https://github.com/fastmonkeys/pontus/blob/cf02fb22c4558b899e2dcbe437a1a525321c4f12/pontus/_compat.py#L25-L33
def unicode_compatible(cls): """ A decorator that defines ``__str__`` and ``__unicode__`` methods under Python 2. """ if not is_py3: cls.__unicode__ = cls.__str__ cls.__str__ = lambda self: self.__unicode__().encode('utf-8') return cls
[ "def", "unicode_compatible", "(", "cls", ")", ":", "if", "not", "is_py3", ":", "cls", ".", "__unicode__", "=", "cls", ".", "__str__", "cls", ".", "__str__", "=", "lambda", "self", ":", "self", ".", "__unicode__", "(", ")", ".", "encode", "(", "'utf-8'", ")", "return", "cls" ]
A decorator that defines ``__str__`` and ``__unicode__`` methods under Python 2.
[ "A", "decorator", "that", "defines", "__str__", "and", "__unicode__", "methods", "under", "Python", "2", "." ]
python
train
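A sketch of applying the decorator above: the class defines a text-returning __str__ once, and on Python 2 the decorator moves it to __unicode__ and substitutes a UTF-8-encoding __str__; on Python 3 the class is returned untouched.

@unicode_compatible
class Greeting(object):
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return u'Hello, {0}!'.format(self.name)

print(Greeting(u'world'))  # text on Python 3; UTF-8 bytes from __str__ on Python 2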
Tenchi2xh/Almonds
almonds/utils.py
https://github.com/Tenchi2xh/Almonds/blob/6b27024729f055f2cb5e14ae3ca3cb428ae054bc/almonds/utils.py#L59-L70
def open_file(filename): """ Multi-platform way to make the OS open a file with its default application """ if sys.platform.startswith("darwin"): subprocess.call(("open", filename)) elif sys.platform == "cygwin": subprocess.call(("cygstart", filename)) elif os.name == "nt": os.system("start %s" % filename) elif os.name == "posix": subprocess.call(("xdg-open", filename))
[ "def", "open_file", "(", "filename", ")", ":", "if", "sys", ".", "platform", ".", "startswith", "(", "\"darwin\"", ")", ":", "subprocess", ".", "call", "(", "(", "\"open\"", ",", "filename", ")", ")", "elif", "sys", ".", "platform", "==", "\"cygwin\"", ":", "subprocess", ".", "call", "(", "(", "\"cygstart\"", ",", "filename", ")", ")", "elif", "os", ".", "name", "==", "\"nt\"", ":", "os", ".", "system", "(", "\"start %s\"", "%", "filename", ")", "elif", "os", ".", "name", "==", "\"posix\"", ":", "subprocess", ".", "call", "(", "(", "\"xdg-open\"", ",", "filename", ")", ")" ]
Multi-platform way to make the OS open a file with its default application
[ "Multi", "-", "platform", "way", "to", "make", "the", "OS", "open", "a", "file", "with", "its", "default", "application" ]
python
train
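Usage is a single call with any file the desktop can handle; the filename below is illustrative.

open_file('mandelbrot.png')  # open / cygstart / start / xdg-open, depending on platform

One caveat with the Windows branch: os.system("start %s" % filename) mis-parses paths containing spaces (and `start` treats a first quoted argument as a window title), so os.startfile(filename) would arguably be the safer choice there.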
padfoot27/merlin
venv/lib/python2.7/site-packages/setuptools/sandbox.py
https://github.com/padfoot27/merlin/blob/c317505c5eca0e774fcf8b8c7f08801479a5099a/venv/lib/python2.7/site-packages/setuptools/sandbox.py#L32-L46
def _execfile(filename, globals, locals=None): """ Python 3 implementation of execfile. """ mode = 'rb' # Python 2.6 compile requires LF for newlines, so use deprecated # Universal newlines support. if sys.version_info < (2, 7): mode += 'U' with open(filename, mode) as stream: script = stream.read() if locals is None: locals = globals code = compile(script, filename, 'exec') exec(code, globals, locals)
[ "def", "_execfile", "(", "filename", ",", "globals", ",", "locals", "=", "None", ")", ":", "mode", "=", "'rb'", "# Python 2.6 compile requires LF for newlines, so use deprecated", "# Universal newlines support.", "if", "sys", ".", "version_info", "<", "(", "2", ",", "7", ")", ":", "mode", "+=", "'U'", "with", "open", "(", "filename", ",", "mode", ")", "as", "stream", ":", "script", "=", "stream", ".", "read", "(", ")", "if", "locals", "is", "None", ":", "locals", "=", "globals", "code", "=", "compile", "(", "script", ",", "filename", ",", "'exec'", ")", "exec", "(", "code", ",", "globals", ",", "locals", ")" ]
Python 3 implementation of execfile.
[ "Python", "3", "implementation", "of", "execfile", "." ]
python
train
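A sketch of calling the helper the way a sandbox typically would: run a setup script inside a namespace that masquerades as __main__. The script name is hypothetical.

ns = {'__file__': 'setup.py', '__name__': '__main__'}
_execfile('setup.py', ns)  # compiles and exec()s the file into ns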
ToucanToco/toucan-data-sdk
toucan_data_sdk/utils/decorators.py
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/decorators.py#L109-L120
def log_message(logger, message=""): """ Decorator to log a message before executing a function """ def decorator(func): @wraps(func) def wrapper(*args, **kwargs): _log_message(logger, func.__name__, message) result = func(*args, **kwargs) return result return wrapper return decorator
[ "def", "log_message", "(", "logger", ",", "message", "=", "\"\"", ")", ":", "def", "decorator", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_log_message", "(", "logger", ",", "func", ".", "__name__", ",", "message", ")", "result", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "result", "return", "wrapper", "return", "decorator" ]
Decorator to log a message before executing a function
[ "Decorator", "to", "log", "a", "message", "before", "executing", "a", "function" ]
python
test
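A hedged usage sketch for the decorator above; the wrapped function and logger name are made up, and _log_message is the module's internal helper that does the actual logging.

import logging

logger = logging.getLogger('etl')

@log_message(logger, 'dropping empty rows')
def clean(df):
    return df.dropna()

# calling clean(...) first logs via _log_message, then runs the function unchanged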
apache/airflow
airflow/contrib/hooks/bigquery_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1657-L1703
def get_datasets_list(self, project_id=None): """ Method returns full list of BigQuery datasets in the current project .. seealso:: For more information, see: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list :param project_id: Google Cloud Project for which you try to get all datasets :type project_id: str :return: datasets_list Example of returned datasets_list: :: { "kind":"bigquery#dataset", "location":"US", "id":"your-project:dataset_2_test", "datasetReference":{ "projectId":"your-project", "datasetId":"dataset_2_test" } }, { "kind":"bigquery#dataset", "location":"US", "id":"your-project:dataset_1_test", "datasetReference":{ "projectId":"your-project", "datasetId":"dataset_1_test" } } ] """ dataset_project_id = project_id if project_id else self.project_id try: datasets_list = self.service.datasets().list( projectId=dataset_project_id).execute(num_retries=self.num_retries)['datasets'] self.log.info("Datasets List: %s", datasets_list) except HttpError as err: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(err.content)) return datasets_list
[ "def", "get_datasets_list", "(", "self", ",", "project_id", "=", "None", ")", ":", "dataset_project_id", "=", "project_id", "if", "project_id", "else", "self", ".", "project_id", "try", ":", "datasets_list", "=", "self", ".", "service", ".", "datasets", "(", ")", ".", "list", "(", "projectId", "=", "dataset_project_id", ")", ".", "execute", "(", "num_retries", "=", "self", ".", "num_retries", ")", "[", "'datasets'", "]", "self", ".", "log", ".", "info", "(", "\"Datasets List: %s\"", ",", "datasets_list", ")", "except", "HttpError", "as", "err", ":", "raise", "AirflowException", "(", "'BigQuery job failed. Error was: {}'", ".", "format", "(", "err", ".", "content", ")", ")", "return", "datasets_list" ]
Method returns full list of BigQuery datasets in the current project .. seealso:: For more information, see: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list :param project_id: Google Cloud Project for which you try to get all datasets :type project_id: str :return: datasets_list Example of returned datasets_list: :: { "kind":"bigquery#dataset", "location":"US", "id":"your-project:dataset_2_test", "datasetReference":{ "projectId":"your-project", "datasetId":"dataset_2_test" } }, { "kind":"bigquery#dataset", "location":"US", "id":"your-project:dataset_1_test", "datasetReference":{ "projectId":"your-project", "datasetId":"dataset_1_test" } } ]
[ "Method", "returns", "full", "list", "of", "BigQuery", "datasets", "in", "the", "current", "project" ]
python
test
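A hedged usage sketch. In this module the method lives on the cursor-like class, which is usually reached through the hook's connection; the connection id shown is Airflow's customary default, not something stated in the record.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

hook = BigQueryHook(bigquery_conn_id='bigquery_default')
cursor = hook.get_conn().cursor()
datasets = cursor.get_datasets_list(project_id='your-project')
dataset_ids = [d['datasetReference']['datasetId'] for d in datasets]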
dropbox/stone
stone/backends/obj_c.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backends/obj_c.py#L129-L162
def _get_imports_m(self, data_types, default_imports): """Emits all necessary implementation file imports for the given Stone data type.""" if not isinstance(data_types, list): data_types = [data_types] import_classes = default_imports for data_type in data_types: import_classes.append(fmt_class_prefix(data_type)) if data_type.parent_type: import_classes.append(fmt_class_prefix(data_type.parent_type)) if is_struct_type( data_type) and data_type.has_enumerated_subtypes(): for _, subtype in data_type.get_all_subtypes_with_tags(): import_classes.append(fmt_class_prefix(subtype)) for field in data_type.all_fields: data_type, _ = unwrap_nullable(field.data_type) # unpack list or map while is_list_type(data_type) or is_map_type(data_type): data_type = (data_type.value_data_type if is_map_type(data_type) else data_type.data_type) if is_user_defined_type(data_type): import_classes.append(fmt_class_prefix(data_type)) if import_classes: import_classes = list(set(import_classes)) import_classes.sort() return import_classes
[ "def", "_get_imports_m", "(", "self", ",", "data_types", ",", "default_imports", ")", ":", "if", "not", "isinstance", "(", "data_types", ",", "list", ")", ":", "data_types", "=", "[", "data_types", "]", "import_classes", "=", "default_imports", "for", "data_type", "in", "data_types", ":", "import_classes", ".", "append", "(", "fmt_class_prefix", "(", "data_type", ")", ")", "if", "data_type", ".", "parent_type", ":", "import_classes", ".", "append", "(", "fmt_class_prefix", "(", "data_type", ".", "parent_type", ")", ")", "if", "is_struct_type", "(", "data_type", ")", "and", "data_type", ".", "has_enumerated_subtypes", "(", ")", ":", "for", "_", ",", "subtype", "in", "data_type", ".", "get_all_subtypes_with_tags", "(", ")", ":", "import_classes", ".", "append", "(", "fmt_class_prefix", "(", "subtype", ")", ")", "for", "field", "in", "data_type", ".", "all_fields", ":", "data_type", ",", "_", "=", "unwrap_nullable", "(", "field", ".", "data_type", ")", "# unpack list or map", "while", "is_list_type", "(", "data_type", ")", "or", "is_map_type", "(", "data_type", ")", ":", "data_type", "=", "(", "data_type", ".", "value_data_type", "if", "is_map_type", "(", "data_type", ")", "else", "data_type", ".", "data_type", ")", "if", "is_user_defined_type", "(", "data_type", ")", ":", "import_classes", ".", "append", "(", "fmt_class_prefix", "(", "data_type", ")", ")", "if", "import_classes", ":", "import_classes", "=", "list", "(", "set", "(", "import_classes", ")", ")", "import_classes", ".", "sort", "(", ")", "return", "import_classes" ]
Emits all necessary implementation file imports for the given Stone data type.
[ "Emits", "all", "necessary", "implementation", "file", "imports", "for", "the", "given", "Stone", "data", "type", "." ]
python
train
iotile/coretools
iotileemulate/iotile/emulate/reference/controller_features/sensor_graph.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/reference/controller_features/sensor_graph.py#L221-L270
def _seek_streamer(self, index, value): """Complex logic for actually seeking a streamer to a reading_id. This routine hides all of the gnarly logic of the various edge cases. In particular, the behavior depends on whether the reading id is found, and if it is found, whether it belongs to the indicated streamer or not. If not, the behavior depends on whether the sought reading is too high or too low. """ highest_id = self._rsl.highest_stored_id() streamer = self.graph.streamers[index] if not streamer.walker.buffered: return _pack_sgerror(SensorLogError.CANNOT_USE_UNBUFFERED_STREAM) find_type = None try: exact = streamer.walker.seek(value, target='id') if exact: find_type = 'exact' else: find_type = 'other_stream' except UnresolvedIdentifierError: if value > highest_id: find_type = 'too_high' else: find_type = 'too_low' # If we found an exact match, move one beyond it if find_type == 'exact': try: streamer.walker.pop() except StreamEmptyError: pass error = Error.NO_ERROR elif find_type == 'too_high': streamer.walker.skip_all() error = _pack_sgerror(SensorLogError.NO_MORE_READINGS) elif find_type == 'too_low': streamer.walker.seek(0, target='offset') error = _pack_sgerror(SensorLogError.NO_MORE_READINGS) else: error = _pack_sgerror(SensorLogError.ID_FOUND_FOR_ANOTHER_STREAM) return error
[ "def", "_seek_streamer", "(", "self", ",", "index", ",", "value", ")", ":", "highest_id", "=", "self", ".", "_rsl", ".", "highest_stored_id", "(", ")", "streamer", "=", "self", ".", "graph", ".", "streamers", "[", "index", "]", "if", "not", "streamer", ".", "walker", ".", "buffered", ":", "return", "_pack_sgerror", "(", "SensorLogError", ".", "CANNOT_USE_UNBUFFERED_STREAM", ")", "find_type", "=", "None", "try", ":", "exact", "=", "streamer", ".", "walker", ".", "seek", "(", "value", ",", "target", "=", "'id'", ")", "if", "exact", ":", "find_type", "=", "'exact'", "else", ":", "find_type", "=", "'other_stream'", "except", "UnresolvedIdentifierError", ":", "if", "value", ">", "highest_id", ":", "find_type", "=", "'too_high'", "else", ":", "find_type", "=", "'too_low'", "# If we found an exact match, move one beyond it", "if", "find_type", "==", "'exact'", ":", "try", ":", "streamer", ".", "walker", ".", "pop", "(", ")", "except", "StreamEmptyError", ":", "pass", "error", "=", "Error", ".", "NO_ERROR", "elif", "find_type", "==", "'too_high'", ":", "streamer", ".", "walker", ".", "skip_all", "(", ")", "error", "=", "_pack_sgerror", "(", "SensorLogError", ".", "NO_MORE_READINGS", ")", "elif", "find_type", "==", "'too_low'", ":", "streamer", ".", "walker", ".", "seek", "(", "0", ",", "target", "=", "'offset'", ")", "error", "=", "_pack_sgerror", "(", "SensorLogError", ".", "NO_MORE_READINGS", ")", "else", ":", "error", "=", "_pack_sgerror", "(", "SensorLogError", ".", "ID_FOUND_FOR_ANOTHER_STREAM", ")", "return", "error" ]
Complex logic for actually seeking a streamer to a reading_id. This routine hides all of the gnarly logic of the various edge cases. In particular, the behavior depends on whether the reading id is found, and if it is found, whether it belongs to the indicated streamer or not. If not, the behavior depends on whether the sought reading is too high or too low.
[ "Complex", "logic", "for", "actually", "seeking", "a", "streamer", "to", "a", "reading_id", "." ]
python
train
asmodehn/filefinder2
filefinder2/_fileloader2.py
https://github.com/asmodehn/filefinder2/blob/3f0b211ce11a34562e2a2160e039ae5290b68d6b/filefinder2/_fileloader2.py#L154-L160
def is_package(self, fullname): """Concrete implementation of InspectLoader.is_package by checking if the path returned by get_filename has a filename of '__init__.py'.""" filename = os.path.split(self.get_filename(fullname))[1] filename_base = filename.rsplit('.', 1)[0] tail_name = fullname.rpartition('.')[2] return filename_base == '__init__' and tail_name != '__init__'
[ "def", "is_package", "(", "self", ",", "fullname", ")", ":", "filename", "=", "os", ".", "path", ".", "split", "(", "self", ".", "get_filename", "(", "fullname", ")", ")", "[", "1", "]", "filename_base", "=", "filename", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", "tail_name", "=", "fullname", ".", "rpartition", "(", "'.'", ")", "[", "2", "]", "return", "filename_base", "==", "'__init__'", "and", "tail_name", "!=", "'__init__'" ]
Concrete implementation of InspectLoader.is_package by checking if the path returned by get_filename has a filename of '__init__.py'.
[ "Concrete", "implementation", "of", "InspectLoader", ".", "is_package", "by", "checking", "if", "the", "path", "returned", "by", "get_filename", "has", "a", "filename", "of", "__init__", ".", "py", "." ]
python
train
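The check above is pure string manipulation, so it can be traced without building a loader; a sketch with hypothetical paths:

import os

def looks_like_package(fullname, path):
    # mirrors the method body: an '__init__' filename, but not a module literally named __init__
    filename_base = os.path.split(path)[1].rsplit('.', 1)[0]
    tail_name = fullname.rpartition('.')[2]
    return filename_base == '__init__' and tail_name != '__init__'

looks_like_package('pkg', '/site-packages/pkg/__init__.py')  # True
looks_like_package('pkg.mod', '/site-packages/pkg/mod.py')   # False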
eqcorrscan/EQcorrscan
eqcorrscan/utils/despike.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/despike.py#L102-L139
def _median_window(window, window_start, multiplier, starttime, sampling_rate, debug=0): """ Internal function to aid parallel processing :type window: numpy.ndarray :param window: Data to look for peaks in. :type window_start: int :param window_start: Index of window start point in larger array, used for peak indexing. :type multiplier: float :param multiplier: Multiple of MAD to use as threshold :type starttime: obspy.core.utcdatetime.UTCDateTime :param starttime: Starttime of window, used in debug plotting. :type sampling_rate: float :param sampling_rate: Sampling rate in Hz, used for debug plotting. :type debug: int :param debug: debug level, >= 4 for plots. :returns: peaks :rtype: list """ MAD = np.median(np.abs(window)) thresh = multiplier * MAD if debug >= 2: print('Threshold for window is: ' + str(thresh) + '\nMedian is: ' + str(MAD) + '\nMax is: ' + str(np.max(window))) peaks = find_peaks2_short(arr=window, thresh=thresh, trig_int=5, debug=0) if debug >= 4 and peaks: peaks_plot(window, starttime, sampling_rate, save=False, peaks=peaks) if peaks: peaks = [(peak[0], peak[1] + window_start) for peak in peaks] else: peaks = [] return peaks
[ "def", "_median_window", "(", "window", ",", "window_start", ",", "multiplier", ",", "starttime", ",", "sampling_rate", ",", "debug", "=", "0", ")", ":", "MAD", "=", "np", ".", "median", "(", "np", ".", "abs", "(", "window", ")", ")", "thresh", "=", "multiplier", "*", "MAD", "if", "debug", ">=", "2", ":", "print", "(", "'Threshold for window is: '", "+", "str", "(", "thresh", ")", "+", "'\\nMedian is: '", "+", "str", "(", "MAD", ")", "+", "'\\nMax is: '", "+", "str", "(", "np", ".", "max", "(", "window", ")", ")", ")", "peaks", "=", "find_peaks2_short", "(", "arr", "=", "window", ",", "thresh", "=", "thresh", ",", "trig_int", "=", "5", ",", "debug", "=", "0", ")", "if", "debug", ">=", "4", "and", "peaks", ":", "peaks_plot", "(", "window", ",", "starttime", ",", "sampling_rate", ",", "save", "=", "False", ",", "peaks", "=", "peaks", ")", "if", "peaks", ":", "peaks", "=", "[", "(", "peak", "[", "0", "]", ",", "peak", "[", "1", "]", "+", "window_start", ")", "for", "peak", "in", "peaks", "]", "else", ":", "peaks", "=", "[", "]", "return", "peaks" ]
Internal function to aid parallel processing :type window: numpy.ndarray :param window: Data to look for peaks in. :type window_start: int :param window_start: Index of window start point in larger array, used for peak indexing. :type multiplier: float :param multiplier: Multiple of MAD to use as threshold :type starttime: obspy.core.utcdatetime.UTCDateTime :param starttime: Starttime of window, used in debug plotting. :type sampling_rate: float :param sampling_rate: Sampling rate in Hz, used for debug plotting. :type debug: int :param debug: debug level, >= 4 for plots. :returns: peaks :rtype: list
[ "Internal", "function", "to", "aid", "parallel", "processing" ]
python
train
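The threshold above is a multiple of the median absolute amplitude (note: the median of |x|, not the classic MAD taken about the median). A self-contained sketch of just that thresholding, leaving EQcorrscan's find_peaks2_short aside:

import numpy as np

window = np.random.randn(1000)
window[500] = 25.0                   # plant an obvious spike
mad = np.median(np.abs(window))
thresh = 10 * mad                    # multiplier=10 is an arbitrary choice
spikes = np.nonzero(np.abs(window) > thresh)[0]  # typically array([500])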
inasafe/inasafe
safe/gui/widgets/dock.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/widgets/dock.py#L487-L498
def disconnect_layer_listener(self): """Destroy the signal/slot to listen for layers loaded in QGIS. .. seealso:: connect_layer_listener """ project = QgsProject.instance() project.layersWillBeRemoved.disconnect(self.get_layers) project.layersAdded.disconnect(self.get_layers) project.layersRemoved.disconnect(self.get_layers) self.iface.mapCanvas().layersChanged.disconnect(self.get_layers) self.iface.currentLayerChanged.disconnect(self.layer_changed)
[ "def", "disconnect_layer_listener", "(", "self", ")", ":", "project", "=", "QgsProject", ".", "instance", "(", ")", "project", ".", "layersWillBeRemoved", ".", "disconnect", "(", "self", ".", "get_layers", ")", "project", ".", "layersAdded", ".", "disconnect", "(", "self", ".", "get_layers", ")", "project", ".", "layersRemoved", ".", "disconnect", "(", "self", ".", "get_layers", ")", "self", ".", "iface", ".", "mapCanvas", "(", ")", ".", "layersChanged", ".", "disconnect", "(", "self", ".", "get_layers", ")", "self", ".", "iface", ".", "currentLayerChanged", ".", "disconnect", "(", "self", ".", "layer_changed", ")" ]
Destroy the signal/slot to listen for layers loaded in QGIS. .. seealso:: connect_layer_listener
[ "Destroy", "the", "signal", "/", "slot", "to", "listen", "for", "layers", "loaded", "in", "QGIS", "." ]
python
train
saltstack/salt
salt/states/mysql_query.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/mysql_query.py#L53-L222
def run_file(name, database, query_file=None, output=None, grain=None, key=None, overwrite=True, saltenv=None, check_db_exists=True, **connection_args): ''' Execute an arbitrary query on the specified database .. versionadded:: 2017.7.0 name Used only as an ID database The name of the database to execute the query_file on query_file The file of mysql commands to run output grain: output in a grain other: the file to store results None: output to the result comment (default) grain: grain to store the output (need output=grain) key: the specified grain will be treated as a dictionary, the result of this state will be stored under the specified key. overwrite: The file or grain will be overwritten if it already exists (default) saltenv: The saltenv to pull the query_file from check_db_exists: The state run will check that the specified database exists (default=True) before running any queries ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'Database {0} is already present'.format(database)} if any([query_file.startswith(proto) for proto in ['http://', 'https://', 'salt://', 's3://', 'swift://']]): query_file = __salt__['cp.cache_file'](query_file, saltenv=saltenv or __env__) if not os.path.exists(query_file): ret['comment'] = 'File {0} does not exist'.format(query_file) ret['result'] = False return ret # check if database exists if check_db_exists and not __salt__['mysql.db_exists'](database, **connection_args): err = _get_mysql_error() if err is not None: ret['comment'] = err ret['result'] = False return ret ret['result'] = None ret['comment'] = ('Database {0} is not present' ).format(database) return ret # Check if execution needed if output == 'grain': if grain is not None and key is None: if not overwrite and grain in __salt__['grains.ls'](): ret['comment'] = 'No execution needed. Grain ' + grain\ + ' already set' return ret elif __opts__['test']: ret['result'] = None ret['comment'] = 'Query would execute, storing result in '\ + 'grain: ' + grain return ret elif grain is not None: if grain in __salt__['grains.ls'](): grain_value = __salt__['grains.get'](grain) else: grain_value = {} if not overwrite and key in grain_value: ret['comment'] = 'No execution needed. Grain ' + grain\ + ':' + key + ' already set' return ret elif __opts__['test']: ret['result'] = None ret['comment'] = 'Query would execute, storing result in '\ + 'grain: ' + grain + ':' + key return ret else: ret['result'] = False ret['comment'] = "Error: output type 'grain' needs the grain "\ + "parameter\n" return ret elif output is not None: if not overwrite and os.path.isfile(output): ret['comment'] = 'No execution needed. File ' + output\ + ' already set' return ret elif __opts__['test']: ret['result'] = None ret['comment'] = 'Query would execute, storing result in '\ + 'file: ' + output return ret elif __opts__['test']: ret['result'] = None ret['comment'] = 'Query would execute, not storing result' return ret # The database is present, execute the query query_result = __salt__['mysql.file_query'](database, query_file, **connection_args) if query_result is False: ret['result'] = False return ret mapped_results = [] if 'results' in query_result: for res in query_result['results']: mapped_line = {} for idx, col in enumerate(query_result['columns']): mapped_line[col] = res[idx] mapped_results.append(mapped_line) query_result['results'] = mapped_results ret['comment'] = six.text_type(query_result) if output == 'grain': if grain is not None and key is None: __salt__['grains.setval'](grain, query_result) ret['changes']['query'] = "Executed. Output into grain: "\ + grain elif grain is not None: if grain in __salt__['grains.ls'](): grain_value = __salt__['grains.get'](grain) else: grain_value = {} grain_value[key] = query_result __salt__['grains.setval'](grain, grain_value) ret['changes']['query'] = "Executed. Output into grain: "\ + grain + ":" + key elif output is not None: ret['changes']['query'] = "Executed. Output into " + output with salt.utils.files.fopen(output, 'w') as output_file: if 'results' in query_result: for res in query_result['results']: for col, val in six.iteritems(res): output_file.write( salt.utils.stringutils.to_str( col + ':' + val + '\n' ) ) else: output_file.write( salt.utils.stringutils.to_str(query_result) ) else: ret['changes']['query'] = "Executed" return ret
[ "def", "run_file", "(", "name", ",", "database", ",", "query_file", "=", "None", ",", "output", "=", "None", ",", "grain", "=", "None", ",", "key", "=", "None", ",", "overwrite", "=", "True", ",", "saltenv", "=", "None", ",", "check_db_exists", "=", "True", ",", "*", "*", "connection_args", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "'Database {0} is already present'", ".", "format", "(", "database", ")", "}", "if", "any", "(", "[", "query_file", ".", "startswith", "(", "proto", ")", "for", "proto", "in", "[", "'http://'", ",", "'https://'", ",", "'salt://'", ",", "'s3://'", ",", "'swift://'", "]", "]", ")", ":", "query_file", "=", "__salt__", "[", "'cp.cache_file'", "]", "(", "query_file", ",", "saltenv", "=", "saltenv", "or", "__env__", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "query_file", ")", ":", "ret", "[", "'comment'", "]", "=", "'File {0} does not exist'", ".", "format", "(", "query_file", ")", "ret", "[", "'result'", "]", "=", "False", "return", "ret", "# check if database exists", "if", "check_db_exists", "and", "not", "__salt__", "[", "'mysql.db_exists'", "]", "(", "database", ",", "*", "*", "connection_args", ")", ":", "err", "=", "_get_mysql_error", "(", ")", "if", "err", "is", "not", "None", ":", "ret", "[", "'comment'", "]", "=", "err", "ret", "[", "'result'", "]", "=", "False", "return", "ret", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "(", "'Database {0} is not present'", ")", ".", "format", "(", "database", ")", "return", "ret", "# Check if execution needed", "if", "output", "==", "'grain'", ":", "if", "grain", "is", "not", "None", "and", "key", "is", "None", ":", "if", "not", "overwrite", "and", "grain", "in", "__salt__", "[", "'grains.ls'", "]", "(", ")", ":", "ret", "[", "'comment'", "]", "=", "'No execution needed. Grain '", "+", "grain", "+", "' already set'", "return", "ret", "elif", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'Query would execute, storing result in '", "+", "'grain: '", "+", "grain", "return", "ret", "elif", "grain", "is", "not", "None", ":", "if", "grain", "in", "__salt__", "[", "'grains.ls'", "]", "(", ")", ":", "grain_value", "=", "__salt__", "[", "'grains.get'", "]", "(", "grain", ")", "else", ":", "grain_value", "=", "{", "}", "if", "not", "overwrite", "and", "key", "in", "grain_value", ":", "ret", "[", "'comment'", "]", "=", "'No execution needed. Grain '", "+", "grain", "+", "':'", "+", "key", "+", "' already set'", "return", "ret", "elif", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'Query would execute, storing result in '", "+", "'grain: '", "+", "grain", "+", "':'", "+", "key", "return", "ret", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "\"Error: output type 'grain' needs the grain \"", "+", "\"parameter\\n\"", "return", "ret", "elif", "output", "is", "not", "None", ":", "if", "not", "overwrite", "and", "os", ".", "path", ".", "isfile", "(", "output", ")", ":", "ret", "[", "'comment'", "]", "=", "'No execution needed. 
File '", "+", "output", "+", "' already set'", "return", "ret", "elif", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'Query would execute, storing result in '", "+", "'file: '", "+", "output", "return", "ret", "elif", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'Query would execute, not storing result'", "return", "ret", "# The database is present, execute the query", "query_result", "=", "__salt__", "[", "'mysql.file_query'", "]", "(", "database", ",", "query_file", ",", "*", "*", "connection_args", ")", "if", "query_result", "is", "False", ":", "ret", "[", "'result'", "]", "=", "False", "return", "ret", "mapped_results", "=", "[", "]", "if", "'results'", "in", "query_result", ":", "for", "res", "in", "query_result", "[", "'results'", "]", ":", "mapped_line", "=", "{", "}", "for", "idx", ",", "col", "in", "enumerate", "(", "query_result", "[", "'columns'", "]", ")", ":", "mapped_line", "[", "col", "]", "=", "res", "[", "idx", "]", "mapped_results", ".", "append", "(", "mapped_line", ")", "query_result", "[", "'results'", "]", "=", "mapped_results", "ret", "[", "'comment'", "]", "=", "six", ".", "text_type", "(", "query_result", ")", "if", "output", "==", "'grain'", ":", "if", "grain", "is", "not", "None", "and", "key", "is", "None", ":", "__salt__", "[", "'grains.setval'", "]", "(", "grain", ",", "query_result", ")", "ret", "[", "'changes'", "]", "[", "'query'", "]", "=", "\"Executed. Output into grain: \"", "+", "grain", "elif", "grain", "is", "not", "None", ":", "if", "grain", "in", "__salt__", "[", "'grains.ls'", "]", "(", ")", ":", "grain_value", "=", "__salt__", "[", "'grains.get'", "]", "(", "grain", ")", "else", ":", "grain_value", "=", "{", "}", "grain_value", "[", "key", "]", "=", "query_result", "__salt__", "[", "'grains.setval'", "]", "(", "grain", ",", "grain_value", ")", "ret", "[", "'changes'", "]", "[", "'query'", "]", "=", "\"Executed. Output into grain: \"", "+", "grain", "+", "\":\"", "+", "key", "elif", "output", "is", "not", "None", ":", "ret", "[", "'changes'", "]", "[", "'query'", "]", "=", "\"Executed. Output into \"", "+", "output", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "output", ",", "'w'", ")", "as", "output_file", ":", "if", "'results'", "in", "query_result", ":", "for", "res", "in", "query_result", "[", "'results'", "]", ":", "for", "col", ",", "val", "in", "six", ".", "iteritems", "(", "res", ")", ":", "output_file", ".", "write", "(", "salt", ".", "utils", ".", "stringutils", ".", "to_str", "(", "col", "+", "':'", "+", "val", "+", "'\\n'", ")", ")", "else", ":", "output_file", ".", "write", "(", "salt", ".", "utils", ".", "stringutils", ".", "to_str", "(", "query_result", ")", ")", "else", ":", "ret", "[", "'changes'", "]", "[", "'query'", "]", "=", "\"Executed\"", "return", "ret" ]
Execute an arbitrary query on the specified database .. versionadded:: 2017.7.0 name Used only as an ID database The name of the database to execute the query_file on query_file The file of mysql commands to run output grain: output in a grain other: the file to store results None: output to the result comment (default) grain: grain to store the output (need output=grain) key: the specified grain will be treated as a dictionary, the result of this state will be stored under the specified key. overwrite: The file or grain will be overwritten if it already exists (default) saltenv: The saltenv to pull the query_file from check_db_exists: The state run will check that the specified database exists (default=True) before running any queries
[ "Execute", "an", "arbitrary", "query", "on", "the", "specified", "database" ]
python
train
mpds-io/python-api-client
mpds_client/retrieve_MPDS.py
https://github.com/mpds-io/python-api-client/blob/edfdd79c6aac44d0a5f7f785e252a88acc95b6fe/mpds_client/retrieve_MPDS.py#L236-L316
def get_data(self, search, phases=None, fields=default_fields): """ Retrieve data in JSON. JSON is expected to be valid against the schema at https://developer.mpds.io/mpds.schema.json Args: search: (dict) Search query like {"categ_A": "val_A", "categ_B": "val_B"}, documented at https://developer.mpds.io/#Categories phases: (list) Phase IDs, according to the MPDS distinct phases concept fields: (dict) Data of interest for C-, S-, and P-entries, e.g. for phase diagrams: {'C': ['naxes', 'arity', 'shapes']}, documented at https://developer.mpds.io/#JSON-schemata Returns: List of dicts: C-, S-, and P-entries, the format is documented at https://developer.mpds.io/#JSON-schemata """ output = [] fields = { key: [jmespath.compile(item) if isinstance(item, str) else item() for item in value] for key, value in fields.items() } if fields else None tot_count = 0 phases = list(set(phases)) if phases else [] if len(phases) > self.maxnphases: all_phases = array_split(phases, int(math.ceil( len(phases)/self.maxnphases ))) else: all_phases = [phases] nsteps = len(all_phases) for step, current_phases in enumerate(all_phases, start=1): counter, hits_count = 0, 0 while True: result = self._request(search, phases=list(current_phases), page=counter) if result['error']: raise APIError(result['error'], result.get('code', 0)) if result['npages'] > self.maxnpages: raise APIError( "Too many hits (%s > %s), please, be more specific" % \ (result['count'], self.maxnpages * self.pagesize), 2 ) output.extend(self._massage(result['out'], fields)) if hits_count and hits_count != result['count']: raise APIError("API error: hits count has been changed during the query") hits_count = result['count'] time.sleep(self.chillouttime) if counter == result['npages'] - 1: break counter += 1 if self.verbose: sys.stdout.write("\r\t%d%% of step %s from %s" % ( (counter/result['npages']) * 100, step, nsteps) ) sys.stdout.flush() tot_count += hits_count if len(output) != tot_count: raise APIError("API error: collected and declared counts of hits differ") if self.verbose: sys.stdout.write("Got %s hits\r\n" % tot_count) sys.stdout.flush() return output
[ "def", "get_data", "(", "self", ",", "search", ",", "phases", "=", "None", ",", "fields", "=", "default_fields", ")", ":", "output", "=", "[", "]", "fields", "=", "{", "key", ":", "[", "jmespath", ".", "compile", "(", "item", ")", "if", "isinstance", "(", "item", ",", "str", ")", "else", "item", "(", ")", "for", "item", "in", "value", "]", "for", "key", ",", "value", "in", "fields", ".", "items", "(", ")", "}", "if", "fields", "else", "None", "tot_count", "=", "0", "phases", "=", "list", "(", "set", "(", "phases", ")", ")", "if", "phases", "else", "[", "]", "if", "len", "(", "phases", ")", ">", "self", ".", "maxnphases", ":", "all_phases", "=", "array_split", "(", "phases", ",", "int", "(", "math", ".", "ceil", "(", "len", "(", "phases", ")", "/", "self", ".", "maxnphases", ")", ")", ")", "else", ":", "all_phases", "=", "[", "phases", "]", "nsteps", "=", "len", "(", "all_phases", ")", "for", "step", ",", "current_phases", "in", "enumerate", "(", "all_phases", ",", "start", "=", "1", ")", ":", "counter", ",", "hits_count", "=", "0", ",", "0", "while", "True", ":", "result", "=", "self", ".", "_request", "(", "search", ",", "phases", "=", "list", "(", "current_phases", ")", ",", "page", "=", "counter", ")", "if", "result", "[", "'error'", "]", ":", "raise", "APIError", "(", "result", "[", "'error'", "]", ",", "result", ".", "get", "(", "'code'", ",", "0", ")", ")", "if", "result", "[", "'npages'", "]", ">", "self", ".", "maxnpages", ":", "raise", "APIError", "(", "\"Too many hits (%s > %s), please, be more specific\"", "%", "(", "result", "[", "'count'", "]", ",", "self", ".", "maxnpages", "*", "self", ".", "pagesize", ")", ",", "2", ")", "output", ".", "extend", "(", "self", ".", "_massage", "(", "result", "[", "'out'", "]", ",", "fields", ")", ")", "if", "hits_count", "and", "hits_count", "!=", "result", "[", "'count'", "]", ":", "raise", "APIError", "(", "\"API error: hits count has been changed during the query\"", ")", "hits_count", "=", "result", "[", "'count'", "]", "time", ".", "sleep", "(", "self", ".", "chillouttime", ")", "if", "counter", "==", "result", "[", "'npages'", "]", "-", "1", ":", "break", "counter", "+=", "1", "if", "self", ".", "verbose", ":", "sys", ".", "stdout", ".", "write", "(", "\"\\r\\t%d%% of step %s from %s\"", "%", "(", "(", "counter", "/", "result", "[", "'npages'", "]", ")", "*", "100", ",", "step", ",", "nsteps", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "tot_count", "+=", "hits_count", "if", "len", "(", "output", ")", "!=", "tot_count", ":", "raise", "APIError", "(", "\"API error: collected and declared counts of hits differ\"", ")", "if", "self", ".", "verbose", ":", "sys", ".", "stdout", ".", "write", "(", "\"Got %s hits\\r\\n\"", "%", "tot_count", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "return", "output" ]
Retrieve data in JSON. JSON is expected to be valid against the schema at https://developer.mpds.io/mpds.schema.json Args: search: (dict) Search query like {"categ_A": "val_A", "categ_B": "val_B"}, documented at https://developer.mpds.io/#Categories phases: (list) Phase IDs, according to the MPDS distinct phases concept fields: (dict) Data of interest for C-, S-, and P-entries, e.g. for phase diagrams: {'C': ['naxes', 'arity', 'shapes']}, documented at https://developer.mpds.io/#JSON-schemata Returns: List of dicts: C-, S-, and P-entries, the format is documented at https://developer.mpds.io/#JSON-schemata
[ "Retrieve", "data", "in", "JSON", ".", "JSON", "is", "expected", "to", "be", "valid", "against", "the", "schema", "at", "https", ":", "//", "developer", ".", "mpds", ".", "io", "/", "mpds", ".", "schema", ".", "json" ]
python
train
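A hedged usage sketch; MPDSDataRetrieval is the client class defined in this module, while the search categories and S-entry field names below are illustrative values of the documented query format, not taken from the record.

client = MPDSDataRetrieval(api_key='YOUR_KEY')
entries = client.get_data(
    {"elements": "K-Ag", "classes": "iodide"},
    fields={'S': ['phase_id', 'entry', 'chemical_formula']}
)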
materialsvirtuallab/monty
monty/pprint.py
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/pprint.py#L10-L42
def pprint_table(table, out=sys.stdout, rstrip=False): """ Prints out a table of data, padded for alignment Each row must have the same number of columns. Args: table: The table to print. A list of lists. out: Output stream (file-like object) rstrip: if True, trailing whitespace is removed from the entries. """ def max_width_col(table, col_idx): """ Get the maximum width of the given column index """ return max([len(row[col_idx]) for row in table]) if rstrip: for row_idx, row in enumerate(table): table[row_idx] = [c.rstrip() for c in row] col_paddings = [] ncols = len(table[0]) for i in range(ncols): col_paddings.append(max_width_col(table, i)) for row in table: # left col out.write(row[0].ljust(col_paddings[0] + 1)) # rest of the cols for i in range(1, len(row)): col = row[i].rjust(col_paddings[i] + 2) out.write(col) out.write("\n")
[ "def", "pprint_table", "(", "table", ",", "out", "=", "sys", ".", "stdout", ",", "rstrip", "=", "False", ")", ":", "def", "max_width_col", "(", "table", ",", "col_idx", ")", ":", "\"\"\"\n Get the maximum width of the given column index\n \"\"\"", "return", "max", "(", "[", "len", "(", "row", "[", "col_idx", "]", ")", "for", "row", "in", "table", "]", ")", "if", "rstrip", ":", "for", "row_idx", ",", "row", "in", "enumerate", "(", "table", ")", ":", "table", "[", "row_idx", "]", "=", "[", "c", ".", "rstrip", "(", ")", "for", "c", "in", "row", "]", "col_paddings", "=", "[", "]", "ncols", "=", "len", "(", "table", "[", "0", "]", ")", "for", "i", "in", "range", "(", "ncols", ")", ":", "col_paddings", ".", "append", "(", "max_width_col", "(", "table", ",", "i", ")", ")", "for", "row", "in", "table", ":", "# left col", "out", ".", "write", "(", "row", "[", "0", "]", ".", "ljust", "(", "col_paddings", "[", "0", "]", "+", "1", ")", ")", "# rest of the cols", "for", "i", "in", "range", "(", "1", ",", "len", "(", "row", ")", ")", ":", "col", "=", "row", "[", "i", "]", ".", "rjust", "(", "col_paddings", "[", "i", "]", "+", "2", ")", "out", ".", "write", "(", "col", ")", "out", ".", "write", "(", "\"\\n\"", ")" ]
Prints out a table of data, padded for alignment Each row must have the same number of columns. Args: table: The table to print. A list of lists. out: Output stream (file-like object) rstrip: if True, trailing whitespace is removed from the entries.
[ "Prints", "out", "a", "table", "of", "data", "padded", "for", "alignment", "Each", "row", "must", "have", "the", "same", "number", "of", "columns", "." ]
python
train
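A small usage sketch. Every cell must already be a string, since the function measures len() and pads with ljust/rjust; the first column comes out left-aligned and the remaining ones right-aligned.

table = [['metric', 'value'],
         ['count', '42'],
         ['mean', '3.14']]
pprint_table(table)  # writes the padded rows to sys.stdout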
pypa/bandersnatch
src/bandersnatch_filter_plugins/whitelist_name.py
https://github.com/pypa/bandersnatch/blob/8b702c3bc128c5a1cbdd18890adede2f7f17fad4/src/bandersnatch_filter_plugins/whitelist_name.py#L28-L48
def _determine_unfiltered_package_names(self): """ Return a list of package names to be filtered based on the configuration file. """ # This plugin only processes packages, if the line in the packages # configuration contains a PEP440 specifier it will be processed by the # blacklist release filter. So we need to remove any packages that # are not applicable for this plugin. unfiltered_packages = set() try: lines = self.configuration["whitelist"]["packages"] package_lines = lines.split("\n") except KeyError: package_lines = [] for package_line in package_lines: package_line = package_line.strip() if not package_line or package_line.startswith("#"): continue unfiltered_packages.add(package_line) return list(unfiltered_packages)
[ "def", "_determine_unfiltered_package_names", "(", "self", ")", ":", "# This plugin only processes packages, if the line in the packages", "# configuration contains a PEP440 specifier it will be processed by the", "# blacklist release filter. So we need to remove any packages that", "# are not applicable for this plugin.", "unfiltered_packages", "=", "set", "(", ")", "try", ":", "lines", "=", "self", ".", "configuration", "[", "\"whitelist\"", "]", "[", "\"packages\"", "]", "package_lines", "=", "lines", ".", "split", "(", "\"\\n\"", ")", "except", "KeyError", ":", "package_lines", "=", "[", "]", "for", "package_line", "in", "package_lines", ":", "package_line", "=", "package_line", ".", "strip", "(", ")", "if", "not", "package_line", "or", "package_line", ".", "startswith", "(", "\"#\"", ")", ":", "continue", "unfiltered_packages", ".", "add", "(", "package_line", ")", "return", "list", "(", "unfiltered_packages", ")" ]
Return a list of package names to be filtered based on the configuration file.
[ "Return", "a", "list", "of", "package", "names", "to", "be", "filtered", "based", "on", "the", "configuration", "file", "." ]
python
train
reingart/gui2py
gui/graphic.py
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/graphic.py#L153-L190
def _getbitmap_type( self, filename ) : """ Get the type of an image from the file's extension ( .jpg, etc. ) """ # KEA 2001-07-27 # was #name, ext = filename.split( '.' ) #ext = ext.upper() if filename is None or filename == '': return None name, ext = os.path.splitext(filename) ext = ext[1:].upper() if ext == 'BMP': return wx.BITMAP_TYPE_BMP elif ext == 'GIF': return wx.BITMAP_TYPE_GIF elif ext == 'JPG' or ext == 'JPEG': return wx.BITMAP_TYPE_JPEG elif ext == 'PCX': return wx.BITMAP_TYPE_PCX #elif ext == 'PICT': # return wx.BITMAP_TYPE_PICT elif ext == 'PNG': return wx.BITMAP_TYPE_PNG elif ext == 'PNM': return wx.BITMAP_TYPE_PNM elif ext == 'TIF' or ext == 'TIFF': return wx.BITMAP_TYPE_TIF elif ext == 'XBM': return wx.BITMAP_TYPE_XBM elif ext == 'XPM': return wx.BITMAP_TYPE_XPM else: # KEA 2001-10-10 # rather than throw an exception, we could try and have wxPython figure out the image # type by returning wxBITMAP_TYPE_ANY raise RuntimeError('invalid graphics format')
[ "def", "_getbitmap_type", "(", "self", ",", "filename", ")", ":", "# KEA 2001-07-27\r", "# was\r", "#name, ext = filename.split( '.' )\r", "#ext = ext.upper()\r", "if", "filename", "is", "None", "or", "filename", "==", "''", ":", "return", "None", "name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "ext", "=", "ext", "[", "1", ":", "]", ".", "upper", "(", ")", "if", "ext", "==", "'BMP'", ":", "return", "wx", ".", "BITMAP_TYPE_BMP", "elif", "ext", "==", "'GIF'", ":", "return", "wx", ".", "BITMAP_TYPE_GIF", "elif", "ext", "==", "'JPG'", "or", "ext", "==", "'JPEG'", ":", "return", "wx", ".", "BITMAP_TYPE_JPEG", "elif", "ext", "==", "'PCX'", ":", "return", "wx", ".", "BITMAP_TYPE_PCX", "#elif ext == 'PICT':\r", "# return wx.BITMAP_TYPE_PICT\r", "elif", "ext", "==", "'PNG'", ":", "return", "wx", ".", "BITMAP_TYPE_PNG", "elif", "ext", "==", "'PNM'", ":", "return", "wx", ".", "BITMAP_TYPE_PNM", "elif", "ext", "==", "'TIF'", "or", "ext", "==", "'TIFF'", ":", "return", "wx", ".", "BITMAP_TYPE_TIF", "elif", "ext", "==", "'XBM'", ":", "return", "wx", ".", "BITMAP_TYPE_XBM", "elif", "ext", "==", "'XPM'", ":", "return", "wx", ".", "BITMAP_TYPE_XPM", "else", ":", "# KEA 2001-10-10\r", "# rather than throw an exception, we could try and have wxPython figure out the image\r", "# type by returning wxBITMAP_TYPE_ANY\r", "raise", "RuntimeError", "(", "'invalid graphics format'", ")" ]
Get the type of an image from the file's extension ( .jpg, etc. )
[ "Get", "the", "type", "of", "an", "image", "from", "the", "file", "s", "extension", "(", ".", "jpg", "etc", ".", ")" ]
python
test
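The elif ladder above is a fixed extension-to-constant mapping, so a dict lookup expresses the same table more compactly; a sketch (empty-filename behavior kept as in the original):

import os
import wx

BITMAP_TYPES = {
    'BMP': wx.BITMAP_TYPE_BMP, 'GIF': wx.BITMAP_TYPE_GIF,
    'JPG': wx.BITMAP_TYPE_JPEG, 'JPEG': wx.BITMAP_TYPE_JPEG,
    'PCX': wx.BITMAP_TYPE_PCX, 'PNG': wx.BITMAP_TYPE_PNG,
    'PNM': wx.BITMAP_TYPE_PNM, 'TIF': wx.BITMAP_TYPE_TIF,
    'TIFF': wx.BITMAP_TYPE_TIF, 'XBM': wx.BITMAP_TYPE_XBM,
    'XPM': wx.BITMAP_TYPE_XPM,
}

def bitmap_type(filename):
    if not filename:
        return None
    ext = os.path.splitext(filename)[1][1:].upper()
    try:
        return BITMAP_TYPES[ext]
    except KeyError:
        raise RuntimeError('invalid graphics format')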
saimn/sigal
sigal/image.py
https://github.com/saimn/sigal/blob/912ca39991355d358dc85fd55c7aeabdd7acc386/sigal/image.py#L140-L156
def generate_thumbnail(source, outname, box, fit=True, options=None, thumb_fit_centering=(0.5, 0.5)): """Create a thumbnail image.""" logger = logging.getLogger(__name__) img = _read_image(source) original_format = img.format if fit: img = ImageOps.fit(img, box, PILImage.ANTIALIAS, centering=thumb_fit_centering) else: img.thumbnail(box, PILImage.ANTIALIAS) outformat = img.format or original_format or 'JPEG' logger.debug('Save thumnail image: %s (%s)', outname, outformat) save_image(img, outname, outformat, options=options, autoconvert=True)
[ "def", "generate_thumbnail", "(", "source", ",", "outname", ",", "box", ",", "fit", "=", "True", ",", "options", "=", "None", ",", "thumb_fit_centering", "=", "(", "0.5", ",", "0.5", ")", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "img", "=", "_read_image", "(", "source", ")", "original_format", "=", "img", ".", "format", "if", "fit", ":", "img", "=", "ImageOps", ".", "fit", "(", "img", ",", "box", ",", "PILImage", ".", "ANTIALIAS", ",", "centering", "=", "thumb_fit_centering", ")", "else", ":", "img", ".", "thumbnail", "(", "box", ",", "PILImage", ".", "ANTIALIAS", ")", "outformat", "=", "img", ".", "format", "or", "original_format", "or", "'JPEG'", "logger", ".", "debug", "(", "'Save thumnail image: %s (%s)'", ",", "outname", ",", "outformat", ")", "save_image", "(", "img", ",", "outname", ",", "outformat", ",", "options", "=", "options", ",", "autoconvert", "=", "True", ")" ]
Create a thumbnail image.
[ "Create", "a", "thumbnail", "image", "." ]
python
valid
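A usage sketch with illustrative paths. With fit=True the image is cropped to exactly the box via ImageOps.fit (positioned per thumb_fit_centering); with fit=False, thumbnail() only shrinks it to fit inside the box, preserving aspect ratio.

generate_thumbnail('photos/img_001.jpg', 'thumbs/img_001.jpg',
                   box=(280, 210), fit=True)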
senaite/senaite.core
bika/lims/content/abstractanalysis.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/abstractanalysis.py#L699-L714
def isMethodAllowed(self, method): """Checks if the analysis can follow the method specified, either if the method was assigned directly (by using "Allows manual entry of results") or indirectly via Instrument ("Allows instrument entry of results") in Analysis Service Edit view. Param method can be either a uid or an object :param method: string,Method :return: True if the analysis can follow the method specified :rtype: bool """ if isinstance(method, str): uid = method else: uid = method.UID() return uid in self.getAllowedMethodUIDs()
[ "def", "isMethodAllowed", "(", "self", ",", "method", ")", ":", "if", "isinstance", "(", "method", ",", "str", ")", ":", "uid", "=", "method", "else", ":", "uid", "=", "method", ".", "UID", "(", ")", "return", "uid", "in", "self", ".", "getAllowedMethodUIDs", "(", ")" ]
Checks if the analysis can follow the method specified, either if the method was assigned directly (by using "Allows manual entry of results") or indirectly via Instrument ("Allows instrument entry of results") in Analysis Service Edit view. Param method can be either a uid or an object :param method: string,Method :return: True if the analysis can follow the method specified :rtype: bool
[ "Checks", "if", "the", "analysis", "can", "follow", "the", "method", "specified", "either", "if", "the", "method", "was", "assigned", "directly", "(", "by", "using", "Allows", "manual", "entry", "of", "results", ")", "or", "indirectly", "via", "Instrument", "(", "Allows", "instrument", "entry", "of", "results", ")", "in", "Analysis", "Service", "Edit", "view", ".", "Param", "method", "can", "be", "either", "a", "uid", "or", "an", "object", ":", "param", "method", ":", "string", "Method", ":", "return", ":", "True", "if", "the", "analysis", "can", "follow", "the", "method", "specified", ":", "rtype", ":", "bool" ]
python
train
gitpython-developers/GitPython
git/remote.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/remote.py#L602-L613
def create(cls, repo, name, url, **kwargs): """Create a new remote to the given repository :param repo: Repository instance that is to receive the new remote :param name: Desired name of the remote :param url: URL which corresponds to the remote's name :param kwargs: Additional arguments to be passed to the git-remote add command :return: New Remote instance :raise GitCommandError: in case an origin with that name already exists""" scmd = 'add' kwargs['insert_kwargs_after'] = scmd repo.git.remote(scmd, name, Git.polish_url(url), **kwargs) return cls(repo, name)
[ "def", "create", "(", "cls", ",", "repo", ",", "name", ",", "url", ",", "*", "*", "kwargs", ")", ":", "scmd", "=", "'add'", "kwargs", "[", "'insert_kwargs_after'", "]", "=", "scmd", "repo", ".", "git", ".", "remote", "(", "scmd", ",", "name", ",", "Git", ".", "polish_url", "(", "url", ")", ",", "*", "*", "kwargs", ")", "return", "cls", "(", "repo", ",", "name", ")" ]
Create a new remote to the given repository :param repo: Repository instance that is to receive the new remote :param name: Desired name of the remote :param url: URL which corresponds to the remote's name :param kwargs: Additional arguments to be passed to the git-remote add command :return: New Remote instance :raise GitCommandError: in case an origin with that name already exists
[ "Create", "a", "new", "remote", "to", "the", "given", "repository", ":", "param", "repo", ":", "Repository", "instance", "that", "is", "to", "receive", "the", "new", "remote", ":", "param", "name", ":", "Desired", "name", "of", "the", "remote", ":", "param", "url", ":", "URL", "which", "corresponds", "to", "the", "remote", "s", "name", ":", "param", "kwargs", ":", "Additional", "arguments", "to", "be", "passed", "to", "the", "git", "-", "remote", "add", "command", ":", "return", ":", "New", "Remote", "instance", ":", "raise", "GitCommandError", ":", "in", "case", "an", "origin", "with", "that", "name", "already", "exists" ]
python
train
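A hedged usage sketch; the checkout path and URL are placeholders. GitPython also exposes the same operation as Repo.create_remote, a convenience wrapper around this classmethod.

import git

repo = git.Repo('/path/to/checkout')
upstream = git.Remote.create(repo, 'upstream',
                             'https://github.com/example/project.git')
# equivalently: repo.create_remote('upstream', '<url>')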
bxlab/bx-python
lib/bx_extras/stats.py
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/stats.py#L1403-L1421
def lksprob(alam): """ Computes a Kolmogorov-Smirnov t-test significance level. Adapted from Numerical Recipes. Usage: lksprob(alam) """ fac = 2.0 sum = 0.0 termbf = 0.0 a2 = -2.0*alam*alam for j in range(1,201): term = fac*math.exp(a2*j*j) sum = sum + term if math.fabs(term) <= (0.001*termbf) or math.fabs(term) < (1.0e-8*sum): return sum fac = -fac termbf = math.fabs(term) return 1.0
[ "def", "lksprob", "(", "alam", ")", ":", "fac", "=", "2.0", "sum", "=", "0.0", "termbf", "=", "0.0", "a2", "=", "-", "2.0", "*", "alam", "*", "alam", "for", "j", "in", "range", "(", "1", ",", "201", ")", ":", "term", "=", "fac", "*", "math", ".", "exp", "(", "a2", "*", "j", "*", "j", ")", "sum", "=", "sum", "+", "term", "if", "math", ".", "fabs", "(", "term", ")", "<=", "(", "0.001", "*", "termbf", ")", "or", "math", ".", "fabs", "(", "term", ")", "<", "(", "1.0e-8", "*", "sum", ")", ":", "return", "sum", "fac", "=", "-", "fac", "termbf", "=", "math", ".", "fabs", "(", "term", ")", "return", "1.0" ]
Computes a Kolmogorov-Smirnov t-test significance level. Adapted from Numerical Recipes. Usage: lksprob(alam)
[ "Computes", "a", "Kolmogorov", "-", "Smirnov", "t", "-", "test", "significance", "level", ".", "Adapted", "from", "Numerical", "Recipes", "." ]
python
train
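The loop above evaluates the Kolmogorov-Smirnov tail probability as the alternating series

Q_{KS}(\lambda) = 2 \sum_{j=1}^{\infty} (-1)^{j-1} e^{-2 j^{2} \lambda^{2}}

truncated once a term falls below 0.001 of the previous term or 1e-8 of the running sum; if 200 terms fail to converge, the function falls through and returns 1.0, i.e. no significance.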
pycontribs/pyrax
pyrax/clouddatabases.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddatabases.py#L552-L557
def list_backups(self, limit=20, marker=0): """ Returns a paginated list of backups for this instance. """ return self.manager._list_backups_for_instance(self, limit=limit, marker=marker)
[ "def", "list_backups", "(", "self", ",", "limit", "=", "20", ",", "marker", "=", "0", ")", ":", "return", "self", ".", "manager", ".", "_list_backups_for_instance", "(", "self", ",", "limit", "=", "limit", ",", "marker", "=", "marker", ")" ]
Returns a paginated list of backups for this instance.
[ "Returns", "a", "paginated", "list", "of", "backups", "for", "this", "instance", "." ]
python
train
moonso/vcftoolbox
vcftoolbox/header_parser.py
https://github.com/moonso/vcftoolbox/blob/438fb1d85a83812c389774b94802eb5921c89e3a/vcftoolbox/header_parser.py#L249-L269
def remove_header(self, name): """Remove a field from the header""" if name in self.info_dict: self.info_dict.pop(name) logger.info("Removed '{0}' from INFO".format(name)) if name in self.filter_dict: self.filter_dict.pop(name) logger.info("Removed '{0}' from FILTER".format(name)) if name in self.format_dict: self.format_dict.pop(name) logger.info("Removed '{0}' from FORMAT".format(name)) if name in self.contig_dict: self.contig_dict.pop(name) logger.info("Removed '{0}' from CONTIG".format(name)) if name in self.alt_dict: self.alt_dict.pop(name) logger.info("Removed '{0}' from ALT".format(name)) if name in self.other_dict: self.other_dict.pop(name) logger.info("Removed '{0}' from OTHER".format(name)) return
[ "def", "remove_header", "(", "self", ",", "name", ")", ":", "if", "name", "in", "self", ".", "info_dict", ":", "self", ".", "info_dict", ".", "pop", "(", "name", ")", "logger", ".", "info", "(", "\"Removed '{0}' from INFO\"", ".", "format", "(", "name", ")", ")", "if", "name", "in", "self", ".", "filter_dict", ":", "self", ".", "filter_dict", ".", "pop", "(", "name", ")", "logger", ".", "info", "(", "\"Removed '{0}' from FILTER\"", ".", "format", "(", "name", ")", ")", "if", "name", "in", "self", ".", "format_dict", ":", "self", ".", "format_dict", ".", "pop", "(", "name", ")", "logger", ".", "info", "(", "\"Removed '{0}' from FORMAT\"", ".", "format", "(", "name", ")", ")", "if", "name", "in", "self", ".", "contig_dict", ":", "self", ".", "contig_dict", ".", "pop", "(", "name", ")", "logger", ".", "info", "(", "\"Removed '{0}' from CONTIG\"", ".", "format", "(", "name", ")", ")", "if", "name", "in", "self", ".", "alt_dict", ":", "self", ".", "alt_dict", ".", "pop", "(", "name", ")", "logger", ".", "info", "(", "\"Removed '{0}' from ALT\"", ".", "format", "(", "name", ")", ")", "if", "name", "in", "self", ".", "other_dict", ":", "self", ".", "other_dict", ".", "pop", "(", "name", ")", "logger", ".", "info", "(", "\"Removed '{0}' from OTHER\"", ".", "format", "(", "name", ")", ")", "return" ]
Remove a field from the header
[ "Remove", "a", "field", "from", "the", "header" ]
python
train
mozillazg/python-shanbay
shanbay/team.py
https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/team.py#L116-L121
def members(self): """Get the information list of all members of the team""" all_members = [] for page in range(1, self.max_page() + 1): all_members.extend(self.single_page_members(page)) return all_members
[ "def", "members", "(", "self", ")", ":", "all_members", "=", "[", "]", "for", "page", "in", "range", "(", "1", ",", "self", ".", "max_page", "(", ")", "+", "1", ")", ":", "all_members", ".", "extend", "(", "self", ".", "single_page_members", "(", "page", ")", ")", "return", "all_members" ]
Get the information list of all members of the team
[ "Get", "the", "information", "list", "of", "all", "members", "of", "the", "team" ]
python
train
DinoTools/python-overpy
overpy/__init__.py
https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1464-L1486
def _handle_start_node(self, attrs): """ Handle opening node element :param attrs: Attributes of the element :type attrs: Dict """ self._curr = { 'attributes': dict(attrs), 'lat': None, 'lon': None, 'node_id': None, 'tags': {} } if attrs.get('id', None) is not None: self._curr['node_id'] = int(attrs['id']) del self._curr['attributes']['id'] if attrs.get('lat', None) is not None: self._curr['lat'] = Decimal(attrs['lat']) del self._curr['attributes']['lat'] if attrs.get('lon', None) is not None: self._curr['lon'] = Decimal(attrs['lon']) del self._curr['attributes']['lon']
[ "def", "_handle_start_node", "(", "self", ",", "attrs", ")", ":", "self", ".", "_curr", "=", "{", "'attributes'", ":", "dict", "(", "attrs", ")", ",", "'lat'", ":", "None", ",", "'lon'", ":", "None", ",", "'node_id'", ":", "None", ",", "'tags'", ":", "{", "}", "}", "if", "attrs", ".", "get", "(", "'id'", ",", "None", ")", "is", "not", "None", ":", "self", ".", "_curr", "[", "'node_id'", "]", "=", "int", "(", "attrs", "[", "'id'", "]", ")", "del", "self", ".", "_curr", "[", "'attributes'", "]", "[", "'id'", "]", "if", "attrs", ".", "get", "(", "'lat'", ",", "None", ")", "is", "not", "None", ":", "self", ".", "_curr", "[", "'lat'", "]", "=", "Decimal", "(", "attrs", "[", "'lat'", "]", ")", "del", "self", ".", "_curr", "[", "'attributes'", "]", "[", "'lat'", "]", "if", "attrs", ".", "get", "(", "'lon'", ",", "None", ")", "is", "not", "None", ":", "self", ".", "_curr", "[", "'lon'", "]", "=", "Decimal", "(", "attrs", "[", "'lon'", "]", ")", "del", "self", ".", "_curr", "[", "'attributes'", "]", "[", "'lon'", "]" ]
Handle opening node element :param attrs: Attributes of the element :type attrs: Dict
[ "Handle", "opening", "node", "element" ]
python
train
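A minimal, self-contained rerun of the attribute handling above; the dict shape and Decimal conversion mirror the record, but parse_node_attrs is a hypothetical helper, not overpy API:

from decimal import Decimal

def parse_node_attrs(attrs):
    curr = {'attributes': dict(attrs), 'lat': None, 'lon': None,
            'node_id': None, 'tags': {}}
    if attrs.get('id') is not None:
        curr['node_id'] = int(attrs['id'])   # ids are integral
        del curr['attributes']['id']
    for key in ('lat', 'lon'):
        if attrs.get(key) is not None:
            curr[key] = Decimal(attrs[key])  # Decimal avoids float rounding
            del curr['attributes'][key]
    return curr

print(parse_node_attrs({'id': '42', 'lat': '48.1', 'lon': '11.5', 'version': '3'}))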
pywbem/pywbem
pywbem/_statistics.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/_statistics.py#L508-L543
def formatted(self, include_server_time): """ Return a formatted one-line string with the statistics values for the operation for which this statistics object maintains data. This is a low-level method that is called by :meth:`pywbem.Statistics.formatted`. """ if include_server_time: # pylint: disable=no-else-return return ('{0:5d} {1:5d} ' '{2:7.3f} {3:7.3f} {4:7.3f} ' '{5:7.3f} {6:7.3f} {7:7.3f} ' '{8:6.0f} {9:6.0f} {10:6.0f} ' '{11:8.0f} {12:8.0f} {13:8.0f} {14}\n'. format(self.count, self.exception_count, self.avg_time, self.min_time, self.max_time, self.avg_server_time, self.min_server_time, self.max_server_time, self.avg_request_len, self.min_request_len, self.max_request_len, self.avg_reply_len, self.min_reply_len, self.max_reply_len, self.name)) else: return ('{0:5d} {1:5d} ' '{2:7.3f} {3:7.3f} {4:7.3f} ' '{5:6.0f} {6:6.0f} {7:6.0f} ' '{8:6.0f} {9:8.0f} {10:8.0f} {11}\n'. format(self.count, self.exception_count, self.avg_time, self.min_time, self.max_time, self.avg_request_len, self.min_request_len, self.max_request_len, self.avg_reply_len, self.min_reply_len, self.max_reply_len, self.name))
[ "def", "formatted", "(", "self", ",", "include_server_time", ")", ":", "if", "include_server_time", ":", "# pylint: disable=no-else-return", "return", "(", "'{0:5d} {1:5d} '", "'{2:7.3f} {3:7.3f} {4:7.3f} '", "'{5:7.3f} {6:7.3f} {7:7.3f} '", "'{8:6.0f} {9:6.0f} {10:6.0f} '", "'{11:8.0f} {12:8.0f} {13:8.0f} {14}\\n'", ".", "format", "(", "self", ".", "count", ",", "self", ".", "exception_count", ",", "self", ".", "avg_time", ",", "self", ".", "min_time", ",", "self", ".", "max_time", ",", "self", ".", "avg_server_time", ",", "self", ".", "min_server_time", ",", "self", ".", "max_server_time", ",", "self", ".", "avg_request_len", ",", "self", ".", "min_request_len", ",", "self", ".", "max_request_len", ",", "self", ".", "avg_reply_len", ",", "self", ".", "min_reply_len", ",", "self", ".", "max_reply_len", ",", "self", ".", "name", ")", ")", "else", ":", "return", "(", "'{0:5d} {1:5d} '", "'{2:7.3f} {3:7.3f} {4:7.3f} '", "'{5:6.0f} {6:6.0f} {7:6.0f} '", "'{8:6.0f} {9:8.0f} {10:8.0f} {11}\\n'", ".", "format", "(", "self", ".", "count", ",", "self", ".", "exception_count", ",", "self", ".", "avg_time", ",", "self", ".", "min_time", ",", "self", ".", "max_time", ",", "self", ".", "avg_request_len", ",", "self", ".", "min_request_len", ",", "self", ".", "max_request_len", ",", "self", ".", "avg_reply_len", ",", "self", ".", "min_reply_len", ",", "self", ".", "max_reply_len", ",", "self", ".", "name", ")", ")" ]
Return a formatted one-line string with the statistics values for the operation for which this statistics object maintains data. This is a low-level method that is called by :meth:`pywbem.Statistics.formatted`.
[ "Return", "a", "formatted", "one", "-", "line", "string", "with", "the", "statistics", "values", "for", "the", "operation", "for", "which", "this", "statistics", "object", "maintains", "data", "." ]
python
train
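The column alignment comes straight from Python format specs; a tiny demonstration of the widths used above:

row = '{0:5d} {1:5d} {2:7.3f} {3:7.3f} {4:7.3f}'.format(12, 0, 0.0421, 0.0103, 0.1987)
print(repr(row))  # '   12     0   0.042   0.010   0.199': 5-wide ints, 7-wide 3-decimal floats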
base4sistemas/pyescpos
escpos/conn/serial.py
https://github.com/base4sistemas/pyescpos/blob/621bd00f1499aff700f37d8d36d04e0d761708f1/escpos/conn/serial.py#L131-L139
def get_parities(): """ Returns supported parities in a Django-like choices tuples. """ parities = [] s = pyserial.Serial() for name, value in s.getSupportedParities(): parities.append((value, name,)) return tuple(parities)
[ "def", "get_parities", "(", ")", ":", "parities", "=", "[", "]", "s", "=", "pyserial", ".", "Serial", "(", ")", "for", "name", ",", "value", "in", "s", ".", "getSupportedParities", "(", ")", ":", "parities", ".", "append", "(", "(", "value", ",", "name", ",", ")", ")", "return", "tuple", "(", "parities", ")" ]
Returns supported parities in a Django-like choices tuples.
[ "Returns", "supported", "parities", "in", "a", "Django", "-", "like", "choices", "tuples", "." ]
python
train
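s.getSupportedParities() is not part of current pyserial, so a hedged equivalent built on the PARITY_NAMES mapping that pyserial does ship (assuming pyserial is installed) could look like this:

import serial

# PARITY_NAMES maps constant -> human name, e.g. 'N' -> 'None'
parities = tuple((value, name) for value, name in serial.PARITY_NAMES.items())
print(parities)  # Django-like choices: (('N', 'None'), ('E', 'Even'), ...)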
numberoverzero/accordian
accordian.py
https://github.com/numberoverzero/accordian/blob/f1fe44dc9c646006418017bbf70f597b180c8b97/accordian.py#L118-L131
def unregister(self, event): """ Remove all registered handlers for an event. Silent return when event was not registered. Usage: dispatch.unregister("my_event") dispatch.unregister("my_event") # no-op """ if self.running: raise RuntimeError("Can't unregister while running") self._handlers.pop(event, None)
[ "def", "unregister", "(", "self", ",", "event", ")", ":", "if", "self", ".", "running", ":", "raise", "RuntimeError", "(", "\"Can't unregister while running\"", ")", "self", ".", "_handlers", ".", "pop", "(", "event", ",", "None", ")" ]
Remove all registered handlers for an event. Silent return when event was not registered. Usage: dispatch.unregister("my_event") dispatch.unregister("my_event") # no-op
[ "Remove", "all", "registered", "handlers", "for", "an", "event", ".", "Silent", "return", "when", "event", "was", "not", "registered", "." ]
python
train
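The two ideas in that method, a running-state guard plus dict.pop with a default for silent no-ops, in a runnable miniature (MiniDispatch is hypothetical):

class MiniDispatch:
    def __init__(self):
        self.running = False
        self._handlers = {}
    def unregister(self, event):
        if self.running:
            raise RuntimeError("Can't unregister while running")
        self._handlers.pop(event, None)  # default avoids KeyError on repeats

d = MiniDispatch()
d._handlers['my_event'] = [print]
d.unregister('my_event')
d.unregister('my_event')  # second call is a silent no-op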
SBRG/ssbio
ssbio/protein/sequence/utils/fasta.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/utils/fasta.py#L131-L153
def fasta_files_equal(seq_file1, seq_file2): """Check equality of a FASTA file to another FASTA file Args: seq_file1: Path to a FASTA file seq_file2: Path to another FASTA file Returns: bool: If the sequences are the same """ # Load already set representative sequence seq1 = SeqIO.read(open(seq_file1), 'fasta') # Load kegg sequence seq2 = SeqIO.read(open(seq_file2), 'fasta') # Test equality if str(seq1.seq) == str(seq2.seq): return True else: return False
[ "def", "fasta_files_equal", "(", "seq_file1", ",", "seq_file2", ")", ":", "# Load already set representative sequence", "seq1", "=", "SeqIO", ".", "read", "(", "open", "(", "seq_file1", ")", ",", "'fasta'", ")", "# Load kegg sequence", "seq2", "=", "SeqIO", ".", "read", "(", "open", "(", "seq_file2", ")", ",", "'fasta'", ")", "# Test equality", "if", "str", "(", "seq1", ".", "seq", ")", "==", "str", "(", "seq2", ".", "seq", ")", ":", "return", "True", "else", ":", "return", "False" ]
Check equality of a FASTA file to another FASTA file Args: seq_file1: Path to a FASTA file seq_file2: Path to another FASTA file Returns: bool: If the sequences are the same
[ "Check", "equality", "of", "a", "FASTA", "file", "to", "another", "FASTA", "file" ]
python
train
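Hedged usage sketch (requires Biopython and two single-record FASTA files on disk; the paths are placeholders). Note the comparison looks only at the sequence text, so differing record IDs or descriptions still count as equal:

from Bio import SeqIO

def fasta_equal(path_a, path_b):
    rec_a = SeqIO.read(path_a, 'fasta')  # SeqIO.read accepts a filename directly
    rec_b = SeqIO.read(path_b, 'fasta')
    return str(rec_a.seq) == str(rec_b.seq)

print(fasta_equal('seq1.fasta', 'seq2.fasta'))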
pjuren/pyokit
src/pyokit/io/indexedFile.py
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/indexedFile.py#L284-L390
def read_index(self, fh, indexed_fh, rec_iterator=None, rec_hash_func=None, parse_hash=str, flush=True, no_reindex=True, verbose=False): """ Populate this index from a file. Input format is just a tab-separated file, one record per line. The last column is the file location for the record and all columns before that are collectively considered to be the hash key for that record (which is probably only 1 column, but this allows us to permit tabs in hash keys). Lines consisting only of whitespace are skipped. :param fh: filename or stream-like object to read from. :param indexed_fh: either the filename of the indexed file or handle to it. :param rec_iterator: a function that will return an iterator for the indexed file type (not the iterator for the file itself). This function must take a single argument which is the name of the file to iterate over, or a stream like object similar to a filestream. :param rec_hash_func: a function that accepts the record type produced by the iterator and produces a unique hash for each record. :param parse_hash: a function to convert the string representation of the hash into whatever type is needed. By default, we just leave these as strings. :param flush: remove everything currently in the index and discard any details about a file that is already fully/partially indexed by this object. This is the default behavior. If False, then data from <fh> is just added to the existing index data (potentially overwriting some of it) and the existing index can continue to be used as before. :param no_reindex: if True, after loading the index, a missing key will cause an exception, rather than trigger re-scanning the indexed file for the associated record. The only reason to set this to False would be if your index was incomplete. :param verbose: output status message to STDERR about progress reading the index (if possible). :raise IndexError: on malformed line in input file/stream """ # set the record iterator and hash functions, if they were given if rec_iterator is not None: self.record_iterator = rec_iterator if rec_hash_func is not None: self.record_hash_function = rec_hash_func # disable re-indexing? self._no_reindex = no_reindex # figure out what kind of index identifier we got: handle or filename? handle = fh try: handle = open(fh) except TypeError: # okay, not a filename, we'll try treating it as a stream to read from. pass # clear this index? if flush: self._index = {} self._indexed_file_handle = None self._indexed_file_name = None # replace the name/handle for the indexed file indexed_fn = None try: # try treating this as a filename self.indexed_file = (indexed_fh, None) indexed_fn = indexed_fh except TypeError: try: # try treating this as a file handle self.indexed_file = (None, indexed_fh) except TypeError: fn = " from " + str(fh) if indexed_fn is not None else "" raise IndexError("failed to read index" + fn + "; " "reason: expected indexed filename or stream-like " "object, got " + str(type(indexed_fh))) # try to get an idea of how much data we have... 
if verbose: try: total = os.path.getsize(handle.name) pind = ProgressIndicator(totalToDo=total, messagePrefix="completed", messageSuffix="of loading " + handle.name) except AttributeError as e: sys.stderr.write(str(e)) sys.stderr.write("completed [unknown] of loading index") verbose = False # read the index file and populate this object for line in handle: line = line.rstrip() if verbose: pind.done = handle.tell() pind.showProgress() if line.isspace(): continue parts = line.split("\t") if len(parts) < 2: raise IndexError("failed to parse line: '" + line + "'") key = parse_hash("\t".join(parts[:-1])) value = parts[-1] self._index[key] = int(value)
[ "def", "read_index", "(", "self", ",", "fh", ",", "indexed_fh", ",", "rec_iterator", "=", "None", ",", "rec_hash_func", "=", "None", ",", "parse_hash", "=", "str", ",", "flush", "=", "True", ",", "no_reindex", "=", "True", ",", "verbose", "=", "False", ")", ":", "# set the record iterator and hash functions, if they were given", "if", "rec_iterator", "is", "not", "None", ":", "self", ".", "record_iterator", "=", "rec_iterator", "if", "rec_hash_func", "is", "not", "None", ":", "self", ".", "record_hash_function", "=", "rec_hash_func", "# disable re-indexing?", "self", ".", "_no_reindex", "=", "no_reindex", "# figure out what kind of index identifier we got: handle or filename?", "handle", "=", "fh", "try", ":", "handle", "=", "open", "(", "fh", ")", "except", "TypeError", ":", "# okay, not a filename, we'll try treating it as a stream to read from.", "pass", "# clear this index?", "if", "flush", ":", "self", ".", "_index", "=", "{", "}", "self", ".", "_indexed_file_handle", "=", "None", "self", ".", "_indexed_file_name", "=", "None", "# replace the name/handle for the indexed file", "indexed_fn", "=", "None", "try", ":", "# try treating this as a filename", "self", ".", "indexed_file", "=", "(", "indexed_fh", ",", "None", ")", "indexed_fn", "=", "indexed_fh", "except", "TypeError", ":", "try", ":", "# try treating this as a file handle", "self", ".", "indexed_file", "=", "(", "None", ",", "indexed_fh", ")", "except", "TypeError", ":", "fn", "=", "\" from \"", "+", "str", "(", "fh", ")", "if", "indexed_fn", "is", "not", "None", "else", "\"\"", "raise", "IndexError", "(", "\"failed to read index\"", "+", "fn", "+", "\"; \"", "\"reason: expected indexed filename or stream-like \"", "\"object, got \"", "+", "str", "(", "type", "(", "indexed_fh", ")", ")", ")", "# try to get an idea of how much data we have...", "if", "verbose", ":", "try", ":", "total", "=", "os", ".", "path", ".", "getsize", "(", "handle", ".", "name", ")", "pind", "=", "ProgressIndicator", "(", "totalToDo", "=", "total", ",", "messagePrefix", "=", "\"completed\"", ",", "messageSuffix", "=", "\"of loading \"", "+", "handle", ".", "name", ")", "except", "AttributeError", "as", "e", ":", "sys", ".", "stderr", ".", "write", "(", "str", "(", "e", ")", ")", "sys", ".", "stderr", ".", "write", "(", "\"completed [unknown] of loading index\"", ")", "verbose", "=", "False", "# read the index file and populate this object", "for", "line", "in", "handle", ":", "line", "=", "line", ".", "rstrip", "(", ")", "if", "verbose", ":", "pind", ".", "done", "=", "handle", ".", "tell", "(", ")", "pind", ".", "showProgress", "(", ")", "if", "line", ".", "isspace", "(", ")", ":", "continue", "parts", "=", "line", ".", "split", "(", "\"\\t\"", ")", "if", "len", "(", "parts", ")", "<", "2", ":", "raise", "IndexError", "(", "\"failed to parse line: '\"", "+", "line", "+", "\"'\"", ")", "key", "=", "parse_hash", "(", "\"\\t\"", ".", "join", "(", "parts", "[", ":", "-", "1", "]", ")", ")", "value", "=", "parts", "[", "-", "1", "]", "self", ".", "_index", "[", "key", "]", "=", "int", "(", "value", ")" ]
Populate this index from a file. Input format is just a tab-separated file, one record per line. The last column is the file location for the record and all columns before that are collectively considered to be the hash key for that record (which is probably only 1 column, but this allows us to permit tabs in hash keys). Lines consisting only of whitespace are skipped. :param fh: filename or stream-like object to read from. :param indexed_fh: either the filename of the indexed file or handle to it. :param rec_iterator: a function that will return an iterator for the indexed file type (not the iterator for the file itself). This function must take a single argument which is the name of the file to iterate over, or a stream like object similar to a filestream. :param rec_hash_func: a function that accepts the record type produced by the iterator and produces a unique hash for each record. :param parse_hash: a function to convert the string representation of the hash into whatever type is needed. By default, we just leave these as strings. :param flush: remove everything currently in the index and discard any details about a file that is already fully/partially indexed by this object. This is the default behavior. If False, then data from <fh> is just added to the existing index data (potentially overwriting some of it) and the existing index can continue to be used as before. :param no_reindex: if True, after loading the index, a missing key will cause an exception, rather than trigger re-scanning the indexed file for the associated record. The only reason to set this to False would be if your index was incomplete. :param verbose: output status message to STDERR about progress reading the index (if possible). :raise IndexError: on malformed line in input file/stream
[ "Populate", "this", "index", "from", "a", "file", ".", "Input", "format", "is", "just", "a", "tab", "-", "separated", "file", "one", "record", "per", "line", ".", "The", "last", "column", "is", "the", "file", "location", "for", "the", "record", "and", "all", "columns", "before", "that", "are", "collectively", "considered", "to", "be", "the", "hash", "key", "for", "that", "record", "(", "which", "is", "probably", "only", "1", "column", "but", "this", "allows", "us", "to", "permit", "tabs", "in", "hash", "keys", ")", ".", "Lines", "consisting", "only", "of", "whitespace", "are", "skipped", "." ]
python
train
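The index format the reader expects, parsed in miniature: tab-separated lines whose last column is a byte offset and whose remaining columns jointly form the key (so keys may themselves contain tabs):

import io

index_text = "chr1\t100\t0\nchr1\t200\t57\n"
index = {}
for line in io.StringIO(index_text):
    line = line.rstrip()
    if not line or line.isspace():
        continue
    parts = line.split("\t")
    index["\t".join(parts[:-1])] = int(parts[-1])  # last column is the offset
print(index)  # {'chr1\t100': 0, 'chr1\t200': 57}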
cyrus-/cypy
cypy/np/plotting.py
https://github.com/cyrus-/cypy/blob/04bb59e91fa314e8cf987743189c77a9b6bc371d/cypy/np/plotting.py#L25-L39
def raster(times, indices, max_time=None, max_index=None, x_label="Timestep", y_label="Index", **kwargs): """Plots a raster plot given times and indices of events.""" # set default size to 1 if 's' not in kwargs: kwargs['s'] = 1 scatter(times, indices, **kwargs) if max_time is None: max_time = max(times) if max_index is None: max_index = max(indices) axis((0, max_time, 0, max_index)) if x_label is not None: xlabel(x_label) if y_label is not None: ylabel(y_label)
[ "def", "raster", "(", "times", ",", "indices", ",", "max_time", "=", "None", ",", "max_index", "=", "None", ",", "x_label", "=", "\"Timestep\"", ",", "y_label", "=", "\"Index\"", ",", "*", "*", "kwargs", ")", ":", "# set default size to 1", "if", "'s'", "not", "in", "kwargs", ":", "kwargs", "[", "'s'", "]", "=", "1", "scatter", "(", "times", ",", "indices", ",", "*", "*", "kwargs", ")", "if", "max_time", "is", "None", ":", "max_time", "=", "max", "(", "times", ")", "if", "max_index", "is", "None", ":", "max_index", "=", "max", "(", "indices", ")", "axis", "(", "(", "0", ",", "max_time", ",", "0", ",", "max_index", ")", ")", "if", "x_label", "is", "not", "None", ":", "xlabel", "(", "x_label", ")", "if", "y_label", "is", "not", "None", ":", "ylabel", "(", "y_label", ")" ]
Plots a raster plot given times and indices of events.
[ "Plots", "a", "raster", "plot", "given", "times", "and", "indices", "of", "events", "." ]
python
train
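Standalone equivalent with explicit imports; the original relies on pylab-style star imports for scatter/axis/xlabel/ylabel, which is an assumption about its surrounding module:

import matplotlib.pyplot as plt

times = [1, 3, 3, 7, 9]
indices = [0, 2, 5, 1, 4]
plt.scatter(times, indices, s=1)            # s=1 matches the record's default size
plt.axis((0, max(times), 0, max(indices)))
plt.xlabel("Timestep")
plt.ylabel("Index")
plt.savefig("raster.png")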
leosartaj/tvstats
tvstats/graph.py
https://github.com/leosartaj/tvstats/blob/164fe736111d43869f8c9686e07a5ab1b9f22444/tvstats/graph.py#L7-L24
def graphdata(data): """returns ratings and episode number to be used for making graphs""" data = jh.get_ratings(data) num = 1 rating_final = [] episode_final = [] for k, v in data.items(): rating = [] epinum = [] for r in v: if r is not None: rating.append(float(r)) epinum.append(num) num += 1 rating_final.append(rating) episode_final.append(epinum) return rating_final, episode_final
[ "def", "graphdata", "(", "data", ")", ":", "data", "=", "jh", ".", "get_ratings", "(", "data", ")", "num", "=", "1", "rating_final", "=", "[", "]", "episode_final", "=", "[", "]", "for", "k", ",", "v", "in", "data", ".", "iteritems", "(", ")", ":", "rating", "=", "[", "]", "epinum", "=", "[", "]", "for", "r", "in", "v", ":", "if", "r", "!=", "None", ":", "rating", ".", "append", "(", "float", "(", "r", ")", ")", "epinum", ".", "append", "(", "num", ")", "num", "+=", "1", "rating_final", ".", "append", "(", "rating", ")", "episode_final", ".", "append", "(", "epinum", ")", "return", "rating_final", ",", "episode_final" ]
returns ratings and episode number to be used for making graphs
[ "returns", "ratings", "and", "episode", "number", "to", "be", "used", "for", "making", "graphs" ]
python
train
ihgazni2/elist
elist/elist.py
https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L6617-L6630
def is_lop(ch,block_op_pairs_dict=get_block_op_pairs('{}[]()')): ''' # is_lop('{',block_op_pairs_dict) # is_lop('[',block_op_pairs_dict) # is_lop('}',block_op_pairs_dict) # is_lop(']',block_op_pairs_dict) # is_lop('a',block_op_pairs_dict) ''' for i in range(1,block_op_pairs_dict.__len__()+1): if(ch == block_op_pairs_dict[i][0]): return(True) else: pass return(False)
[ "def", "is_lop", "(", "ch", ",", "block_op_pairs_dict", "=", "get_block_op_pairs", "(", "'{}[]()'", ")", ")", ":", "for", "i", "in", "range", "(", "1", ",", "block_op_pairs_dict", ".", "__len__", "(", ")", "+", "1", ")", ":", "if", "(", "ch", "==", "block_op_pairs_dict", "[", "i", "]", "[", "0", "]", ")", ":", "return", "(", "True", ")", "else", ":", "pass", "return", "(", "False", ")" ]
# is_lop('{',block_op_pairs_dict) # is_lop('[',block_op_pairs_dict) # is_lop('}',block_op_pairs_dict) # is_lop(']',block_op_pairs_dict) # is_lop('a',block_op_pairs_dict)
[ "#", "is_lop", "(", "{", "block_op_pairs_dict", ")", "#", "is_lop", "(", "[", "block_op_pairs_dict", ")", "#", "is_lop", "(", "}", "block_op_pairs_dict", ")", "#", "is_lop", "(", "]", "block_op_pairs_dict", ")", "#", "is_lop", "(", "a", "block_op_pairs_dict", ")" ]
python
valid
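is_lop tests membership among the left (opening) characters of the block-pair table; a direct equivalent without the 1-based dict (the pairs tuple is a simplification of the get_block_op_pairs output):

def is_left_op(ch, pairs=('{}', '[]', '()')):
    return any(ch == pair[0] for pair in pairs)

assert is_left_op('{') and is_left_op('[') and is_left_op('(')
assert not is_left_op('}') and not is_left_op('a')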
cqparts/cqparts
src/cqparts/display/__init__.py
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/display/__init__.py#L66-L83
def display(component, **kwargs): """ Display the given component based on the environment it's run from. See :class:`DisplayEnvironment <cqparts.display.environment.DisplayEnvironment>` documentation for more details. :param component: component to display :type component: :class:`Component <cqparts.Component>` Additional parameters may be used by the chosen :class:`DisplayEnvironment <cqparts.display.environment.DisplayEnvironment>` """ disp_env = get_display_environment() if disp_env is None: raise LookupError('valid display environment could not be found') disp_env.display(component, **kwargs)
[ "def", "display", "(", "component", ",", "*", "*", "kwargs", ")", ":", "disp_env", "=", "get_display_environment", "(", ")", "if", "disp_env", "is", "None", ":", "raise", "LookupError", "(", "'valid display environment could not be found'", ")", "disp_env", ".", "display", "(", "component", ",", "*", "*", "kwargs", ")" ]
Display the given component based on the environment it's run from. See :class:`DisplayEnvironment <cqparts.display.environment.DisplayEnvironment>` documentation for more details. :param component: component to display :type component: :class:`Component <cqparts.Component>` Additional parameters may be used by the chosen :class:`DisplayEnvironment <cqparts.display.environment.DisplayEnvironment>`
[ "Display", "the", "given", "component", "based", "on", "the", "environment", "it", "s", "run", "from", ".", "See", ":", "class", ":", "DisplayEnvironment", "<cqparts", ".", "display", ".", "environment", ".", "DisplayEnvironment", ">", "documentation", "for", "more", "details", "." ]
python
train
spyder-ide/spyder
spyder/plugins/editor/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/plugin.py#L1930-L1938
def open_last_closed(self): """ Reopens the last closed tab.""" editorstack = self.get_current_editorstack() last_closed_files = editorstack.get_last_closed_files() if (len(last_closed_files) > 0): file_to_open = last_closed_files[0] last_closed_files.remove(file_to_open) editorstack.set_last_closed_files(last_closed_files) self.load(file_to_open)
[ "def", "open_last_closed", "(", "self", ")", ":", "editorstack", "=", "self", ".", "get_current_editorstack", "(", ")", "last_closed_files", "=", "editorstack", ".", "get_last_closed_files", "(", ")", "if", "(", "len", "(", "last_closed_files", ")", ">", "0", ")", ":", "file_to_open", "=", "last_closed_files", "[", "0", "]", "last_closed_files", ".", "remove", "(", "file_to_open", ")", "editorstack", ".", "set_last_closed_files", "(", "last_closed_files", ")", "self", ".", "load", "(", "file_to_open", ")" ]
Reopens the last closed tab.
[ "Reopens", "the", "last", "closed", "tab", "." ]
python
train
ianmiell/shutit
shutit_class.py
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L1785-L1796
def step_through(self, msg='', shutit_pexpect_child=None, level=1, print_input=True, value=True): """Implements a step-through function, using pause_point. """ shutit_global.shutit_global_object.yield_to_draw() shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child) if (not shutit_global.shutit_global_object.determine_interactive() or not shutit_global.shutit_global_object.interactive or shutit_global.shutit_global_object.interactive < level): return True self.build['step_through'] = value shutit_pexpect_session.pause_point(msg, print_input=print_input, level=level) return True
[ "def", "step_through", "(", "self", ",", "msg", "=", "''", ",", "shutit_pexpect_child", "=", "None", ",", "level", "=", "1", ",", "print_input", "=", "True", ",", "value", "=", "True", ")", ":", "shutit_global", ".", "shutit_global_object", ".", "yield_to_draw", "(", ")", "shutit_pexpect_child", "=", "shutit_pexpect_child", "or", "self", ".", "get_current_shutit_pexpect_session", "(", ")", ".", "pexpect_child", "shutit_pexpect_session", "=", "self", ".", "get_shutit_pexpect_session_from_child", "(", "shutit_pexpect_child", ")", "if", "(", "not", "shutit_global", ".", "shutit_global_object", ".", "determine_interactive", "(", ")", "or", "not", "shutit_global", ".", "shutit_global_object", ".", "interactive", "or", "shutit_global", ".", "shutit_global_object", ".", "interactive", "<", "level", ")", ":", "return", "True", "self", ".", "build", "[", "'step_through'", "]", "=", "value", "shutit_pexpect_session", ".", "pause_point", "(", "msg", ",", "print_input", "=", "print_input", ",", "level", "=", "level", ")", "return", "True" ]
Implements a step-through function, using pause_point.
[ "Implements", "a", "step", "-", "through", "function", "using", "pause_point", "." ]
python
train
minus7/asif
asif/bot.py
https://github.com/minus7/asif/blob/0d8acc5306ba93386ec679f69d466b56f099b877/asif/bot.py#L680-L687
def _populate(self, client): """ Populate module with the client when available """ self.client = client for fn in self._buffered_calls: self._log.debug("Executing buffered call {}".format(fn)) fn()
[ "def", "_populate", "(", "self", ",", "client", ")", ":", "self", ".", "client", "=", "client", "for", "fn", "in", "self", ".", "_buffered_calls", ":", "self", ".", "_log", ".", "debug", "(", "\"Executing buffered call {}\"", ".", "format", "(", "fn", ")", ")", "fn", "(", ")" ]
Populate module with the client when available
[ "Populate", "module", "with", "the", "client", "when", "available" ]
python
train
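The buffer-until-ready pattern in that method, reduced to a runnable miniature (Module here is hypothetical, not the asif API):

class Module:
    def __init__(self):
        self.client = None
        self._buffered_calls = []
    def defer(self, fn):
        self._buffered_calls.append(fn)  # queued until a client exists
    def _populate(self, client):
        self.client = client
        for fn in self._buffered_calls:
            fn()                         # flush the queue

m = Module()
m.defer(lambda: print('ran once the client arrived'))
m._populate(object())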
StackStorm/pybind
pybind/slxos/v17r_2_00/interface/port_channel/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_2_00/interface/port_channel/__init__.py#L1151-L1181
def _set_port_profile_port(self, v, load=False): """ Setter method for port_profile_port, mapped from YANG variable /interface/port_channel/port_profile_port (empty) If this variable is read-only (config: false) in the source YANG file, then _set_port_profile_port is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_port_profile_port() directly. YANG Description: This specifies if a physical/logical port can be enabled for port-profiling. The presence of this leaf indicates that the port is enabled for port-profiling. Else, it is not enabled. Enabling a port for port-profiling results in the application of network policies (as per PP-MAC mapping) following the MAC learning process. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="port-profile-port", rest_name="port-profile-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the interface to AMPP profile mode', u'sort-priority': u'135'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='empty', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """port_profile_port must be of a type compatible with empty""", 'defined-type': "empty", 'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="port-profile-port", rest_name="port-profile-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the interface to AMPP profile mode', u'sort-priority': u'135'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='empty', is_config=True)""", }) self.__port_profile_port = t if hasattr(self, '_set'): self._set()
[ "def", "_set_port_profile_port", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGBool", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"port-profile-port\"", ",", "rest_name", "=", "\"port-profile-port\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Set the interface to AMPP profile mode'", ",", "u'sort-priority'", ":", "u'135'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-port-profile'", ",", "defining_module", "=", "'brocade-port-profile'", ",", "yang_type", "=", "'empty'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"port_profile_port must be of a type compatible with empty\"\"\"", ",", "'defined-type'", ":", "\"empty\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"port-profile-port\", rest_name=\"port-profile-port\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the interface to AMPP profile mode', u'sort-priority': u'135'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='empty', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__port_profile_port", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for port_profile_port, mapped from YANG variable /interface/port_channel/port_profile_port (empty) If this variable is read-only (config: false) in the source YANG file, then _set_port_profile_port is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_port_profile_port() directly. YANG Description: This specifies if a physical/logical port can be enabled for port-profiling. The presence of this leaf indicates that the port is enabled for port-profiling. Else, it is not enabled. Enabling a port for port-profiling results in the application of network policies (as per PP-MAC mapping) following the MAC learning process.
[ "Setter", "method", "for", "port_profile_port", "mapped", "from", "YANG", "variable", "/", "interface", "/", "port_channel", "/", "port_profile_port", "(", "empty", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_port_profile_port", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_port_profile_port", "()", "directly", "." ]
python
train
greenbone/ospd
ospd/misc.py
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L130-L140
def get_hosts_unfinished(self, scan_id): """ Get a list of unfinished hosts.""" unfinished_hosts = list() for target in self.scans_table[scan_id]['finished_hosts']: unfinished_hosts.extend(target_str_to_list(target)) for target in self.scans_table[scan_id]['finished_hosts']: for host in self.scans_table[scan_id]['finished_hosts'][target]: unfinished_hosts.remove(host) return unfinished_hosts
[ "def", "get_hosts_unfinished", "(", "self", ",", "scan_id", ")", ":", "unfinished_hosts", "=", "list", "(", ")", "for", "target", "in", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'finished_hosts'", "]", ":", "unfinished_hosts", ".", "extend", "(", "target_str_to_list", "(", "target", ")", ")", "for", "target", "in", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'finished_hosts'", "]", ":", "for", "host", "in", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'finished_hosts'", "]", "[", "target", "]", ":", "unfinished_hosts", ".", "remove", "(", "host", ")", "return", "unfinished_hosts" ]
Get a list of unfinished hosts.
[ "Get", "a", "list", "of", "finished", "hosts", "." ]
python
train
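The expand-then-subtract bookkeeping above, runnable with a stub for ospd's target_str_to_list helper (the real helper also expands ranges and CIDR blocks):

def target_str_to_list(target):  # stub
    return target.split(',')

finished_hosts = {'10.0.0.1,10.0.0.2,10.0.0.3': ['10.0.0.2']}
unfinished = []
for target in finished_hosts:
    unfinished.extend(target_str_to_list(target))  # all hosts of each target
for target in finished_hosts:
    for host in finished_hosts[target]:
        unfinished.remove(host)                    # drop the finished ones
print(unfinished)  # ['10.0.0.1', '10.0.0.3']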
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3127-L3167
def com_google_fonts_check_name_typographicsubfamilyname(ttFont, style_with_spaces): """ Check name table: TYPOGRAPHIC_SUBFAMILY_NAME entries. """ from fontbakery.utils import name_entry_id failed = False if style_with_spaces in ['Regular', 'Italic', 'Bold', 'Bold Italic']: for name in ttFont['name'].names: if name.nameID == NameID.TYPOGRAPHIC_SUBFAMILY_NAME: failed = True yield FAIL, Message("ribbi", ("Font style is '{}' and, for that reason," " it is not expected to have a " "{} entry!").format(style_with_spaces, name_entry_id(name))) else: expected_value = style_with_spaces has_entry = False for name in ttFont['name'].names: if name.nameID == NameID.TYPOGRAPHIC_SUBFAMILY_NAME: string = name.string.decode(name.getEncoding()).strip() if string == expected_value: has_entry = True else: failed = True yield FAIL, Message("non-ribbi-bad-value", ("Entry {} on the 'name' table: " "Expected '{}' " "but got '{}'.").format(name_entry_id(name), expected_value, string)) if not failed and not has_entry: failed = True yield FAIL, Message("non-ribbi-lacks-entry", ("non-RIBBI fonts must have a" " TYPOGRAPHIC_SUBFAMILY_NAME entry" " on the name table.")) if not failed: yield PASS, "TYPOGRAPHIC_SUBFAMILY_NAME entries are all good."
[ "def", "com_google_fonts_check_name_typographicsubfamilyname", "(", "ttFont", ",", "style_with_spaces", ")", ":", "from", "fontbakery", ".", "utils", "import", "name_entry_id", "failed", "=", "False", "if", "style_with_spaces", "in", "[", "'Regular'", ",", "'Italic'", ",", "'Bold'", ",", "'Bold Italic'", "]", ":", "for", "name", "in", "ttFont", "[", "'name'", "]", ".", "names", ":", "if", "name", ".", "nameID", "==", "NameID", ".", "TYPOGRAPHIC_SUBFAMILY_NAME", ":", "failed", "=", "True", "yield", "FAIL", ",", "Message", "(", "\"ribbi\"", ",", "(", "\"Font style is '{}' and, for that reason,\"", "\" it is not expected to have a \"", "\"{} entry!\"", ")", ".", "format", "(", "style_with_spaces", ",", "name_entry_id", "(", "name", ")", ")", ")", "else", ":", "expected_value", "=", "style_with_spaces", "has_entry", "=", "False", "for", "name", "in", "ttFont", "[", "'name'", "]", ".", "names", ":", "if", "name", ".", "nameID", "==", "NameID", ".", "TYPOGRAPHIC_SUBFAMILY_NAME", ":", "string", "=", "name", ".", "string", ".", "decode", "(", "name", ".", "getEncoding", "(", ")", ")", ".", "strip", "(", ")", "if", "string", "==", "expected_value", ":", "has_entry", "=", "True", "else", ":", "failed", "=", "True", "yield", "FAIL", ",", "Message", "(", "\"non-ribbi-bad-value\"", ",", "(", "\"Entry {} on the 'name' table: \"", "\"Expected '{}' \"", "\"but got '{}'.\"", ")", ".", "format", "(", "name_entry_id", "(", "name", ")", ",", "expected_value", ",", "string", ")", ")", "if", "not", "failed", "and", "not", "has_entry", ":", "failed", "=", "True", "yield", "FAIL", ",", "Message", "(", "\"non-ribbi-lacks-entry\"", ",", "(", "\"non-RIBBI fonts must have a\"", "\" TYPOGRAPHIC_SUBFAMILY_NAME entry\"", "\" on the name table.\"", ")", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "\"TYPOGRAPHIC_SUBFAMILY_NAME entries are all good.\"" ]
Check name table: TYPOGRAPHIC_SUBFAMILY_NAME entries.
[ "Check", "name", "table", ":", "TYPOGRAPHIC_SUBFAMILY_NAME", "entries", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_netconf_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_netconf_ext.py#L81-L93
def get_netconf_client_capabilities_output_session_version(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_netconf_client_capabilities = ET.Element("get_netconf_client_capabilities") config = get_netconf_client_capabilities output = ET.SubElement(get_netconf_client_capabilities, "output") session = ET.SubElement(output, "session") version = ET.SubElement(session, "version") version.text = kwargs.pop('version') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_netconf_client_capabilities_output_session_version", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_netconf_client_capabilities", "=", "ET", ".", "Element", "(", "\"get_netconf_client_capabilities\"", ")", "config", "=", "get_netconf_client_capabilities", "output", "=", "ET", ".", "SubElement", "(", "get_netconf_client_capabilities", ",", "\"output\"", ")", "session", "=", "ET", ".", "SubElement", "(", "output", ",", "\"session\"", ")", "version", "=", "ET", ".", "SubElement", "(", "session", ",", "\"version\"", ")", "version", ".", "text", "=", "kwargs", ".", "pop", "(", "'version'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
mattupstate/flask-security
flask_security/utils.py
https://github.com/mattupstate/flask-security/blob/a401fb47018fbbbe0b899ea55afadfd0e3cd847a/flask_security/utils.py#L114-L129
def get_hmac(password): """Returns a Base64 encoded HMAC+SHA512 of the password signed with the salt specified by ``SECURITY_PASSWORD_SALT``. :param password: The password to sign """ salt = _security.password_salt if salt is None: raise RuntimeError( 'The configuration value `SECURITY_PASSWORD_SALT` must ' 'not be None when the value of `SECURITY_PASSWORD_HASH` is ' 'set to "%s"' % _security.password_hash) h = hmac.new(encode_string(salt), encode_string(password), hashlib.sha512) return base64.b64encode(h.digest())
[ "def", "get_hmac", "(", "password", ")", ":", "salt", "=", "_security", ".", "password_salt", "if", "salt", "is", "None", ":", "raise", "RuntimeError", "(", "'The configuration value `SECURITY_PASSWORD_SALT` must '", "'not be None when the value of `SECURITY_PASSWORD_HASH` is '", "'set to \"%s\"'", "%", "_security", ".", "password_hash", ")", "h", "=", "hmac", ".", "new", "(", "encode_string", "(", "salt", ")", ",", "encode_string", "(", "password", ")", ",", "hashlib", ".", "sha512", ")", "return", "base64", ".", "b64encode", "(", "h", ".", "digest", "(", ")", ")" ]
Returns a Base64 encoded HMAC+SHA512 of the password signed with the salt specified by ``SECURITY_PASSWORD_SALT``. :param password: The password to sign
[ "Returns", "a", "Base64", "encoded", "HMAC", "+", "SHA512", "of", "the", "password", "signed", "with", "the", "salt", "specified", "by", "SECURITY_PASSWORD_SALT", "." ]
python
train
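A self-contained version of the HMAC step; the salt normally comes from the SECURITY_PASSWORD_SALT config value and is hard-coded here purely for illustration:

import base64
import hashlib
import hmac

def sign_password(password, salt='change-me'):
    h = hmac.new(salt.encode('utf-8'), password.encode('utf-8'), hashlib.sha512)
    return base64.b64encode(h.digest())  # 64-byte digest -> 88-byte base64

print(sign_password('hunter2'))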
sivakov512/python-static-api-generator
static_api_generator/loaders.py
https://github.com/sivakov512/python-static-api-generator/blob/0a7ec27324b9b2a3d1fa9894c4cba73af9ebcc01/static_api_generator/loaders.py#L26-L34
def _validate_extension(self): """Validates that source file extension is supported. :raises: UnsupportedExtensionError """ extension = self.fpath.split('.')[-1] if extension not in self.supported_extensions: raise UnsupportedExtensionError
[ "def", "_validate_extension", "(", "self", ")", ":", "extension", "=", "self", ".", "fpath", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "if", "extension", "not", "in", "self", ".", "supported_extensions", ":", "raise", "UnsupportedExtensionError" ]
Validates that source file extension is supported. :raises: UnsupportedExtensionError
[ "Validates", "that", "source", "file", "extension", "is", "supported", "." ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L1410-L1423
def delete_dcnm_out_nwk(self, tenant_id, fw_dict, is_fw_virt=False): """Delete the DCNM OUT network and update the result. """ tenant_name = fw_dict.get('tenant_name') ret = self._delete_service_nwk(tenant_id, tenant_name, 'out') if ret: res = fw_const.DCNM_OUT_NETWORK_DEL_SUCCESS LOG.info("out Service network deleted for tenant %s", tenant_id) else: res = fw_const.DCNM_OUT_NETWORK_DEL_FAIL LOG.info("out Service network deletion failed for tenant %s", tenant_id) self.update_fw_db_result(tenant_id, dcnm_status=res) return ret
[ "def", "delete_dcnm_out_nwk", "(", "self", ",", "tenant_id", ",", "fw_dict", ",", "is_fw_virt", "=", "False", ")", ":", "tenant_name", "=", "fw_dict", ".", "get", "(", "'tenant_name'", ")", "ret", "=", "self", ".", "_delete_service_nwk", "(", "tenant_id", ",", "tenant_name", ",", "'out'", ")", "if", "ret", ":", "res", "=", "fw_const", ".", "DCNM_OUT_NETWORK_DEL_SUCCESS", "LOG", ".", "info", "(", "\"out Service network deleted for tenant %s\"", ",", "tenant_id", ")", "else", ":", "res", "=", "fw_const", ".", "DCNM_OUT_NETWORK_DEL_FAIL", "LOG", ".", "info", "(", "\"out Service network deleted failed for tenant %s\"", ",", "tenant_id", ")", "self", ".", "update_fw_db_result", "(", "tenant_id", ",", "dcnm_status", "=", "res", ")", "return", "ret" ]
Delete the DCNM OUT network and update the result.
[ "Delete", "the", "DCNM", "OUT", "network", "and", "update", "the", "result", "." ]
python
train
google/budou
budou/budou.py
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/budou.py#L86-L104
def authenticate(json_path=None): """Gets a Natural Language API parser by authenticating the API. **This method is deprecated.** Please use :obj:`budou.get_parser` to obtain a parser instead. Args: json_path (:obj:`str`, optional): The file path to the service account's credentials. Returns: Parser. (:obj:`budou.parser.NLAPIParser`) """ msg = ('budou.authenticate() is deprecated. ' 'Please use budou.get_parser() to obtain a parser instead.') warnings.warn(msg, DeprecationWarning) parser = get_parser('nlapi', credentials_path=json_path) return parser
[ "def", "authenticate", "(", "json_path", "=", "None", ")", ":", "msg", "=", "(", "'budou.authentication() is deprecated. '", "'Please use budou.get_parser() to obtain a parser instead.'", ")", "warnings", ".", "warn", "(", "msg", ",", "DeprecationWarning", ")", "parser", "=", "get_parser", "(", "'nlapi'", ",", "credentials_path", "=", "json_path", ")", "return", "parser" ]
Gets a Natural Language API parser by authenticating the API. **This method is deprecated.** Please use :obj:`budou.get_parser` to obtain a parser instead. Args: json_path (:obj:`str`, optional): The file path to the service account's credentials. Returns: Parser. (:obj:`budou.parser.NLAPIParser`)
[ "Gets", "a", "Natural", "Language", "API", "parser", "by", "authenticating", "the", "API", "." ]
python
train
log2timeline/dfvfs
dfvfs/file_io/sqlite_blob_file_io.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/sqlite_blob_file_io.py#L217-L242
def seek(self, offset, whence=os.SEEK_SET): """Seeks to an offset within the file-like object. Args: offset (int): offset to seek to. whence (Optional(int)): value that indicates whether offset is an absolute or relative position within the file. Raises: IOError: if the seek failed. OSError: if the seek failed. """ if not self._database_object: raise IOError('Not opened.') if whence == os.SEEK_CUR: offset += self._current_offset elif whence == os.SEEK_END: offset += self._size elif whence != os.SEEK_SET: raise IOError('Unsupported whence.') if offset < 0: raise IOError('Invalid offset value out of bounds.') self._current_offset = offset
[ "def", "seek", "(", "self", ",", "offset", ",", "whence", "=", "os", ".", "SEEK_SET", ")", ":", "if", "not", "self", ".", "_database_object", ":", "raise", "IOError", "(", "'Not opened.'", ")", "if", "whence", "==", "os", ".", "SEEK_CUR", ":", "offset", "+=", "self", ".", "_current_offset", "elif", "whence", "==", "os", ".", "SEEK_END", ":", "offset", "+=", "self", ".", "_size", "elif", "whence", "!=", "os", ".", "SEEK_SET", ":", "raise", "IOError", "(", "'Unsupported whence.'", ")", "if", "offset", "<", "0", ":", "raise", "IOError", "(", "'Invalid offset value out of bounds.'", ")", "self", ".", "_current_offset", "=", "offset" ]
Seeks to an offset within the file-like object. Args: offset (int): offset to seek to. whence (Optional(int)): value that indicates whether offset is an absolute or relative position within the file. Raises: IOError: if the seek failed. OSError: if the seek failed.
[ "Seeks", "to", "an", "offset", "within", "the", "file", "-", "like", "object", "." ]
python
train
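The whence arithmetic in isolation, with the same error behavior as the record above:

import os

def resolve_offset(offset, whence, current, size):
    if whence == os.SEEK_CUR:
        offset += current          # relative to the current position
    elif whence == os.SEEK_END:
        offset += size             # relative to the end of the data
    elif whence != os.SEEK_SET:
        raise IOError('Unsupported whence.')
    if offset < 0:
        raise IOError('Invalid offset value out of bounds.')
    return offset

assert resolve_offset(10, os.SEEK_SET, 0, 100) == 10
assert resolve_offset(-5, os.SEEK_END, 0, 100) == 95
assert resolve_offset(3, os.SEEK_CUR, 40, 100) == 43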
gbiggs/rtctree
rtctree/component.py
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/component.py#L532-L552
def activate_in_ec(self, ec_index): '''Activate this component in an execution context. @param ec_index The index of the execution context to activate in. This index is into the total array of contexts, that is both owned and participating contexts. If the value of ec_index is greater than the length of @ref owned_ecs, that length is subtracted from ec_index and the result used as an index into @ref participating_ecs. ''' with self._mutex: if ec_index >= len(self.owned_ecs): ec_index -= len(self.owned_ecs) if ec_index >= len(self.participating_ecs): raise exceptions.BadECIndexError(ec_index) ec = self.participating_ecs[ec_index] else: ec = self.owned_ecs[ec_index] ec.activate_component(self._obj)
[ "def", "activate_in_ec", "(", "self", ",", "ec_index", ")", ":", "with", "self", ".", "_mutex", ":", "if", "ec_index", ">=", "len", "(", "self", ".", "owned_ecs", ")", ":", "ec_index", "-=", "len", "(", "self", ".", "owned_ecs", ")", "if", "ec_index", ">=", "len", "(", "self", ".", "participating_ecs", ")", ":", "raise", "exceptions", ".", "BadECIndexError", "(", "ec_index", ")", "ec", "=", "self", ".", "participating_ecs", "[", "ec_index", "]", "else", ":", "ec", "=", "self", ".", "owned_ecs", "[", "ec_index", "]", "ec", ".", "activate_component", "(", "self", ".", "_obj", ")" ]
Activate this component in an execution context. @param ec_index The index of the execution context to activate in. This index is into the total array of contexts, that is both owned and participating contexts. If the value of ec_index is greater than the length of @ref owned_ecs, that length is subtracted from ec_index and the result used as an index into @ref participating_ecs.
[ "Activate", "this", "component", "in", "an", "execution", "context", "." ]
python
train
oseledets/ttpy
tt/core/vector.py
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/vector.py#L127-L145
def erank(self): """ Effective rank of the TT-vector """ r = self.r n = self.n d = self.d if d <= 1: er = 0e0 else: sz = _np.dot(n * r[0:d], r[1:]) if sz == 0: er = 0e0 else: b = r[0] * n[0] + n[d - 1] * r[d] if d == 2: er = sz * 1.0 / b else: a = _np.sum(n[1:d - 1]) er = (_np.sqrt(b * b + 4 * a * sz) - b) / (2 * a) return er
[ "def", "erank", "(", "self", ")", ":", "r", "=", "self", ".", "r", "n", "=", "self", ".", "n", "d", "=", "self", ".", "d", "if", "d", "<=", "1", ":", "er", "=", "0e0", "else", ":", "sz", "=", "_np", ".", "dot", "(", "n", "*", "r", "[", "0", ":", "d", "]", ",", "r", "[", "1", ":", "]", ")", "if", "sz", "==", "0", ":", "er", "=", "0e0", "else", ":", "b", "=", "r", "[", "0", "]", "*", "n", "[", "0", "]", "+", "n", "[", "d", "-", "1", "]", "*", "r", "[", "d", "]", "if", "d", "is", "2", ":", "er", "=", "sz", "*", "1.0", "/", "b", "else", ":", "a", "=", "_np", ".", "sum", "(", "n", "[", "1", ":", "d", "-", "1", "]", ")", "er", "=", "(", "_np", ".", "sqrt", "(", "b", "*", "b", "+", "4", "*", "a", "*", "sz", ")", "-", "b", ")", "/", "(", "2", "*", "a", ")", "return", "er" ]
Effective rank of the TT-vector
[ "Effective", "rank", "of", "the", "TT", "-", "vector" ]
python
train
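Where the closed form in erank comes from, a sketch in the code's own variables: a hypothetical train of uniform rank $r$ with the same boundary ranks stores $a r^2 + b r$ entries, with $a = \sum_{k=2}^{d-1} n_k$ and $b = r_0 n_1 + n_d r_d$. Equating that to the actual storage $\mathrm{sz} = \sum_{k=1}^{d} n_k r_{k-1} r_k$ and taking the positive root of the quadratic gives

$$ r = \frac{\sqrt{b^2 + 4a\,\mathrm{sz}} - b}{2a}, $$

which is exactly the er computed in the else branch; the $d = 2$ case has $a = 0$, hence the separate linear solve $r = \mathrm{sz}/b$.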
google/python-gflags
gflags/flagvalues.py
https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/flagvalues.py#L714-L806
def _ParseArgs(self, args, known_only): """Helper function to do the main argument parsing. This function goes through args and does the bulk of the flag parsing. It will find the corresponding flag in our flag dictionary, and call its .parse() method on the flag value. Args: args: List of strings with the arguments to parse. known_only: parse and remove known flags, return rest in unparsed_args Returns: A tuple with the following: unknown_flags: List of (flag name, arg) for flags we don't know about. unparsed_args: List of arguments we did not parse. undefok: Set of flags that were given via --undefok. Raises: Error: on any parsing error. ValueError: on flag value parsing error. """ unknown_flags, unparsed_args, undefok = [], [], set() flag_dict = self.FlagDict() args = iter(args) for arg in args: value = None def GetValue(): # pylint: disable=cell-var-from-loop try: return next(args) if value is None else value except StopIteration: raise exceptions.Error('Missing value for flag ' + arg) if not arg.startswith('-'): # A non-argument: default is break, GNU is skip. unparsed_args.append(arg) if self.IsGnuGetOpt(): continue else: break if arg == '--': if known_only: unparsed_args.append(arg) break if '=' in arg: name, value = arg.lstrip('-').split('=', 1) else: name, value = arg.lstrip('-'), None if not name: # The argument is all dashes (including one dash). unparsed_args.append(arg) if self.IsGnuGetOpt(): continue else: break # --undefok is a special case. if name == 'undefok': if known_only: unparsed_args.append(arg) value = GetValue() undefok.update(v.strip() for v in value.split(',')) undefok.update('no' + v.strip() for v in value.split(',')) continue flag = flag_dict.get(name) if flag: value = (flag.boolean and value is None) or GetValue() elif name.startswith('no') and len(name) > 2: # Boolean flags can take the form of --noflag, with no value. noflag = flag_dict.get(name[2:]) if noflag and noflag.boolean: if value is not None: raise ValueError(arg + ' does not take an argument') flag = noflag value = False if flag: flag.parse(value) flag.using_default_value = False elif known_only: unparsed_args.append(arg) else: unknown_flags.append((name, arg)) unparsed_args.extend(args) return unknown_flags, unparsed_args, undefok
[ "def", "_ParseArgs", "(", "self", ",", "args", ",", "known_only", ")", ":", "unknown_flags", ",", "unparsed_args", ",", "undefok", "=", "[", "]", ",", "[", "]", ",", "set", "(", ")", "flag_dict", "=", "self", ".", "FlagDict", "(", ")", "args", "=", "iter", "(", "args", ")", "for", "arg", "in", "args", ":", "value", "=", "None", "def", "GetValue", "(", ")", ":", "# pylint: disable=cell-var-from-loop", "try", ":", "return", "next", "(", "args", ")", "if", "value", "is", "None", "else", "value", "except", "StopIteration", ":", "raise", "exceptions", ".", "Error", "(", "'Missing value for flag '", "+", "arg", ")", "if", "not", "arg", ".", "startswith", "(", "'-'", ")", ":", "# A non-argument: default is break, GNU is skip.", "unparsed_args", ".", "append", "(", "arg", ")", "if", "self", ".", "IsGnuGetOpt", "(", ")", ":", "continue", "else", ":", "break", "if", "arg", "==", "'--'", ":", "if", "known_only", ":", "unparsed_args", ".", "append", "(", "arg", ")", "break", "if", "'='", "in", "arg", ":", "name", ",", "value", "=", "arg", ".", "lstrip", "(", "'-'", ")", ".", "split", "(", "'='", ",", "1", ")", "else", ":", "name", ",", "value", "=", "arg", ".", "lstrip", "(", "'-'", ")", ",", "None", "if", "not", "name", ":", "# The argument is all dashes (including one dash).", "unparsed_args", ".", "append", "(", "arg", ")", "if", "self", ".", "IsGnuGetOpt", "(", ")", ":", "continue", "else", ":", "break", "# --undefok is a special case.", "if", "name", "==", "'undefok'", ":", "if", "known_only", ":", "unparsed_args", ".", "append", "(", "arg", ")", "value", "=", "GetValue", "(", ")", "undefok", ".", "update", "(", "v", ".", "strip", "(", ")", "for", "v", "in", "value", ".", "split", "(", "','", ")", ")", "undefok", ".", "update", "(", "'no'", "+", "v", ".", "strip", "(", ")", "for", "v", "in", "value", ".", "split", "(", "','", ")", ")", "continue", "flag", "=", "flag_dict", ".", "get", "(", "name", ")", "if", "flag", ":", "value", "=", "(", "flag", ".", "boolean", "and", "value", "is", "None", ")", "or", "GetValue", "(", ")", "elif", "name", ".", "startswith", "(", "'no'", ")", "and", "len", "(", "name", ")", ">", "2", ":", "# Boolean flags can take the form of --noflag, with no value.", "noflag", "=", "flag_dict", ".", "get", "(", "name", "[", "2", ":", "]", ")", "if", "noflag", "and", "noflag", ".", "boolean", ":", "if", "value", "is", "not", "None", ":", "raise", "ValueError", "(", "arg", "+", "' does not take an argument'", ")", "flag", "=", "noflag", "value", "=", "False", "if", "flag", ":", "flag", ".", "parse", "(", "value", ")", "flag", ".", "using_default_value", "=", "False", "elif", "known_only", ":", "unparsed_args", ".", "append", "(", "arg", ")", "else", ":", "unknown_flags", ".", "append", "(", "(", "name", ",", "arg", ")", ")", "unparsed_args", ".", "extend", "(", "args", ")", "return", "unknown_flags", ",", "unparsed_args", ",", "undefok" ]
Helper function to do the main argument parsing. This function goes through args and does the bulk of the flag parsing. It will find the corresponding flag in our flag dictionary, and call its .parse() method on the flag value. Args: args: List of strings with the arguments to parse. known_only: parse and remove known flags, return rest in unparsed_args Returns: A tuple with the following: unknown_flags: List of (flag name, arg) for flags we don't know about. unparsed_args: List of arguments we did not parse. undefok: Set of flags that were given via --undefok. Raises: Error: on any parsing error. ValueError: on flag value parsing error.
[ "Helper", "function", "to", "do", "the", "main", "argument", "parsing", "." ]
python
train
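The three argv shapes the parsing loop distinguishes, shown on sample input (a simplification: the real code also handles --undefok, value-taking flags, and GNU-style skipping of positionals):

samples = ['--verbose', '--name=alice', '--nocache', 'positional']
for arg in samples:
    if not arg.startswith('-'):
        print(arg, '-> non-flag argument')
    elif '=' in arg:
        name, value = arg.lstrip('-').split('=', 1)
        print(arg, '-> flag', name, 'with inline value', value)
    else:
        print(arg, '-> flag', arg.lstrip('-'),
              '(boolean, negatable via a "no" prefix, or consumes the next argv entry)')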
ljcooke/see
see/output.py
https://github.com/ljcooke/see/blob/4cbc67a31c92367977ecb4bbb1f0736fa688a6ba/see/output.py#L92-L103
def filter_ignoring_case(self, pattern): """ Like ``filter`` but case-insensitive. Expects a regular expression string without the surrounding ``/`` characters. >>> see().filter('^my', ignore_case=True) MyClass() """ return self.filter(re.compile(pattern, re.I))
[ "def", "filter_ignoring_case", "(", "self", ",", "pattern", ")", ":", "return", "self", ".", "filter", "(", "re", ".", "compile", "(", "pattern", ",", "re", ".", "I", ")", ")" ]
Like ``filter`` but case-insensitive. Expects a regular expression string without the surrounding ``/`` characters. >>> see().filter('^my', ignore_case=True) MyClass()
[ "Like", "filter", "but", "case", "-", "insensitive", "." ]
python
train
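All the method adds is the re.I flag; the equivalent compiled pattern behaves like this:

import re

pattern = re.compile('^my', re.I)       # what filter_ignoring_case builds
print(bool(pattern.search('MyClass')))  # True: case is ignored
print(bool(pattern.search('notmine')))  # False: ^ still anchors at the start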
pypa/pipenv
pipenv/patched/notpip/_internal/index.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/index.py#L124-L138
def _ensure_html_response(url, session): # type: (str, PipSession) -> None """Send a HEAD request to the URL, and ensure the response contains HTML. Raises `_NotHTTP` if the URL is not available for a HEAD request, or `_NotHTML` if the content type is not text/html. """ scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url) if scheme not in {'http', 'https'}: raise _NotHTTP() resp = session.head(url, allow_redirects=True) resp.raise_for_status() _ensure_html_header(resp)
[ "def", "_ensure_html_response", "(", "url", ",", "session", ")", ":", "# type: (str, PipSession) -> None", "scheme", ",", "netloc", ",", "path", ",", "query", ",", "fragment", "=", "urllib_parse", ".", "urlsplit", "(", "url", ")", "if", "scheme", "not", "in", "{", "'http'", ",", "'https'", "}", ":", "raise", "_NotHTTP", "(", ")", "resp", "=", "session", ".", "head", "(", "url", ",", "allow_redirects", "=", "True", ")", "resp", ".", "raise_for_status", "(", ")", "_ensure_html_header", "(", "resp", ")" ]
Send a HEAD request to the URL, and ensure the response contains HTML. Raises `_NotHTTP` if the URL is not available for a HEAD request, or `_NotHTML` if the content type is not text/html.
[ "Send", "a", "HEAD", "request", "to", "the", "URL", "and", "ensure", "the", "response", "contains", "HTML", "." ]
python
train
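The scheme gate in isolation; urllib.parse on Python 3 plays the role of the vendored urllib_parse:

from urllib.parse import urlsplit

for url in ('https://pypi.org/simple/', 'ftp://example.com/pkg'):
    scheme = urlsplit(url).scheme
    print(url, '->', 'HEAD check allowed' if scheme in {'http', 'https'} else 'raises _NotHTTP')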
acutesoftware/virtual-AI-simulator
vais/build_internet.py
https://github.com/acutesoftware/virtual-AI-simulator/blob/57de679a5b1a58c38fefe6aea58af1f3a7e79c58/vais/build_internet.py#L12-L21
def main(): """ generates a virtual internet, sets pages and runs web_users on it """ e = mod_env.Internet('VAIS - Load testing', 'Simulation of several websites') e.create(800) print(e) #Create some users to browse the web and load test website print(npc.web_users.params)
[ "def", "main", "(", ")", ":", "e", "=", "mod_env", ".", "Internet", "(", "'VAIS - Load testing'", ",", "'Simulation of several websites'", ")", "e", ".", "create", "(", "800", ")", "print", "(", "e", ")", "#Create some users to browse the web and load test website", "print", "(", "npc", ".", "web_users", ".", "params", ")" ]
generates a virtual internet, sets pages and runs web_users on it
[ "generates", "a", "virtual", "internet", "sets", "pages", "and", "runs", "web_users", "on", "it" ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py#L717-L720
def fw_policy_delete(self, data, fw_name=None): """Top level policy delete routine. """ LOG.debug("FW Policy Debug") self._fw_policy_delete(fw_name, data)
[ "def", "fw_policy_delete", "(", "self", ",", "data", ",", "fw_name", "=", "None", ")", ":", "LOG", ".", "debug", "(", "\"FW Policy Debug\"", ")", "self", ".", "_fw_policy_delete", "(", "fw_name", ",", "data", ")" ]
Top level policy delete routine.
[ "Top", "level", "policy", "delete", "routine", "." ]
python
train
sebdah/dynamic-dynamodb
dynamic_dynamodb/statistics/gsi.py
https://github.com/sebdah/dynamic-dynamodb/blob/bfd0ca806b1c3301e724696de90ef0f973410493/dynamic_dynamodb/statistics/gsi.py#L14-L58
def get_consumed_read_units_percent( table_name, gsi_name, lookback_window_start=15, lookback_period=5): """ Returns the number of consumed read units in percent :type table_name: str :param table_name: Name of the DynamoDB table :type gsi_name: str :param gsi_name: Name of the GSI :type lookback_window_start: int :param lookback_window_start: Relative start time for the CloudWatch metric :type lookback_period: int :param lookback_period: Number of minutes to look at :returns: float -- Number of consumed reads as a percentage of provisioned reads """ try: metrics = __get_aws_metric( table_name, gsi_name, lookback_window_start, lookback_period, 'ConsumedReadCapacityUnits') except BotoServerError: raise if metrics: lookback_seconds = lookback_period * 60 consumed_read_units = ( float(metrics[0]['Sum']) / float(lookback_seconds)) else: consumed_read_units = 0 try: gsi_read_units = dynamodb.get_provisioned_gsi_read_units( table_name, gsi_name) consumed_read_units_percent = ( float(consumed_read_units) / float(gsi_read_units) * 100) except JSONResponseError: raise logger.info('{0} - GSI: {1} - Consumed read units: {2:.2f}%'.format( table_name, gsi_name, consumed_read_units_percent)) return consumed_read_units_percent
[ "def", "get_consumed_read_units_percent", "(", "table_name", ",", "gsi_name", ",", "lookback_window_start", "=", "15", ",", "lookback_period", "=", "5", ")", ":", "try", ":", "metrics", "=", "__get_aws_metric", "(", "table_name", ",", "gsi_name", ",", "lookback_window_start", ",", "lookback_period", ",", "'ConsumedReadCapacityUnits'", ")", "except", "BotoServerError", ":", "raise", "if", "metrics", ":", "lookback_seconds", "=", "lookback_period", "*", "60", "consumed_read_units", "=", "(", "float", "(", "metrics", "[", "0", "]", "[", "'Sum'", "]", ")", "/", "float", "(", "lookback_seconds", ")", ")", "else", ":", "consumed_read_units", "=", "0", "try", ":", "gsi_read_units", "=", "dynamodb", ".", "get_provisioned_gsi_read_units", "(", "table_name", ",", "gsi_name", ")", "consumed_read_units_percent", "=", "(", "float", "(", "consumed_read_units", ")", "/", "float", "(", "gsi_read_units", ")", "*", "100", ")", "except", "JSONResponseError", ":", "raise", "logger", ".", "info", "(", "'{0} - GSI: {1} - Consumed read units: {2:.2f}%'", ".", "format", "(", "table_name", ",", "gsi_name", ",", "consumed_read_units_percent", ")", ")", "return", "consumed_read_units_percent" ]
Returns the number of consumed read units in percent :type table_name: str :param table_name: Name of the DynamoDB table :type gsi_name: str :param gsi_name: Name of the GSI :type lookback_window_start: int :param lookback_window_start: Relative start time for the CloudWatch metric :type lookback_period: int :param lookback_period: Number of minutes to look at :returns: float -- Number of consumed reads as a percentage of provisioned reads
[ "Returns", "the", "number", "of", "consumed", "read", "units", "in", "percent" ]
python
train
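The arithmetic in the record above, isolated with made-up numbers: CloudWatch reports the Sum of consumed capacity units over the lookback window, so dividing by the window length in seconds gives units per second, which is then expressed as a percentage of the provisioned throughput.

def consumed_percent(metric_sum, lookback_period_min, provisioned_units):
    # Sum over the window -> average units per second -> percent of capacity.
    lookback_seconds = lookback_period_min * 60
    consumed_per_second = float(metric_sum) / float(lookback_seconds)
    return consumed_per_second / float(provisioned_units) * 100

# 30000 consumed read units over 5 minutes against 200 provisioned units:
assert consumed_percent(30000, 5, 200) == 50.0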
PMEAL/porespy
porespy/tools/__funcs__.py
https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/tools/__funcs__.py#L709-L734
def in_hull(points, hull):
    """
    Test if a list of coordinates are inside a given convex hull

    Parameters
    ----------
    points : array_like (N x ndims)
        The spatial coordinates of the points to check

    hull : scipy.spatial.ConvexHull object **OR** array_like
        Can be either a convex hull object as returned by
        ``scipy.spatial.ConvexHull`` or simply the coordinates of the points
        that define the convex hull.

    Returns
    -------
    result : 1D-array
        A 1D Boolean array of length *N* indicating whether or not each of
        the given ``points`` lies within the provided ``hull``.

    """
    from scipy.spatial import Delaunay, ConvexHull
    if isinstance(hull, ConvexHull):
        hull = hull.points
    hull = Delaunay(hull)
    return hull.find_simplex(points) >= 0
[ "def", "in_hull", "(", "points", ",", "hull", ")", ":", "from", "scipy", ".", "spatial", "import", "Delaunay", ",", "ConvexHull", "if", "isinstance", "(", "hull", ",", "ConvexHull", ")", ":", "hull", "=", "hull", ".", "points", "hull", "=", "Delaunay", "(", "hull", ")", "return", "hull", ".", "find_simplex", "(", "points", ")", ">=", "0" ]
Test if a list of coordinates are inside a given convex hull

Parameters
----------
points : array_like (N x ndims)
    The spatial coordinates of the points to check

hull : scipy.spatial.ConvexHull object **OR** array_like
    Can be either a convex hull object as returned by
    ``scipy.spatial.ConvexHull`` or simply the coordinates of the points
    that define the convex hull.

Returns
-------
result : 1D-array
    A 1D Boolean array of length *N* indicating whether or not each of
    the given ``points`` lies within the provided ``hull``.
[ "Test", "if", "a", "list", "of", "coordinates", "are", "inside", "a", "given", "convex", "hull" ]
python
train
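Usage sketch for in_hull above, assuming the function is in scope: scipy's Delaunay.find_simplex returns -1 for points outside the triangulation built from the hull vertices, so the '>= 0' test marks insiders.

import numpy as np
from scipy.spatial import ConvexHull

square = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
probes = np.array([[0.5, 0.5], [2.0, 2.0]])
print(in_hull(probes, ConvexHull(square)))  # -> [ True False]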
tdryer/hangups
hangups/client.py
https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/client.py#L611-L616
async def set_active_client(self, set_active_client_request): """Set the active client.""" response = hangouts_pb2.SetActiveClientResponse() await self._pb_request('clients/setactiveclient', set_active_client_request, response) return response
[ "async", "def", "set_active_client", "(", "self", ",", "set_active_client_request", ")", ":", "response", "=", "hangouts_pb2", ".", "SetActiveClientResponse", "(", ")", "await", "self", ".", "_pb_request", "(", "'clients/setactiveclient'", ",", "set_active_client_request", ",", "response", ")", "return", "response" ]
Set the active client.
[ "Set", "the", "active", "client", "." ]
python
valid
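A hedged usage sketch for the async wrapper above; it assumes a connected hangups client and an available hangouts_pb2 module, and fills no request fields, so treat the details as omitted rather than documented.

async def mark_active(client):
    # Build the request from the generated protobuf module, then await the
    # round trip; the wrapper returns the populated response message.
    # Run with e.g.: asyncio.run(mark_active(client))
    request = hangouts_pb2.SetActiveClientRequest()  # fields omitted here
    response = await client.set_active_client(request)
    return response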
titusjan/argos
argos/repo/rtiplugins/pandasio.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/rtiplugins/pandasio.py#L234-L243
def _fetchAllChildren(self):
    """ Fetches the index if the showIndex member is True
        Descendants can override this function to add the subdivisions.
    """
    assert self.isSliceable, "No underlying pandas object: self._ndFrame is None"

    childItems = []
    if self._standAlone:
        childItems.append(self._createIndexRti(self._ndFrame.index, 'index'))
    return childItems
[ "def", "_fetchAllChildren", "(", "self", ")", ":", "assert", "self", ".", "isSliceable", ",", "\"No underlying pandas object: self._ndFrame is None\"", "childItems", "=", "[", "]", "if", "self", ".", "_standAlone", ":", "childItems", ".", "append", "(", "self", ".", "_createIndexRti", "(", "self", ".", "_ndFrame", ".", "index", ",", "'index'", ")", ")", "return", "childItems" ]
Fetches the index if the showIndex member is True
Descendants can override this function to add the subdivisions.
[ "Fetches", "the", "index", "if", "the", "showIndex", "member", "is", "True" ]
python
train
project-rig/rig
rig/routing_table/minimise.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/minimise.py#L74-L132
def minimise_table(table, target_length,
                   methods=(remove_default_entries, ordered_covering)):
    """Apply different minimisation algorithms to minimise a single routing
    table.

    Parameters
    ----------
    table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
        Routing table to minimise.  NOTE: This is the data structure as
        returned by :py:meth:`~rig.routing_table.routing_tree_to_tables`.
    target_length : int or None
        Maximum length of the routing table. If None then all methods will be
        tried and the smallest achieved table will be returned.
    methods :
        Each method is tried in the order presented and the first to meet the
        required target length for a given chip is used. Consequently, less
        computationally costly algorithms should be nearer the start of the
        list. The defaults will try to remove default routes
        (:py:meth:`rig.routing_table.remove_default_routes.minimise`) and then
        fall back on the ordered covering algorithm
        (:py:meth:`rig.routing_table.ordered_covering.minimise`).

    Returns
    -------
    [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
        Minimised routing table, guaranteed to be at least as small as
        `target_length`, or as small as possible if `target_length` is None.

    Raises
    ------
    MinimisationFailedError
        If no method can sufficiently minimise the table.
    """
    # Add a final method which checks the size of the table and returns it if
    # the size is correct. NOTE: This method will avoid running any other
    # minimisers if the table is already sufficiently small.
    methods = list(methods)
    methods.insert(0, _identity)

    if target_length is not None:
        best_achieved = len(table)

        # Try each minimiser in turn until the table is small enough
        for f in methods:
            try:
                # Minimise the table, if this fails a MinimisationFailedError
                # will be raised and the return will not be executed.
                new_table = f(table, target_length)
                return new_table
            except MinimisationFailedError as exc:
                # Store the best achieved final length
                if best_achieved is None or exc.final_length < best_achieved:
                    best_achieved = exc.final_length

        # The table must still be too large
        raise MinimisationFailedError(target_length, best_achieved)
    else:
        # Try all methods and return the smallest table
        return min((f(table, target_length) for f in methods), key=len)
[ "def", "minimise_table", "(", "table", ",", "target_length", ",", "methods", "=", "(", "remove_default_entries", ",", "ordered_covering", ")", ")", ":", "# Add a final method which checks the size of the table and returns it if", "# the size is correct. NOTE: This method will avoid running any other", "# minimisers if the table is already sufficiently small.", "methods", "=", "list", "(", "methods", ")", "methods", ".", "insert", "(", "0", ",", "_identity", ")", "if", "target_length", "is", "not", "None", ":", "best_achieved", "=", "len", "(", "table", ")", "# Try each minimiser in turn until the table is small enough", "for", "f", "in", "methods", ":", "try", ":", "# Minimise the table, if this fails a MinimisationFailedError", "# will be raised and the return will not be executed.", "new_table", "=", "f", "(", "table", ",", "target_length", ")", "return", "new_table", "except", "MinimisationFailedError", "as", "exc", ":", "# Store the best achieved final length", "if", "best_achieved", "is", "None", "or", "exc", ".", "final_length", "<", "best_achieved", ":", "best_achieved", "=", "exc", ".", "final_length", "# The table must still be too large", "raise", "MinimisationFailedError", "(", "target_length", ",", "best_achieved", ")", "else", ":", "# Try all methods and return the smallest table", "return", "min", "(", "(", "f", "(", "table", ",", "target_length", ")", "for", "f", "in", "methods", ")", ",", "key", "=", "len", ")" ]
Apply different minimisation algorithms to minimise a single routing
table.

Parameters
----------
table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
    Routing table to minimise.  NOTE: This is the data structure as
    returned by :py:meth:`~rig.routing_table.routing_tree_to_tables`.
target_length : int or None
    Maximum length of the routing table. If None then all methods will be
    tried and the smallest achieved table will be returned.
methods :
    Each method is tried in the order presented and the first to meet the
    required target length for a given chip is used. Consequently, less
    computationally costly algorithms should be nearer the start of the
    list. The defaults will try to remove default routes
    (:py:meth:`rig.routing_table.remove_default_routes.minimise`) and then
    fall back on the ordered covering algorithm
    (:py:meth:`rig.routing_table.ordered_covering.minimise`).

Returns
-------
[:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
    Minimised routing table, guaranteed to be at least as small as
    `target_length`, or as small as possible if `target_length` is None.

Raises
------
MinimisationFailedError
    If no method can sufficiently minimise the table.
[ "Apply", "different", "minimisation", "algorithms", "to", "minimise", "a", "single", "routing", "table", "." ]
python
train
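Stripped to its skeleton, the record above is a fallback chain: try increasingly expensive strategies in order, remember the best near-miss, and raise only if every strategy fails. A generic sketch with invented names:

class TooLarge(Exception):
    def __init__(self, final_length):
        super().__init__(final_length)
        self.final_length = final_length

def first_small_enough(table, target, strategies):
    best = len(table)
    for strategy in strategies:
        try:
            return strategy(table, target)  # first success wins
        except TooLarge as exc:
            best = min(best, exc.final_length)
    # No strategy met the target; report the best length any achieved.
    raise TooLarge(best)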
byt3bl33d3r/CrackMapExec
cme/modules/invoke_vnc.py
https://github.com/byt3bl33d3r/CrackMapExec/blob/333f1c4e06884e85b2776459963ef85d182aba8e/cme/modules/invoke_vnc.py#L15-L39
def options(self, context, module_options): ''' CONTYPE Specifies the VNC connection type, choices are: reverse, bind (default: reverse). PORT VNC Port (default: 5900) PASSWORD Specifies the connection password. ''' self.contype = 'reverse' self.port = 5900 self.password = None if 'PASSWORD' not in module_options: context.log.error('PASSWORD option is required!') exit(1) if 'CONTYPE' in module_options: self.contype = module_options['CONTYPE'] if 'PORT' in module_options: self.port = int(module_options['PORT']) self.password = module_options['PASSWORD'] self.ps_script1 = obfs_ps_script('cme_powershell_scripts/Invoke-PSInject.ps1') self.ps_script2 = obfs_ps_script('invoke-vnc/Invoke-Vnc.ps1')
[ "def", "options", "(", "self", ",", "context", ",", "module_options", ")", ":", "self", ".", "contype", "=", "'reverse'", "self", ".", "port", "=", "5900", "self", ".", "password", "=", "None", "if", "'PASSWORD'", "not", "in", "module_options", ":", "context", ".", "log", ".", "error", "(", "'PASSWORD option is required!'", ")", "exit", "(", "1", ")", "if", "'CONTYPE'", "in", "module_options", ":", "self", ".", "contype", "=", "module_options", "[", "'CONTYPE'", "]", "if", "'PORT'", "in", "module_options", ":", "self", ".", "port", "=", "int", "(", "module_options", "[", "'PORT'", "]", ")", "self", ".", "password", "=", "module_options", "[", "'PASSWORD'", "]", "self", ".", "ps_script1", "=", "obfs_ps_script", "(", "'cme_powershell_scripts/Invoke-PSInject.ps1'", ")", "self", ".", "ps_script2", "=", "obfs_ps_script", "(", "'invoke-vnc/Invoke-Vnc.ps1'", ")" ]
CONTYPE Specifies the VNC connection type, choices are: reverse, bind (default: reverse). PORT VNC Port (default: 5900) PASSWORD Specifies the connection password.
[ "CONTYPE", "Specifies", "the", "VNC", "connection", "type", "choices", "are", ":", "reverse", "bind", "(", "default", ":", "reverse", ")", ".", "PORT", "VNC", "Port", "(", "default", ":", "5900", ")", "PASSWORD", "Specifies", "the", "connection", "password", "." ]
python
train
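The option handling above follows a defaults-then-validate pattern; a gentler stand-alone version that raises instead of calling exit(1) (names are illustrative):

def parse_module_options(module_options):
    opts = {'CONTYPE': 'reverse', 'PORT': 5900}
    if 'PASSWORD' not in module_options:
        raise ValueError('PASSWORD option is required!')
    opts.update(module_options)
    opts['PORT'] = int(opts['PORT'])  # accept '5901' as well as 5901
    return opts

opts = parse_module_options({'PASSWORD': 's3cret', 'PORT': '5901'})
assert opts == {'CONTYPE': 'reverse', 'PORT': 5901, 'PASSWORD': 's3cret'}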
genialis/resolwe
resolwe/flow/utils/stats.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/utils/stats.py#L30-L46
def update(self, num): """Update metrics with the new number.""" num = float(num) self.count += 1 self.low = min(self.low, num) self.high = max(self.high, num) # Welford's online mean and variance algorithm. delta = num - self.mean self.mean = self.mean + delta / self.count delta2 = num - self.mean self._rolling_variance = self._rolling_variance + delta * delta2 if self.count > 1: self.deviation = math.sqrt(self._rolling_variance / (self.count - 1)) else: self.deviation = 0.0
[ "def", "update", "(", "self", ",", "num", ")", ":", "num", "=", "float", "(", "num", ")", "self", ".", "count", "+=", "1", "self", ".", "low", "=", "min", "(", "self", ".", "low", ",", "num", ")", "self", ".", "high", "=", "max", "(", "self", ".", "high", ",", "num", ")", "# Welford's online mean and variance algorithm.", "delta", "=", "num", "-", "self", ".", "mean", "self", ".", "mean", "=", "self", ".", "mean", "+", "delta", "/", "self", ".", "count", "delta2", "=", "num", "-", "self", ".", "mean", "self", ".", "_rolling_variance", "=", "self", ".", "_rolling_variance", "+", "delta", "*", "delta2", "if", "self", ".", "count", ">", "1", ":", "self", ".", "deviation", "=", "math", ".", "sqrt", "(", "self", ".", "_rolling_variance", "/", "(", "self", ".", "count", "-", "1", ")", ")", "else", ":", "self", ".", "deviation", "=", "0.0" ]
Update metrics with the new number.
[ "Update", "metrics", "with", "the", "new", "number", "." ]
python
train
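The update above is Welford's single-pass recurrence; a quick sanity check of the same recurrence against the standard library's two-pass sample statistics:

import math
import statistics

data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
mean, m2 = 0.0, 0.0
for count, num in enumerate(data, start=1):
    delta = num - mean
    mean += delta / count
    m2 += delta * (num - mean)  # uses the *updated* mean, as above
deviation = math.sqrt(m2 / (count - 1))

assert abs(mean - statistics.mean(data)) < 1e-12
assert abs(deviation - statistics.stdev(data)) < 1e-12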
saltstack/salt
salt/modules/zabbix.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zabbix.py#L2305-L2343
def mediatype_update(mediatypeid, name=False, mediatype=False, **kwargs):
    '''
    Update existing mediatype

    .. note::
        This function accepts all standard mediatype properties: keyword
        argument names differ depending on your zabbix version, see here__.

        .. __: https://www.zabbix.com/documentation/3.0/manual/api/reference/mediatype/object

    :param mediatypeid: ID of the mediatype to update
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    :return: IDs of the updated mediatypes, False on failure.

    CLI Example:

    .. code-block:: bash

        salt '*' zabbix.mediatype_update 8 name="Email update"
    '''
    conn_args = _login(**kwargs)
    ret = {}
    try:
        if conn_args:
            method = 'mediatype.update'
            params = {"mediatypeid": mediatypeid}
            if name:
                params['description'] = name
            if mediatype:
                params['type'] = mediatype
            params = _params_extend(params, **kwargs)
            ret = _query(method, params, conn_args['url'], conn_args['auth'])
            return ret['result']['mediatypeids']
        else:
            raise KeyError
    except KeyError:
        return ret
[ "def", "mediatype_update", "(", "mediatypeid", ",", "name", "=", "False", ",", "mediatype", "=", "False", ",", "*", "*", "kwargs", ")", ":", "conn_args", "=", "_login", "(", "*", "*", "kwargs", ")", "ret", "=", "{", "}", "try", ":", "if", "conn_args", ":", "method", "=", "'mediatype.update'", "params", "=", "{", "\"mediatypeid\"", ":", "mediatypeid", "}", "if", "name", ":", "params", "[", "'description'", "]", "=", "name", "if", "mediatype", ":", "params", "[", "'type'", "]", "=", "mediatype", "params", "=", "_params_extend", "(", "params", ",", "*", "*", "kwargs", ")", "ret", "=", "_query", "(", "method", ",", "params", ",", "conn_args", "[", "'url'", "]", ",", "conn_args", "[", "'auth'", "]", ")", "return", "ret", "[", "'result'", "]", "[", "'mediatypeids'", "]", "else", ":", "raise", "KeyError", "except", "KeyError", ":", "return", "ret" ]
Update existing mediatype

.. note::
    This function accepts all standard mediatype properties: keyword
    argument names differ depending on your zabbix version, see here__.

    .. __: https://www.zabbix.com/documentation/3.0/manual/api/reference/mediatype/object

:param mediatypeid: ID of the mediatype to update
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

:return: IDs of the updated mediatypes, False on failure.

CLI Example:

.. code-block:: bash

    salt '*' zabbix.mediatype_update 8 name="Email update"
[ "Update", "existing", "mediatype" ]
python
train
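The wrapper above boils down to building a Zabbix JSON-RPC call; a hypothetical sketch of the envelope it sends (the real _query helper lives elsewhere in the salt module and differs in detail):

import json
import urllib.request

def zabbix_query(method, params, url, auth, request_id=0):
    payload = {'jsonrpc': '2.0', 'method': method,
               'params': params, 'auth': auth, 'id': request_id}
    req = urllib.request.Request(
        url, data=json.dumps(payload).encode('utf-8'),
        headers={'Content-Type': 'application/json-rpc'})
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read().decode('utf-8'))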