Dataset schema (one record per function; string lengths are min–max):

    column            type      range / values
    ----------------  --------  ------------------
    repo              string    7–55 chars
    path              string    4–223 chars
    url               string    87–315 chars
    code              string    75–104k chars
    code_tokens       list      -
    docstring         string    1–46.9k chars
    docstring_tokens  list      -
    language          string    1 distinct value
    partition         string    3 distinct values
    avg_line_len      float64   7.91–980
grundic/yagocd
yagocd/client.py
https://github.com/grundic/yagocd/blob/4c75336ae6f107c8723d37b15e52169151822127/yagocd/client.py#L221-L229
def info(self):
    """
    Property for accessing :class:`InfoManager` instance, which is used
    to access general server info.

    :rtype: yagocd.resources.info.InfoManager
    """
    if self._info_manager is None:
        self._info_manager = InfoManager(session=self._session)
    return self._info_manager
[ "def", "info", "(", "self", ")", ":", "if", "self", ".", "_info_manager", "is", "None", ":", "self", ".", "_info_manager", "=", "InfoManager", "(", "session", "=", "self", ".", "_session", ")", "return", "self", ".", "_info_manager" ]
Property for accessing :class:`InfoManager` instance, which is used to access general server info. :rtype: yagocd.resources.info.InfoManager
[ "Property", "for", "accessing", ":", "class", ":", "InfoManager", "instance", "which", "is", "used", "to", "general", "server", "info", "." ]
python
train
35.888889
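The `info` property above is a lazy-initialization (memoized accessor) pattern. A minimal self-contained sketch of the same pattern, using hypothetical `Client` and `FakeManager` names rather than anything from yagocd:

    # Lazy-initialization sketch; FakeManager and Client are hypothetical.
    class FakeManager:
        def __init__(self, session):
            self.session = session

    class Client:
        def __init__(self, session):
            self._session = session
            self._info_manager = None

        @property
        def info(self):
            # Created once on first access, then the same instance is reused.
            if self._info_manager is None:
                self._info_manager = FakeManager(session=self._session)
            return self._info_manager

    c = Client(session="dummy-session")
    assert c.info is c.info  # the same manager instance every time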
Skyscanner/pycfmodel
pycfmodel/model/resources/properties/policy_document.py
https://github.com/Skyscanner/pycfmodel/blob/e3da4db96f59c0a5dba06ae66ad25645775e5500/pycfmodel/model/resources/properties/policy_document.py#L176-L189
def wildcard_allowed_actions(self, pattern=None):
    """
    Find statements which allow wildcard actions.

    A pattern can be specified for the wildcard action
    """
    wildcard_allowed = []

    for statement in self.statements:
        if statement.wildcard_actions(pattern) and statement.effect == "Allow":
            wildcard_allowed.append(statement)

    return wildcard_allowed
[ "def", "wildcard_allowed_actions", "(", "self", ",", "pattern", "=", "None", ")", ":", "wildcard_allowed", "=", "[", "]", "for", "statement", "in", "self", ".", "statements", ":", "if", "statement", ".", "wildcard_actions", "(", "pattern", ")", "and", "statement", ".", "effect", "==", "\"Allow\"", ":", "wildcard_allowed", ".", "append", "(", "statement", ")", "return", "wildcard_allowed" ]
Find statements which allow wildcard actions. A pattern can be specified for the wildcard action
[ "Find", "statements", "which", "allow", "wildcard", "actions", "." ]
python
train
29.714286
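A hedged usage sketch: the constructor signature and the policy JSON below are assumptions based on this (older) pycfmodel revision, not taken from the dataset.

    from pycfmodel.model.resources.properties.policy_document import PolicyDocument

    # Illustrative IAM policy with a wildcard action.
    policy_doc = PolicyDocument({
        "Statement": [{
            "Effect": "Allow",
            "Action": ["s3:*"],
            "Resource": "*",
        }]
    })
    for statement in policy_doc.wildcard_allowed_actions():
        print(statement)  # statements whose allowed actions contain a wildcard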
lambdamusic/Ontospy
ontospy/extras/hacks/sketch.py
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/hacks/sketch.py#L149-L166
def omnigraffle(self):
    """ tries to open an export directly in omnigraffle """
    temp = self.rdf_source("dot")
    try:
        # try to put it in the user's ~/tmp folder
        from os.path import expanduser
        home = expanduser("~")
        filename = home + "/tmp/turtle_sketch.dot"
        f = open(filename, "w")
    except (IOError, OSError):
        # fall back to the current directory
        filename = "turtle_sketch.dot"
        f = open(filename, "w")
    f.write(temp)
    f.close()
    # os.system() does not raise when a command is missing; it returns a
    # non-zero exit status, so check the return code instead of catching.
    if os.system("open " + filename) != 0:
        os.system("start " + filename)
[ "def", "omnigraffle", "(", "self", ")", ":", "temp", "=", "self", ".", "rdf_source", "(", "\"dot\"", ")", "try", ":", "# try to put in the user/tmp folder", "from", "os", ".", "path", "import", "expanduser", "home", "=", "expanduser", "(", "\"~\"", ")", "filename", "=", "home", "+", "\"/tmp/turtle_sketch.dot\"", "f", "=", "open", "(", "filename", ",", "\"w\"", ")", "except", ":", "filename", "=", "\"turtle_sketch.dot\"", "f", "=", "open", "(", "filename", ",", "\"w\"", ")", "f", ".", "write", "(", "temp", ")", "f", ".", "close", "(", ")", "try", ":", "os", ".", "system", "(", "\"open \"", "+", "filename", ")", "except", ":", "os", ".", "system", "(", "\"start \"", "+", "filename", ")" ]
tries to open an export directly in omnigraffle
[ "tries", "to", "open", "an", "export", "directly", "in", "omnigraffle" ]
python
train
25.333333
CalebBell/ht
ht/conv_plate.py
https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/conv_plate.py#L51-L147
def Nu_plate_Kumar(Re, Pr, chevron_angle, mu=None, mu_wall=None):
    r'''Calculates Nusselt number for single-phase flow in a
    **well-designed** Chevron-style plate heat exchanger according to [1]_.
    The data is believed to have been developed by APV International Limited,
    since acquired by SPX Corporation. This uses a curve fit of that data
    published in [2]_.

    .. math::
        Nu = C_1 Re^m Pr^{0.33}\left(\frac{\mu}{\mu_{wall}}\right)^{0.17}

    `C1` and `m` are coefficients looked up in a table, with varying ranges
    of Re validity and chevron angle validity. See the source for their
    exact values. The wall fluid property correction is included only if the
    viscosity values are provided.

    Parameters
    ----------
    Re : float
        Reynolds number with respect to the hydraulic diameter of the
        channels, [-]
    Pr : float
        Prandtl number calculated with bulk fluid properties, [-]
    chevron_angle : float
        Angle of the plate corrugations with respect to the vertical axis
        (the direction of flow if the plates were straight), between 0 and
        90. Many plate exchangers use two alternating patterns; use their
        average angle for that situation [degrees]
    mu : float, optional
        Viscosity of the fluid at the bulk (inlet and outlet average)
        temperature, [Pa*s]
    mu_wall : float, optional
        Viscosity of fluid at wall temperature, [Pa*s]

    Returns
    -------
    Nu : float
        Nusselt number with respect to `Dh`, [-]

    Notes
    -----
    Data on graph from Re=0.1 to Re=10000, with chevron angles 30 to 65
    degrees. See `PlateExchanger` for further clarification on the
    definitions.

    It is believed the constants used in this correlation were curve-fit to
    the actual graph in [1]_ by the author of [2]_ as there is no

    As the coefficients change, there are numerous small discontinuities,
    although the data on the graphs is continuous with sharp transitions
    of the slope.

    The author of [1]_ states clearly this correlation is "applicable only
    to well designed Chevron PHEs".

    Examples
    --------
    >>> Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30)
    47.757818892853955

    With the wall-correction factor included:

    >>> Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30, mu=1E-3, mu_wall=8E-4)
    49.604284135097544

    References
    ----------
    .. [1] Kumar, H. "The plate heat exchanger: construction and design."
       In First U.K. National Conference on Heat Transfer: Held at the
       University of Leeds, 3-5 July 1984, Institute of Chemical Engineering
       Symposium Series, vol. 86, pp. 1275-1288. 1984.
    .. [2] Ayub, Zahid H. "Plate Heat Exchanger Literature Survey and New
       Heat Transfer and Pressure Drop Correlations for Refrigerant
       Evaporators." Heat Transfer Engineering 24, no. 5 (September 1,
       2003): 3-16. doi:10.1080/01457630304056.
    '''
    # Uses the standard diameter as characteristic diameter
    beta_list_len = len(Kumar_beta_list)

    for i in range(beta_list_len):
        if chevron_angle <= Kumar_beta_list[i]:
            C1_options, m_options, Re_ranges = Kumar_C1s[i], Kumar_ms[i], Kumar_Nu_Res[i]
            break
        elif i == beta_list_len - 1:
            C1_options, m_options, Re_ranges = Kumar_C1s[-1], Kumar_ms[-1], Kumar_Nu_Res[-1]

    Re_len = len(Re_ranges)

    for j in range(Re_len):
        if Re <= Re_ranges[j]:
            C1, m = C1_options[j], m_options[j]
            break
        elif j == Re_len - 1:
            C1, m = C1_options[-1], m_options[-1]

    Nu = C1*Re**m*Pr**0.33
    if mu_wall is not None and mu is not None:
        Nu *= (mu/mu_wall)**0.17
    return Nu
[ "def", "Nu_plate_Kumar", "(", "Re", ",", "Pr", ",", "chevron_angle", ",", "mu", "=", "None", ",", "mu_wall", "=", "None", ")", ":", "# Uses the standard diameter as characteristic diameter", "beta_list_len", "=", "len", "(", "Kumar_beta_list", ")", "for", "i", "in", "range", "(", "beta_list_len", ")", ":", "if", "chevron_angle", "<=", "Kumar_beta_list", "[", "i", "]", ":", "C1_options", ",", "m_options", ",", "Re_ranges", "=", "Kumar_C1s", "[", "i", "]", ",", "Kumar_ms", "[", "i", "]", ",", "Kumar_Nu_Res", "[", "i", "]", "break", "elif", "i", "==", "beta_list_len", "-", "1", ":", "C1_options", ",", "m_options", ",", "Re_ranges", "=", "Kumar_C1s", "[", "-", "1", "]", ",", "Kumar_ms", "[", "-", "1", "]", ",", "Kumar_Nu_Res", "[", "-", "1", "]", "Re_len", "=", "len", "(", "Re_ranges", ")", "for", "j", "in", "range", "(", "Re_len", ")", ":", "if", "Re", "<=", "Re_ranges", "[", "j", "]", ":", "C1", ",", "m", "=", "C1_options", "[", "j", "]", ",", "m_options", "[", "j", "]", "break", "elif", "j", "==", "Re_len", "-", "1", ":", "C1", ",", "m", "=", "C1_options", "[", "-", "1", "]", ",", "m_options", "[", "-", "1", "]", "Nu", "=", "C1", "*", "Re", "**", "m", "*", "Pr", "**", "0.33", "if", "mu_wall", "is", "not", "None", "and", "mu", "is", "not", "None", ":", "Nu", "*=", "(", "mu", "/", "mu_wall", ")", "**", "0.17", "return", "Nu" ]
r'''Calculates Nusselt number for single-phase flow in a **well-designed** Chevron-style plate heat exchanger according to [1]_. The data is believed to have been developed by APV International Limited, since acquired by SPX Corporation. This uses a curve fit of that data published in [2]_. .. math:: Nu = C_1 Re^m Pr^{0.33}\left(\frac{\mu}{\mu_{wall}}\right)^{0.17} `C1` and `m` are coefficients looked up in a table, with varying ranges of Re validity and chevron angle validity. See the source for their exact values. The wall fluid property correction is included only if the viscosity values are provided. Parameters ---------- Re : float Reynolds number with respect to the hydraulic diameter of the channels, [-] Pr : float Prandtl number calculated with bulk fluid properties, [-] chevron_angle : float Angle of the plate corrugations with respect to the vertical axis (the direction of flow if the plates were straight), between 0 and 90. Many plate exchangers use two alternating patterns; use their average angle for that situation [degrees] mu : float, optional Viscosity of the fluid at the bulk (inlet and outlet average) temperature, [Pa*s] mu_wall : float, optional Viscosity of fluid at wall temperature, [Pa*s] Returns ------- Nu : float Nusselt number with respect to `Dh`, [-] Notes ----- Data on graph from Re=0.1 to Re=10000, with chevron angles 30 to 65 degrees. See `PlateExchanger` for further clarification on the definitions. It is believed the constants used in this correlation were curve-fit to the actual graph in [1]_ by the author of [2]_ as there is no As the coefficients change, there are numerous small discontinuities, although the data on the graphs is continuous with sharp transitions of the slope. The author of [1]_ states clearly this correlation is "applicable only to well designed Chevron PHEs". Examples -------- >>> Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30) 47.757818892853955 With the wall-correction factor included: >>> Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30, mu=1E-3, mu_wall=8E-4) 49.604284135097544 References ---------- .. [1] Kumar, H. "The plate heat exchanger: construction and design." In First U.K. National Conference on Heat Transfer: Held at the University of Leeds, 3-5 July 1984, Institute of Chemical Engineering Symposium Series, vol. 86, pp. 1275-1288. 1984. .. [2] Ayub, Zahid H. "Plate Heat Exchanger Literature Survey and New Heat Transfer and Pressure Drop Correlations for Refrigerant Evaporators." Heat Transfer Engineering 24, no. 5 (September 1, 2003): 3-16. doi:10.1080/01457630304056.
[ "r", "Calculates", "Nusselt", "number", "for", "single", "-", "phase", "flow", "in", "a", "**", "well", "-", "designed", "**", "Chevron", "-", "style", "plate", "heat", "exchanger", "according", "to", "[", "1", "]", "_", ".", "The", "data", "is", "believed", "to", "have", "been", "developed", "by", "APV", "International", "Limited", "since", "acquired", "by", "SPX", "Corporation", ".", "This", "uses", "a", "curve", "fit", "of", "that", "data", "published", "in", "[", "2", "]", "_", ".", "..", "math", "::", "Nu", "=", "C_1", "Re^m", "Pr^", "{", "0", ".", "33", "}", "\\", "left", "(", "\\", "frac", "{", "\\", "mu", "}", "{", "\\", "mu_", "{", "wall", "}}", "\\", "right", ")", "^", "{", "0", ".", "17", "}", "C1", "and", "m", "are", "coefficients", "looked", "up", "in", "a", "table", "with", "varying", "ranges", "of", "Re", "validity", "and", "chevron", "angle", "validity", ".", "See", "the", "source", "for", "their", "exact", "values", ".", "The", "wall", "fluid", "property", "correction", "is", "included", "only", "if", "the", "viscosity", "values", "are", "provided", ".", "Parameters", "----------", "Re", ":", "float", "Reynolds", "number", "with", "respect", "to", "the", "hydraulic", "diameter", "of", "the", "channels", "[", "-", "]", "Pr", ":", "float", "Prandtl", "number", "calculated", "with", "bulk", "fluid", "properties", "[", "-", "]", "chevron_angle", ":", "float", "Angle", "of", "the", "plate", "corrugations", "with", "respect", "to", "the", "vertical", "axis", "(", "the", "direction", "of", "flow", "if", "the", "plates", "were", "straight", ")", "between", "0", "and", "90", ".", "Many", "plate", "exchangers", "use", "two", "alternating", "patterns", ";", "use", "their", "average", "angle", "for", "that", "situation", "[", "degrees", "]", "mu", ":", "float", "optional", "Viscosity", "of", "the", "fluid", "at", "the", "bulk", "(", "inlet", "and", "outlet", "average", ")", "temperature", "[", "Pa", "*", "s", "]", "mu_wall", ":", "float", "optional", "Viscosity", "of", "fluid", "at", "wall", "temperature", "[", "Pa", "*", "s", "]" ]
python
train
38.505155
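A usage sketch reproducing the record's own doctest values, assuming the `ht` package (which ships this function) is installed; the coefficient tables themselves stay internal to the module:

    from ht import Nu_plate_Kumar

    Nu = Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30)
    print(Nu)  # expected per the docstring: 47.757818892853955

    # With the viscosity wall-correction applied; the correction multiplies
    # Nu by (mu/mu_wall)**0.17 = 1.25**0.17 ~= 1.039, giving ~49.60.
    Nu_corr = Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30, mu=1E-3, mu_wall=8E-4)
    print(Nu_corr)  # expected per the docstring: 49.604284135097544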
e7dal/bubble3
behave4cmd0/command_steps.py
https://github.com/e7dal/bubble3/blob/59c735281a95b44f6263a25f4d6ce24fca520082/behave4cmd0/command_steps.py#L198-L210
def step_it_should_fail_with(context):
    '''
    EXAMPLE:
        ...
        when I run "behave ..."
        then it should fail with:
            """
            TEXT
            """
    '''
    assert context.text is not None, "ENSURE: multiline text is provided."
    step_command_output_should_contain(context)
    assert_that(context.command_result.returncode, is_not(equal_to(0)))
[ "def", "step_it_should_fail_with", "(", "context", ")", ":", "assert", "context", ".", "text", "is", "not", "None", ",", "\"ENSURE: multiline text is provided.\"", "step_command_output_should_contain", "(", "context", ")", "assert_that", "(", "context", ".", "command_result", ".", "returncode", ",", "is_not", "(", "equal_to", "(", "0", ")", ")", ")" ]
EXAMPLE: ... when I run "behave ..." then it should fail with: """ TEXT """
[ "EXAMPLE", ":", "...", "when", "I", "run", "behave", "...", "then", "it", "should", "fail", "with", ":", "TEXT" ]
python
train
29
neurosynth/neurosynth
neurosynth/analysis/decode.py
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/decode.py#L216-L219
def _dot_product(self, imgs_to_decode):
    """ Decoding using the dot product. """
    return np.dot(imgs_to_decode.T, self.feature_images).T
[ "def", "_dot_product", "(", "self", ",", "imgs_to_decode", ")", ":", "return", "np", ".", "dot", "(", "imgs_to_decode", ".", "T", ",", "self", ".", "feature_images", ")", ".", "T" ]
Decoding using the dot product.
[ "Decoding", "using", "the", "dot", "product", "." ]
python
test
38.75
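A self-contained sketch of the shape logic: decoding N images against K feature images, all over V voxels, yields a (K, N) similarity matrix. The sizes below are illustrative.

    import numpy as np

    V, N, K = 1000, 3, 5                   # voxels, images to decode, features
    imgs_to_decode = np.random.rand(V, N)
    feature_images = np.random.rand(V, K)

    result = np.dot(imgs_to_decode.T, feature_images).T
    print(result.shape)  # (5, 3): one row per feature, one column per image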
Iotic-Labs/py-IoticAgent
src/IoticAgent/Core/MessageDecoder.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/Core/MessageDecoder.py#L84-L104
def decode_sent_msg(pref, message, pretty=False):
    """decode_sent_msg: Return a string of the decoded message
    """
    newline = "\n" if pretty else " "
    indent = " " if pretty else ""
    start = newline + indent
    out = []
    out.append("%s%s{%sSEQNUM: %d," % (pref, newline, start, message[Const.W_SEQ]))
    out.append("%sCOMPRESSION: %d," % (start, message[Const.W_COMPRESSION]))
    out.append("%sHASH: %s...," % (start, str(binascii.b2a_hex(message[Const.W_HASH]).decode('ascii'))[:10]))
    out.append("%sMESSAGE:%s{%sCLIENTREF: %s," % (start, start, start + indent,
                                                  message[Const.W_MESSAGE][Const.M_CLIENTREF]))
    out.append("%sRESOURCE: %s," % (start + indent, R_TYPES[message[Const.W_MESSAGE][Const.M_RESOURCE]]))
    out.append("%sTYPE: %s," % (start + indent, C_TYPES[message[Const.W_MESSAGE][Const.M_TYPE]]))
    out.append("%sACTION: %s," % (start + indent, message[Const.W_MESSAGE][Const.M_ACTION]))
    if Const.M_RANGE in message[Const.W_MESSAGE]:
        out.append("%sRANGE: %s," % (start + indent, message[Const.W_MESSAGE][Const.M_RANGE]))
    out.append("%sPAYLOAD: %s%s}%s}" % (start + indent, message[Const.W_MESSAGE][Const.M_PAYLOAD], start, newline))
    return ''.join(out)
[ "def", "decode_sent_msg", "(", "pref", ",", "message", ",", "pretty", "=", "False", ")", ":", "newline", "=", "\"\\n\"", "if", "pretty", "else", "\" \"", "indent", "=", "\" \"", "if", "pretty", "else", "\"\"", "start", "=", "newline", "+", "indent", "out", "=", "[", "]", "out", ".", "append", "(", "\"%s%s{%sSEQNUM: %d,\"", "%", "(", "pref", ",", "newline", ",", "start", ",", "message", "[", "Const", ".", "W_SEQ", "]", ")", ")", "out", ".", "append", "(", "\"%sCOMPRESSION: %d,\"", "%", "(", "start", ",", "message", "[", "Const", ".", "W_COMPRESSION", "]", ")", ")", "out", ".", "append", "(", "\"%sHASH: %s...,\"", "%", "(", "start", ",", "str", "(", "binascii", ".", "b2a_hex", "(", "message", "[", "Const", ".", "W_HASH", "]", ")", ".", "decode", "(", "'ascii'", ")", ")", "[", ":", "10", "]", ")", ")", "out", ".", "append", "(", "\"%sMESSAGE:%s{%sCLIENTREF: %s,\"", "%", "(", "start", ",", "start", ",", "start", "+", "indent", ",", "message", "[", "Const", ".", "W_MESSAGE", "]", "[", "Const", ".", "M_CLIENTREF", "]", ")", ")", "out", ".", "append", "(", "\"%sRESOURCE: %s,\"", "%", "(", "start", "+", "indent", ",", "R_TYPES", "[", "message", "[", "Const", ".", "W_MESSAGE", "]", "[", "Const", ".", "M_RESOURCE", "]", "]", ")", ")", "out", ".", "append", "(", "\"%sTYPE: %s,\"", "%", "(", "start", "+", "indent", ",", "C_TYPES", "[", "message", "[", "Const", ".", "W_MESSAGE", "]", "[", "Const", ".", "M_TYPE", "]", "]", ")", ")", "out", ".", "append", "(", "\"%sACTION: %s,\"", "%", "(", "start", "+", "indent", ",", "message", "[", "Const", ".", "W_MESSAGE", "]", "[", "Const", ".", "M_ACTION", "]", ")", ")", "if", "Const", ".", "M_RANGE", "in", "message", "[", "Const", ".", "W_MESSAGE", "]", ":", "out", ".", "append", "(", "\"%sRANGE: %s,\"", "%", "(", "start", "+", "indent", ",", "message", "[", "Const", ".", "W_MESSAGE", "]", "[", "Const", ".", "M_RANGE", "]", ")", ")", "out", ".", "append", "(", "\"%sPAYLOAD: %s%s}%s}\"", "%", "(", "start", "+", "indent", ",", "message", "[", "Const", ".", "W_MESSAGE", "]", "[", "Const", ".", "M_PAYLOAD", "]", ",", "start", ",", "newline", ")", ")", "return", "''", ".", "join", "(", "out", ")" ]
decode_sent_msg: Return a string of the decoded message
[ "decode_sent_msg", ":", "Return", "a", "string", "of", "the", "decoded", "message" ]
python
train
59.47619
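The core trick above is that one set of format strings serves both compact and pretty output by swapping the separator pieces. A self-contained sketch of that toggle, with a hypothetical `render` helper standing in for the IoticAgent internals:

    def render(fields, pretty=False):
        # Swap separators: newlines and indentation when pretty,
        # single spaces when compact.
        newline = "\n" if pretty else " "
        indent = "    " if pretty else ""
        start = newline + indent
        out = ["{"]
        for key, val in fields:
            out.append("%s%s: %s," % (start, key, val))
        out.append(newline + "}")
        return ''.join(out)

    print(render([("SEQNUM", 1), ("ACTION", "create")]))
    print(render([("SEQNUM", 1), ("ACTION", "create")], pretty=True))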
JinnLynn/genpac
genpac/publicsuffixlist/__init__.py
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/publicsuffixlist/__init__.py#L249-L260
def privateparts(self, domain):
    """ Return tuple of labels and the private suffix. """
    s = self.privatesuffix(domain)
    if s is None:
        return None
    else:
        # I know the domain is valid and ends with private suffix
        pre = domain[0:-(len(s)+1)]
        if pre == "":
            return (s,)
        else:
            return tuple(pre.split(".") + [s])
[ "def", "privateparts", "(", "self", ",", "domain", ")", ":", "s", "=", "self", ".", "privatesuffix", "(", "domain", ")", "if", "s", "is", "None", ":", "return", "None", "else", ":", "# I know the domain is valid and ends with private suffix", "pre", "=", "domain", "[", "0", ":", "-", "(", "len", "(", "s", ")", "+", "1", ")", "]", "if", "pre", "==", "\"\"", ":", "return", "(", "s", ",", ")", "else", ":", "return", "tuple", "(", "pre", ".", "split", "(", "\".\"", ")", "+", "[", "s", "]", ")" ]
Return tuple of labels and the private suffix.
[ "Return", "tuple", "of", "labels", "and", "the", "private", "suffix", "." ]
python
train
34.583333
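A hedged usage sketch, assuming the vendored module exposes a `PublicSuffixList` class as the upstream publicsuffixlist package does; the domain is illustrative:

    from genpac.publicsuffixlist import PublicSuffixList

    psl = PublicSuffixList()
    print(psl.privateparts("www.example.co.uk"))
    # expected: ('www', 'example.co.uk') -- each label before the private
    # suffix, followed by the private suffix itself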
mcs07/CIRpy
cirpy.py
https://github.com/mcs07/CIRpy/blob/fee2bbbb08eb39bbbe003f835d64e8c0c1688904/cirpy.py#L259-L286
def download(input, filename, representation, overwrite=False, resolvers=None, get3d=False, **kwargs):
    """Convenience function to save a CIR response as a file.

    This is just a simple wrapper around the resolve function.

    :param string input: Chemical identifier to resolve
    :param string filename: File path to save to
    :param string representation: Desired output representation
    :param bool overwrite: (Optional) Whether to allow overwriting of an existing file
    :param list(string) resolvers: (Optional) Ordered list of resolvers to use
    :param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
    :raises HTTPError: if CIR returns an error code
    :raises ParseError: if CIR response is uninterpretable
    :raises IOError: if overwrite is False and file already exists
    """
    result = resolve(input, representation, resolvers, get3d, **kwargs)
    # Just log and return if nothing resolved
    if not result:
        log.debug('No file to download.')
        return
    # Only overwrite an existing file if explicitly instructed to.
    if not overwrite and os.path.isfile(filename):
        raise IOError("%s already exists. Use 'overwrite=True' to overwrite it." % filename)
    # Ensure file ends with a newline
    if not result.endswith('\n'):
        result += '\n'
    with open(filename, 'w') as f:
        f.write(result)
[ "def", "download", "(", "input", ",", "filename", ",", "representation", ",", "overwrite", "=", "False", ",", "resolvers", "=", "None", ",", "get3d", "=", "False", ",", "*", "*", "kwargs", ")", ":", "result", "=", "resolve", "(", "input", ",", "representation", ",", "resolvers", ",", "get3d", ",", "*", "*", "kwargs", ")", "# Just log and return if nothing resolved", "if", "not", "result", ":", "log", ".", "debug", "(", "'No file to download.'", ")", "return", "# Only overwrite an existing file if explicitly instructed to.", "if", "not", "overwrite", "and", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "raise", "IOError", "(", "\"%s already exists. Use 'overwrite=True' to overwrite it.\"", "%", "filename", ")", "# Ensure file ends with a newline", "if", "not", "result", ".", "endswith", "(", "'\\n'", ")", ":", "result", "+=", "'\\n'", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "result", ")" ]
Convenience function to save a CIR response as a file. This is just a simple wrapper around the resolve function. :param string input: Chemical identifier to resolve :param string filename: File path to save to :param string representation: Desired output representation :param bool overwrite: (Optional) Whether to allow overwriting of an existing file :param list(string) resolvers: (Optional) Ordered list of resolvers to use :param bool get3d: (Optional) Whether to return 3D coordinates (where applicable) :raises HTTPError: if CIR returns an error code :raises ParseError: if CIR response is uninterpretable :raises IOError: if overwrite is False and file already exists
[ "Convenience", "function", "to", "save", "a", "CIR", "response", "as", "a", "file", "." ]
python
train
48.857143
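A usage sketch, assuming the cirpy package is installed; note this makes a network call to the NCI Chemical Identifier Resolver:

    import cirpy

    # Resolve aspirin to an SDF file, replacing any existing file.
    cirpy.download('Aspirin', 'aspirin.sdf', 'sdf', overwrite=True)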
mattharrison/rst2odp
odplib/preso.py
https://github.com/mattharrison/rst2odp/blob/4adbf29b28c8207ec882f792ded07e98b1d3e7d0/odplib/preso.py#L92-L113
def cwd_decorator(func):
    """
    decorator to change cwd to directory containing rst for this function
    """

    def wrapper(*args, **kw):
        cur_dir = os.getcwd()
        found = False
        for arg in sys.argv:
            if arg.endswith(".rst"):
                found = arg
                break
        if found:
            directory = os.path.dirname(found)
            if directory:
                os.chdir(directory)
        data = func(*args, **kw)
        os.chdir(cur_dir)
        return data

    return wrapper
[ "def", "cwd_decorator", "(", "func", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "cur_dir", "=", "os", ".", "getcwd", "(", ")", "found", "=", "False", "for", "arg", "in", "sys", ".", "argv", ":", "if", "arg", ".", "endswith", "(", "\".rst\"", ")", ":", "found", "=", "arg", "break", "if", "found", ":", "directory", "=", "os", ".", "path", ".", "dirname", "(", "found", ")", "if", "directory", ":", "os", ".", "chdir", "(", "directory", ")", "data", "=", "func", "(", "*", "args", ",", "*", "*", "kw", ")", "os", ".", "chdir", "(", "cur_dir", ")", "return", "data", "return", "wrapper" ]
decorator to change cwd to directory containing rst for this function
[ "decorator", "to", "change", "cwd", "to", "directory", "containing", "rst", "for", "this", "function" ]
python
train
23.590909
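A self-contained sketch of the decorator's behaviour, assuming `cwd_decorator` (above) is in scope: a fake .rst path is appended to sys.argv, and the wrapped function observes the changed working directory.

    import os
    import sys
    import tempfile

    tmp_dir = tempfile.mkdtemp()
    sys.argv.append(os.path.join(tmp_dir, "talk.rst"))

    @cwd_decorator
    def where_am_i():
        return os.getcwd()

    # realpath() guards against platforms where the temp dir is a symlink
    print(os.path.realpath(where_am_i()) == os.path.realpath(tmp_dir))  # True
    print(os.path.realpath(os.getcwd()) != os.path.realpath(tmp_dir))   # True: cwd restored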
zbanks/shmooze
shmooze/lib/database.py
https://github.com/zbanks/shmooze/blob/7ae615e172c174d6fe184a8bf90f8ad075bf58ed/shmooze/lib/database.py#L72-L120
def create_top_schema(self):
    """
    (Category --->) Item <---> Module <---> LogEntry

    <---> is a many-to-many relationship
    ---> is a foreign key relationship

    (- Category: represents a group of Items which form a top list)
    - Item: something that can be played multiple times and is grouped by to build a top list
    - Module: an instance of a module on the queue
    - LogEntry: an act performed on the queue and logged
    """
    self.execute("""CREATE TABLE IF NOT EXISTS top_module (
            uuid TEXT,
            add_timestamp DATETIME
          );""")
    self.execute("""CREATE TABLE IF NOT EXISTS top_category (
            pk INTEGER PRIMARY KEY,
            slug TEXT,
            description TEXT
          );""")
    self.execute("""CREATE TABLE IF NOT EXISTS top_item (
            pk INTEGER PRIMARY KEY,
            canonical_id TEXT,
            category_pk INTEGER,
            requeue_command TEXT,
            url TEXT,
            description TEXT
          );""")
    self.execute("""CREATE TABLE IF NOT EXISTS top_item_module (
            pk INTEGER PRIMARY KEY,
            item_pk INTEGER,
            module_uuid TEXT
          );""")
    self.execute("""CREATE TABLE IF NOT EXISTS top_log_entry (
            pk INTEGER,
            uid TEXT,
            namespace TEXT,
            timestamp DATETIME,
            input_json TEXT,
            output_json TEXT
          );""")
    self.execute("""CREATE TABLE IF NOT EXISTS top_module_log_entry (
            pk INTEGER PRIMARY KEY,
            log_pk INTEGER,
            module_uuid TEXT,
            log_type TEXT
          );""")
    self.commit()
[ "def", "create_top_schema", "(", "self", ")", ":", "self", ".", "execute", "(", "\"\"\"CREATE TABLE IF NOT EXISTS top_module (\n uuid TEXT,\n add_timestamp DATETIME\n );\"\"\"", ")", "self", ".", "execute", "(", "\"\"\"CREATE TABLE IF NOT EXISTS top_category (\n pk INTEGER PRIMARY KEY,\n slug TEXT,\n description TEXT\n );\"\"\"", ")", "self", ".", "execute", "(", "\"\"\"CREATE TABLE IF NOT EXISTS top_item (\n pk INTEGER PRIMARY KEY,\n canonical_id TEXT,\n category_pk INTEGER,\n requeue_command TEXT,\n url TEXT,\n description TEXT\n );\"\"\"", ")", "self", ".", "execute", "(", "\"\"\"CREATE TABLE IF NOT EXISTS top_item_module (\n pk INTEGER PRIMARY KEY,\n item_pk INTEGER,\n module_uuid TEXT\n );\"\"\"", ")", "self", ".", "execute", "(", "\"\"\"CREATE TABLE IF NOT EXISTS top_log_entry (\n pk INTEGER,\n uid TEXT,\n namespace TEXT,\n timestamp DATETIME,\n input_json TEXT,\n output_json TEXT\n );\"\"\"", ")", "self", ".", "execute", "(", "\"\"\"CREATE TABLE IF NOT EXISTS top_module_log_entry (\n pk INTEGER PRIMARY KEY,\n log_pk INTEGER,\n module_uuid TEXT,\n log_type TEXT\n );\"\"\"", ")", "self", ".", "commit", "(", ")" ]
(Category --->) Item <---> Module <---> LogEntry <---> is a many-to-many relationship ---> is a foreign key relationship (- Category: represents a group of Items which form a top list) - Item: something that can be played multiple times and is grouped by to build a top list - Module: an instance of a module on the queue - LogEntry: an act performed on the queue and logged
[ "(", "Category", "---", ">", ")", "Item", "<", "---", ">", "Module", "<", "---", ">", "LogEntry" ]
python
train
33.959184
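A self-contained check that DDL of this shape is valid SQLite, running two of the same CREATE TABLE statements against an in-memory database:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("""CREATE TABLE IF NOT EXISTS top_module (
        uuid TEXT,
        add_timestamp DATETIME
    );""")
    conn.execute("""CREATE TABLE IF NOT EXISTS top_category (
        pk INTEGER PRIMARY KEY,
        slug TEXT,
        description TEXT
    );""")
    conn.commit()
    print([r[0] for r in conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table'")])
    # ['top_module', 'top_category']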
Jajcus/pyxmpp2
pyxmpp2/ext/muc/muc.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/muc/muc.py#L923-L941
def __groupchat_message(self,stanza):
    """Process a groupchat message from a MUC room.

    :Parameters:
        - `stanza`: the stanza received.
    :Types:
        - `stanza`: `Message`

    :return: `True` if the message was properly recognized as directed to
        one of the managed rooms, `False` otherwise.
    :returntype: `bool`"""
    fr=stanza.get_from()
    key=fr.bare().as_unicode()
    rs=self.rooms.get(key)
    if not rs:
        self.__logger.debug("groupchat message from unknown source")
        return False
    rs.process_groupchat_message(stanza)
    return True
[ "def", "__groupchat_message", "(", "self", ",", "stanza", ")", ":", "fr", "=", "stanza", ".", "get_from", "(", ")", "key", "=", "fr", ".", "bare", "(", ")", ".", "as_unicode", "(", ")", "rs", "=", "self", ".", "rooms", ".", "get", "(", "key", ")", "if", "not", "rs", ":", "self", ".", "__logger", ".", "debug", "(", "\"groupchat message from unknown source\"", ")", "return", "False", "rs", ".", "process_groupchat_message", "(", "stanza", ")", "return", "True" ]
Process a groupchat message from a MUC room. :Parameters: - `stanza`: the stanza received. :Types: - `stanza`: `Message` :return: `True` if the message was properly recognized as directed to one of the managed rooms, `False` otherwise. :returntype: `bool`
[ "Process", "a", "groupchat", "message", "from", "a", "MUC", "room", "." ]
python
valid
33.473684
datastore/datastore
datastore/core/basic.py
https://github.com/datastore/datastore/blob/7ccf0cd4748001d3dbf5e6dda369b0f63e0269d3/datastore/core/basic.py#L1056-L1059
def put(self, key, value):
    '''Stores the object in all underlying datastores.'''
    for store in self._stores:
        store.put(key, value)
[ "def", "put", "(", "self", ",", "key", ",", "value", ")", ":", "for", "store", "in", "self", ".", "_stores", ":", "store", ".", "put", "(", "key", ",", "value", ")" ]
Stores the object in all underlying datastores.
[ "Stores", "the", "object", "in", "all", "underlying", "datastores", "." ]
python
train
35
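A self-contained sketch of the fan-out write: a hypothetical `TieredStore` (the names below are not from the datastore package) replicates every put() into each member store.

    class DictStore:
        def __init__(self):
            self._data = {}
        def put(self, key, value):
            self._data[key] = value

    class TieredStore:
        def __init__(self, stores):
            self._stores = stores
        def put(self, key, value):
            '''Stores the object in all underlying datastores.'''
            for store in self._stores:
                store.put(key, value)

    a, b = DictStore(), DictStore()
    TieredStore([a, b]).put("k", 42)
    print(a._data, b._data)  # {'k': 42} {'k': 42}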
teepark/junction
junction/client.py
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/client.py#L133-L165
def send_rpc(self, service, routing_id, method, args=None, kwargs=None,
        broadcast=False):
    '''Send out an RPC request

    :param service: the service name (the routing top level)
    :type service: anything hash-able
    :param routing_id:
        The id used for routing within the registered handlers of the
        service.
    :type routing_id: int
    :param method: the method name to call
    :type method: string
    :param args: the positional arguments to send along with the request
    :type args: tuple
    :param kwargs: keyword arguments to send along with the request
    :type kwargs: dict
    :param broadcast: if ``True``, send to all peers with matching
        subscriptions
    :type broadcast: bool

    :returns:
        a :class:`RPC <junction.futures.RPC>` object representing the
        RPC and its future response.

    :raises:
        :class:`Unroutable <junction.errors.Unroutable>` if the client
        doesn't have a connection to a hub
    '''
    if not self._peer.up:
        raise errors.Unroutable()

    return self._dispatcher.send_proxied_rpc(service, routing_id, method,
            args or (), kwargs or {}, not broadcast)
[ "def", "send_rpc", "(", "self", ",", "service", ",", "routing_id", ",", "method", ",", "args", "=", "None", ",", "kwargs", "=", "None", ",", "broadcast", "=", "False", ")", ":", "if", "not", "self", ".", "_peer", ".", "up", ":", "raise", "errors", ".", "Unroutable", "(", ")", "return", "self", ".", "_dispatcher", ".", "send_proxied_rpc", "(", "service", ",", "routing_id", ",", "method", ",", "args", "or", "(", ")", ",", "kwargs", "or", "{", "}", ",", "not", "broadcast", ")" ]
Send out an RPC request :param service: the service name (the routing top level) :type service: anything hash-able :param routing_id: The id used for routing within the registered handlers of the service. :type routing_id: int :param method: the method name to call :type method: string :param args: the positional arguments to send along with the request :type args: tuple :param kwargs: keyword arguments to send along with the request :type kwargs: dict :param broadcast: if ``True``, send to all peers with matching subscriptions :type broadcast: bool :returns: a :class:`RPC <junction.futures.RPC>` object representing the RPC and its future response. :raises: :class:`Unroutable <junction.errors.Unroutable>` if the client doesn't have a connection to a hub
[ "Send", "out", "an", "RPC", "request" ]
python
train
38.151515
aws/chalice
chalice/utils.py
https://github.com/aws/chalice/blob/10d7fb52e68bd1c52aae251c97e3939fc0190412/chalice/utils.py#L30-L46
def to_cfn_resource_name(name):
    # type: (str) -> str
    """Transform a name to a valid cfn name.

    This will convert the provided name to a CamelCase name.
    It's possible that the conversion to a CFN resource name
    can result in name collisions.  It's up to the caller
    to handle name collisions appropriately.

    """
    if not name:
        raise ValueError("Invalid name: %r" % name)
    word_separators = ['-', '_']
    for word_separator in word_separators:
        word_parts = [p for p in name.split(word_separator) if p]
        name = ''.join([w[0].upper() + w[1:] for w in word_parts])
    return re.sub(r'[^A-Za-z0-9]+', '', name)
[ "def", "to_cfn_resource_name", "(", "name", ")", ":", "# type: (str) -> str", "if", "not", "name", ":", "raise", "ValueError", "(", "\"Invalid name: %r\"", "%", "name", ")", "word_separators", "=", "[", "'-'", ",", "'_'", "]", "for", "word_separator", "in", "word_separators", ":", "word_parts", "=", "[", "p", "for", "p", "in", "name", ".", "split", "(", "word_separator", ")", "if", "p", "]", "name", "=", "''", ".", "join", "(", "[", "w", "[", "0", "]", ".", "upper", "(", ")", "+", "w", "[", "1", ":", "]", "for", "w", "in", "word_parts", "]", ")", "return", "re", ".", "sub", "(", "r'[^A-Za-z0-9]+'", ",", "''", ",", "name", ")" ]
Transform a name to a valid cfn name. This will convert the provided name to a CamelCase name. It's possible that the conversion to a CFN resource name can result in name collisions. It's up to the caller to handle name collisions appropriately.
[ "Transform", "a", "name", "to", "a", "valid", "cfn", "name", "." ]
python
train
37.882353
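A usage sketch, assuming chalice is installed; the input names are illustrative:

    from chalice.utils import to_cfn_resource_name

    print(to_cfn_resource_name('my-function_name'))  # MyFunctionName
    print(to_cfn_resource_name('api.handler'))       # Apihandler ('.' is stripped)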
deanmalmgren/textract
textract/parsers/utils.py
https://github.com/deanmalmgren/textract/blob/117ea191d93d80321e4bf01f23cc1ac54d69a075/textract/parsers/utils.py#L35-L48
def process(self, filename, encoding, **kwargs):
    """Process ``filename`` and encode byte-string with ``encoding``. This
    method is called by :func:`textract.parsers.process` and wraps
    the :meth:`.BaseParser.extract` method in `a delicious unicode
    sandwich <http://nedbatchelder.com/text/unipain.html>`_.

    """
    # make a "unicode sandwich" to handle dealing with unknown
    # input byte strings and converting them to a predictable
    # output encoding
    # http://nedbatchelder.com/text/unipain/unipain.html#35
    byte_string = self.extract(filename, **kwargs)
    unicode_string = self.decode(byte_string)
    return self.encode(unicode_string, encoding)
[ "def", "process", "(", "self", ",", "filename", ",", "encoding", ",", "*", "*", "kwargs", ")", ":", "# make a \"unicode sandwich\" to handle dealing with unknown", "# input byte strings and converting them to a predictable", "# output encoding", "# http://nedbatchelder.com/text/unipain/unipain.html#35", "byte_string", "=", "self", ".", "extract", "(", "filename", ",", "*", "*", "kwargs", ")", "unicode_string", "=", "self", ".", "decode", "(", "byte_string", ")", "return", "self", ".", "encode", "(", "unicode_string", ",", "encoding", ")" ]
Process ``filename`` and encode byte-string with ``encoding``. This method is called by :func:`textract.parsers.process` and wraps the :meth:`.BaseParser.extract` method in `a delicious unicode sandwich <http://nedbatchelder.com/text/unipain.html>`_.
[ "Process", "filename", "and", "encode", "byte", "-", "string", "with", "encoding", ".", "This", "method", "is", "called", "by", ":", "func", ":", "textract", ".", "parsers", ".", "process", "and", "wraps", "the", ":", "meth", ":", ".", "BaseParser", ".", "extract", "method", "in", "a", "delicious", "unicode", "sandwich", "<http", ":", "//", "nedbatchelder", ".", "com", "/", "text", "/", "unipain", ".", "html", ">", "_", "." ]
python
train
51.071429
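A self-contained sketch of the "unicode sandwich" itself: decode unknown input bytes at the boundary, do all internal work on text, and encode once on the way out.

    raw = "café".encode("latin-1")   # bytes in some source encoding

    text = raw.decode("latin-1")      # decode at the input boundary
    processed = text.upper()          # internal work happens on unicode text
    out = processed.encode("utf-8")   # encode once at the output boundary

    print(out)  # b'CAF\xc3\x89'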
snipsco/snipsmanagercore
snipsmanagercore/intent_parser.py
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/intent_parser.py#L59-L133
def get_slot_value(payload, slot_name):
    """ Return the parsed value of a slot.

    An intent has the form:

    {
        "text": "brew me a cappuccino with 3 sugars tomorrow",
        "slots": [
            {"value": {"slotName": "coffee_type", "value": "cappuccino"}},
            ...
        ]
    }

    This function extracts a slot value given its slot name, and parses it
    into a Python object if applicable (e.g. for dates).

    Slots can be of various forms, the simplest being just:

    {"slotName": "coffee_sugar_amout", "value": "3"}

    More complex examples are date times, where we distinguish between
    instant times, or intervals. Thus, a slot:

    {
        "slotName": "weatherForecastStartDatetime",
        "value": {
            "kind": "InstantTime",
            "value": {
                "value": "2017-07-14 00:00:00 +00:00",
                "grain": "Day",
                "precision": "Exact"
            }
        }
    }

    will be extracted as an `InstantTime` object, with datetime parsed and
    granularity set. Another example is a time interval:

    {
        "slotName": "weatherForecastStartDatetime",
        "value": {
            "kind": "TimeInterval",
            "value": {
                "from": "2017-07-14 12:00:00 +00:00",
                "to": "2017-07-14 19:00:00 +00:00"
            }
        },
    }

    which will be extracted as a TimeInterval object.

    :param payload: the intent, in JSON format.
    :return: the parsed value, as described above.
    """
    if not 'slots' in payload:
        return []
    slots = []
    for candidate in payload['slots']:
        if 'slotName' in candidate and candidate['slotName'] == slot_name:
            slots.append(candidate)
    result = []
    for slot in slots:
        kind = IntentParser.get_dict_value(slot, ['value', 'kind'])
        if kind == "InstantTime":
            result.append(IntentParser.parse_instant_time(slot))
        elif kind == "TimeInterval":
            result.append(IntentParser.parse_time_interval(slot))
        else:
            result.append(IntentParser.get_dict_value(slot, ['value', 'value', 'value'])
                          or IntentParser.get_dict_value(slot, ['value', 'value']))
    return result
[ "def", "get_slot_value", "(", "payload", ",", "slot_name", ")", ":", "if", "not", "'slots'", "in", "payload", ":", "return", "[", "]", "slots", "=", "[", "]", "for", "candidate", "in", "payload", "[", "'slots'", "]", ":", "if", "'slotName'", "in", "candidate", "and", "candidate", "[", "'slotName'", "]", "==", "slot_name", ":", "slots", ".", "append", "(", "candidate", ")", "result", "=", "[", "]", "for", "slot", "in", "slots", ":", "kind", "=", "IntentParser", ".", "get_dict_value", "(", "slot", ",", "[", "'value'", ",", "'kind'", "]", ")", "if", "kind", "==", "\"InstantTime\"", ":", "result", ".", "append", "(", "IntentParser", ".", "parse_instant_time", "(", "slot", ")", ")", "elif", "kind", "==", "\"TimeInterval\"", ":", "result", ".", "append", "(", "IntentParser", ".", "parse_time_interval", "(", "slot", ")", ")", "else", ":", "result", ".", "append", "(", "IntentParser", ".", "get_dict_value", "(", "slot", ",", "[", "'value'", ",", "'value'", ",", "'value'", "]", ")", "or", "IntentParser", ".", "get_dict_value", "(", "slot", ",", "[", "'value'", ",", "'value'", "]", ")", ")", "return", "result" ]
Return the parsed value of a slot. An intent has the form: { "text": "brew me a cappuccino with 3 sugars tomorrow", "slots": [ {"value": {"slotName": "coffee_type", "value": "cappuccino"}}, ... ] } This function extracts a slot value given its slot name, and parses it into a Python object if applicable (e.g. for dates). Slots can be of various forms, the simplest being just: {"slotName": "coffee_sugar_amout", "value": "3"} More complex examples are date times, where we distinguish between instant times, or intervals. Thus, a slot: { "slotName": "weatherForecastStartDatetime", "value": { "kind": "InstantTime", "value": { "value": "2017-07-14 00:00:00 +00:00", "grain": "Day", "precision": "Exact" } } } will be extracted as an `InstantTime` object, with datetime parsed and granularity set. Another example is a time interval: { "slotName": "weatherForecastStartDatetime", "value": { "kind": "TimeInterval", "value": { "from": "2017-07-14 12:00:00 +00:00", "to": "2017-07-14 19:00:00 +00:00" } }, } which will be extracted as a TimeInterval object. :param payload: the intent, in JSON format. :return: the parsed value, as described above.
[ "Return", "the", "parsed", "value", "of", "a", "slot", ".", "An", "intent", "has", "the", "form", ":" ]
python
train
33.4
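A hedged usage sketch, assuming `get_slot_value` is exposed as a static method of `IntentParser` as the module layout suggests; the payload below puts `slotName` at the top level of each slot, which is what the code (as opposed to the docstring's intent example) actually checks:

    from snipsmanagercore.intent_parser import IntentParser

    payload = {
        "text": "brew me a cappuccino with 3 sugars tomorrow",
        "slots": [
            {"slotName": "coffee_type", "value": {"value": "cappuccino"}},
        ],
    }
    print(IntentParser.get_slot_value(payload, "coffee_type"))  # ['cappuccino']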
IBM/pyxcli
pyxcli/events/events.py
https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/events/events.py#L71-L124
def send_event(self, action, properties, event_severity=EVENT_SEVERITY):
    """ send css_event and if fails send custom_event instead

        Args:
            action (ACTIONS): the action causing the event
            properties (dict): the action additional properties
            event_severity (string): the event severity

        Raises:
            XCLIError: if the xcli.cmd.custom_event failed
            KeyError: if action wasn't predefined
            TypeError: if properties is not None or dict
    """
    # verify properties
    event_properties = dict() if (properties is None) else properties
    if type(event_properties) is not dict:
        raise TypeError('properties is not dict')

    # prepare event
    event_bunch = Bunch(
        Product=self.product_name,
        Version=self.product_version,
        Server=self.server_name,
        Platform=self.platform,
        Action=action,
        Properties=event_properties)
    event_description = self._get_description_prefix() + \
        json.dumps(event_bunch)

    use_custom_event = True
    if CSS_PRODUCT_EVENT in dir(self.xcli.cmd):
        try:
            # send css product event
            log.debug("sending css_product_event "
                      "description=%s severity=%s",
                      event_description, event_severity)
            self.xcli.cmd.css_product_event(severity=event_severity,
                                            product=self.product_name,
                                            version=self.product_version,
                                            server=self.server_name,
                                            platform=self.platform,
                                            action=action,
                                            properties=event_properties)
            use_custom_event = False
        except (UnrecognizedCommandError,
                OperationForbiddenForUserCategoryError):
            log.warning("failed css_product_event "
                        "description=%s severity=%s",
                        event_description, event_severity)

    if use_custom_event:
        # send custom event
        log.debug("sending custom_event description=%s severity=%s",
                  event_description, event_severity)
        self.xcli.cmd.custom_event(
            description=event_description, severity=event_severity)
[ "def", "send_event", "(", "self", ",", "action", ",", "properties", ",", "event_severity", "=", "EVENT_SEVERITY", ")", ":", "# verify properties", "event_properties", "=", "dict", "(", ")", "if", "(", "properties", "is", "None", ")", "else", "properties", "if", "type", "(", "event_properties", ")", "is", "not", "dict", ":", "raise", "TypeError", "(", "'properties is not dict'", ")", "# prepare event", "event_bunch", "=", "Bunch", "(", "Product", "=", "self", ".", "product_name", ",", "Version", "=", "self", ".", "product_version", ",", "Server", "=", "self", ".", "server_name", ",", "Platform", "=", "self", ".", "platform", ",", "Action", "=", "action", ",", "Properties", "=", "event_properties", ")", "event_description", "=", "self", ".", "_get_description_prefix", "(", ")", "+", "json", ".", "dumps", "(", "event_bunch", ")", "use_custom_event", "=", "True", "if", "CSS_PRODUCT_EVENT", "in", "dir", "(", "self", ".", "xcli", ".", "cmd", ")", ":", "try", ":", "# send css product event", "log", ".", "debug", "(", "\"sending css_product_event \"", "\"description=%s severity=%s\"", ",", "event_description", ",", "event_severity", ")", "self", ".", "xcli", ".", "cmd", ".", "css_product_event", "(", "severity", "=", "event_severity", ",", "product", "=", "self", ".", "product_name", ",", "version", "=", "self", ".", "product_version", ",", "server", "=", "self", ".", "server_name", ",", "platform", "=", "self", ".", "platform", ",", "action", "=", "action", ",", "properties", "=", "event_properties", ")", "use_custom_event", "=", "False", "except", "(", "UnrecognizedCommandError", ",", "OperationForbiddenForUserCategoryError", ")", ":", "log", ".", "warning", "(", "\"failed css_product_event \"", "\"description=%s severity=%s\"", ",", "event_description", ",", "event_severity", ")", "if", "use_custom_event", ":", "# send custom event", "log", ".", "debug", "(", "\"sending custom_event description=%s severity=%s\"", ",", "event_description", ",", "event_severity", ")", "self", ".", "xcli", ".", "cmd", ".", "custom_event", "(", "description", "=", "event_description", ",", "severity", "=", "event_severity", ")" ]
send css_event and if fails send custom_event instead Args: action (ACTIONS): the action causing the event properties (dict): the action additional properties event_severity (string): the event severity Raises: XCLIError: if the xcli.cmd.custom_event failed KeyError: if action wasn't predefined TypeError: if properties is not None or dict
[ "send", "css_event", "and", "if", "fails", "send", "custom_event", "instead", "Args", ":", "action", "(", "ACTIONS", ")", ":", "the", "action", "causing", "the", "event", "properties", "(", "dict", ")", ":", "the", "action", "additional", "properties", "event_severity", "(", "string", ")", ":", "the", "event", "severity", "Raises", ":", "XCLIError", ":", "if", "the", "xcli", ".", "cmd", ".", "custom_event", "failed", "KeyError", ":", "if", "action", "wasn", "t", "predefined", "TypeError", ":", "if", "properties", "is", "not", "None", "or", "dict" ]
python
train
46.518519
LionelAuroux/pyrser
pyrser/parsing/node.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/parsing/node.py#L389-L396
def values(self):
    """ in order """
    tmp = self
    while tmp is not None:
        yield tmp.data
        tmp = tmp.next
[ "def", "values", "(", "self", ")", ":", "tmp", "=", "self", "while", "tmp", "is", "not", "None", ":", "yield", "tmp", ".", "data", "tmp", "=", "tmp", ".", "next" ]
in order
[ "in", "order" ]
python
test
19.375
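A self-contained sketch of the traversal, with a minimal hypothetical `Node` mirroring the `data`/`next` attributes used above:

    class Node:
        def __init__(self, data, next=None):
            self.data = data
            self.next = next

        def values(self):
            """ in order """
            tmp = self
            while tmp is not None:
                yield tmp.data
                tmp = tmp.next

    head = Node(1, Node(2, Node(3)))
    print(list(head.values()))  # [1, 2, 3]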
idlesign/django-sitemessage
sitemessage/messages/base.py
https://github.com/idlesign/django-sitemessage/blob/25b179b798370354c5988042ec209e255d23793f/sitemessage/messages/base.py#L188-L214
def _get_url(cls, name, message_model, dispatch_model):
    """Returns a common pattern sitemessage URL.

    :param str name: URL name
    :param Message message_model:
    :param Dispatch|None dispatch_model:
    :return:
    """
    global APP_URLS_ATTACHED

    url = ''

    if dispatch_model is None:
        return url

    if APP_URLS_ATTACHED != False:  # sic!

        hashed = cls.get_dispatch_hash(dispatch_model.id, message_model.id)

        try:
            url = reverse(name, args=[message_model.id, dispatch_model.id, hashed])
            url = '%s%s' % (get_site_url(), url)

        except NoReverseMatch:
            if APP_URLS_ATTACHED is None:
                APP_URLS_ATTACHED = False

    return url
[ "def", "_get_url", "(", "cls", ",", "name", ",", "message_model", ",", "dispatch_model", ")", ":", "global", "APP_URLS_ATTACHED", "url", "=", "''", "if", "dispatch_model", "is", "None", ":", "return", "url", "if", "APP_URLS_ATTACHED", "!=", "False", ":", "# sic!", "hashed", "=", "cls", ".", "get_dispatch_hash", "(", "dispatch_model", ".", "id", ",", "message_model", ".", "id", ")", "try", ":", "url", "=", "reverse", "(", "name", ",", "args", "=", "[", "message_model", ".", "id", ",", "dispatch_model", ".", "id", ",", "hashed", "]", ")", "url", "=", "'%s%s'", "%", "(", "get_site_url", "(", ")", ",", "url", ")", "except", "NoReverseMatch", ":", "if", "APP_URLS_ATTACHED", "is", "None", ":", "APP_URLS_ATTACHED", "=", "False", "return", "url" ]
Returns a common pattern sitemessage URL. :param str name: URL name :param Message message_model: :param Dispatch|None dispatch_model: :return:
[ "Returns", "a", "common", "pattern", "sitemessage", "URL", "." ]
python
train
28.666667
pantsbuild/pants
src/python/pants/pantsd/service/pants_service.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/pantsd/service/pants_service.py#L190-L196
def mark_running(self):
    """Moves the service to the Running state.

    Raises if the service is not currently in the Paused state.
    """
    with self._lock:
        self._set_state(self._RUNNING, self._PAUSED)
[ "def", "mark_running", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "self", ".", "_set_state", "(", "self", ".", "_RUNNING", ",", "self", ".", "_PAUSED", ")" ]
Moves the service to the Running state. Raises if the service is not currently in the Paused state.
[ "Moves", "the", "service", "to", "the", "Running", "state", "." ]
python
train
29.857143
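A self-contained sketch of the guarded transition used above, with a hypothetical `_set_state` that only moves between explicitly allowed predecessor states, under a lock (the pants internals are not reproduced here):

    import threading

    class Service:
        _PAUSED, _RUNNING = "Paused", "Running"

        def __init__(self):
            self._lock = threading.Lock()
            self._state = self._PAUSED

        def _set_state(self, new_state, *valid_from):
            # Refuse the transition unless the current state is allowed.
            if self._state not in valid_from:
                raise AssertionError(
                    "cannot enter %s from %s" % (new_state, self._state))
            self._state = new_state

        def mark_running(self):
            with self._lock:
                self._set_state(self._RUNNING, self._PAUSED)

    s = Service()
    s.mark_running()
    print(s._state)  # Running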
ultrabug/py3status
py3status/modules/i3block.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/modules/i3block.py#L158-L225
def _persist(self):
    """
    Run the command inside a thread so that we can catch output
    for each line as it comes in and display it.
    """
    # run the block/command
    for command in self.commands:
        try:
            process = Popen(
                [command],
                stdout=PIPE,
                stderr=PIPE,
                universal_newlines=True,
                env=self.env,
                shell=True,
            )
        except Exception as e:
            # If Popen itself fails there is no process to poll yet, so
            # just log the error and move on to the next command.
            msg = "Command '{cmd}' {error}"
            self.py3.log(msg.format(cmd=command, error=e))
            continue

        # persistent blocklet output can be of two forms. Either each row
        # of the output is on a new line (this is much easier to deal with)
        # or else the output can be continuous and just flushed when ready.
        # The second form is more tricky, if we find newlines then we
        # switch to easy parsing of the output.
        # When we have output we store in self.persistent_output and then
        # trigger the module to update.
        fd = process.stdout.fileno()
        fl = fcntl.fcntl(fd, fcntl.F_GETFL)
        has_newlines = False
        while True:
            line = process.stdout.read(1)
            # switch to a non-blocking read as we do not know the output
            # length
            fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
            line += process.stdout.read(1024)
            # switch back to blocking so we can wait for the next output
            fcntl.fcntl(fd, fcntl.F_SETFL, fl)
            if process.poll():
                break
            # check for the empty string *before* indexing into the line,
            # otherwise line[-1] raises IndexError on a closed stream
            if line == "":
                break
            if self.py3.is_python_2():
                line = line.decode("utf-8")
            self.persistent_output = line
            self.py3.update()
            if line[-1] == "\n":
                has_newlines = True
                break
        if has_newlines:
            msg = "Switch to newline persist method {cmd}"
            self.py3.log(msg.format(cmd=command))
            # just read the output in a sane manner
            for line in iter(process.stdout.readline, b""):
                if process.poll():
                    break
                if self.py3.is_python_2():
                    line = line.decode("utf-8")
                self.persistent_output = line
                self.py3.update()
        self.py3.log("command exited {cmd}".format(cmd=command))
        self.persistent_output = "Error\nError\n{}".format(
            self.py3.COLOR_ERROR or self.py3.COLOR_BAD
        )
        self.py3.update()
[ "def", "_persist", "(", "self", ")", ":", "# run the block/command", "for", "command", "in", "self", ".", "commands", ":", "try", ":", "process", "=", "Popen", "(", "[", "command", "]", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ",", "universal_newlines", "=", "True", ",", "env", "=", "self", ".", "env", ",", "shell", "=", "True", ",", ")", "except", "Exception", "as", "e", ":", "retcode", "=", "process", ".", "poll", "(", ")", "msg", "=", "\"Command '{cmd}' {error} retcode {retcode}\"", "self", ".", "py3", ".", "log", "(", "msg", ".", "format", "(", "cmd", "=", "command", ",", "error", "=", "e", ",", "retcode", "=", "retcode", ")", ")", "# persistent blocklet output can be of two forms. Either each row", "# of the output is on a new line this is much easier to deal with)", "# or else the output can be continuous and just flushed when ready.", "# The second form is more tricky, if we find newlines then we", "# switch to easy parsing of the output.", "# When we have output we store in self.persistent_output and then", "# trigger the module to update.", "fd", "=", "process", ".", "stdout", ".", "fileno", "(", ")", "fl", "=", "fcntl", ".", "fcntl", "(", "fd", ",", "fcntl", ".", "F_GETFL", ")", "has_newlines", "=", "False", "while", "True", ":", "line", "=", "process", ".", "stdout", ".", "read", "(", "1", ")", "# switch to a non-blocking read as we do not know the output", "# length", "fcntl", ".", "fcntl", "(", "fd", ",", "fcntl", ".", "F_SETFL", ",", "fl", "|", "os", ".", "O_NONBLOCK", ")", "line", "+=", "process", ".", "stdout", ".", "read", "(", "1024", ")", "# switch back to blocking so we can wait for the next output", "fcntl", ".", "fcntl", "(", "fd", ",", "fcntl", ".", "F_SETFL", ",", "fl", ")", "if", "process", ".", "poll", "(", ")", ":", "break", "if", "self", ".", "py3", ".", "is_python_2", "(", ")", ":", "line", "=", "line", ".", "decode", "(", "\"utf-8\"", ")", "self", ".", "persistent_output", "=", "line", "self", ".", "py3", ".", "update", "(", ")", "if", "line", "[", "-", "1", "]", "==", "\"\\n\"", ":", "has_newlines", "=", "True", "break", "if", "line", "==", "\"\"", ":", "break", "if", "has_newlines", ":", "msg", "=", "\"Switch to newline persist method {cmd}\"", "self", ".", "py3", ".", "log", "(", "msg", ".", "format", "(", "cmd", "=", "command", ")", ")", "# just read the output in a sane manner", "for", "line", "in", "iter", "(", "process", ".", "stdout", ".", "readline", ",", "b\"\"", ")", ":", "if", "process", ".", "poll", "(", ")", ":", "break", "if", "self", ".", "py3", ".", "is_python_2", "(", ")", ":", "line", "=", "line", ".", "decode", "(", "\"utf-8\"", ")", "self", ".", "persistent_output", "=", "line", "self", ".", "py3", ".", "update", "(", ")", "self", ".", "py3", ".", "log", "(", "\"command exited {cmd}\"", ".", "format", "(", "cmd", "=", "command", ")", ")", "self", ".", "persistent_output", "=", "\"Error\\nError\\n{}\"", ".", "format", "(", "self", ".", "py3", ".", "COLOR_ERROR", "or", "self", ".", "py3", ".", "COLOR_BAD", ")", "self", ".", "py3", ".", "update", "(", ")" ]
Run the command inside a thread so that we can catch output for each line as it comes in and display it.
[ "Run", "the", "command", "inside", "a", "thread", "so", "that", "we", "can", "catch", "output", "for", "each", "line", "as", "it", "comes", "in", "and", "display", "it", "." ]
python
train
41.514706
misli/django-cms-articles
cms_articles/api.py
https://github.com/misli/django-cms-articles/blob/d96ac77e049022deb4c70d268e4eab74d175145c/cms_articles/api.py#L26-L100
def create_article(tree, template, title, language, slug=None, description=None,
                   page_title=None, menu_title=None, meta_description=None,
                   created_by=None, image=None, publication_date=None,
                   publication_end_date=None, published=False, login_required=False,
                   creation_date=None, categories=[]):
    """
    Create a CMS Article and its title for the given language
    """
    # NOTE: categories=[] is a mutable default argument; it is safe here
    # only because the list is iterated, never mutated.

    # validate tree
    tree = tree.get_public_object()
    assert tree.application_urls == 'CMSArticlesApp'

    # validate template
    assert template in [tpl[0] for tpl in settings.CMS_ARTICLES_TEMPLATES]
    get_template(template)

    # validate language:
    assert language in get_language_list(tree.node.site_id), settings.CMS_LANGUAGES.get(tree.node.site_id)

    # validate publication date
    if publication_date:
        assert isinstance(publication_date, datetime.date)

    # validate publication end date
    if publication_end_date:
        assert isinstance(publication_end_date, datetime.date)

    # validate creation date
    if not creation_date:
        creation_date = publication_date
    if creation_date:
        assert isinstance(creation_date, datetime.date)

    # get username
    if created_by:
        try:
            username = created_by.get_username()
        except Exception:
            username = force_text(created_by)
    else:
        username = 'script'

    with current_user(username):
        # create article
        article = Article.objects.create(
            tree=tree,
            template=template,
            login_required=login_required,
            creation_date=creation_date,
            publication_date=publication_date,
            publication_end_date=publication_end_date,
            languages=language,
        )
        for category in categories:
            article.categories.add(category)

        # create title
        create_title(
            article=article,
            language=language,
            title=title,
            slug=slug,
            description=description,
            page_title=page_title,
            menu_title=menu_title,
            meta_description=meta_description,
            creation_date=creation_date,
            image=image,
        )

        # publish article
        if published:
            article.publish(language)

    return article.reload()
[ "def", "create_article", "(", "tree", ",", "template", ",", "title", ",", "language", ",", "slug", "=", "None", ",", "description", "=", "None", ",", "page_title", "=", "None", ",", "menu_title", "=", "None", ",", "meta_description", "=", "None", ",", "created_by", "=", "None", ",", "image", "=", "None", ",", "publication_date", "=", "None", ",", "publication_end_date", "=", "None", ",", "published", "=", "False", ",", "login_required", "=", "False", ",", "creation_date", "=", "None", ",", "categories", "=", "[", "]", ")", ":", "# validate tree", "tree", "=", "tree", ".", "get_public_object", "(", ")", "assert", "tree", ".", "application_urls", "==", "'CMSArticlesApp'", "# validate template", "assert", "template", "in", "[", "tpl", "[", "0", "]", "for", "tpl", "in", "settings", ".", "CMS_ARTICLES_TEMPLATES", "]", "get_template", "(", "template", ")", "# validate language:", "assert", "language", "in", "get_language_list", "(", "tree", ".", "node", ".", "site_id", ")", ",", "settings", ".", "CMS_LANGUAGES", ".", "get", "(", "tree", ".", "node", ".", "site_id", ")", "# validate publication date", "if", "publication_date", ":", "assert", "isinstance", "(", "publication_date", ",", "datetime", ".", "date", ")", "# validate publication end date", "if", "publication_end_date", ":", "assert", "isinstance", "(", "publication_end_date", ",", "datetime", ".", "date", ")", "# validate creation date", "if", "not", "creation_date", ":", "creation_date", "=", "publication_date", "if", "creation_date", ":", "assert", "isinstance", "(", "creation_date", ",", "datetime", ".", "date", ")", "# get username", "if", "created_by", ":", "try", ":", "username", "=", "created_by", ".", "get_username", "(", ")", "except", "Exception", ":", "username", "=", "force_text", "(", "created_by", ")", "else", ":", "username", "=", "'script'", "with", "current_user", "(", "username", ")", ":", "# create article", "article", "=", "Article", ".", "objects", ".", "create", "(", "tree", "=", "tree", ",", "template", "=", "template", ",", "login_required", "=", "login_required", ",", "creation_date", "=", "creation_date", ",", "publication_date", "=", "publication_date", ",", "publication_end_date", "=", "publication_end_date", ",", "languages", "=", "language", ",", ")", "for", "category", "in", "categories", ":", "article", ".", "categories", ".", "add", "(", "category", ")", "# create title", "create_title", "(", "article", "=", "article", ",", "language", "=", "language", ",", "title", "=", "title", ",", "slug", "=", "slug", ",", "description", "=", "description", ",", "page_title", "=", "page_title", ",", "menu_title", "=", "menu_title", ",", "meta_description", "=", "meta_description", ",", "creation_date", "=", "creation_date", ",", "image", "=", "image", ",", ")", "# publish article", "if", "published", ":", "article", ".", "publish", "(", "language", ")", "return", "article", ".", "reload", "(", ")" ]
Create a CMS Article and its title for the given language
[ "Create", "a", "CMS", "Article", "and", "its", "title", "for", "the", "given", "language" ]
python
train
30.92
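A minimal usage sketch for the function above; the page lookup, the import context, and the template name are assumptions rather than details taken from this record:

# Hypothetical usage; create_article's real import path is not shown here.
from cms.models import Page  # django-cms page acting as the article tree

tree = Page.objects.get(reverse_id='articles')  # assumed apphooked page
article = create_article(
    tree=tree,
    template='cms_articles/default.html',  # must appear in CMS_ARTICLES_TEMPLATES
    title='Hello world',
    language='en',
    published=True,
)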
bspaans/python-mingus
mingus/containers/track.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/track.py#L89-L127
def from_chords(self, chords, duration=1): """Add chords to the Track. The given chords should be a list of shorthand strings or list of list of shorthand strings, etc. Each sublist divides the value by 2. If a tuning is set, chords will be expanded so they have a proper fingering. Example: >>> t = Track().from_chords(['C', ['Am', 'Dm'], 'G7', 'C#'], 1) """ tun = self.get_tuning() def add_chord(chord, duration): if type(chord) == list: for c in chord: add_chord(c, duration * 2) else: chord = NoteContainer().from_chord(chord) if tun: chord = tun.find_chord_fingering(chord, return_best_as_NoteContainer=True) if not self.add_notes(chord, duration): # This should be the standard behaviour of add_notes dur = self.bars[-1].value_left() self.add_notes(chord, dur) # warning should hold note self.add_notes(chord, value.subtract(duration, dur)) for c in chords: if c is not None: add_chord(c, duration) else: self.add_notes(None, duration) return self
[ "def", "from_chords", "(", "self", ",", "chords", ",", "duration", "=", "1", ")", ":", "tun", "=", "self", ".", "get_tuning", "(", ")", "def", "add_chord", "(", "chord", ",", "duration", ")", ":", "if", "type", "(", "chord", ")", "==", "list", ":", "for", "c", "in", "chord", ":", "add_chord", "(", "c", ",", "duration", "*", "2", ")", "else", ":", "chord", "=", "NoteContainer", "(", ")", ".", "from_chord", "(", "chord", ")", "if", "tun", ":", "chord", "=", "tun", ".", "find_chord_fingering", "(", "chord", ",", "return_best_as_NoteContainer", "=", "True", ")", "if", "not", "self", ".", "add_notes", "(", "chord", ",", "duration", ")", ":", "# This should be the standard behaviour of add_notes", "dur", "=", "self", ".", "bars", "[", "-", "1", "]", ".", "value_left", "(", ")", "self", ".", "add_notes", "(", "chord", ",", "dur", ")", "# warning should hold note", "self", ".", "add_notes", "(", "chord", ",", "value", ".", "subtract", "(", "duration", ",", "dur", ")", ")", "for", "c", "in", "chords", ":", "if", "c", "is", "not", "None", ":", "add_chord", "(", "c", ",", "duration", ")", "else", ":", "self", ".", "add_notes", "(", "None", ",", "duration", ")", "return", "self" ]
Add chords to the Track. The given chords should be a list of shorthand strings or list of list of shorthand strings, etc. Each sublist divides the value by 2. If a tuning is set, chords will be expanded so they have a proper fingering. Example: >>> t = Track().from_chords(['C', ['Am', 'Dm'], 'G7', 'C#'], 1)
[ "Add", "chords", "to", "the", "Track", "." ]
python
train
34.307692
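The doctest in the docstring above can be fleshed out a little; this sketch assumes the modern mingus.containers import path:

from mingus.containers import Track

t = Track()
# 'C' fills a whole bar at value 1; the nested ['Am', 'Dm'] pair shares
# one slot, so each of those chords is added at value 2 (half as long).
t.from_chords(['C', ['Am', 'Dm'], 'G7', 'C#'], 1)
print(t)  # four bars: C | Am Dm | G7 | C#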
benoitkugler/abstractDataLibrary
pyDLib/GUI/common.py
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/common.py#L346-L369
def cree_widgets(self): """Create widgets and store them in self.widgets""" for t in self.FIELDS: if type(t) is str: attr, kwargs = t, {} else: attr, kwargs = t[0], t[1].copy() self.champs.append(attr) is_editable = kwargs.pop("is_editable", self.is_editable) args = [self.acces[attr], is_editable] with_base = kwargs.pop("with_base", False) if with_base: args.append(self.acces.base) if 'with_label' in kwargs: label = kwargs.pop('with_label') else: label = ASSOCIATION[attr][0] if kwargs: w = ASSOCIATION[attr][3](*args, **kwargs) else: w = ASSOCIATION[attr][3](*args) self.widgets[attr] = (w, label)
[ "def", "cree_widgets", "(", "self", ")", ":", "for", "t", "in", "self", ".", "FIELDS", ":", "if", "type", "(", "t", ")", "is", "str", ":", "attr", ",", "kwargs", "=", "t", ",", "{", "}", "else", ":", "attr", ",", "kwargs", "=", "t", "[", "0", "]", ",", "t", "[", "1", "]", ".", "copy", "(", ")", "self", ".", "champs", ".", "append", "(", "attr", ")", "is_editable", "=", "kwargs", ".", "pop", "(", "\"is_editable\"", ",", "self", ".", "is_editable", ")", "args", "=", "[", "self", ".", "acces", "[", "attr", "]", ",", "is_editable", "]", "with_base", "=", "kwargs", ".", "pop", "(", "\"with_base\"", ",", "False", ")", "if", "with_base", ":", "args", ".", "append", "(", "self", ".", "acces", ".", "base", ")", "if", "'with_label'", "in", "kwargs", ":", "label", "=", "kwargs", ".", "pop", "(", "'with_label'", ")", "else", ":", "label", "=", "ASSOCIATION", "[", "attr", "]", "[", "0", "]", "if", "kwargs", ":", "w", "=", "ASSOCIATION", "[", "attr", "]", "[", "3", "]", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "w", "=", "ASSOCIATION", "[", "attr", "]", "[", "3", "]", "(", "*", "args", ")", "self", ".", "widgets", "[", "attr", "]", "=", "(", "w", ",", "label", ")" ]
Create widgets and store them in self.widgets
[ "Create", "widgets", "and", "store", "them", "in", "self", ".", "widgets" ]
python
train
35.541667
briancappello/flask-unchained
flask_mail.py
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_mail.py#L691-L704
def init_app(self, app): """Initializes your mail settings from the application settings. You can use this if you want to set up your Mail instance at configuration time. :param app: Flask application instance """ state = self.init_mail(app.config, app.debug, app.testing) # register extension with app app.extensions = getattr(app, 'extensions', {}) app.extensions['mail'] = state return state
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "state", "=", "self", ".", "init_mail", "(", "app", ".", "config", ",", "app", ".", "debug", ",", "app", ".", "testing", ")", "# register extension with app", "app", ".", "extensions", "=", "getattr", "(", "app", ",", "'extensions'", ",", "{", "}", ")", "app", ".", "extensions", "[", "'mail'", "]", "=", "state", "return", "state" ]
Initializes your mail settings from the application settings. You can use this if you want to set up your Mail instance at configuration time. :param app: Flask application instance
[ "Initializes", "your", "mail", "settings", "from", "the", "application", "settings", "." ]
python
train
33.142857
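The docstring describes Flask's app-factory pattern; a short sketch using the Mail class defined in this vendored module (the config keys are the standard Flask-Mail ones):

from flask import Flask

mail = Mail()  # created without an app, configured later

def create_app():
    app = Flask(__name__)
    app.config['MAIL_SERVER'] = 'localhost'
    app.config['MAIL_PORT'] = 25
    mail.init_app(app)  # state ends up in app.extensions['mail']
    return app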
ga4gh/ga4gh-client
ga4gh/client/cli.py
https://github.com/ga4gh/ga4gh-client/blob/d23b00b89112ef0930d45ee75aa3c6de3db615c5/ga4gh/client/cli.py#L501-L521
def _textOutput(self, gaObjects): """ Prints out the specified Variant objects in a VCF-like form. """ for variant in gaObjects: print( variant.id, variant.variant_set_id, variant.names, variant.reference_name, variant.start, variant.end, variant.reference_bases, variant.alternate_bases, sep="\t", end="\t") for key, value in variant.attributes.attr.items(): val = value.values[0].string_value print(key, val, sep="=", end=";") print("\t", end="") for c in variant.calls: print( c.call_set_id, c.genotype.__str__().replace('\n', ''), c.genotype_likelihood, c.attributes, c.phaseset, sep=":", end="\t") print()
[ "def", "_textOutput", "(", "self", ",", "gaObjects", ")", ":", "for", "variant", "in", "gaObjects", ":", "print", "(", "variant", ".", "id", ",", "variant", ".", "variant_set_id", ",", "variant", ".", "names", ",", "variant", ".", "reference_name", ",", "variant", ".", "start", ",", "variant", ".", "end", ",", "variant", ".", "reference_bases", ",", "variant", ".", "alternate_bases", ",", "sep", "=", "\"\\t\"", ",", "end", "=", "\"\\t\"", ")", "for", "key", ",", "value", "in", "variant", ".", "attributes", ".", "attr", ".", "items", "(", ")", ":", "val", "=", "value", ".", "values", "[", "0", "]", ".", "string_value", "print", "(", "key", ",", "val", ",", "sep", "=", "\"=\"", ",", "end", "=", "\";\"", ")", "print", "(", "\"\\t\"", ",", "end", "=", "\"\"", ")", "for", "c", "in", "variant", ".", "calls", ":", "print", "(", "c", ".", "call_set_id", ",", "c", ".", "genotype", ".", "__str__", "(", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", ",", "c", ".", "genotype_likelihood", ",", "c", ".", "attributes", ",", "c", ".", "phaseset", ",", "sep", "=", "\":\"", ",", "end", "=", "\"\\t\"", ")", "print", "(", ")" ]
Prints out the specified Variant objects in a VCF-like form.
[ "Prints", "out", "the", "specified", "Variant", "objects", "in", "a", "VCF", "-", "like", "form", "." ]
python
train
41.619048
zyga/json-schema-validator
json_schema_validator/schema.py
https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L145-L153
def requires(self): """Additional object or objects required by this object.""" # NOTE: spec says this can also be a list of strings value = self._schema.get("requires", {}) if not isinstance(value, (basestring, dict)): raise SchemaError( "requires value {0!r} is neither a string nor an" " object".format(value)) return value
[ "def", "requires", "(", "self", ")", ":", "# NOTE: spec says this can also be a list of strings", "value", "=", "self", ".", "_schema", ".", "get", "(", "\"requires\"", ",", "{", "}", ")", "if", "not", "isinstance", "(", "value", ",", "(", "basestring", ",", "dict", ")", ")", ":", "raise", "SchemaError", "(", "\"requires value {0!r} is neither a string nor an\"", "\" object\"", ".", "format", "(", "value", ")", ")", "return", "value" ]
Additional object or objects required by this object.
[ "Additional", "object", "or", "objects", "required", "by", "this", "object", "." ]
python
train
44.666667
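A behaviour sketch, assuming the class in this module is named Schema and wraps a plain schema dictionary:

from json_schema_validator.schema import Schema

print(Schema({"requires": "other_field"}).requires)  # 'other_field'
print(Schema({}).requires)                           # {} (the default)

Schema({"requires": 42}).requires  # raises SchemaError: neither string nor object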
CityOfZion/neo-python
neo/Wallets/NEP5Token.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Wallets/NEP5Token.py#L310-L318
def Deserialize(self, reader): """ Read serialized data from byte stream Args: reader (neocore.IO.BinaryReader): reader to read byte data from """ self.name = reader.ReadVarString().decode('utf-8') self.symbol = reader.ReadVarString().decode('utf-8') self.decimals = reader.ReadUInt8()
[ "def", "Deserialize", "(", "self", ",", "reader", ")", ":", "self", ".", "name", "=", "reader", ".", "ReadVarString", "(", ")", ".", "decode", "(", "'utf-8'", ")", "self", ".", "symbol", "=", "reader", ".", "ReadVarString", "(", ")", ".", "decode", "(", "'utf-8'", ")", "self", ".", "decimals", "=", "reader", ".", "ReadUInt8", "(", ")" ]
Read serialized data from byte stream Args: reader (neocore.IO.BinaryReader): reader to read byte data from
[ "Read", "serialized", "data", "from", "byte", "stream", "Args", ":", "reader", "(", "neocore", ".", "IO", ".", "BinaryReader", ")", ":", "reader", "to", "read", "byte", "data", "from" ]
python
train
38.333333
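A contract sketch with a stand-in reader; in real use the caller passes a neocore.IO.BinaryReader positioned at the serialized token data, and the no-argument NEP5Token construction below is an assumption:

class StubReader(object):
    def __init__(self, name, symbol, decimals):
        self._strings = [name.encode('utf-8'), symbol.encode('utf-8')]
        self._decimals = decimals

    def ReadVarString(self):
        return self._strings.pop(0)   # name is read first, then symbol

    def ReadUInt8(self):
        return self._decimals

token = NEP5Token()  # assumption: default-constructible
token.Deserialize(StubReader('My Token', 'MTK', 8))
print(token.name, token.symbol, token.decimals)  # My Token MTK 8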
wbond/oscrypto
oscrypto/_osx/util.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_osx/util.py#L42-L60
def _extract_error(): """ Extracts the last OS error message into a python unicode string :return: A unicode string error message """ error_num = errno() try: error_string = os.strerror(error_num) except (ValueError): return str_cls(error_num) if isinstance(error_string, str_cls): return error_string return _try_decode(error_string)
[ "def", "_extract_error", "(", ")", ":", "error_num", "=", "errno", "(", ")", "try", ":", "error_string", "=", "os", ".", "strerror", "(", "error_num", ")", "except", "(", "ValueError", ")", ":", "return", "str_cls", "(", "error_num", ")", "if", "isinstance", "(", "error_string", ",", "str_cls", ")", ":", "return", "error_string", "return", "_try_decode", "(", "error_string", ")" ]
Extracts the last OS error message into a python unicode string :return: A unicode string error message
[ "Extracts", "the", "last", "OS", "error", "message", "into", "a", "python", "unicode", "string" ]
python
valid
20.473684
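The helper reduces to a stdlib pattern that can be shown directly:

import os
import errno

# _extract_error() above wraps this: take the thread-local errno and
# turn it into a readable message, falling back to the raw number.
print(os.strerror(errno.ENOENT))  # 'No such file or directory'
print(os.strerror(errno.EACCES))  # 'Permission denied'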
NuGrid/NuGridPy
nugridpy/ppn.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/ppn.py#L545-L607
def getColData(self, attri, fname, numtype='cycNum'): """ In this method a column of data for the associated column attribute is returned. Parameters ---------- attri : string The name of the attribute we are looking for. fname : string The name of the file we are getting the data from or the cycle number found in the filename. numtype : string, optional Determines whether fname is the name of a file or, the cycle number. If it is 'file' it will then interpret it as a file, if it is 'cycNum' it will then interpret it as a cycle number. The default is "cycNum". """ fname=self.findFile(fname,numtype) f=open(fname,'r') for i in range(self.index+1): f.readline() lines=f.readlines() for i in range(len(lines)): lines[i]=lines[i].strip() lines[i]=lines[i].split() index=0 data=[] while index < len (self.dcols): if attri== self.dcols[index]: break index+=1 for i in range(len(lines)): if index==5 and len(lines[i])==7: data.append(str(lines[i][index].capitalize())+'-'\ +str(lines[i][index+1])) elif index==5 and len(lines[i])!=7: tmp=str(lines[i][index]) if tmp[len(tmp)-1].isdigit(): tmp1=tmp[0]+tmp[1] tmp1=tmp1.capitalize() tmp2='' for j in range(len(tmp)): if j == 0 or j == 1: continue tmp2+=tmp[j] data.append(tmp1+'-'+tmp2) elif tmp=='PROT': data.append('H-1') elif tmp==('NEUT'or'NEUTR'or'nn'or'N 1'or'N-1'): data.append('N-1') else: data.append(tmp) elif index==0: data.append(int(lines[i][index])) else: data.append(float(lines[i][index])) return array(data)
[ "def", "getColData", "(", "self", ",", "attri", ",", "fname", ",", "numtype", "=", "'cycNum'", ")", ":", "fname", "=", "self", ".", "findFile", "(", "fname", ",", "numtype", ")", "f", "=", "open", "(", "fname", ",", "'r'", ")", "for", "i", "in", "range", "(", "self", ".", "index", "+", "1", ")", ":", "f", ".", "readline", "(", ")", "lines", "=", "f", ".", "readlines", "(", ")", "for", "i", "in", "range", "(", "len", "(", "lines", ")", ")", ":", "lines", "[", "i", "]", "=", "lines", "[", "i", "]", ".", "strip", "(", ")", "lines", "[", "i", "]", "=", "lines", "[", "i", "]", ".", "split", "(", ")", "index", "=", "0", "data", "=", "[", "]", "while", "index", "<", "len", "(", "self", ".", "dcols", ")", ":", "if", "attri", "==", "self", ".", "dcols", "[", "index", "]", ":", "break", "index", "+=", "1", "for", "i", "in", "range", "(", "len", "(", "lines", ")", ")", ":", "if", "index", "==", "5", "and", "len", "(", "lines", "[", "i", "]", ")", "==", "7", ":", "data", ".", "append", "(", "str", "(", "lines", "[", "i", "]", "[", "index", "]", ".", "capitalize", "(", ")", ")", "+", "'-'", "+", "str", "(", "lines", "[", "i", "]", "[", "index", "+", "1", "]", ")", ")", "elif", "index", "==", "5", "and", "len", "(", "lines", "[", "i", "]", ")", "!=", "7", ":", "tmp", "=", "str", "(", "lines", "[", "i", "]", "[", "index", "]", ")", "if", "tmp", "[", "len", "(", "tmp", ")", "-", "1", "]", ".", "isdigit", "(", ")", ":", "tmp1", "=", "tmp", "[", "0", "]", "+", "tmp", "[", "1", "]", "tmp1", "=", "tmp1", ".", "capitalize", "(", ")", "tmp2", "=", "''", "for", "j", "in", "range", "(", "len", "(", "tmp", ")", ")", ":", "if", "j", "==", "0", "or", "j", "==", "1", ":", "continue", "tmp2", "+=", "tmp", "[", "j", "]", "data", ".", "append", "(", "tmp1", "+", "'-'", "+", "tmp2", ")", "elif", "tmp", "==", "'PROT'", ":", "data", ".", "append", "(", "'H-1'", ")", "elif", "tmp", "==", "(", "'NEUT'", "or", "'NEUTR'", "or", "'nn'", "or", "'N 1'", "or", "'N-1'", ")", ":", "data", ".", "append", "(", "'N-1'", ")", "else", ":", "data", ".", "append", "(", "tmp", ")", "elif", "index", "==", "0", ":", "data", ".", "append", "(", "int", "(", "lines", "[", "i", "]", "[", "index", "]", ")", ")", "else", ":", "data", ".", "append", "(", "float", "(", "lines", "[", "i", "]", "[", "index", "]", ")", ")", "return", "array", "(", "data", ")" ]
In this method a column of data for the associated column attribute is returned. Parameters ---------- attri : string The name of the attribute we are looking for. fname : string The name of the file we are getting the data from or the cycle number found in the filename. numtype : string, optional Determines whether fname is the name of a file or, the cycle number. If it is 'file' it will then interpret it as a file, if it is 'cycNum' it will then interpret it as a cycle number. The default is "cycNum".
[ "In", "this", "method", "a", "column", "of", "data", "for", "the", "associated", "column", "attribute", "is", "returned", "." ]
python
train
34.31746
awslabs/serverless-application-model
samtranslator/plugins/application/serverless_app_plugin.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/plugins/application/serverless_app_plugin.py#L300-L315
def _handle_get_cfn_template_response(self, response, application_id, template_id): """ Handles the response from the SAR service call :param dict response: the response dictionary from the app repo :param string application_id: the ApplicationId :param string template_id: the unique TemplateId for this application """ status = response['Status'] if status != "ACTIVE": # Other options are PREPARING and EXPIRED. if status == 'EXPIRED': message = ("Template for {} with id {} returned status: {}. Cannot access an expired " "template.".format(application_id, template_id, status)) raise InvalidResourceException(application_id, message) self._in_progress_templates.append((application_id, template_id))
[ "def", "_handle_get_cfn_template_response", "(", "self", ",", "response", ",", "application_id", ",", "template_id", ")", ":", "status", "=", "response", "[", "'Status'", "]", "if", "status", "!=", "\"ACTIVE\"", ":", "# Other options are PREPARING and EXPIRED.", "if", "status", "==", "'EXPIRED'", ":", "message", "=", "(", "\"Template for {} with id {} returned status: {}. Cannot access an expired \"", "\"template.\"", ".", "format", "(", "application_id", ",", "template_id", ",", "status", ")", ")", "raise", "InvalidResourceException", "(", "application_id", ",", "message", ")", "self", ".", "_in_progress_templates", ".", "append", "(", "(", "application_id", ",", "template_id", ")", ")" ]
Handles the response from the SAR service call :param dict response: the response dictionary from the app repo :param string application_id: the ApplicationId :param string template_id: the unique TemplateId for this application
[ "Handles", "the", "response", "from", "the", "SAR", "service", "call" ]
python
train
53.0625
daviddrysdale/python-phonenumbers
python/phonenumbers/unicode_util.py
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/unicode_util.py#L397-L405
def digit(uni_char, default_value=None): """Returns the digit value assigned to the Unicode character uni_char as integer. If no such value is defined, default is returned, or, if not given, ValueError is raised.""" uni_char = unicod(uni_char) # Force to Unicode. if default_value is not None: return unicodedata.digit(uni_char, default_value) else: return unicodedata.digit(uni_char)
[ "def", "digit", "(", "uni_char", ",", "default_value", "=", "None", ")", ":", "uni_char", "=", "unicod", "(", "uni_char", ")", "# Force to Unicode.", "if", "default_value", "is", "not", "None", ":", "return", "unicodedata", ".", "digit", "(", "uni_char", ",", "default_value", ")", "else", ":", "return", "unicodedata", ".", "digit", "(", "uni_char", ")" ]
Returns the digit value assigned to the Unicode character uni_char as integer. If no such value is defined, default is returned, or, if not given, ValueError is raised.
[ "Returns", "the", "digit", "value", "assigned", "to", "the", "Unicode", "character", "uni_char", "as", "integer", ".", "If", "no", "such", "value", "is", "defined", "default", "is", "returned", "or", "if", "not", "given", "ValueError", "is", "raised", "." ]
python
train
46.333333
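The wrapper defers to unicodedata.digit, whose behaviour the docstring mirrors:

import unicodedata

print(unicodedata.digit(u'7'))        # 7
print(unicodedata.digit(u'\u0667'))   # 7 (ARABIC-INDIC DIGIT SEVEN)
print(unicodedata.digit(u'a', -1))    # -1 (default returned instead of ValueError)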
bitesofcode/projexui
projexui/widgets/xorbbrowserwidget/xcardwidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbbrowserwidget/xcardwidget.py#L75-L92
def setRecord( self, record ): """ Sets the record that is linked with this widget. :param record | <orb.Table> """ super(XBasicCardWidget, self).setRecord(record) browser = self.browserWidget() if ( not browser ): return factory = browser.factory() if ( not factory ): return self._thumbnailButton.setIcon(factory.thumbnail(record)) self._titleLabel.setText(factory.thumbnailText(record))
[ "def", "setRecord", "(", "self", ",", "record", ")", ":", "super", "(", "XBasicCardWidget", ",", "self", ")", ".", "setRecord", "(", "record", ")", "browser", "=", "self", ".", "browserWidget", "(", ")", "if", "(", "not", "browser", ")", ":", "return", "factory", "=", "browser", ".", "factory", "(", ")", "if", "(", "not", "factory", ")", ":", "return", "self", ".", "_thumbnailButton", ".", "setIcon", "(", "factory", ".", "thumbnail", "(", "record", ")", ")", "self", ".", "_titleLabel", ".", "setText", "(", "factory", ".", "thumbnailText", "(", "record", ")", ")" ]
Sets the record that is linked with this widget. :param record | <orb.Table>
[ "Sets", "the", "record", "that", "is", "linked", "with", "this", "widget", ".", ":", "param", "record", "|", "<orb", ".", "Table", ">" ]
python
train
30.111111
jtwhite79/pyemu
pyemu/logger.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/logger.py#L64-L92
def log(self,phrase): """log something that happened. The first time phrase is passed the start time is saved. The second time the phrase is logged, the elapsed time is written Parameters ---------- phrase : str the thing that happened """ pass t = datetime.now() if phrase in self.items.keys(): s = str(t) + ' finished: ' + str(phrase) + " took: " + \ str(t - self.items[phrase]) + '\n' if self.echo: print(s,end='') if self.filename: self.f.write(s) self.f.flush() self.items.pop(phrase) else: s = str(t) + ' starting: ' + str(phrase) + '\n' if self.echo: print(s,end='') if self.filename: self.f.write(s) self.f.flush() self.items[phrase] = copy.deepcopy(t)
[ "def", "log", "(", "self", ",", "phrase", ")", ":", "pass", "t", "=", "datetime", ".", "now", "(", ")", "if", "phrase", "in", "self", ".", "items", ".", "keys", "(", ")", ":", "s", "=", "str", "(", "t", ")", "+", "' finished: '", "+", "str", "(", "phrase", ")", "+", "\" took: \"", "+", "str", "(", "t", "-", "self", ".", "items", "[", "phrase", "]", ")", "+", "'\\n'", "if", "self", ".", "echo", ":", "print", "(", "s", ",", "end", "=", "''", ")", "if", "self", ".", "filename", ":", "self", ".", "f", ".", "write", "(", "s", ")", "self", ".", "f", ".", "flush", "(", ")", "self", ".", "items", ".", "pop", "(", "phrase", ")", "else", ":", "s", "=", "str", "(", "t", ")", "+", "' starting: '", "+", "str", "(", "phrase", ")", "+", "'\\n'", "if", "self", ".", "echo", ":", "print", "(", "s", ",", "end", "=", "''", ")", "if", "self", ".", "filename", ":", "self", ".", "f", ".", "write", "(", "s", ")", "self", ".", "f", ".", "flush", "(", ")", "self", ".", "items", "[", "phrase", "]", "=", "copy", ".", "deepcopy", "(", "t", ")" ]
log something that happened. The first time phrase is passed the start time is saved. The second time the phrase is logged, the elapsed time is written Parameters ---------- phrase : str the thing that happened
[ "log", "something", "that", "happened", ".", "The", "first", "time", "phrase", "is", "passed", "the", "start", "time", "is", "saved", ".", "The", "second", "time", "the", "phrase", "is", "logged", "the", "elapsed", "time", "is", "written" ]
python
train
32.793103
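A usage sketch of the start/finish pairing the docstring describes; the Logger constructor arguments are assumed from typical pyemu usage, and load_jacobian is a placeholder for real work:

log = Logger("activity.log", echo=True)
log.log("loading jacobian")    # first call records the start time
jco = load_jacobian()          # placeholder for the actual work
log.log("loading jacobian")    # same phrase again writes the elapsed time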
saltstack/salt
salt/runners/vault.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/vault.py#L192-L239
def _expand_pattern_lists(pattern, **mappings): ''' Expands the pattern for any list-valued mappings, such that for any list of length N in the mappings present in the pattern, N copies of the pattern are returned, each with an element of the list substituted. pattern: A pattern to expand, for example ``by-role/{grains[roles]}`` mappings: A dictionary of variables that can be expanded into the pattern. Example: Given the pattern `` by-role/{grains[roles]}`` and the below grains .. code-block:: yaml grains: roles: - web - database This function will expand into two patterns, ``[by-role/web, by-role/database]``. Note that this method does not expand any non-list patterns. ''' expanded_patterns = [] f = string.Formatter() ''' This function uses a string.Formatter to get all the formatting tokens from the pattern, then recursively replaces tokens whose expanded value is a list. For a list with N items, it will create N new pattern strings and then continue with the next token. In practice this is expected to not be very expensive, since patterns will typically involve a handful of lists at most. ''' # pylint: disable=W0105 for (_, field_name, _, _) in f.parse(pattern): if field_name is None: continue (value, _) = f.get_field(field_name, None, mappings) if isinstance(value, list): token = '{{{0}}}'.format(field_name) expanded = [pattern.replace(token, six.text_type(elem)) for elem in value] for expanded_item in expanded: result = _expand_pattern_lists(expanded_item, **mappings) expanded_patterns += result return expanded_patterns return [pattern]
[ "def", "_expand_pattern_lists", "(", "pattern", ",", "*", "*", "mappings", ")", ":", "expanded_patterns", "=", "[", "]", "f", "=", "string", ".", "Formatter", "(", ")", "'''\n This function uses a string.Formatter to get all the formatting tokens from\n the pattern, then recursively replaces tokens whose expanded value is a\n list. For a list with N items, it will create N new pattern strings and\n then continue with the next token. In practice this is expected to not be\n very expensive, since patterns will typically involve a handful of lists at\n most.\n '''", "# pylint: disable=W0105", "for", "(", "_", ",", "field_name", ",", "_", ",", "_", ")", "in", "f", ".", "parse", "(", "pattern", ")", ":", "if", "field_name", "is", "None", ":", "continue", "(", "value", ",", "_", ")", "=", "f", ".", "get_field", "(", "field_name", ",", "None", ",", "mappings", ")", "if", "isinstance", "(", "value", ",", "list", ")", ":", "token", "=", "'{{{0}}}'", ".", "format", "(", "field_name", ")", "expanded", "=", "[", "pattern", ".", "replace", "(", "token", ",", "six", ".", "text_type", "(", "elem", ")", ")", "for", "elem", "in", "value", "]", "for", "expanded_item", "in", "expanded", ":", "result", "=", "_expand_pattern_lists", "(", "expanded_item", ",", "*", "*", "mappings", ")", "expanded_patterns", "+=", "result", "return", "expanded_patterns", "return", "[", "pattern", "]" ]
Expands the pattern for any list-valued mappings, such that for any list of length N in the mappings present in the pattern, N copies of the pattern are returned, each with an element of the list substituted. pattern: A pattern to expand, for example ``by-role/{grains[roles]}`` mappings: A dictionary of variables that can be expanded into the pattern. Example: Given the pattern `` by-role/{grains[roles]}`` and the below grains .. code-block:: yaml grains: roles: - web - database This function will expand into two patterns, ``[by-role/web, by-role/database]``. Note that this method does not expand any non-list patterns.
[ "Expands", "the", "pattern", "for", "any", "list", "-", "valued", "mappings", "such", "that", "for", "any", "list", "of", "length", "N", "in", "the", "mappings", "present", "in", "the", "pattern", "N", "copies", "of", "the", "pattern", "are", "returned", "each", "with", "an", "element", "of", "the", "list", "substituted", "." ]
python
train
37.666667
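The docstring example can be run directly against the function:

grains = {"roles": ["web", "database"]}
print(_expand_pattern_lists("by-role/{grains[roles]}", grains=grains))
# -> ['by-role/web', 'by-role/database']

# A pattern with no list-valued fields comes back unchanged, in a list:
print(_expand_pattern_lists("by-id/{grains[id]}", grains={"id": "m1"}))
# -> ['by-id/{grains[id]}']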
OSLL/jabba
jabba/file_index.py
https://github.com/OSLL/jabba/blob/71c1d008ab497020fba6ffa12a600721eb3f5ef7/jabba/file_index.py#L66-L78
def add_file(self, path, yaml): """ Adds given file to the file index """ if is_job_config(yaml): name = self.get_job_name(yaml) file_data = FileData(path=path, yaml=yaml) self.files[path] = file_data self.jobs[name] = file_data else: self.files[path] = FileData(path=path, yaml=yaml)
[ "def", "add_file", "(", "self", ",", "path", ",", "yaml", ")", ":", "if", "is_job_config", "(", "yaml", ")", ":", "name", "=", "self", ".", "get_job_name", "(", "yaml", ")", "file_data", "=", "FileData", "(", "path", "=", "path", ",", "yaml", "=", "yaml", ")", "self", ".", "files", "[", "path", "]", "=", "file_data", "self", ".", "jobs", "[", "name", "]", "=", "file_data", "else", ":", "self", ".", "files", "[", "path", "]", "=", "FileData", "(", "path", "=", "path", ",", "yaml", "=", "yaml", ")" ]
Adds given file to the file index
[ "Adds", "given", "file", "to", "the", "file", "index" ]
python
train
28.769231
pypa/pipenv
pipenv/vendor/pexpect/spawnbase.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pexpect/spawnbase.py#L157-L180
def read_nonblocking(self, size=1, timeout=None): """This reads data from the file descriptor. This is a simple implementation suitable for a regular file. Subclasses using ptys or pipes should override it. The timeout parameter is ignored. """ try: s = os.read(self.child_fd, size) except OSError as err: if err.args[0] == errno.EIO: # Linux-style EOF self.flag_eof = True raise EOF('End Of File (EOF). Exception style platform.') raise if s == b'': # BSD-style EOF self.flag_eof = True raise EOF('End Of File (EOF). Empty string style platform.') s = self._decoder.decode(s, final=False) self._log(s, 'read') return s
[ "def", "read_nonblocking", "(", "self", ",", "size", "=", "1", ",", "timeout", "=", "None", ")", ":", "try", ":", "s", "=", "os", ".", "read", "(", "self", ".", "child_fd", ",", "size", ")", "except", "OSError", "as", "err", ":", "if", "err", ".", "args", "[", "0", "]", "==", "errno", ".", "EIO", ":", "# Linux-style EOF", "self", ".", "flag_eof", "=", "True", "raise", "EOF", "(", "'End Of File (EOF). Exception style platform.'", ")", "raise", "if", "s", "==", "b''", ":", "# BSD-style EOF", "self", ".", "flag_eof", "=", "True", "raise", "EOF", "(", "'End Of File (EOF). Empty string style platform.'", ")", "s", "=", "self", ".", "_decoder", ".", "decode", "(", "s", ",", "final", "=", "False", ")", "self", ".", "_log", "(", "s", ",", "'read'", ")", "return", "s" ]
This reads data from the file descriptor. This is a simple implementation suitable for a regular file. Subclasses using ptys or pipes should override it. The timeout parameter is ignored.
[ "This", "reads", "data", "from", "the", "file", "descriptor", "." ]
python
train
33.333333
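A usage sketch against the public pexpect API; pexpect.spawn overrides this base method with a pty-aware version, but the calling convention is the same:

import pexpect

child = pexpect.spawn('echo hello')
try:
    while True:
        print(child.read_nonblocking(size=1024, timeout=2))
except pexpect.EOF:
    pass  # raised once the child's output is exhausted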
NiklasRosenstein/py-bundler
bundler/nativedeps/windll.py
https://github.com/NiklasRosenstein/py-bundler/blob/80dd6dc971667ba015f7f67481417c45cc757231/bundler/nativedeps/windll.py#L60-L94
def get_dependency_walker(): """ Checks if `depends.exe` is in the system PATH. If not, it will be downloaded and extracted to a temporary directory. Note that the file will not be deleted afterwards. Returns the path to the Dependency Walker executable. """ for dirname in os.getenv('PATH', '').split(os.pathsep): filename = os.path.join(dirname, 'depends.exe') if os.path.isfile(filename): logger.info('Dependency Walker found at "{}"'.format(filename)) return filename temp_exe = os.path.join(tempfile.gettempdir(), 'depends.exe') temp_dll = os.path.join(tempfile.gettempdir(), 'depends.dll') if os.path.isfile(temp_exe): logger.info('Dependency Walker found at "{}"'.format(temp_exe)) return temp_exe logger.info('Dependency Walker not found. Downloading ...') with urlopen('http://dependencywalker.com/depends22_x64.zip') as fp: data = fp.read() logger.info('Extracting Dependency Walker to "{}"'.format(temp_exe)) with zipfile.ZipFile(io.BytesIO(data)) as fp: with fp.open('depends.exe') as src: with open(temp_exe, 'wb') as dst: shutil.copyfileobj(src, dst) with fp.open('depends.dll') as src: with open(temp_dll, 'wb') as dst: shutil.copyfileobj(src, dst) return temp_exe
[ "def", "get_dependency_walker", "(", ")", ":", "for", "dirname", "in", "os", ".", "getenv", "(", "'PATH'", ",", "''", ")", ".", "split", "(", "os", ".", "pathsep", ")", ":", "filename", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "'depends.exe'", ")", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "logger", ".", "info", "(", "'Dependency Walker found at \"{}\"'", ".", "format", "(", "filename", ")", ")", "return", "filename", "temp_exe", "=", "os", ".", "path", ".", "join", "(", "tempfile", ".", "gettempdir", "(", ")", ",", "'depends.exe'", ")", "temp_dll", "=", "os", ".", "path", ".", "join", "(", "tempfile", ".", "gettempdir", "(", ")", ",", "'depends.dll'", ")", "if", "os", ".", "path", ".", "isfile", "(", "temp_exe", ")", ":", "logger", ".", "info", "(", "'Dependency Walker found at \"{}\"'", ".", "format", "(", "temp_exe", ")", ")", "return", "temp_exe", "logger", ".", "info", "(", "'Dependency Walker not found. Downloading ...'", ")", "with", "urlopen", "(", "'http://dependencywalker.com/depends22_x64.zip'", ")", "as", "fp", ":", "data", "=", "fp", ".", "read", "(", ")", "logger", ".", "info", "(", "'Extracting Dependency Walker to \"{}\"'", ".", "format", "(", "temp_exe", ")", ")", "with", "zipfile", ".", "ZipFile", "(", "io", ".", "BytesIO", "(", "data", ")", ")", "as", "fp", ":", "with", "fp", ".", "open", "(", "'depends.exe'", ")", "as", "src", ":", "with", "open", "(", "temp_exe", ",", "'wb'", ")", "as", "dst", ":", "shutil", ".", "copyfileobj", "(", "src", ",", "dst", ")", "with", "fp", ".", "open", "(", "'depends.dll'", ")", "as", "src", ":", "with", "open", "(", "temp_dll", ",", "'wb'", ")", "as", "dst", ":", "shutil", ".", "copyfileobj", "(", "src", ",", "dst", ")", "return", "temp_exe" ]
Checks if `depends.exe` is in the system PATH. If not, it will be downloaded and extracted to a temporary directory. Note that the file will not be deleted afterwards. Returns the path to the Dependency Walker executable.
[ "Checks", "if", "depends", ".", "exe", "is", "in", "the", "system", "PATH", ".", "If", "not", "it", "will", "be", "downloaded", "and", "extracted", "to", "a", "temporary", "directory", ".", "Note", "that", "the", "file", "will", "not", "be", "deleted", "afterwards", "." ]
python
train
35.685714
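A usage sketch; the /c and /oc: switches are Dependency Walker's documented console-mode options, and the target path is hypothetical:

import subprocess

depends_exe = get_dependency_walker()
subprocess.call([depends_exe, '/c', '/oc:deps.csv', r'C:\target\my.dll'])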
yangl1996/libpagure
libpagure/libpagure.py
https://github.com/yangl1996/libpagure/blob/dd96ed29142407463790c66ed321984a6ea7465a/libpagure/libpagure.py#L138-L151
def list_tags(self, pattern=None): """ List all tags made on this project. :param pattern: filters the starting letters of the return value :return: """ request_url = "{}tags".format(self.create_basic_url()) params = None if pattern: params = {'pattern': pattern} return_value = self._call_api(request_url, params=params) return return_value['tags']
[ "def", "list_tags", "(", "self", ",", "pattern", "=", "None", ")", ":", "request_url", "=", "\"{}tags\"", ".", "format", "(", "self", ".", "create_basic_url", "(", ")", ")", "params", "=", "None", "if", "pattern", ":", "params", "=", "{", "'pattern'", ":", "pattern", "}", "return_value", "=", "self", ".", "_call_api", "(", "request_url", ",", "params", "=", "params", ")", "return", "return_value", "[", "'tags'", "]" ]
List all tags made on this project. :param pattern: filters the starting letters of the return value :return:
[ "List", "all", "tags", "made", "on", "this", "project", ".", ":", "param", "pattern", ":", "filters", "the", "starting", "letters", "of", "the", "return", "value", ":", "return", ":" ]
python
train
30.714286
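A usage sketch; the Pagure constructor keyword is assumed from libpagure's usual interface:

from libpagure import Pagure

pg = Pagure(pagure_repository="my-project")
print(pg.list_tags())             # every tag on the project
print(pg.list_tags(pattern="v"))  # only tags starting with 'v'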
openp2pdesign/makerlabs
makerlabs/techshop_ws.py
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/techshop_ws.py#L50-L191
def get_labs(format):
    """Gets Techshop data from techshop.ws."""

    techshops_soup = data_from_techshop_ws(techshop_us_url)
    techshops = {}

    # Load all the TechShops
    # By first parsing the html
    data = techshops_soup.findAll('div', attrs={'id': 'main-content'})
    for element in data:
        links = element.findAll('a')
        hrefs = {}
        for k, a in enumerate(links):
            if "contact" not in a['href']:
                hrefs[k] = a['href']
        for k, v in hrefs.iteritems():
            if "http://techshop.ws/" not in v:
                hrefs[k] = "http://techshop.ws/" + v
            else:
                hrefs[k] = v
        for k, v in hrefs.iteritems():
            if "http://techshop.com/" in v:
                hrefs[k] = v.replace("http://techshop.com/", "")

    # Remove duplicate pages
    hr = []
    for key, value in hrefs.iteritems():
        if value not in hr:
            hr.append(value)
    hrefs = hr

    # Check all pages
    for page in hrefs:
        data = data_from_techshop_ws(page)
        current_lab = Techshop()

        name = data.title.contents[0].split('-- ')[1].encode('utf-8')
        if "TechShop" not in name:
            name = "TechShop " + name
        current_lab.name = name
        current_lab.slug = name
        current_lab.url = page

        # Find Facebook and Twitter links
        current_lab.links = {"facebook": "", "twitter": ""}
        page_links = data.findAll('a')
        for link in page_links:
            if link.has_attr("href"):
                if "facebook" in link.attrs["href"]:
                    current_lab.links["facebook"] = link.attrs["href"]
                if "twitter" in link.attrs["href"]:
                    current_lab.links["twitter"] = link.attrs["href"]

        # Find the coordinates by analysing the embedded google map
        iframes = data.findAll('iframe')
        if len(iframes) != 0:
            for iframe in iframes:
                embed_url = iframe.attrs["src"]
                if "google" in embed_url:
                    two_d = embed_url.find("2d")
                    three_d = embed_url.find("3d")
                    longitude = embed_url[two_d:].split('!')[0]
                    latitude = embed_url[three_d:].split('!')[0]
                    longitude = longitude[2:]
                    latitude = latitude[2:]
        # ... or the link to google map
        else:
            page_links = data.findAll('a')
            for link in page_links:
                # one case...
                if "maps.google.com/" in link.attrs["href"]:
                    embed_url = link.attrs["href"]
                    if "ll=" in embed_url:
                        first_string = embed_url.split('&sspn')[0]
                        coordinates = first_string.split('ll=')[1]
                        latitude = coordinates.split(',')[0]
                        longitude = coordinates.split(',')[1]
                # ... another case
                elif "www.google.com/maps" in link.attrs["href"]:
                    embed_url = link.attrs["href"]
                    if "1d" in embed_url:
                        one_d = embed_url.find("1d")
                        two_d = embed_url.find("2d")
                        longitude = embed_url[one_d:].split('!')[0]
                        latitude = embed_url[two_d:].split('!')[0]
                        longitude = longitude[2:]
                        latitude = latitude[2:]

        current_lab.latitude = latitude
        current_lab.longitude = longitude
        current_lab.continent = "North America"
        current_lab.country_code = "USA"
        current_lab.country = "United States of America"
        location = geolocator.reverse((latitude, longitude))
        if "city" in location.raw["address"]:
            current_lab.county = location.raw["address"]["city"].encode(
                'utf-8')
        if "county" in location.raw["address"]:
            current_lab.county = location.raw["address"]["county"].encode(
                'utf-8')
        if "state" in location.raw["address"]:
            current_lab.state = location.raw["address"]["state"].encode(
                'utf-8')
        if "postcode" in location.raw["address"]:
            current_lab.postal_code = location.raw["address"][
                "postcode"].encode('utf-8')
        current_lab.address_1 = location.address.encode('utf-8')

        # Add the lab to the list
        techshops[current_lab.slug] = current_lab

    # Return a dictionary / json
    if format.lower() == "dict" or format.lower() == "json":
        output = {}
        for j in techshops:
            output[j] = techshops[j].__dict__
    # Return a geojson
    elif format.lower() == "geojson" or format.lower() == "geo":
        labs_list = []
        for l in techshops:
            single = techshops[l].__dict__
            single_lab = Feature(
                type="Feature",
                geometry=Point((single["latitude"], single["longitude"])),
                properties=single)
            labs_list.append(single_lab)
        output = dumps(FeatureCollection(labs_list))
    # Return a Pandas DataFrame
    elif format.lower() == "pandas" or format.lower() == "dataframe":
        output = {}
        for j in techshops:
            output[j] = techshops[j].__dict__
        # Transform the dict into a Pandas DataFrame
        output = pd.DataFrame.from_dict(output)
        output = output.transpose()
    # Return an object
    elif format.lower() == "object" or format.lower() == "obj":
        output = techshops
    # Default: return an object
    else:
        output = techshops

    # Return a proper json
    if format.lower() == "json":
        output = json.dumps(output)

    return output
[ "def", "get_labs", "(", "format", ")", ":", "techshops_soup", "=", "data_from_techshop_ws", "(", "techshop_us_url", ")", "techshops", "=", "{", "}", "# Load all the TechShops", "# By first parsing the html", "data", "=", "techshops_soup", ".", "findAll", "(", "'div'", ",", "attrs", "=", "{", "'id'", ":", "'main-content'", "}", ")", "for", "element", "in", "data", ":", "links", "=", "element", ".", "findAll", "(", "'a'", ")", "hrefs", "=", "{", "}", "for", "k", ",", "a", "in", "enumerate", "(", "links", ")", ":", "if", "\"contact\"", "not", "in", "a", "[", "'href'", "]", ":", "hrefs", "[", "k", "]", "=", "a", "[", "'href'", "]", "for", "k", ",", "v", "in", "hrefs", ".", "iteritems", "(", ")", ":", "if", "\"http://techshop.ws/\"", "not", "in", "v", ":", "hrefs", "[", "k", "]", "=", "\"http://techshop.ws/\"", "+", "v", "else", ":", "hrefs", "[", "k", "]", "=", "v", "for", "k", ",", "v", "in", "hrefs", ".", "iteritems", "(", ")", ":", "if", "\"http://techshop.com/\"", "in", "v", ":", "hrefs", "[", "k", "]", "=", "v", ".", "replace", "(", "\"http://techshop.com/\"", ",", "\"\"", ")", "# Remove duplicate pages", "hr", "=", "[", "]", "for", "key", ",", "value", "in", "hrefs", ".", "iteritems", "(", ")", ":", "if", "value", "not", "in", "hr", ":", "hr", ".", "append", "(", "value", ")", "hrefs", "=", "hr", "# Check all pages", "for", "page", "in", "hrefs", ":", "data", "=", "data_from_techshop_ws", "(", "page", ")", "current_lab", "=", "Techshop", "(", ")", "name", "=", "data", ".", "title", ".", "contents", "[", "0", "]", ".", "split", "(", "'-- '", ")", "[", "1", "]", ".", "encode", "(", "'utf-8'", ")", "if", "\"TechShop\"", "not", "in", "name", ":", "name", "=", "\"TechShop \"", "+", "name", "current_lab", ".", "name", "=", "name", "current_lab", ".", "slug", "=", "name", "current_lab", ".", "url", "=", "page", "# Find Facebook and Twitter links", "current_lab", ".", "links", "=", "{", "\"facebook\"", ":", "\"\"", ",", "\"twitter\"", ":", "\"\"", "}", "page_links", "=", "data", ".", "findAll", "(", "'a'", ")", "for", "link", "in", "page_links", ":", "if", "link", ".", "has_attr", "(", "\"href\"", ")", ":", "if", "\"facebook\"", "in", "link", ".", "attrs", "[", "\"href\"", "]", ":", "current_lab", ".", "links", "[", "\"facebook\"", "]", "=", "link", ".", "attrs", "[", "\"href\"", "]", "if", "\"twitter\"", "in", "link", ".", "attrs", "[", "\"href\"", "]", ":", "current_lab", ".", "links", "[", "\"twitter\"", "]", "=", "link", ".", "attrs", "[", "\"href\"", "]", "# Find the coordinates by analysing the embedded google map", "iframes", "=", "data", ".", "findAll", "(", "'iframe'", ")", "if", "len", "(", "iframes", ")", "!=", "0", ":", "for", "iframe", "in", "iframes", ":", "embed_url", "=", "iframe", ".", "attrs", "[", "\"src\"", "]", "if", "\"google\"", "in", "embed_url", ":", "two_d", "=", "embed_url", ".", "find", "(", "\"2d\"", ")", "three_d", "=", "embed_url", ".", "find", "(", "\"3d\"", ")", "longitude", "=", "embed_url", "[", "two_d", ":", "]", ".", "split", "(", "'!'", ")", "[", "0", "]", "latitude", "=", "embed_url", "[", "three_d", ":", "]", ".", "split", "(", "'!'", ")", "[", "0", "]", "longitude", "=", "longitude", "[", "2", ":", "]", "latitude", "=", "latitude", "[", "2", ":", "]", "# ... or the link to google map", "else", ":", "page_links", "=", "data", ".", "findAll", "(", "'a'", ")", "for", "link", "in", "page_links", ":", "# one case...", "if", "\"maps.google.com/\"", "in", "link", ".", "attrs", "[", "\"href\"", "]", ":", "embed_url", "=", "link", ".", "attrs", "[", "\"href\"", "]", "if", "\"ll=\"", "in", "embed_url", ":", "first_string", "=", "embed_url", ".", "split", "(", "'&sspn'", ")", "[", "0", "]", "coordinates", "=", "first_string", ".", "split", "(", "'ll='", ")", "[", "1", "]", "latitude", "=", "coordinates", ".", "split", "(", "','", ")", "[", "0", "]", "longitude", "=", "coordinates", ".", "split", "(", "','", ")", "[", "1", "]", "# ... another case", "elif", "\"www.google.com/maps\"", "in", "link", ".", "attrs", "[", "\"href\"", "]", ":", "embed_url", "=", "link", ".", "attrs", "[", "\"href\"", "]", "if", "\"1d\"", "in", "embed_url", ":", "one_d", "=", "embed_url", ".", "find", "(", "\"1d\"", ")", "two_d", "=", "embed_url", ".", "find", "(", "\"2d\"", ")", "longitude", "=", "embed_url", "[", "one_d", ":", "]", ".", "split", "(", "'!'", ")", "[", "0", "]", "latitude", "=", "embed_url", "[", "two_d", ":", "]", ".", "split", "(", "'!'", ")", "[", "0", "]", "longitude", "=", "longitude", "[", "2", ":", "]", "latitude", "=", "latitude", "[", "2", ":", "]", "current_lab", ".", "latitude", "=", "latitude", "current_lab", ".", "longitude", "=", "longitude", "current_lab", ".", "continent", "=", "\"North America\"", "current_lab", ".", "country_code", "=", "\"USA\"", "current_lab", ".", "country", "=", "\"United States of America\"", "location", "=", "geolocator", ".", "reverse", "(", "(", "latitude", ",", "longitude", ")", ")", "if", "\"city\"", "in", "location", ".", "raw", "[", "\"address\"", "]", ":", "current_lab", ".", "county", "=", "location", ".", "raw", "[", "\"address\"", "]", "[", "\"city\"", "]", ".", "encode", "(", "'utf-8'", ")", "if", "\"county\"", "in", "location", ".", "raw", "[", "\"address\"", "]", ":", "current_lab", ".", "county", "=", "location", ".", "raw", "[", "\"address\"", "]", "[", "\"county\"", "]", ".", "encode", "(", "'utf-8'", ")", "if", "\"state\"", "in", "location", ".", "raw", "[", "\"address\"", "]", ":", "current_lab", ".", "state", "=", "location", ".", "raw", "[", "\"address\"", "]", "[", "\"state\"", "]", ".", "encode", "(", "'utf-8'", ")", "if", "\"postcode\"", "in", "location", ".", "raw", "[", "\"address\"", "]", ":", "current_lab", ".", "postal_code", "=", "location", ".", "raw", "[", "\"address\"", "]", "[", "\"postcode\"", "]", ".", "encode", "(", "'utf-8'", ")", "current_lab", ".", "address_1", "=", "location", ".", "address", ".", "encode", "(", "'utf-8'", ")", "# Add the lab to the list", "techshops", "[", "current_lab", ".", "slug", "]", "=", "current_lab", "# Return a dictionary / json", "if", "format", ".", "lower", "(", ")", "==", "\"dict\"", "or", "format", ".", "lower", "(", ")", "==", "\"json\"", ":", "output", "=", "{", "}", "for", "j", "in", "techshops", ":", "output", "[", "j", "]", "=", "techshops", "[", "j", "]", ".", "__dict__", "# Return a geojson", "elif", "format", ".", "lower", "(", ")", "==", "\"geojson\"", "or", "format", ".", "lower", "(", ")", "==", "\"geo\"", ":", "labs_list", "=", "[", "]", "for", "l", "in", "techshops", ":", "single", "=", "techshops", "[", "l", "]", ".", "__dict__", "single_lab", "=", "Feature", "(", "type", "=", "\"Feature\"", ",", "geometry", "=", "Point", "(", "(", "single", "[", "\"latitude\"", "]", ",", "single", "[", "\"longitude\"", "]", ")", ")", ",", "properties", "=", "single", ")", "labs_list", ".", "append", "(", "single_lab", ")", "output", "=", "dumps", "(", "FeatureCollection", "(", "labs_list", ")", ")", "# Return a Pandas DataFrame", "elif", "format", ".", "lower", "(", ")", "==", "\"pandas\"", "or", "format", ".", "lower", "(", ")", "==", "\"dataframe\"", ":", "output", "=", "{", "}", "for", "j", "in", "techshops", ":", "output", "[", "j", "]", "=", "techshops", "[", "j", "]", ".", "__dict__", "# Transform the dict into a Pandas DataFrame", "output", "=", "pd", ".", "DataFrame", ".", "from_dict", "(", "output", ")", "output", "=", "output", ".", "transpose", "(", ")", "# Return an object", "elif", "format", ".", "lower", "(", ")", "==", "\"object\"", "or", "format", ".", "lower", "(", ")", "==", "\"obj\"", ":", "output", "=", "techshops", "# Default: return an object", "else", ":", "output", "=", "techshops", "# Return a proper json", "if", "format", ".", "lower", "(", ")", "==", "\"json\"", ":", "output", "=", "json", ".", "dumps", "(", "output", ")", "return", "output" ]
Gets Techshop data from techshop.ws.
[ "Gets", "Techshop", "data", "from", "techshop", ".", "ws", "." ]
python
train
39.725352
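The format switch at the end of get_labs supports several spellings, summarized here:

labs = get_labs('dict')      # plain dict keyed by lab slug
geo = get_labs('geojson')    # GeoJSON FeatureCollection string
df = get_labs('pandas')      # pandas DataFrame, one row per lab
raw = get_labs('object')     # the Techshop objects themselves
js = get_labs('json')        # the dict form serialized via json.dumps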
noahbenson/neuropythy
neuropythy/util/core.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/util/core.py#L222-L266
def to_affine(aff, dims=None): ''' to_affine(None) yields None. to_affine(data) yields an affine transformation matrix equivalent to that given in data. Such a matrix may be specified either as (matrix, offset_vector), as an (n+1)x(n+1) matrix, or, as an n x (n+1) matrix. to_affine(data, dims) additionally requires that the dimensionality of the data be dims; meaning that the returned matrix will be of size (dims+1) x (dims+1). ''' if aff is None: return None if isinstance(aff, _tuple_type): # allowed to be (mtx, offset) if (len(aff) != 2 or not pimms.is_matrix(aff[0], 'real') or not pimms.is_vector(aff[1], 'real')): raise ValueError('affine transforms must be matrices or (mtx,offset) tuples') mtx = np.asarray(aff[0]) off = np.asarray(aff[1]) if dims is not None: if mtx.shape[0] != dims or mtx.shape[1] != dims: raise ValueError('%dD affine matrix must be %d x %d' % (dims,dims,dims)) if off.shape[0] != dims: raise ValueError('%dD affine offset must have length %d' % (dims,dims)) else: dims = off.shape[0] if mtx.shape[0] != dims or mtx.shape[1] != dims: raise ValueError('with offset size=%d, matrix must be %d x %d' % (dims,dims,dims)) aff = np.zeros((dims+1,dims+1), dtype=np.float) aff[dims,dims] = 1 aff[0:dims,0:dims] = mtx aff[0:dims,dims] = off return pimms.imm_array(aff) if not pimms.is_matrix(aff, 'real'): raise ValueError('affine transforms must be matrices or (mtx, offset) tuples') aff = np.asarray(aff) if dims is None: dims = aff.shape[1] - 1 if aff.shape[0] == dims: lastrow = np.zeros((1,dims+1)) lastrow[0,-1] = 1 aff = np.concatenate((aff, lastrow)) if aff.shape[1] != dims+1 or aff.shape[0] != dims+1: arg = (dims, dims,dims+1, dims+1,dims+1) raise ValueError('%dD affine matrix must be %dx%d or %dx%d' % arg) return aff
[ "def", "to_affine", "(", "aff", ",", "dims", "=", "None", ")", ":", "if", "aff", "is", "None", ":", "return", "None", "if", "isinstance", "(", "aff", ",", "_tuple_type", ")", ":", "# allowed to be (mtx, offset)", "if", "(", "len", "(", "aff", ")", "!=", "2", "or", "not", "pimms", ".", "is_matrix", "(", "aff", "[", "0", "]", ",", "'real'", ")", "or", "not", "pimms", ".", "is_vector", "(", "aff", "[", "1", "]", ",", "'real'", ")", ")", ":", "raise", "ValueError", "(", "'affine transforms must be matrices or (mtx,offset) tuples'", ")", "mtx", "=", "np", ".", "asarray", "(", "aff", "[", "0", "]", ")", "off", "=", "np", ".", "asarray", "(", "aff", "[", "1", "]", ")", "if", "dims", "is", "not", "None", ":", "if", "mtx", ".", "shape", "[", "0", "]", "!=", "dims", "or", "mtx", ".", "shape", "[", "1", "]", "!=", "dims", ":", "raise", "ValueError", "(", "'%dD affine matrix must be %d x %d'", "%", "(", "dims", ",", "dims", ",", "dims", ")", ")", "if", "off", ".", "shape", "[", "0", "]", "!=", "dims", ":", "raise", "ValueError", "(", "'%dD affine offset must have length %d'", "%", "(", "dims", ",", "dims", ")", ")", "else", ":", "dims", "=", "off", ".", "shape", "[", "0", "]", "if", "mtx", ".", "shape", "[", "0", "]", "!=", "dims", "or", "mtx", ".", "shape", "[", "1", "]", "!=", "dims", ":", "raise", "ValueError", "(", "'with offset size=%d, matrix must be %d x %d'", "%", "(", "dims", ",", "dims", ",", "dims", ")", ")", "aff", "=", "np", ".", "zeros", "(", "(", "dims", "+", "1", ",", "dims", "+", "1", ")", ",", "dtype", "=", "np", ".", "float", ")", "aff", "[", "dims", ",", "dims", "]", "=", "1", "aff", "[", "0", ":", "dims", ",", "0", ":", "dims", "]", "=", "mtx", "aff", "[", "0", ":", "dims", ",", "dims", "]", "=", "off", "return", "pimms", ".", "imm_array", "(", "aff", ")", "if", "not", "pimms", ".", "is_matrix", "(", "aff", ",", "'real'", ")", ":", "raise", "ValueError", "(", "'affine transforms must be matrices or (mtx, offset) tuples'", ")", "aff", "=", "np", ".", "asarray", "(", "aff", ")", "if", "dims", "is", "None", ":", "dims", "=", "aff", ".", "shape", "[", "1", "]", "-", "1", "if", "aff", ".", "shape", "[", "0", "]", "==", "dims", ":", "lastrow", "=", "np", ".", "zeros", "(", "(", "1", ",", "dims", "+", "1", ")", ")", "lastrow", "[", "0", ",", "-", "1", "]", "=", "1", "aff", "=", "np", ".", "concatenate", "(", "(", "aff", ",", "lastrow", ")", ")", "if", "aff", ".", "shape", "[", "1", "]", "!=", "dims", "+", "1", "or", "aff", ".", "shape", "[", "0", "]", "!=", "dims", "+", "1", ":", "arg", "=", "(", "dims", ",", "dims", ",", "dims", "+", "1", ",", "dims", "+", "1", ",", "dims", "+", "1", ")", "raise", "ValueError", "(", "'%dD affine matrix must be %dx%d or %dx%d'", "%", "arg", ")", "return", "aff" ]
to_affine(None) yields None. to_affine(data) yields an affine transformation matrix equivalent to that given in data. Such a matrix may be specified either as (matrix, offset_vector), as an (n+1)x(n+1) matrix, or, as an n x (n+1) matrix. to_affine(data, dims) additionally requires that the dimensionality of the data be dims; meaning that the returned matrix will be of size (dims+1) x (dims+1).
[ "to_affine", "(", "None", ")", "yields", "None", ".", "to_affine", "(", "data", ")", "yields", "an", "affine", "transformation", "matrix", "equivalent", "to", "that", "given", "in", "data", ".", "Such", "a", "matrix", "may", "be", "specified", "either", "as", "(", "matrix", "offset_vector", ")", "as", "an", "(", "n", "+", "1", ")", "x", "(", "n", "+", "1", ")", "matrix", "or", "as", "an", "n", "x", "(", "n", "+", "1", ")", "matrix", ".", "to_affine", "(", "data", "dims", ")", "additionally", "requires", "that", "the", "dimensionality", "of", "the", "data", "be", "dims", ";", "meaning", "that", "the", "returned", "matrix", "will", "be", "of", "size", "(", "dims", "+", "1", ")", "x", "(", "dims", "+", "1", ")", "." ]
python
train
46.2
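The three accepted input forms from the docstring, shown for a single 2D transform:

import numpy as np

a1 = to_affine((np.eye(2), [5.0, -3.0]))             # (matrix, offset) tuple
a2 = to_affine([[1, 0, 5], [0, 1, -3]])              # n x (n+1) matrix
a3 = to_affine([[1, 0, 5], [0, 1, -3], [0, 0, 1]])   # (n+1) x (n+1) matrix
# all three yield the homogeneous matrix [[1, 0, 5], [0, 1, -3], [0, 0, 1]]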
tanghaibao/jcvi
jcvi/utils/orderedcollections.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/orderedcollections.py#L386-L392
def find_ge(self, item): 'Return first item with a key >= equal to item. Raise ValueError if not found' k = self._key(item) i = bisect_left(self._keys, k) if i != len(self): return self._items[i] raise ValueError('No item found with key at or above: %r' % (k,))
[ "def", "find_ge", "(", "self", ",", "item", ")", ":", "k", "=", "self", ".", "_key", "(", "item", ")", "i", "=", "bisect_left", "(", "self", ".", "_keys", ",", "k", ")", "if", "i", "!=", "len", "(", "self", ")", ":", "return", "self", ".", "_items", "[", "i", "]", "raise", "ValueError", "(", "'No item found with key at or above: %r'", "%", "(", "k", ",", ")", ")" ]
Return first item with a key >= item. Raise ValueError if not found
[ "Return", "first", "item", "with", "a", "key", ">", "=", "item", ".", "Raise", "ValueError", "if", "not", "found" ]
python
train
44
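find_ge follows the classic SortedCollection recipe; this sketch assumes the containing class keeps that recipe's constructor signature:

sc = SortedCollection([5, 1, 9, 3], key=lambda x: x)
print(sc.find_ge(4))    # 5, the first item whose key is >= 4
print(sc.find_ge(9))    # 9, exact matches count
sc.find_ge(10)          # raises ValueError: nothing at or above key 10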
OpenKMIP/PyKMIP
kmip/core/messages/payloads/archive.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/messages/payloads/archive.py#L230-L258
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): """ Write the data encoding the Archive response payload to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the data attribute is not defined. """ local_stream = utils.BytearrayStream() if self._unique_identifier: self._unique_identifier.write( local_stream, kmip_version=kmip_version ) self.length = local_stream.length() super(ArchiveResponsePayload, self).write( output_stream, kmip_version=kmip_version ) output_stream.write(local_stream.buffer)
[ "def", "write", "(", "self", ",", "output_stream", ",", "kmip_version", "=", "enums", ".", "KMIPVersion", ".", "KMIP_1_0", ")", ":", "local_stream", "=", "utils", ".", "BytearrayStream", "(", ")", "if", "self", ".", "_unique_identifier", ":", "self", ".", "_unique_identifier", ".", "write", "(", "local_stream", ",", "kmip_version", "=", "kmip_version", ")", "self", ".", "length", "=", "local_stream", ".", "length", "(", ")", "super", "(", "ArchiveResponsePayload", ",", "self", ")", ".", "write", "(", "output_stream", ",", "kmip_version", "=", "kmip_version", ")", "output_stream", ".", "write", "(", "local_stream", ".", "buffer", ")" ]
Write the data encoding the Archive response payload to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the data attribute is not defined.
[ "Write", "the", "data", "encoding", "the", "Archive", "response", "payload", "to", "a", "stream", "." ]
python
test
35.689655
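A serialization sketch; the unique_identifier constructor keyword is assumed from the attribute the method writes:

from kmip.core import utils

payload = ArchiveResponsePayload(unique_identifier='49a1ca88-6bea-4fa2-b570-a2b6a9c2b5e1')
stream = utils.BytearrayStream()
payload.write(stream)
print(len(stream.buffer))  # TTLV header plus the encoded identifier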
manns/pyspread
pyspread/src/lib/vlc.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L243-L248
def _Cobject(cls, ctype): """(INTERNAL) New instance from ctypes. """ o = object.__new__(cls) o._as_parameter_ = ctype return o
[ "def", "_Cobject", "(", "cls", ",", "ctype", ")", ":", "o", "=", "object", ".", "__new__", "(", "cls", ")", "o", ".", "_as_parameter_", "=", "ctype", "return", "o" ]
(INTERNAL) New instance from ctypes.
[ "(", "INTERNAL", ")", "New", "instance", "from", "ctypes", "." ]
python
train
23.666667
ladybug-tools/ladybug
ladybug/designday.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/designday.py#L1341-L1355
def radiation_values(self, location, timestep=1):
        """Lists of direct normal, diffuse horiz, and global horiz rad at each timestep.
        """
        # create sunpath and get altitude at every timestep of the design day
        sp = Sunpath.from_location(location)
        altitudes = []
        dates = self._get_datetimes(timestep)
        for t_date in dates:
            sun = sp.calculate_sun_from_date_time(t_date)
            altitudes.append(sun.altitude)
        dir_norm, diff_horiz = ashrae_clear_sky(
            altitudes, self._month, self._clearness)
        glob_horiz = [dhr + dnr * math.sin(math.radians(alt)) for
                      alt, dnr, dhr in zip(altitudes, dir_norm, diff_horiz)]
        return dir_norm, diff_horiz, glob_horiz
[ "def", "radiation_values", "(", "self", ",", "location", ",", "timestep", "=", "1", ")", ":", "# create sunpath and get altitude at every timestep of the design day", "sp", "=", "Sunpath", ".", "from_location", "(", "location", ")", "altitudes", "=", "[", "]", "dates", "=", "self", ".", "_get_datetimes", "(", "timestep", ")", "for", "t_date", "in", "dates", ":", "sun", "=", "sp", ".", "calculate_sun_from_date_time", "(", "t_date", ")", "altitudes", ".", "append", "(", "sun", ".", "altitude", ")", "dir_norm", ",", "diff_horiz", "=", "ashrae_clear_sky", "(", "altitudes", ",", "self", ".", "_month", ",", "self", ".", "_clearness", ")", "glob_horiz", "=", "[", "dhr", "+", "dnr", "*", "math", ".", "sin", "(", "math", ".", "radians", "(", "alt", ")", ")", "for", "alt", ",", "dnr", ",", "dhr", "in", "zip", "(", "altitudes", ",", "dir_norm", ",", "diff_horiz", ")", "]", "return", "dir_norm", ",", "diff_horiz", ",", "glob_horiz" ]
Lists of direct normal, diffuse horiz, and global horiz rad at each timestep.
[ "Lists", "of", "direct", "normal", "diffuse", "horiz", "and", "global", "horiz", "rad", "at", "each", "timestep", "." ]
python
train
50.066667
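The global-horizontal value in the list comprehension is just `ghr = dhr + dnr * sin(altitude)`; a small self-contained check with made-up irradiance values (not ladybug's API):

import math

altitude = 60.0    # solar altitude in degrees (assumed)
dir_norm = 800.0   # direct normal irradiance, W/m2 (assumed)
diff_horiz = 100.0 # diffuse horizontal irradiance, W/m2 (assumed)

# Project the direct-normal component onto the horizontal plane, then add diffuse.
glob_horiz = diff_horiz + dir_norm * math.sin(math.radians(altitude))
print(round(glob_horiz, 1))  # -> 792.8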
siemens/django-dingos
dingos/templatetags/dingos_tags.py
https://github.com/siemens/django-dingos/blob/7154f75b06d2538568e2f2455a76f3d0db0b7d70/dingos/templatetags/dingos_tags.py#L36-L161
def node_indent(elt_name, node_id, fact_term, attribute, highlight_node=None):
    """
    This tag uses a table structure to display indentation of fact terms
    based on the information contained in the node identifier.

    This tag and the closing 'node_indent_end' tag must enclose
    the value to be displayed after the display of the fact term.
    """

    # Some colors to choose from:
    color_dict = {0: {0: '#004C80',  # blueish
                      1: '#005C99',
                      2: '#006BB2',
                      3: '#007ACC',
                      4: '#008AE6',
                      5: '#0099FF',
                      6: '#19A3FF',
                      7: '#33ADFF',
                      8: '#4DB8FF',
                      9: '#66C2FF',
                      10: '#80CCFF',
                      11: '#99D6FF',
                      12: '#B2E0FF',
                      13: '#CCEBFF',
                      14: '#E6F5FF'},
                  2: {0: '#008000',  # greenish
                      1: '#009900',
                      2: '#00B200',
                      3: '#00CC00',
                      4: '#00E600',
                      5: '#00FF00',
                      6: '#19FF19',
                      7: '#33FF33',
                      8: '#4DFF4D',
                      9: '#66FF66',
                      10: '#80FF80',
                      11: '#99FF99',
                      12: '#B2FFB2',
                      13: '#CCFFCC',
                      14: '#E6FFE6'},
                  3: {0: '#804C80',  # pinkish
                      1: '#995C99',
                      2: '#B26BB2',
                      3: '#CC7ACC',
                      4: '#E68AE6',
                      5: '#FF99FF',
                      6: '#FFA3FF',
                      7: '#FFADFF',
                      8: '#FFB8FF',
                      9: '#FFC2FF',
                      10: '#FFCCFF',
                      11: '#FFD6FF',
                      12: '#FFE0FF',
                      13: '#FFEBFF',
                      14: '#FFF5FF',
                      },
                  1: {0: "#5C3D99",  # violetish
                      1: "#6B47B2",
                      2: "#7A52CC",
                      3: "#8A5CE6",
                      4: "#9966FF",
                      5: "#A375FF",
                      6: "#AD85FF",
                      7: "#B894FF",
                      8: "#C2A3FF",
                      9: "#CCB2FF",
                      10: "#D6C2FF",
                      11: "#E0D1FF",
                      12: "#EBE0FF",
                      13: "#F5F0FF",
                      14: "#FFFFFF"}
                  }

    indents = 100

    node_ids = node_id.split(':')
    fact_components = fact_term.split('/')
    if len(fact_components) == 1 and fact_components[0] == '':
        fact_components = []
    if attribute:
        fact_components.append("@%s" % attribute)

    fact_components = dict([(x, fact_components[x]) for x in range(0, len(fact_components))])

    #node_ids.reverse()

    result = []
    counter = 0

    for node in node_ids:
        is_attr = False
        if len(node) >= 1:
            if node[0] == 'A':
                is_attr = True
                node = node[1:]
        if len(node) > 0:
            node_nr = int(node)
        else:
            node_nr = 0
        if is_attr:
            node_mod = 2
        else:
            node_mod = node_nr % 2

        if is_attr:
            result.append("<%(elt_name)s style='background: %(color)s'>%(fact_term_component)s</%(elt_name)s>" % {
                'elt_name': elt_name,
                'fact_term_component': fact_components.get(counter, ''),
                'color': color_dict[2][max(14 - counter, 4)]})
        else:
            result.append(
                "<%(elt_name)s style='width:1px; margin: 0px ; background : %(color)s'>%(fact_term_component)s</%(elt_name)s>" % {
                    'elt_name': elt_name,
                    'color': color_dict[node_mod][max(14 - counter, 4)],
                    'fact_term_component': fact_components.get(counter, '')})
        counter += 1

    highlight = "style='background: #FF0000;'" if highlight_node == node_id else None
    result.append("<%(elt_name)s colspan='%(colspan)s' %(highlight)s>" % {'elt_name': elt_name,
                                                                          'colspan': (indents - counter),
                                                                          'highlight': highlight})

    return "".join(result)
[ "def", "node_indent", "(", "elt_name", ",", "node_id", ",", "fact_term", ",", "attribute", ",", "highlight_node", "=", "None", ")", ":", "# Some colors to chose from:", "color_dict", "=", "{", "0", ":", "{", "0", ":", "'#004C80'", ",", "# blueish", "1", ":", "'#005C99'", ",", "2", ":", "'#006BB2'", ",", "3", ":", "'#007ACC'", ",", "4", ":", "'#008AE6'", ",", "5", ":", "'#0099FF'", ",", "6", ":", "'#19A3FF'", ",", "7", ":", "'#33ADFF'", ",", "8", ":", "'#4DB8FF'", ",", "9", ":", "'#66C2FF'", ",", "10", ":", "'#80CCFF'", ",", "11", ":", "'#99D6FF'", ",", "12", ":", "'#B2E0FF'", ",", "13", ":", "'#CCEBFF'", ",", "14", ":", "'#E6F5FF'", "}", ",", "2", ":", "{", "0", ":", "'#008000'", ",", "# greenish", "1", ":", "'#009900'", ",", "2", ":", "'#00B200'", ",", "3", ":", "'#00CC00'", ",", "4", ":", "'#00E600'", ",", "5", ":", "'#00FF00'", ",", "6", ":", "'#19FF19'", ",", "7", ":", "'#33FF33'", ",", "8", ":", "'#4DFF4D'", ",", "9", ":", "'#66FF66'", ",", "10", ":", "'#80FF80'", ",", "11", ":", "'#99FF99'", ",", "12", ":", "'#B2FFB2'", ",", "13", ":", "'#CCFFCC'", ",", "14", ":", "'#E6FFE6'", "}", ",", "3", ":", "{", "0", ":", "'#804C80'", ",", "# pinkish", "1", ":", "'#995C99'", ",", "2", ":", "'#B26BB2'", ",", "3", ":", "'#CC7ACC'", ",", "4", ":", "'#E68AE6'", ",", "5", ":", "'#FF99FF'", ",", "6", ":", "'#FFA3FF'", ",", "7", ":", "'#FFADFF'", ",", "8", ":", "'#FFB8FF'", ",", "9", ":", "'#FFC2FF'", ",", "10", ":", "'#FFCCFF'", ",", "11", ":", "'#FFD6FF'", ",", "12", ":", "'#FFE0FF'", ",", "13", ":", "'#FFEBFF'", ",", "14", ":", "'#FFF5FF'", ",", "}", ",", "1", ":", "{", "0", ":", "\"#5C3D99\"", ",", "# violetish", "1", ":", "\"#6B47B2\"", ",", "2", ":", "\"#7A52CC\"", ",", "3", ":", "\"#8A5CE6\"", ",", "4", ":", "\"#9966FF\"", ",", "5", ":", "\"#A375FF\"", ",", "6", ":", "\"#AD85FF\"", ",", "7", ":", "\"#B894FF\"", ",", "8", ":", "\"#C2A3FF\"", ",", "9", ":", "\"#CCB2FF\"", ",", "10", ":", "\"#D6C2FF\"", ",", "11", ":", "\"#E0D1FF\"", ",", "12", ":", "\"#EBE0FF\"", ",", "13", ":", "\"#F5F0FF\"", ",", "14", ":", "\"#FFFFFF\"", "}", "}", "indents", "=", "100", "node_ids", "=", "node_id", ".", "split", "(", "':'", ")", "fact_components", "=", "fact_term", ".", "split", "(", "'/'", ")", "if", "len", "(", "fact_components", ")", "==", "1", "and", "fact_components", "[", "0", "]", "==", "''", ":", "fact_components", "=", "[", "]", "if", "attribute", ":", "fact_components", ".", "append", "(", "\"@%s\"", "%", "attribute", ")", "fact_components", "=", "dict", "(", "[", "(", "x", ",", "fact_components", "[", "x", "]", ")", "for", "x", "in", "range", "(", "0", ",", "len", "(", "fact_components", ")", ")", "]", ")", "#node_ids.reverse()", "result", "=", "[", "]", "counter", "=", "0", "for", "node", "in", "node_ids", ":", "is_attr", "=", "False", "if", "len", "(", "node", ")", ">=", "1", ":", "if", "node", "[", "0", "]", "==", "'A'", ":", "is_attr", "=", "True", "node", "=", "node", "[", "1", ":", "]", "if", "len", "(", "node", ")", ">", "0", ":", "node_nr", "=", "int", "(", "node", ")", "else", ":", "node_nr", "=", "0", "if", "is_attr", ":", "node_mod", "=", "2", "else", ":", "node_mod", "=", "node_nr", "%", "2", "if", "is_attr", ":", "result", ".", "append", "(", "\"<%(elt_name)s style='background: %(color)s'>%(fact_term_component)s</%(elt_name)s>\"", "%", "{", "'elt_name'", ":", "elt_name", ",", "'fact_term_component'", ":", "fact_components", ".", "get", "(", "counter", ",", "''", ")", ",", "'color'", ":", "color_dict", "[", "2", "]", "[", "max", "(", "14", "-", "counter", ",", "4", ")", "]", "}", ")", 
"else", ":", "result", ".", "append", "(", "\"<%(elt_name)s style='width:1px; margin: 0px ; background : %(color)s'>%(fact_term_component)s</%(elt_name)s>\"", "%", "{", "'elt_name'", ":", "elt_name", ",", "'color'", ":", "color_dict", "[", "node_mod", "]", "[", "max", "(", "14", "-", "counter", ",", "4", ")", "]", ",", "'fact_term_component'", ":", "fact_components", ".", "get", "(", "counter", ",", "''", ")", "}", ")", "counter", "+=", "1", "highlight", "=", "\"style='background: #FF0000;'\"", "if", "highlight_node", "==", "node_id", "else", "None", "result", ".", "append", "(", "\"<%(elt_name)s colspan='%(colspan)s' %(highlight)s>\"", "%", "{", "'elt_name'", ":", "elt_name", ",", "'colspan'", ":", "(", "indents", "-", "counter", ")", ",", "'highlight'", ":", "highlight", "}", ")", "return", "\"\"", ".", "join", "(", "result", ")" ]
This tag uses a table structure to display indentation of fact terms based on the information contained in the node identifier. This tag and the closing 'node_indent_end' tag must enclose the value to be displayed after the display of the fact term.
[ "This", "tag", "uses", "a", "table", "structure", "to", "display", "indentation", "of", "fact", "terms", "based", "on", "the", "information", "contained", "in", "the", "node", "identifier", "." ]
python
train
33.706349
hollenstein/maspy
maspy/core.py
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L741-L757
def jsonHook(encoded):
    """Custom JSON decoder that allows construction of a new ``Ci`` instance
    from a decoded JSON object.

    :param encoded: a JSON decoded object literal (a dict)

    :returns: "encoded" or one of these objects: :class:`Ci`,
        :class:`MzmlProduct`, :class:`MzmlPrecursor`
    """
    if '__Ci__' in encoded:
        return Ci._fromJSON(encoded['__Ci__'])
    elif '__MzmlProduct__' in encoded:
        return MzmlProduct._fromJSON(encoded['__MzmlProduct__'])
    elif '__MzmlPrecursor__' in encoded:
        return MzmlPrecursor._fromJSON(encoded['__MzmlPrecursor__'])
    else:
        return encoded
[ "def", "jsonHook", "(", "encoded", ")", ":", "if", "'__Ci__'", "in", "encoded", ":", "return", "Ci", ".", "_fromJSON", "(", "encoded", "[", "'__Ci__'", "]", ")", "elif", "'__MzmlProduct__'", "in", "encoded", ":", "return", "MzmlProduct", ".", "_fromJSON", "(", "encoded", "[", "'__MzmlProduct__'", "]", ")", "elif", "'__MzmlPrecursor__'", "in", "encoded", ":", "return", "MzmlPrecursor", ".", "_fromJSON", "(", "encoded", "[", "'__MzmlPrecursor__'", "]", ")", "else", ":", "return", "encoded" ]
Custom JSON decoder that allows construction of a new ``Ci`` instance
from a decoded JSON object.

:param encoded: a JSON decoded object literal (a dict)

:returns: "encoded" or one of these objects: :class:`Ci`,
    :class:`MzmlProduct`, :class:`MzmlPrecursor`
[ "Custom", "JSON", "decoder", "that", "allows", "construction", "of", "a", "new", "Ci", "instance", "from", "a", "decoded", "JSON", "object", "." ]
python
train
40.058824
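Hooks like this are meant to be passed to the stdlib decoder via `object_hook`; a toy version with a made-up `__Point__` marker (the `Ci`/Mzml classes are not reproduced here):

import json

def json_hook(encoded):
    # Rebuild a tuple from dicts tagged with our marker; pass others through.
    if '__Point__' in encoded:
        return tuple(encoded['__Point__'])
    return encoded

data = json.loads('{"pos": {"__Point__": [1, 2]}}', object_hook=json_hook)
print(data)  # -> {'pos': (1, 2)}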
aws/aws-xray-sdk-python
aws_xray_sdk/core/lambda_launcher.py
https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/lambda_launcher.py#L125-L146
def _initialize_context(self, trace_header):
        """
        Create a facade segment based on environment variables
        set by AWS Lambda and initialize storage for subsegments.
        """
        sampled = None
        if not global_sdk_config.sdk_enabled():
            # Force subsequent subsegments to be disabled and turned into DummySegments.
            sampled = False
        elif trace_header.sampled == 0:
            sampled = False
        elif trace_header.sampled == 1:
            sampled = True
        segment = FacadeSegment(
            name='facade',
            traceid=trace_header.root,
            entityid=trace_header.parent,
            sampled=sampled,
        )
        setattr(self._local, 'segment', segment)
        setattr(self._local, 'entities', [])
[ "def", "_initialize_context", "(", "self", ",", "trace_header", ")", ":", "sampled", "=", "None", "if", "not", "global_sdk_config", ".", "sdk_enabled", "(", ")", ":", "# Force subsequent subsegments to be disabled and turned into DummySegments.", "sampled", "=", "False", "elif", "trace_header", ".", "sampled", "==", "0", ":", "sampled", "=", "False", "elif", "trace_header", ".", "sampled", "==", "1", ":", "sampled", "=", "True", "segment", "=", "FacadeSegment", "(", "name", "=", "'facade'", ",", "traceid", "=", "trace_header", ".", "root", ",", "entityid", "=", "trace_header", ".", "parent", ",", "sampled", "=", "sampled", ",", ")", "setattr", "(", "self", ".", "_local", ",", "'segment'", ",", "segment", ")", "setattr", "(", "self", ".", "_local", ",", "'entities'", ",", "[", "]", ")" ]
Create a facade segment based on environment variables set by AWS Lambda and initialize storage for subsegments.
[ "Create", "a", "facade", "segment", "based", "on", "environment", "variables", "set", "by", "AWS", "Lambda", "and", "initialize", "storage", "for", "subsegments", "." ]
python
train
35.181818
theno/fabsetup
fabsetup/fabfile/setup/revealjs.py
https://github.com/theno/fabsetup/blob/ced728abff93551ba5677e63bc1bdc0ef5ca5777/fabsetup/fabfile/setup/revealjs.py#L50-L90
def revealjs(basedir=None, title=None, subtitle=None, description=None,
             github_user=None, github_repo=None):
    '''Set up or update a reveal.js presentation with slides written in markdown.

    Several reveal.js plugins will be set up, too.

    More info:
      Demo: https://theno.github.io/revealjs_template
      http://lab.hakim.se/reveal-js/
      https://github.com/hakimel/reveal.js
      plugins:
        https://github.com/hakimel/reveal.js/wiki/Plugins,-Tools-and-Hardware
        https://github.com/rajgoel/reveal.js-plugins/
        https://github.com/e-gor/Reveal.js-TOC-Progress
        https://github.com/e-gor/Reveal.js-Title-Footer
    '''
    basedir = basedir or query_input('Base dir of the presentation?',
                                     default='~/repos/my_presi')
    revealjs_repo_name = 'reveal.js'
    revealjs_dir = flo('{basedir}/{revealjs_repo_name}')

    _lazy_dict['presi_title'] = title
    _lazy_dict['presi_subtitle'] = subtitle
    _lazy_dict['presi_description'] = description
    _lazy_dict['github_user'] = github_user
    _lazy_dict['github_repo'] = github_repo

    question = flo("Base dir already contains a sub dir '{revealjs_repo_name}'."
                   ' Reset (and re-download) reveal.js codebase?')
    if not exists(revealjs_dir) or query_yes_no(question, default='no'):
        run(flo('mkdir -p {basedir}'))
        set_up_revealjs_codebase(basedir, revealjs_repo_name)
        install_plugins(revealjs_dir)
        apply_customizations(repo_dir=revealjs_dir)
    if exists(revealjs_dir):
        install_files_in_basedir(basedir, repo_dir=revealjs_dir)
        init_git_repo(basedir)
        create_github_remote_repo(basedir)
        setup_npm(revealjs_dir)
    else:
        print('abort')
[ "def", "revealjs", "(", "basedir", "=", "None", ",", "title", "=", "None", ",", "subtitle", "=", "None", ",", "description", "=", "None", ",", "github_user", "=", "None", ",", "github_repo", "=", "None", ")", ":", "basedir", "=", "basedir", "or", "query_input", "(", "'Base dir of the presentation?'", ",", "default", "=", "'~/repos/my_presi'", ")", "revealjs_repo_name", "=", "'reveal.js'", "revealjs_dir", "=", "flo", "(", "'{basedir}/{revealjs_repo_name}'", ")", "_lazy_dict", "[", "'presi_title'", "]", "=", "title", "_lazy_dict", "[", "'presi_subtitle'", "]", "=", "subtitle", "_lazy_dict", "[", "'presi_description'", "]", "=", "description", "_lazy_dict", "[", "'github_user'", "]", "=", "github_user", "_lazy_dict", "[", "'github_repo'", "]", "=", "github_repo", "question", "=", "flo", "(", "\"Base dir already contains a sub dir '{revealjs_repo_name}'.\"", "' Reset (and re-download) reveal.js codebase?'", ")", "if", "not", "exists", "(", "revealjs_dir", ")", "or", "query_yes_no", "(", "question", ",", "default", "=", "'no'", ")", ":", "run", "(", "flo", "(", "'mkdir -p {basedir}'", ")", ")", "set_up_revealjs_codebase", "(", "basedir", ",", "revealjs_repo_name", ")", "install_plugins", "(", "revealjs_dir", ")", "apply_customizations", "(", "repo_dir", "=", "revealjs_dir", ")", "if", "exists", "(", "revealjs_dir", ")", ":", "install_files_in_basedir", "(", "basedir", ",", "repo_dir", "=", "revealjs_dir", ")", "init_git_repo", "(", "basedir", ")", "create_github_remote_repo", "(", "basedir", ")", "setup_npm", "(", "revealjs_dir", ")", "else", ":", "print", "(", "'abort'", ")" ]
Set up or update a reveal.js presentation with slides written in markdown.

Several reveal.js plugins will be set up, too.

More info:
  Demo: https://theno.github.io/revealjs_template
  http://lab.hakim.se/reveal-js/
  https://github.com/hakimel/reveal.js
  plugins:
    https://github.com/hakimel/reveal.js/wiki/Plugins,-Tools-and-Hardware
    https://github.com/rajgoel/reveal.js-plugins/
    https://github.com/e-gor/Reveal.js-TOC-Progress
    https://github.com/e-gor/Reveal.js-Title-Footer
[ "Set", "up", "or", "update", "a", "reveal", ".", "js", "presentation", "with", "slides", "written", "in", "markdown", "." ]
python
train
42.170732
shoebot/shoebot
lib/colors/__init__.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/colors/__init__.py#L1241-L1247
def reverse(self):
        """
        Returns a reversed copy of the list.
        """
        colors = ColorList.copy(self)
        _list.reverse(colors)
        return colors
[ "def", "reverse", "(", "self", ")", ":", "colors", "=", "ColorList", ".", "copy", "(", "self", ")", "_list", ".", "reverse", "(", "colors", ")", "return", "colors" ]
Returns a reversed copy of the list.
[ "Returns", "a", "reversed", "copy", "of", "the", "list", "." ]
python
valid
24.428571
loli/medpy
medpy/metric/binary.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/metric/binary.py#L34-L81
def dc(result, reference):
    r"""
    Dice coefficient

    Computes the Dice coefficient (also known as Sorensen index) between the binary
    objects in two images.

    The metric is defined as

    .. math::

        DC=\frac{2|A\cap B|}{|A|+|B|}

    , where :math:`A` is the first and :math:`B` the second set of samples (here: binary objects).

    Parameters
    ----------
    result : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    reference : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.

    Returns
    -------
    dc : float
        The Dice coefficient between the object(s) in ```result``` and the
        object(s) in ```reference```. It ranges from 0 (no overlap) to 1 (perfect overlap).

    Notes
    -----
    This is a real metric. The binary images can therefore be supplied in any order.
    """
    result = numpy.atleast_1d(result.astype(numpy.bool))
    reference = numpy.atleast_1d(reference.astype(numpy.bool))

    intersection = numpy.count_nonzero(result & reference)

    size_i1 = numpy.count_nonzero(result)
    size_i2 = numpy.count_nonzero(reference)

    try:
        dc = 2. * intersection / float(size_i1 + size_i2)
    except ZeroDivisionError:
        dc = 0.0

    return dc
[ "def", "dc", "(", "result", ",", "reference", ")", ":", "result", "=", "numpy", ".", "atleast_1d", "(", "result", ".", "astype", "(", "numpy", ".", "bool", ")", ")", "reference", "=", "numpy", ".", "atleast_1d", "(", "reference", ".", "astype", "(", "numpy", ".", "bool", ")", ")", "intersection", "=", "numpy", ".", "count_nonzero", "(", "result", "&", "reference", ")", "size_i1", "=", "numpy", ".", "count_nonzero", "(", "result", ")", "size_i2", "=", "numpy", ".", "count_nonzero", "(", "reference", ")", "try", ":", "dc", "=", "2.", "*", "intersection", "/", "float", "(", "size_i1", "+", "size_i2", ")", "except", "ZeroDivisionError", ":", "dc", "=", "0.0", "return", "dc" ]
r"""
Dice coefficient

Computes the Dice coefficient (also known as Sorensen index) between the binary
objects in two images.

The metric is defined as

.. math::

    DC=\frac{2|A\cap B|}{|A|+|B|}

, where :math:`A` is the first and :math:`B` the second set of samples (here: binary objects).

Parameters
----------
result : array_like
    Input data containing objects. Can be any type but will be converted
    into binary: background where 0, object everywhere else.
reference : array_like
    Input data containing objects. Can be any type but will be converted
    into binary: background where 0, object everywhere else.

Returns
-------
dc : float
    The Dice coefficient between the object(s) in ```result``` and the
    object(s) in ```reference```. It ranges from 0 (no overlap) to 1 (perfect overlap).

Notes
-----
This is a real metric. The binary images can therefore be supplied in any order.
[ "r", "Dice", "coefficient", "Computes", "the", "Dice", "coefficient", "(", "also", "known", "as", "Sorensen", "index", ")", "between", "the", "binary", "objects", "in", "two", "images", ".", "The", "metric", "is", "defined", "as", "..", "math", "::", "DC", "=", "\\", "frac", "{", "2|A", "\\", "cap", "B|", "}", "{", "|A|", "+", "|B|", "}", "where", ":", "math", ":", "A", "is", "the", "first", "and", ":", "math", ":", "B", "the", "second", "set", "of", "samples", "(", "here", ":", "binary", "objects", ")", ".", "Parameters", "----------", "result", ":", "array_like", "Input", "data", "containing", "objects", ".", "Can", "be", "any", "type", "but", "will", "be", "converted", "into", "binary", ":", "background", "where", "0", "object", "everywhere", "else", ".", "reference", ":", "array_like", "Input", "data", "containing", "objects", ".", "Can", "be", "any", "type", "but", "will", "be", "converted", "into", "binary", ":", "background", "where", "0", "object", "everywhere", "else", ".", "Returns", "-------", "dc", ":", "float", "The", "Dice", "coefficient", "between", "the", "object", "(", "s", ")", "in", "result", "and", "the", "object", "(", "s", ")", "in", "reference", ".", "It", "ranges", "from", "0", "(", "no", "overlap", ")", "to", "1", "(", "perfect", "overlap", ")", ".", "Notes", "-----", "This", "is", "a", "real", "metric", ".", "The", "binary", "images", "can", "therefore", "be", "supplied", "in", "any", "order", "." ]
python
train
30.208333
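A quick sanity check of the coefficient on two overlapping binary masks (requires numpy; plain `bool` is used instead of the deprecated `numpy.bool` alias the original relies on):

import numpy

a = numpy.array([0, 1, 1, 1, 0], dtype=bool)
b = numpy.array([0, 0, 1, 1, 1], dtype=bool)

intersection = numpy.count_nonzero(a & b)  # 2 overlapping foreground voxels
dice = 2.0 * intersection / (numpy.count_nonzero(a) + numpy.count_nonzero(b))
print(dice)  # -> 0.666... (2*2 / (3+3))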
saltstack/salt
salt/modules/lxc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxc.py#L2440-L2481
def stop(name, kill=False, path=None, use_vt=None):
    '''
    Stop the named container

    path
        path to the container parent directory
        default: /var/lib/lxc (system)

        .. versionadded:: 2015.8.0

    kill: False
        Do not wait for the container to stop, kill all tasks in the container.
        Older LXC versions will stop containers like this irrespective of this
        argument.

        .. versionchanged:: 2015.5.0
            Default value changed to ``False``

    use_vt
        run the command through VT

        .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt myminion lxc.stop name
    '''
    _ensure_exists(name, path=path)
    orig_state = state(name, path=path)
    if orig_state == 'frozen' and not kill:
        # Gracefully stopping a frozen container is slower than unfreezing and
        # then stopping it (at least in my testing), so if we're not
        # force-stopping the container, unfreeze it first.
        unfreeze(name, path=path)
    cmd = 'lxc-stop'
    if kill:
        cmd += ' -k'
    ret = _change_state(cmd, name, 'stopped', use_vt=use_vt, path=path)
    ret['state']['old'] = orig_state
    return ret
[ "def", "stop", "(", "name", ",", "kill", "=", "False", ",", "path", "=", "None", ",", "use_vt", "=", "None", ")", ":", "_ensure_exists", "(", "name", ",", "path", "=", "path", ")", "orig_state", "=", "state", "(", "name", ",", "path", "=", "path", ")", "if", "orig_state", "==", "'frozen'", "and", "not", "kill", ":", "# Gracefully stopping a frozen container is slower than unfreezing and", "# then stopping it (at least in my testing), so if we're not", "# force-stopping the container, unfreeze it first.", "unfreeze", "(", "name", ",", "path", "=", "path", ")", "cmd", "=", "'lxc-stop'", "if", "kill", ":", "cmd", "+=", "' -k'", "ret", "=", "_change_state", "(", "cmd", ",", "name", ",", "'stopped'", ",", "use_vt", "=", "use_vt", ",", "path", "=", "path", ")", "ret", "[", "'state'", "]", "[", "'old'", "]", "=", "orig_state", "return", "ret" ]
Stop the named container

path
    path to the container parent directory
    default: /var/lib/lxc (system)

    .. versionadded:: 2015.8.0

kill: False
    Do not wait for the container to stop, kill all tasks in the container.
    Older LXC versions will stop containers like this irrespective of this
    argument.

    .. versionchanged:: 2015.5.0
        Default value changed to ``False``

use_vt
    run the command through VT

    .. versionadded:: 2015.8.0

CLI Example:

.. code-block:: bash

    salt myminion lxc.stop name
[ "Stop", "the", "named", "container" ]
python
train
27.857143
apache/spark
python/pyspark/broadcast.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/broadcast.py#L162-L175
def destroy(self, blocking=False):
        """
        Destroy all data and metadata related to this broadcast variable.
        Use this with caution; once a broadcast variable has been destroyed,
        it cannot be used again.

        .. versionchanged:: 3.0.0
           Added optional argument `blocking` to specify whether to block until all
           blocks are deleted.
        """
        if self._jbroadcast is None:
            raise Exception("Broadcast can only be destroyed in driver")
        self._jbroadcast.destroy(blocking)
        os.unlink(self._path)
[ "def", "destroy", "(", "self", ",", "blocking", "=", "False", ")", ":", "if", "self", ".", "_jbroadcast", "is", "None", ":", "raise", "Exception", "(", "\"Broadcast can only be destroyed in driver\"", ")", "self", ".", "_jbroadcast", ".", "destroy", "(", "blocking", ")", "os", ".", "unlink", "(", "self", ".", "_path", ")" ]
Destroy all data and metadata related to this broadcast variable. Use this
with caution; once a broadcast variable has been destroyed, it cannot be
used again.

.. versionchanged:: 3.0.0
   Added optional argument `blocking` to specify whether to block until all
   blocks are deleted.
[ "Destroy", "all", "data", "and", "metadata", "related", "to", "this", "broadcast", "variable", ".", "Use", "this", "with", "caution", ";", "once", "a", "broadcast", "variable", "has", "been", "destroyed", "it", "cannot", "be", "used", "again", "." ]
python
train
40.142857
tanghaibao/goatools
goatools/anno/init/reader_gaf.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/anno/init/reader_gaf.py#L173-L192
def get_gafvals(self, line):
        """Convert fields from string to preferred format for GAF ver 2.1 and 2.0."""
        flds = line.split('\t')
        flds[3] = self._get_qualifier(flds[3])   # 3  Qualifier
        flds[5] = self._get_set(flds[5])         # 5  DB_Reference
        flds[7] = self._get_set(flds[7])         # 7  With_From
        flds[8] = self.aspect2ns[flds[8]]        # 8  GAF Aspect field converted to BP, MF, or CC
        flds[9] = self._get_set(flds[9])         # 9  DB_Name
        flds[10] = self._get_set(flds[10])       # 10 DB_Synonym
        flds[12] = self._do_taxons(flds[12])     # 12 Taxon
        flds[13] = GET_DATE_YYYYMMDD(flds[13])   # self.strptime(flds[13], '%Y%m%d').date(),  # 13 Date 20190406
        # Version 2.x has these additional fields not found in v1.0
        if self.is_long:
            flds[15] = get_extensions(flds[15])  # Extensions (or Annotation_Extension)
            flds[16] = self._get_set(flds[16].rstrip())
        else:
            flds[14] = self._get_set(flds[14].rstrip())
        return flds
[ "def", "get_gafvals", "(", "self", ",", "line", ")", ":", "flds", "=", "line", ".", "split", "(", "'\\t'", ")", "flds", "[", "3", "]", "=", "self", ".", "_get_qualifier", "(", "flds", "[", "3", "]", ")", "# 3 Qualifier", "flds", "[", "5", "]", "=", "self", ".", "_get_set", "(", "flds", "[", "5", "]", ")", "# 5 DB_Reference", "flds", "[", "7", "]", "=", "self", ".", "_get_set", "(", "flds", "[", "7", "]", ")", "# 7 With_From", "flds", "[", "8", "]", "=", "self", ".", "aspect2ns", "[", "flds", "[", "8", "]", "]", "# 8 GAF Aspect field converted to BP, MF, or CC", "flds", "[", "9", "]", "=", "self", ".", "_get_set", "(", "flds", "[", "9", "]", ")", "# 9 DB_Name", "flds", "[", "10", "]", "=", "self", ".", "_get_set", "(", "flds", "[", "10", "]", ")", "# 10 DB_Synonym", "flds", "[", "12", "]", "=", "self", ".", "_do_taxons", "(", "flds", "[", "12", "]", ")", "# 12 Taxon", "flds", "[", "13", "]", "=", "GET_DATE_YYYYMMDD", "(", "flds", "[", "13", "]", ")", "# self.strptime(flds[13], '%Y%m%d').date(), # 13 Date 20190406", "# Version 2.x has these additional fields not found in v1.0", "if", "self", ".", "is_long", ":", "flds", "[", "15", "]", "=", "get_extensions", "(", "flds", "[", "15", "]", ")", "# Extensions (or Annotation_Extension)", "flds", "[", "16", "]", "=", "self", ".", "_get_set", "(", "flds", "[", "16", "]", ".", "rstrip", "(", ")", ")", "else", ":", "flds", "[", "14", "]", "=", "self", ".", "_get_set", "(", "flds", "[", "14", "]", ".", "rstrip", "(", ")", ")", "return", "flds" ]
Convert fields from string to preferred format for GAF ver 2.1 and 2.0.
[ "Convert", "fields", "from", "string", "to", "preferred", "format", "for", "GAF", "ver", "2", ".", "1", "and", "2", ".", "0", "." ]
python
train
51.2
pmacosta/pexdoc
pexdoc/exh.py
https://github.com/pmacosta/pexdoc/blob/201ac243e5781347feb75896a4231429fe6da4b1/pexdoc/exh.py#L260-L305
def get_or_create_exh_obj(full_cname=False, exclude=None, callables_fname=None):
    r"""
    Return global exception handler if set, otherwise create a new one and
    return it.

    :param full_cname: Flag that indicates whether fully qualified
                       function/method/class property names are obtained for
                       functions/methods/class properties that use the
                       exception manager (True) or not (False).

                       There is a performance penalty if the flag is True as
                       the call stack needs to be traced. This argument is
                       only relevant if the global exception handler is not
                       set and a new one is created
    :type  full_cname: boolean

    :param exclude: Module exclusion list. A particular callable in an
                    otherwise fully qualified name is omitted if it belongs
                    to a module in this list. If None all callables are
                    included
    :type  exclude: list of strings or None

    :param callables_fname: File name that contains traced modules
                            information. File can be produced by either the
                            :py:meth:`pexdoc.pinspect.Callables.save` or
                            :py:meth:`pexdoc.ExHandle.save_callables` methods
    :type  callables_fname: :ref:`FileNameExists` or None

    :rtype: :py:class:`pexdoc.ExHandle`

    :raises:
     * OSError (File *[callables_fname]* could not be found

     * RuntimeError (Argument \\`exclude\\` is not valid)

     * RuntimeError (Argument \\`callables_fname\\` is not valid)

     * RuntimeError (Argument \\`full_cname\\` is not valid)
    """
    if not hasattr(__builtin__, "_EXH"):
        set_exh_obj(
            ExHandle(
                full_cname=full_cname, exclude=exclude, callables_fname=callables_fname
            )
        )
    return get_exh_obj()
[ "def", "get_or_create_exh_obj", "(", "full_cname", "=", "False", ",", "exclude", "=", "None", ",", "callables_fname", "=", "None", ")", ":", "if", "not", "hasattr", "(", "__builtin__", ",", "\"_EXH\"", ")", ":", "set_exh_obj", "(", "ExHandle", "(", "full_cname", "=", "full_cname", ",", "exclude", "=", "exclude", ",", "callables_fname", "=", "callables_fname", ")", ")", "return", "get_exh_obj", "(", ")" ]
r"""
Return global exception handler if set, otherwise create a new one and
return it.

:param full_cname: Flag that indicates whether fully qualified
                   function/method/class property names are obtained for
                   functions/methods/class properties that use the
                   exception manager (True) or not (False).

                   There is a performance penalty if the flag is True as the
                   call stack needs to be traced. This argument is only
                   relevant if the global exception handler is not set and a
                   new one is created
:type  full_cname: boolean

:param exclude: Module exclusion list. A particular callable in an
                otherwise fully qualified name is omitted if it belongs to a
                module in this list. If None all callables are included
:type  exclude: list of strings or None

:param callables_fname: File name that contains traced modules information.
                        File can be produced by either the
                        :py:meth:`pexdoc.pinspect.Callables.save` or
                        :py:meth:`pexdoc.ExHandle.save_callables` methods
:type  callables_fname: :ref:`FileNameExists` or None

:rtype: :py:class:`pexdoc.ExHandle`

:raises:
 * OSError (File *[callables_fname]* could not be found

 * RuntimeError (Argument \\`exclude\\` is not valid)

 * RuntimeError (Argument \\`callables_fname\\` is not valid)

 * RuntimeError (Argument \\`full_cname\\` is not valid)
[ "r", "Return", "global", "exception", "handler", "if", "set", "otherwise", "create", "a", "new", "one", "and", "return", "it", "." ]
python
train
41.913043
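The get-or-create pattern above (stash one handler on a well-known global, reuse it on later calls) can be sketched without pexdoc; the module-level `_HANDLER` cache and `Handler` class below are hypothetical stand-ins for the `__builtin__._EXH` attribute and `ExHandle` the library uses:

_HANDLER = None

class Handler:
    def __init__(self, full_cname=False):
        self.full_cname = full_cname

def get_or_create_handler(full_cname=False):
    # Create the singleton on first use; later calls ignore the arguments.
    global _HANDLER
    if _HANDLER is None:
        _HANDLER = Handler(full_cname=full_cname)
    return _HANDLER

assert get_or_create_handler(True) is get_or_create_handler(False)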
virtuald/pygi-composite-templates
gi_composites.py
https://github.com/virtuald/pygi-composite-templates/blob/a22be54ea95b8125b36deaa3ce7171e84158d486/gi_composites.py#L104-L136
def _init_template(self, cls, base_init_template):
    '''This would be better as an override for Gtk.Widget'''

    # TODO: could disallow using a metaclass.. but this is good enough
    # .. if you disagree, feel free to fix it and issue a PR :)
    if self.__class__ is not cls:
        raise TypeError("Inheritance from classes with @GtkTemplate decorators "
                        "is not allowed at this time")

    connected_signals = set()
    self.__connected_template_signals__ = connected_signals

    base_init_template(self)

    for name in self.__gtemplate_widgets__:
        widget = self.get_template_child(cls, name)
        self.__dict__[name] = widget
        if widget is None:
            # Bug: if you bind a template child, and one of them was
            # not present, then the whole template is broken (and
            # it's not currently possible for us to know which
            # one is broken either -- but the stderr should show
            # something useful with a Gtk-CRITICAL message)
            raise AttributeError("A missing child widget was set using "
                                 "GtkTemplate.Child and the entire "
                                 "template is now broken (widgets: %s)" %
                                 ', '.join(self.__gtemplate_widgets__))

    for name in self.__gtemplate_methods__.difference(connected_signals):
        errmsg = ("Signal '%s' was declared with @GtkTemplate.Callback " +
                  "but was not present in template") % name
        warnings.warn(errmsg, GtkTemplateWarning)
[ "def", "_init_template", "(", "self", ",", "cls", ",", "base_init_template", ")", ":", "# TODO: could disallow using a metaclass.. but this is good enough", "# .. if you disagree, feel free to fix it and issue a PR :)", "if", "self", ".", "__class__", "is", "not", "cls", ":", "raise", "TypeError", "(", "\"Inheritance from classes with @GtkTemplate decorators \"", "\"is not allowed at this time\"", ")", "connected_signals", "=", "set", "(", ")", "self", ".", "__connected_template_signals__", "=", "connected_signals", "base_init_template", "(", "self", ")", "for", "name", "in", "self", ".", "__gtemplate_widgets__", ":", "widget", "=", "self", ".", "get_template_child", "(", "cls", ",", "name", ")", "self", ".", "__dict__", "[", "name", "]", "=", "widget", "if", "widget", "is", "None", ":", "# Bug: if you bind a template child, and one of them was", "# not present, then the whole template is broken (and", "# it's not currently possible for us to know which", "# one is broken either -- but the stderr should show", "# something useful with a Gtk-CRITICAL message)", "raise", "AttributeError", "(", "\"A missing child widget was set using \"", "\"GtkTemplate.Child and the entire \"", "\"template is now broken (widgets: %s)\"", "%", "', '", ".", "join", "(", "self", ".", "__gtemplate_widgets__", ")", ")", "for", "name", "in", "self", ".", "__gtemplate_methods__", ".", "difference", "(", "connected_signals", ")", ":", "errmsg", "=", "(", "\"Signal '%s' was declared with @GtkTemplate.Callback \"", "+", "\"but was not present in template\"", ")", "%", "name", "warnings", ".", "warn", "(", "errmsg", ",", "GtkTemplateWarning", ")" ]
This would be better as an override for Gtk.Widget
[ "This", "would", "be", "better", "as", "an", "override", "for", "Gtk", ".", "Widget" ]
python
train
47.242424
xaptum/xtt-python
xtt/_utils.py
https://github.com/xaptum/xtt-python/blob/23ee469488d710d730314bec1136c4dd7ac2cd5c/xtt/_utils.py#L21-L31
def to_bytes(s, encoding="utf-8"):
    """
    Converts the string to a bytes type, if not already.

    :s: the string to convert to bytes
    :returns: `str` on Python2 and `bytes` on Python3.
    """
    if isinstance(s, six.binary_type):
        return s
    else:
        return six.text_type(s).encode(encoding)
[ "def", "to_bytes", "(", "s", ",", "encoding", "=", "\"utf-8\"", ")", ":", "if", "isinstance", "(", "s", ",", "six", ".", "binary_type", ")", ":", "return", "s", "else", ":", "return", "six", ".", "text_type", "(", "s", ")", ".", "encode", "(", "encoding", ")" ]
Converts the string to a bytes type, if not already.

:s: the string to convert to bytes
:returns: `str` on Python2 and `bytes` on Python3.
[ "Converts", "the", "string", "to", "a", "bytes", "type", "if", "not", "already", "." ]
python
train
27.909091
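The six-based helper just normalizes str/bytes input; on Python 3 alone the same behavior fits in a two-liner (a sketch, not xtt's code):

def to_bytes(s, encoding="utf-8"):
    # Leave bytes untouched; encode anything else via its str form.
    return s if isinstance(s, bytes) else str(s).encode(encoding)

print(to_bytes("héllo"))  # -> b'h\xc3\xa9llo'
print(to_bytes(b"raw"))   # -> b'raw' (unchanged)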
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py#L968-L979
def get_vmpolicy_macaddr_input_datacenter(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
        config = get_vmpolicy_macaddr
        input = ET.SubElement(get_vmpolicy_macaddr, "input")
        datacenter = ET.SubElement(input, "datacenter")
        datacenter.text = kwargs.pop('datacenter')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)
[ "def", "get_vmpolicy_macaddr_input_datacenter", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_vmpolicy_macaddr", "=", "ET", ".", "Element", "(", "\"get_vmpolicy_macaddr\"", ")", "config", "=", "get_vmpolicy_macaddr", "input", "=", "ET", ".", "SubElement", "(", "get_vmpolicy_macaddr", ",", "\"input\"", ")", "datacenter", "=", "ET", ".", "SubElement", "(", "input", ",", "\"datacenter\"", ")", "datacenter", ".", "text", "=", "kwargs", ".", "pop", "(", "'datacenter'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
40.916667
reingart/gui2py
gui/dialog.py
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/dialog.py#L127-L131
def find(default='', whole_words=0, case_sensitive=0, parent=None):
    "Shows a find text dialog"
    result = dialogs.findDialog(parent, default, whole_words, case_sensitive)
    return {'text': result.searchText, 'whole_words': result.wholeWordsOnly,
            'case_sensitive': result.caseSensitive}
[ "def", "find", "(", "default", "=", "''", ",", "whole_words", "=", "0", ",", "case_sensitive", "=", "0", ",", "parent", "=", "None", ")", ":", "result", "=", "dialogs", ".", "findDialog", "(", "parent", ",", "default", ",", "whole_words", ",", "case_sensitive", ")", "return", "{", "'text'", ":", "result", ".", "searchText", ",", "'whole_words'", ":", "result", ".", "wholeWordsOnly", ",", "'case_sensitive'", ":", "result", ".", "caseSensitive", "}" ]
Shows a find text dialog
[ "Shows", "a", "find", "text", "dialog" ]
python
test
61
fakedrake/overlay_parse
overlay_parse/matchers.py
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L50-L66
def offset_overlays(self, text, offset=0, **kw):
        """
        Generate overlays after offset.

        :param text: The text to be searched.
        :param offset: Match starting at that index. If none just search.

        :returns: An overlay or None
        """
        # This may be a bit slower but overlayedtext takes care of
        # unicode issues.
        if not isinstance(text, OverlayedText):
            text = OverlayedText(text)

        for m in self.regex.finditer(unicode(text)[offset:]):
            yield Overlay(text, (offset + m.start(), offset + m.end()),
                          props=self.props,
                          value=self.value(rxmatch=m))
[ "def", "offset_overlays", "(", "self", ",", "text", ",", "offset", "=", "0", ",", "*", "*", "kw", ")", ":", "# This may be a bit slower but overlayedtext takes care of", "# unicode issues.", "if", "not", "isinstance", "(", "text", ",", "OverlayedText", ")", ":", "text", "=", "OverlayedText", "(", "text", ")", "for", "m", "in", "self", ".", "regex", ".", "finditer", "(", "unicode", "(", "text", ")", "[", "offset", ":", "]", ")", ":", "yield", "Overlay", "(", "text", ",", "(", "offset", "+", "m", ".", "start", "(", ")", ",", "offset", "+", "m", ".", "end", "(", ")", ")", ",", "props", "=", "self", ".", "props", ",", "value", "=", "self", ".", "value", "(", "rxmatch", "=", "m", ")", ")" ]
Generate overlays after offset.

:param text: The text to be searched.
:param offset: Match starting at that index. If none just search.

:returns: An overlay or None
[ "Generate", "overlays", "after", "offset", ".", ":", "param", "text", ":", "The", "text", "to", "be", "searched", ".", ":", "param", "offset", ":", "Match", "starting", "at", "that", "index", ".", "If", "none", "just", "search", ".", ":", "returns", ":", "An", "overlay", "or", "None" ]
python
train
39.117647
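The core trick is matching on a slice and shifting each span back by `offset`; a dependency-free sketch in which plain tuples stand in for the library's `Overlay` objects:

import re

def offset_spans(regex, text, offset=0):
    # Match only after `offset`, then translate spans back to full-text indices.
    for m in re.finditer(regex, text[offset:]):
        yield (offset + m.start(), offset + m.end(), m.group())

print(list(offset_spans(r'\d+', 'a1 b22 c333', offset=3)))
# -> [(4, 6, '22'), (8, 11, '333')]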
hobson/aima
aima/search.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/search.py#L601-L606
def random_boggle(n=4):
    """Return a random Boggle board of size n x n.
    We represent a board as a linear list of letters."""
    cubes = [cubes16[i % 16] for i in range(n*n)]
    random.shuffle(cubes)
    return map(random.choice, cubes)
[ "def", "random_boggle", "(", "n", "=", "4", ")", ":", "cubes", "=", "[", "cubes16", "[", "i", "%", "16", "]", "for", "i", "in", "range", "(", "n", "*", "n", ")", "]", "random", ".", "shuffle", "(", "cubes", ")", "return", "map", "(", "random", ".", "choice", ",", "cubes", ")" ]
Return a random Boggle board of size n x n. We represent a board as a linear list of letters.
[ "Return", "a", "random", "Boggle", "board", "of", "size", "n", "x", "n", ".", "We", "represent", "a", "board", "as", "a", "linear", "list", "of", "letters", "." ]
python
valid
39.833333
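A self-contained variant with a hypothetical cube list (AIMA's 16-entry `cubes16` table is not reproduced); note the original's bare `map` would need wrapping in `list` on Python 3:

import random

CUBES = ["AEIOUY", "BCDFGH", "JKLMNP", "QRSTVW"]  # made-up cube faces

def random_board(n=2):
    # One cube per cell: shuffle placement, then roll each cube.
    cubes = [CUBES[i % len(CUBES)] for i in range(n * n)]
    random.shuffle(cubes)
    return [random.choice(c) for c in cubes]

print(random_board())  # e.g. ['K', 'E', 'R', 'C']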
llazzaro/analyzerdam
analyzerdam/hbaseDAM.py
https://github.com/llazzaro/analyzerdam/blob/c5bc7483dae23bd2e14bbf36147b7a43a0067bc0/analyzerdam/hbaseDAM.py#L44-L48
def readQuotes(self, start, end):
        ''' read quotes '''
        rows = self.__hbase.scanTable(self.tableName(HBaseDAM.QUOTE),
                                      [HBaseDAM.QUOTE],
                                      start,
                                      end)
        return [self.__rowResultToQuote(row) for row in rows]
[ "def", "readQuotes", "(", "self", ",", "start", ",", "end", ")", ":", "rows", "=", "self", ".", "__hbase", ".", "scanTable", "(", "self", ".", "tableName", "(", "HBaseDAM", ".", "QUOTE", ")", ",", "[", "HBaseDAM", ".", "QUOTE", "]", ",", "start", ",", "end", ")", "return", "[", "self", ".", "__rowResultToQuote", "(", "row", ")", "for", "row", "in", "rows", "]" ]
read quotes
[ "read", "quotes" ]
python
train
44.8
codelv/enaml-native
src/enamlnative/android/http.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/http.py#L276-L285
def _fetch(self, request):
        """ Fetch using the OkHttpClient """
        client = self.client

        #: Dispatch the async call
        call = Call(__id__=client.newCall(request.request))
        call.enqueue(request.handler)

        #: Save the call reference
        request.call = call
[ "def", "_fetch", "(", "self", ",", "request", ")", ":", "client", "=", "self", ".", "client", "#: Dispatch the async call", "call", "=", "Call", "(", "__id__", "=", "client", ".", "newCall", "(", "request", ".", "request", ")", ")", "call", ".", "enqueue", "(", "request", ".", "handler", ")", "#: Save the call reference", "request", ".", "call", "=", "call" ]
Fetch using the OkHttpClient
[ "Fetch", "using", "the", "OkHttpClient" ]
python
train
28.9
charlesthomas/proauth2
proauth2/async_proauth2.py
https://github.com/charlesthomas/proauth2/blob/f88c8df966a1802414047ed304d02df1dd520097/proauth2/async_proauth2.py#L123-L128
def revoke_token(self, token, callback):
        '''
        revoke_token removes the access token from the data_store
        '''
        yield Task(self.data_store.remove, 'tokens', token=token)
        callback()
[ "def", "revoke_token", "(", "self", ",", "token", ",", "callback", ")", ":", "yield", "Task", "(", "self", ".", "data_store", ".", "remove", ",", "'tokens'", ",", "token", "=", "token", ")", "callback", "(", ")" ]
revoke_token removes the access token from the data_store
[ "revoke_token", "removes", "the", "access", "token", "from", "the", "data_store" ]
python
valid
35
AnalogJ/lexicon
lexicon/providers/hetzner.py
https://github.com/AnalogJ/lexicon/blob/9330b871988753cad44fe2876a217b4c67b1fa0e/lexicon/providers/hetzner.py#L648-L675
def _get_domain_id(self, domain):
        """
        Pulls all domains managed by authenticated Hetzner account, extracts
        their IDs and returns the ID for the current domain, if it exists.
        Otherwise raises an error.
        """
        api = self.api[self.account]['domain_id']
        qdomain = dns.name.from_text(domain).to_unicode(True)
        domains, last_count, page = {}, -1, 0
        while last_count != len(domains):
            last_count = len(domains)
            page += 1
            url = (api['GET'].copy()).get('url', '/').replace('<index>', str(page))
            params = api['GET'].get('params', {}).copy()
            for param in params:
                params[param] = params[param].replace('<index>', str(page))
            response = self._get(url, query_params=params)
            domain_tags = Provider._filter_dom(response.text, api['filter'], True)
            for domain_tag in domain_tags:
                domain_id = Provider._extract_domain_id(dict(domain_tag.attrs)[api['id']['attr']],
                                                        api['id']['regex'])
                domain = (Provider._filter_dom(domain_tag, api['domain'])
                          .renderContents().decode('UTF-8'))
                domains[domain] = domain_id
                if domain == qdomain:
                    LOGGER.info('Hetzner => Get ID %s for domain %s', domain_id, qdomain)
                    return domain_id
        LOGGER.error('Hetzner => ID for domain %s does not exists', qdomain)
        raise AssertionError
[ "def", "_get_domain_id", "(", "self", ",", "domain", ")", ":", "api", "=", "self", ".", "api", "[", "self", ".", "account", "]", "[", "'domain_id'", "]", "qdomain", "=", "dns", ".", "name", ".", "from_text", "(", "domain", ")", ".", "to_unicode", "(", "True", ")", "domains", ",", "last_count", ",", "page", "=", "{", "}", ",", "-", "1", ",", "0", "while", "last_count", "!=", "len", "(", "domains", ")", ":", "last_count", "=", "len", "(", "domains", ")", "page", "+=", "1", "url", "=", "(", "api", "[", "'GET'", "]", ".", "copy", "(", ")", ")", ".", "get", "(", "'url'", ",", "'/'", ")", ".", "replace", "(", "'<index>'", ",", "str", "(", "page", ")", ")", "params", "=", "api", "[", "'GET'", "]", ".", "get", "(", "'params'", ",", "{", "}", ")", ".", "copy", "(", ")", "for", "param", "in", "params", ":", "params", "[", "param", "]", "=", "params", "[", "param", "]", ".", "replace", "(", "'<index>'", ",", "str", "(", "page", ")", ")", "response", "=", "self", ".", "_get", "(", "url", ",", "query_params", "=", "params", ")", "domain_tags", "=", "Provider", ".", "_filter_dom", "(", "response", ".", "text", ",", "api", "[", "'filter'", "]", ",", "True", ")", "for", "domain_tag", "in", "domain_tags", ":", "domain_id", "=", "Provider", ".", "_extract_domain_id", "(", "dict", "(", "domain_tag", ".", "attrs", ")", "[", "api", "[", "'id'", "]", "[", "'attr'", "]", "]", ",", "api", "[", "'id'", "]", "[", "'regex'", "]", ")", "domain", "=", "(", "Provider", ".", "_filter_dom", "(", "domain_tag", ",", "api", "[", "'domain'", "]", ")", ".", "renderContents", "(", ")", ".", "decode", "(", "'UTF-8'", ")", ")", "domains", "[", "domain", "]", "=", "domain_id", "if", "domain", "==", "qdomain", ":", "LOGGER", ".", "info", "(", "'Hetzner => Get ID %s for domain %s'", ",", "domain_id", ",", "qdomain", ")", "return", "domain_id", "LOGGER", ".", "error", "(", "'Hetzner => ID for domain %s does not exists'", ",", "qdomain", ")", "raise", "AssertionError" ]
Pulls all domains managed by authenticated Hetzner account, extracts their
IDs and returns the ID for the current domain, if it exists. Otherwise
raises an error.
[ "Pulls", "all", "domains", "managed", "by", "authenticated", "Hetzner", "account", "extracts", "their", "IDs", "and", "returns", "the", "ID", "for", "the", "current", "domain", "if", "it", "exists", ".", "Otherwise", "raises", "an", "error", "." ]
python
train
54.392857
last-partizan/pytils
pytils/numeral.py
https://github.com/last-partizan/pytils/blob/1c570a32b15e564bc68587b8207e32d464e61d08/pytils/numeral.py#L126-L157
def choose_plural(amount, variants):
    """
    Choose proper case depending on amount

    @param amount: amount of objects
    @type amount: C{integer types}

    @param variants: variants (forms) of object in such form:
        (1 object, 2 objects, 5 objects).
    @type variants: 3-element C{sequence} of C{unicode}
        or C{unicode} (three variants with delimiter ',')

    @return: proper variant
    @rtype: C{unicode}

    @raise ValueError: variants' length lesser than 3
    """
    if isinstance(variants, six.text_type):
        variants = split_values(variants)
    check_length(variants, 3)
    amount = abs(amount)

    if amount % 10 == 1 and amount % 100 != 11:
        variant = 0
    elif amount % 10 >= 2 and amount % 10 <= 4 and \
            (amount % 100 < 10 or amount % 100 >= 20):
        variant = 1
    else:
        variant = 2

    return variants[variant]
[ "def", "choose_plural", "(", "amount", ",", "variants", ")", ":", "if", "isinstance", "(", "variants", ",", "six", ".", "text_type", ")", ":", "variants", "=", "split_values", "(", "variants", ")", "check_length", "(", "variants", ",", "3", ")", "amount", "=", "abs", "(", "amount", ")", "if", "amount", "%", "10", "==", "1", "and", "amount", "%", "100", "!=", "11", ":", "variant", "=", "0", "elif", "amount", "%", "10", ">=", "2", "and", "amount", "%", "10", "<=", "4", "and", "(", "amount", "%", "100", "<", "10", "or", "amount", "%", "100", ">=", "20", ")", ":", "variant", "=", "1", "else", ":", "variant", "=", "2", "return", "variants", "[", "variant", "]" ]
Choose proper case depending on amount

@param amount: amount of objects
@type amount: C{integer types}

@param variants: variants (forms) of object in such form:
    (1 object, 2 objects, 5 objects).
@type variants: 3-element C{sequence} of C{unicode}
    or C{unicode} (three variants with delimiter ',')

@return: proper variant
@rtype: C{unicode}

@raise ValueError: variants' length lesser than 3
[ "Choose", "proper", "case", "depending", "on", "amount" ]
python
train
27.21875
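The selector implements the standard Russian three-form plural rule; a few spot checks against a standalone copy of the same arithmetic (the transliterated forms are made-up examples):

def plural_index(amount):
    # 1, 21, 31... -> form 0; 2-4, 22-24... -> form 1; everything else -> form 2
    amount = abs(amount)
    if amount % 10 == 1 and amount % 100 != 11:
        return 0
    if 2 <= amount % 10 <= 4 and not 10 <= amount % 100 < 20:
        return 1
    return 2

forms = ("gvozd", "gvozdya", "gvozdey")
for n in (1, 2, 5, 11, 21, 104):
    print(n, forms[plural_index(n)])
# 1 gvozd, 2 gvozdya, 5 gvozdey, 11 gvozdey, 21 gvozd, 104 gvozdya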
bitcraze/crazyflie-lib-python
cflib/crazyflie/mem.py
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crazyflie/mem.py#L928-L934
def ow_search(self, vid=0xBC, pid=None, name=None):
        """Search for specific memory id/name and return it"""
        for m in self.get_mems(MemoryElement.TYPE_1W):
            if pid and m.pid == pid or name and m.name == name:
                return m
        return None
[ "def", "ow_search", "(", "self", ",", "vid", "=", "0xBC", ",", "pid", "=", "None", ",", "name", "=", "None", ")", ":", "for", "m", "in", "self", ".", "get_mems", "(", "MemoryElement", ".", "TYPE_1W", ")", ":", "if", "pid", "and", "m", ".", "pid", "==", "pid", "or", "name", "and", "m", ".", "name", "==", "name", ":", "return", "m", "return", "None" ]
Search for specific memory id/name and return it
[ "Search", "for", "specific", "memory", "id", "/", "name", "and", "return", "it" ]
python
train
39
nickmckay/LiPD-utilities
Python/lipd/excel.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/excel.py#L1412-L1450
def compile_fund(workbook, sheet, row, col):
    """
    Compile funding entries. Iter both rows at the same time. Keep adding
    entries until both cells are empty.

    :param obj workbook:
    :param str sheet:
    :param int row:
    :param int col:
    :return list of dict: l
    """
    logger_excel.info("enter compile_fund")
    l = []
    temp_sheet = workbook.sheet_by_name(sheet)
    while col < temp_sheet.ncols:
        col += 1
        try:
            # Make a dictionary for this funding entry.
            _curr = {
                'agency': temp_sheet.cell_value(row, col),
                'grant': temp_sheet.cell_value(row+1, col),
                "principalInvestigator": temp_sheet.cell_value(row+2, col),
                "country": temp_sheet.cell_value(row + 3, col)
            }
            # Make a list for all
            _exist = [temp_sheet.cell_value(row, col), temp_sheet.cell_value(row+1, col),
                      temp_sheet.cell_value(row+2, col), temp_sheet.cell_value(row+3, col)]
            # Remove all empty items from the list
            _exist = [i for i in _exist if i]
            # If we have all empty entries, then don't continue. Quit funding and return what we have.
            if not _exist:
                return l
            # We have funding data. Add this funding block to the growing list.
            l.append(_curr)
        except IndexError as e:
            logger_excel.debug("compile_fund: IndexError: sheet:{} row:{} col:{}, {}".format(sheet, row, col, e))
    logger_excel.info("exit compile_fund")
    return l
[ "def", "compile_fund", "(", "workbook", ",", "sheet", ",", "row", ",", "col", ")", ":", "logger_excel", ".", "info", "(", "\"enter compile_fund\"", ")", "l", "=", "[", "]", "temp_sheet", "=", "workbook", ".", "sheet_by_name", "(", "sheet", ")", "while", "col", "<", "temp_sheet", ".", "ncols", ":", "col", "+=", "1", "try", ":", "# Make a dictionary for this funding entry.", "_curr", "=", "{", "'agency'", ":", "temp_sheet", ".", "cell_value", "(", "row", ",", "col", ")", ",", "'grant'", ":", "temp_sheet", ".", "cell_value", "(", "row", "+", "1", ",", "col", ")", ",", "\"principalInvestigator\"", ":", "temp_sheet", ".", "cell_value", "(", "row", "+", "2", ",", "col", ")", ",", "\"country\"", ":", "temp_sheet", ".", "cell_value", "(", "row", "+", "3", ",", "col", ")", "}", "# Make a list for all", "_exist", "=", "[", "temp_sheet", ".", "cell_value", "(", "row", ",", "col", ")", ",", "temp_sheet", ".", "cell_value", "(", "row", "+", "1", ",", "col", ")", ",", "temp_sheet", ".", "cell_value", "(", "row", "+", "2", ",", "col", ")", ",", "temp_sheet", ".", "cell_value", "(", "row", "+", "3", ",", "col", ")", "]", "# Remove all empty items from the list", "_exist", "=", "[", "i", "for", "i", "in", "_exist", "if", "i", "]", "# If we have all empty entries, then don't continue. Quit funding and return what we have.", "if", "not", "_exist", ":", "return", "l", "# We have funding data. Add this funding block to the growing list.", "l", ".", "append", "(", "_curr", ")", "except", "IndexError", "as", "e", ":", "logger_excel", ".", "debug", "(", "\"compile_fund: IndexError: sheet:{} row:{} col:{}, {}\"", ".", "format", "(", "sheet", ",", "row", ",", "col", ",", "e", ")", ")", "logger_excel", ".", "info", "(", "\"exit compile_fund\"", ")", "return", "l" ]
Compile funding entries. Iter both rows at the same time. Keep adding
entries until both cells are empty.

:param obj workbook:
:param str sheet:
:param int row:
:param int col:
:return list of dict: l
[ "Compile", "funding", "entries", ".", "Iter", "both", "rows", "at", "the", "same", "time", ".", "Keep", "adding", "entries", "until", "both", "cells", "are", "empty", ".", ":", "param", "obj", "workbook", ":", ":", "param", "str", "sheet", ":", ":", "param", "int", "row", ":", ":", "param", "int", "col", ":", ":", "return", "list", "of", "dict", ":", "l" ]
python
train
39.615385
trendels/rhino
rhino/request.py
https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/request.py#L91-L105
def get(self, key, default=None, type=None):
        """Returns the first value for a key.

        If `type` is not None, the value will be converted by calling `type`
        with the value as argument. If type() raises `ValueError`, it will
        be treated as if the value didn't exist, and `default` will be
        returned instead.
        """
        try:
            value = self[key]
            if type is not None:
                return type(value)
            return value
        except (KeyError, ValueError):
            return default
[ "def", "get", "(", "self", ",", "key", ",", "default", "=", "None", ",", "type", "=", "None", ")", ":", "try", ":", "value", "=", "self", "[", "key", "]", "if", "type", "is", "not", "None", ":", "return", "type", "(", "value", ")", "return", "value", "except", "(", "KeyError", ",", "ValueError", ")", ":", "return", "default" ]
Returns the first value for a key.

If `type` is not None, the value will be converted by calling `type`
with the value as argument. If type() raises `ValueError`, it will
be treated as if the value didn't exist, and `default` will be
returned instead.
[ "Returns", "the", "first", "value", "for", "a", "key", "." ]
python
train
36
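The same forgiving lookup works on any mapping; a dict-based sketch showing the ValueError fallback (the `Params` class is a hypothetical example, not rhino's type):

class Params(dict):
    def get(self, key, default=None, type=None):
        # Missing key or failed conversion both fall back to `default`.
        try:
            value = self[key]
            return type(value) if type is not None else value
        except (KeyError, ValueError):
            return default

q = Params(page='3', sort='name')
print(q.get('page', 1, type=int))  # -> 3
print(q.get('sort', 1, type=int))  # -> 1 ('name' fails int())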
klen/makesite
makesite/site.py
https://github.com/klen/makesite/blob/f6f77a43a04a256189e8fffbeac1ffd63f35a10c/makesite/site.py#L24-L31
def get_info(self, full=False):
        " Return printable information about current site. "
        if full:
            context = self.as_dict()
            return "".join("{0:<25} = {1}\n".format(
                key, context[key]) for key in sorted(context.iterkeys()))
        return "%s [%s]" % (self.get_name(), self.template)
[ "def", "get_info", "(", "self", ",", "full", "=", "False", ")", ":", "if", "full", ":", "context", "=", "self", ".", "as_dict", "(", ")", "return", "\"\"", ".", "join", "(", "\"{0:<25} = {1}\\n\"", ".", "format", "(", "key", ",", "context", "[", "key", "]", ")", "for", "key", "in", "sorted", "(", "context", ".", "iterkeys", "(", ")", ")", ")", "return", "\"%s [%s]\"", "%", "(", "self", ".", "get_name", "(", ")", ",", "self", ".", "template", ")" ]
Return printable information about current site.
[ "Return", "printable", "information", "about", "current", "site", "." ]
python
train
42.25
CivicSpleen/ambry
ambry/library/search_backends/postgres_backend.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/search_backends/postgres_backend.py#L515-L523
def is_indexed(self, partition): """ Returns True if partition is already indexed. Otherwise returns False. """ query = text(""" SELECT vid FROM partition_index WHERE vid = :vid; """) result = self.execute(query, vid=partition.vid) return bool(result.fetchall())
[ "def", "is_indexed", "(", "self", ",", "partition", ")", ":", "query", "=", "text", "(", "\"\"\"\n SELECT vid\n FROM partition_index\n WHERE vid = :vid;\n \"\"\"", ")", "result", "=", "self", ".", "execute", "(", "query", ",", "vid", "=", "partition", ".", "vid", ")", "return", "bool", "(", "result", ".", "fetchall", "(", ")", ")" ]
Returns True if partition is already indexed. Otherwise returns False.
[ "Returns", "True", "if", "partition", "is", "already", "indexed", ".", "Otherwise", "returns", "False", "." ]
python
train
36.666667
RedHatInsights/insights-core
insights/client/collection_rules.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/collection_rules.py#L160-L167
def write_collection_data(self, path, data): """ Write collections rules to disk """ flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC fd = os.open(path, flags, 0o600) with os.fdopen(fd, 'w') as dyn_conf_file: dyn_conf_file.write(data)
[ "def", "write_collection_data", "(", "self", ",", "path", ",", "data", ")", ":", "flags", "=", "os", ".", "O_WRONLY", "|", "os", ".", "O_CREAT", "|", "os", ".", "O_TRUNC", "fd", "=", "os", ".", "open", "(", "path", ",", "flags", ",", "0o600", ")", "with", "os", ".", "fdopen", "(", "fd", ",", "'w'", ")", "as", "dyn_conf_file", ":", "dyn_conf_file", ".", "write", "(", "data", ")" ]
Write collection rules to disk
[ "Write", "collections", "rules", "to", "disk" ]
python
train
35.5
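A standalone sketch of the same create-with-0600 pattern used in the record above (the path and payload here are made up):

import os

def write_private_file(path, data):
    # os.open applies the 0o600 mode at creation time (subject to the umask),
    # so the file is never group/world readable, unlike open() + os.chmod().
    flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    fd = os.open(path, flags, 0o600)
    with os.fdopen(fd, 'w') as handle:
        handle.write(data)

write_private_file('/tmp/example-rules.json', '{"commands": []}')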
python-openxml/python-docx
docx/oxml/xmlchemy.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/oxml/xmlchemy.py#L469-L477
def _prop_name(self): """ Calculate property name from tag name, e.g. a:schemeClr -> schemeClr. """ if ':' in self._nsptagname: start = self._nsptagname.index(':') + 1 else: start = 0 return self._nsptagname[start:]
[ "def", "_prop_name", "(", "self", ")", ":", "if", "':'", "in", "self", ".", "_nsptagname", ":", "start", "=", "self", ".", "_nsptagname", ".", "index", "(", "':'", ")", "+", "1", "else", ":", "start", "=", "0", "return", "self", ".", "_nsptagname", "[", "start", ":", "]" ]
Calculate property name from tag name, e.g. a:schemeClr -> schemeClr.
[ "Calculate", "property", "name", "from", "tag", "name", "e", ".", "g", ".", "a", ":", "schemeClr", "-", ">", "schemeClr", "." ]
python
train
31
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/settings/settings_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/settings/settings_client.py#L60-L74
def set_entries(self, entries, user_scope): """SetEntries. [Preview API] Set the specified setting entry values for the given user/all-users scope :param {object} entries: The entries to set :param str user_scope: User-Scope at which to set the values. Should be "me" for the current user or "host" for all users. """ route_values = {} if user_scope is not None: route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str') content = self._serialize.body(entries, '{object}') self._send(http_method='PATCH', location_id='cd006711-163d-4cd4-a597-b05bad2556ff', version='5.0-preview.1', route_values=route_values, content=content)
[ "def", "set_entries", "(", "self", ",", "entries", ",", "user_scope", ")", ":", "route_values", "=", "{", "}", "if", "user_scope", "is", "not", "None", ":", "route_values", "[", "'userScope'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'user_scope'", ",", "user_scope", ",", "'str'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "entries", ",", "'{object}'", ")", "self", ".", "_send", "(", "http_method", "=", "'PATCH'", ",", "location_id", "=", "'cd006711-163d-4cd4-a597-b05bad2556ff'", ",", "version", "=", "'5.0-preview.1'", ",", "route_values", "=", "route_values", ",", "content", "=", "content", ")" ]
SetEntries. [Preview API] Set the specified setting entry values for the given user/all-users scope :param {object} entries: The entries to set :param str user_scope: User-Scope at which to set the values. Should be "me" for the current user or "host" for all users.
[ "SetEntries", ".", "[", "Preview", "API", "]", "Set", "the", "specified", "setting", "entry", "values", "for", "the", "given", "user", "/", "all", "-", "users", "scope", ":", "param", "{", "object", "}", "entries", ":", "The", "entries", "to", "set", ":", "param", "str", "user_scope", ":", "User", "-", "Scope", "at", "which", "to", "set", "the", "values", ".", "Should", "be", "me", "for", "the", "current", "user", "or", "host", "for", "all", "users", "." ]
python
train
52.933333
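A hedged usage sketch for set_entries; it assumes a SettingsClient instance already constructed from an authenticated azure-devops connection, and the setting keys and values are invented:

# `settings_client` is assumed to be an authenticated SettingsClient instance.
entries = {
    'Theme': 'dark',         # hypothetical setting keys and values
    'Editor/TabSize': 4,
}
settings_client.set_entries(entries, user_scope='me')    # current user only
settings_client.set_entries(entries, user_scope='host')  # all users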
saltstack/salt
salt/runners/venafiapi.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/venafiapi.py#L87-L167
def gen_key(minion_id, dns_name=None, zone='default', password=None): ''' Generate and return an private_key. If a ``dns_name`` is passed in, the private_key will be cached under that name. The type of key and the parameters used to generate the key are based on the default certificate use policy associated with the specified zone. CLI Example: .. code-block:: bash salt-run venafi.gen_key <minion_id> [dns_name] [zone] [password] ''' # Get the default certificate use policy associated with the zone # so we can generate keys that conform with policy # The /v1/zones/tag/{name} API call is a shortcut to get the zoneID # directly from the name qdata = __utils__['http.query']( '{0}/zones/tag/{1}'.format(_base_url(), zone), method='GET', decode=True, decode_type='json', header_dict={ 'tppl-api-key': _api_key(), 'Content-Type': 'application/json', }, ) zone_id = qdata['dict']['id'] # the /v1/certificatepolicies?zoneId API call returns the default # certificate use and certificate identity policies qdata = __utils__['http.query']( '{0}/certificatepolicies?zoneId={1}'.format(_base_url(), zone_id), method='GET', decode=True, decode_type='json', header_dict={ 'tppl-api-key': _api_key(), 'Content-Type': 'application/json', }, ) policies = qdata['dict']['certificatePolicies'] # Extract the key length and key type from the certificate use policy # and generate the private key accordingly for policy in policies: if policy['certificatePolicyType'] == "CERTIFICATE_USE": keyTypes = policy['keyTypes'] # in case multiple keytypes and key lengths are supported # always use the first key type and key length keygen_type = keyTypes[0]['keyType'] key_len = keyTypes[0]['keyLengths'][0] if int(key_len) < 2048: key_len = 2048 if keygen_type == "RSA": if HAS_M2: gen = RSA.gen_key(key_len, 65537) private_key = gen.as_pem(cipher='des_ede3_cbc', callback=lambda x: six.b(password)) else: gen = RSA.generate(bits=key_len) private_key = gen.exportKey('PEM', password) if dns_name is not None: bank = 'venafi/domains' cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR) try: data = cache.fetch(bank, dns_name) data['private_key'] = private_key data['minion_id'] = minion_id except TypeError: data = {'private_key': private_key, 'minion_id': minion_id} cache.store(bank, dns_name, data) return private_key
[ "def", "gen_key", "(", "minion_id", ",", "dns_name", "=", "None", ",", "zone", "=", "'default'", ",", "password", "=", "None", ")", ":", "# Get the default certificate use policy associated with the zone", "# so we can generate keys that conform with policy", "# The /v1/zones/tag/{name} API call is a shortcut to get the zoneID", "# directly from the name", "qdata", "=", "__utils__", "[", "'http.query'", "]", "(", "'{0}/zones/tag/{1}'", ".", "format", "(", "_base_url", "(", ")", ",", "zone", ")", ",", "method", "=", "'GET'", ",", "decode", "=", "True", ",", "decode_type", "=", "'json'", ",", "header_dict", "=", "{", "'tppl-api-key'", ":", "_api_key", "(", ")", ",", "'Content-Type'", ":", "'application/json'", ",", "}", ",", ")", "zone_id", "=", "qdata", "[", "'dict'", "]", "[", "'id'", "]", "# the /v1/certificatepolicies?zoneId API call returns the default", "# certificate use and certificate identity policies", "qdata", "=", "__utils__", "[", "'http.query'", "]", "(", "'{0}/certificatepolicies?zoneId={1}'", ".", "format", "(", "_base_url", "(", ")", ",", "zone_id", ")", ",", "method", "=", "'GET'", ",", "decode", "=", "True", ",", "decode_type", "=", "'json'", ",", "header_dict", "=", "{", "'tppl-api-key'", ":", "_api_key", "(", ")", ",", "'Content-Type'", ":", "'application/json'", ",", "}", ",", ")", "policies", "=", "qdata", "[", "'dict'", "]", "[", "'certificatePolicies'", "]", "# Extract the key length and key type from the certificate use policy", "# and generate the private key accordingly", "for", "policy", "in", "policies", ":", "if", "policy", "[", "'certificatePolicyType'", "]", "==", "\"CERTIFICATE_USE\"", ":", "keyTypes", "=", "policy", "[", "'keyTypes'", "]", "# in case multiple keytypes and key lengths are supported", "# always use the first key type and key length", "keygen_type", "=", "keyTypes", "[", "0", "]", "[", "'keyType'", "]", "key_len", "=", "keyTypes", "[", "0", "]", "[", "'keyLengths'", "]", "[", "0", "]", "if", "int", "(", "key_len", ")", "<", "2048", ":", "key_len", "=", "2048", "if", "keygen_type", "==", "\"RSA\"", ":", "if", "HAS_M2", ":", "gen", "=", "RSA", ".", "gen_key", "(", "key_len", ",", "65537", ")", "private_key", "=", "gen", ".", "as_pem", "(", "cipher", "=", "'des_ede3_cbc'", ",", "callback", "=", "lambda", "x", ":", "six", ".", "b", "(", "password", ")", ")", "else", ":", "gen", "=", "RSA", ".", "generate", "(", "bits", "=", "key_len", ")", "private_key", "=", "gen", ".", "exportKey", "(", "'PEM'", ",", "password", ")", "if", "dns_name", "is", "not", "None", ":", "bank", "=", "'venafi/domains'", "cache", "=", "salt", ".", "cache", ".", "Cache", "(", "__opts__", ",", "syspaths", ".", "CACHE_DIR", ")", "try", ":", "data", "=", "cache", ".", "fetch", "(", "bank", ",", "dns_name", ")", "data", "[", "'private_key'", "]", "=", "private_key", "data", "[", "'minion_id'", "]", "=", "minion_id", "except", "TypeError", ":", "data", "=", "{", "'private_key'", ":", "private_key", ",", "'minion_id'", ":", "minion_id", "}", "cache", ".", "store", "(", "bank", ",", "dns_name", ",", "data", ")", "return", "private_key" ]
Generate and return a private_key. If a ``dns_name`` is passed in, the
private_key will be cached under that name. The type of key and the
parameters used to generate the key are based on the default certificate
use policy associated with the specified zone.

CLI Example:

.. code-block:: bash

    salt-run venafi.gen_key <minion_id> [dns_name] [zone] [password]
[ "Generate", "and", "return", "an", "private_key", ".", "If", "a", "dns_name", "is", "passed", "in", "the", "private_key", "will", "be", "cached", "under", "that", "name", ".", "The", "type", "of", "key", "and", "the", "parameters", "used", "to", "generate", "the", "key", "are", "based", "on", "the", "default", "certificate", "use", "policy", "associated", "with", "the", "specified", "zone", "." ]
python
train
34.419753
StackStorm/pybind
pybind/slxos/v17s_1_02/interface/ethernet/switchport/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/interface/ethernet/switchport/__init__.py#L147-L170
def _set_port_security(self, v, load=False): """ Setter method for port_security, mapped from YANG variable /interface/ethernet/switchport/port_security (container) If this variable is read-only (config: false) in the source YANG file, then _set_port_security is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_port_security() directly. YANG Description: Enable port-security feature """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=port_security.port_security, is_container='container', presence=True, yang_name="port-security", rest_name="port-security", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable port-security feature', u'callpoint': u'interface_portsecurity'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """port_security must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=port_security.port_security, is_container='container', presence=True, yang_name="port-security", rest_name="port-security", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable port-security feature', u'callpoint': u'interface_portsecurity'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""", }) self.__port_security = t if hasattr(self, '_set'): self._set()
[ "def", "_set_port_security", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "port_security", ".", "port_security", ",", "is_container", "=", "'container'", ",", "presence", "=", "True", ",", "yang_name", "=", "\"port-security\"", ",", "rest_name", "=", "\"port-security\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Enable port-security feature'", ",", "u'callpoint'", ":", "u'interface_portsecurity'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-interface'", ",", "defining_module", "=", "'brocade-interface'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"port_security must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=port_security.port_security, is_container='container', presence=True, yang_name=\"port-security\", rest_name=\"port-security\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable port-security feature', u'callpoint': u'interface_portsecurity'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__port_security", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for port_security, mapped from YANG variable /interface/ethernet/switchport/port_security (container) If this variable is read-only (config: false) in the source YANG file, then _set_port_security is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_port_security() directly. YANG Description: Enable port-security feature
[ "Setter", "method", "for", "port_security", "mapped", "from", "YANG", "variable", "/", "interface", "/", "ethernet", "/", "switchport", "/", "port_security", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_port_security", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_port_security", "()", "directly", "." ]
python
train
75.291667
iotile/coretools
iotilecore/iotile/core/utilities/schema_verify/dict_verify.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/schema_verify/dict_verify.py#L43-L63
def key_rule(self, regex, verifier): """Add a rule with a pattern that should apply to all keys. Any key not explicitly listed in an add_required or add_optional rule must match ONE OF the rules given in a call to key_rule(). So these rules are all OR'ed together. In this case you should pass a raw string specifying a regex that is used to determine if the rule is used to check a given key. Args: regex (str): The regular expression used to match the rule or None if this should apply to all verifier (Verifier): The verification rule """ if regex is not None: regex = re.compile(regex) self._additional_key_rules.append((regex, verifier))
[ "def", "key_rule", "(", "self", ",", "regex", ",", "verifier", ")", ":", "if", "regex", "is", "not", "None", ":", "regex", "=", "re", ".", "compile", "(", "regex", ")", "self", ".", "_additional_key_rules", ".", "append", "(", "(", "regex", ",", "verifier", ")", ")" ]
Add a rule with a pattern that should apply to all keys. Any key not explicitly listed in an add_required or add_optional rule must match ONE OF the rules given in a call to key_rule(). So these rules are all OR'ed together. In this case you should pass a raw string specifying a regex that is used to determine if the rule is used to check a given key. Args: regex (str): The regular expression used to match the rule or None if this should apply to all verifier (Verifier): The verification rule
[ "Add", "a", "rule", "with", "a", "pattern", "that", "should", "apply", "to", "all", "keys", "." ]
python
train
36.190476
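A self-contained re-implementation of the OR'ed key-rule dispatch described above, using only the standard library (the rule names are illustrative, not the library's verifier objects):

import re

key_rules = []  # list of (compiled regex or None, rule) pairs

def add_key_rule(pattern, rule):
    key_rules.append((re.compile(pattern) if pattern is not None else None, rule))

def rule_for(key):
    # A key passes if it matches at least one pattern; None applies to all keys.
    for regex, rule in key_rules:
        if regex is None or regex.search(key):
            return rule
    return None

add_key_rule(r'^debug_', 'bool-check')      # hypothetical verifier names
add_key_rule(r'_count$', 'integer-check')
print(rule_for('debug_mode'))    # bool-check
print(rule_for('retry_count'))   # integer-check
print(rule_for('other'))         # None -- no rule matched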
hannes-brt/cudnn-python-wrappers
libcudnn.py
https://github.com/hannes-brt/cudnn-python-wrappers/blob/55aab1242924c2fd43db150cf2ccc2a3df958dd5/libcudnn.py#L804-L822
def cudnnCreateConvolutionDescriptor():
    """ Create a convolution descriptor.

    This function creates a convolution descriptor object by allocating the memory
    needed to hold its opaque structure.

    Returns
    -------
    convDesc : cudnnConvolutionDescriptor
        Handle to newly allocated convolution descriptor.
    """

    convDesc = ctypes.c_void_p()

    status = _libcudnn.cudnnCreateConvolutionDescriptor(ctypes.byref(convDesc))
    cudnnCheckStatus(status)
    return convDesc.value
[ "def", "cudnnCreateConvolutionDescriptor", "(", ")", ":", "convDesc", "=", "ctypes", ".", "c_void_p", "(", ")", "status", "=", "_libcudnn", ".", "cudnnCreateConvolutionDescriptor", "(", "ctypes", ".", "byref", "(", "convDesc", ")", ")", "cudnnCheckStatus", "(", "status", ")", "return", "convDesc", ".", "value" ]
Create a convolution descriptor. This function creates a convolution descriptor object by allocating the memory needed to hold its opaque structure. Returns ------- convDesc : cudnnConvolutionDescriptor Handle to newly allocated convolution descriptor.
[ "Create", "a", "convolution", "descriptor", "." ]
python
train
26.105263
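The record above follows the common ctypes out-parameter pattern: allocate a ctypes object, pass a pointer to the C function, then read .value afterwards. A pure-Python sketch of that flow, with the C call faked so it runs anywhere:

import ctypes

def fake_create_handle(out_ptr):
    # Stand-in for a C call such as cudnnCreateConvolutionDescriptor: it
    # writes an opaque handle through the pointer and returns a status code.
    out_ptr.contents.value = 0xDEADBEEF
    return 0  # 0 == success

handle = ctypes.c_void_p()
status = fake_create_handle(ctypes.pointer(handle))
assert status == 0
print(hex(handle.value))  # 0xdeadbeef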
rackerlabs/simpl
simpl/git.py
https://github.com/rackerlabs/simpl/blob/60ed3336a931cd6a7a7246e60f26165d9dc7c99c/simpl/git.py#L240-L259
def git_list_refs(repo_dir): """List references available in the local repo with commit ids. This is similar to ls-remote, but shows the *local* refs. Return format: .. code-block:: python {<ref1>: <commit_hash1>, <ref2>: <commit_hash2>, ..., <refN>: <commit_hashN>, } """ command = ['git', 'show-ref', '--dereference', '--head'] raw = execute_git_command(command, repo_dir=repo_dir).splitlines() output = [l.strip() for l in raw if l.strip()] return {ref: commit_hash for commit_hash, ref in [l.split(None, 1) for l in output]}
[ "def", "git_list_refs", "(", "repo_dir", ")", ":", "command", "=", "[", "'git'", ",", "'show-ref'", ",", "'--dereference'", ",", "'--head'", "]", "raw", "=", "execute_git_command", "(", "command", ",", "repo_dir", "=", "repo_dir", ")", ".", "splitlines", "(", ")", "output", "=", "[", "l", ".", "strip", "(", ")", "for", "l", "in", "raw", "if", "l", ".", "strip", "(", ")", "]", "return", "{", "ref", ":", "commit_hash", "for", "commit_hash", ",", "ref", "in", "[", "l", ".", "split", "(", "None", ",", "1", ")", "for", "l", "in", "output", "]", "}" ]
List references available in the local repo with commit ids. This is similar to ls-remote, but shows the *local* refs. Return format: .. code-block:: python {<ref1>: <commit_hash1>, <ref2>: <commit_hash2>, ..., <refN>: <commit_hashN>, }
[ "List", "references", "available", "in", "the", "local", "repo", "with", "commit", "ids", "." ]
python
train
30.2
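A hedged usage sketch, assuming git_list_refs above is importable and the working directory is a git repository:

refs = git_list_refs('.')
for ref, commit in sorted(refs.items()):
    if ref.startswith('refs/heads/'):   # local branches only
        print(commit[:8], ref)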
DistrictDataLabs/yellowbrick
yellowbrick/cluster/elbow.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/cluster/elbow.py#L48-L107
def distortion_score(X, labels, metric='euclidean'): """ Compute the mean distortion of all samples. The distortion is computed as the the sum of the squared distances between each observation and its closest centroid. Logically, this is the metric that K-Means attempts to minimize as it is fitting the model. .. seealso:: http://kldavenport.com/the-cost-function-of-k-means/ Parameters ---------- X : array, shape = [n_samples, n_features] or [n_samples_a, n_samples_a] Array of pairwise distances between samples if metric == "precomputed" or a feature array for computing distances against the labels. labels : array, shape = [n_samples] Predicted labels for each sample metric : string The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by `sklearn.metrics.pairwise.pairwise_distances <http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html#sklearn.metrics.pairwise.pairwise_distances>`_ .. todo:: add sample_size and random_state kwds similar to silhouette_score """ # Encode labels to get unique centers and groups le = LabelEncoder() labels = le.fit_transform(labels) unique_labels = le.classes_ # Sum of the distortions distortion = 0 # Loop through each label (center) to compute the centroid for current_label in unique_labels: # Mask the instances that belong to the current label mask = labels == current_label instances = X[mask] # Compute the center of these instances center = instances.mean(axis=0) # NOTE: csc_matrix and csr_matrix mean returns a 2D array, numpy.mean # returns an array of 1 dimension less than the input. We expect # instances to be a 2D array, therefore to do pairwise computation we # require center to be a 2D array with a single row (the center). # See #370 for more detail. if not sp.issparse(instances): center = np.array([center]) # Compute the square distances from the instances to the center distances = pairwise_distances(instances, center, metric=metric) distances = distances ** 2 # Add the sum of square distance to the distortion distortion += distances.sum() return distortion
[ "def", "distortion_score", "(", "X", ",", "labels", ",", "metric", "=", "'euclidean'", ")", ":", "# Encode labels to get unique centers and groups", "le", "=", "LabelEncoder", "(", ")", "labels", "=", "le", ".", "fit_transform", "(", "labels", ")", "unique_labels", "=", "le", ".", "classes_", "# Sum of the distortions", "distortion", "=", "0", "# Loop through each label (center) to compute the centroid", "for", "current_label", "in", "unique_labels", ":", "# Mask the instances that belong to the current label", "mask", "=", "labels", "==", "current_label", "instances", "=", "X", "[", "mask", "]", "# Compute the center of these instances", "center", "=", "instances", ".", "mean", "(", "axis", "=", "0", ")", "# NOTE: csc_matrix and csr_matrix mean returns a 2D array, numpy.mean", "# returns an array of 1 dimension less than the input. We expect", "# instances to be a 2D array, therefore to do pairwise computation we", "# require center to be a 2D array with a single row (the center).", "# See #370 for more detail.", "if", "not", "sp", ".", "issparse", "(", "instances", ")", ":", "center", "=", "np", ".", "array", "(", "[", "center", "]", ")", "# Compute the square distances from the instances to the center", "distances", "=", "pairwise_distances", "(", "instances", ",", "center", ",", "metric", "=", "metric", ")", "distances", "=", "distances", "**", "2", "# Add the sum of square distance to the distortion", "distortion", "+=", "distances", ".", "sum", "(", ")", "return", "distortion" ]
Compute the mean distortion of all samples.

The distortion is computed as the sum of the squared distances between
each observation and its closest centroid. Logically, this is the
metric that K-Means attempts to minimize as it is fitting the model.

.. seealso:: http://kldavenport.com/the-cost-function-of-k-means/

Parameters
----------
X : array, shape = [n_samples, n_features] or [n_samples_a, n_samples_a]
    Array of pairwise distances between samples if metric ==
    "precomputed" or a feature array for computing distances against the
    labels.

labels : array, shape = [n_samples]
    Predicted labels for each sample

metric : string
    The metric to use when calculating distance between instances in a
    feature array. If metric is a string, it must be one of the options
    allowed by `sklearn.metrics.pairwise.pairwise_distances
    <http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html#sklearn.metrics.pairwise.pairwise_distances>`_

.. todo:: add sample_size and random_state kwds similar to silhouette_score
[ "Compute", "the", "mean", "distortion", "of", "all", "samples", "." ]
python
train
40.016667
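A usage sketch for distortion_score with scikit-learn; the record's path suggests it is importable from yellowbrick.cluster.elbow, and this score is what an elbow plot tracks across k:

from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=200, centers=4, random_state=0)
for k in (2, 4, 8):
    labels = KMeans(n_clusters=k, random_state=0).fit_predict(X)
    # Lower distortion means tighter clusters; the decrease flattens past k=4.
    print(k, distortion_score(X, labels))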
minio/minio-py
minio/helpers.py
https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/helpers.py#L308-L327
def is_virtual_host(endpoint_url, bucket_name): """ Check to see if the ``bucket_name`` can be part of virtual host style. :param endpoint_url: Endpoint url which will be used for virtual host. :param bucket_name: Bucket name to be validated against. """ is_valid_bucket_name(bucket_name) parsed_url = urlsplit(endpoint_url) # bucket_name can be valid but '.' in the hostname will fail # SSL certificate validation. So do not use host-style for # such buckets. if 'https' in parsed_url.scheme and '.' in bucket_name: return False for host in ['s3.amazonaws.com', 'aliyuncs.com']: if host in parsed_url.netloc: return True return False
[ "def", "is_virtual_host", "(", "endpoint_url", ",", "bucket_name", ")", ":", "is_valid_bucket_name", "(", "bucket_name", ")", "parsed_url", "=", "urlsplit", "(", "endpoint_url", ")", "# bucket_name can be valid but '.' in the hostname will fail", "# SSL certificate validation. So do not use host-style for", "# such buckets.", "if", "'https'", "in", "parsed_url", ".", "scheme", "and", "'.'", "in", "bucket_name", ":", "return", "False", "for", "host", "in", "[", "'s3.amazonaws.com'", ",", "'aliyuncs.com'", "]", ":", "if", "host", "in", "parsed_url", ".", "netloc", ":", "return", "True", "return", "False" ]
Check to see if the ``bucket_name`` can be part of virtual host style. :param endpoint_url: Endpoint url which will be used for virtual host. :param bucket_name: Bucket name to be validated against.
[ "Check", "to", "see", "if", "the", "bucket_name", "can", "be", "part", "of", "virtual", "host", "style", "." ]
python
train
35.05
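Expected behaviour of the checks above, shown as a few illustrative calls (the endpoints are examples and the function is assumed to be in scope):

print(is_virtual_host('https://s3.amazonaws.com', 'my-bucket'))  # True
print(is_virtual_host('https://s3.amazonaws.com', 'my.bucket'))  # False: '.' would break TLS validation
print(is_virtual_host('https://play.min.io', 'my-bucket'))       # False: host not in the allow-list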
KrzyHonk/bpmn-python
bpmn_python/bpmn_diagram_import.py
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_diagram_import.py#L262-L297
def import_lane_element(lane_element, plane_element): """ Method for importing 'laneSet' element from diagram file. :param lane_element: XML document element, :param plane_element: object representing a BPMN XML 'plane' element. """ lane_id = lane_element.getAttribute(consts.Consts.id) lane_name = lane_element.getAttribute(consts.Consts.name) child_lane_set_attr = {} flow_node_refs = [] for element in utils.BpmnImportUtils.iterate_elements(lane_element): if element.nodeType != element.TEXT_NODE: tag_name = utils.BpmnImportUtils.remove_namespace_from_tag_name(element.tagName) if tag_name == consts.Consts.child_lane_set: child_lane_set_attr = BpmnDiagramGraphImport.import_child_lane_set_element(element, plane_element) elif tag_name == consts.Consts.flow_node_ref: flow_node_ref_id = element.firstChild.nodeValue flow_node_refs.append(flow_node_ref_id) lane_attr = {consts.Consts.id: lane_id, consts.Consts.name: lane_name, consts.Consts.child_lane_set: child_lane_set_attr, consts.Consts.flow_node_refs: flow_node_refs} shape_element = None for element in utils.BpmnImportUtils.iterate_elements(plane_element): if element.nodeType != element.TEXT_NODE and element.getAttribute(consts.Consts.bpmn_element) == lane_id: shape_element = element if shape_element is not None: bounds = shape_element.getElementsByTagNameNS("*", "Bounds")[0] lane_attr[consts.Consts.is_horizontal] = shape_element.getAttribute(consts.Consts.is_horizontal) lane_attr[consts.Consts.width] = bounds.getAttribute(consts.Consts.width) lane_attr[consts.Consts.height] = bounds.getAttribute(consts.Consts.height) lane_attr[consts.Consts.x] = bounds.getAttribute(consts.Consts.x) lane_attr[consts.Consts.y] = bounds.getAttribute(consts.Consts.y) return lane_attr
[ "def", "import_lane_element", "(", "lane_element", ",", "plane_element", ")", ":", "lane_id", "=", "lane_element", ".", "getAttribute", "(", "consts", ".", "Consts", ".", "id", ")", "lane_name", "=", "lane_element", ".", "getAttribute", "(", "consts", ".", "Consts", ".", "name", ")", "child_lane_set_attr", "=", "{", "}", "flow_node_refs", "=", "[", "]", "for", "element", "in", "utils", ".", "BpmnImportUtils", ".", "iterate_elements", "(", "lane_element", ")", ":", "if", "element", ".", "nodeType", "!=", "element", ".", "TEXT_NODE", ":", "tag_name", "=", "utils", ".", "BpmnImportUtils", ".", "remove_namespace_from_tag_name", "(", "element", ".", "tagName", ")", "if", "tag_name", "==", "consts", ".", "Consts", ".", "child_lane_set", ":", "child_lane_set_attr", "=", "BpmnDiagramGraphImport", ".", "import_child_lane_set_element", "(", "element", ",", "plane_element", ")", "elif", "tag_name", "==", "consts", ".", "Consts", ".", "flow_node_ref", ":", "flow_node_ref_id", "=", "element", ".", "firstChild", ".", "nodeValue", "flow_node_refs", ".", "append", "(", "flow_node_ref_id", ")", "lane_attr", "=", "{", "consts", ".", "Consts", ".", "id", ":", "lane_id", ",", "consts", ".", "Consts", ".", "name", ":", "lane_name", ",", "consts", ".", "Consts", ".", "child_lane_set", ":", "child_lane_set_attr", ",", "consts", ".", "Consts", ".", "flow_node_refs", ":", "flow_node_refs", "}", "shape_element", "=", "None", "for", "element", "in", "utils", ".", "BpmnImportUtils", ".", "iterate_elements", "(", "plane_element", ")", ":", "if", "element", ".", "nodeType", "!=", "element", ".", "TEXT_NODE", "and", "element", ".", "getAttribute", "(", "consts", ".", "Consts", ".", "bpmn_element", ")", "==", "lane_id", ":", "shape_element", "=", "element", "if", "shape_element", "is", "not", "None", ":", "bounds", "=", "shape_element", ".", "getElementsByTagNameNS", "(", "\"*\"", ",", "\"Bounds\"", ")", "[", "0", "]", "lane_attr", "[", "consts", ".", "Consts", ".", "is_horizontal", "]", "=", "shape_element", ".", "getAttribute", "(", "consts", ".", "Consts", ".", "is_horizontal", ")", "lane_attr", "[", "consts", ".", "Consts", ".", "width", "]", "=", "bounds", ".", "getAttribute", "(", "consts", ".", "Consts", ".", "width", ")", "lane_attr", "[", "consts", ".", "Consts", ".", "height", "]", "=", "bounds", ".", "getAttribute", "(", "consts", ".", "Consts", ".", "height", ")", "lane_attr", "[", "consts", ".", "Consts", ".", "x", "]", "=", "bounds", ".", "getAttribute", "(", "consts", ".", "Consts", ".", "x", ")", "lane_attr", "[", "consts", ".", "Consts", ".", "y", "]", "=", "bounds", ".", "getAttribute", "(", "consts", ".", "Consts", ".", "y", ")", "return", "lane_attr" ]
Method for importing 'laneSet' element from diagram file. :param lane_element: XML document element, :param plane_element: object representing a BPMN XML 'plane' element.
[ "Method", "for", "importing", "laneSet", "element", "from", "diagram", "file", "." ]
python
train
58
earwig/mwparserfromhell
mwparserfromhell/wikicode.py
https://github.com/earwig/mwparserfromhell/blob/98dc30902d35c714a70aca8e6616f49d71cb24cc/mwparserfromhell/wikicode.py#L334-L367
def get_ancestors(self, obj): """Return a list of all ancestor nodes of the :class:`.Node` *obj*. The list is ordered from the most shallow ancestor (greatest great- grandparent) to the direct parent. The node itself is not included in the list. For example:: >>> text = "{{a|{{b|{{c|{{d}}}}}}}}" >>> code = mwparserfromhell.parse(text) >>> node = code.filter_templates(matches=lambda n: n == "{{d}}")[0] >>> code.get_ancestors(node) ['{{a|{{b|{{c|{{d}}}}}}}}', '{{b|{{c|{{d}}}}}}', '{{c|{{d}}}}'] Will return an empty list if *obj* is at the top level of this Wikicode object. Will raise :exc:`ValueError` if it wasn't found. """ def _get_ancestors(code, needle): for node in code.nodes: if node is needle: return [] for code in node.__children__(): ancestors = _get_ancestors(code, needle) if ancestors is not None: return [node] + ancestors if isinstance(obj, Wikicode): obj = obj.get(0) elif not isinstance(obj, Node): raise ValueError(obj) ancestors = _get_ancestors(self, obj) if ancestors is None: raise ValueError(obj) return ancestors
[ "def", "get_ancestors", "(", "self", ",", "obj", ")", ":", "def", "_get_ancestors", "(", "code", ",", "needle", ")", ":", "for", "node", "in", "code", ".", "nodes", ":", "if", "node", "is", "needle", ":", "return", "[", "]", "for", "code", "in", "node", ".", "__children__", "(", ")", ":", "ancestors", "=", "_get_ancestors", "(", "code", ",", "needle", ")", "if", "ancestors", "is", "not", "None", ":", "return", "[", "node", "]", "+", "ancestors", "if", "isinstance", "(", "obj", ",", "Wikicode", ")", ":", "obj", "=", "obj", ".", "get", "(", "0", ")", "elif", "not", "isinstance", "(", "obj", ",", "Node", ")", ":", "raise", "ValueError", "(", "obj", ")", "ancestors", "=", "_get_ancestors", "(", "self", ",", "obj", ")", "if", "ancestors", "is", "None", ":", "raise", "ValueError", "(", "obj", ")", "return", "ancestors" ]
Return a list of all ancestor nodes of the :class:`.Node` *obj*. The list is ordered from the most shallow ancestor (greatest great- grandparent) to the direct parent. The node itself is not included in the list. For example:: >>> text = "{{a|{{b|{{c|{{d}}}}}}}}" >>> code = mwparserfromhell.parse(text) >>> node = code.filter_templates(matches=lambda n: n == "{{d}}")[0] >>> code.get_ancestors(node) ['{{a|{{b|{{c|{{d}}}}}}}}', '{{b|{{c|{{d}}}}}}', '{{c|{{d}}}}'] Will return an empty list if *obj* is at the top level of this Wikicode object. Will raise :exc:`ValueError` if it wasn't found.
[ "Return", "a", "list", "of", "all", "ancestor", "nodes", "of", "the", ":", "class", ":", ".", "Node", "*", "obj", "*", "." ]
python
train
39.5
hydraplatform/hydra-base
hydra_base/lib/template.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/template.py#L2147-L2176
def get_etree_layout_as_dict(layout_tree): """ Convert something that looks like this: <layout> <item> <name>color</name> <value>red</value> </item> <item> <name>shapefile</name> <value>blah.shp</value> </item> </layout> Into something that looks like this: { 'color' : ['red'], 'shapefile' : ['blah.shp'] } """ layout_dict = dict() for item in layout_tree.findall('item'): name = item.find('name').text val_element = item.find('value') value = val_element.text.strip() if value == '': children = val_element.getchildren() value = etree.tostring(children[0], pretty_print=True, encoding="unicode") layout_dict[name] = value return layout_dict
[ "def", "get_etree_layout_as_dict", "(", "layout_tree", ")", ":", "layout_dict", "=", "dict", "(", ")", "for", "item", "in", "layout_tree", ".", "findall", "(", "'item'", ")", ":", "name", "=", "item", ".", "find", "(", "'name'", ")", ".", "text", "val_element", "=", "item", ".", "find", "(", "'value'", ")", "value", "=", "val_element", ".", "text", ".", "strip", "(", ")", "if", "value", "==", "''", ":", "children", "=", "val_element", ".", "getchildren", "(", ")", "value", "=", "etree", ".", "tostring", "(", "children", "[", "0", "]", ",", "pretty_print", "=", "True", ",", "encoding", "=", "\"unicode\"", ")", "layout_dict", "[", "name", "]", "=", "value", "return", "layout_dict" ]
Convert something that looks like this: <layout> <item> <name>color</name> <value>red</value> </item> <item> <name>shapefile</name> <value>blah.shp</value> </item> </layout> Into something that looks like this: { 'color' : ['red'], 'shapefile' : ['blah.shp'] }
[ "Convert", "something", "that", "looks", "like", "this", ":", "<layout", ">", "<item", ">", "<name", ">", "color<", "/", "name", ">", "<value", ">", "red<", "/", "value", ">", "<", "/", "item", ">", "<item", ">", "<name", ">", "shapefile<", "/", "name", ">", "<value", ">", "blah", ".", "shp<", "/", "value", ">", "<", "/", "item", ">", "<", "/", "layout", ">", "Into", "something", "that", "looks", "like", "this", ":", "{", "color", ":", "[", "red", "]", "shapefile", ":", "[", "blah", ".", "shp", "]", "}" ]
python
train
27.3
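A usage sketch with lxml (the pretty_print argument in the record suggests the module builds its trees with lxml.etree); note the function actually returns plain strings, not the single-element lists shown in the docstring's example:

from lxml import etree

xml = (
    "<layout>"
    "<item><name>color</name><value>red</value></item>"
    "<item><name>shapefile</name><value>blah.shp</value></item>"
    "</layout>"
)
print(get_etree_layout_as_dict(etree.fromstring(xml)))
# {'color': 'red', 'shapefile': 'blah.shp'}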
joke2k/django-faker
django_faker/guessers.py
https://github.com/joke2k/django-faker/blob/345e3eebcf636e2566d9890ae7b35788ebdb5173/django_faker/guessers.py#L10-L33
def guessFormat(self, name): """ :param name: :type name: str """ name = name.lower() generator = self.generator if re.findall(r'^is[_A-Z]', name): return lambda x:generator.boolean() if re.findall(r'(_a|A)t$', name): return lambda x:generator.dateTime() if name in ('first_name','firstname'): return lambda x: generator.firstName() if name in ('last_name','lastname'): return lambda x: generator.lastName() if name in ('username','login','nickname'): return lambda x:generator.userName() if name in ('email','email_address'): return lambda x:generator.email() if name in ('phone_number','phonenumber','phone'): return lambda x:generator.phoneNumber() if name == 'address' : return lambda x:generator.address() if name == 'city' : return lambda x: generator.city() if name == 'streetaddress' : return lambda x: generator.streetaddress() if name in ('postcode','zipcode'): return lambda x: generator.postcode() if name == 'state' : return lambda x: generator.state() if name == 'country' : return lambda x: generator.country() if name == 'title' : return lambda x: generator.sentence() if name in ('body','summary', 'description'): return lambda x: generator.text()
[ "def", "guessFormat", "(", "self", ",", "name", ")", ":", "name", "=", "name", ".", "lower", "(", ")", "generator", "=", "self", ".", "generator", "if", "re", ".", "findall", "(", "r'^is[_A-Z]'", ",", "name", ")", ":", "return", "lambda", "x", ":", "generator", ".", "boolean", "(", ")", "if", "re", ".", "findall", "(", "r'(_a|A)t$'", ",", "name", ")", ":", "return", "lambda", "x", ":", "generator", ".", "dateTime", "(", ")", "if", "name", "in", "(", "'first_name'", ",", "'firstname'", ")", ":", "return", "lambda", "x", ":", "generator", ".", "firstName", "(", ")", "if", "name", "in", "(", "'last_name'", ",", "'lastname'", ")", ":", "return", "lambda", "x", ":", "generator", ".", "lastName", "(", ")", "if", "name", "in", "(", "'username'", ",", "'login'", ",", "'nickname'", ")", ":", "return", "lambda", "x", ":", "generator", ".", "userName", "(", ")", "if", "name", "in", "(", "'email'", ",", "'email_address'", ")", ":", "return", "lambda", "x", ":", "generator", ".", "email", "(", ")", "if", "name", "in", "(", "'phone_number'", ",", "'phonenumber'", ",", "'phone'", ")", ":", "return", "lambda", "x", ":", "generator", ".", "phoneNumber", "(", ")", "if", "name", "==", "'address'", ":", "return", "lambda", "x", ":", "generator", ".", "address", "(", ")", "if", "name", "==", "'city'", ":", "return", "lambda", "x", ":", "generator", ".", "city", "(", ")", "if", "name", "==", "'streetaddress'", ":", "return", "lambda", "x", ":", "generator", ".", "streetaddress", "(", ")", "if", "name", "in", "(", "'postcode'", ",", "'zipcode'", ")", ":", "return", "lambda", "x", ":", "generator", ".", "postcode", "(", ")", "if", "name", "==", "'state'", ":", "return", "lambda", "x", ":", "generator", ".", "state", "(", ")", "if", "name", "==", "'country'", ":", "return", "lambda", "x", ":", "generator", ".", "country", "(", ")", "if", "name", "==", "'title'", ":", "return", "lambda", "x", ":", "generator", ".", "sentence", "(", ")", "if", "name", "in", "(", "'body'", ",", "'summary'", ",", "'description'", ")", ":", "return", "lambda", "x", ":", "generator", ".", "text", "(", ")" ]
:param name: :type name: str
[ ":", "param", "name", ":", ":", "type", "name", ":", "str" ]
python
train
54.625
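A hedged sketch of the dispatch: guessFormat returns a one-argument callable wrapping the generator, or None for an unrecognised name. Here `guesser` is assumed to be an instance of the class this method belongs to, built around a camelCase-style faker generator:

fmt = guesser.guessFormat('email')
print(fmt(None))                         # e.g. 'jane.doe@example.org'
fmt = guesser.guessFormat('created_at')  # matches the (_a|A)t$ date rule
print(fmt(None))                         # a datetime value
print(guesser.guessFormat('payload'))    # None -- no rule applies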
GoogleCloudPlatform/compute-image-packages
packages/python-google-compute-engine/google_compute_engine/boto/boto_config.py
https://github.com/GoogleCloudPlatform/compute-image-packages/blob/53ea8cd069fb4d9a1984d1c167e54c133033f8da/packages/python-google-compute-engine/google_compute_engine/boto/boto_config.py#L66-L89
def _CreateConfig(self, project_id): """Create the boto config to support standalone GSUtil. Args: project_id: string, the project ID to use in the config file. """ project_id = project_id or self._GetNumericProjectId() # Our project doesn't support service accounts. if not project_id: return self.boto_config_header %= ( self.boto_config_script, self.boto_config_template) config = config_manager.ConfigManager( config_file=self.boto_config_template, config_header=self.boto_config_header) boto_dir = os.path.dirname(self.boto_config_script) config.SetOption('GSUtil', 'default_project_id', project_id) config.SetOption('GSUtil', 'default_api_version', '2') config.SetOption('GoogleCompute', 'service_account', 'default') config.SetOption('Plugin', 'plugin_directory', boto_dir) config.WriteConfig(config_file=self.boto_config)
[ "def", "_CreateConfig", "(", "self", ",", "project_id", ")", ":", "project_id", "=", "project_id", "or", "self", ".", "_GetNumericProjectId", "(", ")", "# Our project doesn't support service accounts.", "if", "not", "project_id", ":", "return", "self", ".", "boto_config_header", "%=", "(", "self", ".", "boto_config_script", ",", "self", ".", "boto_config_template", ")", "config", "=", "config_manager", ".", "ConfigManager", "(", "config_file", "=", "self", ".", "boto_config_template", ",", "config_header", "=", "self", ".", "boto_config_header", ")", "boto_dir", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "boto_config_script", ")", "config", ".", "SetOption", "(", "'GSUtil'", ",", "'default_project_id'", ",", "project_id", ")", "config", ".", "SetOption", "(", "'GSUtil'", ",", "'default_api_version'", ",", "'2'", ")", "config", ".", "SetOption", "(", "'GoogleCompute'", ",", "'service_account'", ",", "'default'", ")", "config", ".", "SetOption", "(", "'Plugin'", ",", "'plugin_directory'", ",", "boto_dir", ")", "config", ".", "WriteConfig", "(", "config_file", "=", "self", ".", "boto_config", ")" ]
Create the boto config to support standalone GSUtil. Args: project_id: string, the project ID to use in the config file.
[ "Create", "the", "boto", "config", "to", "support", "standalone", "GSUtil", "." ]
python
train
37.583333
PyMySQL/PyMySQL
pymysql/protocol.py
https://github.com/PyMySQL/PyMySQL/blob/3674bc6fd064bf88524e839c07690e8c35223709/pymysql/protocol.py#L94-L98
def rewind(self, position=0): """Set the position of the data buffer cursor to 'position'.""" if position < 0 or position > len(self._data): raise Exception("Invalid position to rewind cursor to: %s." % position) self._position = position
[ "def", "rewind", "(", "self", ",", "position", "=", "0", ")", ":", "if", "position", "<", "0", "or", "position", ">", "len", "(", "self", ".", "_data", ")", ":", "raise", "Exception", "(", "\"Invalid position to rewind cursor to: %s.\"", "%", "position", ")", "self", ".", "_position", "=", "position" ]
Set the position of the data buffer cursor to 'position'.
[ "Set", "the", "position", "of", "the", "data", "buffer", "cursor", "to", "position", "." ]
python
train
54
luckydonald/pytgbot
code_generation/code_generator_online.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/code_generator_online.py#L508-L512
def calc_path_and_create_folders(folder, import_path): """ calculate the path and create the needed folders """ file_path = abspath(path_join(folder, import_path[:import_path.rfind(".")].replace(".", folder_seperator) + ".py")) mkdir_p(dirname(file_path)) return file_path
[ "def", "calc_path_and_create_folders", "(", "folder", ",", "import_path", ")", ":", "file_path", "=", "abspath", "(", "path_join", "(", "folder", ",", "import_path", "[", ":", "import_path", ".", "rfind", "(", "\".\"", ")", "]", ".", "replace", "(", "\".\"", ",", "folder_seperator", ")", "+", "\".py\"", ")", ")", "mkdir_p", "(", "dirname", "(", "file_path", ")", ")", "return", "file_path" ]
calculate the path and create the needed folders
[ "calculate", "the", "path", "and", "create", "the", "needed", "folders" ]
python
train
56.8
log2timeline/plaso
plaso/engine/extractors.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/extractors.py#L481-L546
def _ExtractPathSpecsFromDirectory(self, file_entry, depth=0): """Extracts path specification from a directory. Args: file_entry (dfvfs.FileEntry): file entry that refers to the directory. depth (Optional[int]): current depth where 0 represents the file system root. Yields: dfvfs.PathSpec: path specification of a file entry found in the directory. """ if depth >= self._MAXIMUM_DEPTH: raise errors.MaximumRecursionDepth('Maximum recursion depth reached.') # Need to do a breadth-first search otherwise we'll hit the Python # maximum recursion depth. sub_directories = [] for sub_file_entry in file_entry.sub_file_entries: try: if not sub_file_entry.IsAllocated() or sub_file_entry.IsLink(): continue except dfvfs_errors.BackEndError as exception: logger.warning( 'Unable to process file: {0:s} with error: {1!s}'.format( sub_file_entry.path_spec.comparable.replace( '\n', ';'), exception)) continue # For TSK-based file entries only, ignore the virtual /$OrphanFiles # directory. if sub_file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK: if file_entry.IsRoot() and sub_file_entry.name == '$OrphanFiles': continue if sub_file_entry.IsDirectory(): sub_directories.append(sub_file_entry) elif sub_file_entry.IsFile(): # If we are dealing with a VSS we want to calculate a hash # value based on available timestamps and compare that to previously # calculated hash values, and only include the file into the queue if # the hash does not match. if self._duplicate_file_check: hash_value = self._CalculateNTFSTimeHash(sub_file_entry) inode = getattr(sub_file_entry.path_spec, 'inode', 0) if inode in self._hashlist: if hash_value in self._hashlist[inode]: continue self._hashlist.setdefault(inode, []).append(hash_value) for path_spec in self._ExtractPathSpecsFromFile(sub_file_entry): yield path_spec for sub_file_entry in sub_directories: try: for path_spec in self._ExtractPathSpecsFromDirectory( sub_file_entry, depth=(depth + 1)): yield path_spec except ( IOError, dfvfs_errors.AccessError, dfvfs_errors.BackEndError, dfvfs_errors.PathSpecError) as exception: logger.warning('{0!s}'.format(exception))
[ "def", "_ExtractPathSpecsFromDirectory", "(", "self", ",", "file_entry", ",", "depth", "=", "0", ")", ":", "if", "depth", ">=", "self", ".", "_MAXIMUM_DEPTH", ":", "raise", "errors", ".", "MaximumRecursionDepth", "(", "'Maximum recursion depth reached.'", ")", "# Need to do a breadth-first search otherwise we'll hit the Python", "# maximum recursion depth.", "sub_directories", "=", "[", "]", "for", "sub_file_entry", "in", "file_entry", ".", "sub_file_entries", ":", "try", ":", "if", "not", "sub_file_entry", ".", "IsAllocated", "(", ")", "or", "sub_file_entry", ".", "IsLink", "(", ")", ":", "continue", "except", "dfvfs_errors", ".", "BackEndError", "as", "exception", ":", "logger", ".", "warning", "(", "'Unable to process file: {0:s} with error: {1!s}'", ".", "format", "(", "sub_file_entry", ".", "path_spec", ".", "comparable", ".", "replace", "(", "'\\n'", ",", "';'", ")", ",", "exception", ")", ")", "continue", "# For TSK-based file entries only, ignore the virtual /$OrphanFiles", "# directory.", "if", "sub_file_entry", ".", "type_indicator", "==", "dfvfs_definitions", ".", "TYPE_INDICATOR_TSK", ":", "if", "file_entry", ".", "IsRoot", "(", ")", "and", "sub_file_entry", ".", "name", "==", "'$OrphanFiles'", ":", "continue", "if", "sub_file_entry", ".", "IsDirectory", "(", ")", ":", "sub_directories", ".", "append", "(", "sub_file_entry", ")", "elif", "sub_file_entry", ".", "IsFile", "(", ")", ":", "# If we are dealing with a VSS we want to calculate a hash", "# value based on available timestamps and compare that to previously", "# calculated hash values, and only include the file into the queue if", "# the hash does not match.", "if", "self", ".", "_duplicate_file_check", ":", "hash_value", "=", "self", ".", "_CalculateNTFSTimeHash", "(", "sub_file_entry", ")", "inode", "=", "getattr", "(", "sub_file_entry", ".", "path_spec", ",", "'inode'", ",", "0", ")", "if", "inode", "in", "self", ".", "_hashlist", ":", "if", "hash_value", "in", "self", ".", "_hashlist", "[", "inode", "]", ":", "continue", "self", ".", "_hashlist", ".", "setdefault", "(", "inode", ",", "[", "]", ")", ".", "append", "(", "hash_value", ")", "for", "path_spec", "in", "self", ".", "_ExtractPathSpecsFromFile", "(", "sub_file_entry", ")", ":", "yield", "path_spec", "for", "sub_file_entry", "in", "sub_directories", ":", "try", ":", "for", "path_spec", "in", "self", ".", "_ExtractPathSpecsFromDirectory", "(", "sub_file_entry", ",", "depth", "=", "(", "depth", "+", "1", ")", ")", ":", "yield", "path_spec", "except", "(", "IOError", ",", "dfvfs_errors", ".", "AccessError", ",", "dfvfs_errors", ".", "BackEndError", ",", "dfvfs_errors", ".", "PathSpecError", ")", "as", "exception", ":", "logger", ".", "warning", "(", "'{0!s}'", ".", "format", "(", "exception", ")", ")" ]
Extracts path specification from a directory. Args: file_entry (dfvfs.FileEntry): file entry that refers to the directory. depth (Optional[int]): current depth where 0 represents the file system root. Yields: dfvfs.PathSpec: path specification of a file entry found in the directory.
[ "Extracts", "path", "specification", "from", "a", "directory", "." ]
python
train
37.560606
Azure/azure-cli-extensions
src/express-route/azext_express_route/vendored_sdks/network_management_client.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/express-route/azext_express_route/vendored_sdks/network_management_client.py#L2058-L2071
def service_endpoint_policy_definitions(self): """Instance depends on the API version: * 2018-07-01: :class:`ServiceEndpointPolicyDefinitionsOperations<azure.mgmt.network.v2018_07_01.operations.ServiceEndpointPolicyDefinitionsOperations>` * 2018-08-01: :class:`ServiceEndpointPolicyDefinitionsOperations<azure.mgmt.network.v2018_08_01.operations.ServiceEndpointPolicyDefinitionsOperations>` """ api_version = self._get_api_version('service_endpoint_policy_definitions') if api_version == '2018-07-01': from .v2018_07_01.operations import ServiceEndpointPolicyDefinitionsOperations as OperationClass elif api_version == '2018-08-01': from .v2018_08_01.operations import ServiceEndpointPolicyDefinitionsOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
[ "def", "service_endpoint_policy_definitions", "(", "self", ")", ":", "api_version", "=", "self", ".", "_get_api_version", "(", "'service_endpoint_policy_definitions'", ")", "if", "api_version", "==", "'2018-07-01'", ":", "from", ".", "v2018_07_01", ".", "operations", "import", "ServiceEndpointPolicyDefinitionsOperations", "as", "OperationClass", "elif", "api_version", "==", "'2018-08-01'", ":", "from", ".", "v2018_08_01", ".", "operations", "import", "ServiceEndpointPolicyDefinitionsOperations", "as", "OperationClass", "else", ":", "raise", "NotImplementedError", "(", "\"APIVersion {} is not available\"", ".", "format", "(", "api_version", ")", ")", "return", "OperationClass", "(", "self", ".", "_client", ",", "self", ".", "config", ",", "Serializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ",", "Deserializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ")" ]
Instance depends on the API version: * 2018-07-01: :class:`ServiceEndpointPolicyDefinitionsOperations<azure.mgmt.network.v2018_07_01.operations.ServiceEndpointPolicyDefinitionsOperations>` * 2018-08-01: :class:`ServiceEndpointPolicyDefinitionsOperations<azure.mgmt.network.v2018_08_01.operations.ServiceEndpointPolicyDefinitionsOperations>`
[ "Instance", "depends", "on", "the", "API", "version", ":" ]
python
train
75.428571
pyvisa/pyvisa
pyvisa/ctwrapper/functions.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/ctwrapper/functions.py#L1510-L1523
def poke_8(library, session, address, data):
    """Write an 8-bit value to the specified address.

    Corresponds to viPoke8 function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param address: Destination address to write the value.
    :param data: value to be written to the bus.
    :return: return value of the library call.
    :rtype: :class:`pyvisa.constants.StatusCode`
    """
    return library.viPoke8(session, address, data)
[ "def", "poke_8", "(", "library", ",", "session", ",", "address", ",", "data", ")", ":", "return", "library", ".", "viPoke8", "(", "session", ",", "address", ",", "data", ")" ]
Write an 8-bit value to the specified address.

Corresponds to viPoke8 function of the VISA library.

:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param address: Destination address to write the value.
:param data: value to be written to the bus.
:return: return value of the library call.
:rtype: :class:`pyvisa.constants.StatusCode`
[ "Write", "an", "8", "-", "bit", "value", "from", "the", "specified", "address", "." ]
python
train
39.5
spotify/docker_interface
docker_interface/util.py
https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/util.py#L114-L136
def pop_value(instance, path, ref=None): """ Pop the value from `instance` at the given `path`. Parameters ---------- instance : dict or list instance from which to retrieve a value path : str path to retrieve a value from ref : str or None reference path if `path` is relative Returns ------- value : value at `path` in `instance` """ head, tail = os.path.split(abspath(path, ref)) instance = get_value(instance, head) if isinstance(instance, list): tail = int(tail) return instance.pop(tail)
[ "def", "pop_value", "(", "instance", ",", "path", ",", "ref", "=", "None", ")", ":", "head", ",", "tail", "=", "os", ".", "path", ".", "split", "(", "abspath", "(", "path", ",", "ref", ")", ")", "instance", "=", "get_value", "(", "instance", ",", "head", ")", "if", "isinstance", "(", "instance", ",", "list", ")", ":", "tail", "=", "int", "(", "tail", ")", "return", "instance", ".", "pop", "(", "tail", ")" ]
Pop the value from `instance` at the given `path`. Parameters ---------- instance : dict or list instance from which to retrieve a value path : str path to retrieve a value from ref : str or None reference path if `path` is relative Returns ------- value : value at `path` in `instance`
[ "Pop", "the", "value", "from", "instance", "at", "the", "given", "path", "." ]
python
train
24.913043
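A usage sketch, assuming the module's abspath/get_value helpers (defined elsewhere in the same file) resolve '/'-separated paths into nested containers as the docstring describes:

doc = {'build': {'args': ['--no-cache', '--pull']}}
removed = pop_value(doc, '/build/args/0')
print(removed)  # '--no-cache'
print(doc)      # {'build': {'args': ['--pull']}}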
FlorianRhiem/pyGLFW
glfw/__init__.py
https://github.com/FlorianRhiem/pyGLFW/blob/87767dfbe15ba15d2a8338cdfddf6afc6a25dff5/glfw/__init__.py#L744-L756
def get_monitor_physical_size(monitor): """ Returns the physical size of the monitor. Wrapper for: void glfwGetMonitorPhysicalSize(GLFWmonitor* monitor, int* width, int* height); """ width_value = ctypes.c_int(0) width = ctypes.pointer(width_value) height_value = ctypes.c_int(0) height = ctypes.pointer(height_value) _glfw.glfwGetMonitorPhysicalSize(monitor, width, height) return width_value.value, height_value.value
[ "def", "get_monitor_physical_size", "(", "monitor", ")", ":", "width_value", "=", "ctypes", ".", "c_int", "(", "0", ")", "width", "=", "ctypes", ".", "pointer", "(", "width_value", ")", "height_value", "=", "ctypes", ".", "c_int", "(", "0", ")", "height", "=", "ctypes", ".", "pointer", "(", "height_value", ")", "_glfw", ".", "glfwGetMonitorPhysicalSize", "(", "monitor", ",", "width", ",", "height", ")", "return", "width_value", ".", "value", ",", "height_value", ".", "value" ]
Returns the physical size of the monitor. Wrapper for: void glfwGetMonitorPhysicalSize(GLFWmonitor* monitor, int* width, int* height);
[ "Returns", "the", "physical", "size", "of", "the", "monitor", "." ]
python
train
35.076923
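A short usage sketch for the wrapper above (requires a display; on a headless machine `glfw.init()` returns False and the loop is skipped):

```python
import glfw

if glfw.init():
    for monitor in glfw.get_monitors():
        width_mm, height_mm = glfw.get_monitor_physical_size(monitor)
        print('physical size: {} mm x {} mm'.format(width_mm, height_mm))
    glfw.terminate()
```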
Metatab/metapack
metapack/cli/build.py
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/build.py#L34-L126
def build(subparsers): """ Build source packages. The mp build program runs all of the resources listed in a Metatab file and produces one or more Metapack packages with those resources localized. It will always try to produce a Filesystem package, and may optionally produce Excel, Zip and CSV packages. Typical usage is to run it inside a source package directory with .. code-block:: bash $ mp build To build all of the package types: .. code-block:: bash $ mp build -fezc By default, packages are built with versioned names. The :option:`--nonversion-name` option will create file packages with a non-versioned name, and the :option:`--nonversion-link` option will produce a non-versioned soft link pointing to the versioned file. """ parser = subparsers.add_parser( 'build', help='Build derived packages', description=build.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, epilog='') parser.set_defaults(run_command=run_metapack) parser.add_argument('metatabfile', nargs='?', help="Path or URL to a metatab file. If not provided, defaults to 'metadata.csv'. " ) parser.add_argument('-p', '--profile', help="Name of a BOTO or AWS credentials profile", required=False) parser.add_argument('-D', '--package-directory', help="Write Zip, Excel and CSV packages to an alternate directory", required=False) parser.add_argument('-F', '--force', action='store_true', default=False, help='Force some operations, like updating the name and building packages') parser.add_argument('-R', '--reuse-resources', action='store_true', default=False, help='When building Filesystem package, try to reuse resources built in prior build') group = parser.add_mutually_exclusive_group() group.add_argument('-n', '--nonversion-name', action='store_true', default=False, help='Write file packages with non-versioned names') group.add_argument('-N', '--nonversion-link', action='store_true', default=False, help='Create links with nonversioned names to file packages') parser.set_defaults(handler=None) ## ## Derived Package Group derived_group = parser.add_argument_group('Derived Packages', 'Generate other types of packages') derived_group.add_argument('-e', '--excel', action='store_true', default=False, help='Create an excel archive from a metatab file') derived_group.add_argument('-z', '--zip', action='store_true', default=False, help='Create a zip archive from a metatab file') derived_group.add_argument('-f', '--filesystem', action='store_true', default=False, help='Create a filesystem archive from a metatab file') derived_group.add_argument('-c', '--csv', action='store_true', default=False, help='Create a CSV archive from a metatab file') ## ## Administration Group admin_group = parser.add_argument_group('Administration', 'Information and administration') admin_group.add_argument('--clean-cache', default=False, action='store_true', help="Clean the download cache") admin_group.add_argument('-C', '--clean', default=False, action='store_true', help="For some operations, like updating schemas, clear the section of existing terms first")
[ "def", "build", "(", "subparsers", ")", ":", "parser", "=", "subparsers", ".", "add_parser", "(", "'build'", ",", "help", "=", "'Build derived packages'", ",", "description", "=", "build", ".", "__doc__", ",", "formatter_class", "=", "argparse", ".", "RawDescriptionHelpFormatter", ",", "epilog", "=", "''", ")", "parser", ".", "set_defaults", "(", "run_command", "=", "run_metapack", ")", "parser", ".", "add_argument", "(", "'metatabfile'", ",", "nargs", "=", "'?'", ",", "help", "=", "\"Path or URL to a metatab file. If not provided, defaults to 'metadata.csv'. \"", ")", "parser", ".", "add_argument", "(", "'-p'", ",", "'--profile'", ",", "help", "=", "\"Name of a BOTO or AWS credentails profile\"", ",", "required", "=", "False", ")", "parser", ".", "add_argument", "(", "'-D'", ",", "'--package-directory'", ",", "help", "=", "\"Write Zip, Excel and CSV packages to an alternate directory\"", ",", "required", "=", "False", ")", "parser", ".", "add_argument", "(", "'-F'", ",", "'--force'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Force some operations, like updating the name and building packages'", ")", "parser", ".", "add_argument", "(", "'-R'", ",", "'--reuse-resources'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'When building Filesystem package, try to reuse resources built in prior build'", ")", "group", "=", "parser", ".", "add_mutually_exclusive_group", "(", ")", "group", ".", "add_argument", "(", "'-n'", ",", "'--nonversion-name'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Write file packages with non-versioned names'", ")", "group", ".", "add_argument", "(", "'-N'", ",", "'--nonversion-link'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Create links with nonversioned names to file packages'", ")", "parser", ".", "set_defaults", "(", "handler", "=", "None", ")", "##", "## Derived Package Group", "derived_group", "=", "parser", ".", "add_argument_group", "(", "'Derived Packages'", ",", "'Generate other types of packages'", ")", "derived_group", ".", "add_argument", "(", "'-e'", ",", "'--excel'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Create an excel archive from a metatab file'", ")", "derived_group", ".", "add_argument", "(", "'-z'", ",", "'--zip'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Create a zip archive from a metatab file'", ")", "derived_group", ".", "add_argument", "(", "'-f'", ",", "'--filesystem'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Create a filesystem archive from a metatab file'", ")", "derived_group", ".", "add_argument", "(", "'-c'", ",", "'--csv'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Create a CSV archive from a metatab file'", ")", "##", "## Administration Group", "admin_group", "=", "parser", ".", "add_argument_group", "(", "'Administration'", ",", "'Information and administration'", ")", "admin_group", ".", "add_argument", "(", "'--clean-cache'", ",", "default", "=", "False", ",", "action", "=", "'store_true'", ",", "help", "=", "\"Clean the download cache\"", ")", "admin_group", ".", "add_argument", "(", "'-C'", ",", "'--clean'", ",", "default", "=", "False", ",", "action", "=", "'store_true'", ",", "help", "=", "\"For some operations, like updating schemas, clear the section of existing terms first\"", 
")" ]
Build source packages. The mp build program runs all of the resources listed in a Metatab file and produces one or more Metapack packages with those resources localized. It will always try to produce a Filesystem package, and may optionally produce Excel, Zip and CSV packages. Typical usage is to run it inside a source package directory with .. code-block:: bash $ mp build To build all of the package types: .. code-block:: bash $ mp build -fezc By default, packages are built with versioned names. The :option:`--nonversion-name` option will create file packages with a non-versioned name, and the :option:`--nonversion-link` option will produce a non-versioned soft link pointing to the versioned file.
[ "Build", "source", "packages", "." ]
python
train
38.344086
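The function above only registers the subcommand; a sketch of the surrounding wiring shows how the registered defaults are consumed (the real `run_metapack` handler lives elsewhere in metapack, so a stub stands in for it here, and only one of the arguments is recreated):

```python
import argparse

def run_metapack(args):  # stub for the real handler the record references
    print('would build with force={}'.format(args.force))

parser = argparse.ArgumentParser(prog='mp')
subparsers = parser.add_subparsers()
build_parser = subparsers.add_parser('build', help='Build derived packages')
build_parser.set_defaults(run_command=run_metapack)
build_parser.add_argument('-F', '--force', action='store_true', default=False)

args = parser.parse_args(['build', '-F'])
args.run_command(args)  # prints: would build with force=True
```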
nicolargo/glances
glances/plugins/glances_now.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_now.py#L48-L58
def update(self): """Update current date/time.""" # Had to convert it to string because datetime is not JSON serializable self.stats = datetime.now().strftime('%Y-%m-%d %H:%M:%S') # Add the time zone (issue #1249 and issue #1337) if hasattr(localtime(), 'tm_zone'): self.stats += ' {}'.format(localtime().tm_zone) elif len(tzname) > 0: self.stats += ' {}'.format(tzname[1]) return self.stats
[ "def", "update", "(", "self", ")", ":", "# Had to convert it to string because datetime is not JSON serializable", "self", ".", "stats", "=", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ")", "# Add the time zone (issue #1249 and issue #1337)", "if", "'tmzone'", "in", "localtime", "(", ")", ":", "self", ".", "stats", "+=", "' {}'", ".", "format", "(", "localtime", "(", ")", ".", "tm_zone", ")", "elif", "len", "(", "tzname", ")", ">", "0", ":", "self", ".", "stats", "+=", "' {}'", ".", "format", "(", "tzname", "[", "1", "]", ")", "return", "self", ".", "stats" ]
Update current date/time.
[ "Update", "current", "date", "/", "time", "." ]
python
train
41.272727
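The timezone-suffix logic can be exercised outside the plugin (a standalone sketch mirroring the method body; output depends on the local clock and zone):

```python
from datetime import datetime
from time import localtime, tzname

stats = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if hasattr(localtime(), 'tm_zone'):  # tm_zone is present on most CPython builds
    stats += ' {}'.format(localtime().tm_zone)
elif len(tzname) > 0:
    stats += ' {}'.format(tzname[1])
print(stats)  # e.g. 2019-01-01 12:00:00 UTC
```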
kinegratii/borax
borax/calendars/lunardate.py
https://github.com/kinegratii/borax/blob/921649f9277e3f657b6dea5a80e67de9ee5567f6/borax/calendars/lunardate.py#L84-L98
def _iter_year_month(year_info): """ Iter the month days in a lunar year. """ # info => month, days, leap leap_month, leap_days = _parse_leap(year_info) months = [(i, 0) for i in range(1, 13)] if leap_month > 0: months.insert(leap_month, (leap_month, 1)) for month, leap in months: if leap: days = leap_days else: days = (year_info >> (16 - month)) % 2 + 29 yield month, days, leap
[ "def", "_iter_year_month", "(", "year_info", ")", ":", "# info => month, days, leap", "leap_month", ",", "leap_days", "=", "_parse_leap", "(", "year_info", ")", "months", "=", "[", "(", "i", ",", "0", ")", "for", "i", "in", "range", "(", "1", ",", "13", ")", "]", "if", "leap_month", ">", "0", ":", "months", ".", "insert", "(", "leap_month", ",", "(", "leap_month", ",", "1", ")", ")", "for", "month", ",", "leap", "in", "months", ":", "if", "leap", ":", "days", "=", "leap_days", "else", ":", "days", "=", "(", "year_info", ">>", "(", "16", "-", "month", ")", ")", "%", "2", "+", "29", "yield", "month", ",", "days", ",", "leap" ]
Iter the month days in a lunar year.
[ "Iter", "the", "month", "days", "in", "a", "lunar", "year", "." ]
python
train
30.133333
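The `(year_info >> (16 - month)) % 2 + 29` expression reads one bit per month, high bit first: a set bit means a 30-day month, a clear bit a 29-day month. A worked illustration with a hypothetical packing (leap handling stripped out, since `_parse_leap` is not shown in the record):

```python
year_info = 0b1010101010100000  # months 1..12 packed into bits 15..4 (odd months big)

for month in range(1, 13):
    days = (year_info >> (16 - month)) % 2 + 29
    print(month, days)  # alternates 30, 29, 30, 29, ... for this bit pattern
```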
empymod/empymod
empymod/utils.py
https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/utils.py#L1895-L1903
def _prnt_min_max_val(var, text, verb): r"""Print variable; if more than three, just min/max, unless verb > 3.""" if var.size > 3: print(text, _strvar(var.min()), "-", _strvar(var.max()), ":", _strvar(var.size), " [min-max; #]") if verb > 3: print(" : ", _strvar(var)) else: print(text, _strvar(np.atleast_1d(var)))
[ "def", "_prnt_min_max_val", "(", "var", ",", "text", ",", "verb", ")", ":", "if", "var", ".", "size", ">", "3", ":", "print", "(", "text", ",", "_strvar", "(", "var", ".", "min", "(", ")", ")", ",", "\"-\"", ",", "_strvar", "(", "var", ".", "max", "(", ")", ")", ",", "\":\"", ",", "_strvar", "(", "var", ".", "size", ")", ",", "\" [min-max; #]\"", ")", "if", "verb", ">", "3", ":", "print", "(", "\" : \"", ",", "_strvar", "(", "var", ")", ")", "else", ":", "print", "(", "text", ",", "_strvar", "(", "np", ".", "atleast_1d", "(", "var", ")", ")", ")" ]
r"""Print variable; if more than three, just min/max, unless verb > 3.
[ "r", "Print", "variable", ";", "if", "more", "than", "three", "just", "min", "/", "max", "unless", "verb", ">", "3", "." ]
python
train
43
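Behavior of the printer for short versus long arrays (a sketch; `_strvar` is a private formatter in the real module, so plain `str` stands in for it):

```python
import numpy as np

def _strvar(a):  # stand-in for empymod's private formatter
    return str(a)

def _prnt_min_max_val(var, text, verb):
    # Mirrors the record: summarize long arrays, print short ones verbatim.
    if var.size > 3:
        print(text, _strvar(var.min()), '-', _strvar(var.max()),
              ':', _strvar(var.size), ' [min-max; #]')
        if verb > 3:
            print('                   :  ', _strvar(var))
    else:
        print(text, _strvar(np.atleast_1d(var)))

_prnt_min_max_val(np.array([1.0, 2.0]), '   depth  [m] : ', verb=3)
_prnt_min_max_val(np.linspace(0.0, 100.0, 11), '   offset [m] : ', verb=3)
```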
ThreatConnect-Inc/tcex
tcex/tcex.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex.py#L822-L858
def results_tc(self, key, value): """Write data to results_tc file in TcEx specified directory. The TcEx platform supports persistent values between executions of the App. This method will store the values for TC to read and put into the Database. Args: key (string): The data key to be stored. value (string): The data value to be stored. """ if os.access(self.default_args.tc_out_path, os.W_OK): results_file = '{}/results.tc'.format(self.default_args.tc_out_path) else: results_file = 'results.tc' new = True open(results_file, 'a').close() # ensure file exists with open(results_file, 'r+') as fh: results = '' for line in fh.read().strip().split('\n'): if not line: continue try: k, v = line.split(' = ') except ValueError: # handle null/empty value (e.g., "name =") k, v = line.split(' =') if k == key: v = value new = False if v is not None: results += '{} = {}\n'.format(k, v) if new and value is not None: # indicates the key/value pair didn't already exist results += '{} = {}\n'.format(key, value) fh.seek(0) fh.write(results) fh.truncate()
[ "def", "results_tc", "(", "self", ",", "key", ",", "value", ")", ":", "if", "os", ".", "access", "(", "self", ".", "default_args", ".", "tc_out_path", ",", "os", ".", "W_OK", ")", ":", "results_file", "=", "'{}/results.tc'", ".", "format", "(", "self", ".", "default_args", ".", "tc_out_path", ")", "else", ":", "results_file", "=", "'results.tc'", "new", "=", "True", "open", "(", "results_file", ",", "'a'", ")", ".", "close", "(", ")", "# ensure file exists", "with", "open", "(", "results_file", ",", "'r+'", ")", "as", "fh", ":", "results", "=", "''", "for", "line", "in", "fh", ".", "read", "(", ")", ".", "strip", "(", ")", ".", "split", "(", "'\\n'", ")", ":", "if", "not", "line", ":", "continue", "try", ":", "k", ",", "v", "=", "line", ".", "split", "(", "' = '", ")", "except", "ValueError", ":", "# handle null/empty value (e.g., \"name =\")", "k", ",", "v", "=", "line", ".", "split", "(", "' ='", ")", "if", "k", "==", "key", ":", "v", "=", "value", "new", "=", "False", "if", "v", "is", "not", "None", ":", "results", "+=", "'{} = {}\\n'", ".", "format", "(", "k", ",", "v", ")", "if", "new", "and", "value", "is", "not", "None", ":", "# indicates the key/value pair didn't already exist", "results", "+=", "'{} = {}\\n'", ".", "format", "(", "key", ",", "value", ")", "fh", ".", "seek", "(", "0", ")", "fh", ".", "write", "(", "results", ")", "fh", ".", "truncate", "(", ")" ]
Write data to results_tc file in TcEx specified directory. The TcEx platform supports persistent values between executions of the App. This method will store the values for TC to read and put into the Database. Args: key (string): The data key to be stored. value (string): The data value to be stored.
[ "Write", "data", "to", "results_tc", "file", "in", "TcEX", "specified", "directory", "." ]
python
train
39.351351
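The persistence format is plain `key = value` lines, rewritten in place on every call. A compact stand-in keyed on an explicit path (outside the TcEx argument plumbing, and skipping the empty-value parsing edge case) shows the round-trip:

```python
def write_kv(path, key, value):
    """Minimal stand-in for results_tc, keyed on an explicit file path."""
    open(path, 'a').close()  # ensure the file exists
    with open(path, 'r+') as fh:
        lines = [l for l in fh.read().strip().split('\n') if l]
        pairs = dict(l.split(' = ', 1) for l in lines)
        if value is None:
            pairs.pop(key, None)  # a None value drops the key, as in the record
        else:
            pairs[key] = value
        fh.seek(0)
        fh.write(''.join('{} = {}\n'.format(k, v) for k, v in pairs.items()))
        fh.truncate()

write_kv('results.tc', 'count', '42')
write_kv('results.tc', 'count', '43')
print(open('results.tc').read())  # count = 43
```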
qacafe/cdrouter.py
cdrouter/filters.py
https://github.com/qacafe/cdrouter.py/blob/aacf2c6ab0b987250f7b1892f4bba14bb2b7dbe5/cdrouter/filters.py#L137-L147
def ge(self, value): """Construct a greater than or equal to (``>=``) filter. :param value: Filter value :return: :class:`filters.Field <filters.Field>` object :rtype: filters.Field """ self.op = '>=' self.negate_op = '<' self.value = self._value(value) return self
[ "def", "ge", "(", "self", ",", "value", ")", ":", "self", ".", "op", "=", "'>='", "self", ".", "negate_op", "=", "'<'", "self", ".", "value", "=", "self", ".", "_value", "(", "value", ")", "return", "self" ]
Construct a greater than or equal to (``>=``) filter. :param value: Filter value :return: :class:`filters.Field <filters.Field>` object :rtype: filters.Field
[ "Construct", "a", "greater", "than", "or", "equal", "to", "(", ">", "=", ")", "filter", "." ]
python
train
29.818182
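How such a fluent field is typically chained (a sketch with a minimal `Field` carrying just what `ge` touches; the real cdrouter class adds per-type value coercion and serialization):

```python
class Field:
    def __init__(self, name):
        self.name = name
        self.op = self.negate_op = self.value = None

    def _value(self, value):
        return str(value)  # the real class coerces per field type

    def ge(self, value):  # mirrors the record
        self.op = '>='
        self.negate_op = '<'
        self.value = self._value(value)
        return self

    def __str__(self):
        return '{}{}{}'.format(self.name, self.op, self.value)

print(Field('result').ge(200))  # result>=200
```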