Column summary for this split:

| Column | dtype | Summary |
| --- | --- | --- |
| Unnamed: 0 | int64 | 0 to 10k |
| repository_name | string | lengths 7 to 54 |
| func_path_in_repository | string | lengths 5 to 223 |
| func_name | string | lengths 1 to 134 |
| whole_func_string | string | lengths 100 to 30.3k |
| language | string | 1 distinct value |
| func_code_string | string | lengths 100 to 30.3k |
| func_code_tokens | string | lengths 138 to 33.2k |
| func_documentation_string | string | lengths 1 to 15k |
| func_documentation_tokens | string | lengths 5 to 5.14k |
| split_name | string | 1 distinct value |
| func_code_url | string | lengths 91 to 315 |
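A minimal sketch for working with rows in this schema, assuming a local Parquet export of the table (the filename is a placeholder). The sample rows below follow this schema.

```python
# Inspect a local export of these rows with pandas; "rows.parquet" is a
# placeholder name for whatever export of this table you actually have.
import pandas as pd

df = pd.read_parquet("rows.parquet")
print(df[["repository_name", "func_name", "split_name"]].head())
print(df["whole_func_string"].str.len().describe())  # matches the length summary above
```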
1,700
MozillaSecurity/laniakea
laniakea/core/userdata.py
UserData.handle_import_tags
```python
def handle_import_tags(userdata, import_root):
    """Handle @import(filepath)@ tags in a UserData script.

    :param import_root: Location for imports.
    :type import_root: str
    :param userdata: UserData script content.
    :type userdata: str
    :return: UserData script with the contents of the imported files.
    :rtype: str
    """
    imports = re.findall('@import\((.*?)\)@', userdata)  # pylint: disable=anomalous-backslash-in-string
    if not imports:
        return userdata

    for filepath in imports:
        logger.info('Processing "import" of %s', filepath)
        import_path = os.path.join(import_root, filepath)
        try:
            with open(import_path) as fo:
                content = fo.read()
                userdata = userdata.replace('@import(%s)@' % filepath, content)
        except FileNotFoundError:
            raise UserDataException('Import path {} not found.'.format(import_path))

    return userdata
```
python
['def', 'handle_import_tags', '(', 'userdata', ',', 'import_root', ')', ':', 'imports', '=', 're', '.', 'findall', '(', "'@import\\((.*?)\\)@'", ',', 'userdata', ')', '# pylint: disable=anomalous-backslash-in-string', 'if', 'not', 'imports', ':', 'return', 'userdata', 'for', 'filepath', 'in', 'imports', ':', 'logger', '.', 'info', '(', '\'Processing "import" of %s\'', ',', 'filepath', ')', 'import_path', '=', 'os', '.', 'path', '.', 'join', '(', 'import_root', ',', 'filepath', ')', 'try', ':', 'with', 'open', '(', 'import_path', ')', 'as', 'fo', ':', 'content', '=', 'fo', '.', 'read', '(', ')', 'userdata', '=', 'userdata', '.', 'replace', '(', "'@import(%s)@'", '%', 'filepath', ',', 'content', ')', 'except', 'FileNotFoundError', ':', 'raise', 'UserDataException', '(', "'Import path {} not found.'", '.', 'format', '(', 'import_path', ')', ')', 'return', 'userdata']
Handle @import(filepath)@ tags in a UserData script. :param import_root: Location for imports. :type import_root: str :param userdata: UserData script content. :type userdata: str :return: UserData script with the contents of the imported files. :rtype: str
['Handle', '@import', '(', 'filepath', ')', '@', 'tags', 'in', 'a', 'UserData', 'script', '.']
train
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/userdata.py#L112-L136
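A hedged usage sketch for the record above. The script content and import directory are made up, and calling through the class assumes `handle_import_tags` is exposed as a static method, which the signature suggests but the record does not confirm.

```python
# Hypothetical usage: expand one @import(...)@ tag against a local directory.
# The module path follows the record above; the script and directory are made up.
from laniakea.core.userdata import UserData

script = "#!/bin/bash\n@import(install.sh)@\n"
expanded = UserData.handle_import_tags(script, import_root="userdata/includes")
```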
1,701
pvlib/pvlib-python
pvlib/pvsystem.py
physicaliam
```python
def physicaliam(aoi, n=1.526, K=4., L=0.002):
    '''
    Determine the incidence angle modifier using refractive index,
    extinction coefficient, and glazing thickness.

    physicaliam calculates the incidence angle modifier as described in
    De Soto et al. "Improvement and validation of a model for photovoltaic
    array performance", section 3. The calculation is based on a physical
    model of absorbtion and transmission through a cover.

    Note: The authors of this function believe that eqn. 14 in [1] is
    incorrect. This function uses the following equation in its place:
    theta_r = arcsin(1/n * sin(aoi))

    Parameters
    ----------
    aoi : numeric
        The angle of incidence between the module normal vector and the
        sun-beam vector in degrees. Angles of 0 are replaced with 1e-06
        to ensure non-nan results. Angles of nan will result in nan.

    n : numeric, default 1.526
        The effective index of refraction (unitless). Reference [1]
        indicates that a value of 1.526 is acceptable for glass. n must
        be a numeric scalar or vector with all values >=0. If n is a
        vector, it must be the same size as all other input vectors.

    K : numeric, default 4.0
        The glazing extinction coefficient in units of 1/meters.
        Reference [1] indicates that a value of 4 is reasonable for
        "water white" glass. K must be a numeric scalar or vector with
        all values >=0. If K is a vector, it must be the same size as
        all other input vectors.

    L : numeric, default 0.002
        The glazing thickness in units of meters. Reference [1]
        indicates that 0.002 meters (2 mm) is reasonable for most
        glass-covered PV panels. L must be a numeric scalar or vector
        with all values >=0. If L is a vector, it must be the same size
        as all other input vectors.

    Returns
    -------
    iam : numeric
        The incident angle modifier

    References
    ----------
    [1] W. De Soto et al., "Improvement and validation of a model for
    photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
    2006.

    [2] Duffie, John A. & Beckman, William A.. (2006). Solar Engineering
    of Thermal Processes, third edition. [Books24x7 version] Available
    from http://common.books24x7.com/toc.aspx?bookid=17160.

    See Also
    --------
    getaoi
    ephemeris
    spa
    ashraeiam
    '''
    zeroang = 1e-06

    # hold a new reference to the input aoi object since we're going to
    # overwrite the aoi reference below, but we'll need it for the
    # series check at the end of the function
    aoi_input = aoi

    aoi = np.where(aoi == 0, zeroang, aoi)

    # angle of reflection
    thetar_deg = tools.asind(1.0 / n*(tools.sind(aoi)))

    # reflectance and transmittance for normal incidence light
    rho_zero = ((1-n) / (1+n)) ** 2
    tau_zero = np.exp(-K*L)

    # reflectance for parallel and perpendicular polarized light
    rho_para = (tools.tand(thetar_deg - aoi) /
                tools.tand(thetar_deg + aoi)) ** 2
    rho_perp = (tools.sind(thetar_deg - aoi) /
                tools.sind(thetar_deg + aoi)) ** 2

    # transmittance for non-normal light
    tau = np.exp(-K*L / tools.cosd(thetar_deg))

    # iam is ratio of non-normal to normal incidence transmitted light
    # after deducting the reflected portion of each
    iam = ((1 - (rho_para + rho_perp) / 2) / (1 - rho_zero) * tau / tau_zero)

    with np.errstate(invalid='ignore'):
        # angles near zero produce nan, but iam is defined as one
        small_angle = 1e-06
        iam = np.where(np.abs(aoi) < small_angle, 1.0, iam)

        # angles at 90 degrees can produce tiny negative values,
        # which should be zero. this is a result of calculation precision
        # rather than the physical model
        iam = np.where(iam < 0, 0, iam)

        # for light coming from behind the plane, none can enter the module
        iam = np.where(aoi > 90, 0, iam)

    if isinstance(aoi_input, pd.Series):
        iam = pd.Series(iam, index=aoi_input.index)

    return iam
```
python
['def', 'physicaliam', '(', 'aoi', ',', 'n', '=', '1.526', ',', 'K', '=', '4.', ',', 'L', '=', '0.002', ')', ':', 'zeroang', '=', '1e-06', "# hold a new reference to the input aoi object since we're going to", "# overwrite the aoi reference below, but we'll need it for the", '# series check at the end of the function', 'aoi_input', '=', 'aoi', 'aoi', '=', 'np', '.', 'where', '(', 'aoi', '==', '0', ',', 'zeroang', ',', 'aoi', ')', '# angle of reflection', 'thetar_deg', '=', 'tools', '.', 'asind', '(', '1.0', '/', 'n', '*', '(', 'tools', '.', 'sind', '(', 'aoi', ')', ')', ')', '# reflectance and transmittance for normal incidence light', 'rho_zero', '=', '(', '(', '1', '-', 'n', ')', '/', '(', '1', '+', 'n', ')', ')', '**', '2', 'tau_zero', '=', 'np', '.', 'exp', '(', '-', 'K', '*', 'L', ')', '# reflectance for parallel and perpendicular polarized light', 'rho_para', '=', '(', 'tools', '.', 'tand', '(', 'thetar_deg', '-', 'aoi', ')', '/', 'tools', '.', 'tand', '(', 'thetar_deg', '+', 'aoi', ')', ')', '**', '2', 'rho_perp', '=', '(', 'tools', '.', 'sind', '(', 'thetar_deg', '-', 'aoi', ')', '/', 'tools', '.', 'sind', '(', 'thetar_deg', '+', 'aoi', ')', ')', '**', '2', '# transmittance for non-normal light', 'tau', '=', 'np', '.', 'exp', '(', '-', 'K', '*', 'L', '/', 'tools', '.', 'cosd', '(', 'thetar_deg', ')', ')', '# iam is ratio of non-normal to normal incidence transmitted light', '# after deducting the reflected portion of each', 'iam', '=', '(', '(', '1', '-', '(', 'rho_para', '+', 'rho_perp', ')', '/', '2', ')', '/', '(', '1', '-', 'rho_zero', ')', '*', 'tau', '/', 'tau_zero', ')', 'with', 'np', '.', 'errstate', '(', 'invalid', '=', "'ignore'", ')', ':', '# angles near zero produce nan, but iam is defined as one', 'small_angle', '=', '1e-06', 'iam', '=', 'np', '.', 'where', '(', 'np', '.', 'abs', '(', 'aoi', ')', '<', 'small_angle', ',', '1.0', ',', 'iam', ')', '# angles at 90 degrees can produce tiny negative values,', '# which should be zero. this is a result of calculation precision', '# rather than the physical model', 'iam', '=', 'np', '.', 'where', '(', 'iam', '<', '0', ',', '0', ',', 'iam', ')', '# for light coming from behind the plane, none can enter the module', 'iam', '=', 'np', '.', 'where', '(', 'aoi', '>', '90', ',', '0', ',', 'iam', ')', 'if', 'isinstance', '(', 'aoi_input', ',', 'pd', '.', 'Series', ')', ':', 'iam', '=', 'pd', '.', 'Series', '(', 'iam', ',', 'index', '=', 'aoi_input', '.', 'index', ')', 'return', 'iam']
Determine the incidence angle modifier using refractive index, extinction coefficient, and glazing thickness. physicaliam calculates the incidence angle modifier as described in De Soto et al. "Improvement and validation of a model for photovoltaic array performance", section 3. The calculation is based on a physical model of absorbtion and transmission through a cover. Note: The authors of this function believe that eqn. 14 in [1] is incorrect. This function uses the following equation in its place: theta_r = arcsin(1/n * sin(aoi)) Parameters ---------- aoi : numeric The angle of incidence between the module normal vector and the sun-beam vector in degrees. Angles of 0 are replaced with 1e-06 to ensure non-nan results. Angles of nan will result in nan. n : numeric, default 1.526 The effective index of refraction (unitless). Reference [1] indicates that a value of 1.526 is acceptable for glass. n must be a numeric scalar or vector with all values >=0. If n is a vector, it must be the same size as all other input vectors. K : numeric, default 4.0 The glazing extinction coefficient in units of 1/meters. Reference [1] indicates that a value of 4 is reasonable for "water white" glass. K must be a numeric scalar or vector with all values >=0. If K is a vector, it must be the same size as all other input vectors. L : numeric, default 0.002 The glazing thickness in units of meters. Reference [1] indicates that 0.002 meters (2 mm) is reasonable for most glass-covered PV panels. L must be a numeric scalar or vector with all values >=0. If L is a vector, it must be the same size as all other input vectors. Returns ------- iam : numeric The incident angle modifier References ---------- [1] W. De Soto et al., "Improvement and validation of a model for photovoltaic array performance", Solar Energy, vol 80, pp. 78-88, 2006. [2] Duffie, John A. & Beckman, William A.. (2006). Solar Engineering of Thermal Processes, third edition. [Books24x7 version] Available from http://common.books24x7.com/toc.aspx?bookid=17160. See Also -------- getaoi ephemeris spa ashraeiam
['Determine', 'the', 'incidence', 'angle', 'modifier', 'using', 'refractive', 'index', 'extinction', 'coefficient', 'and', 'glazing', 'thickness', '.']
train
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/pvsystem.py#L955-L1064
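A hedged usage sketch for the record above. The import location matches the record's pinned commit; later pvlib versions moved or replaced this function, so treat the path as version-specific.

```python
# Evaluate the IAM curve at a few incidence angles using the defaults
# documented above (n=1.526, K=4, L=0.002).
import numpy as np
from pvlib.pvsystem import physicaliam  # location per the record's pinned commit

aoi = np.array([0.0, 30.0, 60.0, 85.0])
iam = physicaliam(aoi)
# iam is ~1 near normal incidence and falls toward 0 as aoi approaches 90
```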
1,702
Cue/scales
src/greplin/scales/__init__.py
_Stats.getStat
```python
def getStat(cls, obj, name):
    """Gets the stat for the given object with the given name, or None if no such stat exists."""
    objClass = type(obj)
    for theClass in objClass.__mro__:
        if theClass == object:
            break
        for value in theClass.__dict__.values():
            if isinstance(value, Stat) and value.getName() == name:
                return value
```
python
['def', 'getStat', '(', 'cls', ',', 'obj', ',', 'name', ')', ':', 'objClass', '=', 'type', '(', 'obj', ')', 'for', 'theClass', 'in', 'objClass', '.', '__mro__', ':', 'if', 'theClass', '==', 'object', ':', 'break', 'for', 'value', 'in', 'theClass', '.', '__dict__', '.', 'values', '(', ')', ':', 'if', 'isinstance', '(', 'value', ',', 'Stat', ')', 'and', 'value', '.', 'getName', '(', ')', '==', 'name', ':', 'return', 'value']
Gets the stat for the given object with the given name, or None if no such stat exists.
['Gets', 'the', 'stat', 'for', 'the', 'given', 'object', 'with', 'the', 'given', 'name', 'or', 'None', 'if', 'no', 'such', 'stat', 'exists', '.']
train
https://github.com/Cue/scales/blob/0aced26eb050ceb98ee9d5d6cdca8db448666986/src/greplin/scales/__init__.py#L193-L201
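A standalone illustration of the MRO walk in the record above, with stand-in classes; all names here are made up rather than taken from the scales library.

```python
# Walk a class hierarchy looking for class attributes of a given type by name,
# stopping before `object`, exactly as getStat does above.
class Stat:
    def __init__(self, name):
        self._name = name

    def getName(self):
        return self._name

class Base:
    requests = Stat('requests')

class Child(Base):
    errors = Stat('errors')

def find_stat(obj, name):
    for klass in type(obj).__mro__:
        if klass is object:
            break
        for value in vars(klass).values():
            if isinstance(value, Stat) and value.getName() == name:
                return value

assert find_stat(Child(), 'requests').getName() == 'requests'
assert find_stat(Child(), 'missing') is None
```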
1,703
ipinfo/python
ipinfo/handler.py
Handler._requestDetails
```python
def _requestDetails(self, ip_address=None):
    """Get IP address data by sending request to IPinfo API."""
    if ip_address not in self.cache:
        url = self.API_URL
        if ip_address:
            url += '/' + ip_address

        response = requests.get(url, headers=self._get_headers(), **self.request_options)
        if response.status_code == 429:
            raise RequestQuotaExceededError()
        response.raise_for_status()
        self.cache[ip_address] = response.json()

    return self.cache[ip_address]
```
python
['def', '_requestDetails', '(', 'self', ',', 'ip_address', '=', 'None', ')', ':', 'if', 'ip_address', 'not', 'in', 'self', '.', 'cache', ':', 'url', '=', 'self', '.', 'API_URL', 'if', 'ip_address', ':', 'url', '+=', "'/'", '+', 'ip_address', 'response', '=', 'requests', '.', 'get', '(', 'url', ',', 'headers', '=', 'self', '.', '_get_headers', '(', ')', ',', '*', '*', 'self', '.', 'request_options', ')', 'if', 'response', '.', 'status_code', '==', '429', ':', 'raise', 'RequestQuotaExceededError', '(', ')', 'response', '.', 'raise_for_status', '(', ')', 'self', '.', 'cache', '[', 'ip_address', ']', '=', 'response', '.', 'json', '(', ')', 'return', 'self', '.', 'cache', '[', 'ip_address', ']']
Get IP address data by sending request to IPinfo API.
['Get', 'IP', 'address', 'data', 'by', 'sending', 'request', 'to', 'IPinfo', 'API', '.']
train
https://github.com/ipinfo/python/blob/62fef9136069eab280806cc772dc578d3f1d8d63/ipinfo/handler.py#L52-L65
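A hedged sketch of the public path that ends up in `_requestDetails`: `getHandler` and `getDetails` are the ipinfo package's documented entry points, and the token here is a placeholder.

```python
# The first getDetails call for an IP triggers the HTTP request in
# _requestDetails above; a second call for the same IP is served from cache.
import ipinfo

handler = ipinfo.getHandler("YOUR_TOKEN")  # placeholder access token
details = handler.getDetails("8.8.8.8")
```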
1,704
rigetti/pyquil
pyquil/api/_base_connection.py
parse_error
```python
def parse_error(res):
    """
    Every server error should contain a "status" field with a human
    readable explanation of what went wrong as well as a "error_type"
    field indicating the kind of error that can be mapped to a Python
    type.

    There's a fallback error UnknownError for other types of exceptions
    (network issues, api gateway problems, etc.)
    """
    try:
        body = res.json()
    except JSONDecodeError:
        raise UnknownApiError(res.text)

    if 'error_type' not in body:
        raise UnknownApiError(str(body))

    error_type = body['error_type']
    status = body['status']

    if re.search(r"[0-9]+ qubits were requested, but the QVM is limited to [0-9]+ qubits.", status):
        return TooManyQubitsError(status)

    error_cls = error_mapping.get(error_type, UnknownApiError)
    return error_cls(status)
```
python
['def', 'parse_error', '(', 'res', ')', ':', 'try', ':', 'body', '=', 'res', '.', 'json', '(', ')', 'except', 'JSONDecodeError', ':', 'raise', 'UnknownApiError', '(', 'res', '.', 'text', ')', 'if', "'error_type'", 'not', 'in', 'body', ':', 'raise', 'UnknownApiError', '(', 'str', '(', 'body', ')', ')', 'error_type', '=', 'body', '[', "'error_type'", ']', 'status', '=', 'body', '[', "'status'", ']', 'if', 're', '.', 'search', '(', 'r"[0-9]+ qubits were requested, but the QVM is limited to [0-9]+ qubits."', ',', 'status', ')', ':', 'return', 'TooManyQubitsError', '(', 'status', ')', 'error_cls', '=', 'error_mapping', '.', 'get', '(', 'error_type', ',', 'UnknownApiError', ')', 'return', 'error_cls', '(', 'status', ')']
Every server error should contain a "status" field with a human readable explanation of what went wrong as well as a "error_type" field indicating the kind of error that can be mapped to a Python type. There's a fallback error UnknownError for other types of exceptions (network issues, api gateway problems, etc.)
['Every', 'server', 'error', 'should', 'contain', 'a', 'status', 'field', 'with', 'a', 'human', 'readable', 'explanation', 'of', 'what', 'went', 'wrong', 'as', 'well', 'as', 'a', 'error_type', 'field', 'indicating', 'the', 'kind', 'of', 'error', 'that', 'can', 'be', 'mapped', 'to', 'a', 'Python', 'type', '.']
train
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/api/_base_connection.py#L62-L86
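A standalone illustration of the dispatch pattern in the record above: look the `error_type` up in a mapping of exception classes and fall back to a generic error. The mapping contents here are made up, not pyquil's actual table.

```python
# Map an error_type string to an exception class with a generic fallback.
class UnknownApiError(Exception):
    pass

class QVMError(Exception):
    pass

error_mapping = {'qvm_error': QVMError}

def error_for(error_type, status):
    return error_mapping.get(error_type, UnknownApiError)(status)

assert isinstance(error_for('qvm_error', 'boom'), QVMError)
assert isinstance(error_for('mystery', 'boom'), UnknownApiError)
```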
1,705
orbingol/NURBS-Python
geomdl/linalg.py
binomial_coefficient
```python
def binomial_coefficient(k, i):
    """ Computes the binomial coefficient (denoted by *k choose i*).

    Please see the following website for details:
    http://mathworld.wolfram.com/BinomialCoefficient.html

    :param k: size of the set of distinct elements
    :type k: int
    :param i: size of the subsets
    :type i: int
    :return: combination of *k* and *i*
    :rtype: float
    """
    # Special case
    if i > k:
        return float(0)
    # Compute binomial coefficient
    k_fact = math.factorial(k)
    i_fact = math.factorial(i)
    k_i_fact = math.factorial(k - i)
    return float(k_fact / (k_i_fact * i_fact))
```
python
['def', 'binomial_coefficient', '(', 'k', ',', 'i', ')', ':', '# Special case', 'if', 'i', '>', 'k', ':', 'return', 'float', '(', '0', ')', '# Compute binomial coefficient', 'k_fact', '=', 'math', '.', 'factorial', '(', 'k', ')', 'i_fact', '=', 'math', '.', 'factorial', '(', 'i', ')', 'k_i_fact', '=', 'math', '.', 'factorial', '(', 'k', '-', 'i', ')', 'return', 'float', '(', 'k_fact', '/', '(', 'k_i_fact', '*', 'i_fact', ')', ')']
Computes the binomial coefficient (denoted by *k choose i*). Please see the following website for details: http://mathworld.wolfram.com/BinomialCoefficient.html :param k: size of the set of distinct elements :type k: int :param i: size of the subsets :type i: int :return: combination of *k* and *i* :rtype: float
['Computes', 'the', 'binomial', 'coefficient', '(', 'denoted', 'by', '*', 'k', 'choose', 'i', '*', ')', '.']
train
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L419-L438
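A quick check of the record above, using the import path shown in the record and exercising the `i > k` special case.

```python
# C(5, 2) = 5! / (3! * 2!) = 10; i > k returns 0.0 by the special case.
from geomdl.linalg import binomial_coefficient

print(binomial_coefficient(5, 2))  # 10.0
print(binomial_coefficient(2, 5))  # 0.0, since i > k
```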
1,706
cloudendpoints/endpoints-management-python
endpoints_management/control/timestamp.py
from_rfc3339
```python
def from_rfc3339(rfc3339_text, with_nanos=False):
    """Parse a RFC 3339 date string format to datetime.date.

    Example of accepted format: '1972-01-01T10:00:20.021-05:00'

    - By default, the result is a datetime.datetime
    - If with_nanos is true, the result is a 2-tuple, (datetime.datetime,
      nanos), where the second field represents the possible nanosecond
      resolution component of the second field.

    Args:
      rfc3339_text (string): An rfc3339 formatted date string
      with_nanos (bool): Determines if nanoseconds should be parsed from
        the string

    Raises:
      ValueError: if ``rfc3339_text`` is invalid

    Returns:
      :class:`datetime.datetime`: when with_nanos is False
      tuple(:class:`datetime.datetime`, int): when with_nanos is True
    """
    timestamp = strict_rfc3339.rfc3339_to_timestamp(rfc3339_text)
    result = datetime.datetime.utcfromtimestamp(timestamp)
    if with_nanos:
        return (result, int((timestamp - int(timestamp)) * 1e9))
    else:
        return result
```
python
['def', 'from_rfc3339', '(', 'rfc3339_text', ',', 'with_nanos', '=', 'False', ')', ':', 'timestamp', '=', 'strict_rfc3339', '.', 'rfc3339_to_timestamp', '(', 'rfc3339_text', ')', 'result', '=', 'datetime', '.', 'datetime', '.', 'utcfromtimestamp', '(', 'timestamp', ')', 'if', 'with_nanos', ':', 'return', '(', 'result', ',', 'int', '(', '(', 'timestamp', '-', 'int', '(', 'timestamp', ')', ')', '*', '1e9', ')', ')', 'else', ':', 'return', 'result']
Parse a RFC 3339 date string format to datetime.date. Example of accepted format: '1972-01-01T10:00:20.021-05:00' - By default, the result is a datetime.datetime - If with_nanos is true, the result is a 2-tuple, (datetime.datetime, nanos), where the second field represents the possible nanosecond resolution component of the second field. Args: rfc3339_text (string): An rfc3339 formatted date string with_nanos (bool): Determines if nanoseconds should be parsed from the string Raises: ValueError: if ``rfc3339_text`` is invalid Returns: :class:`datetime.datetime`: when with_nanos is False tuple(:class:`datetime.datetime`, int): when with_nanos is True
['Parse', 'a', 'RFC', '3339', 'date', 'string', 'format', 'to', 'datetime', '.', 'date', '.']
train
https://github.com/cloudendpoints/endpoints-management-python/blob/ec3c4a330ae9d65738861ce6df4dd6c3cb9f7731/endpoints_management/control/timestamp.py#L105-L133
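A hedged usage sketch using the docstring's own example string; the import path follows the record's file location.

```python
# Parse an RFC 3339 string, with and without the nanosecond component.
from endpoints_management.control.timestamp import from_rfc3339

dt = from_rfc3339('1972-01-01T10:00:20.021-05:00')
dt2, nanos = from_rfc3339('1972-01-01T10:00:20.021-05:00', with_nanos=True)
# nanos is roughly 21000000, i.e. the .021 fractional second in nanoseconds
```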
1,707
log2timeline/plaso
plaso/storage/interface.py
StorageFileWriter.ReadPreprocessingInformation
```python
def ReadPreprocessingInformation(self, knowledge_base):
    """Reads preprocessing information.

    The preprocessing information contains the system configuration which
    contains information about various system specific configuration data,
    for example the user accounts.

    Args:
      knowledge_base (KnowledgeBase): is used to store the preprocessing
          information.

    Raises:
      IOError: when the storage writer is closed.
      OSError: when the storage writer is closed.
    """
    if not self._storage_file:
        raise IOError('Unable to read from closed storage writer.')

    self._storage_file.ReadPreprocessingInformation(knowledge_base)
```
python
['def', 'ReadPreprocessingInformation', '(', 'self', ',', 'knowledge_base', ')', ':', 'if', 'not', 'self', '.', '_storage_file', ':', 'raise', 'IOError', '(', "'Unable to read from closed storage writer.'", ')', 'self', '.', '_storage_file', '.', 'ReadPreprocessingInformation', '(', 'knowledge_base', ')']
Reads preprocessing information. The preprocessing information contains the system configuration which contains information about various system specific configuration data, for example the user accounts. Args: knowledge_base (KnowledgeBase): is used to store the preprocessing information. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
['Reads', 'preprocessing', 'information', '.']
train
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/storage/interface.py#L1683-L1701
1,708
materialsproject/pymatgen
pymatgen/io/vasp/outputs.py
Wavecar.get_parchg
```python
def get_parchg(self, poscar, kpoint, band, spin=None, phase=False, scale=2):
    """
    Generates a Chgcar object, which is the charge density of the specified
    wavefunction.

    This function generates a Chgcar object with the charge density of the
    wavefunction specified by band and kpoint (and spin, if the WAVECAR
    corresponds to a spin-polarized calculation). The phase tag is a
    feature that is not present in VASP. For a real wavefunction, the phase
    tag being turned on means that the charge density is multiplied by the
    sign of the wavefunction at that point in space. A warning is generated
    if the phase tag is on and the chosen kpoint is not Gamma.

    Note: Augmentation from the PAWs is NOT included in this function. The
    maximal charge density will differ from the PARCHG from VASP, but the
    qualitative shape of the charge density will match.

    Args:
        poscar (pymatgen.io.vasp.inputs.Poscar): Poscar object that has the
            structure associated with the WAVECAR file
        kpoint (int): the index of the kpoint for the wavefunction
        band (int): the index of the band for the wavefunction
        spin (int): optional argument to specify the spin. If the Wavecar
            has ISPIN = 2, spin == None generates a Chgcar with total spin
            and magnetization, and spin == {0, 1} specifies just the spin
            up or down component.
        phase (bool): flag to determine if the charge density is multiplied
            by the sign of the wavefunction. Only valid for real
            wavefunctions.
        scale (int): scaling for the FFT grid. The default value of 2 is at
            least as fine as the VASP default.

    Returns:
        a pymatgen.io.vasp.outputs.Chgcar object
    """
    if phase and not np.all(self.kpoints[kpoint] == 0.):
        warnings.warn('phase == True should only be used for the Gamma '
                      'kpoint! I hope you know what you\'re doing!')

    # scaling of ng for the fft grid, need to restore value at the end
    temp_ng = self.ng
    self.ng = self.ng * scale
    N = np.prod(self.ng)

    data = {}
    if self.spin == 2:
        if spin is not None:
            wfr = np.fft.ifftn(self.fft_mesh(kpoint, band, spin=spin)) * N
            den = np.abs(np.conj(wfr) * wfr)
            if phase:
                den = np.sign(np.real(wfr)) * den
            data['total'] = den
        else:
            wfr = np.fft.ifftn(self.fft_mesh(kpoint, band, spin=0)) * N
            denup = np.abs(np.conj(wfr) * wfr)
            wfr = np.fft.ifftn(self.fft_mesh(kpoint, band, spin=1)) * N
            dendn = np.abs(np.conj(wfr) * wfr)
            data['total'] = denup + dendn
            data['diff'] = denup - dendn
    else:
        wfr = np.fft.ifftn(self.fft_mesh(kpoint, band)) * N
        den = np.abs(np.conj(wfr) * wfr)
        if phase:
            den = np.sign(np.real(wfr)) * den
        data['total'] = den

    self.ng = temp_ng
    return Chgcar(poscar, data)
```
python
['def', 'get_parchg', '(', 'self', ',', 'poscar', ',', 'kpoint', ',', 'band', ',', 'spin', '=', 'None', ',', 'phase', '=', 'False', ',', 'scale', '=', '2', ')', ':', 'if', 'phase', 'and', 'not', 'np', '.', 'all', '(', 'self', '.', 'kpoints', '[', 'kpoint', ']', '==', '0.', ')', ':', 'warnings', '.', 'warn', '(', "'phase == True should only be used for the Gamma '", "'kpoint! I hope you know what you\\'re doing!'", ')', '# scaling of ng for the fft grid, need to restore value at the end', 'temp_ng', '=', 'self', '.', 'ng', 'self', '.', 'ng', '=', 'self', '.', 'ng', '*', 'scale', 'N', '=', 'np', '.', 'prod', '(', 'self', '.', 'ng', ')', 'data', '=', '{', '}', 'if', 'self', '.', 'spin', '==', '2', ':', 'if', 'spin', 'is', 'not', 'None', ':', 'wfr', '=', 'np', '.', 'fft', '.', 'ifftn', '(', 'self', '.', 'fft_mesh', '(', 'kpoint', ',', 'band', ',', 'spin', '=', 'spin', ')', ')', '*', 'N', 'den', '=', 'np', '.', 'abs', '(', 'np', '.', 'conj', '(', 'wfr', ')', '*', 'wfr', ')', 'if', 'phase', ':', 'den', '=', 'np', '.', 'sign', '(', 'np', '.', 'real', '(', 'wfr', ')', ')', '*', 'den', 'data', '[', "'total'", ']', '=', 'den', 'else', ':', 'wfr', '=', 'np', '.', 'fft', '.', 'ifftn', '(', 'self', '.', 'fft_mesh', '(', 'kpoint', ',', 'band', ',', 'spin', '=', '0', ')', ')', '*', 'N', 'denup', '=', 'np', '.', 'abs', '(', 'np', '.', 'conj', '(', 'wfr', ')', '*', 'wfr', ')', 'wfr', '=', 'np', '.', 'fft', '.', 'ifftn', '(', 'self', '.', 'fft_mesh', '(', 'kpoint', ',', 'band', ',', 'spin', '=', '1', ')', ')', '*', 'N', 'dendn', '=', 'np', '.', 'abs', '(', 'np', '.', 'conj', '(', 'wfr', ')', '*', 'wfr', ')', 'data', '[', "'total'", ']', '=', 'denup', '+', 'dendn', 'data', '[', "'diff'", ']', '=', 'denup', '-', 'dendn', 'else', ':', 'wfr', '=', 'np', '.', 'fft', '.', 'ifftn', '(', 'self', '.', 'fft_mesh', '(', 'kpoint', ',', 'band', ')', ')', '*', 'N', 'den', '=', 'np', '.', 'abs', '(', 'np', '.', 'conj', '(', 'wfr', ')', '*', 'wfr', ')', 'if', 'phase', ':', 'den', '=', 'np', '.', 'sign', '(', 'np', '.', 'real', '(', 'wfr', ')', ')', '*', 'den', 'data', '[', "'total'", ']', '=', 'den', 'self', '.', 'ng', '=', 'temp_ng', 'return', 'Chgcar', '(', 'poscar', ',', 'data', ')']
Generates a Chgcar object, which is the charge density of the specified wavefunction. This function generates a Chgcar object with the charge density of the wavefunction specified by band and kpoint (and spin, if the WAVECAR corresponds to a spin-polarized calculation). The phase tag is a feature that is not present in VASP. For a real wavefunction, the phase tag being turned on means that the charge density is multiplied by the sign of the wavefunction at that point in space. A warning is generated if the phase tag is on and the chosen kpoint is not Gamma. Note: Augmentation from the PAWs is NOT included in this function. The maximal charge density will differ from the PARCHG from VASP, but the qualitative shape of the charge density will match. Args: poscar (pymatgen.io.vasp.inputs.Poscar): Poscar object that has the structure associated with the WAVECAR file kpoint (int): the index of the kpoint for the wavefunction band (int): the index of the band for the wavefunction spin (int): optional argument to specify the spin. If the Wavecar has ISPIN = 2, spin == None generates a Chgcar with total spin and magnetization, and spin == {0, 1} specifies just the spin up or down component. phase (bool): flag to determine if the charge density is multiplied by the sign of the wavefunction. Only valid for real wavefunctions. scale (int): scaling for the FFT grid. The default value of 2 is at least as fine as the VASP default. Returns: a pymatgen.io.vasp.outputs.Chgcar object
['Generates', 'a', 'Chgcar', 'object', 'which', 'is', 'the', 'charge', 'density', 'of', 'the', 'specified', 'wavefunction', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/vasp/outputs.py#L4238-L4307
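A hedged usage sketch for the record above; the file names are the VASP defaults, not values taken from the record, and the kpoint/band indices are arbitrary.

```python
# Write a band-decomposed charge density from a WAVECAR/POSCAR pair.
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.io.vasp.outputs import Wavecar

wv = Wavecar('WAVECAR')
poscar = Poscar.from_file('POSCAR')
parchg = wv.get_parchg(poscar, kpoint=0, band=0)
parchg.write_file('PARCHG')  # Chgcar supports write_file
```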
1,709
CodersOfTheNight/oshino
oshino/agents/__init__.py
Agent.ready
```python
def ready(self):
    """
    Function used when agent is `lazy`.
    It is being processed only when `ready` condition is satisfied
    """
    logger = self.get_logger()
    now = current_ts()
    logger.trace("Current time: {0}".format(now))
    logger.trace("Last Run: {0}".format(self._last_run))
    delta = (now - self._last_run)
    logger.trace("Delta: {0}, Interval: {1}"
                 .format(delta, self.interval * 1000))
    return delta > self.interval * 1000
```
python
['def', 'ready', '(', 'self', ')', ':', 'logger', '=', 'self', '.', 'get_logger', '(', ')', 'now', '=', 'current_ts', '(', ')', 'logger', '.', 'trace', '(', '"Current time: {0}"', '.', 'format', '(', 'now', ')', ')', 'logger', '.', 'trace', '(', '"Last Run: {0}"', '.', 'format', '(', 'self', '.', '_last_run', ')', ')', 'delta', '=', '(', 'now', '-', 'self', '.', '_last_run', ')', 'logger', '.', 'trace', '(', '"Delta: {0}, Interval: {1}"', '.', 'format', '(', 'delta', ',', 'self', '.', 'interval', '*', '1000', ')', ')', 'return', 'delta', '>', 'self', '.', 'interval', '*', '1000']
Function used when agent is `lazy`. It is being processed only when `ready` condition is satisfied
['Function', 'used', 'when', 'agent', 'is', 'lazy', '.', 'It', 'is', 'being', 'processed', 'only', 'when', 'ready', 'condition', 'is', 'satisfied']
train
https://github.com/CodersOfTheNight/oshino/blob/00f7e151e3ce1f3a7f43b353b695c4dba83c7f28/oshino/agents/__init__.py#L66-L78
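A standalone illustration of the throttling check above. It assumes `current_ts()` returns epoch milliseconds, which is why the interval (in seconds) is scaled by 1000 before the comparison.

```python
# Reproduce the delta-vs-interval check with plain time.time().
import time

def current_ts():
    return int(time.time() * 1000)  # epoch milliseconds, as assumed above

last_run = current_ts() - 12_000  # pretend the last run was 12 s ago
interval = 10                     # seconds
print(current_ts() - last_run > interval * 1000)  # True: the agent is ready
```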
1,710
gvanderheide/discreteMarkovChain
discreteMarkovChain/markovChain.py
markovChain.setStateCodes
```python
def setStateCodes(self):
    """
    Generates (sorted) codes for the states in the statespace
    This is used to quickly identify which states occur after a transition/action
    """
    # calculate the statespace and determine the minima and maxima of each element in the state vector
    statespace = self.statespace()
    self.minvalues = np.amin(statespace, axis=0)
    self.maxvalues = np.amax(statespace, axis=0)

    # calculate the largest number of values and create a state code
    statesize = statespace.shape[1]
    largestRange = 1 + np.max(self.maxvalues - self.minvalues)
    self.statecode = np.power(largestRange, np.arange(statesize), dtype=int)

    # Calculate the codes, sort them, and store them in self.codes
    codes = self.getStateCode(statespace)
    sorted_indices = np.argsort(codes)
    self.codes = codes[sorted_indices]

    if np.unique(self.codes).shape != self.codes.shape:
        # raising a bare string is a TypeError in Python; raise a real exception
        raise ValueError("Non-unique coding of states, results are unreliable")

    # For the end results, it is useful to put the indices and corresponding states in a dictionary
    mapping = OrderedDict()
    for index, state in enumerate(statespace[sorted_indices]):
        mapping[index] = state
    self.mapping = mapping
```
python
['def', 'setStateCodes', '(', 'self', ')', ':', '#calculate the statespace and determine the minima and maxima each element in the state vector ', 'statespace', '=', 'self', '.', 'statespace', '(', ')', 'self', '.', 'minvalues', '=', 'np', '.', 'amin', '(', 'statespace', ',', 'axis', '=', '0', ')', 'self', '.', 'maxvalues', '=', 'np', '.', 'amax', '(', 'statespace', ',', 'axis', '=', '0', ')', '#calculate the largest number of values and create a state code ', 'statesize', '=', 'statespace', '.', 'shape', '[', '1', ']', 'largestRange', '=', '1', '+', 'np', '.', 'max', '(', 'self', '.', 'maxvalues', '-', 'self', '.', 'minvalues', ')', 'self', '.', 'statecode', '=', 'np', '.', 'power', '(', 'largestRange', ',', 'np', '.', 'arange', '(', 'statesize', ')', ',', 'dtype', '=', 'int', ')', '#Calculate the codes, sort them, and store them in self.codes', 'codes', '=', 'self', '.', 'getStateCode', '(', 'statespace', ')', 'sorted_indices', '=', 'np', '.', 'argsort', '(', 'codes', ')', 'self', '.', 'codes', '=', 'codes', '[', 'sorted_indices', ']', 'if', 'np', '.', 'unique', '(', 'self', '.', 'codes', ')', '.', 'shape', '!=', 'self', '.', 'codes', '.', 'shape', ':', 'raise', '"Non-unique coding of states, results are unreliable"', '#For the end results, it is useful to put the indices and corresponding states in a dictionary ', 'mapping', '=', 'OrderedDict', '(', ')', 'for', 'index', ',', 'state', 'in', 'enumerate', '(', 'statespace', '[', 'sorted_indices', ']', ')', ':', 'mapping', '[', 'index', ']', '=', 'state', 'self', '.', 'mapping', '=', 'mapping']
Generates (sorted) codes for the states in the statespace This is used to quickly identify which states occur after a transition/action
['Generates', '(', 'sorted', ')', 'codes', 'for', 'the', 'states', 'in', 'the', 'statespace', 'This', 'is', 'used', 'to', 'quickly', 'identify', 'which', 'states', 'occur', 'after', 'a', 'transition', '/', 'action']
train
https://github.com/gvanderheide/discreteMarkovChain/blob/8325ffdb791c109eee600684ee0dc9126ce80700/discreteMarkovChain/markovChain.py#L254-L282
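A standalone illustration of the coding scheme above: each state vector maps to a unique integer in a fixed-radix system, so states reached after a transition can be located by value. All data here is made up.

```python
# Encode 2-element state vectors as integers using powers of the largest range.
import numpy as np

states = np.array([[0, 0], [1, 2], [2, 1]])
largest_range = 1 + np.max(states.max(axis=0) - states.min(axis=0))  # 3
statecode = np.power(largest_range, np.arange(states.shape[1]))      # [1, 3]
codes = states @ statecode
print(codes)  # [0 7 5] -- unique, so states can be matched by sorted search
```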
1,711
FNNDSC/pfurl
pfurl/pfurl.py
Pfurl.storage_resolveBasedOnKey
```python
def storage_resolveBasedOnKey(self, *args, **kwargs):
    """
    Call the remote service and ask for the storage location based on the key.

    :param args:
    :param kwargs:
    :return:
    """
    global Gd_internalvar
    d_msg = {
        'action': 'internalctl',
        'meta': {
            'var': 'key2address',
            'compute': '<key>'
        }
    }
    str_key = ""
    b_status = False

    for k, v in kwargs.items():
        if k == 'key':
            str_key = v

    d_msg['meta']['key'] = str_key
    #
    d_ret = self.pullPath_core(d_msg=d_msg)

    return {
        'status': b_status,
        'path': str_internalLocation
    }
```
python
['def', 'storage_resolveBasedOnKey', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'global', 'Gd_internalvar', 'd_msg', '=', '{', "'action'", ':', "'internalctl'", ',', "'meta'", ':', '{', "'var'", ':', "'key2address'", ',', "'compute'", ':', "'<key>'", '}', '}', 'str_key', '=', '""', 'b_status', '=', 'False', 'for', 'k', ',', 'v', 'in', 'kwargs', '.', 'items', '(', ')', ':', 'if', 'k', '==', "'key'", ':', 'str_key', '=', 'v', 'd_msg', '[', "'meta'", ']', '[', "'key'", ']', '=', 'str_key', '# ', 'd_ret', '=', 'self', '.', 'pullPath_core', '(', 'd_msg', '=', 'd_msg', ')', 'return', '{', "'status'", ':', 'b_status', ',', "'path'", ':', 'str_internalLocation', '}']
Call the remote service and ask for the storage location based on the key. :param args: :param kwargs: :return:
['Call', 'the', 'remote', 'service', 'and', 'ask', 'for', 'the', 'storage', 'location', 'based', 'on', 'the', 'key', '.']
train
https://github.com/FNNDSC/pfurl/blob/572f634ab582b7b7b7a3fbfd5bf12aadc1ba7958/pfurl/pfurl.py#L190-L221
1,712
nutechsoftware/alarmdecoder
examples/virtual_zone_expander.py
main
```python
def main():
    """
    Example application that periodically faults a virtual zone and then
    restores it.

    This is an advanced feature that allows you to emulate a virtual zone.
    When the AlarmDecoder is configured to emulate a zone expander we can
    fault and restore those zones programmatically at will. These events can
    also be seen by others, such as home automation platforms which allows
    you to connect other devices or services and monitor them as you would
    any physical zone.

    For example, you could connect a ZigBee device and receiver and fault or
    restore it's zone(s) based on the data received.

    In order for this to happen you need to perform a couple configuration
    steps:

    1. Enable zone expander emulation on your AlarmDecoder device by hitting
       '!' in a terminal and going through the prompts.
    2. Enable the zone expander in your panel programming.
    """
    try:
        # Retrieve the first USB device
        device = AlarmDecoder(SerialDevice(interface=SERIAL_DEVICE))

        # Set up an event handlers and open the device
        device.on_zone_fault += handle_zone_fault
        device.on_zone_restore += handle_zone_restore

        with device.open(baudrate=BAUDRATE):
            last_update = time.time()
            while True:
                if time.time() - last_update > WAIT_TIME:
                    last_update = time.time()
                    device.fault_zone(TARGET_ZONE)

                time.sleep(1)

    except Exception as ex:
        print('Exception:', ex)
```
python
['def', 'main', '(', ')', ':', 'try', ':', '# Retrieve the first USB device', 'device', '=', 'AlarmDecoder', '(', 'SerialDevice', '(', 'interface', '=', 'SERIAL_DEVICE', ')', ')', '# Set up an event handlers and open the device', 'device', '.', 'on_zone_fault', '+=', 'handle_zone_fault', 'device', '.', 'on_zone_restore', '+=', 'handle_zone_restore', 'with', 'device', '.', 'open', '(', 'baudrate', '=', 'BAUDRATE', ')', ':', 'last_update', '=', 'time', '.', 'time', '(', ')', 'while', 'True', ':', 'if', 'time', '.', 'time', '(', ')', '-', 'last_update', '>', 'WAIT_TIME', ':', 'last_update', '=', 'time', '.', 'time', '(', ')', 'device', '.', 'fault_zone', '(', 'TARGET_ZONE', ')', 'time', '.', 'sleep', '(', '1', ')', 'except', 'Exception', 'as', 'ex', ':', 'print', '(', "'Exception:'", ',', 'ex', ')']
Example application that periodically faults a virtual zone and then restores it. This is an advanced feature that allows you to emulate a virtual zone. When the AlarmDecoder is configured to emulate a zone expander we can fault and restore those zones programmatically at will. These events can also be seen by others, such as home automation platforms which allows you to connect other devices or services and monitor them as you would any physical zone. For example, you could connect a ZigBee device and receiver and fault or restore it's zone(s) based on the data received. In order for this to happen you need to perform a couple configuration steps: 1. Enable zone expander emulation on your AlarmDecoder device by hitting '!' in a terminal and going through the prompts. 2. Enable the zone expander in your panel programming.
['Example', 'application', 'that', 'periodically', 'faults', 'a', 'virtual', 'zone', 'and', 'then', 'restores', 'it', '.']
train
https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/examples/virtual_zone_expander.py#L12-L51
1,713
python-diamond/Diamond
src/collectors/xen_collector/xen_collector.py
XENCollector.collect
```python
def collect(self):
    """
    Collect libvirt data
    """
    if libvirt is None:
        self.log.error('Unable to import either libvirt')
        return {}

    # Open a restricted (non-root) connection to the hypervisor
    conn = libvirt.openReadOnly(None)
    # Get hardware info
    conninfo = conn.getInfo()

    # Initialize variables
    memallocated = 0
    coresallocated = 0
    totalcores = 0
    results = {}

    domIds = conn.listDomainsID()

    if 0 in domIds:
        # Total cores
        domU = conn.lookupByID(0)
        totalcores = domU.info()[3]

    # Free Space
    s = os.statvfs('/')
    freeSpace = (s.f_bavail * s.f_frsize) / 1024

    # Calculate allocated memory and cores
    for i in domIds:
        # Ignore 0
        if i == 0:
            continue
        domU = conn.lookupByID(i)
        dominfo = domU.info()
        memallocated += dominfo[2]
        if i > 0:
            coresallocated += dominfo[3]

    results = {
        'InstalledMem': conninfo[1],
        'MemAllocated': memallocated / 1024,
        'MemFree': conninfo[1] - (memallocated / 1024),
        'AllocatedCores': coresallocated,
        'DiskFree': freeSpace,
        'TotalCores': totalcores,
        'FreeCores': (totalcores - coresallocated)
    }

    for k in results.keys():
        self.publish(k, results[k], 0)
```
python
['def', 'collect', '(', 'self', ')', ':', 'if', 'libvirt', 'is', 'None', ':', 'self', '.', 'log', '.', 'error', '(', "'Unable to import either libvirt'", ')', 'return', '{', '}', '# Open a restricted (non-root) connection to the hypervisor', 'conn', '=', 'libvirt', '.', 'openReadOnly', '(', 'None', ')', '# Get hardware info', 'conninfo', '=', 'conn', '.', 'getInfo', '(', ')', '# Initialize variables', 'memallocated', '=', '0', 'coresallocated', '=', '0', 'totalcores', '=', '0', 'results', '=', '{', '}', 'domIds', '=', 'conn', '.', 'listDomainsID', '(', ')', 'if', '0', 'in', 'domIds', ':', '# Total cores', 'domU', '=', 'conn', '.', 'lookupByID', '(', '0', ')', 'totalcores', '=', 'domU', '.', 'info', '(', ')', '[', '3', ']', '# Free Space', 's', '=', 'os', '.', 'statvfs', '(', "'/'", ')', 'freeSpace', '=', '(', 's', '.', 'f_bavail', '*', 's', '.', 'f_frsize', ')', '/', '1024', '# Calculate allocated memory and cores', 'for', 'i', 'in', 'domIds', ':', '# Ignore 0', 'if', 'i', '==', '0', ':', 'continue', 'domU', '=', 'conn', '.', 'lookupByID', '(', 'i', ')', 'dominfo', '=', 'domU', '.', 'info', '(', ')', 'memallocated', '+=', 'dominfo', '[', '2', ']', 'if', 'i', '>', '0', ':', 'coresallocated', '+=', 'dominfo', '[', '3', ']', 'results', '=', '{', "'InstalledMem'", ':', 'conninfo', '[', '1', ']', ',', "'MemAllocated'", ':', 'memallocated', '/', '1024', ',', "'MemFree'", ':', 'conninfo', '[', '1', ']', '-', '(', 'memallocated', '/', '1024', ')', ',', "'AllocatedCores'", ':', 'coresallocated', ',', "'DiskFree'", ':', 'freeSpace', ',', "'TotalCores'", ':', 'totalcores', ',', "'FreeCores'", ':', '(', 'totalcores', '-', 'coresallocated', ')', '}', 'for', 'k', 'in', 'results', '.', 'keys', '(', ')', ':', 'self', '.', 'publish', '(', 'k', ',', 'results', '[', 'k', ']', ',', '0', ')']
Collect libvirt data
['Collect', 'libvirt', 'data']
train
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/xen_collector/xen_collector.py#L38-L82
1,714
davenquinn/Attitude
attitude/display/plot/cov_types/misc.py
ci
```python
def ci(a, which=95, axis=None):
    """Return a percentile range from an array of values."""
    p = 50 - which / 2, 50 + which / 2
    return percentiles(a, p, axis)
```
python
['def', 'ci', '(', 'a', ',', 'which', '=', '95', ',', 'axis', '=', 'None', ')', ':', 'p', '=', '50', '-', 'which', '/', '2', ',', '50', '+', 'which', '/', '2', 'return', 'percentiles', '(', 'a', ',', 'p', ',', 'axis', ')']
Return a percentile range from an array of values.
['Return', 'a', 'percentile', 'range', 'from', 'an', 'array', 'of', 'values', '.']
train
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/display/plot/cov_types/misc.py#L40-L43
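A standalone check of the bounds used above: `which=95` means the 2.5th and 97.5th percentiles. The `percentiles` call in the record is a local helper, so plain numpy is used here instead.

```python
# Compute a 95% percentile range directly with numpy.
import numpy as np

a = np.random.default_rng(0).normal(size=1000)
lo, hi = np.percentile(a, [50 - 95 / 2, 50 + 95 / 2])  # 2.5th and 97.5th
print(lo, hi)  # roughly -1.96 and 1.96 for a standard normal sample
```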
1,715
jeongyoonlee/Kaggler
kaggler/model/nn.py
NN.predict
def predict(self, X): """Predict targets for a feature matrix. Args: X (np.array of float): feature matrix for prediction Returns: prediction (np.array) """ logger.info('predicting ...') ps = self.predict_raw(X) return sigm(ps[:, 0])
python
def predict(self, X): """Predict targets for a feature matrix. Args: X (np.array of float): feature matrix for prediction Returns: prediction (np.array) """ logger.info('predicting ...') ps = self.predict_raw(X) return sigm(ps[:, 0])
['def', 'predict', '(', 'self', ',', 'X', ')', ':', 'logger', '.', 'info', '(', "'predicting ...'", ')', 'ps', '=', 'self', '.', 'predict_raw', '(', 'X', ')', 'return', 'sigm', '(', 'ps', '[', ':', ',', '0', ']', ')']
Predict targets for a feature matrix. Args: X (np.array of float): feature matrix for prediction Returns: prediction (np.array)
['Predict', 'targets', 'for', 'a', 'feature', 'matrix', '.']
train
https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/model/nn.py#L160-L172
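`sigm` is defined elsewhere in Kaggler; assuming it is the standard logistic function, a clipped, overflow-safe stand-in:

import numpy as np

def sigm(x):
    # logistic function; clipping keeps np.exp from overflowing on extreme scores
    return 1.0 / (1.0 + np.exp(-np.clip(x, -30.0, 30.0)))

sigm(np.array([-2.0, 0.0, 2.0]))   # -> array([0.119..., 0.5, 0.880...])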
1,716
ethereum/py-evm
eth/db/journal.py
Journal.clear
def clear(self) -> None: """ Treat as if the *underlying* database will also be cleared by some other mechanism. We build a special empty changeset just for marking that all previous data should be ignored. """ # these internal records are used as a way to tell the difference between # changes that came before and after the clear self.record_changeset() self._clears_at.add(self.latest_id) self.record_changeset()
python
def clear(self) -> None: """ Treat as if the *underlying* database will also be cleared by some other mechanism. We build a special empty changeset just for marking that all previous data should be ignored. """ # these internal records are used as a way to tell the difference between # changes that came before and after the clear self.record_changeset() self._clears_at.add(self.latest_id) self.record_changeset()
['def', 'clear', '(', 'self', ')', '->', 'None', ':', '# these internal records are used as a way to tell the difference between', '# changes that came before and after the clear', 'self', '.', 'record_changeset', '(', ')', 'self', '.', '_clears_at', '.', 'add', '(', 'self', '.', 'latest_id', ')', 'self', '.', 'record_changeset', '(', ')']
Treat as if the *underlying* database will also be cleared by some other mechanism. We build a special empty changeset just for marking that all previous data should be ignored.
['Treat', 'as', 'if', 'the', '*', 'underlying', '*', 'database', 'will', 'also', 'be', 'cleared', 'by', 'some', 'other', 'mechanism', '.', 'We', 'build', 'a', 'special', 'empty', 'changeset', 'just', 'for', 'marking', 'that', 'all', 'previous', 'data', 'should', 'be', 'ignored', '.']
train
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/db/journal.py#L148-L158
1,717
neighbordog/deviantart
deviantart/api.py
Api.get_watchers
def get_watchers(self, username, offset=0, limit=10): """Get the user's list of watchers :param username: The username you want to get a list of watchers of :param offset: the pagination offset :param limit: the pagination limit """ response = self._req('/user/watchers/{}'.format(username), { 'offset' : offset, 'limit' : limit }) watchers = [] for item in response['results']: w = {} w['user'] = User() w['user'].from_dict(item['user']) w['is_watching'] = item['is_watching'] w['lastvisit'] = item['lastvisit'] w['watch'] = { "friend" : item['watch']['friend'], "deviations" : item['watch']['deviations'], "journals" : item['watch']['journals'], "forum_threads" : item['watch']['forum_threads'], "critiques" : item['watch']['critiques'], "scraps" : item['watch']['scraps'], "activity" : item['watch']['activity'], "collections" : item['watch']['collections'] } watchers.append(w) return { "results" : watchers, "has_more" : response['has_more'], "next_offset" : response['next_offset'] }
python
def get_watchers(self, username, offset=0, limit=10): """Get the user's list of watchers :param username: The username you want to get a list of watchers of :param offset: the pagination offset :param limit: the pagination limit """ response = self._req('/user/watchers/{}'.format(username), { 'offset' : offset, 'limit' : limit }) watchers = [] for item in response['results']: w = {} w['user'] = User() w['user'].from_dict(item['user']) w['is_watching'] = item['is_watching'] w['lastvisit'] = item['lastvisit'] w['watch'] = { "friend" : item['watch']['friend'], "deviations" : item['watch']['deviations'], "journals" : item['watch']['journals'], "forum_threads" : item['watch']['forum_threads'], "critiques" : item['watch']['critiques'], "scraps" : item['watch']['scraps'], "activity" : item['watch']['activity'], "collections" : item['watch']['collections'] } watchers.append(w) return { "results" : watchers, "has_more" : response['has_more'], "next_offset" : response['next_offset'] }
['def', 'get_watchers', '(', 'self', ',', 'username', ',', 'offset', '=', '0', ',', 'limit', '=', '10', ')', ':', 'response', '=', 'self', '.', '_req', '(', "'/user/watchers/{}'", '.', 'format', '(', 'username', ')', ',', '{', "'offset'", ':', 'offset', ',', "'limit'", ':', 'limit', '}', ')', 'watchers', '=', '[', ']', 'for', 'item', 'in', 'response', '[', "'results'", ']', ':', 'w', '=', '{', '}', 'w', '[', "'user'", ']', '=', 'User', '(', ')', 'w', '[', "'user'", ']', '.', 'from_dict', '(', 'item', '[', "'user'", ']', ')', 'w', '[', "'is_watching'", ']', '=', 'item', '[', "'is_watching'", ']', 'w', '[', "'lastvisit'", ']', '=', 'item', '[', "'lastvisit'", ']', 'w', '[', "'watch'", ']', '=', '{', '"friend"', ':', 'item', '[', "'watch'", ']', '[', "'friend'", ']', ',', '"deviations"', ':', 'item', '[', "'watch'", ']', '[', "'deviations'", ']', ',', '"journals"', ':', 'item', '[', "'watch'", ']', '[', "'journals'", ']', ',', '"forum_threads"', ':', 'item', '[', "'watch'", ']', '[', "'forum_threads'", ']', ',', '"critiques"', ':', 'item', '[', "'watch'", ']', '[', "'critiques'", ']', ',', '"scraps"', ':', 'item', '[', "'watch'", ']', '[', "'scraps'", ']', ',', '"activity"', ':', 'item', '[', "'watch'", ']', '[', "'activity'", ']', ',', '"collections"', ':', 'item', '[', "'watch'", ']', '[', "'collections'", ']', '}', 'watchers', '.', 'append', '(', 'w', ')', 'return', '{', '"results"', ':', 'watchers', ',', '"has_more"', ':', 'response', '[', "'has_more'", ']', ',', '"next_offset"', ':', 'response', '[', "'next_offset'", ']', '}']
Get the user's list of watchers :param username: The username you want to get a list of watchers of :param offset: the pagination offset :param limit: the pagination limit
['Get', 'the', 'user', 's', 'list', 'of', 'watchers']
train
https://github.com/neighbordog/deviantart/blob/5612f1d5e2139a48c9d793d7fd19cde7e162d7b1/deviantart/api.py#L1024-L1063
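A hedged pagination sketch using the has_more / next_offset fields returned above; `da` is a hypothetical, already-authenticated deviantart.Api instance:

offset, watchers = 0, []
while True:
    page = da.get_watchers('some-username', offset=offset, limit=50)
    watchers.extend(page['results'])
    if not page['has_more']:
        break
    offset = page['next_offset']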
1,718
tyarkoni/pliers
pliers/datasets/text.py
fetch_dictionary
def fetch_dictionary(name, url=None, format=None, index=0, rename=None, save=True, force_retrieve=False): ''' Retrieve a dictionary of text norms from the web or local storage. Args: name (str): The name of the dictionary. If no url is passed, this must match either one of the keys in the predefined dictionary file (see dictionaries.json), or the name assigned to a previous dictionary retrieved from a specific URL. url (str): The URL of dictionary file to retrieve. Optional if name matches an existing dictionary. format (str): One of 'csv', 'tsv', 'xls', or None. Used to read data appropriately. Note that most forms of compression will be detected and handled automatically, so the format string refers only to the format of the decompressed file. When format is None, the format will be inferred from the filename. index (str, int): The name or numeric index of the column to use as the dictionary index. Passed directly to pd.ix. rename (dict): An optional dictionary passed to pd.rename(); can be used to rename columns in the loaded dictionary. Note that the locally-saved dictionary will retain the renamed columns. save (bool): Whether or not to save the dictionary locally the first time it is retrieved. force_retrieve (bool): If True, remote dictionary will always be downloaded, even if a local copy exists (and the local copy will be overwritten). Returns: A pandas DataFrame indexed by strings (typically words). ''' file_path = os.path.join(_get_dictionary_path(), name + '.csv') if not force_retrieve and os.path.exists(file_path): df = pd.read_csv(file_path) index = datasets[name].get('index', df.columns[index]) return df.set_index(index) if name in datasets: url = datasets[name]['url'] format = datasets[name].get('format', format) index = datasets[name].get('index', index) rename = datasets[name].get('rename', rename) if url is None: raise ValueError("Dataset '%s' not found in local storage or presets, " "and no download URL provided." % name) data = _download_dictionary(url, format=format, rename=rename) if isinstance(index, int): index = data.columns[index] data = data.set_index(index) if save: file_path = os.path.join(_get_dictionary_path(), name + '.csv') data.to_csv(file_path, encoding='utf-8') return data
python
def fetch_dictionary(name, url=None, format=None, index=0, rename=None, save=True, force_retrieve=False): ''' Retrieve a dictionary of text norms from the web or local storage. Args: name (str): The name of the dictionary. If no url is passed, this must match either one of the keys in the predefined dictionary file (see dictionaries.json), or the name assigned to a previous dictionary retrieved from a specific URL. url (str): The URL of dictionary file to retrieve. Optional if name matches an existing dictionary. format (str): One of 'csv', 'tsv', 'xls', or None. Used to read data appropriately. Note that most forms of compression will be detected and handled automatically, so the format string refers only to the format of the decompressed file. When format is None, the format will be inferred from the filename. index (str, int): The name or numeric index of the column to use as the dictionary index. Passed directly to pd.ix. rename (dict): An optional dictionary passed to pd.rename(); can be used to rename columns in the loaded dictionary. Note that the locally-saved dictionary will retain the renamed columns. save (bool): Whether or not to save the dictionary locally the first time it is retrieved. force_retrieve (bool): If True, remote dictionary will always be downloaded, even if a local copy exists (and the local copy will be overwritten). Returns: A pandas DataFrame indexed by strings (typically words). ''' file_path = os.path.join(_get_dictionary_path(), name + '.csv') if not force_retrieve and os.path.exists(file_path): df = pd.read_csv(file_path) index = datasets[name].get('index', df.columns[index]) return df.set_index(index) if name in datasets: url = datasets[name]['url'] format = datasets[name].get('format', format) index = datasets[name].get('index', index) rename = datasets[name].get('rename', rename) if url is None: raise ValueError("Dataset '%s' not found in local storage or presets, " "and no download URL provided." % name) data = _download_dictionary(url, format=format, rename=rename) if isinstance(index, int): index = data.columns[index] data = data.set_index(index) if save: file_path = os.path.join(_get_dictionary_path(), name + '.csv') data.to_csv(file_path, encoding='utf-8') return data
['def', 'fetch_dictionary', '(', 'name', ',', 'url', '=', 'None', ',', 'format', '=', 'None', ',', 'index', '=', '0', ',', 'rename', '=', 'None', ',', 'save', '=', 'True', ',', 'force_retrieve', '=', 'False', ')', ':', 'file_path', '=', 'os', '.', 'path', '.', 'join', '(', '_get_dictionary_path', '(', ')', ',', 'name', '+', "'.csv'", ')', 'if', 'not', 'force_retrieve', 'and', 'os', '.', 'path', '.', 'exists', '(', 'file_path', ')', ':', 'df', '=', 'pd', '.', 'read_csv', '(', 'file_path', ')', 'index', '=', 'datasets', '[', 'name', ']', '.', 'get', '(', "'index'", ',', 'df', '.', 'columns', '[', 'index', ']', ')', 'return', 'df', '.', 'set_index', '(', 'index', ')', 'if', 'name', 'in', 'datasets', ':', 'url', '=', 'datasets', '[', 'name', ']', '[', "'url'", ']', 'format', '=', 'datasets', '[', 'name', ']', '.', 'get', '(', "'format'", ',', 'format', ')', 'index', '=', 'datasets', '[', 'name', ']', '.', 'get', '(', "'index'", ',', 'index', ')', 'rename', '=', 'datasets', '.', 'get', '(', "'rename'", ',', 'rename', ')', 'if', 'url', 'is', 'None', ':', 'raise', 'ValueError', '(', '"Dataset \'%s\' not found in local storage or presets, "', '"and no download URL provided."', '%', 'name', ')', 'data', '=', '_download_dictionary', '(', 'url', ',', 'format', '=', 'format', ',', 'rename', '=', 'rename', ')', 'if', 'isinstance', '(', 'index', ',', 'int', ')', ':', 'index', '=', 'data', '.', 'columns', '[', 'index', ']', 'data', '=', 'data', '.', 'set_index', '(', 'index', ')', 'if', 'save', ':', 'file_path', '=', 'os', '.', 'path', '.', 'join', '(', '_get_dictionary_path', '(', ')', ',', 'name', '+', "'.csv'", ')', 'data', '.', 'to_csv', '(', 'file_path', ',', 'encoding', '=', "'utf-8'", ')', 'return', 'data']
Retrieve a dictionary of text norms from the web or local storage. Args: name (str): The name of the dictionary. If no url is passed, this must match either one of the keys in the predefined dictionary file (see dictionaries.json), or the name assigned to a previous dictionary retrieved from a specific URL. url (str): The URL of dictionary file to retrieve. Optional if name matches an existing dictionary. format (str): One of 'csv', 'tsv', 'xls', or None. Used to read data appropriately. Note that most forms of compression will be detected and handled automatically, so the format string refers only to the format of the decompressed file. When format is None, the format will be inferred from the filename. index (str, int): The name or numeric index of the column to use as the dictionary index. Passed directly to pd.ix. rename (dict): An optional dictionary passed to pd.rename(); can be used to rename columns in the loaded dictionary. Note that the locally-saved dictionary will retain the renamed columns. save (bool): Whether or not to save the dictionary locally the first time it is retrieved. force_retrieve (bool): If True, remote dictionary will always be downloaded, even if a local copy exists (and the local copy will be overwritten). Returns: A pandas DataFrame indexed by strings (typically words).
['Retrieve', 'a', 'dictionary', 'of', 'text', 'norms', 'from', 'the', 'web', 'or', 'local', 'storage', '.']
train
https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/datasets/text.py#L57-L111
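A hedged usage sketch; the dictionary name and URL below are hypothetical placeholders, while the keyword arguments mirror the signature above:

norms = fetch_dictionary(
    'my_norms',                              # hypothetical local cache key
    url='https://example.com/norms.csv',     # hypothetical source file
    format='csv',
    index='word',                            # column to use as the DataFrame index
)
row = norms.loc['apple']                     # norms for a single word (if present)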
1,719
acutesoftware/AIKIF
aikif/toolbox/text_tools.py
identify_delim
def identify_delim(txt): """ identifies delimiters and returns a count by ROW in the text file as well as the delimiter value (if any). The delim is determined if the count of delims is consistent in all rows. """ possible_delims = _get_dict_char_count(txt) # {'C': 3, 'a': 4, 'b': 5, 'c': 6, ',': 6, 'A': 3, '\n': 3, 'B': 3}) delim = max(possible_delims.keys(), key=(lambda k: possible_delims[k])) """ count_by_row = [] max_cols = 0 max_rows = 0 lines = txt.split('\n') for line in lines: if len(line) > max_cols: max_cols = len(line) this_count = _get_dict_char_count(line) count_by_row.append(this_count) print('line = ', line) print('count_by_row = ', this_count) max_rows += 1 # make a matrix matrix = [[0 for i in range(max_rows)] for j in range(max_cols)] pprint.pprint(matrix) """ return delim
python
def identify_delim(txt): """ identifies delimiters and returns a count by ROW in the text file as well as the delimiter value (if any). The delim is determined if the count of delims is consistent in all rows. """ possible_delims = _get_dict_char_count(txt) # {'C': 3, 'a': 4, 'b': 5, 'c': 6, ',': 6, 'A': 3, '\n': 3, 'B': 3}) delim = max(possible_delims.keys(), key=(lambda k: possible_delims[k])) """ count_by_row = [] max_cols = 0 max_rows = 0 lines = txt.split('\n') for line in lines: if len(line) > max_cols: max_cols = len(line) this_count = _get_dict_char_count(line) count_by_row.append(this_count) print('line = ', line) print('count_by_row = ', this_count) max_rows += 1 # make a matrix matrix = [[0 for i in range(max_rows)] for j in range(max_cols)] pprint.pprint(matrix) """ return delim
['def', 'identify_delim', '(', 'txt', ')', ':', 'possible_delims', '=', '_get_dict_char_count', '(', 'txt', ')', "# {'C': 3, 'a': 4, 'b': 5, 'c': 6, ',': 6, 'A': 3, '\\n': 3, 'B': 3})\r", 'delim', '=', 'max', '(', 'possible_delims', '.', 'keys', '(', ')', ',', 'key', '=', '(', 'lambda', 'k', ':', 'possible_delims', '[', 'k', ']', ')', ')', '"""\r\n\tcount_by_row = []\r\n\tmax_cols = 0\r\n\tmax_rows = 0\r\n\r\n\tlines = txt.split(\'\\n\')\r\n\tfor line in lines:\r\n\t\tif len(line) > max_cols:\r\n\t\t\tmax_cols = len(line)\r\n\t\tthis_count = _get_dict_char_count(line)\r\n\t\tcount_by_row.append(this_count)\r\n\t\tprint(\'line = \', line)\r\n\t\tprint(\'count_by_row = \', this_count)\r\n\t\tmax_rows += 1\r\n\r\n\t# make a matrix\r\n\tmatrix = [[0 for i in range(max_rows)] for j in range(max_cols)]\r\n\tpprint.pprint(matrix)\r\n\t"""', 'return', 'delim']
identifies delimiters and returns a count by ROW in the text file as well as the delimiter value (if any). The delim is determined if the count of delims is consistent in all rows.
['identifies', 'delimiters', 'and', 'returns', 'a', 'count', 'by', 'ROW', 'in', 'the', 'text', 'file', 'as', 'well', 'as', 'the', 'delimiter', 'value', '(', 'if', 'any', ')', '.', 'The', 'delim', 'is', 'determined', 'if', 'the', 'count', 'of', 'delims', 'is', 'consistent', 'in', 'all', 'rows', '.']
train
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/text_tools.py#L114-L146
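`_get_dict_char_count` is not shown in this record; `collections.Counter` reproduces the same most-frequent-character heuristic:

from collections import Counter

def identify_delim_sketch(txt):
    # same idea as above: the single most frequent character is taken to be
    # the delimiter; Counter stands in for the unshown _get_dict_char_count
    counts = Counter(txt)
    return max(counts, key=counts.get)

identify_delim_sketch('a,b,c\n1,2,3\n')   # -> ','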
1,720
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py
MAVLink.pid_tuning_encode
def pid_tuning_encode(self, axis, desired, achieved, FF, P, I, D): ''' PID tuning information axis : axis (uint8_t) desired : desired rate (degrees/s) (float) achieved : achieved rate (degrees/s) (float) FF : FF component (float) P : P component (float) I : I component (float) D : D component (float) ''' return MAVLink_pid_tuning_message(axis, desired, achieved, FF, P, I, D)
python
def pid_tuning_encode(self, axis, desired, achieved, FF, P, I, D): ''' PID tuning information axis : axis (uint8_t) desired : desired rate (degrees/s) (float) achieved : achieved rate (degrees/s) (float) FF : FF component (float) P : P component (float) I : I component (float) D : D component (float) ''' return MAVLink_pid_tuning_message(axis, desired, achieved, FF, P, I, D)
['def', 'pid_tuning_encode', '(', 'self', ',', 'axis', ',', 'desired', ',', 'achieved', ',', 'FF', ',', 'P', ',', 'I', ',', 'D', ')', ':', 'return', 'MAVLink_pid_tuning_message', '(', 'axis', ',', 'desired', ',', 'achieved', ',', 'FF', ',', 'P', ',', 'I', ',', 'D', ')']
PID tuning information axis : axis (uint8_t) desired : desired rate (degrees/s) (float) achieved : achieved rate (degrees/s) (float) FF : FF component (float) P : P component (float) I : I component (float) D : D component (float)
['PID', 'tuning', 'information']
train
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py#L10824-L10837
1,721
n1analytics/python-paillier
phe/paillier.py
EncryptedNumber.obfuscate
def obfuscate(self): """Disguise ciphertext by multiplying by r ** n with random r. This operation must be performed for every `EncryptedNumber` that is sent to an untrusted party, otherwise eavesdroppers might deduce relationships between this and an antecedent `EncryptedNumber`. For example:: enc = public_key.encrypt(1337) send_to_nsa(enc) # NSA can't decrypt (we hope!) product = enc * 3.14 send_to_nsa(product) # NSA can deduce 3.14 by bruteforce attack product2 = enc * 2.718 product2.obfuscate() send_to_nsa(product) # NSA can't deduce 2.718 by bruteforce attack """ r = self.public_key.get_random_lt_n() r_pow_n = powmod(r, self.public_key.n, self.public_key.nsquare) self.__ciphertext = self.__ciphertext * r_pow_n % self.public_key.nsquare self.__is_obfuscated = True
python
def obfuscate(self): """Disguise ciphertext by multiplying by r ** n with random r. This operation must be performed for every `EncryptedNumber` that is sent to an untrusted party, otherwise eavesdroppers might deduce relationships between this and an antecedent `EncryptedNumber`. For example:: enc = public_key.encrypt(1337) send_to_nsa(enc) # NSA can't decrypt (we hope!) product = enc * 3.14 send_to_nsa(product) # NSA can deduce 3.14 by bruteforce attack product2 = enc * 2.718 product2.obfuscate() send_to_nsa(product) # NSA can't deduce 2.718 by bruteforce attack """ r = self.public_key.get_random_lt_n() r_pow_n = powmod(r, self.public_key.n, self.public_key.nsquare) self.__ciphertext = self.__ciphertext * r_pow_n % self.public_key.nsquare self.__is_obfuscated = True
['def', 'obfuscate', '(', 'self', ')', ':', 'r', '=', 'self', '.', 'public_key', '.', 'get_random_lt_n', '(', ')', 'r_pow_n', '=', 'powmod', '(', 'r', ',', 'self', '.', 'public_key', '.', 'n', ',', 'self', '.', 'public_key', '.', 'nsquare', ')', 'self', '.', '__ciphertext', '=', 'self', '.', '__ciphertext', '*', 'r_pow_n', '%', 'self', '.', 'public_key', '.', 'nsquare', 'self', '.', '__is_obfuscated', '=', 'True']
Disguise ciphertext by multiplying by r ** n with random r. This operation must be performed for every `EncryptedNumber` that is sent to an untrusted party, otherwise eavesdroppers might deduce relationships between this and an antecedent `EncryptedNumber`. For example:: enc = public_key.encrypt(1337) send_to_nsa(enc) # NSA can't decrypt (we hope!) product = enc * 3.14 send_to_nsa(product) # NSA can deduce 3.14 by bruteforce attack product2 = enc * 2.718 product2.obfuscate() send_to_nsa(product) # NSA can't deduce 2.718 by bruteforce attack
['Disguise', 'ciphertext', 'by', 'multiplying', 'by', 'r', '**', 'n', 'with', 'random', 'r', '.']
train
https://github.com/n1analytics/python-paillier/blob/955f8c0bfa9623be15b75462b121d28acf70f04b/phe/paillier.py#L596-L617
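An end-to-end sketch with the `phe` package showing where `obfuscate()` fits, per the docstring's threat model:

from phe import paillier

public_key, private_key = paillier.generate_paillier_keypair()
enc = public_key.encrypt(1337)
product = enc * 2.718       # homomorphic scalar multiply; result not yet re-randomized
product.obfuscate()         # re-randomize before handing the ciphertext to anyone
assert abs(private_key.decrypt(product) - 1337 * 2.718) < 1e-6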
1,722
saltstack/salt
salt/returners/__init__.py
_fetch_option
def _fetch_option(cfg, ret_config, virtualname, attr_name): """ Fetch a given option value from the config. @see :func:`get_returner_options` """ # c_cfg is a dictionary returned from config.option for # any options configured for this returner. if isinstance(cfg, dict): c_cfg = cfg else: c_cfg = cfg('{0}'.format(virtualname), {}) default_cfg_key = '{0}.{1}'.format(virtualname, attr_name) if not ret_config: # Using the default configuration key if isinstance(cfg, dict): if default_cfg_key in cfg: return cfg[default_cfg_key] else: return c_cfg.get(attr_name) else: return c_cfg.get(attr_name, cfg(default_cfg_key)) # Using ret_config to override the default configuration key ret_cfg = cfg('{0}.{1}'.format(ret_config, virtualname), {}) override_default_cfg_key = '{0}.{1}.{2}'.format( ret_config, virtualname, attr_name, ) override_cfg_default = cfg(override_default_cfg_key) # Look for the configuration item in the override location ret_override_cfg = ret_cfg.get( attr_name, override_cfg_default ) if ret_override_cfg: return ret_override_cfg # if no configuration item is found, fall back to the default location. return c_cfg.get(attr_name, cfg(default_cfg_key))
python
def _fetch_option(cfg, ret_config, virtualname, attr_name): """ Fetch a given option value from the config. @see :func:`get_returner_options` """ # c_cfg is a dictionary returned from config.option for # any options configured for this returner. if isinstance(cfg, dict): c_cfg = cfg else: c_cfg = cfg('{0}'.format(virtualname), {}) default_cfg_key = '{0}.{1}'.format(virtualname, attr_name) if not ret_config: # Using the default configuration key if isinstance(cfg, dict): if default_cfg_key in cfg: return cfg[default_cfg_key] else: return c_cfg.get(attr_name) else: return c_cfg.get(attr_name, cfg(default_cfg_key)) # Using ret_config to override the default configuration key ret_cfg = cfg('{0}.{1}'.format(ret_config, virtualname), {}) override_default_cfg_key = '{0}.{1}.{2}'.format( ret_config, virtualname, attr_name, ) override_cfg_default = cfg(override_default_cfg_key) # Look for the configuration item in the override location ret_override_cfg = ret_cfg.get( attr_name, override_cfg_default ) if ret_override_cfg: return ret_override_cfg # if no configuration item is found, fall back to the default location. return c_cfg.get(attr_name, cfg(default_cfg_key))
['def', '_fetch_option', '(', 'cfg', ',', 'ret_config', ',', 'virtualname', ',', 'attr_name', ')', ':', '# c_cfg is a dictionary returned from config.option for', '# any options configured for this returner.', 'if', 'isinstance', '(', 'cfg', ',', 'dict', ')', ':', 'c_cfg', '=', 'cfg', 'else', ':', 'c_cfg', '=', 'cfg', '(', "'{0}'", '.', 'format', '(', 'virtualname', ')', ',', '{', '}', ')', 'default_cfg_key', '=', "'{0}.{1}'", '.', 'format', '(', 'virtualname', ',', 'attr_name', ')', 'if', 'not', 'ret_config', ':', '# Using the default configuration key', 'if', 'isinstance', '(', 'cfg', ',', 'dict', ')', ':', 'if', 'default_cfg_key', 'in', 'cfg', ':', 'return', 'cfg', '[', 'default_cfg_key', ']', 'else', ':', 'return', 'c_cfg', '.', 'get', '(', 'attr_name', ')', 'else', ':', 'return', 'c_cfg', '.', 'get', '(', 'attr_name', ',', 'cfg', '(', 'default_cfg_key', ')', ')', '# Using ret_config to override the default configuration key', 'ret_cfg', '=', 'cfg', '(', "'{0}.{1}'", '.', 'format', '(', 'ret_config', ',', 'virtualname', ')', ',', '{', '}', ')', 'override_default_cfg_key', '=', "'{0}.{1}.{2}'", '.', 'format', '(', 'ret_config', ',', 'virtualname', ',', 'attr_name', ',', ')', 'override_cfg_default', '=', 'cfg', '(', 'override_default_cfg_key', ')', '# Look for the configuration item in the override location', 'ret_override_cfg', '=', 'ret_cfg', '.', 'get', '(', 'attr_name', ',', 'override_cfg_default', ')', 'if', 'ret_override_cfg', ':', 'return', 'ret_override_cfg', '# if not configuration item found, fall back to the default location.', 'return', 'c_cfg', '.', 'get', '(', 'attr_name', ',', 'cfg', '(', 'default_cfg_key', ')', ')']
Fetch a given option value from the config. @see :func:`get_returner_options`
['Fetch', 'a', 'given', 'option', 'value', 'from', 'the', 'config', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/__init__.py#L123-L166
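A behavior sketch for the plain-dict branch above; the returner name and value are hypothetical:

# dict configs are probed with the '<virtualname>.<attr_name>' default key first
cfg = {'smtp.host': 'mail.example.com'}      # hypothetical values
_fetch_option(cfg, ret_config=None, virtualname='smtp', attr_name='host')
# -> 'mail.example.com'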
1,723
sorend/sshconf
sshconf.py
SshConfig.__check_host_args
def __check_host_args(self, host, keys): """Checks parameters""" if host not in self.hosts_: raise ValueError("Host %s: not found" % host) if "host" in [x.lower() for x in keys]: raise ValueError("Cannot modify Host value")
python
def __check_host_args(self, host, keys): """Checks parameters""" if host not in self.hosts_: raise ValueError("Host %s: not found" % host) if "host" in [x.lower() for x in keys]: raise ValueError("Cannot modify Host value")
['def', '__check_host_args', '(', 'self', ',', 'host', ',', 'keys', ')', ':', 'if', 'host', 'not', 'in', 'self', '.', 'hosts_', ':', 'raise', 'ValueError', '(', '"Host %s: not found"', '%', 'host', ')', 'if', '"host"', 'in', '[', 'x', '.', 'lower', '(', ')', 'for', 'x', 'in', 'keys', ']', ':', 'raise', 'ValueError', '(', '"Cannot modify Host value"', ')']
Checks parameters
['Checks', 'parameters']
train
https://github.com/sorend/sshconf/blob/59f3fc165b1ba9e76ba23444b1205d88462938f3/sshconf.py#L252-L258
1,724
pyvisa/pyvisa
pyvisa/highlevel.py
VisaLibraryBase.install_visa_handler
def install_visa_handler(self, session, event_type, handler, user_handle=None): """Installs handlers for event callbacks. :param session: Unique logical identifier to a session. :param event_type: Logical event identifier. :param handler: Interpreted as a valid reference to a handler to be installed by a client application. :param user_handle: A value specified by an application that can be used for identifying handlers uniquely for an event type. :returns: user handle (a ctypes object) """ try: new_handler = self.install_handler(session, event_type, handler, user_handle) except TypeError as e: raise errors.VisaTypeError(str(e)) self.handlers[session].append(new_handler + (event_type,)) return new_handler[1]
python
def install_visa_handler(self, session, event_type, handler, user_handle=None): """Installs handlers for event callbacks. :param session: Unique logical identifier to a session. :param event_type: Logical event identifier. :param handler: Interpreted as a valid reference to a handler to be installed by a client application. :param user_handle: A value specified by an application that can be used for identifying handlers uniquely for an event type. :returns: user handle (a ctypes object) """ try: new_handler = self.install_handler(session, event_type, handler, user_handle) except TypeError as e: raise errors.VisaTypeError(str(e)) self.handlers[session].append(new_handler + (event_type,)) return new_handler[1]
['def', 'install_visa_handler', '(', 'self', ',', 'session', ',', 'event_type', ',', 'handler', ',', 'user_handle', '=', 'None', ')', ':', 'try', ':', 'new_handler', '=', 'self', '.', 'install_handler', '(', 'session', ',', 'event_type', ',', 'handler', ',', 'user_handle', ')', 'except', 'TypeError', 'as', 'e', ':', 'raise', 'errors', '.', 'VisaTypeError', '(', 'str', '(', 'e', ')', ')', 'self', '.', 'handlers', '[', 'session', ']', '.', 'append', '(', 'new_handler', '+', '(', 'event_type', ',', ')', ')', 'return', 'new_handler', '[', '1', ']']
Installs handlers for event callbacks. :param session: Unique logical identifier to a session. :param event_type: Logical event identifier. :param handler: Interpreted as a valid reference to a handler to be installed by a client application. :param user_handle: A value specified by an application that can be used for identifying handlers uniquely for an event type. :returns: user handle (a ctypes object)
['Installs', 'handlers', 'for', 'event', 'callbacks', '.']
train
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/highlevel.py#L175-L191
1,725
plandes/actioncli
src/python/zensols/actioncli/config.py
Configurable.populate
def populate(self, obj=None, section=None, parse_types=True): """Set attributes in ``obj`` with ``setattr`` from all the values in ``section``. """ section = self.default_section if section is None else section obj = Settings() if obj is None else obj is_dict = isinstance(obj, dict) for k, v in self.get_options(section).items(): if parse_types: if v == 'None': v = None elif self.FLOAT_REGEXP.match(v): v = float(v) elif self.INT_REGEXP.match(v): v = int(v) elif self.BOOL_REGEXP.match(v): v = v == 'True' else: m = self.EVAL_REGEXP.match(v) if m: evalstr = m.group(1) v = eval(evalstr) logger.debug('setting {} => {} on {}'.format(k, v, obj)) if is_dict: obj[k] = v else: setattr(obj, k, v) return obj
python
def populate(self, obj=None, section=None, parse_types=True): """Set attributes in ``obj`` with ``setattr`` from all the values in ``section``. """ section = self.default_section if section is None else section obj = Settings() if obj is None else obj is_dict = isinstance(obj, dict) for k, v in self.get_options(section).items(): if parse_types: if v == 'None': v = None elif self.FLOAT_REGEXP.match(v): v = float(v) elif self.INT_REGEXP.match(v): v = int(v) elif self.BOOL_REGEXP.match(v): v = v == 'True' else: m = self.EVAL_REGEXP.match(v) if m: evalstr = m.group(1) v = eval(evalstr) logger.debug('setting {} => {} on {}'.format(k, v, obj)) if is_dict: obj[k] = v else: setattr(obj, k, v) return obj
['def', 'populate', '(', 'self', ',', 'obj', '=', 'None', ',', 'section', '=', 'None', ',', 'parse_types', '=', 'True', ')', ':', 'section', '=', 'self', '.', 'default_section', 'if', 'section', 'is', 'None', 'else', 'section', 'obj', '=', 'Settings', '(', ')', 'if', 'obj', 'is', 'None', 'else', 'obj', 'is_dict', '=', 'isinstance', '(', 'obj', ',', 'dict', ')', 'for', 'k', ',', 'v', 'in', 'self', '.', 'get_options', '(', 'section', ')', '.', 'items', '(', ')', ':', 'if', 'parse_types', ':', 'if', 'v', '==', "'None'", ':', 'v', '=', 'None', 'elif', 'self', '.', 'FLOAT_REGEXP', '.', 'match', '(', 'v', ')', ':', 'v', '=', 'float', '(', 'v', ')', 'elif', 'self', '.', 'INT_REGEXP', '.', 'match', '(', 'v', ')', ':', 'v', '=', 'int', '(', 'v', ')', 'elif', 'self', '.', 'BOOL_REGEXP', '.', 'match', '(', 'v', ')', ':', 'v', '=', 'v', '==', "'True'", 'else', ':', 'm', '=', 'self', '.', 'EVAL_REGEXP', '.', 'match', '(', 'v', ')', 'if', 'm', ':', 'evalstr', '=', 'm', '.', 'group', '(', '1', ')', 'v', '=', 'eval', '(', 'evalstr', ')', 'logger', '.', 'debug', '(', "'setting {} => {} on {}'", '.', 'format', '(', 'k', ',', 'v', ',', 'obj', ')', ')', 'if', 'is_dict', ':', 'obj', '[', 'k', ']', '=', 'v', 'else', ':', 'setattr', '(', 'obj', ',', 'k', ',', 'v', ')', 'return', 'obj']
Set attributes in ``obj`` with ``setattr`` from all the values in ``section``.
['Set', 'attributes', 'in', 'obj', 'with', 'setattr', 'from', 'all', 'the', 'values', 'in', 'section', '.']
train
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/config.py#L49-L77
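A standalone sketch of the string-to-type coercion the regexes drive; the exact patterns are assumptions (the real ones live on `Configurable`), and the `eval` branch is deliberately omitted:

import re

FLOAT_RE = re.compile(r'^[-+]?\d*\.\d+$')    # assumed shape of FLOAT_REGEXP
INT_RE = re.compile(r'^[-+]?\d+$')           # assumed shape of INT_REGEXP

def coerce(v):
    if v == 'None':
        return None
    if FLOAT_RE.match(v):
        return float(v)
    if INT_RE.match(v):
        return int(v)
    if v in ('True', 'False'):               # stands in for BOOL_REGEXP
        return v == 'True'
    return v                                 # eval branch omitted on purpose

[coerce(v) for v in ('None', '3.5', '42', 'True', 'abc')]
# -> [None, 3.5, 42, True, 'abc']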
1,726
korfuri/django-prometheus
django_prometheus/models.py
ExportModelOperationsMixin
def ExportModelOperationsMixin(model_name): """Returns a mixin for models to export counters for lifecycle operations. Usage: class User(ExportModelOperationsMixin('user'), Model): ... """ # Force create the labels for this model in the counters. This # is not necessary but it avoids gaps in the aggregated data. model_inserts.labels(model_name) model_updates.labels(model_name) model_deletes.labels(model_name) class Mixin(object): def _do_insert(self, *args, **kwargs): model_inserts.labels(model_name).inc() return super(Mixin, self)._do_insert(*args, **kwargs) def _do_update(self, *args, **kwargs): model_updates.labels(model_name).inc() return super(Mixin, self)._do_update(*args, **kwargs) def delete(self, *args, **kwargs): model_deletes.labels(model_name).inc() return super(Mixin, self).delete(*args, **kwargs) return Mixin
python
def ExportModelOperationsMixin(model_name): """Returns a mixin for models to export counters for lifecycle operations. Usage: class User(ExportModelOperationsMixin('user'), Model): ... """ # Force create the labels for this model in the counters. This # is not necessary but it avoids gaps in the aggregated data. model_inserts.labels(model_name) model_updates.labels(model_name) model_deletes.labels(model_name) class Mixin(object): def _do_insert(self, *args, **kwargs): model_inserts.labels(model_name).inc() return super(Mixin, self)._do_insert(*args, **kwargs) def _do_update(self, *args, **kwargs): model_updates.labels(model_name).inc() return super(Mixin, self)._do_update(*args, **kwargs) def delete(self, *args, **kwargs): model_deletes.labels(model_name).inc() return super(Mixin, self).delete(*args, **kwargs) return Mixin
['def', 'ExportModelOperationsMixin', '(', 'model_name', ')', ':', '# Force create the labels for this model in the counters. This', '# is not necessary but it avoids gaps in the aggregated data.', 'model_inserts', '.', 'labels', '(', 'model_name', ')', 'model_updates', '.', 'labels', '(', 'model_name', ')', 'model_deletes', '.', 'labels', '(', 'model_name', ')', 'class', 'Mixin', '(', 'object', ')', ':', 'def', '_do_insert', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'model_inserts', '.', 'labels', '(', 'model_name', ')', '.', 'inc', '(', ')', 'return', 'super', '(', 'Mixin', ',', 'self', ')', '.', '_do_insert', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'def', '_do_update', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'model_updates', '.', 'labels', '(', 'model_name', ')', '.', 'inc', '(', ')', 'return', 'super', '(', 'Mixin', ',', 'self', ')', '.', '_do_update', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'def', 'delete', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'model_deletes', '.', 'labels', '(', 'model_name', ')', '.', 'inc', '(', ')', 'return', 'super', '(', 'Mixin', ',', 'self', ')', '.', 'delete', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'Mixin']
Returns a mixin for models to export counters for lifecycle operations. Usage: class User(ExportModelOperationsMixin('user'), Model): ...
['Returns', 'a', 'mixin', 'for', 'models', 'to', 'export', 'counters', 'for', 'lifecycle', 'operations', '.']
train
https://github.com/korfuri/django-prometheus/blob/c3a19ce46d812f76d9316e50a232878c27c9bdf5/django_prometheus/models.py#L19-L44
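Expanding the docstring's usage line into a full (hypothetical) model; the counter labels come from the string passed to the mixin factory:

from django.db import models
from django_prometheus.models import ExportModelOperationsMixin

class User(ExportModelOperationsMixin('user'), models.Model):
    name = models.CharField(max_length=100)   # hypothetical field

# User.objects.create(...) then increments the inserts counter labeled
# model="user"; updates and deletes are counted the same way.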
1,727
iotile/coretools
iotilebuild/iotile/build/build/build.py
TargetSettings.archs
def archs(self, as_list=False): """Return all of the architectures for this target. Args: as_list (bool): Return a list instead of the default set object. Returns: set or list: All of the architectures used in this TargetSettings object. """ archs = self.arch_list().split('/') if as_list: return archs return set(archs)
python
def archs(self, as_list=False): """Return all of the architectures for this target. Args: as_list (bool): Return a list instead of the default set object. Returns: set or list: All of the architectures used in this TargetSettings object. """ archs = self.arch_list().split('/') if as_list: return archs return set(archs)
['def', 'archs', '(', 'self', ',', 'as_list', '=', 'False', ')', ':', 'archs', '=', 'self', '.', 'arch_list', '(', ')', '.', 'split', '(', "'/'", ')', 'if', 'as_list', ':', 'return', 'archs', 'return', 'set', '(', 'archs', ')']
Return all of the architectures for this target. Args: as_list (bool): Return a list instead of the default set object. Returns: set or list: All of the architectures used in this TargetSettings object.
['Return', 'all', 'of', 'the', 'architectures', 'for', 'this', 'target', '.']
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/build/build.py#L92-L107
1,728
boriel/zxbasic
arch/zx48k/backend/__parray.py
_pastorestr
def _pastorestr(ins): ''' Stores a string value into a memory address. It copies content of 2nd operand (string), into 1st, reallocating dynamic memory for the 1st str. This instruction DOES ALLOW immediate strings for the 2nd parameter, starting with '#'. ''' output = _paddr(ins.quad[1]) temporal = False value = ins.quad[2] indirect = value[0] == '*' if indirect: value = value[1:] immediate = value[0] if immediate: value = value[1:] if value[0] == '_': if indirect: if immediate: output.append('ld de, (%s)' % value) else: output.append('ld de, (%s)' % value) output.append('call __LOAD_DE_DE') REQUIRES.add('lddede.asm') else: if immediate: output.append('ld de, %s' % value) else: output.append('ld de, (%s)' % value) else: output.append('pop de') temporal = True if indirect: output.append('call __LOAD_DE_DE') REQUIRES.add('lddede.asm') if not temporal: output.append('call __STORE_STR') REQUIRES.add('storestr.asm') else: # A value already on dynamic memory output.append('call __STORE_STR2') REQUIRES.add('storestr2.asm') return output
python
def _pastorestr(ins): ''' Stores a string value into a memory address. It copies content of 2nd operand (string), into 1st, reallocating dynamic memory for the 1st str. This instruction DOES ALLOW immediate strings for the 2nd parameter, starting with '#'. ''' output = _paddr(ins.quad[1]) temporal = False value = ins.quad[2] indirect = value[0] == '*' if indirect: value = value[1:] immediate = value[0] if immediate: value = value[1:] if value[0] == '_': if indirect: if immediate: output.append('ld de, (%s)' % value) else: output.append('ld de, (%s)' % value) output.append('call __LOAD_DE_DE') REQUIRES.add('lddede.asm') else: if immediate: output.append('ld de, %s' % value) else: output.append('ld de, (%s)' % value) else: output.append('pop de') temporal = True if indirect: output.append('call __LOAD_DE_DE') REQUIRES.add('lddede.asm') if not temporal: output.append('call __STORE_STR') REQUIRES.add('storestr.asm') else: # A value already on dynamic memory output.append('call __STORE_STR2') REQUIRES.add('storestr2.asm') return output
['def', '_pastorestr', '(', 'ins', ')', ':', 'output', '=', '_paddr', '(', 'ins', '.', 'quad', '[', '1', ']', ')', 'temporal', '=', 'False', 'value', '=', 'ins', '.', 'quad', '[', '2', ']', 'indirect', '=', 'value', '[', '0', ']', '==', "'*'", 'if', 'indirect', ':', 'value', '=', 'value', '[', '1', ':', ']', 'immediate', '=', 'value', '[', '0', ']', 'if', 'immediate', ':', 'value', '=', 'value', '[', '1', ':', ']', 'if', 'value', '[', '0', ']', '==', "'_'", ':', 'if', 'indirect', ':', 'if', 'immediate', ':', 'output', '.', 'append', '(', "'ld de, (%s)'", '%', 'value', ')', 'else', ':', 'output', '.', 'append', '(', "'ld de, (%s)'", '%', 'value', ')', 'output', '.', 'append', '(', "'call __LOAD_DE_DE'", ')', 'REQUIRES', '.', 'add', '(', "'lddede.asm'", ')', 'else', ':', 'if', 'immediate', ':', 'output', '.', 'append', '(', "'ld de, %s'", '%', 'value', ')', 'else', ':', 'output', '.', 'append', '(', "'ld de, (%s)'", '%', 'value', ')', 'else', ':', 'output', '.', 'append', '(', "'pop de'", ')', 'temporal', '=', 'True', 'if', 'indirect', ':', 'output', '.', 'append', '(', "'call __LOAD_DE_DE'", ')', 'REQUIRES', '.', 'add', '(', "'lddede.asm'", ')', 'if', 'not', 'temporal', ':', 'output', '.', 'append', '(', "'call __STORE_STR'", ')', 'REQUIRES', '.', 'add', '(', "'storestr.asm'", ')', 'else', ':', '# A value already on dynamic memory', 'output', '.', 'append', '(', "'call __STORE_STR2'", ')', 'REQUIRES', '.', 'add', '(', "'storestr2.asm'", ')', 'return', 'output']
Stores a string value into a memory address. It copies content of 2nd operand (string), into 1st, reallocating dynamic memory for the 1st str. This instruction DOES ALLOW immediate strings for the 2nd parameter, starting with '#'.
['Stores', 'a', 'string', 'value', 'into', 'a', 'memory', 'address', '.', 'It', 'copies', 'content', 'of', '2nd', 'operand', '(', 'string', ')', 'into', '1st', 'reallocating', 'dynamic', 'memory', 'for', 'the', '1st', 'str', '.', 'This', 'instruction', 'DOES', 'ALLOW', 'immediate', 'strings', 'for', 'the', '2nd', 'parameter', 'starting', 'with', '#', '.']
train
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__parray.py#L308-L354
1,729
serge-sans-paille/pythran
pythran/types/types.py
Types.visit_Return
def visit_Return(self, node): """ Compute the return type and merge it with other possible return types.""" self.generic_visit(node) # No merges are done if the function is a generator. if not self.yield_points: assert node.value, "Values were added in each return statement." self.combine(self.current, node.value)
python
def visit_Return(self, node): """ Compute the return type and merge it with other possible return types.""" self.generic_visit(node) # No merges are done if the function is a generator. if not self.yield_points: assert node.value, "Values were added in each return statement." self.combine(self.current, node.value)
['def', 'visit_Return', '(', 'self', ',', 'node', ')', ':', 'self', '.', 'generic_visit', '(', 'node', ')', '# No merges are done if the function is a generator.', 'if', 'not', 'self', '.', 'yield_points', ':', 'assert', 'node', '.', 'value', ',', '"Values were added in each return statement."', 'self', '.', 'combine', '(', 'self', '.', 'current', ',', 'node', '.', 'value', ')']
Compute the return type and merge it with other possible return types.
['Compute', 'the', 'return', 'type', 'and', 'merge', 'it', 'with', 'other', 'possible', 'return', 'types', '.']
train
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/types.py#L293-L299
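The same visit-children-then-record pattern, shown as a self-contained analogue with the stdlib `ast.NodeVisitor`:

import ast

class ReturnCollector(ast.NodeVisitor):
    """Collect the value node of every return statement."""
    def __init__(self):
        self.returns = []

    def visit_Return(self, node):
        self.generic_visit(node)          # visit children first, as above
        self.returns.append(node.value)

rc = ReturnCollector()
rc.visit(ast.parse("def f(x):\n    return x + 1\n"))
len(rc.returns)   # -> 1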
1,730
twisted/mantissa
xmantissa/liveform.py
ListChangeParameter._coerceAll
def _coerceAll(self, inputs): """ XXX """ def associate(result, obj): return (obj, result) coerceDeferreds = [] for obj, dataSet in inputs: oneCoerce = self._coerceSingleRepetition(dataSet) oneCoerce.addCallback(associate, obj) coerceDeferreds.append(oneCoerce) return gatherResults(coerceDeferreds)
python
def _coerceAll(self, inputs): """ XXX """ def associate(result, obj): return (obj, result) coerceDeferreds = [] for obj, dataSet in inputs: oneCoerce = self._coerceSingleRepetition(dataSet) oneCoerce.addCallback(associate, obj) coerceDeferreds.append(oneCoerce) return gatherResults(coerceDeferreds)
['def', '_coerceAll', '(', 'self', ',', 'inputs', ')', ':', 'def', 'associate', '(', 'result', ',', 'obj', ')', ':', 'return', '(', 'obj', ',', 'result', ')', 'coerceDeferreds', '=', '[', ']', 'for', 'obj', ',', 'dataSet', 'in', 'inputs', ':', 'oneCoerce', '=', 'self', '.', '_coerceSingleRepetition', '(', 'dataSet', ')', 'oneCoerce', '.', 'addCallback', '(', 'associate', ',', 'obj', ')', 'coerceDeferreds', '.', 'append', '(', 'oneCoerce', ')', 'return', 'gatherResults', '(', 'coerceDeferreds', ')']
XXX
['XXX']
train
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/liveform.py#L531-L543
1,731
aio-libs/aiodocker
aiodocker/swarm.py
DockerSwarm.join
async def join( self, *, remote_addrs: Iterable[str], listen_addr: str = "0.0.0.0:2377", join_token: str, advertise_addr: str = None, data_path_addr: str = None ) -> bool: """ Join a swarm. Args: listen_addr Used for inter-manager communication advertise_addr Externally reachable address advertised to other nodes. data_path_addr Address or interface to use for data path traffic. remote_addrs Addresses of manager nodes already participating in the swarm. join_token Secret token for joining this swarm. """ data = { "RemoteAddrs": list(remote_addrs), "JoinToken": join_token, "ListenAddr": listen_addr, "AdvertiseAddr": advertise_addr, "DataPathAddr": data_path_addr, } await self.docker._query("swarm/join", method="POST", data=clean_map(data)) return True
python
async def join( self, *, remote_addrs: Iterable[str], listen_addr: str = "0.0.0.0:2377", join_token: str, advertise_addr: str = None, data_path_addr: str = None ) -> bool: """ Join a swarm. Args: listen_addr Used for inter-manager communication advertise_addr Externally reachable address advertised to other nodes. data_path_addr Address or interface to use for data path traffic. remote_addrs Addresses of manager nodes already participating in the swarm. join_token Secret token for joining this swarm. """ data = { "RemoteAddrs": list(remote_addrs), "JoinToken": join_token, "ListenAddr": listen_addr, "AdvertiseAddr": advertise_addr, "DataPathAddr": data_path_addr, } await self.docker._query("swarm/join", method="POST", data=clean_map(data)) return True
['async', 'def', 'join', '(', 'self', ',', '*', ',', 'remote_addrs', ':', 'Iterable', '[', 'str', ']', ',', 'listen_addr', ':', 'str', '=', '"0.0.0.0:2377"', ',', 'join_token', ':', 'str', ',', 'advertise_addr', ':', 'str', '=', 'None', ',', 'data_path_addr', ':', 'str', '=', 'None', ')', '->', 'bool', ':', 'data', '=', '{', '"RemoteAddrs"', ':', 'list', '(', 'remote_addrs', ')', ',', '"JoinToken"', ':', 'join_token', ',', '"ListenAddr"', ':', 'listen_addr', ',', '"AdvertiseAddr"', ':', 'advertise_addr', ',', '"DataPathAddr"', ':', 'data_path_addr', ',', '}', 'await', 'self', '.', 'docker', '.', '_query', '(', '"swarm/join"', ',', 'method', '=', '"POST"', ',', 'data', '=', 'clean_map', '(', 'data', ')', ')', 'return', 'True']
Join a swarm. Args: listen_addr Used for inter-manager communication advertise_addr Externally reachable address advertised to other nodes. data_path_addr Address or interface to use for data path traffic. remote_addrs Addresses of manager nodes already participating in the swarm. join_token Secret token for joining this swarm.
['Join', 'a', 'swarm', '.']
train
https://github.com/aio-libs/aiodocker/blob/88d0285ddba8e606ff684278e0a831347209189c/aiodocker/swarm.py#L54-L93
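A hedged usage sketch; the manager address and join token below are placeholders, not real values:

import asyncio
import aiodocker

async def main():
    docker = aiodocker.Docker()
    try:
        await docker.swarm.join(
            remote_addrs=["10.0.0.1:2377"],   # placeholder manager address
            join_token="SWMTKN-...",          # placeholder join token
        )
    finally:
        await docker.close()

asyncio.run(main())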
1,732
gersolar/goescalibration
goescalibration/instrument.py
calibrate
def calibrate(filename): """ Append the calibration parameters as variables of the netcdf file. Keyword arguments: filename -- the name of a netcdf file. """ params = calibration_to(filename) with nc.loader(filename) as root: for key, value in params.items(): nc.getdim(root, 'xc_1', 1) nc.getdim(root, 'yc_1', 1) if isinstance(value, list): for i in range(len(value)): nc.getvar(root, '%s_%i' % (key, i), 'f4', ('time', 'yc_1', 'xc_1' ))[:] = value[i] else: nc.getvar(root, key, 'f4', ('time', 'yc_1', 'xc_1'))[:] = value
python
def calibrate(filename): """ Append the calibration parameters as variables of the netcdf file. Keyword arguments: filename -- the name of a netcdf file. """ params = calibration_to(filename) with nc.loader(filename) as root: for key, value in params.items(): nc.getdim(root, 'xc_1', 1) nc.getdim(root, 'yc_1', 1) if isinstance(value, list): for i in range(len(value)): nc.getvar(root, '%s_%i' % (key, i), 'f4', ('time', 'yc_1', 'xc_1' ))[:] = value[i] else: nc.getvar(root, key, 'f4', ('time', 'yc_1', 'xc_1'))[:] = value
['def', 'calibrate', '(', 'filename', ')', ':', 'params', '=', 'calibration_to', '(', 'filename', ')', 'with', 'nc', '.', 'loader', '(', 'filename', ')', 'as', 'root', ':', 'for', 'key', ',', 'value', 'in', 'params', '.', 'items', '(', ')', ':', 'nc', '.', 'getdim', '(', 'root', ',', "'xc_1'", ',', '1', ')', 'nc', '.', 'getdim', '(', 'root', ',', "'yc_1'", ',', '1', ')', 'if', 'isinstance', '(', 'value', ',', 'list', ')', ':', 'for', 'i', 'in', 'range', '(', 'len', '(', 'value', ')', ')', ':', 'nc', '.', 'getvar', '(', 'root', ',', "'%s_%i'", '%', '(', 'key', ',', 'i', ')', ',', "'f4'", ',', '(', "'time'", ',', "'yc_1'", ',', "'xc_1'", ')', ')', '[', ':', ']', '=', 'value', '[', 'i', ']', 'else', ':', 'nc', '.', 'getvar', '(', 'root', ',', 'key', ',', "'f4'", ',', '(', "'time'", ',', "'yc_1'", ',', "'xc_1'", ')', ')', '[', ':', ']', '=', 'value']
Append the calibration parameters as variables of the netcdf file. Keyword arguments: filename -- the name of a netcdf file.
['Append', 'the', 'calibration', 'parameters', 'as', 'variables', 'of', 'the', 'netcdf', 'file', '.']
train
https://github.com/gersolar/goescalibration/blob/aab7f3e3cede9694e90048ceeaea74566578bc75/goescalibration/instrument.py#L37-L53
1,733
cloud-custodian/cloud-custodian
c7n/cli.py
_logs_options
def _logs_options(p): """ Add options specific to logs subcommand. """ _default_options(p, blacklist=['cache', 'quiet']) # default time range is 0 to "now" (to include all log entries) p.add_argument( '--start', default='the beginning', # invalid, will result in 0 help='Start date and/or time', ) p.add_argument( '--end', default=datetime.now().strftime('%c'), help='End date and/or time', )
python
def _logs_options(p): """ Add options specific to logs subcommand. """ _default_options(p, blacklist=['cache', 'quiet']) # default time range is 0 to "now" (to include all log entries) p.add_argument( '--start', default='the beginning', # invalid, will result in 0 help='Start date and/or time', ) p.add_argument( '--end', default=datetime.now().strftime('%c'), help='End date and/or time', )
['def', '_logs_options', '(', 'p', ')', ':', '_default_options', '(', 'p', ',', 'blacklist', '=', '[', "'cache'", ',', "'quiet'", ']', ')', '# default time range is 0 to "now" (to include all log entries)', 'p', '.', 'add_argument', '(', "'--start'", ',', 'default', '=', "'the beginning'", ',', '# invalid, will result in 0', 'help', '=', "'Start date and/or time'", ',', ')', 'p', '.', 'add_argument', '(', "'--end'", ',', 'default', '=', 'datetime', '.', 'now', '(', ')', '.', 'strftime', '(', "'%c'", ')', ',', 'help', '=', "'End date and/or time'", ',', ')']
Add options specific to logs subcommand.
['Add', 'options', 'specific', 'to', 'logs', 'subcommand', '.']
train
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/cli.py#L153-L167
1,734
FutunnOpen/futuquant
futuquant/quote/quote_response_handler.py
BrokerHandlerBase.on_recv_rsp
def on_recv_rsp(self, rsp_pb): """ Called back after a real-time broker queue push is received; users should override this method in a derived class. Note that the callback runs in a separate child thread. :param rsp_pb: derived classes do not need to process this parameter directly :return: on success, returns (RET_OK, stock_code, [bid_frame_table, ask_frame_table]); see the return value description of get_broker_queue_ for the meaning of the frame tables. On failure, returns (RET_ERROR, ERR_MSG, None) """ ret_code, content = self.parse_rsp_pb(rsp_pb) if ret_code != RET_OK: return ret_code, content, None else: stock_code, bid_content, ask_content = content bid_list = [ 'code', 'bid_broker_id', 'bid_broker_name', 'bid_broker_pos' ] ask_list = [ 'code', 'ask_broker_id', 'ask_broker_name', 'ask_broker_pos' ] bid_frame_table = pd.DataFrame(bid_content, columns=bid_list) ask_frame_table = pd.DataFrame(ask_content, columns=ask_list) return RET_OK, stock_code, [bid_frame_table, ask_frame_table]
python
def on_recv_rsp(self, rsp_pb): """ Called back after a real-time broker queue push is received; users should override this method in a derived class. Note that the callback runs in a separate child thread. :param rsp_pb: derived classes do not need to process this parameter directly :return: on success, returns (RET_OK, stock_code, [bid_frame_table, ask_frame_table]); see the return value description of get_broker_queue_ for the meaning of the frame tables. On failure, returns (RET_ERROR, ERR_MSG, None) """ ret_code, content = self.parse_rsp_pb(rsp_pb) if ret_code != RET_OK: return ret_code, content, None else: stock_code, bid_content, ask_content = content bid_list = [ 'code', 'bid_broker_id', 'bid_broker_name', 'bid_broker_pos' ] ask_list = [ 'code', 'ask_broker_id', 'ask_broker_name', 'ask_broker_pos' ] bid_frame_table = pd.DataFrame(bid_content, columns=bid_list) ask_frame_table = pd.DataFrame(ask_content, columns=ask_list) return RET_OK, stock_code, [bid_frame_table, ask_frame_table]
['def', 'on_recv_rsp', '(', 'self', ',', 'rsp_pb', ')', ':', 'ret_code', ',', 'content', '=', 'self', '.', 'parse_rsp_pb', '(', 'rsp_pb', ')', 'if', 'ret_code', '!=', 'RET_OK', ':', 'return', 'ret_code', ',', 'content', ',', 'None', 'else', ':', 'stock_code', ',', 'bid_content', ',', 'ask_content', '=', 'content', 'bid_list', '=', '[', "'code'", ',', "'bid_broker_id'", ',', "'bid_broker_name'", ',', "'bid_broker_pos'", ']', 'ask_list', '=', '[', "'code'", ',', "'ask_broker_id'", ',', "'ask_broker_name'", ',', "'ask_broker_pos'", ']', 'bid_frame_table', '=', 'pd', '.', 'DataFrame', '(', 'bid_content', ',', 'columns', '=', 'bid_list', ')', 'ask_frame_table', '=', 'pd', '.', 'DataFrame', '(', 'ask_content', ',', 'columns', '=', 'ask_list', ')', 'return', 'RET_OK', ',', 'stock_code', ',', '[', 'bid_frame_table', ',', 'ask_frame_table', ']']
Called back after a real-time broker queue push is received; users should override this method in a derived class. Note that the callback runs in a separate child thread. :param rsp_pb: derived classes do not need to process this parameter directly :return: on success, returns (RET_OK, stock_code, [bid_frame_table, ask_frame_table]); see the return value description of get_broker_queue_ for the meaning of the frame tables. On failure, returns (RET_ERROR, ERR_MSG, None)
['在收到实时经纪数据推送后会回调到该函数,使用者需要在派生类中覆盖此方法']
train
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/quote/quote_response_handler.py#L266-L291
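Per the docstring, users subclass the handler and override this method; a hedged sketch of that pattern, reusing the base class's parsing:

class MyBrokerHandler(BrokerHandlerBase):
    def on_recv_rsp(self, rsp_pb):
        # delegate parsing to the base class (runs in a worker thread),
        # then post-process the two broker-queue frames
        ret, code, data = super(MyBrokerHandler, self).on_recv_rsp(rsp_pb)
        if ret == RET_OK:
            bid_frame, ask_frame = data
            print(code, len(bid_frame), len(ask_frame))
        return ret, code, data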
1,735
bitesofcode/projex
projex/cli.py
climethod.parser
def parser(self): """ Creates a parser for the method based on the documentation. :return <OptionParser> """ usage = self.usage() if self.__doc__: usage += '\n' + nstr(self.__doc__) parse = PARSER_CLASS(usage=usage) shorts = {v: k for k, v in self.short_keys.items()} for key, default in self.cmd_opts.items(): # default key, cannot be duplicated if key == 'help': continue try: short = '-' + shorts[key] except KeyError: short = '' if default is True: action = 'store_false' elif default is False: action = 'store_true' else: action = 'store' # add the option parse.add_option(short, '--%s' % key, action=action, default=default) return parse
python
def parser(self): """ Creates a parser for the method based on the documentation. :return <OptionParser> """ usage = self.usage() if self.__doc__: usage += '\n' + nstr(self.__doc__) parse = PARSER_CLASS(usage=usage) shorts = {v: k for k, v in self.short_keys.items()} for key, default in self.cmd_opts.items(): # default key, cannot be duplicated if key == 'help': continue try: short = '-' + shorts[key] except KeyError: short = '' if default is True: action = 'store_false' elif default is False: action = 'store_true' else: action = 'store' # add the option parse.add_option(short, '--%s' % key, action=action, default=default) return parse
['def', 'parser', '(', 'self', ')', ':', 'usage', '=', 'self', '.', 'usage', '(', ')', 'if', 'self', '.', '__doc__', ':', 'usage', '+=', "'\\n'", '+', 'nstr', '(', 'self', '.', '__doc__', ')', 'parse', '=', 'PARSER_CLASS', '(', 'usage', '=', 'usage', ')', 'shorts', '=', '{', 'v', ':', 'k', 'for', 'k', ',', 'v', 'in', 'self', '.', 'short_keys', '.', 'items', '(', ')', '}', 'for', 'key', ',', 'default', 'in', 'self', '.', 'cmd_opts', '.', 'items', '(', ')', ':', '# default key, cannot be duplicated', 'if', 'key', '==', "'help'", ':', 'continue', 'try', ':', 'short', '=', "'-'", '+', 'shorts', '[', 'key', ']', 'except', 'KeyError', ':', 'short', '=', "''", 'if', 'default', 'is', 'True', ':', 'action', '=', "'store_false'", 'elif', 'default', 'is', 'False', ':', 'action', '=', "'store_true'", 'else', ':', 'action', '=', "'store'", '# add the option', 'parse', '.', 'add_option', '(', 'short', ',', "'--%s'", '%', 'key', ',', 'action', '=', 'action', ',', 'default', '=', 'default', ')', 'return', 'parse']
Creates a parser for the method based on the documentation. :return <OptionParser>
['Creates', 'a', 'parser', 'for', 'the', 'method', 'based', 'on', 'the', 'documentation', '.', ':', 'return', '<OptionParser', '>']
train
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/cli.py#L101-L134
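The default-to-action mapping is the interesting part: a True default becomes a store_false flag, a False default a store_true flag, and anything else a plain store. A self-contained sketch built directly on optparse (cmd_opts and the short-key table below are illustrative, not projex data):

from optparse import OptionParser

cmd_opts = {'verbose': False, 'dry_run': True, 'output': 'out.txt'}
shorts = {'verbose': 'v'}  # hypothetical short-key table

parser = OptionParser(usage='example [options]')
for key, default in cmd_opts.items():
    short = '-' + shorts[key] if key in shorts else ''  # optparse silently drops '' strings
    if default is True:
        action = 'store_false'   # passing the flag switches the option off
    elif default is False:
        action = 'store_true'    # passing the flag switches the option on
    else:
        action = 'store'         # option expects a value
    parser.add_option(short, '--%s' % key, action=action, default=default)

opts, _ = parser.parse_args(['--verbose', '--output', 'report.txt'])
print(opts.verbose, opts.dry_run, opts.output)  # True True report.txt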
1,736
thingful/hypercat-py
hypercat/hypercat.py
Hypercat.findByPath
def findByPath(self, rel, path): """Traverses children, building a path based on relation <rel>, until given path is found.""" if((path=="") or (path=="/")): return(self) (front,dummy,rest) = path.lstrip("/").partition("/") for child in self.items: if front in child.values(rel): return child.findByPath(rel, rest) return None
python
def findByPath(self, rel, path): """Traverses children, building a path based on relation <rel>, until given path is found.""" if((path=="") or (path=="/")): return(self) (front,dummy,rest) = path.lstrip("/").partition("/") for child in self.items: if front in child.values(rel): return child.findByPath(rel, rest) return None
['def', 'findByPath', '(', 'self', ',', 'rel', ',', 'path', ')', ':', 'if', '(', '(', 'path', '==', '""', ')', 'or', '(', 'path', '==', '"/"', ')', ')', ':', 'return', '(', 'self', ')', '(', 'front', ',', 'dummy', ',', 'rest', ')', '=', 'path', '.', 'lstrip', '(', '"/"', ')', '.', 'partition', '(', '"/"', ')', 'for', 'child', 'in', 'self', '.', 'items', ':', 'if', 'front', 'in', 'child', '.', 'values', '(', 'rel', ')', ':', 'return', 'child', '.', 'findByPath', '(', 'rel', ',', 'rest', ')', 'return', 'None']
Traverses children, building a path based on relation <rel>, until given path is found.
['Traverses', 'children', 'building', 'a', 'path', 'based', 'on', 'relation', '<rel', '>', 'until', 'given', 'path', 'is', 'found', '.']
train
https://github.com/thingful/hypercat-py/blob/db24ef66ec92d74fbea90afbeadc3a268f18f6e3/hypercat/hypercat.py#L161-L169
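A standalone sketch of the same recursive descent using plain dicts in place of catalogue items (the data and helper below are illustrative only):

def find_by_path(node, rel, path):
    # An empty or bare-slash path resolves to the current node.
    if path in ('', '/'):
        return node
    front, _, rest = path.lstrip('/').partition('/')
    for child in node['items']:
        # Follow the child whose values for the relation include the next segment.
        if front in child.get(rel, []):
            return find_by_path(child, rel, rest)
    return None

tree = {'items': [{'rel': ['a'], 'items': [{'rel': ['b'], 'items': []}]}]}
print(find_by_path(tree, 'rel', '/a/b'))  # {'rel': ['b'], 'items': []}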
1,737
nerdvegas/rez
src/rez/vendor/sortedcontainers/sortedset.py
SortedSet.symmetric_difference
def symmetric_difference(self, that): """ Return a new set with elements in either *self* or *that* but not both. """ diff = self._set.symmetric_difference(that) return self._fromset(diff, key=self._key)
python
def symmetric_difference(self, that): """ Return a new set with elements in either *self* or *that* but not both. """ diff = self._set.symmetric_difference(that) return self._fromset(diff, key=self._key)
['def', 'symmetric_difference', '(', 'self', ',', 'that', ')', ':', 'diff', '=', 'self', '.', '_set', '.', 'symmetric_difference', '(', 'that', ')', 'return', 'self', '.', '_fromset', '(', 'diff', ',', 'key', '=', 'self', '.', '_key', ')']
Return a new set with elements in either *self* or *that* but not both.
['Return', 'a', 'new', 'set', 'with', 'elements', 'in', 'either', '*', 'self', '*', 'or', '*', 'that', '*', 'but', 'not', 'both', '.']
train
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/sortedcontainers/sortedset.py#L261-L266
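A short usage example, assuming the vendored module behaves like the upstream sortedcontainers package it was copied from:

from sortedcontainers import SortedSet

a = SortedSet([1, 2, 3])
b = SortedSet([3, 4])
# Elements in exactly one of the two sets, returned as a new SortedSet.
print(list(a.symmetric_difference(b)))  # [1, 2, 4]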
1,738
gmr/queries
queries/pool.py
PoolManager.remove_connection
def remove_connection(cls, pid, connection): """Remove a connection from the pool, closing it if is open. :param str pid: The pool ID :param connection: The connection to remove :type connection: psycopg2.extensions.connection :raises: ConnectionNotFoundError """ cls._ensure_pool_exists(pid) cls._pools[pid].remove(connection)
python
def remove_connection(cls, pid, connection): """Remove a connection from the pool, closing it if is open. :param str pid: The pool ID :param connection: The connection to remove :type connection: psycopg2.extensions.connection :raises: ConnectionNotFoundError """ cls._ensure_pool_exists(pid) cls._pools[pid].remove(connection)
['def', 'remove_connection', '(', 'cls', ',', 'pid', ',', 'connection', ')', ':', 'cls', '.', '_ensure_pool_exists', '(', 'pid', ')', 'cls', '.', '_pools', '[', 'pid', ']', '.', 'remove', '(', 'connection', ')']
Remove a connection from the pool, closing it if is open. :param str pid: The pool ID :param connection: The connection to remove :type connection: psycopg2.extensions.connection :raises: ConnectionNotFoundError
['Remove', 'a', 'connection', 'from', 'the', 'pool', 'closing', 'it', 'if', 'is', 'open', '.']
train
https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/pool.py#L616-L626
1,739
hazelcast/hazelcast-python-client
hazelcast/proxy/atomic_long.py
AtomicLong.compare_and_set
def compare_and_set(self, expected, updated): """ Atomically sets the value to the given updated value only if the current value == the expected value. :param expected: (long), the expected value. :param updated: (long), the new value. :return: (bool), ``true`` if successful; or ``false`` if the actual value was not equal to the expected value. """ return self._encode_invoke(atomic_long_compare_and_set_codec, expected=expected, updated=updated)
python
def compare_and_set(self, expected, updated): """ Atomically sets the value to the given updated value only if the current value == the expected value. :param expected: (long), the expected value. :param updated: (long), the new value. :return: (bool), ``true`` if successful; or ``false`` if the actual value was not equal to the expected value. """ return self._encode_invoke(atomic_long_compare_and_set_codec, expected=expected, updated=updated)
['def', 'compare_and_set', '(', 'self', ',', 'expected', ',', 'updated', ')', ':', 'return', 'self', '.', '_encode_invoke', '(', 'atomic_long_compare_and_set_codec', ',', 'expected', '=', 'expected', ',', 'updated', '=', 'updated', ')']
Atomically sets the value to the given updated value only if the current value == the expected value. :param expected: (long), the expected value. :param updated: (long), the new value. :return: (bool), ``true`` if successful; or ``false`` if the actual value was not equal to the expected value.
['Atomically', 'sets', 'the', 'value', 'to', 'the', 'given', 'updated', 'value', 'only', 'if', 'the', 'current', 'value', '==', 'the', 'expected', 'value', '.']
train
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/atomic_long.py#L60-L69
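A usage sketch of the compare-and-set round trip, assuming the 3.x-era client API this proxy belongs to (the future-based .result() call style and the get_atomic_long factory are assumptions; the distributed AtomicLong was removed in later client versions):

import hazelcast

client = hazelcast.HazelcastClient()
counter = client.get_atomic_long('my-counter')

counter.set(0).result()                          # proxy calls return futures
print(counter.compare_and_set(0, 10).result())   # True: value was 0, now 10
print(counter.compare_and_set(0, 99).result())   # False: value is 10, not 0

client.shutdown()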
1,740
kinegratii/borax
borax/calendars/lunardate.py
TermUtils.get_term_info
def get_term_info(year, month, day): """Parse solar term and stem-branch year/month/day from a solar date. (sy, sm, sd) => (term, next_gz_month) term for year 2101,:2101.1.5(初六) 小寒 2101.1.20(廿一) 大寒 """ if year == 2101: days = [5, 20] else: days = TermUtils.parse_term_days(year) term_index1 = 2 * (month - 1) term_index2 = 2 * (month - 1) + 1 day1 = days[term_index1] day2 = days[term_index2] if day == day1: term_name = TERMS_CN[term_index1] elif day == day2: term_name = TERMS_CN[term_index2] else: term_name = None next_gz_month = day >= day1 return term_name, next_gz_month
python
def get_term_info(year, month, day): """Parse solar term and stem-branch year/month/day from a solar date. (sy, sm, sd) => (term, next_gz_month) term for year 2101,:2101.1.5(初六) 小寒 2101.1.20(廿一) 大寒 """ if year == 2101: days = [5, 20] else: days = TermUtils.parse_term_days(year) term_index1 = 2 * (month - 1) term_index2 = 2 * (month - 1) + 1 day1 = days[term_index1] day2 = days[term_index2] if day == day1: term_name = TERMS_CN[term_index1] elif day == day2: term_name = TERMS_CN[term_index2] else: term_name = None next_gz_month = day >= day1 return term_name, next_gz_month
['def', 'get_term_info', '(', 'year', ',', 'month', ',', 'day', ')', ':', 'if', 'year', '==', '2101', ':', 'days', '=', '[', '5', ',', '20', ']', 'else', ':', 'days', '=', 'TermUtils', '.', 'parse_term_days', '(', 'year', ')', 'term_index1', '=', '2', '*', '(', 'month', '-', '1', ')', 'term_index2', '=', '2', '*', '(', 'month', '-', '1', ')', '+', '1', 'day1', '=', 'days', '[', 'term_index1', ']', 'day2', '=', 'days', '[', 'term_index2', ']', 'if', 'day', '==', 'day1', ':', 'term_name', '=', 'TERMS_CN', '[', 'term_index1', ']', 'elif', 'day', '==', 'day2', ':', 'term_name', '=', 'TERMS_CN', '[', 'term_index2', ']', 'else', ':', 'term_name', '=', 'None', 'next_gz_month', '=', 'day', '>=', 'day1', 'return', 'term_name', ',', 'next_gz_month']
Parse solar term and stem-branch year/month/day from a solar date. (sy, sm, sd) => (term, next_gz_month) Terms for year 2101: 2101.1.5(初六) 小寒, 2101.1.20(廿一) 大寒
['Parse', 'solar', 'term', 'and', 'stem', '-', 'branch', 'year', '/', 'month', '/', 'day', 'from', 'a', 'solar', 'date', '.', '(', 'sy', 'sm', 'sd', ')', '=', '>', '(', 'term', 'next_gz_month', ')', 'term', 'for', 'year', '2101', ':', '2101', '.', '1', '.', '5', '(', '初六', ')', '小寒', '2101', '.', '1', '.', '20', '(', '廿一', ')', '大寒']
train
https://github.com/kinegratii/borax/blob/921649f9277e3f657b6dea5a80e67de9ee5567f6/borax/calendars/lunardate.py#L288-L309
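The index arithmetic is the core: each month owns two of the 24 solar terms, so month m maps to term indices 2*(m-1) and 2*(m-1)+1, and the stem-branch month advances once the first term's day is reached. A worked sketch with a made-up day table:

# Hypothetical day-of-month for each of the 24 terms in some year.
days = [5, 20, 4, 19, 6, 21, 5, 20, 6, 21, 6, 22,
        7, 23, 8, 23, 8, 23, 8, 24, 7, 22, 7, 22]

for month, day in [(1, 5), (1, 20), (1, 10)]:
    i1 = 2 * (month - 1)           # first term of the month
    i2 = i1 + 1                    # second term of the month
    hit = i1 if day == days[i1] else i2 if day == days[i2] else None
    next_gz_month = day >= days[i1]
    print(month, day, hit, next_gz_month)
# 1 5 0 True / 1 20 1 True / 1 10 None True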
1,741
leonidessaguisagjr/unicodeutil
unicodeutil/unicodeutil.py
UnicodeData.lookup_by_name
def lookup_by_name(self, name): """ Function for retrieving the UnicodeCharacter associated with a name. The name lookup uses the loose matching rule UAX44-LM2 for loose matching. See the following for more info: https://www.unicode.org/reports/tr44/#UAX44-LM2 For example: ucd = UnicodeData() ucd.lookup_by_name("LATIN SMALL LETTER SHARP S") -> UnicodeCharacter(name='LATIN SMALL LETTER SHARP S',...) ucd.lookup_by_name("latin_small_letter_sharp_s") -> UnicodeCharacter(name='LATIN SMALL LETTER SHARP S',...) :param name: Name of the character to look up. :return: UnicodeCharacter instance with data associated with the character. """ try: return self._name_database[_uax44lm2transform(name)] except KeyError: raise KeyError(u"Unknown character name: '{0}'!".format(name))
python
def lookup_by_name(self, name): """ Function for retrieving the UnicodeCharacter associated with a name. The name lookup uses the loose matching rule UAX44-LM2 for loose matching. See the following for more info: https://www.unicode.org/reports/tr44/#UAX44-LM2 For example: ucd = UnicodeData() ucd.lookup_by_name("LATIN SMALL LETTER SHARP S") -> UnicodeCharacter(name='LATIN SMALL LETTER SHARP S',...) ucd.lookup_by_name("latin_small_letter_sharp_s") -> UnicodeCharacter(name='LATIN SMALL LETTER SHARP S',...) :param name: Name of the character to look up. :return: UnicodeCharacter instance with data associated with the character. """ try: return self._name_database[_uax44lm2transform(name)] except KeyError: raise KeyError(u"Unknown character name: '{0}'!".format(name))
['def', 'lookup_by_name', '(', 'self', ',', 'name', ')', ':', 'try', ':', 'return', 'self', '.', '_name_database', '[', '_uax44lm2transform', '(', 'name', ')', ']', 'except', 'KeyError', ':', 'raise', 'KeyError', '(', 'u"Unknown character name: \'{0}\'!"', '.', 'format', '(', 'name', ')', ')']
Function for retrieving the UnicodeCharacter associated with a name. The name lookup uses the loose matching rule UAX44-LM2 for loose matching. See the following for more info: https://www.unicode.org/reports/tr44/#UAX44-LM2 For example: ucd = UnicodeData() ucd.lookup_by_name("LATIN SMALL LETTER SHARP S") -> UnicodeCharacter(name='LATIN SMALL LETTER SHARP S',...) ucd.lookup_by_name("latin_small_letter_sharp_s") -> UnicodeCharacter(name='LATIN SMALL LETTER SHARP S',...) :param name: Name of the character to look up. :return: UnicodeCharacter instance with data associated with the character.
['Function', 'for', 'retrieving', 'the', 'UnicodeCharacter', 'associated', 'with', 'a', 'name', '.', 'The', 'name', 'lookup', 'uses', 'the', 'loose', 'matching', 'rule', 'UAX44', '-', 'LM2', 'for', 'loose', 'matching', '.', 'See', 'the', 'following', 'for', 'more', 'info', ':']
train
https://github.com/leonidessaguisagjr/unicodeutil/blob/c25c882cf9cb38c123df49fad365be67e5818928/unicodeutil/unicodeutil.py#L339-L359
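A simplified sketch of the UAX44-LM2 loose-matching key the lookup relies on; this version drops all whitespace, underscores and hyphens and upper-cases the rest, ignoring the medial-hyphen exceptions (e.g. HANGUL JUNGSEONG O-E) that a full implementation must special-case:

import re

def loose_key(name):
    # Remove whitespace, underscores and hyphens, then normalise case.
    return re.sub(r'[\s_-]+', '', name).upper()

print(loose_key('LATIN SMALL LETTER SHARP S'))   # LATINSMALLLETTERSHARPS
print(loose_key('latin_small_letter_sharp_s'))   # LATINSMALLLETTERSHARPS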
1,742
ethereum/eth-abi
eth_abi/decoding.py
ContextFramesBytesIO.push_frame
def push_frame(self, offset): """ Pushes a new contextual frame onto the stack with the given offset and a return position at the current cursor position then seeks to the new total offset. """ self._frames.append((offset, self.tell())) self._total_offset += offset self.seek_in_frame(0)
python
def push_frame(self, offset): """ Pushes a new contextual frame onto the stack with the given offset and a return position at the current cursor position then seeks to the new total offset. """ self._frames.append((offset, self.tell())) self._total_offset += offset self.seek_in_frame(0)
['def', 'push_frame', '(', 'self', ',', 'offset', ')', ':', 'self', '.', '_frames', '.', 'append', '(', '(', 'offset', ',', 'self', '.', 'tell', '(', ')', ')', ')', 'self', '.', '_total_offset', '+=', 'offset', 'self', '.', 'seek_in_frame', '(', '0', ')']
Pushes a new contextual frame onto the stack with the given offset and a return position at the current cursor position then seeks to the new total offset.
['Pushes', 'a', 'new', 'contextual', 'frame', 'onto', 'the', 'stack', 'with', 'the', 'given', 'offset', 'and', 'a', 'return', 'position', 'at', 'the', 'current', 'cursor', 'position', 'then', 'seeks', 'to', 'the', 'new', 'total', 'offset', '.']
train
https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/decoding.py#L86-L95
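A minimal standalone sketch of the frame bookkeeping over io.BytesIO (not the eth-abi class itself; pop_frame is included to show how the return-position half of the stored tuple gets used):

import io

class FramedBytesIO(io.BytesIO):
    def __init__(self, data):
        super().__init__(data)
        self._frames = []
        self._total_offset = 0

    def seek_in_frame(self, pos):
        # Positions are interpreted relative to the current frame base.
        self.seek(self._total_offset + pos)

    def push_frame(self, offset):
        # Record the offset and the position to restore on pop.
        self._frames.append((offset, self.tell()))
        self._total_offset += offset
        self.seek_in_frame(0)

    def pop_frame(self):
        offset, return_pos = self._frames.pop()
        self._total_offset -= offset
        self.seek(return_pos)

stream = FramedBytesIO(bytes(range(16)))
stream.push_frame(4)
print(stream.read(2))   # b'\x04\x05' - reads start at the frame base
stream.pop_frame()
print(stream.tell())    # 0 - cursor restored to the pre-push position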
1,743
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/mds/apis/endpoints_api.py
EndpointsApi.get_endpoint_resources
def get_endpoint_resources(self, device_id, **kwargs): # noqa: E501 """List the resources on an endpoint # noqa: E501 The list of resources is cached by Device Management Connect, so this call does not create a message to the device. **Example usage:** curl -X GET https://api.us-east-1.mbedcloud.com/v2/endpoints/{device-id} -H 'authorization: Bearer {api-key}' # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_endpoint_resources(device_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str device_id: A unique device ID for an endpoint. Note that the ID needs to be an exact match. You cannot use wildcards here. (required) :return: list[Resource] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.get_endpoint_resources_with_http_info(device_id, **kwargs) # noqa: E501 else: (data) = self.get_endpoint_resources_with_http_info(device_id, **kwargs) # noqa: E501 return data
python
def get_endpoint_resources(self, device_id, **kwargs): # noqa: E501 """List the resources on an endpoint # noqa: E501 The list of resources is cached by Device Management Connect, so this call does not create a message to the device. **Example usage:** curl -X GET https://api.us-east-1.mbedcloud.com/v2/endpoints/{device-id} -H 'authorization: Bearer {api-key}' # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_endpoint_resources(device_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str device_id: A unique device ID for an endpoint. Note that the ID needs to be an exact match. You cannot use wildcards here. (required) :return: list[Resource] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.get_endpoint_resources_with_http_info(device_id, **kwargs) # noqa: E501 else: (data) = self.get_endpoint_resources_with_http_info(device_id, **kwargs) # noqa: E501 return data
['def', 'get_endpoint_resources', '(', 'self', ',', 'device_id', ',', '*', '*', 'kwargs', ')', ':', '# noqa: E501', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'asynchronous'", ')', ':', 'return', 'self', '.', 'get_endpoint_resources_with_http_info', '(', 'device_id', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'else', ':', '(', 'data', ')', '=', 'self', '.', 'get_endpoint_resources_with_http_info', '(', 'device_id', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'return', 'data']
List the resources on an endpoint # noqa: E501 The list of resources is cached by Device Management Connect, so this call does not create a message to the device. **Example usage:** curl -X GET https://api.us-east-1.mbedcloud.com/v2/endpoints/{device-id} -H 'authorization: Bearer {api-key}' # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_endpoint_resources(device_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str device_id: A unique device ID for an endpoint. Note that the ID needs to be an exact match. You cannot use wildcards here. (required) :return: list[Resource] If the method is called asynchronously, returns the request thread.
['List', 'the', 'resources', 'on', 'an', 'endpoint', '#', 'noqa', ':', 'E501']
train
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/mds/apis/endpoints_api.py#L127-L147
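A sketch of the sync/async calling convention these generated methods share (client configuration and authorization are omitted; the import path follows the repository layout above and the device id is a placeholder):

from mbed_cloud._backends.mds.apis.endpoints_api import EndpointsApi

api = EndpointsApi()            # assumes an already-configured ApiClient
device_id = 'YOUR-DEVICE-ID'    # placeholder

# Synchronous: blocks and returns list[Resource].
resources = api.get_endpoint_resources(device_id)

# Asynchronous: returns a thread-like handle; .get() joins it and yields the data.
thread = api.get_endpoint_resources(device_id, asynchronous=True)
resources = thread.get()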
1,744
linkedin/luminol
src/luminol/modules/time_series.py
TimeSeries.max
def max(self, default=None): """ Calculate the maximum value over the time series. :param default: Value to return as a default should the calculation not be possible. :return: Float representing the maximum value or `None`. """ return numpy.asscalar(numpy.max(self.values)) if self.values else default
python
def max(self, default=None): """ Calculate the maximum value over the time series. :param default: Value to return as a default should the calculation not be possible. :return: Float representing the maximum value or `None`. """ return numpy.asscalar(numpy.max(self.values)) if self.values else default
['def', 'max', '(', 'self', ',', 'default', '=', 'None', ')', ':', 'return', 'numpy', '.', 'asscalar', '(', 'numpy', '.', 'max', '(', 'self', '.', 'values', ')', ')', 'if', 'self', '.', 'values', 'else', 'default']
Calculate the maximum value over the time series. :param default: Value to return as a default should the calculation not be possible. :return: Float representing the maximum value or `None`.
['Calculate', 'the', 'maximum', 'value', 'over', 'the', 'time', 'series', '.']
train
https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L330-L337
1,745
abakan-zz/napi
napi/functions.py
neval
def neval(expression, globals=None, locals=None, **kwargs): """Evaluate *expression* using *globals* and *locals* dictionaries as *global* and *local* namespace. *expression* is transformed using :class:`.NapiTransformer`.""" try: import __builtin__ as builtins except ImportError: import builtins from ast import parse from ast import fix_missing_locations as fml try: transformer = kwargs['transformer'] except KeyError: from napi.transformers import NapiTransformer as transformer #try: node = parse(expression, '<string>', 'eval') #except ImportError: # builtins.eval(expression) #else: if globals is None: globals = builtins.globals() if locals is None: locals = {} trans = transformer(globals=globals, locals=locals, **kwargs) trans.visit(node) code = compile(fml(node), '<string>', 'eval') return builtins.eval(code, globals, locals)
python
def neval(expression, globals=None, locals=None, **kwargs): """Evaluate *expression* using *globals* and *locals* dictionaries as *global* and *local* namespace. *expression* is transformed using :class:`.NapiTransformer`.""" try: import __builtin__ as builtins except ImportError: import builtins from ast import parse from ast import fix_missing_locations as fml try: transformer = kwargs['transformer'] except KeyError: from napi.transformers import NapiTransformer as transformer #try: node = parse(expression, '<string>', 'eval') #except ImportError: # builtins.eval(expression) #else: if globals is None: globals = builtins.globals() if locals is None: locals = {} trans = transformer(globals=globals, locals=locals, **kwargs) trans.visit(node) code = compile(fml(node), '<string>', 'eval') return builtins.eval(code, globals, locals)
['def', 'neval', '(', 'expression', ',', 'globals', '=', 'None', ',', 'locals', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'try', ':', 'import', '__builtin__', 'as', 'builtins', 'except', 'ImportError', ':', 'import', 'builtins', 'from', 'ast', 'import', 'parse', 'from', 'ast', 'import', 'fix_missing_locations', 'as', 'fml', 'try', ':', 'transformer', '=', 'kwargs', '[', "'transformer'", ']', 'except', 'KeyError', ':', 'from', 'napi', '.', 'transformers', 'import', 'NapiTransformer', 'as', 'transformer', '#try:', 'node', '=', 'parse', '(', 'expression', ',', "'<string>'", ',', "'eval'", ')', '#except ImportError:', '# builtins.eval(expression)', '#else:', 'if', 'globals', 'is', 'None', ':', 'globals', '=', 'builtins', '.', 'globals', '(', ')', 'if', 'locals', 'is', 'None', ':', 'locals', '=', '{', '}', 'trans', '=', 'transformer', '(', 'globals', '=', 'globals', ',', 'locals', '=', 'locals', ',', '*', '*', 'kwargs', ')', 'trans', '.', 'visit', '(', 'node', ')', 'code', '=', 'compile', '(', 'fml', '(', 'node', ')', ',', "'<string>'", ',', "'eval'", ')', 'return', 'builtins', '.', 'eval', '(', 'code', ',', 'globals', ',', 'locals', ')']
Evaluate *expression* using *globals* and *locals* dictionaries as *global* and *local* namespace. *expression* is transformed using :class:`.NapiTransformer`.
['Evaluate', '*', 'expression', '*', 'using', '*', 'globals', '*', 'and', '*', 'locals', '*', 'dictionaries', 'as', '*', 'global', '*', 'and', '*', 'local', '*', 'namespace', '.', '*', 'expression', '*', 'is', 'transformed', 'using', ':', 'class', ':', '.', 'NapiTransformer', '.']
train
https://github.com/abakan-zz/napi/blob/314da65bd78e2c716b7efb6deaf3816d8f38f7fd/napi/functions.py#L3-L33
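A usage sketch of the calling convention only; what NapiTransformer actually rewrites in the AST is not shown by this record, so the expression below is deliberately one the transformer should pass through unchanged (an assumption):

from napi.functions import neval

ns = {'a': 2, 'b': 3}
# Parsed in 'eval' mode, run through the transformer, compiled, then
# evaluated against the supplied namespace.
print(neval('a + b', globals=ns))  # 5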
1,746
google/mobly
mobly/controllers/monsoon.py
MonsoonData._validate_data
def _validate_data(self): """Verifies that the data points contained in the class are valid. """ msg = "Error! Expected {} timestamps, found {}.".format( len(self._data_points), len(self._timestamps)) if len(self._data_points) != len(self._timestamps): raise MonsoonError(msg)
python
def _validate_data(self): """Verifies that the data points contained in the class are valid. """ msg = "Error! Expected {} timestamps, found {}.".format( len(self._data_points), len(self._timestamps)) if len(self._data_points) != len(self._timestamps): raise MonsoonError(msg)
['def', '_validate_data', '(', 'self', ')', ':', 'msg', '=', '"Error! Expected {} timestamps, found {}."', '.', 'format', '(', 'len', '(', 'self', '.', '_data_points', ')', ',', 'len', '(', 'self', '.', '_timestamps', ')', ')', 'if', 'len', '(', 'self', '.', '_data_points', ')', '!=', 'len', '(', 'self', '.', '_timestamps', ')', ':', 'raise', 'MonsoonError', '(', 'msg', ')']
Verifies that the data points contained in the class are valid.
['Verifies', 'that', 'the', 'data', 'points', 'contained', 'in', 'the', 'class', 'are', 'valid', '.']
train
https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/monsoon.py#L580-L586
1,747
CityOfZion/neo-python
neo/VM/ExecutionEngine.py
ExecutionEngine.write_log
def write_log(self, message): """ Write a line to the VM instruction log file. Args: message (str): string message to write to file. """ if self._is_write_log and self.log_file and not self.log_file.closed: self.log_file.write(message + '\n')
python
def write_log(self, message): """ Write a line to the VM instruction log file. Args: message (str): string message to write to file. """ if self._is_write_log and self.log_file and not self.log_file.closed: self.log_file.write(message + '\n')
['def', 'write_log', '(', 'self', ',', 'message', ')', ':', 'if', 'self', '.', '_is_write_log', 'and', 'self', '.', 'log_file', 'and', 'not', 'self', '.', 'log_file', '.', 'closed', ':', 'self', '.', 'log_file', '.', 'write', '(', 'message', '+', "'\\n'", ')']
Write a line to the VM instruction log file. Args: message (str): string message to write to file.
['Write', 'a', 'line', 'to', 'the', 'VM', 'instruction', 'log', 'file', '.']
train
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/VM/ExecutionEngine.py#L47-L55
1,748
IrvKalb/pygwidgets
pygwidgets/pygwidgets.py
PygWidgetsCheckBox.draw
def draw(self): """Draws the checkbox.""" if not self.visible: return # Blit the current checkbox's image. if self.isEnabled: if self.mouseIsDown and self.lastMouseDownOverButton and self.mouseOverButton: if self.value: self.window.blit(self.surfaceOnDown, self.loc) else: self.window.blit(self.surfaceOffDown, self.loc) else: if self.value: self.window.blit(self.surfaceOn, self.loc) else: self.window.blit(self.surfaceOff, self.loc) else: if self.value: self.window.blit(self.surfaceOnDisabled, self.loc) else: self.window.blit(self.surfaceOffDisabled, self.loc)
python
def draw(self): """Draws the checkbox.""" if not self.visible: return # Blit the current checkbox's image. if self.isEnabled: if self.mouseIsDown and self.lastMouseDownOverButton and self.mouseOverButton: if self.value: self.window.blit(self.surfaceOnDown, self.loc) else: self.window.blit(self.surfaceOffDown, self.loc) else: if self.value: self.window.blit(self.surfaceOn, self.loc) else: self.window.blit(self.surfaceOff, self.loc) else: if self.value: self.window.blit(self.surfaceOnDisabled, self.loc) else: self.window.blit(self.surfaceOffDisabled, self.loc)
['def', 'draw', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'visible', ':', 'return', "# Blit the current checkbox's image.\r", 'if', 'self', '.', 'isEnabled', ':', 'if', 'self', '.', 'mouseIsDown', 'and', 'self', '.', 'lastMouseDownOverButton', 'and', 'self', '.', 'mouseOverButton', ':', 'if', 'self', '.', 'value', ':', 'self', '.', 'window', '.', 'blit', '(', 'self', '.', 'surfaceOnDown', ',', 'self', '.', 'loc', ')', 'else', ':', 'self', '.', 'window', '.', 'blit', '(', 'self', '.', 'surfaceOffDown', ',', 'self', '.', 'loc', ')', 'else', ':', 'if', 'self', '.', 'value', ':', 'self', '.', 'window', '.', 'blit', '(', 'self', '.', 'surfaceOn', ',', 'self', '.', 'loc', ')', 'else', ':', 'self', '.', 'window', '.', 'blit', '(', 'self', '.', 'surfaceOff', ',', 'self', '.', 'loc', ')', 'else', ':', 'if', 'self', '.', 'value', ':', 'self', '.', 'window', '.', 'blit', '(', 'self', '.', 'surfaceOnDisabled', ',', 'self', '.', 'loc', ')', 'else', ':', 'self', '.', 'window', '.', 'blit', '(', 'self', '.', 'surfaceOffDisabled', ',', 'self', '.', 'loc', ')']
Draws the checkbox.
['Draws', 'the', 'checkbox', '.']
train
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L839-L863
1,749
aiortc/aioice
aioice/candidate.py
candidate_priority
def candidate_priority(candidate_component, candidate_type, local_pref=65535): """ See RFC 5245 - 4.1.2.1. Recommended Formula """ if candidate_type == 'host': type_pref = 126 elif candidate_type == 'prflx': type_pref = 110 elif candidate_type == 'srflx': type_pref = 100 else: type_pref = 0 return (1 << 24) * type_pref + \ (1 << 8) * local_pref + \ (256 - candidate_component)
python
def candidate_priority(candidate_component, candidate_type, local_pref=65535): """ See RFC 5245 - 4.1.2.1. Recommended Formula """ if candidate_type == 'host': type_pref = 126 elif candidate_type == 'prflx': type_pref = 110 elif candidate_type == 'srflx': type_pref = 100 else: type_pref = 0 return (1 << 24) * type_pref + \ (1 << 8) * local_pref + \ (256 - candidate_component)
['def', 'candidate_priority', '(', 'candidate_component', ',', 'candidate_type', ',', 'local_pref', '=', '65535', ')', ':', 'if', 'candidate_type', '==', "'host'", ':', 'type_pref', '=', '126', 'elif', 'candidate_type', '==', "'prflx'", ':', 'type_pref', '=', '110', 'elif', 'candidate_type', '==', "'srflx'", ':', 'type_pref', '=', '100', 'else', ':', 'type_pref', '=', '0', 'return', '(', '1', '<<', '24', ')', '*', 'type_pref', '+', '(', '1', '<<', '8', ')', '*', 'local_pref', '+', '(', '256', '-', 'candidate_component', ')']
See RFC 5245 - 4.1.2.1. Recommended Formula
['See', 'RFC', '5245', '-', '4', '.', '1', '.', '2', '.', '1', '.', 'Recommended', 'Formula']
train
https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/candidate.py#L13-L28
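A worked instance of the formula for a host candidate on component 1 with the default local preference:

# priority = 2**24 * type_pref + 2**8 * local_pref + (256 - component)
type_pref, local_pref, component = 126, 65535, 1
priority = (1 << 24) * type_pref + (1 << 8) * local_pref + (256 - component)
print(priority)  # 2130706431, i.e. candidate_priority(1, 'host')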
1,750
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py
brocade_xstp_ext.get_stp_mst_detail_output_msti_port_designated_bridge_id
def get_stp_mst_detail_output_msti_port_designated_bridge_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_stp_mst_detail = ET.Element("get_stp_mst_detail") config = get_stp_mst_detail output = ET.SubElement(get_stp_mst_detail, "output") msti = ET.SubElement(output, "msti") instance_id_key = ET.SubElement(msti, "instance-id") instance_id_key.text = kwargs.pop('instance_id') port = ET.SubElement(msti, "port") designated_bridge_id = ET.SubElement(port, "designated-bridge-id") designated_bridge_id.text = kwargs.pop('designated_bridge_id') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def get_stp_mst_detail_output_msti_port_designated_bridge_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_stp_mst_detail = ET.Element("get_stp_mst_detail") config = get_stp_mst_detail output = ET.SubElement(get_stp_mst_detail, "output") msti = ET.SubElement(output, "msti") instance_id_key = ET.SubElement(msti, "instance-id") instance_id_key.text = kwargs.pop('instance_id') port = ET.SubElement(msti, "port") designated_bridge_id = ET.SubElement(port, "designated-bridge-id") designated_bridge_id.text = kwargs.pop('designated_bridge_id') callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'get_stp_mst_detail_output_msti_port_designated_bridge_id', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'get_stp_mst_detail', '=', 'ET', '.', 'Element', '(', '"get_stp_mst_detail"', ')', 'config', '=', 'get_stp_mst_detail', 'output', '=', 'ET', '.', 'SubElement', '(', 'get_stp_mst_detail', ',', '"output"', ')', 'msti', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"msti"', ')', 'instance_id_key', '=', 'ET', '.', 'SubElement', '(', 'msti', ',', '"instance-id"', ')', 'instance_id_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'instance_id'", ')', 'port', '=', 'ET', '.', 'SubElement', '(', 'msti', ',', '"port"', ')', 'designated_bridge_id', '=', 'ET', '.', 'SubElement', '(', 'port', ',', '"designated-bridge-id"', ')', 'designated_bridge_id', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'designated_bridge_id'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py#L4742-L4757
1,751
pycontribs/pyrax
samples/cloud_monitoring/util.py
option_chooser
def option_chooser(options, attr=None): """Given an iterable, enumerate its contents for a user to choose from. If the optional `attr` is not None, that attribute in each iterated object will be printed. This function will exit the program if the user chooses the escape option. """ for num, option in enumerate(options): if attr: print("%s: %s" % (num, getattr(option, attr))) else: print("%s: %s" % (num, option)) # Add an escape option escape_opt = num + 1 print("%s: I want to exit!" % escape_opt) choice = six.moves.input("Selection: ") try: ichoice = int(choice) if ichoice > escape_opt: raise ValueError except ValueError: print("Valid entries are the numbers 0-%s. Received '%s'." % (escape_opt, choice)) sys.exit() if ichoice == escape_opt: print("Bye!") sys.exit() return ichoice
python
def option_chooser(options, attr=None): """Given an iterable, enumerate its contents for a user to choose from. If the optional `attr` is not None, that attribute in each iterated object will be printed. This function will exit the program if the user chooses the escape option. """ for num, option in enumerate(options): if attr: print("%s: %s" % (num, getattr(option, attr))) else: print("%s: %s" % (num, option)) # Add an escape option escape_opt = num + 1 print("%s: I want to exit!" % escape_opt) choice = six.moves.input("Selection: ") try: ichoice = int(choice) if ichoice > escape_opt: raise ValueError except ValueError: print("Valid entries are the numbers 0-%s. Received '%s'." % (escape_opt, choice)) sys.exit() if ichoice == escape_opt: print("Bye!") sys.exit() return ichoice
['def', 'option_chooser', '(', 'options', ',', 'attr', '=', 'None', ')', ':', 'for', 'num', ',', 'option', 'in', 'enumerate', '(', 'options', ')', ':', 'if', 'attr', ':', 'print', '(', '"%s: %s"', '%', '(', 'num', ',', 'getattr', '(', 'option', ',', 'attr', ')', ')', ')', 'else', ':', 'print', '(', '"%s: %s"', '%', '(', 'num', ',', 'option', ')', ')', '# Add an escape option', 'escape_opt', '=', 'num', '+', '1', 'print', '(', '"%s: I want to exit!"', '%', 'escape_opt', ')', 'choice', '=', 'six', '.', 'moves', '.', 'input', '(', '"Selection: "', ')', 'try', ':', 'ichoice', '=', 'int', '(', 'choice', ')', 'if', 'ichoice', '>', 'escape_opt', ':', 'raise', 'ValueError', 'except', 'ValueError', ':', 'print', '(', '"Valid entries are the numbers 0-%s. Received \'%s\'."', '%', '(', 'escape_opt', ',', 'choice', ')', ')', 'sys', '.', 'exit', '(', ')', 'if', 'ichoice', '==', 'escape_opt', ':', 'print', '(', '"Bye!"', ')', 'sys', '.', 'exit', '(', ')', 'return', 'ichoice']
Given an iterable, enumerate its contents for a user to choose from. If the optional `attr` is not None, that attribute in each iterated object will be printed. This function will exit the program if the user chooses the escape option.
['Given', 'an', 'iterable', 'enumerate', 'its', 'contents', 'for', 'a', 'user', 'to', 'choose', 'from', '.', 'If', 'the', 'optional', 'attr', 'is', 'not', 'None', 'that', 'attribute', 'in', 'each', 'iterated', 'object', 'will', 'be', 'printed', '.']
train
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/samples/cloud_monitoring/util.py#L24-L53
1,752
onecodex/onecodex
onecodex/taxonomy.py
TaxonomyMixin.tree_prune_rank
def tree_prune_rank(self, tree, rank="species"): """Takes a TreeNode tree and prunes off any tips not at the specified rank and backwards up until all of the tips are at the specified rank. Parameters ---------- tree : `skbio.tree.TreeNode` The root node of the tree to perform this operation on. rank : {kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional Analysis will be restricted to abundances of taxa at the specified level. Returns ------- `skbio.tree.TreeNode`, the root of the tree where all tips are at the given rank, and all tips have a path back to the root node. Examples -------- tree_prune_rank(tree, 'species') will remove all subspecies/strain nodes and return a tree containing all genus-level nodes and higher. """ if rank is None: return tree.copy() tree = tree.copy() for node in tree.postorder(): if node.rank == rank: node._above_rank = True elif any([getattr(n, "_above_rank", False) for n in node.children]): node._above_rank = True else: node._above_rank = False tree.remove_deleted(lambda n: not getattr(n, "_above_rank", False)) return tree
python
def tree_prune_rank(self, tree, rank="species"): """Takes a TreeNode tree and prunes off any tips not at the specified rank and backwards up until all of the tips are at the specified rank. Parameters ---------- tree : `skbio.tree.TreeNode` The root node of the tree to perform this operation on. rank : {kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional Analysis will be restricted to abundances of taxa at the specified level. Returns ------- `skbio.tree.TreeNode`, the root of the tree where all tips are at the given rank, and all tips have a path back to the root node. Examples -------- tree_prune_rank(tree, 'species') will remove all subspecies/strain nodes and return a tree containing all genus-level nodes and higher. """ if rank is None: return tree.copy() tree = tree.copy() for node in tree.postorder(): if node.rank == rank: node._above_rank = True elif any([getattr(n, "_above_rank", False) for n in node.children]): node._above_rank = True else: node._above_rank = False tree.remove_deleted(lambda n: not getattr(n, "_above_rank", False)) return tree
['def', 'tree_prune_rank', '(', 'self', ',', 'tree', ',', 'rank', '=', '"species"', ')', ':', 'if', 'rank', 'is', 'None', ':', 'return', 'tree', '.', 'copy', '(', ')', 'tree', '=', 'tree', '.', 'copy', '(', ')', 'for', 'node', 'in', 'tree', '.', 'postorder', '(', ')', ':', 'if', 'node', '.', 'rank', '==', 'rank', ':', 'node', '.', '_above_rank', '=', 'True', 'elif', 'any', '(', '[', 'getattr', '(', 'n', ',', '"_above_rank"', ',', 'False', ')', 'for', 'n', 'in', 'node', '.', 'children', ']', ')', ':', 'node', '.', '_above_rank', '=', 'True', 'else', ':', 'node', '.', '_above_rank', '=', 'False', 'tree', '.', 'remove_deleted', '(', 'lambda', 'n', ':', 'not', 'getattr', '(', 'n', ',', '"_above_rank"', ',', 'False', ')', ')', 'return', 'tree']
Takes a TreeNode tree and prunes off any tips not at the specified rank and backwards up until all of the tips are at the specified rank. Parameters ---------- tree : `skbio.tree.TreeNode` The root node of the tree to perform this operation on. rank : {kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional Analysis will be restricted to abundances of taxa at the specified level. Returns ------- `skbio.tree.TreeNode`, the root of the tree where all tips are at the given rank, and all tips have a path back to the root node. Examples -------- tree_prune_rank(tree, 'species') will remove all subspecies/strain nodes and return a tree containing all genus-level nodes and higher.
['Takes', 'a', 'TreeNode', 'tree', 'and', 'prunes', 'off', 'any', 'tips', 'not', 'at', 'the', 'specified', 'rank', 'and', 'backwards', 'up', 'until', 'all', 'of', 'the', 'tips', 'are', 'at', 'the', 'specified', 'rank', '.']
train
https://github.com/onecodex/onecodex/blob/326a0a1af140e3a57ccf31c3c9c5e17a5775c13d/onecodex/taxonomy.py#L70-L106
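A minimal reproduction of the same bottom-up marking on a hand-built skbio tree; the rank attributes are attached manually here (real trees carry them from the taxonomy), and the newick string is made up:

from skbio.tree import TreeNode

tree = TreeNode.read(['((s1,s2)g1,(s3)g2)root;'])
ranks = {'s1': 'species', 's2': 'species', 's3': 'subspecies',
         'g1': 'genus', 'g2': 'genus', 'root': 'root'}
for node in tree.postorder():
    node.rank = ranks[node.name]

rank = 'species'
for node in tree.postorder():
    # Keep a node if it is at the target rank or sits above a kept node.
    node._above_rank = (node.rank == rank or
                        any(getattr(n, '_above_rank', False) for n in node.children))
tree.remove_deleted(lambda n: not getattr(n, '_above_rank', False))
print(tree.ascii_art())  # s3 (below species rank) and its now-empty parent g2 are gone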
1,753
timothydmorton/isochrones
isochrones/observation.py
Observation.observe
def observe(self, stars, unc, ic=None): """Creates and adds appropriate synthetic Source objects for list of stars (max 2 for now) """ if ic is None: ic = get_ichrone('mist') if len(stars) > 2: raise NotImplementedError('No support yet for > 2 synthetic stars') mags = [ic(*s.pars)['{}_mag'.format(self.band)].values[0] for s in stars] d = stars[0].distance(stars[1]) if d < self.resolution: mag = addmags(*mags) + unc*np.random.randn() sources = [Source(mag, unc, stars[0].separation, stars[0].pa, relative=self.relative)] else: mags = np.array([m + unc*np.random.randn() for m in mags]) if self.relative: mags -= mags.min() sources = [Source(m, unc, s.separation, s.pa, relative=self.relative) for m,s in zip(mags, stars)] for s in sources: self.add_source(s) self._set_reference()
python
def observe(self, stars, unc, ic=None): """Creates and adds appropriate synthetic Source objects for list of stars (max 2 for now) """ if ic is None: ic = get_ichrone('mist') if len(stars) > 2: raise NotImplementedError('No support yet for > 2 synthetic stars') mags = [ic(*s.pars)['{}_mag'.format(self.band)].values[0] for s in stars] d = stars[0].distance(stars[1]) if d < self.resolution: mag = addmags(*mags) + unc*np.random.randn() sources = [Source(mag, unc, stars[0].separation, stars[0].pa, relative=self.relative)] else: mags = np.array([m + unc*np.random.randn() for m in mags]) if self.relative: mags -= mags.min() sources = [Source(m, unc, s.separation, s.pa, relative=self.relative) for m,s in zip(mags, stars)] for s in sources: self.add_source(s) self._set_reference()
['def', 'observe', '(', 'self', ',', 'stars', ',', 'unc', ',', 'ic', '=', 'None', ')', ':', 'if', 'ic', 'is', 'None', ':', 'ic', '=', 'get_ichrone', '(', "'mist'", ')', 'if', 'len', '(', 'stars', ')', '>', '2', ':', 'raise', 'NotImplementedError', '(', "'No support yet for > 2 synthetic stars'", ')', 'mags', '=', '[', 'ic', '(', '*', 's', '.', 'pars', ')', '[', "'{}_mag'", '.', 'format', '(', 'self', '.', 'band', ')', ']', '.', 'values', '[', '0', ']', 'for', 's', 'in', 'stars', ']', 'd', '=', 'stars', '[', '0', ']', '.', 'distance', '(', 'stars', '[', '1', ']', ')', 'if', 'd', '<', 'self', '.', 'resolution', ':', 'mag', '=', 'addmags', '(', '*', 'mags', ')', '+', 'unc', '*', 'np', '.', 'random', '.', 'randn', '(', ')', 'sources', '=', '[', 'Source', '(', 'mag', ',', 'unc', ',', 'stars', '[', '0', ']', '.', 'separation', ',', 'stars', '[', '0', ']', '.', 'pa', ',', 'relative', '=', 'self', '.', 'relative', ')', ']', 'else', ':', 'mags', '=', 'np', '.', 'array', '(', '[', 'm', '+', 'unc', '*', 'np', '.', 'random', '.', 'randn', '(', ')', 'for', 'm', 'in', 'mags', ']', ')', 'if', 'self', '.', 'relative', ':', 'mags', '-=', 'mags', '.', 'min', '(', ')', 'sources', '=', '[', 'Source', '(', 'm', ',', 'unc', ',', 's', '.', 'separation', ',', 's', '.', 'pa', ',', 'relative', '=', 'self', '.', 'relative', ')', 'for', 'm', ',', 's', 'in', 'zip', '(', 'mags', ',', 'stars', ')', ']', 'for', 's', 'in', 'sources', ':', 'self', '.', 'add_source', '(', 's', ')', 'self', '.', '_set_reference', '(', ')']
Creates and adds appropriate synthetic Source objects for list of stars (max 2 for now)
['Creates', 'and', 'adds', 'appropriate', 'synthetic', 'Source', 'objects', 'for', 'list', 'of', 'stars', '(', 'max', '2', 'for', 'now', ')']
train
https://github.com/timothydmorton/isochrones/blob/d84495573044c66db2fd6b959fe69e370757ea14/isochrones/observation.py#L643-L670
1,754
MillionIntegrals/vel
vel/api/train_phase.py
TrainPhase.restore
def restore(self, training_info: TrainingInfo, local_batch_idx: int, model: Model, hidden_state: dict): """ Restore learning from intermediate state. """ pass
python
def restore(self, training_info: TrainingInfo, local_batch_idx: int, model: Model, hidden_state: dict): """ Restore learning from intermediate state. """ pass
['def', 'restore', '(', 'self', ',', 'training_info', ':', 'TrainingInfo', ',', 'local_batch_idx', ':', 'int', ',', 'model', ':', 'Model', ',', 'hidden_state', ':', 'dict', ')', ':', 'pass']
Restore learning from intermediate state.
['Restore', 'learning', 'from', 'intermediate', 'state', '.']
train
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/train_phase.py#L18-L22
1,755
NaPs/Kolekto
kolekto/commands/stats.py
format_top
def format_top(counter, top=3): """ Format a top. """ items = islice(reversed(sorted(counter.iteritems(), key=lambda x: x[1])), 0, top) return u'; '.join(u'{g} ({nb})'.format(g=g, nb=nb) for g, nb in items)
python
def format_top(counter, top=3): """ Format a top. """ items = islice(reversed(sorted(counter.iteritems(), key=lambda x: x[1])), 0, top) return u'; '.join(u'{g} ({nb})'.format(g=g, nb=nb) for g, nb in items)
['def', 'format_top', '(', 'counter', ',', 'top', '=', '3', ')', ':', 'items', '=', 'islice', '(', 'reversed', '(', 'sorted', '(', 'counter', '.', 'iteritems', '(', ')', ',', 'key', '=', 'lambda', 'x', ':', 'x', '[', '1', ']', ')', ')', ',', '0', ',', 'top', ')', 'return', "u'; '", '.', 'join', '(', "u'{g} ({nb})'", '.', 'format', '(', 'g', '=', 'g', ',', 'nb', '=', 'nb', ')', 'for', 'g', ',', 'nb', 'in', 'items', ')']
Format a top.
['Format', 'a', 'top', '.']
train
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/commands/stats.py#L34-L38
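A Python 3 sketch of the same formatting (the original relies on Python 2's iteritems); collections.Counter.most_common performs the sort-and-slice in one step:

from collections import Counter

counter = Counter({'drama': 12, 'comedy': 8, 'horror': 3, 'sci-fi': 2})
top = '; '.join('{g} ({nb})'.format(g=g, nb=nb) for g, nb in counter.most_common(3))
print(top)  # drama (12); comedy (8); horror (3)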
1,756
pyblish/pyblish-qml
pyblish_qml/models.py
ProxyModel._set_rules
def _set_rules(self, group, rules): """Implementation detail""" group.clear() for rule in rules: self._add_rule(group, *rule) self.invalidate()
python
def _set_rules(self, group, rules): """Implementation detail""" group.clear() for rule in rules: self._add_rule(group, *rule) self.invalidate()
['def', '_set_rules', '(', 'self', ',', 'group', ',', 'rules', ')', ':', 'group', '.', 'clear', '(', ')', 'for', 'rule', 'in', 'rules', ':', 'self', '.', '_add_rule', '(', 'group', ',', '*', 'rule', ')', 'self', '.', 'invalidate', '(', ')']
Implementation detail
['Implementation', 'detail']
train
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/models.py#L842-L849
1,757
saltstack/salt
salt/modules/openbsdpkg.py
install
def install(name=None, pkgs=None, sources=None, **kwargs): ''' Install the passed package Return a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example, Install one package: .. code-block:: bash salt '*' pkg.install <package name> CLI Example, Install more than one package: .. code-block:: bash salt '*' pkg.install pkgs='["<package name>", "<package name>"]' CLI Example, Install more than one package from a alternate source (e.g. salt file-server, HTTP, FTP, local filesystem): .. code-block:: bash salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' ''' try: pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( name, pkgs, sources, **kwargs ) except MinionError as exc: raise CommandExecutionError(exc) if not pkg_params: return {} old = list_pkgs() errors = [] for pkg in pkg_params: # A special case for OpenBSD package "branches" is also required in # salt/states/pkg.py if pkg_type == 'repository': stem, branch = (pkg.split('%') + [''])[:2] base, flavor = (stem.split('--') + [''])[:2] pkg = '{0}--{1}%{2}'.format(base, flavor, branch) cmd = 'pkg_add -x -I {0}'.format(pkg) out = __salt__['cmd.run_all']( cmd, python_shell=False, output_loglevel='trace' ) if out['retcode'] != 0 and out['stderr']: errors.append(out['stderr']) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered installing package(s)', info={'errors': errors, 'changes': ret} ) return ret
python
def install(name=None, pkgs=None, sources=None, **kwargs): ''' Install the passed package Return a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example, Install one package: .. code-block:: bash salt '*' pkg.install <package name> CLI Example, Install more than one package: .. code-block:: bash salt '*' pkg.install pkgs='["<package name>", "<package name>"]' CLI Example, Install more than one package from a alternate source (e.g. salt file-server, HTTP, FTP, local filesystem): .. code-block:: bash salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' ''' try: pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( name, pkgs, sources, **kwargs ) except MinionError as exc: raise CommandExecutionError(exc) if not pkg_params: return {} old = list_pkgs() errors = [] for pkg in pkg_params: # A special case for OpenBSD package "branches" is also required in # salt/states/pkg.py if pkg_type == 'repository': stem, branch = (pkg.split('%') + [''])[:2] base, flavor = (stem.split('--') + [''])[:2] pkg = '{0}--{1}%{2}'.format(base, flavor, branch) cmd = 'pkg_add -x -I {0}'.format(pkg) out = __salt__['cmd.run_all']( cmd, python_shell=False, output_loglevel='trace' ) if out['retcode'] != 0 and out['stderr']: errors.append(out['stderr']) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered installing package(s)', info={'errors': errors, 'changes': ret} ) return ret
['def', 'install', '(', 'name', '=', 'None', ',', 'pkgs', '=', 'None', ',', 'sources', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'try', ':', 'pkg_params', ',', 'pkg_type', '=', '__salt__', '[', "'pkg_resource.parse_targets'", ']', '(', 'name', ',', 'pkgs', ',', 'sources', ',', '*', '*', 'kwargs', ')', 'except', 'MinionError', 'as', 'exc', ':', 'raise', 'CommandExecutionError', '(', 'exc', ')', 'if', 'not', 'pkg_params', ':', 'return', '{', '}', 'old', '=', 'list_pkgs', '(', ')', 'errors', '=', '[', ']', 'for', 'pkg', 'in', 'pkg_params', ':', '# A special case for OpenBSD package "branches" is also required in', '# salt/states/pkg.py', 'if', 'pkg_type', '==', "'repository'", ':', 'stem', ',', 'branch', '=', '(', 'pkg', '.', 'split', '(', "'%'", ')', '+', '[', "''", ']', ')', '[', ':', '2', ']', 'base', ',', 'flavor', '=', '(', 'stem', '.', 'split', '(', "'--'", ')', '+', '[', "''", ']', ')', '[', ':', '2', ']', 'pkg', '=', "'{0}--{1}%{2}'", '.', 'format', '(', 'base', ',', 'flavor', ',', 'branch', ')', 'cmd', '=', "'pkg_add -x -I {0}'", '.', 'format', '(', 'pkg', ')', 'out', '=', '__salt__', '[', "'cmd.run_all'", ']', '(', 'cmd', ',', 'python_shell', '=', 'False', ',', 'output_loglevel', '=', "'trace'", ')', 'if', 'out', '[', "'retcode'", ']', '!=', '0', 'and', 'out', '[', "'stderr'", ']', ':', 'errors', '.', 'append', '(', 'out', '[', "'stderr'", ']', ')', '__context__', '.', 'pop', '(', "'pkg.list_pkgs'", ',', 'None', ')', 'new', '=', 'list_pkgs', '(', ')', 'ret', '=', 'salt', '.', 'utils', '.', 'data', '.', 'compare_dicts', '(', 'old', ',', 'new', ')', 'if', 'errors', ':', 'raise', 'CommandExecutionError', '(', "'Problem encountered installing package(s)'", ',', 'info', '=', '{', "'errors'", ':', 'errors', ',', "'changes'", ':', 'ret', '}', ')', 'return', 'ret']
Install the passed package Return a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example, Install one package: .. code-block:: bash salt '*' pkg.install <package name> CLI Example, Install more than one package: .. code-block:: bash salt '*' pkg.install pkgs='["<package name>", "<package name>"]' CLI Example, Install more than one package from an alternate source (e.g. salt file-server, HTTP, FTP, local filesystem): .. code-block:: bash salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]'
['Install', 'the', 'passed', 'package']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openbsdpkg.py#L184-L250
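A worked example of the branch/flavour normalisation applied to each repository package name (package names are illustrative):

for pkg in ['postfix', 'postfix%stable', 'postfix--sasl2%stable']:
    stem, branch = (pkg.split('%') + [''])[:2]    # split off the %branch, if any
    base, flavor = (stem.split('--') + [''])[:2]  # split off the --flavor, if any
    print('{0}--{1}%{2}'.format(base, flavor, branch))
# postfix--%
# postfix--%stable
# postfix--sasl2%stable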
1,758
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/printer.py
summarize_neural_network_spec
def summarize_neural_network_spec(mlmodel_spec): """ Summarize network into the following structure. Args: mlmodel_spec : mlmodel spec Returns: inputs : list[(str, str)] - a list of two tuple (name, descriptor) for each input blob. outputs : list[(str, str)] - a list of two tuple (name, descriptor) for each output blob layers : list[(str, list[str], list[str], list[(str, str)])] - a list of layers represented by layer name, input blobs, output blobs, a list of (parameter name, content) """ inputs = [(blob.name, _get_feature_description_summary(blob)) for blob in mlmodel_spec.description.input] outputs = [(blob.name, _get_feature_description_summary(blob)) for blob in mlmodel_spec.description.output] nn = None if mlmodel_spec.HasField('neuralNetwork'): nn = mlmodel_spec.neuralNetwork elif mlmodel_spec.HasField('neuralNetworkClassifier'): nn = mlmodel_spec.neuralNetworkClassifier elif mlmodel_spec.HasField('neuralNetworkRegressor'): nn = mlmodel_spec.neuralNetworkRegressor layers = [_summarize_network_layer_info(layer) for layer in nn.layers] if nn != None else None return (inputs, outputs, layers)
python
def summarize_neural_network_spec(mlmodel_spec): """ Summarize network into the following structure. Args: mlmodel_spec : mlmodel spec Returns: inputs : list[(str, str)] - a list of two tuple (name, descriptor) for each input blob. outputs : list[(str, str)] - a list of two tuple (name, descriptor) for each output blob layers : list[(str, list[str], list[str], list[(str, str)])] - a list of layers represented by layer name, input blobs, output blobs, a list of (parameter name, content) """ inputs = [(blob.name, _get_feature_description_summary(blob)) for blob in mlmodel_spec.description.input] outputs = [(blob.name, _get_feature_description_summary(blob)) for blob in mlmodel_spec.description.output] nn = None if mlmodel_spec.HasField('neuralNetwork'): nn = mlmodel_spec.neuralNetwork elif mlmodel_spec.HasField('neuralNetworkClassifier'): nn = mlmodel_spec.neuralNetworkClassifier elif mlmodel_spec.HasField('neuralNetworkRegressor'): nn = mlmodel_spec.neuralNetworkRegressor layers = [_summarize_network_layer_info(layer) for layer in nn.layers] if nn != None else None return (inputs, outputs, layers)
['def', 'summarize_neural_network_spec', '(', 'mlmodel_spec', ')', ':', 'inputs', '=', '[', '(', 'blob', '.', 'name', ',', '_get_feature_description_summary', '(', 'blob', ')', ')', 'for', 'blob', 'in', 'mlmodel_spec', '.', 'description', '.', 'input', ']', 'outputs', '=', '[', '(', 'blob', '.', 'name', ',', '_get_feature_description_summary', '(', 'blob', ')', ')', 'for', 'blob', 'in', 'mlmodel_spec', '.', 'description', '.', 'output', ']', 'nn', '=', 'None', 'if', 'mlmodel_spec', '.', 'HasField', '(', "'neuralNetwork'", ')', ':', 'nn', '=', 'mlmodel_spec', '.', 'neuralNetwork', 'elif', 'mlmodel_spec', '.', 'HasField', '(', "'neuralNetworkClassifier'", ')', ':', 'nn', '=', 'mlmodel_spec', '.', 'neuralNetworkClassifier', 'elif', 'mlmodel_spec', '.', 'HasField', '(', "'neuralNetworkRegressor'", ')', ':', 'nn', '=', 'mlmodel_spec', '.', 'neuralNetworkRegressor', 'layers', '=', '[', '_summarize_network_layer_info', '(', 'layer', ')', 'for', 'layer', 'in', 'nn', '.', 'layers', ']', 'if', 'nn', '!=', 'None', 'else', 'None', 'return', '(', 'inputs', ',', 'outputs', ',', 'layers', ')']
Summarize network into the following structure. Args: mlmodel_spec : mlmodel spec Returns: inputs : list[(str, str)] - a list of two tuple (name, descriptor) for each input blob. outputs : list[(str, str)] - a list of two tuple (name, descriptor) for each output blob layers : list[(str, list[str], list[str], list[(str, str)])] - a list of layers represented by layer name, input blobs, output blobs, a list of (parameter name, content)
['Summarize', 'network', 'into', 'the', 'following', 'structure', '.', 'Args', ':', 'mlmodel_spec', ':', 'mlmodel', 'spec', 'Returns', ':', 'inputs', ':', 'list', '[', '(', 'str', 'str', ')', ']', '-', 'a', 'list', 'of', 'two', 'tuple', '(', 'name', 'descriptor', ')', 'for', 'each', 'input', 'blob', '.', 'outputs', ':', 'list', '[', '(', 'str', 'str', ')', ']', '-', 'a', 'list', 'of', 'two', 'tuple', '(', 'name', 'descriptor', ')', 'for', 'each', 'output', 'blob', 'layers', ':', 'list', '[', '(', 'str', 'list', '[', 'str', ']', 'list', '[', 'str', ']', 'list', '[', '(', 'str', 'str', ')', ']', ')', ']', '-', 'a', 'list', 'of', 'layers', 'represented', 'by', 'layer', 'name', 'input', 'blobs', 'output', 'blobs', 'a', 'list', 'of', '(', 'parameter', 'name', 'content', ')']
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/printer.py#L105-L127
1,759
gwastro/pycbc
pycbc/inference/models/base_data.py
BaseDataModel.data
def data(self, data): """Store a copy of the data.""" self._data = {det: d.copy() for (det, d) in data.items()}
python
def data(self, data): """Store a copy of the data.""" self._data = {det: d.copy() for (det, d) in data.items()}
['def', 'data', '(', 'self', ',', 'data', ')', ':', 'self', '.', '_data', '=', '{', 'det', ':', 'd', '.', 'copy', '(', ')', 'for', '(', 'det', ',', 'd', ')', 'in', 'data', '.', 'items', '(', ')', '}']
Store a copy of the data.
['Store', 'a', 'copy', 'of', 'the', 'data', '.']
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/models/base_data.py#L90-L92
1,760
SoCo/SoCo
dev_tools/analyse_ws.py
AnalyzeWS.__to_browser
def __to_browser(self, message_no): """ Write a single message to file and open the file in a browser """ filename = self.__to_file(message_no) try: command = self.config.get('General', 'browser_command') except (ConfigParser.NoOptionError, AttributeError): print 'Incorrect or missing .ini file. See --help.' sys.exit(5) command = str(command).format(filename) command_list = command.split(' ') try: subprocess.Popen(command_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError: print 'Unable to execute the browsercommand:' print command print 'Exiting!' sys.exit(21)
python
def __to_browser(self, message_no): """ Write a single message to file and open the file in a browser """ filename = self.__to_file(message_no) try: command = self.config.get('General', 'browser_command') except (ConfigParser.NoOptionError, AttributeError): print 'Incorrect or missing .ini file. See --help.' sys.exit(5) command = str(command).format(filename) command_list = command.split(' ') try: subprocess.Popen(command_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError: print 'Unable to execute the browsercommand:' print command print 'Exiting!' sys.exit(21)
['def', '__to_browser', '(', 'self', ',', 'message_no', ')', ':', 'filename', '=', 'self', '.', '__to_file', '(', 'message_no', ')', 'try', ':', 'command', '=', 'self', '.', 'config', '.', 'get', '(', "'General'", ',', "'browser_command'", ')', 'except', '(', 'ConfigParser', '.', 'NoOptionError', ',', 'AttributeError', ')', ':', 'print', "'Incorrect or missing .ini file. See --help.'", 'sys', '.', 'exit', '(', '5', ')', 'command', '=', 'str', '(', 'command', ')', '.', 'format', '(', 'filename', ')', 'command_list', '=', 'command', '.', 'split', '(', "' '", ')', 'try', ':', 'subprocess', '.', 'Popen', '(', 'command_list', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ',', 'stderr', '=', 'subprocess', '.', 'PIPE', ')', 'except', 'OSError', ':', 'print', "'Unable to execute the browsercommand:'", 'print', 'command', 'print', "'Exiting!'", 'sys', '.', 'exit', '(', '21', ')']
Write a single message to file and open the file in a browser
['Write', 'a', 'single', 'message', 'to', 'file', 'and', 'open', 'the', 'file', 'in', 'a', 'browser']
train
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L209-L229
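The AnalyzeWS method above is Python 2 (bare print statements). A hedged Python 3 sketch of the same "open file in configured browser" pattern follows; it assumes a configparser.ConfigParser instance and keeps the original section name, option name, and exit codes. shlex.split replaces split(' ') so quoted arguments in the browser command survive.

import configparser
import shlex
import subprocess
import sys

def open_in_browser(config, filename):
    # Read the browser command template from the [General] section.
    try:
        command = config.get('General', 'browser_command')
    except (configparser.NoSectionError, configparser.NoOptionError):
        print('Incorrect or missing .ini file. See --help.')
        sys.exit(5)
    command = command.format(filename)
    # Launch detached; shlex keeps quoted arguments intact.
    try:
        subprocess.Popen(shlex.split(command),
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError:
        print('Unable to execute the browser command:')
        print(command)
        sys.exit(21)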
1,761
ibis-project/ibis
ibis/mapd/client.py
MapDClient.load_data
def load_data(self, table_name, obj, database=None, **kwargs): """ Wraps the LOAD DATA DDL statement. Loads data into a MapD table by physically moving data files. Parameters ---------- table_name : string obj: pandas.DataFrame or pyarrow.Table database : string, default None (optional) """ _database = self.db_name self.set_database(database) self.con.load_table(table_name, obj, **kwargs) self.set_database(_database)
python
def load_data(self, table_name, obj, database=None, **kwargs): """ Wraps the LOAD DATA DDL statement. Loads data into a MapD table by physically moving data files. Parameters ---------- table_name : string obj: pandas.DataFrame or pyarrow.Table database : string, default None (optional) """ _database = self.db_name self.set_database(database) self.con.load_table(table_name, obj, **kwargs) self.set_database(_database)
['def', 'load_data', '(', 'self', ',', 'table_name', ',', 'obj', ',', 'database', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', '_database', '=', 'self', '.', 'db_name', 'self', '.', 'set_database', '(', 'database', ')', 'self', '.', 'con', '.', 'load_table', '(', 'table_name', ',', 'obj', ',', '*', '*', 'kwargs', ')', 'self', '.', 'set_database', '(', '_database', ')']
Wraps the LOAD DATA DDL statement. Loads data into a MapD table by physically moving data files. Parameters ---------- table_name : string obj: pandas.DataFrame or pyarrow.Table database : string, default None (optional)
['Wraps', 'the', 'LOAD', 'DATA', 'DDL', 'statement', '.', 'Loads', 'data', 'into', 'a', 'MapD', 'table', 'by', 'physically', 'moving', 'data', 'files', '.']
train
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/mapd/client.py#L728-L742
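load_data above saves the current database name, switches, loads, and switches back. A sketch of that save-and-restore pattern as a context manager, assuming only that the client exposes db_name and set_database() as in the row above; the try/finally also restores the database when the load raises, which the original sequence does not.

from contextlib import contextmanager

@contextmanager
def use_database(client, database):
    # Remember where we were, switch, and always switch back.
    previous = client.db_name
    client.set_database(database)
    try:
        yield client
    finally:
        client.set_database(previous)

# Hypothetical usage with a MapD-like client:
# with use_database(client, 'staging'):
#     client.con.load_table('my_table', df)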
1,762
DarkEnergySurvey/ugali
ugali/utils/stats.py
sky
def sky(lon=None,lat=None,size=1): """ Outputs uniform points on sphere from: [0 < lon < 360] & [-90 < lat < 90] """ if lon is None: umin,umax = 0,1 else: lon = np.asarray(lon) lon = np.radians(lon + 360.*(lon<0)) if lon.size==1: umin=umax=lon/(2*np.pi) elif lon.size==2: umin,umax=lon/(2*np.pi) else: raise Exception('...') if lat is None: vmin,vmax = -1,1 else: lat = np.asarray(lat) lat = np.radians(90 - lat) if lat.size==1: vmin=vmax=np.cos(lat) elif lat.size==2: vmin,vmax=np.cos(lat) else: raise Exception('...') phi = 2*np.pi*np.random.uniform(umin,umax,size=size) theta = np.arcsin(np.random.uniform(vmin,vmax,size=size)) return np.degrees(phi),np.degrees(theta)
python
def sky(lon=None,lat=None,size=1): """ Outputs uniform points on sphere from: [0 < lon < 360] & [-90 < lat < 90] """ if lon is None: umin,umax = 0,1 else: lon = np.asarray(lon) lon = np.radians(lon + 360.*(lon<0)) if lon.size==1: umin=umax=lon/(2*np.pi) elif lon.size==2: umin,umax=lon/(2*np.pi) else: raise Exception('...') if lat is None: vmin,vmax = -1,1 else: lat = np.asarray(lat) lat = np.radians(90 - lat) if lat.size==1: vmin=vmax=np.cos(lat) elif lat.size==2: vmin,vmax=np.cos(lat) else: raise Exception('...') phi = 2*np.pi*np.random.uniform(umin,umax,size=size) theta = np.arcsin(np.random.uniform(vmin,vmax,size=size)) return np.degrees(phi),np.degrees(theta)
['def', 'sky', '(', 'lon', '=', 'None', ',', 'lat', '=', 'None', ',', 'size', '=', '1', ')', ':', 'if', 'lon', 'is', 'None', ':', 'umin', ',', 'umax', '=', '0', ',', '1', 'else', ':', 'lon', '=', 'np', '.', 'asarray', '(', 'lon', ')', 'lon', '=', 'np', '.', 'radians', '(', 'lon', '+', '360.', '*', '(', 'lon', '<', '0', ')', ')', 'if', 'lon', '.', 'size', '==', '1', ':', 'umin', '=', 'umax', '=', 'lon', '/', '(', '2', '*', 'np', '.', 'pi', ')', 'elif', 'lon', '.', 'size', '==', '2', ':', 'umin', ',', 'umax', '=', 'lon', '/', '(', '2', '*', 'np', '.', 'pi', ')', 'else', ':', 'raise', 'Exception', '(', "'...'", ')', 'if', 'lat', 'is', 'None', ':', 'vmin', ',', 'vmax', '=', '-', '1', ',', '1', 'else', ':', 'lat', '=', 'np', '.', 'asarray', '(', 'lat', ')', 'lat', '=', 'np', '.', 'radians', '(', '90', '-', 'lat', ')', 'if', 'lat', '.', 'size', '==', '1', ':', 'vmin', '=', 'vmax', '=', 'np', '.', 'cos', '(', 'lat', ')', 'elif', 'lat', '.', 'size', '==', '2', ':', 'vmin', ',', 'vmax', '=', 'np', '.', 'cos', '(', 'lat', ')', 'else', ':', 'raise', 'Exception', '(', "'...'", ')', 'phi', '=', '2', '*', 'np', '.', 'pi', '*', 'np', '.', 'random', '.', 'uniform', '(', 'umin', ',', 'umax', ',', 'size', '=', 'size', ')', 'theta', '=', 'np', '.', 'arcsin', '(', 'np', '.', 'random', '.', 'uniform', '(', 'vmin', ',', 'vmax', ',', 'size', '=', 'size', ')', ')', 'return', 'np', '.', 'degrees', '(', 'phi', ')', ',', 'np', '.', 'degrees', '(', 'theta', ')']
Outputs uniform points on sphere from: [0 < lon < 360] & [-90 < lat < 90]
['Outputs', 'uniform', 'points', 'on', 'sphere', 'from', ':', '[', '0', '<', 'lon', '<', '360', ']', '&', '[', '-', '90', '<', 'lat', '<', '90', ']']
train
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/stats.py#L131-L156
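A worked run of the inverse-CDF construction behind sky above (standalone numpy sketch, not an import of ugali): longitude is uniform in angle, while latitude is arcsin of a uniform draw in [-1, 1], which makes the points uniform in area over the sphere rather than bunched at the poles.

import numpy as np

rng = np.random.default_rng(0)
u = rng.uniform(0.0, 1.0, size=4)   # maps to longitude
v = rng.uniform(-1.0, 1.0, size=4)  # maps to sin(latitude)
lon = np.degrees(2 * np.pi * u)     # 0 <= lon < 360
lat = np.degrees(np.arcsin(v))      # -90 <= lat <= 90
print(lon, lat)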
1,763
pytries/DAWG-Python
dawg_python/wrapper.py
Dictionary.contains
def contains(self, key): "Exact matching." index = self.follow_bytes(key, self.ROOT) if index is None: return False return self.has_value(index)
python
def contains(self, key): "Exact matching." index = self.follow_bytes(key, self.ROOT) if index is None: return False return self.has_value(index)
['def', 'contains', '(', 'self', ',', 'key', ')', ':', 'index', '=', 'self', '.', 'follow_bytes', '(', 'key', ',', 'self', '.', 'ROOT', ')', 'if', 'index', 'is', 'None', ':', 'return', 'False', 'return', 'self', '.', 'has_value', '(', 'index', ')']
Exact matching.
['Exact', 'matching', '.']
train
https://github.com/pytries/DAWG-Python/blob/e56241ec919b78735ff79014bf18d7fd1f8e08b9/dawg_python/wrapper.py#L35-L40
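Dictionary.contains above follows the key's bytes from the root and then checks a terminal flag. The same two-step shape on a plain dict-of-dicts trie (this sketch is not the DAWG's packed representation; '$' is an invented terminal marker):

def follow(trie, key):
    node = trie
    for ch in key:
        node = node.get(ch)
        if node is None:
            return None
    return node

def contains(trie, key):
    # Exact match: the walk must succeed AND end on a terminal node.
    node = follow(trie, key)
    return node is not None and node.get('$', False)

trie = {'f': {'o': {'o': {'$': True}}}}
print(contains(trie, 'foo'), contains(trie, 'fo'))  # True False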
1,764
fabioz/PyDev.Debugger
third_party/pep8/pycodestyle.py
missing_whitespace_after_import_keyword
def missing_whitespace_after_import_keyword(logical_line): r"""Multiple imports in form from x import (a, b, c) should have space between import statement and parenthesised name list. Okay: from foo import (bar, baz) E275: from foo import(bar, baz) E275: from importable.module import(bar, baz) """ line = logical_line indicator = ' import(' if line.startswith('from '): found = line.find(indicator) if -1 < found: pos = found + len(indicator) - 1 yield pos, "E275 missing whitespace after keyword"
python
def missing_whitespace_after_import_keyword(logical_line): r"""Multiple imports in form from x import (a, b, c) should have space between import statement and parenthesised name list. Okay: from foo import (bar, baz) E275: from foo import(bar, baz) E275: from importable.module import(bar, baz) """ line = logical_line indicator = ' import(' if line.startswith('from '): found = line.find(indicator) if -1 < found: pos = found + len(indicator) - 1 yield pos, "E275 missing whitespace after keyword"
['def', 'missing_whitespace_after_import_keyword', '(', 'logical_line', ')', ':', 'line', '=', 'logical_line', 'indicator', '=', "' import('", 'if', 'line', '.', 'startswith', '(', "'from '", ')', ':', 'found', '=', 'line', '.', 'find', '(', 'indicator', ')', 'if', '-', '1', '<', 'found', ':', 'pos', '=', 'found', '+', 'len', '(', 'indicator', ')', '-', '1', 'yield', 'pos', ',', '"E275 missing whitespace after keyword"']
r"""Multiple imports in form from x import (a, b, c) should have space between import statement and parenthesised name list. Okay: from foo import (bar, baz) E275: from foo import(bar, baz) E275: from importable.module import(bar, baz)
['r', 'Multiple', 'imports', 'in', 'form', 'from', 'x', 'import', '(', 'a', 'b', 'c', ')', 'should', 'have', 'space', 'between', 'import', 'statement', 'and', 'parenthesised', 'name', 'list', '.']
train
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/pycodestyle.py#L371-L385
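The E275 check above is a generator yielding (offset, message) pairs for each violation. Driving it over two logical lines (the checker is re-declared locally so the snippet runs standalone):

def missing_whitespace_after_import_keyword(logical_line):
    line = logical_line
    indicator = ' import('
    if line.startswith('from '):
        found = line.find(indicator)
        if -1 < found:
            pos = found + len(indicator) - 1
            yield pos, "E275 missing whitespace after keyword"

for src in ('from foo import (bar, baz)', 'from foo import(bar, baz)'):
    print(src, '->', list(missing_whitespace_after_import_keyword(src)))
# The second line yields [(15, 'E275 missing whitespace after keyword')]:
# offset 15 is the '(' that should have been preceded by a space.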
1,765
alfredodeza/notario
notario/validators/types.py
dictionary
def dictionary(_object, *args): """ Validates a given input is of type dictionary. Example usage:: data = {'a' : {'b': 1}} schema = ('a', dictionary) You can also use this as a decorator, as a way to check for the input before it even hits a validator you may be writing. .. note:: If the argument is a callable, the decorating behavior will be triggered, otherwise it will act as a normal function. """ error_msg = 'not of type dictionary' if is_callable(_object): _validator = _object @wraps(_validator) def decorated(value): ensure(isinstance(value, dict), error_msg) return _validator(value) return decorated try: ensure(isinstance(_object, dict), error_msg) except AssertionError: if args: msg = 'did not pass validation against callable: dictionary' raise Invalid('', msg=msg, reason=error_msg, *args) raise
python
def dictionary(_object, *args): """ Validates a given input is of type dictionary. Example usage:: data = {'a' : {'b': 1}} schema = ('a', dictionary) You can also use this as a decorator, as a way to check for the input before it even hits a validator you may be writing. .. note:: If the argument is a callable, the decorating behavior will be triggered, otherwise it will act as a normal function. """ error_msg = 'not of type dictionary' if is_callable(_object): _validator = _object @wraps(_validator) def decorated(value): ensure(isinstance(value, dict), error_msg) return _validator(value) return decorated try: ensure(isinstance(_object, dict), error_msg) except AssertionError: if args: msg = 'did not pass validation against callable: dictionary' raise Invalid('', msg=msg, reason=error_msg, *args) raise
['def', 'dictionary', '(', '_object', ',', '*', 'args', ')', ':', 'error_msg', '=', "'not of type dictionary'", 'if', 'is_callable', '(', '_object', ')', ':', '_validator', '=', '_object', '@', 'wraps', '(', '_validator', ')', 'def', 'decorated', '(', 'value', ')', ':', 'ensure', '(', 'isinstance', '(', 'value', ',', 'dict', ')', ',', 'error_msg', ')', 'return', '_validator', '(', 'value', ')', 'return', 'decorated', 'try', ':', 'ensure', '(', 'isinstance', '(', '_object', ',', 'dict', ')', ',', 'error_msg', ')', 'except', 'AssertionError', ':', 'if', 'args', ':', 'msg', '=', "'did not pass validation against callable: dictionary'", 'raise', 'Invalid', '(', "''", ',', 'msg', '=', 'msg', ',', 'reason', '=', 'error_msg', ',', '*', 'args', ')', 'raise']
Validates a given input is of type dictionary. Example usage:: data = {'a' : {'b': 1}} schema = ('a', dictionary) You can also use this as a decorator, as a way to check for the input before it even hits a validator you may be writing. .. note:: If the argument is a callable, the decorating behavior will be triggered, otherwise it will act as a normal function.
['Validates', 'a', 'given', 'input', 'is', 'of', 'type', 'dictionary', '.']
train
https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/validators/types.py#L66-L98
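The dictionary validator above doubles as a decorator: the dispatch keys off whether its argument is callable. A stripped-down sketch of that dual behavior, with plain assertions standing in for notario's ensure/Invalid machinery:

from functools import wraps

def dictionary(obj):
    if callable(obj):
        validator = obj
        @wraps(validator)
        def decorated(value):
            assert isinstance(value, dict), 'not of type dictionary'
            return validator(value)
        return decorated
    assert isinstance(obj, dict), 'not of type dictionary'

@dictionary
def keys_sorted(value):
    return sorted(value)

print(keys_sorted({'b': 1, 'a': 2}))  # ['a', 'b']
dictionary({'ok': True})              # passes; a non-dict would raise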
1,766
JIC-CSB/jicimagelib
jicimagelib/image.py
Image.from_array
def from_array(cls, array, name=None, log_in_history=True): """Return :class:`jicimagelib.image.Image` instance from an array. :param array: :class:`numpy.ndarray` :param name: name of the image :param log_in_history: whether or not to log the creation event in the image's history :returns: :class:`jicimagelib.image.Image` """ image = array.view(cls) event = 'Created image from array' if name: event = '{} as {}'.format(event, name) if log_in_history: image.history.append(event) return image
python
def from_array(cls, array, name=None, log_in_history=True): """Return :class:`jicimagelib.image.Image` instance from an array. :param array: :class:`numpy.ndarray` :param name: name of the image :param log_in_history: whether or not to log the creation event in the image's history :returns: :class:`jicimagelib.image.Image` """ image = array.view(cls) event = 'Created image from array' if name: event = '{} as {}'.format(event, name) if log_in_history: image.history.append(event) return image
['def', 'from_array', '(', 'cls', ',', 'array', ',', 'name', '=', 'None', ',', 'log_in_history', '=', 'True', ')', ':', 'image', '=', 'array', '.', 'view', '(', 'cls', ')', 'event', '=', "'Created image from array'", 'if', 'name', ':', 'event', '=', "'{} as {}'", '.', 'format', '(', 'event', ',', 'name', ')', 'if', 'log_in_history', ':', 'image', '.', 'history', '.', 'append', '(', 'event', ')', 'return', 'image']
Return :class:`jicimagelib.image.Image` instance from an array. :param array: :class:`numpy.ndarray` :param name: name of the image :param log_in_history: whether or not to log the creation event in the image's history :returns: :class:`jicimagelib.image.Image`
['Return', ':', 'class', ':', 'jicimagelib', '.', 'image', '.', 'Image', 'instance', 'from', 'an', 'array', '.', ':', 'param', 'array', ':', ':', 'class', ':', 'numpy', '.', 'ndarray', ':', 'param', 'name', ':', 'name', 'of', 'the', 'image', ':', 'param', 'log_in_history', ':', 'whether', 'or', 'not', 'to', 'log', 'the', 'creation', 'event', 'in', 'the', 'image', 's', 'history', ':', 'returns', ':', ':', 'class', ':', 'jicimagelib', '.', 'image', '.', 'Image']
train
https://github.com/JIC-CSB/jicimagelib/blob/fbd67accb2e6d55969c6d4ed7e8b4bb4ab65cd44/jicimagelib/image.py#L20-L35
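from_array above relies on array.view(cls), which reinterprets an existing ndarray as a subclass without copying. A minimal standalone subclass showing the same pattern; __array_finalize__ is needed so the history attribute also exists on instances created through views (this sketch is not jicimagelib's actual class).

import numpy as np

class Image(np.ndarray):
    def __array_finalize__(self, obj):
        # Runs for every new view/slice; inherit or initialise history.
        self.history = getattr(obj, 'history', [])

    @classmethod
    def from_array(cls, array, name=None):
        image = array.view(cls)  # no data copy, just a re-typed view
        event = 'Created image from array'
        if name:
            event = '{} as {}'.format(event, name)
        image.history = image.history + [event]
        return image

img = Image.from_array(np.zeros((2, 2)), name='blank')
print(img.history)  # ['Created image from array as blank']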
1,767
rytilahti/python-songpal
songpal/main.py
remove
async def remove(gc: GroupControl, slaves): """Remove speakers from group.""" click.echo("Removing from existing group: %s" % slaves) click.echo(await gc.remove(slaves))
python
async def remove(gc: GroupControl, slaves): """Remove speakers from group.""" click.echo("Removing from existing group: %s" % slaves) click.echo(await gc.remove(slaves))
['async', 'def', 'remove', '(', 'gc', ':', 'GroupControl', ',', 'slaves', ')', ':', 'click', '.', 'echo', '(', '"Removing from existing group: %s"', '%', 'slaves', ')', 'click', '.', 'echo', '(', 'await', 'gc', '.', 'remove', '(', 'slaves', ')', ')']
Remove speakers from group.
['Remove', 'speakers', 'from', 'group', '.']
train
https://github.com/rytilahti/python-songpal/blob/0443de6b3d960b9067a851d82261ca00e46b4618/songpal/main.py#L724-L727
1,768
gkbrk/JustIRC
JustIRC.py
IRCConnection.connect
def connect(self, server, port=6667): """Connects to a given IRC server. After the connection is established, it calls the on_connect event handler. """ self.socket.connect((server, port)) self.lines = self._read_lines() for event_handler in list(self.on_connect): event_handler(self)
python
def connect(self, server, port=6667): """Connects to a given IRC server. After the connection is established, it calls the on_connect event handler. """ self.socket.connect((server, port)) self.lines = self._read_lines() for event_handler in list(self.on_connect): event_handler(self)
['def', 'connect', '(', 'self', ',', 'server', ',', 'port', '=', '6667', ')', ':', 'self', '.', 'socket', '.', 'connect', '(', '(', 'server', ',', 'port', ')', ')', 'self', '.', 'lines', '=', 'self', '.', '_read_lines', '(', ')', 'for', 'event_handler', 'in', 'list', '(', 'self', '.', 'on_connect', ')', ':', 'event_handler', '(', 'self', ')']
Connects to a given IRC server. After the connection is established, it calls the on_connect event handler.
['Connects', 'to', 'a', 'given', 'IRC', 'server', '.', 'After', 'the', 'connection', 'is', 'established', 'it', 'calls', 'the', 'on_connect', 'event', 'handler', '.']
train
https://github.com/gkbrk/JustIRC/blob/135bc0a7b67d66b7b4cd13d62c46c7d9613d2163/JustIRC.py#L113-L121
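on_connect above is a plain list of callbacks fired once the socket is connected. A tiny standalone sketch of that event-list pattern (Emitter and the lambda are illustrative, not JustIRC's API):

class Emitter:
    def __init__(self):
        self.on_connect = []

    def connect(self):
        # ... socket setup would happen here ...
        # Iterate over a copy so handlers registered during dispatch
        # do not change the list mid-loop.
        for handler in list(self.on_connect):
            handler(self)

bot = Emitter()
bot.on_connect.append(lambda conn: print('connected:', conn))
bot.connect()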
1,769
pdkit/pdkit
pdkit/finger_tapping_processor.py
FingerTappingProcessor.dysmetria_score
def dysmetria_score(self, data_frame): """ This method calculates accuracy of target taps in pixels :param data_frame: the data frame :type data_frame: pandas.DataFrame :return ds: dysmetria score in pixels :rtype ds: float """ tap_data = data_frame[data_frame.action_type == 0] ds = np.mean(np.sqrt((tap_data.x - tap_data.x_target) ** 2 + (tap_data.y - tap_data.y_target) ** 2)) duration = math.ceil(data_frame.td[-1]) return ds, duration
python
def dysmetria_score(self, data_frame): """ This method calculates accuracy of target taps in pixels :param data_frame: the data frame :type data_frame: pandas.DataFrame :return ds: dysmetria score in pixels :rtype ds: float """ tap_data = data_frame[data_frame.action_type == 0] ds = np.mean(np.sqrt((tap_data.x - tap_data.x_target) ** 2 + (tap_data.y - tap_data.y_target) ** 2)) duration = math.ceil(data_frame.td[-1]) return ds, duration
['def', 'dysmetria_score', '(', 'self', ',', 'data_frame', ')', ':', 'tap_data', '=', 'data_frame', '[', 'data_frame', '.', 'action_type', '==', '0', ']', 'ds', '=', 'np', '.', 'mean', '(', 'np', '.', 'sqrt', '(', '(', 'tap_data', '.', 'x', '-', 'tap_data', '.', 'x_target', ')', '**', '2', '+', '(', 'tap_data', '.', 'y', '-', 'tap_data', '.', 'y_target', ')', '**', '2', ')', ')', 'duration', '=', 'math', '.', 'ceil', '(', 'data_frame', '.', 'td', '[', '-', '1', ']', ')', 'return', 'ds', ',', 'duration']
This method calculates accuracy of target taps in pixels :param data_frame: the data frame :type data_frame: pandas.DataFrame :return ds: dysmetria score in pixels :rtype ds: float
['This', 'method', 'calculates', 'accuracy', 'of', 'target', 'taps', 'in', 'pixels']
train
https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/finger_tapping_processor.py#L194-L207
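A worked example of the dysmetria score above: the mean Euclidean distance between each tap and its target, in pixels, on three hypothetical taps (the column names follow the row above; the values are invented).

import numpy as np
import pandas as pd

taps = pd.DataFrame({'x': [10.0, 13.0, 10.0],
                     'y': [10.0, 14.0, 10.0],
                     'x_target': [10.0, 10.0, 13.0],
                     'y_target': [10.0, 10.0, 14.0]})
ds = np.mean(np.sqrt((taps.x - taps.x_target) ** 2 +
                     (taps.y - taps.y_target) ** 2))
print(ds)  # (0 + 5 + 5) / 3 = 3.33... pixels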
1,770
phodge/homely
homely/_cli.py
add
def add(repo_path, dest_path): ''' Registers a git repository with homely so that it will run its `HOMELY.py` script on each invocation of `homely update`. `homely add` also immediately executes a `homely update` so that the dotfiles are installed straight away. If the git repository is hosted online, a local clone will be created first. REPO_PATH A path to a local git repository, or the URL for a git repository hosted online. If REPO_PATH is a URL, then it should be in a format accepted by `git clone`. If REPO_PATH is a URL, you may also specify DEST_PATH. DEST_PATH If REPO_PATH is a URL, then the local clone will be created at DEST_PATH. If DEST_PATH is omitted then the path to the local clone will be automatically derived from REPO_PATH. ''' mkcfgdir() try: repo = getrepohandler(repo_path) except NotARepo as err: echo("ERROR: {}: {}".format(ERR_NOT_A_REPO, err.repo_path)) sys.exit(1) # if the repo isn't on disk yet, we'll need to make a local clone of it if repo.isremote: localrepo, needpull = addfromremote(repo, dest_path) elif dest_path: raise UsageError("DEST_PATH is only for repos hosted online") else: try: repoid = repo.getrepoid() except RepoHasNoCommitsError as err: echo("ERROR: {}".format(ERR_NO_COMMITS)) sys.exit(1) localrepo = RepoInfo(repo, repoid, None) needpull = False # if we don't have a local repo, then there is nothing more to do if not localrepo: return # remember this new local repo with saveconfig(RepoListConfig()) as cfg: cfg.add_repo(localrepo) success = run_update([localrepo], pullfirst=needpull, cancleanup=True) if not success: sys.exit(1)
python
def add(repo_path, dest_path): ''' Registers a git repository with homely so that it will run its `HOMELY.py` script on each invocation of `homely update`. `homely add` also immediately executes a `homely update` so that the dotfiles are installed straight away. If the git repository is hosted online, a local clone will be created first. REPO_PATH A path to a local git repository, or the URL for a git repository hosted online. If REPO_PATH is a URL, then it should be in a format accepted by `git clone`. If REPO_PATH is a URL, you may also specify DEST_PATH. DEST_PATH If REPO_PATH is a URL, then the local clone will be created at DEST_PATH. If DEST_PATH is omitted then the path to the local clone will be automatically derived from REPO_PATH. ''' mkcfgdir() try: repo = getrepohandler(repo_path) except NotARepo as err: echo("ERROR: {}: {}".format(ERR_NOT_A_REPO, err.repo_path)) sys.exit(1) # if the repo isn't on disk yet, we'll need to make a local clone of it if repo.isremote: localrepo, needpull = addfromremote(repo, dest_path) elif dest_path: raise UsageError("DEST_PATH is only for repos hosted online") else: try: repoid = repo.getrepoid() except RepoHasNoCommitsError as err: echo("ERROR: {}".format(ERR_NO_COMMITS)) sys.exit(1) localrepo = RepoInfo(repo, repoid, None) needpull = False # if we don't have a local repo, then there is nothing more to do if not localrepo: return # remember this new local repo with saveconfig(RepoListConfig()) as cfg: cfg.add_repo(localrepo) success = run_update([localrepo], pullfirst=needpull, cancleanup=True) if not success: sys.exit(1)
['def', 'add', '(', 'repo_path', ',', 'dest_path', ')', ':', 'mkcfgdir', '(', ')', 'try', ':', 'repo', '=', 'getrepohandler', '(', 'repo_path', ')', 'except', 'NotARepo', 'as', 'err', ':', 'echo', '(', '"ERROR: {}: {}"', '.', 'format', '(', 'ERR_NOT_A_REPO', ',', 'err', '.', 'repo_path', ')', ')', 'sys', '.', 'exit', '(', '1', ')', "# if the repo isn't on disk yet, we'll need to make a local clone of it", 'if', 'repo', '.', 'isremote', ':', 'localrepo', ',', 'needpull', '=', 'addfromremote', '(', 'repo', ',', 'dest_path', ')', 'elif', 'dest_path', ':', 'raise', 'UsageError', '(', '"DEST_PATH is only for repos hosted online"', ')', 'else', ':', 'try', ':', 'repoid', '=', 'repo', '.', 'getrepoid', '(', ')', 'except', 'RepoHasNoCommitsError', 'as', 'err', ':', 'echo', '(', '"ERROR: {}"', '.', 'format', '(', 'ERR_NO_COMMITS', ')', ')', 'sys', '.', 'exit', '(', '1', ')', 'localrepo', '=', 'RepoInfo', '(', 'repo', ',', 'repoid', ',', 'None', ')', 'needpull', '=', 'False', "# if we don't have a local repo, then there is nothing more to do", 'if', 'not', 'localrepo', ':', 'return', '# remember this new local repo', 'with', 'saveconfig', '(', 'RepoListConfig', '(', ')', ')', 'as', 'cfg', ':', 'cfg', '.', 'add_repo', '(', 'localrepo', ')', 'success', '=', 'run_update', '(', '[', 'localrepo', ']', ',', 'pullfirst', '=', 'needpull', ',', 'cancleanup', '=', 'True', ')', 'if', 'not', 'success', ':', 'sys', '.', 'exit', '(', '1', ')']
Registers a git repository with homely so that it will run its `HOMELY.py` script on each invocation of `homely update`. `homely add` also immediately executes a `homely update` so that the dotfiles are installed straight away. If the git repository is hosted online, a local clone will be created first. REPO_PATH A path to a local git repository, or the URL for a git repository hosted online. If REPO_PATH is a URL, then it should be in a format accepted by `git clone`. If REPO_PATH is a URL, you may also specify DEST_PATH. DEST_PATH If REPO_PATH is a URL, then the local clone will be created at DEST_PATH. If DEST_PATH is omitted then the path to the local clone will be automatically derived from REPO_PATH.
['Registers', 'a', 'git', 'repository', 'with', 'homely', 'so', 'that', 'it', 'will', 'run', 'its', 'HOMELY', '.', 'py', 'script', 'on', 'each', 'invocation', 'of', 'homely', 'update', '.', 'homely', 'add', 'also', 'immediately', 'executes', 'a', 'homely', 'update', 'so', 'that', 'the', 'dotfiles', 'are', 'installed', 'straight', 'away', '.', 'If', 'the', 'git', 'repository', 'is', 'hosted', 'online', 'a', 'local', 'clone', 'will', 'be', 'created', 'first', '.']
train
https://github.com/phodge/homely/blob/98ddcf3e4f29b0749645817b4866baaea8376085/homely/_cli.py#L72-L120
1,771
hydpy-dev/hydpy
hydpy/core/modeltools.py
ModelELS.reset_sum_fluxes
def reset_sum_fluxes(self): """Set the sum of the fluxes calculated so far to zero. >>> from hydpy.models.test_v1 import * >>> parameterstep() >>> fluxes.fastaccess._q_sum = 5. >>> model.reset_sum_fluxes() >>> fluxes.fastaccess._q_sum 0.0 """ fluxes = self.sequences.fluxes for flux in fluxes.numerics: if flux.NDIM == 0: setattr(fluxes.fastaccess, '_%s_sum' % flux.name, 0.) else: getattr(fluxes.fastaccess, '_%s_sum' % flux.name)[:] = 0.
python
def reset_sum_fluxes(self): """Set the sum of the fluxes calculated so far to zero. >>> from hydpy.models.test_v1 import * >>> parameterstep() >>> fluxes.fastaccess._q_sum = 5. >>> model.reset_sum_fluxes() >>> fluxes.fastaccess._q_sum 0.0 """ fluxes = self.sequences.fluxes for flux in fluxes.numerics: if flux.NDIM == 0: setattr(fluxes.fastaccess, '_%s_sum' % flux.name, 0.) else: getattr(fluxes.fastaccess, '_%s_sum' % flux.name)[:] = 0.
['def', 'reset_sum_fluxes', '(', 'self', ')', ':', 'fluxes', '=', 'self', '.', 'sequences', '.', 'fluxes', 'for', 'flux', 'in', 'fluxes', '.', 'numerics', ':', 'if', 'flux', '.', 'NDIM', '==', '0', ':', 'setattr', '(', 'fluxes', '.', 'fastaccess', ',', "'_%s_sum'", '%', 'flux', '.', 'name', ',', '0.', ')', 'else', ':', 'getattr', '(', 'fluxes', '.', 'fastaccess', ',', "'_%s_sum'", '%', 'flux', '.', 'name', ')', '[', ':', ']', '=', '0.']
Set the sum of the fluxes calculated so far to zero. >>> from hydpy.models.test_v1 import * >>> parameterstep() >>> fluxes.fastaccess._q_sum = 5. >>> model.reset_sum_fluxes() >>> fluxes.fastaccess._q_sum 0.0
['Set', 'the', 'sum', 'of', 'the', 'fluxes', 'calculated', 'so', 'far', 'to', 'zero', '.']
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/modeltools.py#L693-L708
1,772
MartinThoma/hwrt
bin/merge.py
merge
def merge(d1, d2): """Merge two raw datasets into one. Parameters ---------- d1 : dict d2 : dict Returns ------- dict """ if d1['formula_id2latex'] is None: formula_id2latex = {} else: formula_id2latex = d1['formula_id2latex'].copy() formula_id2latex.update(d2['formula_id2latex']) handwriting_datasets = d1['handwriting_datasets'] for dataset in d2['handwriting_datasets']: handwriting_datasets.append(dataset) return {'formula_id2latex': formula_id2latex, 'handwriting_datasets': handwriting_datasets}
python
def merge(d1, d2): """Merge two raw datasets into one. Parameters ---------- d1 : dict d2 : dict Returns ------- dict """ if d1['formula_id2latex'] is None: formula_id2latex = {} else: formula_id2latex = d1['formula_id2latex'].copy() formula_id2latex.update(d2['formula_id2latex']) handwriting_datasets = d1['handwriting_datasets'] for dataset in d2['handwriting_datasets']: handwriting_datasets.append(dataset) return {'formula_id2latex': formula_id2latex, 'handwriting_datasets': handwriting_datasets}
['def', 'merge', '(', 'd1', ',', 'd2', ')', ':', 'if', 'd1', '[', "'formula_id2latex'", ']', 'is', 'None', ':', 'formula_id2latex', '=', '{', '}', 'else', ':', 'formula_id2latex', '=', 'd1', '[', "'formula_id2latex'", ']', '.', 'copy', '(', ')', 'formula_id2latex', '.', 'update', '(', 'd2', '[', "'formula_id2latex'", ']', ')', 'handwriting_datasets', '=', 'd1', '[', "'handwriting_datasets'", ']', 'for', 'dataset', 'in', 'd2', '[', "'handwriting_datasets'", ']', ':', 'handwriting_datasets', '.', 'append', '(', 'dataset', ')', 'return', '{', "'formula_id2latex'", ':', 'formula_id2latex', ',', "'handwriting_datasets'", ':', 'handwriting_datasets', '}']
Merge two raw datasets into one. Parameters ---------- d1 : dict d2 : dict Returns ------- dict
['Merge', 'two', 'raw', 'datasets', 'into', 'one', '.']
train
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/bin/merge.py#L37-L58
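A worked run of the merge above on two tiny raw datasets. This is a simplified standalone re-declaration (the None check collapses into `or {}`, and a new list is built instead of appending into d1's); the behavior shown is the same: the id-to-LaTeX maps are unioned with d2 winning ties, and the dataset lists are concatenated.

def merge(d1, d2):
    formula_id2latex = dict(d1['formula_id2latex'] or {})
    formula_id2latex.update(d2['formula_id2latex'])
    datasets = d1['handwriting_datasets'] + d2['handwriting_datasets']
    return {'formula_id2latex': formula_id2latex,
            'handwriting_datasets': datasets}

d1 = {'formula_id2latex': {1: r'\alpha'}, 'handwriting_datasets': [{'id': 'a'}]}
d2 = {'formula_id2latex': {2: r'\beta'}, 'handwriting_datasets': [{'id': 'b'}]}
merged = merge(d1, d2)
print(merged['formula_id2latex'])      # {1: '\\alpha', 2: '\\beta'}
print(merged['handwriting_datasets'])  # [{'id': 'a'}, {'id': 'b'}]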
1,773
log2timeline/plaso
plaso/engine/engine.py
BaseEngine._StartProfiling
def _StartProfiling(self, configuration): """Starts profiling. Args: configuration (ProfilingConfiguration): profiling configuration. """ if not configuration: return if configuration.HaveProfileMemoryGuppy(): self._guppy_memory_profiler = profilers.GuppyMemoryProfiler( self._name, configuration) self._guppy_memory_profiler.Start() if configuration.HaveProfileMemory(): self._memory_profiler = profilers.MemoryProfiler( self._name, configuration) self._memory_profiler.Start() if configuration.HaveProfileProcessing(): identifier = '{0:s}-processing'.format(self._name) self._processing_profiler = profilers.ProcessingProfiler( identifier, configuration) self._processing_profiler.Start() if configuration.HaveProfileSerializers(): identifier = '{0:s}-serializers'.format(self._name) self._serializers_profiler = profilers.SerializersProfiler( identifier, configuration) self._serializers_profiler.Start() if configuration.HaveProfileStorage(): self._storage_profiler = profilers.StorageProfiler( self._name, configuration) self._storage_profiler.Start() if configuration.HaveProfileTaskQueue(): self._task_queue_profiler = profilers.TaskQueueProfiler( self._name, configuration) self._task_queue_profiler.Start()
python
def _StartProfiling(self, configuration): """Starts profiling. Args: configuration (ProfilingConfiguration): profiling configuration. """ if not configuration: return if configuration.HaveProfileMemoryGuppy(): self._guppy_memory_profiler = profilers.GuppyMemoryProfiler( self._name, configuration) self._guppy_memory_profiler.Start() if configuration.HaveProfileMemory(): self._memory_profiler = profilers.MemoryProfiler( self._name, configuration) self._memory_profiler.Start() if configuration.HaveProfileProcessing(): identifier = '{0:s}-processing'.format(self._name) self._processing_profiler = profilers.ProcessingProfiler( identifier, configuration) self._processing_profiler.Start() if configuration.HaveProfileSerializers(): identifier = '{0:s}-serializers'.format(self._name) self._serializers_profiler = profilers.SerializersProfiler( identifier, configuration) self._serializers_profiler.Start() if configuration.HaveProfileStorage(): self._storage_profiler = profilers.StorageProfiler( self._name, configuration) self._storage_profiler.Start() if configuration.HaveProfileTaskQueue(): self._task_queue_profiler = profilers.TaskQueueProfiler( self._name, configuration) self._task_queue_profiler.Start()
['def', '_StartProfiling', '(', 'self', ',', 'configuration', ')', ':', 'if', 'not', 'configuration', ':', 'return', 'if', 'configuration', '.', 'HaveProfileMemoryGuppy', '(', ')', ':', 'self', '.', '_guppy_memory_profiler', '=', 'profilers', '.', 'GuppyMemoryProfiler', '(', 'self', '.', '_name', ',', 'configuration', ')', 'self', '.', '_guppy_memory_profiler', '.', 'Start', '(', ')', 'if', 'configuration', '.', 'HaveProfileMemory', '(', ')', ':', 'self', '.', '_memory_profiler', '=', 'profilers', '.', 'MemoryProfiler', '(', 'self', '.', '_name', ',', 'configuration', ')', 'self', '.', '_memory_profiler', '.', 'Start', '(', ')', 'if', 'configuration', '.', 'HaveProfileProcessing', '(', ')', ':', 'identifier', '=', "'{0:s}-processing'", '.', 'format', '(', 'self', '.', '_name', ')', 'self', '.', '_processing_profiler', '=', 'profilers', '.', 'ProcessingProfiler', '(', 'identifier', ',', 'configuration', ')', 'self', '.', '_processing_profiler', '.', 'Start', '(', ')', 'if', 'configuration', '.', 'HaveProfileSerializers', '(', ')', ':', 'identifier', '=', "'{0:s}-serializers'", '.', 'format', '(', 'self', '.', '_name', ')', 'self', '.', '_serializers_profiler', '=', 'profilers', '.', 'SerializersProfiler', '(', 'identifier', ',', 'configuration', ')', 'self', '.', '_serializers_profiler', '.', 'Start', '(', ')', 'if', 'configuration', '.', 'HaveProfileStorage', '(', ')', ':', 'self', '.', '_storage_profiler', '=', 'profilers', '.', 'StorageProfiler', '(', 'self', '.', '_name', ',', 'configuration', ')', 'self', '.', '_storage_profiler', '.', 'Start', '(', ')', 'if', 'configuration', '.', 'HaveProfileTaskQueue', '(', ')', ':', 'self', '.', '_task_queue_profiler', '=', 'profilers', '.', 'TaskQueueProfiler', '(', 'self', '.', '_name', ',', 'configuration', ')', 'self', '.', '_task_queue_profiler', '.', 'Start', '(', ')']
Starts profiling. Args: configuration (ProfilingConfiguration): profiling configuration.
['Starts', 'profiling', '.']
train
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/engine.py#L108-L147
1,774
saltstack/salt
salt/utils/stringutils.py
get_context
def get_context(template, line, num_lines=5, marker=None): ''' Returns debugging context around a line in a given string Returns:: string ''' template_lines = template.splitlines() num_template_lines = len(template_lines) # In test mode, a single line template would return a crazy line number like, # 357. Do this sanity check and if the given line is obviously wrong, just # return the entire template if line > num_template_lines: return template context_start = max(0, line - num_lines - 1) # subt 1 for 0-based indexing context_end = min(num_template_lines, line + num_lines) error_line_in_context = line - context_start - 1 # subtr 1 for 0-based idx buf = [] if context_start > 0: buf.append('[...]') error_line_in_context += 1 buf.extend(template_lines[context_start:context_end]) if context_end < num_template_lines: buf.append('[...]') if marker: buf[error_line_in_context] += marker return '---\n{0}\n---'.format('\n'.join(buf))
python
def get_context(template, line, num_lines=5, marker=None): ''' Returns debugging context around a line in a given string Returns:: string ''' template_lines = template.splitlines() num_template_lines = len(template_lines) # In test mode, a single line template would return a crazy line number like, # 357. Do this sanity check and if the given line is obviously wrong, just # return the entire template if line > num_template_lines: return template context_start = max(0, line - num_lines - 1) # subt 1 for 0-based indexing context_end = min(num_template_lines, line + num_lines) error_line_in_context = line - context_start - 1 # subtr 1 for 0-based idx buf = [] if context_start > 0: buf.append('[...]') error_line_in_context += 1 buf.extend(template_lines[context_start:context_end]) if context_end < num_template_lines: buf.append('[...]') if marker: buf[error_line_in_context] += marker return '---\n{0}\n---'.format('\n'.join(buf))
['def', 'get_context', '(', 'template', ',', 'line', ',', 'num_lines', '=', '5', ',', 'marker', '=', 'None', ')', ':', 'template_lines', '=', 'template', '.', 'splitlines', '(', ')', 'num_template_lines', '=', 'len', '(', 'template_lines', ')', '# In test mode, a single line template would return a crazy line number like,', '# 357. Do this sanity check and if the given line is obviously wrong, just', '# return the entire template', 'if', 'line', '>', 'num_template_lines', ':', 'return', 'template', 'context_start', '=', 'max', '(', '0', ',', 'line', '-', 'num_lines', '-', '1', ')', '# subt 1 for 0-based indexing', 'context_end', '=', 'min', '(', 'num_template_lines', ',', 'line', '+', 'num_lines', ')', 'error_line_in_context', '=', 'line', '-', 'context_start', '-', '1', '# subtr 1 for 0-based idx', 'buf', '=', '[', ']', 'if', 'context_start', '>', '0', ':', 'buf', '.', 'append', '(', "'[...]'", ')', 'error_line_in_context', '+=', '1', 'buf', '.', 'extend', '(', 'template_lines', '[', 'context_start', ':', 'context_end', ']', ')', 'if', 'context_end', '<', 'num_template_lines', ':', 'buf', '.', 'append', '(', "'[...]'", ')', 'if', 'marker', ':', 'buf', '[', 'error_line_in_context', ']', '+=', 'marker', 'return', "'---\\n{0}\\n---'", '.', 'format', '(', "'\\n'", '.', 'join', '(', 'buf', ')', ')']
Returns debugging context around a line in a given string Returns:: string
['Returns', 'debugging', 'context', 'around', 'a', 'line', 'in', 'a', 'given', 'string']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/stringutils.py#L540-L572
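A worked call of get_context above, imported from the path in the row's URL (assumes the salt package is installed): two lines of context either side of line 8 in a 12-line template, with the marker appended to the error line and '[...]' stubs where the window is clipped.

from salt.utils.stringutils import get_context

template = '\n'.join('line {0}'.format(i) for i in range(1, 13))
print(get_context(template, 8, num_lines=2, marker='    <----'))
# ---
# [...]
# line 6
# line 7
# line 8    <----
# line 9
# line 10
# [...]
# ---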
1,775
estnltk/estnltk
estnltk/text.py
Text.postags
def postags(self): """The list of word part-of-speech tags. Ambiguous cases are separated with pipe character by default. Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries. """ if not self.is_tagged(ANALYSIS): self.tag_analysis() return self.get_analysis_element(POSTAG)
python
def postags(self): """The list of word part-of-speech tags. Ambiguous cases are separated with pipe character by default. Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries. """ if not self.is_tagged(ANALYSIS): self.tag_analysis() return self.get_analysis_element(POSTAG)
['def', 'postags', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'is_tagged', '(', 'ANALYSIS', ')', ':', 'self', '.', 'tag_analysis', '(', ')', 'return', 'self', '.', 'get_analysis_element', '(', 'POSTAG', ')']
The list of word part-of-speech tags. Ambiguous cases are separated with pipe character by default. Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
['The', 'list', 'of', 'word', 'part', '-', 'of', '-', 'speech', 'tags', '.']
train
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L672-L680
1,776
gmr/tredis
tredis/strings.py
StringsMixin.mset
def mset(self, mapping): """Sets the given keys to their respective values. :meth:`~tredis.RedisClient.mset` replaces existing values with new values, just as regular :meth:`~tredis.RedisClient.set`. See :meth:`~tredis.RedisClient.msetnx` if you don't want to overwrite existing values. :meth:`~tredis.RedisClient.mset` is atomic, so all given keys are set at once. It is not possible for clients to see that some of the keys were updated while others are unchanged. .. versionadded:: 0.2.0 .. note:: **Time complexity**: ``O(N)`` where ``N`` is the number of keys to set. :param dict mapping: A mapping of key/value pairs to set :rtype: bool :raises: :exc:`~tredis.exceptions.RedisError` """ command = [b'MSET'] for key, value in mapping.items(): command += [key, value] return self._execute(command, b'OK')
python
def mset(self, mapping): """Sets the given keys to their respective values. :meth:`~tredis.RedisClient.mset` replaces existing values with new values, just as regular :meth:`~tredis.RedisClient.set`. See :meth:`~tredis.RedisClient.msetnx` if you don't want to overwrite existing values. :meth:`~tredis.RedisClient.mset` is atomic, so all given keys are set at once. It is not possible for clients to see that some of the keys were updated while others are unchanged. .. versionadded:: 0.2.0 .. note:: **Time complexity**: ``O(N)`` where ``N`` is the number of keys to set. :param dict mapping: A mapping of key/value pairs to set :rtype: bool :raises: :exc:`~tredis.exceptions.RedisError` """ command = [b'MSET'] for key, value in mapping.items(): command += [key, value] return self._execute(command, b'OK')
['def', 'mset', '(', 'self', ',', 'mapping', ')', ':', 'command', '=', '[', "b'MSET'", ']', 'for', 'key', ',', 'value', 'in', 'mapping', '.', 'items', '(', ')', ':', 'command', '+=', '[', 'key', ',', 'value', ']', 'return', 'self', '.', '_execute', '(', 'command', ',', "b'OK'", ')']
Sets the given keys to their respective values. :meth:`~tredis.RedisClient.mset` replaces existing values with new values, just as regular :meth:`~tredis.RedisClient.set`. See :meth:`~tredis.RedisClient.msetnx` if you don't want to overwrite existing values. :meth:`~tredis.RedisClient.mset` is atomic, so all given keys are set at once. It is not possible for clients to see that some of the keys were updated while others are unchanged. .. versionadded:: 0.2.0 .. note:: **Time complexity**: ``O(N)`` where ``N`` is the number of keys to set. :param dict mapping: A mapping of key/value pairs to set :rtype: bool :raises: :exc:`~tredis.exceptions.RedisError`
['Sets', 'the', 'given', 'keys', 'to', 'their', 'respective', 'values', '.', ':', 'meth', ':', '~tredis', '.', 'RedisClient', '.', 'mset', 'replaces', 'existing', 'values', 'with', 'new', 'values', 'just', 'as', 'regular', ':', 'meth', ':', '~tredis', '.', 'RedisClient', '.', 'set', '.', 'See', ':', 'meth', ':', '~tredis', '.', 'RedisClient', '.', 'msetnx', 'if', 'you', 'don', 't', 'want', 'to', 'overwrite', 'existing', 'values', '.']
train
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/strings.py#L440-L464
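The wire command built by mset above is just the mapping flattened pairwise after the MSET keyword; a two-key sketch of that flattening:

mapping = {b'k1': b'v1', b'k2': b'v2'}
command = [b'MSET']
for key, value in mapping.items():
    command += [key, value]
print(command)  # [b'MSET', b'k1', b'v1', b'k2', b'v2']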
1,777
JamesPHoughton/pysd
pysd/py_backend/functions.py
Macro.doc
def doc(self): """ Formats a table of documentation strings to help users remember variable names, and understand how they are translated into python safe names. Returns ------- docs_df: pandas dataframe Dataframe with columns for the model components: - Real names - Python safe identifiers (as used in model.components) - Units string - Documentation strings from the original model file """ collector = [] for name, varname in self.components._namespace.items(): try: docstring = getattr(self.components, varname).__doc__ lines = docstring.split('\n') collector.append({'Real Name': name, 'Py Name': varname, 'Eqn': lines[2].replace("Original Eqn:", "").strip(), 'Unit': lines[3].replace("Units:", "").strip(), 'Lims': lines[4].replace("Limits:", "").strip(), 'Type': lines[5].replace("Type:", "").strip(), 'Comment': '\n'.join(lines[7:]).strip()}) except: pass docs_df = _pd.DataFrame(collector) docs_df.fillna('None', inplace=True) order = ['Real Name', 'Py Name', 'Unit', 'Lims', 'Type', 'Eqn', 'Comment'] return docs_df[order].sort_values(by='Real Name').reset_index(drop=True)
python
def doc(self): """ Formats a table of documentation strings to help users remember variable names, and understand how they are translated into python safe names. Returns ------- docs_df: pandas dataframe Dataframe with columns for the model components: - Real names - Python safe identifiers (as used in model.components) - Units string - Documentation strings from the original model file """ collector = [] for name, varname in self.components._namespace.items(): try: docstring = getattr(self.components, varname).__doc__ lines = docstring.split('\n') collector.append({'Real Name': name, 'Py Name': varname, 'Eqn': lines[2].replace("Original Eqn:", "").strip(), 'Unit': lines[3].replace("Units:", "").strip(), 'Lims': lines[4].replace("Limits:", "").strip(), 'Type': lines[5].replace("Type:", "").strip(), 'Comment': '\n'.join(lines[7:]).strip()}) except: pass docs_df = _pd.DataFrame(collector) docs_df.fillna('None', inplace=True) order = ['Real Name', 'Py Name', 'Unit', 'Lims', 'Type', 'Eqn', 'Comment'] return docs_df[order].sort_values(by='Real Name').reset_index(drop=True)
['def', 'doc', '(', 'self', ')', ':', 'collector', '=', '[', ']', 'for', 'name', ',', 'varname', 'in', 'self', '.', 'components', '.', '_namespace', '.', 'items', '(', ')', ':', 'try', ':', 'docstring', '=', 'getattr', '(', 'self', '.', 'components', ',', 'varname', ')', '.', '__doc__', 'lines', '=', 'docstring', '.', 'split', '(', "'\\n'", ')', 'collector', '.', 'append', '(', '{', "'Real Name'", ':', 'name', ',', "'Py Name'", ':', 'varname', ',', "'Eqn'", ':', 'lines', '[', '2', ']', '.', 'replace', '(', '"Original Eqn:"', ',', '""', ')', '.', 'strip', '(', ')', ',', "'Unit'", ':', 'lines', '[', '3', ']', '.', 'replace', '(', '"Units:"', ',', '""', ')', '.', 'strip', '(', ')', ',', "'Lims'", ':', 'lines', '[', '4', ']', '.', 'replace', '(', '"Limits:"', ',', '""', ')', '.', 'strip', '(', ')', ',', "'Type'", ':', 'lines', '[', '5', ']', '.', 'replace', '(', '"Type:"', ',', '""', ')', '.', 'strip', '(', ')', ',', "'Comment'", ':', "'\\n'", '.', 'join', '(', 'lines', '[', '7', ':', ']', ')', '.', 'strip', '(', ')', '}', ')', 'except', ':', 'pass', 'docs_df', '=', '_pd', '.', 'DataFrame', '(', 'collector', ')', 'docs_df', '.', 'fillna', '(', "'None'", ',', 'inplace', '=', 'True', ')', 'order', '=', '[', "'Real Name'", ',', "'Py Name'", ',', "'Unit'", ',', "'Lims'", ',', "'Type'", ',', "'Eqn'", ',', "'Comment'", ']', 'return', 'docs_df', '[', 'order', ']', '.', 'sort_values', '(', 'by', '=', "'Real Name'", ')', '.', 'reset_index', '(', 'drop', '=', 'True', ')']
Formats a table of documentation strings to help users remember variable names, and understand how they are translated into python safe names. Returns ------- docs_df: pandas dataframe Dataframe with columns for the model components: - Real names - Python safe identifiers (as used in model.components) - Units string - Documentation strings from the original model file
['Formats', 'a', 'table', 'of', 'documentation', 'strings', 'to', 'help', 'users', 'remember', 'variable', 'names', 'and', 'understand', 'how', 'they', 'are', 'translated', 'into', 'python', 'safe', 'names', '.']
train
https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L473-L506
1,778
carta/ldap_tools
src/ldap_tools/user.py
CLI.create
def create(config, name, group, type): """Create an LDAP user.""" if type not in ('user', 'service'): raise click.BadOptionUsage("--type must be 'user' or 'service'") client = Client() client.prepare_connection() user_api = API(client) group_api = GroupApi(client) user_api.create(name[0], name[1], group, type, group_api)
python
def create(config, name, group, type): """Create an LDAP user.""" if type not in ('user', 'service'): raise click.BadOptionUsage("--type must be 'user' or 'service'") client = Client() client.prepare_connection() user_api = API(client) group_api = GroupApi(client) user_api.create(name[0], name[1], group, type, group_api)
['def', 'create', '(', 'config', ',', 'name', ',', 'group', ',', 'type', ')', ':', 'if', 'type', 'not', 'in', '(', "'user'", ',', "'service'", ')', ':', 'raise', 'click', '.', 'BadOptionUsage', '(', '"--type must be \'user\' or \'service\'"', ')', 'client', '=', 'Client', '(', ')', 'client', '.', 'prepare_connection', '(', ')', 'user_api', '=', 'API', '(', 'client', ')', 'group_api', '=', 'GroupApi', '(', 'client', ')', 'user_api', '.', 'create', '(', 'name', '[', '0', ']', ',', 'name', '[', '1', ']', ',', 'group', ',', 'type', ',', 'group_api', ')']
Create an LDAP user.
['Create', 'an', 'LDAP', 'user', '.']
train
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/user.py#L184-L192
1,779
pyopenapi/pyswagger
pyswagger/io.py
Response.apply_with
def apply_with(self, status=None, raw=None, header=None): """ update header, status code, raw datum, ...etc :param int status: status code :param str raw: body content :param dict header: header section :return: return self for chaining :rtype: Response """ if status != None: self.__status = status r = (final(self.__op.responses.get(str(self.__status), None)) or final(self.__op.responses.get('default', None))) if header != None: if isinstance(header, (collections.Mapping, collections.MutableMapping)): for k, v in six.iteritems(header): self._convert_header(r, k, v) else: for k, v in header: self._convert_header(r, k, v) if raw != None: # update 'raw' self.__raw = raw if self.__status == None: raise Exception('Update status code before assigning raw data') if r and r.schema and not self.__raw_body_only: # update data from Operation if succeed else from responseMessage.responseModel content_type = 'application/json' for k, v in six.iteritems(self.header): if k.lower() == 'content-type': content_type = v[0].lower() break schema = deref(r.schema) _type = schema.type _format = schema.format name = schema.name data = self.__op._mime_codec.unmarshal(content_type, self.raw, _type=_type, _format=_format, name=name) self.__data = r.schema._prim_(data, self.__op._prim_factory, ctx=dict(read=True)) return self
python
def apply_with(self, status=None, raw=None, header=None): """ update header, status code, raw datum, ...etc :param int status: status code :param str raw: body content :param dict header: header section :return: return self for chaining :rtype: Response """ if status != None: self.__status = status r = (final(self.__op.responses.get(str(self.__status), None)) or final(self.__op.responses.get('default', None))) if header != None: if isinstance(header, (collections.Mapping, collections.MutableMapping)): for k, v in six.iteritems(header): self._convert_header(r, k, v) else: for k, v in header: self._convert_header(r, k, v) if raw != None: # update 'raw' self.__raw = raw if self.__status == None: raise Exception('Update status code before assigning raw data') if r and r.schema and not self.__raw_body_only: # update data from Operation if succeed else from responseMessage.responseModel content_type = 'application/json' for k, v in six.iteritems(self.header): if k.lower() == 'content-type': content_type = v[0].lower() break schema = deref(r.schema) _type = schema.type _format = schema.format name = schema.name data = self.__op._mime_codec.unmarshal(content_type, self.raw, _type=_type, _format=_format, name=name) self.__data = r.schema._prim_(data, self.__op._prim_factory, ctx=dict(read=True)) return self
['def', 'apply_with', '(', 'self', ',', 'status', '=', 'None', ',', 'raw', '=', 'None', ',', 'header', '=', 'None', ')', ':', 'if', 'status', '!=', 'None', ':', 'self', '.', '__status', '=', 'status', 'r', '=', '(', 'final', '(', 'self', '.', '__op', '.', 'responses', '.', 'get', '(', 'str', '(', 'self', '.', '__status', ')', ',', 'None', ')', ')', 'or', 'final', '(', 'self', '.', '__op', '.', 'responses', '.', 'get', '(', "'default'", ',', 'None', ')', ')', ')', 'if', 'header', '!=', 'None', ':', 'if', 'isinstance', '(', 'header', ',', '(', 'collections', '.', 'Mapping', ',', 'collections', '.', 'MutableMapping', ')', ')', ':', 'for', 'k', ',', 'v', 'in', 'six', '.', 'iteritems', '(', 'header', ')', ':', 'self', '.', '_convert_header', '(', 'r', ',', 'k', ',', 'v', ')', 'else', ':', 'for', 'k', ',', 'v', 'in', 'header', ':', 'self', '.', '_convert_header', '(', 'r', ',', 'k', ',', 'v', ')', 'if', 'raw', '!=', 'None', ':', "# update 'raw'", 'self', '.', '__raw', '=', 'raw', 'if', 'self', '.', '__status', '==', 'None', ':', 'raise', 'Exception', '(', "'Update status code before assigning raw data'", ')', 'if', 'r', 'and', 'r', '.', 'schema', 'and', 'not', 'self', '.', '__raw_body_only', ':', '# update data from Operation if succeed else from responseMessage.responseModel', 'content_type', '=', "'application/json'", 'for', 'k', ',', 'v', 'in', 'six', '.', 'iteritems', '(', 'self', '.', 'header', ')', ':', 'if', 'k', '.', 'lower', '(', ')', '==', "'content-type'", ':', 'content_type', '=', 'v', '[', '0', ']', '.', 'lower', '(', ')', 'break', 'schema', '=', 'deref', '(', 'r', '.', 'schema', ')', '_type', '=', 'schema', '.', 'type', '_format', '=', 'schema', '.', 'format', 'name', '=', 'schema', '.', 'name', 'data', '=', 'self', '.', '__op', '.', '_mime_codec', '.', 'unmarshal', '(', 'content_type', ',', 'self', '.', 'raw', ',', '_type', '=', '_type', ',', '_format', '=', '_format', ',', 'name', '=', 'name', ')', 'self', '.', '__data', '=', 'r', '.', 'schema', '.', '_prim_', '(', 'data', ',', 'self', '.', '__op', '.', '_prim_factory', ',', 'ctx', '=', 'dict', '(', 'read', '=', 'True', ')', ')', 'return', 'self']
update header, status code, raw datum, ...etc :param int status: status code :param str raw: body content :param dict header: header section :return: return self for chaining :rtype: Response
['update', 'header', 'status', 'code', 'raw', 'datum', '...', 'etc']
train
https://github.com/pyopenapi/pyswagger/blob/333c4ca08e758cd2194943d9904a3eda3fe43977/pyswagger/io.py#L374-L419
1,780
rigetti/pyquil
examples/pointer.py
pointer_gate
def pointer_gate(num_qubits, U): """ Make a pointer gate on `num_qubits`. The one-qubit gate U will act on the qubit addressed by the pointer qubits interpreted as an unsigned binary integer. There are P = floor(lg(num_qubits)) pointer qubits, and qubits numbered N - 1 N - 2 ... N - P are those reserved to represent the pointer. The first N - P qubits are the qubits which the one-qubit gate U can act on. """ ptr_bits = int(floor(np.log2(num_qubits))) data_bits = num_qubits - ptr_bits ptr_state = 0 assert ptr_bits > 0 program = pq.Program() program.defgate("CU", controlled(ptr_bits, U)) for _, target_qubit, changed in gray(ptr_bits): if changed is None: for ptr_qubit in range(num_qubits - ptr_bits, num_qubits): program.inst(X(ptr_qubit)) ptr_state ^= 1 << (ptr_qubit - data_bits) else: program.inst(X(data_bits + changed)) ptr_state ^= 1 << changed if target_qubit < data_bits: control_qubits = tuple(data_bits + i for i in range(ptr_bits)) program.inst(("CU",) + control_qubits + (target_qubit,)) fixup(program, data_bits, ptr_bits, ptr_state) return program
python
def pointer_gate(num_qubits, U): """ Make a pointer gate on `num_qubits`. The one-qubit gate U will act on the qubit addressed by the pointer qubits interpreted as an unsigned binary integer. There are P = floor(lg(num_qubits)) pointer qubits, and qubits numbered N - 1 N - 2 ... N - P are those reserved to represent the pointer. The first N - P qubits are the qubits which the one-qubit gate U can act on. """ ptr_bits = int(floor(np.log2(num_qubits))) data_bits = num_qubits - ptr_bits ptr_state = 0 assert ptr_bits > 0 program = pq.Program() program.defgate("CU", controlled(ptr_bits, U)) for _, target_qubit, changed in gray(ptr_bits): if changed is None: for ptr_qubit in range(num_qubits - ptr_bits, num_qubits): program.inst(X(ptr_qubit)) ptr_state ^= 1 << (ptr_qubit - data_bits) else: program.inst(X(data_bits + changed)) ptr_state ^= 1 << changed if target_qubit < data_bits: control_qubits = tuple(data_bits + i for i in range(ptr_bits)) program.inst(("CU",) + control_qubits + (target_qubit,)) fixup(program, data_bits, ptr_bits, ptr_state) return program
['def', 'pointer_gate', '(', 'num_qubits', ',', 'U', ')', ':', 'ptr_bits', '=', 'int', '(', 'floor', '(', 'np', '.', 'log2', '(', 'num_qubits', ')', ')', ')', 'data_bits', '=', 'num_qubits', '-', 'ptr_bits', 'ptr_state', '=', '0', 'assert', 'ptr_bits', '>', '0', 'program', '=', 'pq', '.', 'Program', '(', ')', 'program', '.', 'defgate', '(', '"CU"', ',', 'controlled', '(', 'ptr_bits', ',', 'U', ')', ')', 'for', '_', ',', 'target_qubit', ',', 'changed', 'in', 'gray', '(', 'ptr_bits', ')', ':', 'if', 'changed', 'is', 'None', ':', 'for', 'ptr_qubit', 'in', 'range', '(', 'num_qubits', '-', 'ptr_bits', ',', 'num_qubits', ')', ':', 'program', '.', 'inst', '(', 'X', '(', 'ptr_qubit', ')', ')', 'ptr_state', '^=', '1', '<<', '(', 'ptr_qubit', '-', 'data_bits', ')', 'else', ':', 'program', '.', 'inst', '(', 'X', '(', 'data_bits', '+', 'changed', ')', ')', 'ptr_state', '^=', '1', '<<', 'changed', 'if', 'target_qubit', '<', 'data_bits', ':', 'control_qubits', '=', 'tuple', '(', 'data_bits', '+', 'i', 'for', 'i', 'in', 'range', '(', 'ptr_bits', ')', ')', 'program', '.', 'inst', '(', '(', '"CU"', ',', ')', '+', 'control_qubits', '+', '(', 'target_qubit', ',', ')', ')', 'fixup', '(', 'program', ',', 'data_bits', ',', 'ptr_bits', ',', 'ptr_state', ')', 'return', 'program']
Make a pointer gate on `num_qubits`. The one-qubit gate U will act on the qubit addressed by the pointer qubits interpreted as an unsigned binary integer. There are P = floor(lg(num_qubits)) pointer qubits, and qubits numbered N - 1 N - 2 ... N - P are those reserved to represent the pointer. The first N - P qubits are the qubits which the one-qubit gate U can act on.
['Make', 'a', 'pointer', 'gate', 'on', 'num_qubits', '.', 'The', 'one', '-', 'qubit', 'gate', 'U', 'will', 'act', 'on', 'the', 'qubit', 'addressed', 'by', 'the', 'pointer', 'qubits', 'interpreted', 'as', 'an', 'unsigned', 'binary', 'integer', '.']
train
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/examples/pointer.py#L77-L116
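pointer_gate above walks the pointer states in Gray-code order, so consecutive states differ in exactly one bit and only a single X gate is needed between them. A standalone sketch of that ordering (this generator is illustrative, not pyquil's gray iterator):

def gray_states(nbits):
    # Standard binary-reflected Gray code: i XOR (i >> 1).
    for i in range(2 ** nbits):
        yield i ^ (i >> 1)

prev = None
for state in gray_states(2):
    changed = None if prev is None else (state ^ prev).bit_length() - 1
    print('{0:02b} changed bit: {1}'.format(state, changed))
    prev = state
# 00 None -> 01 bit 0 -> 11 bit 1 -> 10 bit 0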
1,781
mattja/nsim
nsim/analyses1/pyeeg.py
hjorth
def hjorth(X, D=None): """ Compute Hjorth mobility and complexity of a time series from either two cases below: 1. X, the time series of type list (default) 2. D, a first order differential sequence of X (if D is provided, recommended to speed up) In case 1, D is computed using Numpy's Difference function. Notes ----- To speed up, it is recommended to compute D before calling this function because D may also be used by other functions whereas computing it here again will slow down. Parameters ---------- X list a time series D list first order differential sequence of a time series Returns ------- As indicated in return line Hjorth mobility and complexity """ if D is None: D = numpy.diff(X) D = D.tolist() D.insert(0, X[0]) # pad the first difference D = numpy.array(D) n = len(X) M2 = float(sum(D ** 2)) / n TP = sum(numpy.array(X) ** 2) M4 = 0 for i in range(1, len(D)): M4 += (D[i] - D[i - 1]) ** 2 M4 = M4 / n return numpy.sqrt(M2 / TP), numpy.sqrt( float(M4) * TP / M2 / M2 )
python
def hjorth(X, D=None): """ Compute Hjorth mobility and complexity of a time series from either two cases below: 1. X, the time series of type list (default) 2. D, a first order differential sequence of X (if D is provided, recommended to speed up) In case 1, D is computed using Numpy's Difference function. Notes ----- To speed up, it is recommended to compute D before calling this function because D may also be used by other functions whereas computing it here again will slow down. Parameters ---------- X list a time series D list first order differential sequence of a time series Returns ------- As indicated in return line Hjorth mobility and complexity """ if D is None: D = numpy.diff(X) D = D.tolist() D.insert(0, X[0]) # pad the first difference D = numpy.array(D) n = len(X) M2 = float(sum(D ** 2)) / n TP = sum(numpy.array(X) ** 2) M4 = 0 for i in range(1, len(D)): M4 += (D[i] - D[i - 1]) ** 2 M4 = M4 / n return numpy.sqrt(M2 / TP), numpy.sqrt( float(M4) * TP / M2 / M2 )
['def', 'hjorth', '(', 'X', ',', 'D', '=', 'None', ')', ':', 'if', 'D', 'is', 'None', ':', 'D', '=', 'numpy', '.', 'diff', '(', 'X', ')', 'D', '=', 'D', '.', 'tolist', '(', ')', 'D', '.', 'insert', '(', '0', ',', 'X', '[', '0', ']', ')', '# pad the first difference', 'D', '=', 'numpy', '.', 'array', '(', 'D', ')', 'n', '=', 'len', '(', 'X', ')', 'M2', '=', 'float', '(', 'sum', '(', 'D', '**', '2', ')', ')', '/', 'n', 'TP', '=', 'sum', '(', 'numpy', '.', 'array', '(', 'X', ')', '**', '2', ')', 'M4', '=', '0', 'for', 'i', 'in', 'range', '(', '1', ',', 'len', '(', 'D', ')', ')', ':', 'M4', '+=', '(', 'D', '[', 'i', ']', '-', 'D', '[', 'i', '-', '1', ']', ')', '**', '2', 'M4', '=', 'M4', '/', 'n', 'return', 'numpy', '.', 'sqrt', '(', 'M2', '/', 'TP', ')', ',', 'numpy', '.', 'sqrt', '(', 'float', '(', 'M4', ')', '*', 'TP', '/', 'M2', '/', 'M2', ')']
Compute Hjorth mobility and complexity of a time series from either of the two cases below: 1. X, the time series of type list (default) 2. D, a first-order differential sequence of X (providing D is recommended, to speed things up) In case 1, D is computed using Numpy's difference function. Notes ----- For speed, it is recommended to compute D before calling this function, because D may also be used by other functions; computing it here again slows things down. Parameters ---------- X list a time series D list first-order differential sequence of a time series Returns ------- As indicated in the return line: Hjorth mobility and complexity
['Compute', 'Hjorth', 'mobility', 'and', 'complexity', 'of', 'a', 'time', 'series', 'from', 'either', 'two', 'cases', 'below', ':', '1', '.', 'X', 'the', 'time', 'series', 'of', 'type', 'list', '(', 'default', ')', '2', '.', 'D', 'a', 'first', 'order', 'differential', 'sequence', 'of', 'X', '(', 'if', 'D', 'is', 'provided', 'recommended', 'to', 'speed', 'up', ')']
train
https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/pyeeg.py#L277-L332
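Since the record's formulas are self-contained NumPy arithmetic, they can be checked without installing nsim. A sketch on a toy signal, mirroring the padding and normalisation in the function body (illustrative only):

```python
import numpy as np

rng = np.random.default_rng(0)
X = np.sin(np.linspace(0, 8 * np.pi, 500)) + 0.1 * rng.standard_normal(500)

D = np.concatenate(([X[0]], np.diff(X)))  # first difference, padded with X[0] as above
n = len(X)
M2 = np.sum(D ** 2) / n                   # mean power of the first difference
TP = np.sum(X ** 2)                       # total power of the signal
M4 = np.sum(np.diff(D) ** 2) / n          # mean power of the second difference

mobility = np.sqrt(M2 / TP)
complexity = np.sqrt(M4 * TP / M2 ** 2)
print(mobility, complexity)
```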
1,782
bmweiner/skillful
skillful/interface.py
ResponseBody.set_card_simple
def set_card_simple(self, title, content): """Set response card as simple type. title and content cannot exceed 8,000 characters. Args: title: str. Title of Simple or Standard type card. content: str. Content of Simple type card. """ self.response.card.type = 'Simple' self.response.card.title = title self.response.card.content = content
python
def set_card_simple(self, title, content): """Set response card as simple type. title and content cannot exceed 8,000 characters. Args: title: str. Title of Simple or Standard type card. content: str. Content of Simple type card. """ self.response.card.type = 'Simple' self.response.card.title = title self.response.card.content = content
['def', 'set_card_simple', '(', 'self', ',', 'title', ',', 'content', ')', ':', 'self', '.', 'response', '.', 'card', '.', 'type', '=', "'Simple'", 'self', '.', 'response', '.', 'card', '.', 'title', '=', 'title', 'self', '.', 'response', '.', 'card', '.', 'content', '=', 'content']
Set response card as simple type. title and content cannot exceed 8,000 characters. Args: title: str. Title of Simple or Standard type card. content: str. Content of Simple type card.
['Set', 'response', 'card', 'as', 'simple', 'type', '.']
train
https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/interface.py#L386-L397
1,783
amzn/ion-python
amazon/ion/writer_buffer.py
BufferTree.start_container
def start_container(self): """Add a node to the tree that represents the start of a container. Until end_container is called, any nodes added through add_scalar_value or start_container will be children of this new node. """ self.__container_lengths.append(self.current_container_length) self.current_container_length = 0 new_container_node = _Node() self.__container_node.add_child(new_container_node) self.__container_nodes.append(self.__container_node) self.__container_node = new_container_node
python
def start_container(self): """Add a node to the tree that represents the start of a container. Until end_container is called, any nodes added through add_scalar_value or start_container will be children of this new node. """ self.__container_lengths.append(self.current_container_length) self.current_container_length = 0 new_container_node = _Node() self.__container_node.add_child(new_container_node) self.__container_nodes.append(self.__container_node) self.__container_node = new_container_node
['def', 'start_container', '(', 'self', ')', ':', 'self', '.', '__container_lengths', '.', 'append', '(', 'self', '.', 'current_container_length', ')', 'self', '.', 'current_container_length', '=', '0', 'new_container_node', '=', '_Node', '(', ')', 'self', '.', '__container_node', '.', 'add_child', '(', 'new_container_node', ')', 'self', '.', '__container_nodes', '.', 'append', '(', 'self', '.', '__container_node', ')', 'self', '.', '__container_node', '=', 'new_container_node']
Add a node to the tree that represents the start of a container. Until end_container is called, any nodes added through add_scalar_value or start_container will be children of this new node.
['Add', 'a', 'node', 'to', 'the', 'tree', 'that', 'represents', 'the', 'start', 'of', 'a', 'container', '.']
train
https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/writer_buffer.py#L91-L102
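The method is one half of a stack discipline: entering a container saves the running length and descends into a fresh child node. A list-based sketch of the same bookkeeping, where `end_container` is a hypothetical counterpart added here for illustration (the real one lives elsewhere in writer_buffer.py):

```python
class LengthStack:
    """Toy stand-in for BufferTree's container-length bookkeeping."""
    def __init__(self):
        self._saved = []        # lengths of enclosing containers
        self.current = 0        # running length of the open container

    def start_container(self):
        self._saved.append(self.current)
        self.current = 0        # the child starts counting from zero

    def end_container(self):
        child = self.current
        self.current = self._saved.pop() + child  # fold child size into parent
        return child

s = LengthStack()
s.current = 7          # bytes already written at the top level
s.start_container()    # descend into a container
s.current += 3         # bytes written inside it
s.end_container()
print(s.current)       # 10
```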
1,784
MacHu-GWU/dataIO-project
dataIO/js.py
load
def load(abspath, default=None, enable_verbose=True): """Load Json from file. If file are not exists, returns ``default``. :param abspath: file path. use absolute path as much as you can. extension has to be ``.json`` or ``.gz`` (for compressed Json). :type abspath: string :param default: default ``dict()``, if ``abspath`` not exists, return the default Python object instead. :param enable_verbose: default ``True``, help-message-display trigger. :type enable_verbose: boolean Usage:: >>> from dataIO import js >>> js.load("test.json") # if you have a json file Load from 'test.json' ... Complete! Elapse 0.000432 sec. {'a': 1, 'b': 2} **中文文档** 从Json文件中读取数据 :param abspath: Json文件绝对路径, 扩展名需为 ``.json`` 或 ``.gz``, 其中 ``.gz`` 是被压缩后的Json文件 :type abspath: ``字符串`` :param default: 默认 ``dict()``, 如果文件路径不存在, 则会返回指定的默认值 :param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭 :type enable_verbose: ``布尔值`` """ if default is None: default = dict() prt("\nLoad from '%s' ..." % abspath, enable_verbose) abspath = lower_ext(str(abspath)) is_json = is_json_file(abspath) if not os.path.exists(abspath): prt(" File not found, use default value: %r" % default, enable_verbose) return default st = time.clock() if is_json: data = json.loads(textfile.read(abspath, encoding="utf-8")) else: data = json.loads(compress.read_gzip(abspath).decode("utf-8")) prt(" Complete! Elapse %.6f sec." % (time.clock() - st), enable_verbose) return data
python
def load(abspath, default=None, enable_verbose=True): """Load Json from file. If file are not exists, returns ``default``. :param abspath: file path. use absolute path as much as you can. extension has to be ``.json`` or ``.gz`` (for compressed Json). :type abspath: string :param default: default ``dict()``, if ``abspath`` not exists, return the default Python object instead. :param enable_verbose: default ``True``, help-message-display trigger. :type enable_verbose: boolean Usage:: >>> from dataIO import js >>> js.load("test.json") # if you have a json file Load from 'test.json' ... Complete! Elapse 0.000432 sec. {'a': 1, 'b': 2} **中文文档** 从Json文件中读取数据 :param abspath: Json文件绝对路径, 扩展名需为 ``.json`` 或 ``.gz``, 其中 ``.gz`` 是被压缩后的Json文件 :type abspath: ``字符串`` :param default: 默认 ``dict()``, 如果文件路径不存在, 则会返回指定的默认值 :param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭 :type enable_verbose: ``布尔值`` """ if default is None: default = dict() prt("\nLoad from '%s' ..." % abspath, enable_verbose) abspath = lower_ext(str(abspath)) is_json = is_json_file(abspath) if not os.path.exists(abspath): prt(" File not found, use default value: %r" % default, enable_verbose) return default st = time.clock() if is_json: data = json.loads(textfile.read(abspath, encoding="utf-8")) else: data = json.loads(compress.read_gzip(abspath).decode("utf-8")) prt(" Complete! Elapse %.6f sec." % (time.clock() - st), enable_verbose) return data
['def', 'load', '(', 'abspath', ',', 'default', '=', 'None', ',', 'enable_verbose', '=', 'True', ')', ':', 'if', 'default', 'is', 'None', ':', 'default', '=', 'dict', '(', ')', 'prt', '(', '"\\nLoad from \'%s\' ..."', '%', 'abspath', ',', 'enable_verbose', ')', 'abspath', '=', 'lower_ext', '(', 'str', '(', 'abspath', ')', ')', 'is_json', '=', 'is_json_file', '(', 'abspath', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'abspath', ')', ':', 'prt', '(', '" File not found, use default value: %r"', '%', 'default', ',', 'enable_verbose', ')', 'return', 'default', 'st', '=', 'time', '.', 'clock', '(', ')', 'if', 'is_json', ':', 'data', '=', 'json', '.', 'loads', '(', 'textfile', '.', 'read', '(', 'abspath', ',', 'encoding', '=', '"utf-8"', ')', ')', 'else', ':', 'data', '=', 'json', '.', 'loads', '(', 'compress', '.', 'read_gzip', '(', 'abspath', ')', '.', 'decode', '(', '"utf-8"', ')', ')', 'prt', '(', '" Complete! Elapse %.6f sec."', '%', '(', 'time', '.', 'clock', '(', ')', '-', 'st', ')', ',', 'enable_verbose', ')', 'return', 'data']
Load Json from file. If the file does not exist, returns ``default``. :param abspath: file path. Use an absolute path whenever you can. The extension has to be ``.json`` or ``.gz`` (for compressed Json). :type abspath: string :param default: default ``dict()``; if ``abspath`` does not exist, return this default Python object instead. :param enable_verbose: default ``True``, help-message-display trigger. :type enable_verbose: boolean Usage:: >>> from dataIO import js >>> js.load("test.json") # if you have a json file Load from 'test.json' ... Complete! Elapse 0.000432 sec. {'a': 1, 'b': 2} **Chinese documentation (translated)** Read data from a Json file. :param abspath: absolute path of the Json file; the extension must be ``.json`` or ``.gz``, where ``.gz`` denotes a compressed Json file :type abspath: ``string`` :param default: defaults to ``dict()``; if the file path does not exist, this default value is returned :param enable_verbose: defaults to ``True``; toggles informational messages, recommended off for batch processing :type enable_verbose: ``boolean``
['Load', 'Json', 'from', 'file', '.', 'If', 'file', 'are', 'not', 'exists', 'returns', 'default', '.']
train
https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/js.py#L78-L132
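The control flow above reduces to: return the default when the path is missing, otherwise decode either plain or gzipped JSON. A stdlib-only sketch of that fallback logic (dataIO's timing printouts and extension normalisation are omitted):

```python
import gzip
import json
import os

def load_json(path, default=None):
    if default is None:
        default = {}
    if not os.path.exists(path):
        return default                      # missing file: hand back the default
    if path.endswith(".gz"):
        with gzip.open(path, "rt", encoding="utf-8") as fh:
            return json.load(fh)            # compressed Json
    with open(path, encoding="utf-8") as fh:
        return json.load(fh)                # plain Json

print(load_json("missing.json", default={"a": 1}))  # -> {'a': 1}
```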
1,785
BlueBrain/NeuroM
neurom/check/neuron_checks.py
has_no_flat_neurites
def has_no_flat_neurites(neuron, tol=0.1, method='ratio'): '''Check that a neuron has no flat neurites Arguments: neuron(Neuron): The neuron object to test tol(float): tolerance method(string): way of determining flatness, 'tolerance', 'ratio' \ as described in :meth:`neurom.check.morphtree.get_flat_neurites` Returns: CheckResult with result ''' return CheckResult(len(get_flat_neurites(neuron, tol, method)) == 0)
python
def has_no_flat_neurites(neuron, tol=0.1, method='ratio'): '''Check that a neuron has no flat neurites Arguments: neuron(Neuron): The neuron object to test tol(float): tolerance method(string): way of determining flatness, 'tolerance', 'ratio' \ as described in :meth:`neurom.check.morphtree.get_flat_neurites` Returns: CheckResult with result ''' return CheckResult(len(get_flat_neurites(neuron, tol, method)) == 0)
['def', 'has_no_flat_neurites', '(', 'neuron', ',', 'tol', '=', '0.1', ',', 'method', '=', "'ratio'", ')', ':', 'return', 'CheckResult', '(', 'len', '(', 'get_flat_neurites', '(', 'neuron', ',', 'tol', ',', 'method', ')', ')', '==', '0', ')']
Check that a neuron has no flat neurites Arguments: neuron(Neuron): The neuron object to test tol(float): tolerance method(string): way of determining flatness, 'tolerance', 'ratio' \ as described in :meth:`neurom.check.morphtree.get_flat_neurites` Returns: CheckResult with result
['Check', 'that', 'a', 'neuron', 'has', 'no', 'flat', 'neurites']
train
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/neuron_checks.py#L100-L112
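The 'ratio' criterion flags a neurite as flat when its smallest spatial extent is tiny relative to its largest. NeuroM's exact computation lives in check.morphtree; a standalone illustration of the same idea on a point cloud, using singular values as extents (an assumption of this sketch, not NeuroM's code):

```python
import numpy as np

def is_flat(points, tol=0.1):
    centered = points - points.mean(axis=0)
    extents = np.linalg.svd(centered, compute_uv=False)  # sorted descending
    return extents[-1] / extents[0] < tol

# 50 points confined to the z = 0 plane: clearly flat
coplanar = np.column_stack([np.random.rand(50), np.random.rand(50), np.zeros(50)])
print(is_flat(coplanar))  # True
```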
1,786
draperjames/qtpandas
qtpandas/utils.py
fillNoneValues
def fillNoneValues(column): """Fill all NaN/NaT values of a column with an empty string Args: column (pandas.Series): A Series object with all rows. Returns: column: Series with filled NaN values. """ if column.dtype == object: column.fillna('', inplace=True) return column
python
def fillNoneValues(column): """Fill all NaN/NaT values of a column with an empty string Args: column (pandas.Series): A Series object with all rows. Returns: column: Series with filled NaN values. """ if column.dtype == object: column.fillna('', inplace=True) return column
['def', 'fillNoneValues', '(', 'column', ')', ':', 'if', 'column', '.', 'dtype', '==', 'object', ':', 'column', '.', 'fillna', '(', "''", ',', 'inplace', '=', 'True', ')', 'return', 'column']
Fill all NaN/NaT values of a column with an empty string Args: column (pandas.Series): A Series object with all rows. Returns: column: Series with filled NaN values.
['Fill', 'all', 'NaN', '/', 'NaT', 'values', 'of', 'a', 'column', 'with', 'an', 'empty', 'string']
train
https://github.com/draperjames/qtpandas/blob/64294fb69f1839e53dee5ea453337266bfaf24f4/qtpandas/utils.py#L19-L30
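The dtype guard means only object columns are touched; numeric columns keep their NaNs. A quick check with pandas (the function body is restated verbatim so the snippet runs standalone):

```python
import pandas as pd

def fill_none_values(column):
    if column.dtype == object:
        column.fillna('', inplace=True)
    return column

s_obj = pd.Series(["a", None, "c"], dtype=object)
s_num = pd.Series([1.0, None, 3.0])
print(fill_none_values(s_obj).tolist())  # ['a', '', 'c']
print(fill_none_values(s_num).tolist())  # [1.0, nan, 3.0] - left untouched
```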
1,787
Rapptz/discord.py
discord/iterators.py
HistoryIterator._retrieve_messages_before_strategy
async def _retrieve_messages_before_strategy(self, retrieve): """Retrieve messages using before parameter.""" before = self.before.id if self.before else None data = await self.logs_from(self.channel.id, retrieve, before=before) if len(data): if self.limit is not None: self.limit -= retrieve self.before = Object(id=int(data[-1]['id'])) return data
python
async def _retrieve_messages_before_strategy(self, retrieve): """Retrieve messages using before parameter.""" before = self.before.id if self.before else None data = await self.logs_from(self.channel.id, retrieve, before=before) if len(data): if self.limit is not None: self.limit -= retrieve self.before = Object(id=int(data[-1]['id'])) return data
['async', 'def', '_retrieve_messages_before_strategy', '(', 'self', ',', 'retrieve', ')', ':', 'before', '=', 'self', '.', 'before', '.', 'id', 'if', 'self', '.', 'before', 'else', 'None', 'data', '=', 'await', 'self', '.', 'logs_from', '(', 'self', '.', 'channel', '.', 'id', ',', 'retrieve', ',', 'before', '=', 'before', ')', 'if', 'len', '(', 'data', ')', ':', 'if', 'self', '.', 'limit', 'is', 'not', 'None', ':', 'self', '.', 'limit', '-=', 'retrieve', 'self', '.', 'before', '=', 'Object', '(', 'id', '=', 'int', '(', 'data', '[', '-', '1', ']', '[', "'id'", ']', ')', ')', 'return', 'data']
Retrieve messages using before parameter.
['Retrieve', 'messages', 'using', 'before', 'parameter', '.']
train
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/iterators.py#L325-L333
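Stripped of the Discord objects, this is cursor pagination: each fetch ends just before the oldest id seen so far. A synchronous stand-in with integer ids (the API returns pages newest-first, which the slice below imitates):

```python
MESSAGES = list(range(100))  # message ids, oldest to newest

def logs_from(retrieve, before=None):
    pool = [m for m in MESSAGES if before is None or m < before]
    return pool[-retrieve:][::-1]      # newest first, like the HTTP endpoint

before = None
while True:
    page = logs_from(25, before=before)
    if not page:
        break
    before = page[-1]                  # oldest id on this page becomes the cursor
print("paged down to id", before)      # 0
```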
1,788
MKLab-ITI/reveal-graph-embedding
reveal_graph_embedding/datautil/score_rw_util.py
write_average_score_row
def write_average_score_row(fp, score_name, scores): """ Simple utility function that writes an average score row in a file designated by a file pointer. Inputs: - fp: A file pointer. - score_name: What it says on the tin. - scores: An array of average score values corresponding to each of the training set percentages. """ row = "--" + score_name + "--" fp.write(row) for vector in scores: row = list(vector) row = [str(score) for score in row] row = "\n" + "\t".join(row) fp.write(row)
python
def write_average_score_row(fp, score_name, scores): """ Simple utility function that writes an average score row in a file designated by a file pointer. Inputs: - fp: A file pointer. - score_name: What it says on the tin. - scores: An array of average score values corresponding to each of the training set percentages. """ row = "--" + score_name + "--" fp.write(row) for vector in scores: row = list(vector) row = [str(score) for score in row] row = "\n" + "\t".join(row) fp.write(row)
['def', 'write_average_score_row', '(', 'fp', ',', 'score_name', ',', 'scores', ')', ':', 'row', '=', '"--"', '+', 'score_name', '+', '"--"', 'fp', '.', 'write', '(', 'row', ')', 'for', 'vector', 'in', 'scores', ':', 'row', '=', 'list', '(', 'vector', ')', 'row', '=', '[', 'str', '(', 'score', ')', 'for', 'score', 'in', 'row', ']', 'row', '=', '"\\n"', '+', '"\\t"', '.', 'join', '(', 'row', ')', 'fp', '.', 'write', '(', 'row', ')']
Simple utility function that writes an average score row in a file designated by a file pointer. Inputs: - fp: A file pointer. - score_name: What it says on the tin. - scores: An array of average score values corresponding to each of the training set percentages.
['Simple', 'utility', 'function', 'that', 'writes', 'an', 'average', 'score', 'row', 'in', 'a', 'file', 'designated', 'by', 'a', 'file', 'pointer', '.']
train
https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/datautil/score_rw_util.py#L76-L90
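The output format is a banner row followed by one tab-separated line per score vector. Reproducing it against an in-memory buffer shows the exact layout (the function body is restated so the snippet runs standalone):

```python
import io

def write_average_score_row(fp, score_name, scores):
    fp.write("--" + score_name + "--")
    for vector in scores:
        fp.write("\n" + "\t".join(str(score) for score in vector))

buf = io.StringIO()
write_average_score_row(buf, "macro-F1", [[0.71, 0.74], [0.69, 0.73]])
print(buf.getvalue())
```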
1,789
PMBio/limix-backup
limix/mtSet/iset.py
fit_iSet
def fit_iSet(Y, U_R=None, S_R=None, covs=None, Xr=None, n_perms=0, Ie=None, strat=False, verbose=True): """ Args: Y: [N, P] phenotype matrix S_R: N vector of eigenvalues of R U_R: [N, N] eigenvector matrix of R covs: [N, K] matrix for K covariates Xr: [N, S] genotype data of the set component n_perms: number of permutations to consider Ie: N boolean context indicator strat: if True, the implementation with stratified designs is considered """ factr=1e7 # remove? if strat: assert Ie is not None, 'Ie must be specified for stratification analyses' assert Y.shape[1]==1, 'Y must be Nx1 for stratification analysis' else: assert covs==None, 'Covariates are not supported for analysis of fully observed phenotypes' if verbose: print('fittng iSet') if strat: mtSetGxE = ISet_Strat(Y, Ie, Xr, covs=covs) RV = {} RV['null'] = mtSetGxE.fitNull() RV['rank2'] = mtSetGxE.fitFullRank() RV['rank1'] = mtSetGxE.fitLowRank() RV['block'] = mtSetGxE.fitBlock() RV['var'] = mtSetGxE.getVC() else: mtSetGxE = ISet_Full(Y=Y, S_R=S_R, U_R=U_R, Xr=Xr, factr=factr) RV = {} RV['null'] = mtSetGxE.fitNull() RV['rank2'] = mtSetGxE.fitFullRank() RV['rank1'] = mtSetGxE.fitLowRank() LLR = RV['rank1']['NLLAlt'] - RV['rank2']['NLLAlt'] if LLR<-1e-6: RV['rank2'] = mtSetGxE.fitFullRank(init_method='lr') try: RV['block'] = mtSetGxE.fitBlock() except: try: RV['block'] = mtSetGxE.fitBlock(init_method='null') except: RV['block'] = mtSetGxE.fitBlock(init_method='null_no_opt') RV['var'] = mtSetGxE.getVC() if n_perms>0: RVperm = {} nulls = ['null', 'block', 'rank1'] tests = ['mtSet', 'iSet', 'iSet-het'] for test in tests: RVperm[test+' LLR0'] = sp.zeros(n_perms) for seed_i in range(n_perms): if verbose: print('permutation %d / %d' % (seed_i, n_perms)) for it, test in enumerate(tests): if test=='mtSet': idxs = sp.random.permutation(Xr.shape[0]) _Xr = Xr[idxs, :] df0 = fit_iSet(Y, U_R=U_R, S_R=S_R, covs=covs, Xr=_Xr, n_perms=0, Ie=Ie, strat=strat, verbose=False) else: Y0 = mtSetGxE._sim_from(set_covar=nulls[it]) Y0 -= Y0.mean(0) df0 = fit_iSet(Y0, U_R=U_R, S_R=S_R, covs=covs, Xr=Xr, n_perms=0, Ie=Ie, strat=strat, verbose=False) RVperm[test+' LLR0'][seed_i] = df0[test+' LLR'][0] # output LLR_mtSet = RV['null']['NLL']-RV['rank2']['NLL'] LLR_iSet = RV['block']['NLL']-RV['rank2']['NLL'] LLR_iSet_het = RV['rank1']['NLL']-RV['rank2']['NLL'] if strat: var_keys = ['var_r_full', 'var_c', 'var_n'] else: var_keys = ['var_r_full', 'var_g', 'var_n'] varT = sp.sum([RV['var'][key] for key in var_keys]) var_pers = RV['var']['var_r_block'] / varT var_resc = (RV['var']['var_r_rank1'] - RV['var']['var_r_block']) / varT var_het = (RV['var']['var_r_full'] - RV['var']['var_r_rank1']) / varT conv = RV['null']['conv'] conv*= RV['block']['conv'] conv*= RV['rank1']['conv'] conv*= RV['rank2']['conv'] M = sp.array([LLR_mtSet, LLR_iSet, LLR_iSet_het, var_pers, var_resc, var_het, conv]).T columns = ['mtSet LLR', 'iSet LLR', 'iSet-het LLR', 'Persistent Var', 'Rescaling-GxC Var', 'Heterogeneity-GxC var', 'Converged'] df = pd.DataFrame(M, columns=columns) if n_perms>0: return df, pd.DataFrame(RVperm) return df
python
def fit_iSet(Y, U_R=None, S_R=None, covs=None, Xr=None, n_perms=0, Ie=None, strat=False, verbose=True): """ Args: Y: [N, P] phenotype matrix S_R: N vector of eigenvalues of R U_R: [N, N] eigenvector matrix of R covs: [N, K] matrix for K covariates Xr: [N, S] genotype data of the set component n_perms: number of permutations to consider Ie: N boolean context indicator strat: if True, the implementation with stratified designs is considered """ factr=1e7 # remove? if strat: assert Ie is not None, 'Ie must be specified for stratification analyses' assert Y.shape[1]==1, 'Y must be Nx1 for stratification analysis' else: assert covs==None, 'Covariates are not supported for analysis of fully observed phenotypes' if verbose: print('fittng iSet') if strat: mtSetGxE = ISet_Strat(Y, Ie, Xr, covs=covs) RV = {} RV['null'] = mtSetGxE.fitNull() RV['rank2'] = mtSetGxE.fitFullRank() RV['rank1'] = mtSetGxE.fitLowRank() RV['block'] = mtSetGxE.fitBlock() RV['var'] = mtSetGxE.getVC() else: mtSetGxE = ISet_Full(Y=Y, S_R=S_R, U_R=U_R, Xr=Xr, factr=factr) RV = {} RV['null'] = mtSetGxE.fitNull() RV['rank2'] = mtSetGxE.fitFullRank() RV['rank1'] = mtSetGxE.fitLowRank() LLR = RV['rank1']['NLLAlt'] - RV['rank2']['NLLAlt'] if LLR<-1e-6: RV['rank2'] = mtSetGxE.fitFullRank(init_method='lr') try: RV['block'] = mtSetGxE.fitBlock() except: try: RV['block'] = mtSetGxE.fitBlock(init_method='null') except: RV['block'] = mtSetGxE.fitBlock(init_method='null_no_opt') RV['var'] = mtSetGxE.getVC() if n_perms>0: RVperm = {} nulls = ['null', 'block', 'rank1'] tests = ['mtSet', 'iSet', 'iSet-het'] for test in tests: RVperm[test+' LLR0'] = sp.zeros(n_perms) for seed_i in range(n_perms): if verbose: print('permutation %d / %d' % (seed_i, n_perms)) for it, test in enumerate(tests): if test=='mtSet': idxs = sp.random.permutation(Xr.shape[0]) _Xr = Xr[idxs, :] df0 = fit_iSet(Y, U_R=U_R, S_R=S_R, covs=covs, Xr=_Xr, n_perms=0, Ie=Ie, strat=strat, verbose=False) else: Y0 = mtSetGxE._sim_from(set_covar=nulls[it]) Y0 -= Y0.mean(0) df0 = fit_iSet(Y0, U_R=U_R, S_R=S_R, covs=covs, Xr=Xr, n_perms=0, Ie=Ie, strat=strat, verbose=False) RVperm[test+' LLR0'][seed_i] = df0[test+' LLR'][0] # output LLR_mtSet = RV['null']['NLL']-RV['rank2']['NLL'] LLR_iSet = RV['block']['NLL']-RV['rank2']['NLL'] LLR_iSet_het = RV['rank1']['NLL']-RV['rank2']['NLL'] if strat: var_keys = ['var_r_full', 'var_c', 'var_n'] else: var_keys = ['var_r_full', 'var_g', 'var_n'] varT = sp.sum([RV['var'][key] for key in var_keys]) var_pers = RV['var']['var_r_block'] / varT var_resc = (RV['var']['var_r_rank1'] - RV['var']['var_r_block']) / varT var_het = (RV['var']['var_r_full'] - RV['var']['var_r_rank1']) / varT conv = RV['null']['conv'] conv*= RV['block']['conv'] conv*= RV['rank1']['conv'] conv*= RV['rank2']['conv'] M = sp.array([LLR_mtSet, LLR_iSet, LLR_iSet_het, var_pers, var_resc, var_het, conv]).T columns = ['mtSet LLR', 'iSet LLR', 'iSet-het LLR', 'Persistent Var', 'Rescaling-GxC Var', 'Heterogeneity-GxC var', 'Converged'] df = pd.DataFrame(M, columns=columns) if n_perms>0: return df, pd.DataFrame(RVperm) return df
['def', 'fit_iSet', '(', 'Y', ',', 'U_R', '=', 'None', ',', 'S_R', '=', 'None', ',', 'covs', '=', 'None', ',', 'Xr', '=', 'None', ',', 'n_perms', '=', '0', ',', 'Ie', '=', 'None', ',', 'strat', '=', 'False', ',', 'verbose', '=', 'True', ')', ':', 'factr', '=', '1e7', '# remove?', 'if', 'strat', ':', 'assert', 'Ie', 'is', 'not', 'None', ',', "'Ie must be specified for stratification analyses'", 'assert', 'Y', '.', 'shape', '[', '1', ']', '==', '1', ',', "'Y must be Nx1 for stratification analysis'", 'else', ':', 'assert', 'covs', '==', 'None', ',', "'Covariates are not supported for analysis of fully observed phenotypes'", 'if', 'verbose', ':', 'print', '(', "'fittng iSet'", ')', 'if', 'strat', ':', 'mtSetGxE', '=', 'ISet_Strat', '(', 'Y', ',', 'Ie', ',', 'Xr', ',', 'covs', '=', 'covs', ')', 'RV', '=', '{', '}', 'RV', '[', "'null'", ']', '=', 'mtSetGxE', '.', 'fitNull', '(', ')', 'RV', '[', "'rank2'", ']', '=', 'mtSetGxE', '.', 'fitFullRank', '(', ')', 'RV', '[', "'rank1'", ']', '=', 'mtSetGxE', '.', 'fitLowRank', '(', ')', 'RV', '[', "'block'", ']', '=', 'mtSetGxE', '.', 'fitBlock', '(', ')', 'RV', '[', "'var'", ']', '=', 'mtSetGxE', '.', 'getVC', '(', ')', 'else', ':', 'mtSetGxE', '=', 'ISet_Full', '(', 'Y', '=', 'Y', ',', 'S_R', '=', 'S_R', ',', 'U_R', '=', 'U_R', ',', 'Xr', '=', 'Xr', ',', 'factr', '=', 'factr', ')', 'RV', '=', '{', '}', 'RV', '[', "'null'", ']', '=', 'mtSetGxE', '.', 'fitNull', '(', ')', 'RV', '[', "'rank2'", ']', '=', 'mtSetGxE', '.', 'fitFullRank', '(', ')', 'RV', '[', "'rank1'", ']', '=', 'mtSetGxE', '.', 'fitLowRank', '(', ')', 'LLR', '=', 'RV', '[', "'rank1'", ']', '[', "'NLLAlt'", ']', '-', 'RV', '[', "'rank2'", ']', '[', "'NLLAlt'", ']', 'if', 'LLR', '<', '-', '1e-6', ':', 'RV', '[', "'rank2'", ']', '=', 'mtSetGxE', '.', 'fitFullRank', '(', 'init_method', '=', "'lr'", ')', 'try', ':', 'RV', '[', "'block'", ']', '=', 'mtSetGxE', '.', 'fitBlock', '(', ')', 'except', ':', 'try', ':', 'RV', '[', "'block'", ']', '=', 'mtSetGxE', '.', 'fitBlock', '(', 'init_method', '=', "'null'", ')', 'except', ':', 'RV', '[', "'block'", ']', '=', 'mtSetGxE', '.', 'fitBlock', '(', 'init_method', '=', "'null_no_opt'", ')', 'RV', '[', "'var'", ']', '=', 'mtSetGxE', '.', 'getVC', '(', ')', 'if', 'n_perms', '>', '0', ':', 'RVperm', '=', '{', '}', 'nulls', '=', '[', "'null'", ',', "'block'", ',', "'rank1'", ']', 'tests', '=', '[', "'mtSet'", ',', "'iSet'", ',', "'iSet-het'", ']', 'for', 'test', 'in', 'tests', ':', 'RVperm', '[', 'test', '+', "' LLR0'", ']', '=', 'sp', '.', 'zeros', '(', 'n_perms', ')', 'for', 'seed_i', 'in', 'range', '(', 'n_perms', ')', ':', 'if', 'verbose', ':', 'print', '(', "'permutation %d / %d'", '%', '(', 'seed_i', ',', 'n_perms', ')', ')', 'for', 'it', ',', 'test', 'in', 'enumerate', '(', 'tests', ')', ':', 'if', 'test', '==', "'mtSet'", ':', 'idxs', '=', 'sp', '.', 'random', '.', 'permutation', '(', 'Xr', '.', 'shape', '[', '0', ']', ')', '_Xr', '=', 'Xr', '[', 'idxs', ',', ':', ']', 'df0', '=', 'fit_iSet', '(', 'Y', ',', 'U_R', '=', 'U_R', ',', 'S_R', '=', 'S_R', ',', 'covs', '=', 'covs', ',', 'Xr', '=', '_Xr', ',', 'n_perms', '=', '0', ',', 'Ie', '=', 'Ie', ',', 'strat', '=', 'strat', ',', 'verbose', '=', 'False', ')', 'else', ':', 'Y0', '=', 'mtSetGxE', '.', '_sim_from', '(', 'set_covar', '=', 'nulls', '[', 'it', ']', ')', 'Y0', '-=', 'Y0', '.', 'mean', '(', '0', ')', 'df0', '=', 'fit_iSet', '(', 'Y0', ',', 'U_R', '=', 'U_R', ',', 'S_R', '=', 'S_R', ',', 'covs', '=', 'covs', ',', 'Xr', '=', 'Xr', ',', 'n_perms', '=', '0', ',', 'Ie', '=', 'Ie', ',', 'strat', '=', 'strat', ',', 'verbose', '=', 'False', ')', 'RVperm', '[', 'test', '+', "' LLR0'", ']', '[', 'seed_i', ']', '=', 'df0', '[', 'test', '+', "' LLR'", ']', '[', '0', ']', '# output', 'LLR_mtSet', '=', 'RV', '[', "'null'", ']', '[', "'NLL'", ']', '-', 'RV', '[', "'rank2'", ']', '[', "'NLL'", ']', 'LLR_iSet', '=', 'RV', '[', "'block'", ']', '[', "'NLL'", ']', '-', 'RV', '[', "'rank2'", ']', '[', "'NLL'", ']', 'LLR_iSet_het', '=', 'RV', '[', "'rank1'", ']', '[', "'NLL'", ']', '-', 'RV', '[', "'rank2'", ']', '[', "'NLL'", ']', 'if', 'strat', ':', 'var_keys', '=', '[', "'var_r_full'", ',', "'var_c'", ',', "'var_n'", ']', 'else', ':', 'var_keys', '=', '[', "'var_r_full'", ',', "'var_g'", ',', "'var_n'", ']', 'varT', '=', 'sp', '.', 'sum', '(', '[', 'RV', '[', "'var'", ']', '[', 'key', ']', 'for', 'key', 'in', 'var_keys', ']', ')', 'var_pers', '=', 'RV', '[', "'var'", ']', '[', "'var_r_block'", ']', '/', 'varT', 'var_resc', '=', '(', 'RV', '[', "'var'", ']', '[', "'var_r_rank1'", ']', '-', 'RV', '[', "'var'", ']', '[', "'var_r_block'", ']', ')', '/', 'varT', 'var_het', '=', '(', 'RV', '[', "'var'", ']', '[', "'var_r_full'", ']', '-', 'RV', '[', "'var'", ']', '[', "'var_r_rank1'", ']', ')', '/', 'varT', 'conv', '=', 'RV', '[', "'null'", ']', '[', "'conv'", ']', 'conv', '*=', 'RV', '[', "'block'", ']', '[', "'conv'", ']', 'conv', '*=', 'RV', '[', "'rank1'", ']', '[', "'conv'", ']', 'conv', '*=', 'RV', '[', "'rank2'", ']', '[', "'conv'", ']', 'M', '=', 'sp', '.', 'array', '(', '[', 'LLR_mtSet', ',', 'LLR_iSet', ',', 'LLR_iSet_het', ',', 'var_pers', ',', 'var_resc', ',', 'var_het', ',', 'conv', ']', ')', '.', 'T', 'columns', '=', '[', "'mtSet LLR'", ',', "'iSet LLR'", ',', "'iSet-het LLR'", ',', "'Persistent Var'", ',', "'Rescaling-GxC Var'", ',', "'Heterogeneity-GxC var'", ',', "'Converged'", ']', 'df', '=', 'pd', '.', 'DataFrame', '(', 'M', ',', 'columns', '=', 'columns', ')', 'if', 'n_perms', '>', '0', ':', 'return', 'df', ',', 'pd', '.', 'DataFrame', '(', 'RVperm', ')', 'return', 'df']
Args: Y: [N, P] phenotype matrix S_R: N vector of eigenvalues of R U_R: [N, N] eigenvector matrix of R covs: [N, K] matrix for K covariates Xr: [N, S] genotype data of the set component n_perms: number of permutations to consider Ie: N boolean context indicator strat: if True, the implementation with stratified designs is considered
['Args', ':', 'Y', ':', '[', 'N', 'P', ']', 'phenotype', 'matrix', 'S_R', ':', 'N', 'vector', 'of', 'eigenvalues', 'of', 'R', 'U_R', ':', '[', 'N', 'N', ']', 'eigenvector', 'matrix', 'of', 'R', 'covs', ':', '[', 'N', 'K', ']', 'matrix', 'for', 'K', 'covariates', 'Xr', ':', '[', 'N', 'S', ']', 'genotype', 'data', 'of', 'the', 'set', 'component', 'n_perms', ':', 'number', 'of', 'permutations', 'to', 'consider', 'Ie', ':', 'N', 'boolean', 'context', 'indicator', 'strat', ':', 'if', 'True', 'the', 'implementation', 'with', 'stratified', 'designs', 'is', 'considered']
train
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/mtSet/iset.py#L13-L102
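The three test statistics reported in the output columns are differences of the four fitted models' negative log-likelihoods. A sketch with placeholder NLL values (not real fits, which require limix and genotype data):

```python
nll = {"null": 120.0, "block": 118.5, "rank1": 117.9, "rank2": 115.2}  # placeholders

llr_mtset = nll["null"] - nll["rank2"]      # any set effect at all
llr_iset = nll["block"] - nll["rank2"]      # any genotype-context interaction
llr_iset_het = nll["rank1"] - nll["rank2"]  # heterogeneity beyond pure rescaling
print(llr_mtset, llr_iset, llr_iset_het)    # 4.8, 3.3, 2.7 (approx.)
```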
1,790
tensorflow/mesh
mesh_tensorflow/placement_mesh_impl.py
PlacementMeshImpl.receive
def receive(self, x, mesh_axis, source_pcoord): """Collective receive in groups. Each group contains the processors that differ only in mesh_axis. ```python group_size = self.shape[mesh_axis].size ``` Args: x: a LaidOutTensor mesh_axis: an integer source_pcoord: a list of optional integers. Each element is either None or an integer in [0, group_size). If source_pcoord[k] is None, then the output for the k-th processor in each group is a zero tensor. If source_pcoord[k] is not None, then the output for the k-th processor in each group is equal to the input for the source_pcoord[k]-th processor in that group. Returns: a LaidOutTensor """ x = x.to_laid_out_tensor() shape = x.tensor_list[0].shape dtype = x.tensor_list[0].dtype def _collective_receive(tensor_list, device_list): ret = [] for pcoord, device in enumerate(device_list): with tf.device(device): if source_pcoord[pcoord] is None: ret.append(tf.zeros(shape, dtype)) else: ret.append(tf.identity(tensor_list[source_pcoord[pcoord]])) return ret return self._collective_with_groups( x, [mesh_axis], _collective_receive)
python
def receive(self, x, mesh_axis, source_pcoord): """Collective receive in groups. Each group contains the processors that differ only in mesh_axis. ```python group_size = self.shape[mesh_axis].size ``` Args: x: a LaidOutTensor mesh_axis: an integer source_pcoord: a list of optional integers. Each element is either None or an integer in [0, group_size). If source_pcoord[k] is None, then the output for the k-th processor in each group is a zero tensor. If source_pcoord[k] is not None, then the output for the k-th processor in each group is equal to the input for the source_pcoord[k]-th processor in that group. Returns: a LaidOutTensor """ x = x.to_laid_out_tensor() shape = x.tensor_list[0].shape dtype = x.tensor_list[0].dtype def _collective_receive(tensor_list, device_list): ret = [] for pcoord, device in enumerate(device_list): with tf.device(device): if source_pcoord[pcoord] is None: ret.append(tf.zeros(shape, dtype)) else: ret.append(tf.identity(tensor_list[source_pcoord[pcoord]])) return ret return self._collective_with_groups( x, [mesh_axis], _collective_receive)
['def', 'receive', '(', 'self', ',', 'x', ',', 'mesh_axis', ',', 'source_pcoord', ')', ':', 'x', '=', 'x', '.', 'to_laid_out_tensor', '(', ')', 'shape', '=', 'x', '.', 'tensor_list', '[', '0', ']', '.', 'shape', 'dtype', '=', 'x', '.', 'tensor_list', '[', '0', ']', '.', 'dtype', 'def', '_collective_receive', '(', 'tensor_list', ',', 'device_list', ')', ':', 'ret', '=', '[', ']', 'for', 'pcoord', ',', 'device', 'in', 'enumerate', '(', 'device_list', ')', ':', 'with', 'tf', '.', 'device', '(', 'device', ')', ':', 'if', 'source_pcoord', '[', 'pcoord', ']', 'is', 'None', ':', 'ret', '.', 'append', '(', 'tf', '.', 'zeros', '(', 'shape', ',', 'dtype', ')', ')', 'else', ':', 'ret', '.', 'append', '(', 'tf', '.', 'identity', '(', 'tensor_list', '[', 'source_pcoord', '[', 'pcoord', ']', ']', ')', ')', 'return', 'ret', 'return', 'self', '.', '_collective_with_groups', '(', 'x', ',', '[', 'mesh_axis', ']', ',', '_collective_receive', ')']
Collective receive in groups. Each group contains the processors that differ only in mesh_axis. ```python group_size = self.shape[mesh_axis].size ``` Args: x: a LaidOutTensor mesh_axis: an integer source_pcoord: a list of optional integers. Each element is either None or an integer in [0, group_size). If source_pcoord[k] is None, then the output for the k-th processor in each group is a zero tensor. If source_pcoord[k] is not None, then the output for the k-th processor in each group is equal to the input for the source_pcoord[k]-th processor in that group. Returns: a LaidOutTensor
['Collective', 'receive', 'in', 'groups', '.']
train
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/placement_mesh_impl.py#L248-L283
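The source_pcoord semantics are easiest to see on a single group: each position either receives another member's tensor or a zero tensor. Plain NumPy arrays standing in for per-device tensors (illustrative, no TensorFlow required):

```python
import numpy as np

tensors = [np.full(2, k + 10) for k in range(4)]  # 'processor' k holds [k+10, k+10]
source_pcoord = [None, 0, 0, 2]

out = [np.zeros(2, dtype=int) if src is None else tensors[src]
       for src in source_pcoord]
print([o.tolist() for o in out])
# [[0, 0], [10, 10], [10, 10], [12, 12]]
```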
1,791
radjkarl/fancyTools
fancytools/render/GridRender.py
GridRender.add
def add(self, point, value): """ Assign all self.merge_values to the self._mergeMatrix Get the position/intensity of a value """ # check range for p, r in zip(point, self.range): if p < r[0] or p > r[1]: return # check nan if isnan(value): return refs = self.opts['references'] # for all neighbour points (1, if antialiasting=False): for position, intensity in self.sortMethod.getPositionsIntensities( point): position = tuple(position) if self.mean is not None: old_value = self.values[position] if not np.isnan(old_value): anz_values = self.density[position] mean = old_value + intensity * ( ((value - old_value) / (anz_values + intensity))) self.mean[position] = mean if self.variance is not None: self.variance[ position] += (abs(value - mean) / ( anz_values + intensity)) if self.mergeMethod(self.values, position, intensity, value): for a in refs: a.mergeMethod(a, position, intensity, value) if self.density is not None: self.density[position] += intensity
python
def add(self, point, value): """ Assign all self.merge_values to the self._mergeMatrix Get the position/intensity of a value """ # check range for p, r in zip(point, self.range): if p < r[0] or p > r[1]: return # check nan if isnan(value): return refs = self.opts['references'] # for all neighbour points (1, if antialiasting=False): for position, intensity in self.sortMethod.getPositionsIntensities( point): position = tuple(position) if self.mean is not None: old_value = self.values[position] if not np.isnan(old_value): anz_values = self.density[position] mean = old_value + intensity * ( ((value - old_value) / (anz_values + intensity))) self.mean[position] = mean if self.variance is not None: self.variance[ position] += (abs(value - mean) / ( anz_values + intensity)) if self.mergeMethod(self.values, position, intensity, value): for a in refs: a.mergeMethod(a, position, intensity, value) if self.density is not None: self.density[position] += intensity
['def', 'add', '(', 'self', ',', 'point', ',', 'value', ')', ':', '# check range', 'for', 'p', ',', 'r', 'in', 'zip', '(', 'point', ',', 'self', '.', 'range', ')', ':', 'if', 'p', '<', 'r', '[', '0', ']', 'or', 'p', '>', 'r', '[', '1', ']', ':', 'return', '# check nan', 'if', 'isnan', '(', 'value', ')', ':', 'return', 'refs', '=', 'self', '.', 'opts', '[', "'references'", ']', '# for all neighbour points (1, if antialiasting=False):', 'for', 'position', ',', 'intensity', 'in', 'self', '.', 'sortMethod', '.', 'getPositionsIntensities', '(', 'point', ')', ':', 'position', '=', 'tuple', '(', 'position', ')', 'if', 'self', '.', 'mean', 'is', 'not', 'None', ':', 'old_value', '=', 'self', '.', 'values', '[', 'position', ']', 'if', 'not', 'np', '.', 'isnan', '(', 'old_value', ')', ':', 'anz_values', '=', 'self', '.', 'density', '[', 'position', ']', 'mean', '=', 'old_value', '+', 'intensity', '*', '(', '(', '(', 'value', '-', 'old_value', ')', '/', '(', 'anz_values', '+', 'intensity', ')', ')', ')', 'self', '.', 'mean', '[', 'position', ']', '=', 'mean', 'if', 'self', '.', 'variance', 'is', 'not', 'None', ':', 'self', '.', 'variance', '[', 'position', ']', '+=', '(', 'abs', '(', 'value', '-', 'mean', ')', '/', '(', 'anz_values', '+', 'intensity', ')', ')', 'if', 'self', '.', 'mergeMethod', '(', 'self', '.', 'values', ',', 'position', ',', 'intensity', ',', 'value', ')', ':', 'for', 'a', 'in', 'refs', ':', 'a', '.', 'mergeMethod', '(', 'a', ',', 'position', ',', 'intensity', ',', 'value', ')', 'if', 'self', '.', 'density', 'is', 'not', 'None', ':', 'self', '.', 'density', '[', 'position', ']', '+=', 'intensity']
Assign all self.merge_values to the self._mergeMatrix. Get the position/intensity of a value.
['Assign', 'all', 'self', '.', 'merge_values', 'to', 'the', 'self', '.', '_mergeMatrix', 'Get', 'the', 'position', '/', 'intensity', 'of', 'a', 'value']
train
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/render/GridRender.py#L113-L148
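The mean update inside the loop is the standard incremental weighted mean: with accumulated weight w and incoming intensity i, the new mean is mean + i * (value - mean) / (w + i). A three-sample check of that recurrence:

```python
mean, weight = 0.0, 0.0
for value, intensity in [(2.0, 1.0), (4.0, 1.0), (10.0, 0.5)]:
    mean += intensity * (value - mean) / (weight + intensity)
    weight += intensity
print(mean)  # 4.4, i.e. (1*2 + 1*4 + 0.5*10) / 2.5
```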
1,792
MolSSI-BSE/basis_set_exchange
basis_set_exchange/misc.py
expand_elements
def expand_elements(compact_el, as_str=False): """ Create a list of integers given a string or list of compacted elements This is partly the opposite of compact_elements, but is more flexible. compact_el can be a list or a string. If compact_el is a list, each element is processed individually as a string (meaning list elements can contain commas, ranges, etc) If compact_el is a string, it is split by commas and then each section is processed. In all cases, element symbols (case insensitive) and Z numbers (as integers or strings) can be used interchangeably. Ranges are also allowed in both lists and strings. Some examples: "H-Li,C-O,Ne" will return [1, 2, 3, 6, 7, 8, 10] "H-N,8,Na-12" will return [1, 2, 3, 4, 5, 6, 7, 8, 11, 12] ['C', 'Al-15,S', 17, '18'] will return [6, 13, 14, 15, 16, 17, 18] If as_str is True, the list will contain strings of the integers (ie, the first example above will return ['1', '2', '3', '6', '7', '8', '10'] """ # If an integer, just return it if isinstance(compact_el, int): if as_str is True: return [str(compact_el)] else: return [compact_el] # If compact_el is a list, make it a comma-separated string if isinstance(compact_el, list): compact_el = [str(x) for x in compact_el] compact_el = [x for x in compact_el if len(x) > 0] compact_el = ','.join(compact_el) # Find multiple - or , # Also replace all whitespace with spaces compact_el = re.sub(r',+', ',', compact_el) compact_el = re.sub(r'-+', '-', compact_el) compact_el = re.sub(r'\s+', '', compact_el) # Find starting with or ending with comma and strip them compact_el = compact_el.strip(',') # Check if I was passed an empty string or list if len(compact_el) == 0: return [] # Find some erroneous patterns # -, and ,- if '-,' in compact_el: raise RuntimeError("Malformed element string") if ',-' in compact_el: raise RuntimeError("Malformed element string") # Strings ends or begins with - if compact_el.startswith('-') or compact_el.endswith('-'): raise RuntimeError("Malformed element string") # x-y-z if re.search(r'\w+-\w+-\w+', compact_el): raise RuntimeError("Malformed element string") # Split on commas tmp_list = compact_el.split(',') # Now go over each one and replace elements with ints el_list = [] for el in tmp_list: if not '-' in el: el_list.append(_Z_from_str(el)) else: begin, end = el.split('-') begin = _Z_from_str(begin) end = _Z_from_str(end) el_list.extend(list(range(begin, end + 1))) if as_str is True: return [str(x) for x in el_list] else: return el_list
python
def expand_elements(compact_el, as_str=False): """ Create a list of integers given a string or list of compacted elements This is partly the opposite of compact_elements, but is more flexible. compact_el can be a list or a string. If compact_el is a list, each element is processed individually as a string (meaning list elements can contain commas, ranges, etc) If compact_el is a string, it is split by commas and then each section is processed. In all cases, element symbols (case insensitive) and Z numbers (as integers or strings) can be used interchangeably. Ranges are also allowed in both lists and strings. Some examples: "H-Li,C-O,Ne" will return [1, 2, 3, 6, 7, 8, 10] "H-N,8,Na-12" will return [1, 2, 3, 4, 5, 6, 7, 8, 11, 12] ['C', 'Al-15,S', 17, '18'] will return [6, 13, 14, 15, 16, 17, 18] If as_str is True, the list will contain strings of the integers (ie, the first example above will return ['1', '2', '3', '6', '7', '8', '10'] """ # If an integer, just return it if isinstance(compact_el, int): if as_str is True: return [str(compact_el)] else: return [compact_el] # If compact_el is a list, make it a comma-separated string if isinstance(compact_el, list): compact_el = [str(x) for x in compact_el] compact_el = [x for x in compact_el if len(x) > 0] compact_el = ','.join(compact_el) # Find multiple - or , # Also replace all whitespace with spaces compact_el = re.sub(r',+', ',', compact_el) compact_el = re.sub(r'-+', '-', compact_el) compact_el = re.sub(r'\s+', '', compact_el) # Find starting with or ending with comma and strip them compact_el = compact_el.strip(',') # Check if I was passed an empty string or list if len(compact_el) == 0: return [] # Find some erroneous patterns # -, and ,- if '-,' in compact_el: raise RuntimeError("Malformed element string") if ',-' in compact_el: raise RuntimeError("Malformed element string") # Strings ends or begins with - if compact_el.startswith('-') or compact_el.endswith('-'): raise RuntimeError("Malformed element string") # x-y-z if re.search(r'\w+-\w+-\w+', compact_el): raise RuntimeError("Malformed element string") # Split on commas tmp_list = compact_el.split(',') # Now go over each one and replace elements with ints el_list = [] for el in tmp_list: if not '-' in el: el_list.append(_Z_from_str(el)) else: begin, end = el.split('-') begin = _Z_from_str(begin) end = _Z_from_str(end) el_list.extend(list(range(begin, end + 1))) if as_str is True: return [str(x) for x in el_list] else: return el_list
['def', 'expand_elements', '(', 'compact_el', ',', 'as_str', '=', 'False', ')', ':', '# If an integer, just return it', 'if', 'isinstance', '(', 'compact_el', ',', 'int', ')', ':', 'if', 'as_str', 'is', 'True', ':', 'return', '[', 'str', '(', 'compact_el', ')', ']', 'else', ':', 'return', '[', 'compact_el', ']', '# If compact_el is a list, make it a comma-separated string', 'if', 'isinstance', '(', 'compact_el', ',', 'list', ')', ':', 'compact_el', '=', '[', 'str', '(', 'x', ')', 'for', 'x', 'in', 'compact_el', ']', 'compact_el', '=', '[', 'x', 'for', 'x', 'in', 'compact_el', 'if', 'len', '(', 'x', ')', '>', '0', ']', 'compact_el', '=', "','", '.', 'join', '(', 'compact_el', ')', '# Find multiple - or ,', '# Also replace all whitespace with spaces', 'compact_el', '=', 're', '.', 'sub', '(', "r',+'", ',', "','", ',', 'compact_el', ')', 'compact_el', '=', 're', '.', 'sub', '(', "r'-+'", ',', "'-'", ',', 'compact_el', ')', 'compact_el', '=', 're', '.', 'sub', '(', "r'\\s+'", ',', "''", ',', 'compact_el', ')', '# Find starting with or ending with comma and strip them', 'compact_el', '=', 'compact_el', '.', 'strip', '(', "','", ')', '# Check if I was passed an empty string or list', 'if', 'len', '(', 'compact_el', ')', '==', '0', ':', 'return', '[', ']', '# Find some erroneous patterns', '# -, and ,-', 'if', "'-,'", 'in', 'compact_el', ':', 'raise', 'RuntimeError', '(', '"Malformed element string"', ')', 'if', "',-'", 'in', 'compact_el', ':', 'raise', 'RuntimeError', '(', '"Malformed element string"', ')', '# Strings ends or begins with -', 'if', 'compact_el', '.', 'startswith', '(', "'-'", ')', 'or', 'compact_el', '.', 'endswith', '(', "'-'", ')', ':', 'raise', 'RuntimeError', '(', '"Malformed element string"', ')', '# x-y-z', 'if', 're', '.', 'search', '(', "r'\\w+-\\w+-\\w+'", ',', 'compact_el', ')', ':', 'raise', 'RuntimeError', '(', '"Malformed element string"', ')', '# Split on commas', 'tmp_list', '=', 'compact_el', '.', 'split', '(', "','", ')', '# Now go over each one and replace elements with ints', 'el_list', '=', '[', ']', 'for', 'el', 'in', 'tmp_list', ':', 'if', 'not', "'-'", 'in', 'el', ':', 'el_list', '.', 'append', '(', '_Z_from_str', '(', 'el', ')', ')', 'else', ':', 'begin', ',', 'end', '=', 'el', '.', 'split', '(', "'-'", ')', 'begin', '=', '_Z_from_str', '(', 'begin', ')', 'end', '=', '_Z_from_str', '(', 'end', ')', 'el_list', '.', 'extend', '(', 'list', '(', 'range', '(', 'begin', ',', 'end', '+', '1', ')', ')', ')', 'if', 'as_str', 'is', 'True', ':', 'return', '[', 'str', '(', 'x', ')', 'for', 'x', 'in', 'el_list', ']', 'else', ':', 'return', 'el_list']
Create a list of integers given a string or list of compacted elements This is partly the opposite of compact_elements, but is more flexible. compact_el can be a list or a string. If compact_el is a list, each element is processed individually as a string (meaning list elements can contain commas, ranges, etc) If compact_el is a string, it is split by commas and then each section is processed. In all cases, element symbols (case insensitive) and Z numbers (as integers or strings) can be used interchangeably. Ranges are also allowed in both lists and strings. Some examples: "H-Li,C-O,Ne" will return [1, 2, 3, 6, 7, 8, 10] "H-N,8,Na-12" will return [1, 2, 3, 4, 5, 6, 7, 8, 11, 12] ['C', 'Al-15,S', 17, '18'] will return [6, 13, 14, 15, 16, 17, 18] If as_str is True, the list will contain strings of the integers (ie, the first example above will return ['1', '2', '3', '6', '7', '8', '10']
['Create', 'a', 'list', 'of', 'integers', 'given', 'a', 'string', 'or', 'list', 'of', 'compacted', 'elements']
train
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/misc.py#L110-L190
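The expansion logic itself is small once symbol lookup is factored out. A scaled-down version over a toy symbol table (the real function resolves the whole periodic table case-insensitively and rejects malformed strings, which this sketch skips):

```python
SYMBOLS = {"h": 1, "he": 2, "li": 3, "be": 4, "b": 5, "c": 6, "n": 7, "o": 8}

def z_from_str(tok):
    return int(tok) if tok.isdigit() else SYMBOLS[tok.lower()]

def expand(spec):
    out = []
    for part in spec.replace(" ", "").split(","):
        if "-" in part:
            lo, hi = (z_from_str(p) for p in part.split("-"))
            out.extend(range(lo, hi + 1))   # inclusive range, as in the record
        else:
            out.append(z_from_str(part))
    return out

print(expand("H-Li,C-O,5"))  # [1, 2, 3, 6, 7, 8, 5]
```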
1,793
squaresLab/BugZoo
bugzoo/exceptions.py
BugZooException.from_message_and_data
def from_message_and_data(cls, message: str, data: Dict[str, Any] ) -> 'BugZooException': """ Reproduces an exception from the message and data contained in its dictionary-based description. """ return cls(message)
python
def from_message_and_data(cls, message: str, data: Dict[str, Any] ) -> 'BugZooException': """ Reproduces an exception from the message and data contained in its dictionary-based description. """ return cls(message)
['def', 'from_message_and_data', '(', 'cls', ',', 'message', ':', 'str', ',', 'data', ':', 'Dict', '[', 'str', ',', 'Any', ']', ')', '->', "'BugZooException'", ':', 'return', 'cls', '(', 'message', ')']
Reproduces an exception from the message and data contained in its dictionary-based description.
['Reproduces', 'an', 'exception', 'from', 'the', 'message', 'and', 'data', 'contained', 'in', 'its', 'dictionary', '-', 'based', 'description', '.']
train
https://github.com/squaresLab/BugZoo/blob/68664f1977e85b37a78604f7c570382ffae1fa3b/bugzoo/exceptions.py#L66-L74
1,794
bcbio/bcbio-nextgen
bcbio/structural/metasv.py
run
def run(items): """Run MetaSV if we have enough supported callers, adding output to the set of calls. """ assert len(items) == 1, "Expect one input to MetaSV ensemble calling" data = items[0] work_dir = _sv_workdir(data) out_file = os.path.join(work_dir, "variants.vcf.gz") cmd = _get_cmd() + ["--sample", dd.get_sample_name(data), "--reference", dd.get_ref_file(data), "--bam", dd.get_align_bam(data), "--outdir", work_dir] methods = [] for call in data.get("sv", []): vcf_file = call.get("vcf_file", call.get("vrn_file", None)) if call["variantcaller"] in SUPPORTED and call["variantcaller"] not in methods and vcf_file is not None: methods.append(call["variantcaller"]) cmd += ["--%s_vcf" % call["variantcaller"], vcf_file] if len(methods) >= MIN_CALLERS: if not utils.file_exists(out_file): tx_work_dir = utils.safe_makedir(os.path.join(work_dir, "raw")) ins_stats = shared.calc_paired_insert_stats_save(dd.get_align_bam(data), os.path.join(tx_work_dir, "insert-stats.yaml")) cmd += ["--workdir", tx_work_dir, "--num_threads", str(dd.get_num_cores(data))] cmd += ["--spades", utils.which("spades.py"), "--age", utils.which("age_align")] cmd += ["--assembly_max_tools=1", "--assembly_pad=500"] cmd += ["--boost_sc", "--isize_mean", ins_stats["mean"], "--isize_sd", ins_stats["std"]] do.run(cmd, "Combine variant calls with MetaSV") filters = ("(NUM_SVTOOLS = 1 && ABS(SVLEN)>50000) || " "(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_FLANK_PERCENT>80) || " "(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_NUM_GOOD_REC=0) || " "(ABS(SVLEN)<4000 && BA_NUM_GOOD_REC>2)") filter_file = vfilter.cutoff_w_expression(out_file, filters, data, name="ReassemblyStats", limit_regions=None) effects_vcf, _ = effects.add_to_vcf(filter_file, data, "snpeff") data["sv"].append({"variantcaller": "metasv", "vrn_file": effects_vcf or filter_file}) return [data]
python
def run(items): """Run MetaSV if we have enough supported callers, adding output to the set of calls. """ assert len(items) == 1, "Expect one input to MetaSV ensemble calling" data = items[0] work_dir = _sv_workdir(data) out_file = os.path.join(work_dir, "variants.vcf.gz") cmd = _get_cmd() + ["--sample", dd.get_sample_name(data), "--reference", dd.get_ref_file(data), "--bam", dd.get_align_bam(data), "--outdir", work_dir] methods = [] for call in data.get("sv", []): vcf_file = call.get("vcf_file", call.get("vrn_file", None)) if call["variantcaller"] in SUPPORTED and call["variantcaller"] not in methods and vcf_file is not None: methods.append(call["variantcaller"]) cmd += ["--%s_vcf" % call["variantcaller"], vcf_file] if len(methods) >= MIN_CALLERS: if not utils.file_exists(out_file): tx_work_dir = utils.safe_makedir(os.path.join(work_dir, "raw")) ins_stats = shared.calc_paired_insert_stats_save(dd.get_align_bam(data), os.path.join(tx_work_dir, "insert-stats.yaml")) cmd += ["--workdir", tx_work_dir, "--num_threads", str(dd.get_num_cores(data))] cmd += ["--spades", utils.which("spades.py"), "--age", utils.which("age_align")] cmd += ["--assembly_max_tools=1", "--assembly_pad=500"] cmd += ["--boost_sc", "--isize_mean", ins_stats["mean"], "--isize_sd", ins_stats["std"]] do.run(cmd, "Combine variant calls with MetaSV") filters = ("(NUM_SVTOOLS = 1 && ABS(SVLEN)>50000) || " "(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_FLANK_PERCENT>80) || " "(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_NUM_GOOD_REC=0) || " "(ABS(SVLEN)<4000 && BA_NUM_GOOD_REC>2)") filter_file = vfilter.cutoff_w_expression(out_file, filters, data, name="ReassemblyStats", limit_regions=None) effects_vcf, _ = effects.add_to_vcf(filter_file, data, "snpeff") data["sv"].append({"variantcaller": "metasv", "vrn_file": effects_vcf or filter_file}) return [data]
['def', 'run', '(', 'items', ')', ':', 'assert', 'len', '(', 'items', ')', '==', '1', ',', '"Expect one input to MetaSV ensemble calling"', 'data', '=', 'items', '[', '0', ']', 'work_dir', '=', '_sv_workdir', '(', 'data', ')', 'out_file', '=', 'os', '.', 'path', '.', 'join', '(', 'work_dir', ',', '"variants.vcf.gz"', ')', 'cmd', '=', '_get_cmd', '(', ')', '+', '[', '"--sample"', ',', 'dd', '.', 'get_sample_name', '(', 'data', ')', ',', '"--reference"', ',', 'dd', '.', 'get_ref_file', '(', 'data', ')', ',', '"--bam"', ',', 'dd', '.', 'get_align_bam', '(', 'data', ')', ',', '"--outdir"', ',', 'work_dir', ']', 'methods', '=', '[', ']', 'for', 'call', 'in', 'data', '.', 'get', '(', '"sv"', ',', '[', ']', ')', ':', 'vcf_file', '=', 'call', '.', 'get', '(', '"vcf_file"', ',', 'call', '.', 'get', '(', '"vrn_file"', ',', 'None', ')', ')', 'if', 'call', '[', '"variantcaller"', ']', 'in', 'SUPPORTED', 'and', 'call', '[', '"variantcaller"', ']', 'not', 'in', 'methods', 'and', 'vcf_file', 'is', 'not', 'None', ':', 'methods', '.', 'append', '(', 'call', '[', '"variantcaller"', ']', ')', 'cmd', '+=', '[', '"--%s_vcf"', '%', 'call', '[', '"variantcaller"', ']', ',', 'vcf_file', ']', 'if', 'len', '(', 'methods', ')', '>=', 'MIN_CALLERS', ':', 'if', 'not', 'utils', '.', 'file_exists', '(', 'out_file', ')', ':', 'tx_work_dir', '=', 'utils', '.', 'safe_makedir', '(', 'os', '.', 'path', '.', 'join', '(', 'work_dir', ',', '"raw"', ')', ')', 'ins_stats', '=', 'shared', '.', 'calc_paired_insert_stats_save', '(', 'dd', '.', 'get_align_bam', '(', 'data', ')', ',', 'os', '.', 'path', '.', 'join', '(', 'tx_work_dir', ',', '"insert-stats.yaml"', ')', ')', 'cmd', '+=', '[', '"--workdir"', ',', 'tx_work_dir', ',', '"--num_threads"', ',', 'str', '(', 'dd', '.', 'get_num_cores', '(', 'data', ')', ')', ']', 'cmd', '+=', '[', '"--spades"', ',', 'utils', '.', 'which', '(', '"spades.py"', ')', ',', '"--age"', ',', 'utils', '.', 'which', '(', '"age_align"', ')', ']', 'cmd', '+=', '[', '"--assembly_max_tools=1"', ',', '"--assembly_pad=500"', ']', 'cmd', '+=', '[', '"--boost_sc"', ',', '"--isize_mean"', ',', 'ins_stats', '[', '"mean"', ']', ',', '"--isize_sd"', ',', 'ins_stats', '[', '"std"', ']', ']', 'do', '.', 'run', '(', 'cmd', ',', '"Combine variant calls with MetaSV"', ')', 'filters', '=', '(', '"(NUM_SVTOOLS = 1 && ABS(SVLEN)>50000) || "', '"(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_FLANK_PERCENT>80) || "', '"(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_NUM_GOOD_REC=0) || "', '"(ABS(SVLEN)<4000 && BA_NUM_GOOD_REC>2)"', ')', 'filter_file', '=', 'vfilter', '.', 'cutoff_w_expression', '(', 'out_file', ',', 'filters', ',', 'data', ',', 'name', '=', '"ReassemblyStats"', ',', 'limit_regions', '=', 'None', ')', 'effects_vcf', ',', '_', '=', 'effects', '.', 'add_to_vcf', '(', 'filter_file', ',', 'data', ',', '"snpeff"', ')', 'data', '[', '"sv"', ']', '.', 'append', '(', '{', '"variantcaller"', ':', '"metasv"', ',', '"vrn_file"', ':', 'effects_vcf', 'or', 'filter_file', '}', ')', 'return', '[', 'data', ']']
Run MetaSV if we have enough supported callers, adding output to the set of calls.
['Run', 'MetaSV', 'if', 'we', 'have', 'enough', 'supported', 'callers', 'adding', 'output', 'to', 'the', 'set', 'of', 'calls', '.']
train
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/metasv.py#L18-L52
1,795
SetBased/py-stratum
pystratum/Constants.py
Constants._read_configuration_file
def _read_configuration_file(self, config_filename): """ Reads parameters from the configuration file. :param str config_filename: The name of the configuration file. """ config = configparser.ConfigParser() config.read(config_filename) self._constants_filename = config.get('constants', 'columns') self._prefix = config.get('constants', 'prefix') self._class_name = config.get('constants', 'class')
python
def _read_configuration_file(self, config_filename): """ Reads parameters from the configuration file. :param str config_filename: The name of the configuration file. """ config = configparser.ConfigParser() config.read(config_filename) self._constants_filename = config.get('constants', 'columns') self._prefix = config.get('constants', 'prefix') self._class_name = config.get('constants', 'class')
['def', '_read_configuration_file', '(', 'self', ',', 'config_filename', ')', ':', 'config', '=', 'configparser', '.', 'ConfigParser', '(', ')', 'config', '.', 'read', '(', 'config_filename', ')', 'self', '.', '_constants_filename', '=', 'config', '.', 'get', '(', "'constants'", ',', "'columns'", ')', 'self', '.', '_prefix', '=', 'config', '.', 'get', '(', "'constants'", ',', "'prefix'", ')', 'self', '.', '_class_name', '=', 'config', '.', 'get', '(', "'constants'", ',', "'class'", ')']
Reads parameters from the configuration file.

:param str config_filename: The name of the configuration file.
['Reads', 'parameters', 'from', 'the', 'configuration', 'file', '.']
train
https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/Constants.py#L131-L142
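For context, a minimal configuration exercising the three options _read_configuration_file reads. The [constants] section and option names match the source above; the file contents themselves are invented for illustration.

import configparser, textwrap

ini_text = textwrap.dedent("""\
    [constants]
    columns = etc/constants.py
    prefix  = C_
    class   = app.Constants
""")

config = configparser.ConfigParser()
config.read_string(ini_text)               # stands in for config.read(filename)
print(config.get('constants', 'columns'))  # -> etc/constants.py
print(config.get('constants', 'prefix'))   # -> C_
print(config.get('constants', 'class'))    # -> app.Constants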
1,796
fjwCode/cerium
cerium/androiddriver.py
AndroidDriver.swipe_up
def swipe_up(self, width: int = 1080, length: int = 1920) -> None:
    '''Swipe up.'''
    self.swipe(0.5*width, 0.8*length, 0.5*width, 0.2*length)
python
def swipe_up(self, width: int = 1080, length: int = 1920) -> None:
    '''Swipe up.'''
    self.swipe(0.5*width, 0.8*length, 0.5*width, 0.2*length)
['def', 'swipe_up', '(', 'self', ',', 'width', ':', 'int', '=', '1080', ',', 'length', ':', 'int', '=', '1920', ')', '->', 'None', ':', 'self', '.', 'swipe', '(', '0.5', '*', 'width', ',', '0.8', '*', 'length', ',', '0.5', '*', 'width', ',', '0.2', '*', 'length', ')']
Swipe up.
['Swipe', 'up', '.']
train
https://github.com/fjwCode/cerium/blob/f6e06e0dcf83a0bc924828e9d6cb81383ed2364f/cerium/androiddriver.py#L825-L827
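The geometry of swipe_up in plain numbers, as a standalone sketch: the gesture travels up the horizontal centre of the screen, from 80% of the height to 20%. Screen dimensions are the defaults from the signature above.

def swipe_up_coords(width=1080, length=1920):
    # Start low on the centre line, end high on the centre line.
    start = (0.5 * width, 0.8 * length)
    end = (0.5 * width, 0.2 * length)
    return start, end

print(swipe_up_coords())  # ((540.0, 1536.0), (540.0, 384.0))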
1,797
PrefPy/prefpy
prefpy/gmmra.py
GMMPLAggregator._bot
def _bot(self, k):
    """
    Description:
        Bottom k breaking
    Parameters:
        k: the number of alternatives to break from lowest rank
    """
    if k < 2:
        raise ValueError("k smaller than 2")
    G = np.ones((self.m, self.m))
    np.fill_diagonal(G, 0)
    for i in range(self.m):
        for j in range(self.m):
            if i == j:
                continue
            if i <= k and j <= k:
                G[i][j] = 0
    return G
python
def _bot(self, k):
    """
    Description:
        Bottom k breaking
    Parameters:
        k: the number of alternatives to break from lowest rank
    """
    if k < 2:
        raise ValueError("k smaller than 2")
    G = np.ones((self.m, self.m))
    np.fill_diagonal(G, 0)
    for i in range(self.m):
        for j in range(self.m):
            if i == j:
                continue
            if i <= k and j <= k:
                G[i][j] = 0
    return G
['def', '_bot', '(', 'self', ',', 'k', ')', ':', 'if', 'k', '<', '2', ':', 'raise', 'ValueError', '(', '"k smaller than 2"', ')', 'G', '=', 'np', '.', 'ones', '(', '(', 'self', '.', 'm', ',', 'self', '.', 'm', ')', ')', 'np', '.', 'fill_diagonal', '(', 'G', ',', '0', ')', 'for', 'i', 'in', 'range', '(', 'self', '.', 'm', ')', ':', 'for', 'j', 'in', 'range', '(', 'self', '.', 'm', ')', ':', 'if', 'i', '==', 'j', ':', 'continue', 'if', 'i', '<=', 'k', 'and', 'j', '<=', 'k', ':', 'G', '[', 'i', ']', '[', 'j', ']', '=', '0', 'return', 'G']
Description:
    Bottom k breaking
Parameters:
    k: the number of alternatives to break from lowest rank
['Description', ':', 'Bottom', 'k', 'breaking', 'Parameters', ':', 'k', ':', 'the', 'number', 'of', 'alternatives', 'to', 'break', 'from', 'lowest', 'rank']
train
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/gmmra.py#L47-L64
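A standalone illustration of the breaking matrix _bot constructs (same logic, lifted outside the class, with m passed explicitly rather than taken from self). For m=4, k=2 every off-diagonal entry whose row and column indices are both <= k is zeroed:

import numpy as np

def bot_breaking(m, k):
    if k < 2:
        raise ValueError("k smaller than 2")
    G = np.ones((m, m))
    np.fill_diagonal(G, 0)
    for i in range(m):
        for j in range(m):
            if i != j and i <= k and j <= k:
                G[i][j] = 0
    return G

print(bot_breaking(4, 2))
# [[0. 0. 0. 1.]
#  [0. 0. 0. 1.]
#  [0. 0. 0. 1.]
#  [1. 1. 1. 0.]]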
1,798
yhat/pandasql
pandasql/sqldf.py
get_outer_frame_variables
def get_outer_frame_variables():
    """ Get a dict of local and global variables of the first outer frame
    from another file. """
    cur_filename = inspect.getframeinfo(inspect.currentframe()).filename
    outer_frame = next(f
                       for f in inspect.getouterframes(inspect.currentframe())
                       if f.filename != cur_filename)
    variables = {}
    variables.update(outer_frame.frame.f_globals)
    variables.update(outer_frame.frame.f_locals)
    return variables
python
def get_outer_frame_variables():
    """ Get a dict of local and global variables of the first outer frame
    from another file. """
    cur_filename = inspect.getframeinfo(inspect.currentframe()).filename
    outer_frame = next(f
                       for f in inspect.getouterframes(inspect.currentframe())
                       if f.filename != cur_filename)
    variables = {}
    variables.update(outer_frame.frame.f_globals)
    variables.update(outer_frame.frame.f_locals)
    return variables
['def', 'get_outer_frame_variables', '(', ')', ':', 'cur_filename', '=', 'inspect', '.', 'getframeinfo', '(', 'inspect', '.', 'currentframe', '(', ')', ')', '.', 'filename', 'outer_frame', '=', 'next', '(', 'f', 'for', 'f', 'in', 'inspect', '.', 'getouterframes', '(', 'inspect', '.', 'currentframe', '(', ')', ')', 'if', 'f', '.', 'filename', '!=', 'cur_filename', ')', 'variables', '=', '{', '}', 'variables', '.', 'update', '(', 'outer_frame', '.', 'frame', '.', 'f_globals', ')', 'variables', '.', 'update', '(', 'outer_frame', '.', 'frame', '.', 'f_locals', ')', 'return', 'variables']
Get a dict of local and global variables of the first outer frame from another file.
['Get', 'a', 'dict', 'of', 'local', 'and', 'global', 'variables', 'of', 'the', 'first', 'outer', 'frame', 'from', 'another', 'file', '.']
train
https://github.com/yhat/pandasql/blob/e799c6f53be9653e8998a25adb5e2f1643442699/pandasql/sqldf.py#L98-L107
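A simplified, self-contained sketch of the same frame-walking idea. The source skips stack frames whose filename matches its own module; this demo takes the weaker but easy-to-run shortcut of merging the immediate caller's globals and locals, which shows why the technique lets pandasql resolve a DataFrame by the name it has in the caller's scope:

import inspect

def caller_variables():
    frame = inspect.currentframe().f_back  # the immediate caller's frame
    variables = {}
    variables.update(frame.f_globals)
    variables.update(frame.f_locals)       # locals shadow globals, as above
    return variables

def demo():
    df_name = "my_table"
    return caller_variables()["df_name"]

print(demo())  # -> my_table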
1,799
blockstack/blockstack-core
blockstack/lib/atlas.py
atlas_peer_download_zonefile_inventory
def atlas_peer_download_zonefile_inventory(my_hostport, peer_hostport, maxlen, bit_offset=0, timeout=None, peer_table={}):
    """
    Get the zonefile inventory from the remote peer.
    Start from the given bit_offset.

    NOTE: this doesn't update the peer table health by default;
    you'll have to explicitly pass in a peer table (i.e. setting
    to {} ensures that nothing happens).
    """
    if timeout is None:
        timeout = atlas_inv_timeout()

    interval = 524288   # number of bits in 64KB
    peer_inv = ""

    log.debug("Download zonefile inventory %s-%s from %s" % (bit_offset, maxlen, peer_hostport))

    if bit_offset > maxlen:
        # synced already
        return peer_inv

    for offset in xrange(bit_offset, maxlen, interval):
        next_inv = atlas_peer_get_zonefile_inventory_range(my_hostport, peer_hostport, offset, interval, timeout=timeout, peer_table=peer_table)
        if next_inv is None:
            # partial failure
            log.debug("Failed to sync inventory for %s from %s to %s" % (peer_hostport, offset, offset + interval))
            break

        peer_inv += next_inv
        if len(next_inv) < interval:
            # end-of-interval
            break

    return peer_inv
python
def atlas_peer_download_zonefile_inventory(my_hostport, peer_hostport, maxlen, bit_offset=0, timeout=None, peer_table={}):
    """
    Get the zonefile inventory from the remote peer.
    Start from the given bit_offset.

    NOTE: this doesn't update the peer table health by default;
    you'll have to explicitly pass in a peer table (i.e. setting
    to {} ensures that nothing happens).
    """
    if timeout is None:
        timeout = atlas_inv_timeout()

    interval = 524288   # number of bits in 64KB
    peer_inv = ""

    log.debug("Download zonefile inventory %s-%s from %s" % (bit_offset, maxlen, peer_hostport))

    if bit_offset > maxlen:
        # synced already
        return peer_inv

    for offset in xrange(bit_offset, maxlen, interval):
        next_inv = atlas_peer_get_zonefile_inventory_range(my_hostport, peer_hostport, offset, interval, timeout=timeout, peer_table=peer_table)
        if next_inv is None:
            # partial failure
            log.debug("Failed to sync inventory for %s from %s to %s" % (peer_hostport, offset, offset + interval))
            break

        peer_inv += next_inv
        if len(next_inv) < interval:
            # end-of-interval
            break

    return peer_inv
['def', 'atlas_peer_download_zonefile_inventory', '(', 'my_hostport', ',', 'peer_hostport', ',', 'maxlen', ',', 'bit_offset', '=', '0', ',', 'timeout', '=', 'None', ',', 'peer_table', '=', '{', '}', ')', ':', 'if', 'timeout', 'is', 'None', ':', 'timeout', '=', 'atlas_inv_timeout', '(', ')', 'interval', '=', '524288', '# number of bits in 64KB', 'peer_inv', '=', '""', 'log', '.', 'debug', '(', '"Download zonefile inventory %s-%s from %s"', '%', '(', 'bit_offset', ',', 'maxlen', ',', 'peer_hostport', ')', ')', 'if', 'bit_offset', '>', 'maxlen', ':', '# synced already', 'return', 'peer_inv', 'for', 'offset', 'in', 'xrange', '(', 'bit_offset', ',', 'maxlen', ',', 'interval', ')', ':', 'next_inv', '=', 'atlas_peer_get_zonefile_inventory_range', '(', 'my_hostport', ',', 'peer_hostport', ',', 'offset', ',', 'interval', ',', 'timeout', '=', 'timeout', ',', 'peer_table', '=', 'peer_table', ')', 'if', 'next_inv', 'is', 'None', ':', '# partial failure', 'log', '.', 'debug', '(', '"Failed to sync inventory for %s from %s to %s"', '%', '(', 'peer_hostport', ',', 'offset', ',', 'offset', '+', 'interval', ')', ')', 'break', 'peer_inv', '+=', 'next_inv', 'if', 'len', '(', 'next_inv', ')', '<', 'interval', ':', '# end-of-interval', 'break', 'return', 'peer_inv']
Get the zonefile inventory from the remote peer.
Start from the given bit_offset.

NOTE: this doesn't update the peer table health by default;
you'll have to explicitly pass in a peer table (i.e. setting to {}
ensures that nothing happens).
['Get', 'the', 'zonefile', 'inventory', 'from', 'the', 'remote', 'peer', 'Start', 'from', 'the', 'given', 'bit_offset']
train
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L1993-L2027
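A minimal sketch of the chunked-download pattern used above, with a stand-in fetch_range in place of atlas_peer_get_zonefile_inventory_range: walk the inventory in fixed windows and stop early on a failed or short chunk.

INTERVAL = 524288  # number of bits in 64KB, as in the source

def download_inventory(fetch_range, maxlen, bit_offset=0):
    inv = ""
    if bit_offset > maxlen:
        return inv  # already synced
    for offset in range(bit_offset, maxlen, INTERVAL):
        chunk = fetch_range(offset, INTERVAL)
        if chunk is None:          # partial failure: keep what we have
            break
        inv += chunk
        if len(chunk) < INTERVAL:  # short read means end of inventory
            break
    return inv

# Fake peer serving 600,000 "bits" of inventory:
total = 600000
print(len(download_inventory(
    lambda off, n: "1" * min(n, total - off), total)))  # -> 600000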