column                      type            min      max
Unnamed: 0                  int64           0        10k
repository_name             stringlengths   7        54
func_path_in_repository     stringlengths   5        223
func_name                   stringlengths   1        134
whole_func_string           stringlengths   100      30.3k
language                    stringclasses   1 value
func_code_string            stringlengths   100      30.3k
func_code_tokens            stringlengths   138      33.2k
func_documentation_string   stringlengths   1        15k
func_documentation_tokens   stringlengths   5        5.14k
split_name                  stringclasses   1 value
func_code_url               stringlengths   91       315
2,100
commonsense/metanl
metanl/nltk_morphy.py
normalize_topic
def normalize_topic(topic):
    """
    Get a canonical representation of a Wikipedia topic, which may include
    a disambiguation string in parentheses.

    Returns (name, disambig), where "name" is the normalized topic name,
    and "disambig" is a string corresponding to the disambiguation text or
    None.
    """
    # find titles of the form Foo (bar)
    topic = topic.replace('_', ' ')
    match = re.match(r'([^(]+) \(([^)]+)\)', topic)
    if not match:
        return normalize(topic), None
    else:
        return normalize(match.group(1)), 'n/' + match.group(2).strip(' _')
python
def normalize_topic(topic):
    """
    Get a canonical representation of a Wikipedia topic, which may include
    a disambiguation string in parentheses.

    Returns (name, disambig), where "name" is the normalized topic name,
    and "disambig" is a string corresponding to the disambiguation text or
    None.
    """
    # find titles of the form Foo (bar)
    topic = topic.replace('_', ' ')
    match = re.match(r'([^(]+) \(([^)]+)\)', topic)
    if not match:
        return normalize(topic), None
    else:
        return normalize(match.group(1)), 'n/' + match.group(2).strip(' _')
['def', 'normalize_topic', '(', 'topic', ')', ':', '# find titles of the form Foo (bar)', 'topic', '=', 'topic', '.', 'replace', '(', "'_'", ',', "' '", ')', 'match', '=', 're', '.', 'match', '(', "r'([^(]+) \\(([^)]+)\\)'", ',', 'topic', ')', 'if', 'not', 'match', ':', 'return', 'normalize', '(', 'topic', ')', ',', 'None', 'else', ':', 'return', 'normalize', '(', 'match', '.', 'group', '(', '1', ')', ')', ',', "'n/'", '+', 'match', '.', 'group', '(', '2', ')', '.', 'strip', '(', "' _'", ')']
Get a canonical representation of a Wikipedia topic, which may include a disambiguation string in parentheses. Returns (name, disambig), where "name" is the normalized topic name, and "disambig" is a string corresponding to the disambiguation text or None.
['Get', 'a', 'canonical', 'representation', 'of', 'a', 'Wikipedia', 'topic', 'which', 'may', 'include', 'a', 'disambiguation', 'string', 'in', 'parentheses', '.']
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/nltk_morphy.py#L205-L220
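A self-contained sketch of how this record's function behaves (not part of the dataset row). The `normalize` helper below is a hypothetical stand-in for metanl's real normalizer, which lemmatizes; here it only lowercases and trims so the example runs on its own.

import re

def normalize(text):
    # hypothetical stand-in for metanl's normalizer (assumption, not the
    # real implementation); lowercase + trim keeps the sketch self-contained
    return text.strip().lower()

def normalize_topic(topic):
    # find titles of the form Foo (bar)
    topic = topic.replace('_', ' ')
    match = re.match(r'([^(]+) \(([^)]+)\)', topic)
    if not match:
        return normalize(topic), None
    return normalize(match.group(1)), 'n/' + match.group(2).strip(' _')

print(normalize_topic('Mercury_(planet)'))  # ('mercury', 'n/planet')
print(normalize_topic('Mercury'))           # ('mercury', None)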
2,101
rshk/python-libxdo
xdo/xdo.py
_errcheck
def _errcheck(result, func, arguments):
    """
    Error checker for functions returning an integer indicating
    success (0) / failure (1).

    Raises an XdoException in case of error, otherwise just returns
    ``None`` (returning the original code, 0, would be useless anyway).
    """
    if result != 0:
        raise XdoException(
            'Function {0} returned error code {1}'
            .format(func.__name__, result))
    return None
python
def _errcheck(result, func, arguments):
    """
    Error checker for functions returning an integer indicating
    success (0) / failure (1).

    Raises an XdoException in case of error, otherwise just returns
    ``None`` (returning the original code, 0, would be useless anyway).
    """
    if result != 0:
        raise XdoException(
            'Function {0} returned error code {1}'
            .format(func.__name__, result))
    return None
['def', '_errcheck', '(', 'result', ',', 'func', ',', 'arguments', ')', ':', 'if', 'result', '!=', '0', ':', 'raise', 'XdoException', '(', "'Function {0} returned error code {1}'", '.', 'format', '(', 'func', '.', '__name__', ',', 'result', ')', ')', 'return', 'None']
Error checker for functions returning an integer indicating success (0) / failure (1). Raises an XdoException in case of error, otherwise just returns ``None`` (returning the original code, 0, would be useless anyway).
['Error', 'checker', 'for', 'functions', 'returning', 'an', 'integer', 'indicating', 'success', '(', '0', ')', '/', 'failure', '(', '1', ')', '.']
train
https://github.com/rshk/python-libxdo/blob/84cafa5943b005bc423edd28203a5266b3579ac3/xdo/xdo.py#L37-L51
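The `(result, func, arguments)` triple matches the protocol ctypes uses for an `errcheck` callback, which is presumably how python-libxdo wires this in. A minimal sketch that exercises the checker directly, with a hypothetical stand-in for a real libxdo call:

class XdoException(Exception):
    pass

def _errcheck(result, func, arguments):
    if result != 0:
        raise XdoException(
            'Function {0} returned error code {1}'
            .format(func.__name__, result))
    return None

def fake_xdo_call():
    # hypothetical stand-in for a C binding that returns 0 on success
    return 1

try:
    _errcheck(fake_xdo_call(), fake_xdo_call, ())
except XdoException as e:
    print(e)  # Function fake_xdo_call returned error code 1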
2,102
twilio/twilio-python
twilio/rest/sync/v1/service/__init__.py
ServiceInstance.update
def update(self, webhook_url=values.unset, friendly_name=values.unset,
           reachability_webhooks_enabled=values.unset,
           acl_enabled=values.unset):
    """
    Update the ServiceInstance

    :param unicode webhook_url: A URL that will receive event updates when
        objects are manipulated.
    :param unicode friendly_name: Human-readable name for this service instance
    :param bool reachability_webhooks_enabled: True or false - controls whether
        this instance fires webhooks when client endpoints connect to Sync
    :param bool acl_enabled: true or false - determines whether token identities
        must be granted access to Sync objects via the Permissions API in this
        Service.

    :returns: Updated ServiceInstance
    :rtype: twilio.rest.sync.v1.service.ServiceInstance
    """
    return self._proxy.update(
        webhook_url=webhook_url,
        friendly_name=friendly_name,
        reachability_webhooks_enabled=reachability_webhooks_enabled,
        acl_enabled=acl_enabled,
    )
python
def update(self, webhook_url=values.unset, friendly_name=values.unset,
           reachability_webhooks_enabled=values.unset,
           acl_enabled=values.unset):
    """
    Update the ServiceInstance

    :param unicode webhook_url: A URL that will receive event updates when
        objects are manipulated.
    :param unicode friendly_name: Human-readable name for this service instance
    :param bool reachability_webhooks_enabled: True or false - controls whether
        this instance fires webhooks when client endpoints connect to Sync
    :param bool acl_enabled: true or false - determines whether token identities
        must be granted access to Sync objects via the Permissions API in this
        Service.

    :returns: Updated ServiceInstance
    :rtype: twilio.rest.sync.v1.service.ServiceInstance
    """
    return self._proxy.update(
        webhook_url=webhook_url,
        friendly_name=friendly_name,
        reachability_webhooks_enabled=reachability_webhooks_enabled,
        acl_enabled=acl_enabled,
    )
['def', 'update', '(', 'self', ',', 'webhook_url', '=', 'values', '.', 'unset', ',', 'friendly_name', '=', 'values', '.', 'unset', ',', 'reachability_webhooks_enabled', '=', 'values', '.', 'unset', ',', 'acl_enabled', '=', 'values', '.', 'unset', ')', ':', 'return', 'self', '.', '_proxy', '.', 'update', '(', 'webhook_url', '=', 'webhook_url', ',', 'friendly_name', '=', 'friendly_name', ',', 'reachability_webhooks_enabled', '=', 'reachability_webhooks_enabled', ',', 'acl_enabled', '=', 'acl_enabled', ',', ')']
Update the ServiceInstance :param unicode webhook_url: A URL that will receive event updates when objects are manipulated. :param unicode friendly_name: Human-readable name for this service instance :param bool reachability_webhooks_enabled: True or false - controls whether this instance fires webhooks when client endpoints connect to Sync :param bool acl_enabled: true or false - determines whether token identities must be granted access to Sync objects via the Permissions API in this Service. :returns: Updated ServiceInstance :rtype: twilio.rest.sync.v1.service.ServiceInstance
['Update', 'the', 'ServiceInstance']
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/sync/v1/service/__init__.py#L513-L532
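A hedged usage sketch: reaching `update` through twilio-python's client, following the `:rtype:` path in the docstring. The account SID, auth token, and service SID are placeholders; this only runs against a real Twilio account.

from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
service = client.sync.v1.services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').update(
    friendly_name='my-sync-service',
    reachability_webhooks_enabled=True,
)
print(service.friendly_name)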
2,103
greyli/flask-avatars
flask_avatars/__init__.py
_Avatars.init_jcrop
def init_jcrop(min_size=None):
    """Initialize jcrop.

    :param min_size: The minimal size of the crop area.
    """
    init_x = current_app.config['AVATARS_CROP_INIT_POS'][0]
    init_y = current_app.config['AVATARS_CROP_INIT_POS'][1]
    init_size = current_app.config['AVATARS_CROP_INIT_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]

    if current_app.config['AVATARS_CROP_MIN_SIZE']:
        min_size = min_size or current_app.config['AVATARS_SIZE_TUPLE'][2]
        min_size_js = 'jcrop_api.setOptions({minSize: [%d, %d]});' % (min_size, min_size)
    else:
        min_size_js = ''

    return Markup('''
<script type="text/javascript">
    jQuery(function ($) {
        // Create variables (in this scope) to hold the API and image size
        var jcrop_api,
            boundx,
            boundy,

            // Grab some information about the preview pane
            $preview = $('#preview-box'),
            $pcnt = $('#preview-box .preview-box'),
            $pimg = $('#preview-box .preview-box img'),

            xsize = $pcnt.width(),
            ysize = $pcnt.height();

        $('#crop-box').Jcrop({
            onChange: updatePreview,
            onSelect: updateCoords,
            setSelect: [%s, %s, %s, %s],
            aspectRatio: 1
        }, function () {
            // Use the API to get the real image size
            var bounds = this.getBounds();
            boundx = bounds[0];
            boundy = bounds[1];
            // Store the API in the jcrop_api variable
            jcrop_api = this;
            %s
            jcrop_api.focus();
            // Move the preview into the jcrop container for css positioning
            $preview.appendTo(jcrop_api.ui.holder);
        });

        function updatePreview(c) {
            if (parseInt(c.w) > 0) {
                var rx = xsize / c.w;
                var ry = ysize / c.h;
                $pimg.css({
                    width: Math.round(rx * boundx) + 'px',
                    height: Math.round(ry * boundy) + 'px',
                    marginLeft: '-' + Math.round(rx * c.x) + 'px',
                    marginTop: '-' + Math.round(ry * c.y) + 'px'
                });
            }
        }
    });

    function updateCoords(c) {
        $('#x').val(c.x);
        $('#y').val(c.y);
        $('#w').val(c.w);
        $('#h').val(c.h);
    }
</script>
''' % (init_x, init_y, init_size, init_size, min_size_js))
python
def init_jcrop(min_size=None):
    """Initialize jcrop.

    :param min_size: The minimal size of the crop area.
    """
    init_x = current_app.config['AVATARS_CROP_INIT_POS'][0]
    init_y = current_app.config['AVATARS_CROP_INIT_POS'][1]
    init_size = current_app.config['AVATARS_CROP_INIT_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]

    if current_app.config['AVATARS_CROP_MIN_SIZE']:
        min_size = min_size or current_app.config['AVATARS_SIZE_TUPLE'][2]
        min_size_js = 'jcrop_api.setOptions({minSize: [%d, %d]});' % (min_size, min_size)
    else:
        min_size_js = ''

    return Markup('''
<script type="text/javascript">
    jQuery(function ($) {
        // Create variables (in this scope) to hold the API and image size
        var jcrop_api,
            boundx,
            boundy,

            // Grab some information about the preview pane
            $preview = $('#preview-box'),
            $pcnt = $('#preview-box .preview-box'),
            $pimg = $('#preview-box .preview-box img'),

            xsize = $pcnt.width(),
            ysize = $pcnt.height();

        $('#crop-box').Jcrop({
            onChange: updatePreview,
            onSelect: updateCoords,
            setSelect: [%s, %s, %s, %s],
            aspectRatio: 1
        }, function () {
            // Use the API to get the real image size
            var bounds = this.getBounds();
            boundx = bounds[0];
            boundy = bounds[1];
            // Store the API in the jcrop_api variable
            jcrop_api = this;
            %s
            jcrop_api.focus();
            // Move the preview into the jcrop container for css positioning
            $preview.appendTo(jcrop_api.ui.holder);
        });

        function updatePreview(c) {
            if (parseInt(c.w) > 0) {
                var rx = xsize / c.w;
                var ry = ysize / c.h;
                $pimg.css({
                    width: Math.round(rx * boundx) + 'px',
                    height: Math.round(ry * boundy) + 'px',
                    marginLeft: '-' + Math.round(rx * c.x) + 'px',
                    marginTop: '-' + Math.round(ry * c.y) + 'px'
                });
            }
        }
    });

    function updateCoords(c) {
        $('#x').val(c.x);
        $('#y').val(c.y);
        $('#w').val(c.w);
        $('#h').val(c.h);
    }
</script>
''' % (init_x, init_y, init_size, init_size, min_size_js))
['def', 'init_jcrop', '(', 'min_size', '=', 'None', ')', ':', 'init_x', '=', 'current_app', '.', 'config', '[', "'AVATARS_CROP_INIT_POS'", ']', '[', '0', ']', 'init_y', '=', 'current_app', '.', 'config', '[', "'AVATARS_CROP_INIT_POS'", ']', '[', '1', ']', 'init_size', '=', 'current_app', '.', 'config', '[', "'AVATARS_CROP_INIT_SIZE'", ']', 'or', 'current_app', '.', 'config', '[', "'AVATARS_SIZE_TUPLE'", ']', '[', '2', ']', 'if', 'current_app', '.', 'config', '[', "'AVATARS_CROP_MIN_SIZE'", ']', ':', 'min_size', '=', 'min_size', 'or', 'current_app', '.', 'config', '[', "'AVATARS_SIZE_TUPLE'", ']', '[', '2', ']', 'min_size_js', '=', "'jcrop_api.setOptions({minSize: [%d, %d]});'", '%', '(', 'min_size', ',', 'min_size', ')', 'else', ':', 'min_size_js', '=', "''", 'return', 'Markup', '(', '\'\'\'\n<script type="text/javascript">\n jQuery(function ($) {\n // Create variables (in this scope) to hold the API and image size\n var jcrop_api,\n boundx,\n boundy,\n\n // Grab some information about the preview pane\n $preview = $(\'#preview-box\'),\n $pcnt = $(\'#preview-box .preview-box\'),\n $pimg = $(\'#preview-box .preview-box img\'),\n\n xsize = $pcnt.width(),\n ysize = $pcnt.height();\n\n $(\'#crop-box\').Jcrop({\n onChange: updatePreview,\n onSelect: updateCoords,\n setSelect: [%s, %s, %s, %s],\n aspectRatio: 1\n }, function () {\n // Use the API to get the real image size\n var bounds = this.getBounds();\n boundx = bounds[0];\n boundy = bounds[1];\n // Store the API in the jcrop_api variable\n jcrop_api = this;\n %s\n jcrop_api.focus();\n // Move the preview into the jcrop container for css positioning\n $preview.appendTo(jcrop_api.ui.holder);\n });\n\n function updatePreview(c) {\n if (parseInt(c.w) > 0) {\n var rx = xsize / c.w;\n var ry = ysize / c.h;\n $pimg.css({\n width: Math.round(rx * boundx) + \'px\',\n height: Math.round(ry * boundy) + \'px\',\n marginLeft: \'-\' + Math.round(rx * c.x) + \'px\',\n marginTop: \'-\' + Math.round(ry * c.y) + \'px\'\n });\n }\n }\n });\n\n function updateCoords(c) {\n $(\'#x\').val(c.x);\n $(\'#y\').val(c.y);\n $(\'#w\').val(c.w);\n $(\'#h\').val(c.h);\n }\n </script>\n \'\'\'', '%', '(', 'init_x', ',', 'init_y', ',', 'init_size', ',', 'init_size', ',', 'min_size_js', ')', ')']
Initialize jcrop. :param min_size: The minimal size of the crop area.
['Initialize', 'jcrop', '.']
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L157-L226
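A minimal wiring sketch, assuming flask-avatars exposes the extension to templates as `avatars` (common for this author's extensions, but an assumption here). The returned Markup is the `<script>` block above, meant to sit next to the `#crop-box` and `#preview-box` elements on the crop page.

from flask import Flask, render_template_string
from flask_avatars import Avatars

app = Flask(__name__)
avatars = Avatars(app)

with app.test_request_context():
    # render just the generated <script> block and show its opening
    print(render_template_string('{{ avatars.init_jcrop() }}')[:60])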
2,104
jobovy/galpy
galpy/util/bovy_coords.py
scalarDecorator
def scalarDecorator(func):
    """Decorator to return scalar outputs as a set"""
    @wraps(func)
    def scalar_wrapper(*args, **kwargs):
        if nu.array(args[0]).shape == ():
            scalarOut = True
            newargs = ()
            for ii in range(len(args)):
                newargs = newargs + (nu.array([args[ii]]),)
            args = newargs
        else:
            scalarOut = False
        result = func(*args, **kwargs)
        if scalarOut:
            out = ()
            for ii in range(result.shape[1]):
                out = out + (result[0, ii],)
            return out
        else:
            return result
    return scalar_wrapper
python
def scalarDecorator(func):
    """Decorator to return scalar outputs as a set"""
    @wraps(func)
    def scalar_wrapper(*args, **kwargs):
        if nu.array(args[0]).shape == ():
            scalarOut = True
            newargs = ()
            for ii in range(len(args)):
                newargs = newargs + (nu.array([args[ii]]),)
            args = newargs
        else:
            scalarOut = False
        result = func(*args, **kwargs)
        if scalarOut:
            out = ()
            for ii in range(result.shape[1]):
                out = out + (result[0, ii],)
            return out
        else:
            return result
    return scalar_wrapper
['def', 'scalarDecorator', '(', 'func', ')', ':', '@', 'wraps', '(', 'func', ')', 'def', 'scalar_wrapper', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'nu', '.', 'array', '(', 'args', '[', '0', ']', ')', '.', 'shape', '==', '(', ')', ':', 'scalarOut', '=', 'True', 'newargs', '=', '(', ')', 'for', 'ii', 'in', 'range', '(', 'len', '(', 'args', ')', ')', ':', 'newargs', '=', 'newargs', '+', '(', 'nu', '.', 'array', '(', '[', 'args', '[', 'ii', ']', ']', ')', ',', ')', 'args', '=', 'newargs', 'else', ':', 'scalarOut', '=', 'False', 'result', '=', 'func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'if', 'scalarOut', ':', 'out', '=', '(', ')', 'for', 'ii', 'in', 'range', '(', 'result', '.', 'shape', '[', '1', ']', ')', ':', 'out', '=', 'out', '+', '(', 'result', '[', '0', ',', 'ii', ']', ',', ')', 'return', 'out', 'else', ':', 'return', 'result', 'return', 'scalar_wrapper']
Decorator to return scalar outputs as a set
['Decorator', 'to', 'return', 'scalar', 'outputs', 'as', 'a', 'set']
train
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/util/bovy_coords.py#L106-L126
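A self-contained demonstration of the decorator on a toy function (`double_and_square` is invented for illustration; `nu` is numpy, matching galpy's alias). Scalar inputs are wrapped into length-1 arrays before the call and unwrapped into plain scalars afterwards; array inputs pass through untouched.

import numpy as nu
from functools import wraps

def scalarDecorator(func):
    # the decorator from the record above, condensed but equivalent
    @wraps(func)
    def scalar_wrapper(*args, **kwargs):
        if nu.array(args[0]).shape == ():
            scalarOut = True
            args = tuple(nu.array([a]) for a in args)
        else:
            scalarOut = False
        result = func(*args, **kwargs)
        if scalarOut:
            return tuple(result[0, ii] for ii in range(result.shape[1]))
        return result
    return scalar_wrapper

@scalarDecorator
def double_and_square(x):  # hypothetical example function
    return nu.stack([2 * x, x ** 2], axis=1)

print(double_and_square(3.0))                 # (6.0, 9.0), plain scalars
print(double_and_square(nu.array([3., 4.])))  # shape (2, 2) array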
2,105
stbraun/fuzzing
features/steps/ft_fuzzer.py
step_impl10
def step_impl10(context):
    """Create application list.

    :param context: test context.
    """
    assert context.app_list and len(
        context.app_list) > 0, "ENSURE: app list is provided."
    assert context.file_list and len(
        context.file_list) > 0, "ENSURE: file list is provided."
    context.fuzz_executor = FuzzExecutor(context.app_list, context.file_list)
    assert context.fuzz_executor, "VERIFY: fuzz executor created."
python
def step_impl10(context):
    """Create application list.

    :param context: test context.
    """
    assert context.app_list and len(
        context.app_list) > 0, "ENSURE: app list is provided."
    assert context.file_list and len(
        context.file_list) > 0, "ENSURE: file list is provided."
    context.fuzz_executor = FuzzExecutor(context.app_list, context.file_list)
    assert context.fuzz_executor, "VERIFY: fuzz executor created."
['def', 'step_impl10', '(', 'context', ')', ':', 'assert', 'context', '.', 'app_list', 'and', 'len', '(', 'context', '.', 'app_list', ')', '>', '0', ',', '"ENSURE: app list is provided."', 'assert', 'context', '.', 'file_list', 'and', 'len', '(', 'context', '.', 'file_list', ')', '>', '0', ',', '"ENSURE: file list is provided."', 'context', '.', 'fuzz_executor', '=', 'FuzzExecutor', '(', 'context', '.', 'app_list', ',', 'context', '.', 'file_list', ')', 'assert', 'context', '.', 'fuzz_executor', ',', '"VERIFY: fuzz executor created."']
Create application list. :param context: test context.
['Create', 'application', 'list', '.']
train
https://github.com/stbraun/fuzzing/blob/974a64472732d4e40db919d242149bf0856fe199/features/steps/ft_fuzzer.py#L120-L130
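A standalone driver for the step (a sketch, not behave itself): `SimpleNamespace` mimics behave's `context` object, and this `FuzzExecutor` is a hypothetical stand-in so the example runs without the fuzzing package installed.

from types import SimpleNamespace

class FuzzExecutor:
    # hypothetical stand-in for fuzzing's real executor (assumption)
    def __init__(self, app_list, file_list):
        self.app_list, self.file_list = app_list, file_list

def step_impl10(context):
    # body as in the record above
    assert context.app_list and len(
        context.app_list) > 0, "ENSURE: app list is provided."
    assert context.file_list and len(
        context.file_list) > 0, "ENSURE: file list is provided."
    context.fuzz_executor = FuzzExecutor(context.app_list, context.file_list)
    assert context.fuzz_executor, "VERIFY: fuzz executor created."

# behave normally supplies `context`; SimpleNamespace mimics it here
context = SimpleNamespace(app_list=['/usr/bin/true'], file_list=['sample.txt'])
step_impl10(context)
print(type(context.fuzz_executor).__name__)  # FuzzExecutor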
2,106
limix/limix-core
limix_core/mean/linear.py
Linear.removeFixedEffect
def removeFixedEffect(self, index=None):
    """
    set sample and trait designs
    F:      NxK sample design
    A:      LxP sample design
    REML:   REML for this term?
    index:  index of which fixed effect to replace. If None, remove last term.
    """
    if self._n_terms == 0:
        pass
    if index is None or index == (self._n_terms - 1):
        self._n_terms -= 1
        F = self._F.pop()  # = self.F[:-1]
        A = self._A.pop()  # = self.A[:-1]
        self._A_identity.pop()  # = self.A_identity[:-1]
        REML_term = self._REML_term.pop()  # = self.REML_term[:-1]
        self._B.pop()  # = self.B[:-1]
        self._n_fixed_effs -= F.shape[1] * A.shape[0]
        if REML_term:
            self._n_fixed_effs_REML -= F.shape[1] * A.shape[0]
        pass
    elif index >= self.n_terms:
        raise Exception("index exceeds max index of terms")
    else:
        raise NotImplementedError("currently only last term can be removed")
    pass
    self._rebuild_indicator()
    self.clear_cache('Fstar', 'Astar', 'Xstar', 'Xhat',
                     'Areml', 'Areml_eigh', 'Areml_chol', 'Areml_inv',
                     'beta_hat', 'B_hat',
                     'LRLdiag_Xhat_tens', 'Areml_grad',
                     'beta_grad', 'Xstar_beta_grad', 'Zstar', 'DLZ')
python
def removeFixedEffect(self, index=None):
    """
    set sample and trait designs
    F:      NxK sample design
    A:      LxP sample design
    REML:   REML for this term?
    index:  index of which fixed effect to replace. If None, remove last term.
    """
    if self._n_terms == 0:
        pass
    if index is None or index == (self._n_terms - 1):
        self._n_terms -= 1
        F = self._F.pop()  # = self.F[:-1]
        A = self._A.pop()  # = self.A[:-1]
        self._A_identity.pop()  # = self.A_identity[:-1]
        REML_term = self._REML_term.pop()  # = self.REML_term[:-1]
        self._B.pop()  # = self.B[:-1]
        self._n_fixed_effs -= F.shape[1] * A.shape[0]
        if REML_term:
            self._n_fixed_effs_REML -= F.shape[1] * A.shape[0]
        pass
    elif index >= self.n_terms:
        raise Exception("index exceeds max index of terms")
    else:
        raise NotImplementedError("currently only last term can be removed")
    pass
    self._rebuild_indicator()
    self.clear_cache('Fstar', 'Astar', 'Xstar', 'Xhat',
                     'Areml', 'Areml_eigh', 'Areml_chol', 'Areml_inv',
                     'beta_hat', 'B_hat',
                     'LRLdiag_Xhat_tens', 'Areml_grad',
                     'beta_grad', 'Xstar_beta_grad', 'Zstar', 'DLZ')
['def', 'removeFixedEffect', '(', 'self', ',', 'index', '=', 'None', ')', ':', 'if', 'self', '.', '_n_terms', '==', '0', ':', 'pass', 'if', 'index', 'is', 'None', 'or', 'index', '==', '(', 'self', '.', '_n_terms', '-', '1', ')', ':', 'self', '.', '_n_terms', '-=', '1', 'F', '=', 'self', '.', '_F', '.', 'pop', '(', ')', '#= self.F[:-1]', 'A', '=', 'self', '.', '_A', '.', 'pop', '(', ')', '#= self.A[:-1]', 'self', '.', '_A_identity', '.', 'pop', '(', ')', '#= self.A_identity[:-1]', 'REML_term', '=', 'self', '.', '_REML_term', '.', 'pop', '(', ')', '# = self.REML_term[:-1]', 'self', '.', '_B', '.', 'pop', '(', ')', '# = self.B[:-1]', 'self', '.', '_n_fixed_effs', '-=', 'F', '.', 'shape', '[', '1', ']', '*', 'A', '.', 'shape', '[', '0', ']', 'if', 'REML_term', ':', 'self', '.', '_n_fixed_effs_REML', '-=', 'F', '.', 'shape', '[', '1', ']', '*', 'A', '.', 'shape', '[', '0', ']', 'pass', 'elif', 'index', '>=', 'self', '.', 'n_terms', ':', 'raise', 'Exception', '(', '"index exceeds max index of terms"', ')', 'else', ':', 'raise', 'NotImplementedError', '(', '"currently only last term can be removed"', ')', 'pass', 'self', '.', '_rebuild_indicator', '(', ')', 'self', '.', 'clear_cache', '(', "'Fstar'", ',', "'Astar'", ',', "'Xstar'", ',', "'Xhat'", ',', "'Areml'", ',', "'Areml_eigh'", ',', "'Areml_chol'", ',', "'Areml_inv'", ',', "'beta_hat'", ',', "'B_hat'", ',', "'LRLdiag_Xhat_tens'", ',', "'Areml_grad'", ',', "'beta_grad'", ',', "'Xstar_beta_grad'", ',', "'Zstar'", ',', "'DLZ'", ')']
set sample and trait designs F: NxK sample design A: LxP sample design REML: REML for this term? index: index of which fixed effect to replace. If None, remove last term.
['set', 'sample', 'and', 'trait', 'designs', 'F', ':', 'NxK', 'sample', 'design', 'A', ':', 'LxP', 'sample', 'design', 'REML', ':', 'REML', 'for', 'this', 'term?', 'index', ':', 'index', 'of', 'which', 'fixed', 'effect', 'to', 'replace', '.', 'If', 'None', 'remove', 'last', 'term', '.']
train
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L219-L251
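A heavily hedged add-then-remove sketch on the `Linear` mean. Only `removeFixedEffect` comes from this record; the constructor keyword and the `addFixedEffect` signature are assumptions. Shapes follow the docstring: F is NxK, A is LxP.

import numpy as np
from limix_core.mean.linear import Linear

N, P, K = 100, 2, 1
mean = Linear(Y=np.random.randn(N, P))               # assumed keyword
mean.addFixedEffect(F=np.ones((N, K)), A=np.eye(P))  # assumed signature
mean.removeFixedEffect()  # index=None: drop the last term, clear caches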
2,107
AnalogJ/lexicon
lexicon/config.py
ConfigResolver.with_config_dir
def with_config_dir(self, dir_path):
    """
    Configure current resolver to use every valid YAML configuration file
    available in the given directory path.
    To be taken into account, a configuration file must conform to the
    following naming convention:
        * 'lexicon.yml' for a global Lexicon config file
          (see with_config_file doc)
        * 'lexicon_[provider].yml' for a DNS provider specific configuration
          file, with [provider] equal to the DNS provider name
          (see with_provider_config_file doc)

    Example:
        $ ls /etc/lexicon
        lexicon.yml  # global Lexicon configuration file
        lexicon_cloudflare.yml  # specific configuration file for cloudflare DNS provider
    """
    lexicon_provider_config_files = []
    lexicon_config_files = []
    for path in os.listdir(dir_path):
        path = os.path.join(dir_path, path)
        if os.path.isfile(path):
            basename = os.path.basename(path)
            search = re.search(r'^lexicon(?:_(\w+)|)\.yml$', basename)
            if search:
                provider = search.group(1)
                if provider:
                    lexicon_provider_config_files.append((provider, path))
                else:
                    lexicon_config_files.append(path)

    for lexicon_provider_config_file in lexicon_provider_config_files:
        self.with_provider_config_file(lexicon_provider_config_file[0],
                                       lexicon_provider_config_file[1])

    for lexicon_config_file in lexicon_config_files:
        self.with_config_file(lexicon_config_file)

    return self
python
def with_config_dir(self, dir_path):
    """
    Configure current resolver to use every valid YAML configuration file
    available in the given directory path.
    To be taken into account, a configuration file must conform to the
    following naming convention:
        * 'lexicon.yml' for a global Lexicon config file
          (see with_config_file doc)
        * 'lexicon_[provider].yml' for a DNS provider specific configuration
          file, with [provider] equal to the DNS provider name
          (see with_provider_config_file doc)

    Example:
        $ ls /etc/lexicon
        lexicon.yml  # global Lexicon configuration file
        lexicon_cloudflare.yml  # specific configuration file for cloudflare DNS provider
    """
    lexicon_provider_config_files = []
    lexicon_config_files = []
    for path in os.listdir(dir_path):
        path = os.path.join(dir_path, path)
        if os.path.isfile(path):
            basename = os.path.basename(path)
            search = re.search(r'^lexicon(?:_(\w+)|)\.yml$', basename)
            if search:
                provider = search.group(1)
                if provider:
                    lexicon_provider_config_files.append((provider, path))
                else:
                    lexicon_config_files.append(path)

    for lexicon_provider_config_file in lexicon_provider_config_files:
        self.with_provider_config_file(lexicon_provider_config_file[0],
                                       lexicon_provider_config_file[1])

    for lexicon_config_file in lexicon_config_files:
        self.with_config_file(lexicon_config_file)

    return self
['def', 'with_config_dir', '(', 'self', ',', 'dir_path', ')', ':', 'lexicon_provider_config_files', '=', '[', ']', 'lexicon_config_files', '=', '[', ']', 'for', 'path', 'in', 'os', '.', 'listdir', '(', 'dir_path', ')', ':', 'path', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', 'path', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'path', ')', ':', 'basename', '=', 'os', '.', 'path', '.', 'basename', '(', 'path', ')', 'search', '=', 're', '.', 'search', '(', "r'^lexicon(?:_(\\w+)|)\\.yml$'", ',', 'basename', ')', 'if', 'search', ':', 'provider', '=', 'search', '.', 'group', '(', '1', ')', 'if', 'provider', ':', 'lexicon_provider_config_files', '.', 'append', '(', '(', 'provider', ',', 'path', ')', ')', 'else', ':', 'lexicon_config_files', '.', 'append', '(', 'path', ')', 'for', 'lexicon_provider_config_file', 'in', 'lexicon_provider_config_files', ':', 'self', '.', 'with_provider_config_file', '(', 'lexicon_provider_config_file', '[', '0', ']', ',', 'lexicon_provider_config_file', '[', '1', ']', ')', 'for', 'lexicon_config_file', 'in', 'lexicon_config_files', ':', 'self', '.', 'with_config_file', '(', 'lexicon_config_file', ')', 'return', 'self']
Configure current resolver to use every valid YAML configuration file available in the given directory path. To be taken into account, a configuration file must conform to the following naming convention: * 'lexicon.yml' for a global Lexicon config file (see with_config_file doc) * 'lexicon_[provider].yml' for a DNS provider specific configuration file, with [provider] equal to the DNS provider name (see with_provider_config_file doc) Example: $ ls /etc/lexicon lexicon.yml # global Lexicon configuration file lexicon_cloudflare.yml # specific configuration file for cloudflare DNS provider
['Configure', 'current', 'resolver', 'to', 'use', 'every', 'valid', 'YAML', 'configuration', 'file', 'available', 'in', 'the', 'given', 'directory', 'path', '.', 'To', 'be', 'taken', 'into', 'account', 'a', 'configuration', 'file', 'must', 'conform', 'to', 'the', 'following', 'naming', 'convention', ':', '*', 'lexicon', '.', 'yml', 'for', 'a', 'global', 'Lexicon', 'config', 'file', '(', 'see', 'with_config_file', 'doc', ')', '*', 'lexicon_', '[', 'provider', ']', '.', 'yml', 'for', 'a', 'DNS', 'provider', 'specific', 'configuration', 'file', 'with', '[', 'provider', ']', 'equal', 'to', 'the', 'DNS', 'provider', 'name', '(', 'see', 'with_provider_config_file', 'doc', ')']
train
https://github.com/AnalogJ/lexicon/blob/9330b871988753cad44fe2876a217b4c67b1fa0e/lexicon/config.py#L153-L189
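A short usage sketch; `with_env()` and the namespaced `resolve()` key follow my reading of lexicon's ConfigResolver and should be treated as assumptions. The directory layout is the one from the docstring.

from lexicon.config import ConfigResolver

config = ConfigResolver().with_config_dir('/etc/lexicon').with_env()
print(config.resolve('lexicon:provider_name'))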
2,108
dw/mitogen
mitogen/master.py
ThreadWatcher._reset
def _reset(cls):
    """If we have forked since the watch dictionaries were initialized, all
    that has is garbage, so clear it."""
    if os.getpid() != cls._cls_pid:
        cls._cls_pid = os.getpid()
        cls._cls_instances_by_target.clear()
        cls._cls_thread_by_target.clear()
python
def _reset(cls):
    """If we have forked since the watch dictionaries were initialized, all
    that has is garbage, so clear it."""
    if os.getpid() != cls._cls_pid:
        cls._cls_pid = os.getpid()
        cls._cls_instances_by_target.clear()
        cls._cls_thread_by_target.clear()
['def', '_reset', '(', 'cls', ')', ':', 'if', 'os', '.', 'getpid', '(', ')', '!=', 'cls', '.', '_cls_pid', ':', 'cls', '.', '_cls_pid', '=', 'os', '.', 'getpid', '(', ')', 'cls', '.', '_cls_instances_by_target', '.', 'clear', '(', ')', 'cls', '.', '_cls_thread_by_target', '.', 'clear', '(', ')']
If we have forked since the watch dictionaries were initialized, all that has is garbage, so clear it.
['If', 'we', 'have', 'forked', 'since', 'the', 'watch', 'dictionaries', 'were', 'initialized', 'all', 'that', 'has', 'is', 'garbage', 'so', 'clear', 'it', '.']
train
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/master.py#L256-L262
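A self-contained illustration of the fork-detection idiom (POSIX-only, since it uses os.fork): cache a pid at class level and clear the registries when it changes. `Watcher` is a minimal analogue for this sketch, not mitogen's actual class.

import os

class Watcher(object):
    _cls_pid = os.getpid()
    _cls_instances_by_target = {'t1': object()}
    _cls_thread_by_target = {}

    @classmethod
    def _reset(cls):
        if os.getpid() != cls._cls_pid:
            cls._cls_pid = os.getpid()
            cls._cls_instances_by_target.clear()
            cls._cls_thread_by_target.clear()

if os.fork() == 0:       # child: the cached pid is now stale
    Watcher._reset()
    print('child registries:', Watcher._cls_instances_by_target)   # {}
    os._exit(0)
os.wait()
Watcher._reset()         # parent: pid unchanged, nothing cleared
print('parent registries:', Watcher._cls_instances_by_target)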
2,109
ctuning/ck
ck/kernel.py
select
def select(i):
    """
    Input:  {
              dict             - dict with values being dicts with 'name' as string to display
                                 and 'sort' as int (for ordering)
              (title)          - print title
              (error_if_empty) - if 'yes' and Enter, make error
              (skip_sort)      - if 'yes', do not sort array
            }

    Output: {
              return - return code = 0
              string - selected dictionary key
            }
    """
    s = ''

    title = i.get('title', '')
    if title != '':
        out(title)
        out('')

    d = i['dict']
    if i.get('skip_sort', '') != 'yes':
        kd = sorted(d, key=lambda v: d[v].get('sort', 0))
    else:
        kd = d

    j = 0
    ks = {}
    for k in kd:
        q = d[k]
        sj = str(j)
        ks[sj] = k
        qn = q.get('name', '')
        out(sj + ') ' + qn)
        j += 1

    out('')
    rx = inp({'text': 'Make your selection (or press Enter for 0): '})
    if rx['return'] > 0:
        return rx

    sx = rx['string'].strip()
    if sx == '':
        if i.get('error_if_empty', '') == 'yes':
            return {'return': 1, 'error': 'selection is empty'}
        s = kd[0]
    else:
        if sx not in ks:
            return {'return': 1, 'error': 'selection is not recognized'}
        s = ks[sx]

    return {'return': 0, 'string': s}
python
def select(i):
    """
    Input:  {
              dict             - dict with values being dicts with 'name' as string to display
                                 and 'sort' as int (for ordering)
              (title)          - print title
              (error_if_empty) - if 'yes' and Enter, make error
              (skip_sort)      - if 'yes', do not sort array
            }

    Output: {
              return - return code = 0
              string - selected dictionary key
            }
    """
    s = ''

    title = i.get('title', '')
    if title != '':
        out(title)
        out('')

    d = i['dict']
    if i.get('skip_sort', '') != 'yes':
        kd = sorted(d, key=lambda v: d[v].get('sort', 0))
    else:
        kd = d

    j = 0
    ks = {}
    for k in kd:
        q = d[k]
        sj = str(j)
        ks[sj] = k
        qn = q.get('name', '')
        out(sj + ') ' + qn)
        j += 1

    out('')
    rx = inp({'text': 'Make your selection (or press Enter for 0): '})
    if rx['return'] > 0:
        return rx

    sx = rx['string'].strip()
    if sx == '':
        if i.get('error_if_empty', '') == 'yes':
            return {'return': 1, 'error': 'selection is empty'}
        s = kd[0]
    else:
        if sx not in ks:
            return {'return': 1, 'error': 'selection is not recognized'}
        s = ks[sx]

    return {'return': 0, 'string': s}
['def', 'select', '(', 'i', ')', ':', 's', '=', "''", 'title', '=', 'i', '.', 'get', '(', "'title'", ',', "''", ')', 'if', 'title', '!=', "''", ':', 'out', '(', 'title', ')', 'out', '(', "''", ')', 'd', '=', 'i', '[', "'dict'", ']', 'if', 'i', '.', 'get', '(', "'skip_sort'", ',', "''", ')', '!=', "'yes'", ':', 'kd', '=', 'sorted', '(', 'd', ',', 'key', '=', 'lambda', 'v', ':', 'd', '[', 'v', ']', '.', 'get', '(', "'sort'", ',', '0', ')', ')', 'else', ':', 'kd', '=', 'd', 'j', '=', '0', 'ks', '=', '{', '}', 'for', 'k', 'in', 'kd', ':', 'q', '=', 'd', '[', 'k', ']', 'sj', '=', 'str', '(', 'j', ')', 'ks', '[', 'sj', ']', '=', 'k', 'qn', '=', 'q', '.', 'get', '(', "'name'", ',', "''", ')', 'out', '(', 'sj', '+', "') '", '+', 'qn', ')', 'j', '+=', '1', 'out', '(', "''", ')', 'rx', '=', 'inp', '(', '{', "'text'", ':', "'Make your selection (or press Enter for 0): '", '}', ')', 'if', 'rx', '[', "'return'", ']', '>', '0', ':', 'return', 'rx', 'sx', '=', 'rx', '[', "'string'", ']', '.', 'strip', '(', ')', 'if', 'sx', '==', "''", ':', 'if', 'i', '.', 'get', '(', "'error_if_empty'", ',', "''", ')', '==', "'yes'", ':', 'return', '{', "'return'", ':', '1', ',', "'error'", ':', "'selection is empty'", '}', 's', '=', 'kd', '[', '0', ']', 'else', ':', 'if', 'sx', 'not', 'in', 'ks', ':', 'return', '{', "'return'", ':', '1', ',', "'error'", ':', "'selection is not recognized'", '}', 's', '=', 'ks', '[', 'sx', ']', 'return', '{', "'return'", ':', '0', ',', "'string'", ':', 's', '}']
Input: { dict - dict with values being dicts with 'name' as string to display and 'sort' as int (for ordering) (title) - print title (error_if_empty) - if 'yes' and Enter, make error (skip_sort) - if 'yes', do not sort array } Output: { return - return code = 0 string - selected dictionary key }
['Input', ':', '{', 'dict', '-', 'dict', 'with', 'values', 'being', 'dicts', 'with', 'name', 'as', 'string', 'to', 'display', 'and', 'sort', 'as', 'int', '(', 'for', 'ordering', ')', '(', 'title', ')', '-', 'print', 'title', '(', 'error_if_empty', ')', '-', 'if', 'yes', 'and', 'Enter', 'make', 'error', '(', 'skip_sort', ')', '-', 'if', 'yes', 'do', 'not', 'sort', 'array', '}']
train
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L837-L895
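A usage sketch (it prompts on the console): keys map to dicts carrying 'name' and an optional 'sort', per the Input contract above. Assumes the ck package is installed.

import ck.kernel as ck

r = ck.select({
    'title': 'Choose a compiler:',
    'dict': {
        'gcc':   {'name': 'GNU GCC',    'sort': 0},
        'clang': {'name': 'LLVM Clang', 'sort': 1},
    },
})
if r['return'] > 0:
    print('error:', r.get('error', ''))
else:
    print('selected key:', r['string'])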
2,110
saltstack/salt
salt/states/lxd_container.py
present
def present(name,
            running=None,
            source=None,
            profiles=None,
            config=None,
            devices=None,
            architecture='x86_64',
            ephemeral=False,
            restart_on_change=False,
            remote_addr=None,
            cert=None,
            key=None,
            verify_cert=True):
    '''
    Create the named container if it does not exist

    name
        The name of the container to be created

    running : None
        * If ``True``, ensure that the container is running
        * If ``False``, ensure that the container is stopped
        * If ``None``, do nothing with regards to the running state of the
          container

    source : None
        Can be either a string containing an image alias:
            "xenial/amd64"
        or a dict with type "image" with alias:
            {"type": "image", "alias": "xenial/amd64"}
        or image with "fingerprint":
            {"type": "image", "fingerprint": "SHA-256"}
        or image with "properties":
            {"type": "image",
             "properties": {
                "os": "ubuntu",
                "release": "14.04",
                "architecture": "x86_64"
             }}
        or none:
            {"type": "none"}
        or copy:
            {"type": "copy", "source": "my-old-container"}

    profiles : ['default']
        List of profiles to apply on this container

    config :
        A config dict or None (None = unset).
        Can also be a list:
            [{'key': 'boot.autostart', 'value': 1},
             {'key': 'security.privileged', 'value': '1'}]

    devices :
        A device dict or None (None = unset).

    architecture : 'x86_64'
        Can be one of the following:
            * unknown
            * i686
            * x86_64
            * armv7l
            * aarch64
            * ppc
            * ppc64
            * ppc64le
            * s390x

    ephemeral : False
        Destroy this container after stop?

    restart_on_change : False
        Restart the container when we detect changes on the config or
        its devices?

    remote_addr :
        A URL to a remote server; you also have to give cert and key if
        you provide remote_addr!
        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM formatted SSL certificate.
        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM formatted SSL key.
        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert. This is True by default, but in most
        cases you want to set it to False, as LXD normally uses
        self-signed certificates.
    '''
    if profiles is None:
        profiles = ['default']

    if source is None:
        source = {}

    ret = {
        'name': name,
        'running': running,
        'profiles': profiles,
        'source': source,
        'config': config,
        'devices': devices,
        'architecture': architecture,
        'ephemeral': ephemeral,
        'restart_on_change': restart_on_change,
        'remote_addr': remote_addr,
        'cert': cert,
        'key': key,
        'verify_cert': verify_cert,
        'changes': {}
    }

    container = None
    try:
        container = __salt__['lxd.container_get'](
            name, remote_addr, cert, key, verify_cert, _raw=True
        )
    except CommandExecutionError as e:
        return _error(ret, six.text_type(e))
    except SaltInvocationError as e:
        # Profile not found
        pass

    if container is None:
        if __opts__['test']:
            # Test is on, just return that we would create the container
            msg = 'Would create the container "{0}"'.format(name)
            ret['changes'] = {
                'created': msg
            }
            if running is True:
                msg = msg + ' and start it.'
                ret['changes']['started'] = (
                    'Would start the container "{0}"'.format(name)
                )

            ret['changes'] = {'created': msg}
            return _unchanged(ret, msg)

        # create the container
        try:
            __salt__['lxd.container_create'](
                name,
                source,
                profiles,
                config,
                devices,
                architecture,
                ephemeral,
                True,  # Wait
                remote_addr,
                cert,
                key,
                verify_cert
            )
        except CommandExecutionError as e:
            return _error(ret, six.text_type(e))

        msg = 'Created the container "{0}"'.format(name)
        ret['changes'] = {
            'created': msg
        }

        if running is True:
            try:
                __salt__['lxd.container_start'](
                    name, remote_addr, cert, key, verify_cert
                )
            except CommandExecutionError as e:
                return _error(ret, six.text_type(e))

            msg = msg + ' and started it.'
            ret['changes'] = {
                'started': 'Started the container "{0}"'.format(name)
            }

        return _success(ret, msg)

    # Container exists, lets check for differences
    new_profiles = set(map(six.text_type, profiles))
    old_profiles = set(map(six.text_type, container.profiles))

    container_changed = False

    profile_changes = []
    # Removed profiles
    for k in old_profiles.difference(new_profiles):
        if not __opts__['test']:
            profile_changes.append('Removed profile "{0}"'.format(k))
            old_profiles.discard(k)
        else:
            profile_changes.append('Would remove profile "{0}"'.format(k))

    # Added profiles
    for k in new_profiles.difference(old_profiles):
        if not __opts__['test']:
            profile_changes.append('Added profile "{0}"'.format(k))
            old_profiles.add(k)
        else:
            profile_changes.append('Would add profile "{0}"'.format(k))

    if profile_changes:
        container_changed = True
        ret['changes']['profiles'] = profile_changes
        container.profiles = list(old_profiles)

    # Config and devices changes
    config, devices = __salt__['lxd.normalize_input_values'](
        config, devices
    )
    changes = __salt__['lxd.sync_config_devices'](
        container, config, devices, __opts__['test']
    )
    if changes:
        container_changed = True
        ret['changes'].update(changes)

    is_running = \
        container.status_code == CONTAINER_STATUS_RUNNING

    if not __opts__['test']:
        try:
            __salt__['lxd.pylxd_save_object'](container)
        except CommandExecutionError as e:
            return _error(ret, six.text_type(e))

    if running != is_running:
        if running is True:
            if __opts__['test']:
                changes['running'] = 'Would start the container'
                return _unchanged(
                    ret,
                    ('Container "{0}" would get changed '
                     'and started.').format(name)
                )
            else:
                container.start(wait=True)
                changes['running'] = 'Started the container'

        elif running is False:
            if __opts__['test']:
                changes['stopped'] = 'Would stopped the container'
                return _unchanged(
                    ret,
                    ('Container "{0}" would get changed '
                     'and stopped.').format(name)
                )
            else:
                container.stop(wait=True)
                changes['stopped'] = 'Stopped the container'

    if ((running is True or running is None) and
            is_running and
            restart_on_change and
            container_changed):
        if __opts__['test']:
            changes['restarted'] = 'Would restart the container'
            return _unchanged(
                ret,
                'Would restart the container "{0}"'.format(name)
            )
        else:
            container.restart(wait=True)
            changes['restarted'] = (
                'Container "{0}" has been restarted'.format(name)
            )
            return _success(
                ret,
                'Container "{0}" has been restarted'.format(name)
            )

    if not container_changed:
        return _success(ret, 'No changes')

    if __opts__['test']:
        return _unchanged(
            ret,
            'Container "{0}" would get changed.'.format(name)
        )

    return _success(ret, '{0} changes'.format(len(ret['changes'].keys())))
python
def present(name,
            running=None,
            source=None,
            profiles=None,
            config=None,
            devices=None,
            architecture='x86_64',
            ephemeral=False,
            restart_on_change=False,
            remote_addr=None,
            cert=None,
            key=None,
            verify_cert=True):
    '''
    Create the named container if it does not exist

    name
        The name of the container to be created

    running : None
        * If ``True``, ensure that the container is running
        * If ``False``, ensure that the container is stopped
        * If ``None``, do nothing with regards to the running state of the
          container

    source : None
        Can be either a string containing an image alias:
            "xenial/amd64"
        or a dict with type "image" with alias:
            {"type": "image", "alias": "xenial/amd64"}
        or image with "fingerprint":
            {"type": "image", "fingerprint": "SHA-256"}
        or image with "properties":
            {"type": "image",
             "properties": {
                "os": "ubuntu",
                "release": "14.04",
                "architecture": "x86_64"
             }}
        or none:
            {"type": "none"}
        or copy:
            {"type": "copy", "source": "my-old-container"}

    profiles : ['default']
        List of profiles to apply on this container

    config :
        A config dict or None (None = unset).
        Can also be a list:
            [{'key': 'boot.autostart', 'value': 1},
             {'key': 'security.privileged', 'value': '1'}]

    devices :
        A device dict or None (None = unset).

    architecture : 'x86_64'
        Can be one of the following:
            * unknown
            * i686
            * x86_64
            * armv7l
            * aarch64
            * ppc
            * ppc64
            * ppc64le
            * s390x

    ephemeral : False
        Destroy this container after stop?

    restart_on_change : False
        Restart the container when we detect changes on the config or
        its devices?

    remote_addr :
        A URL to a remote server; you also have to give cert and key if
        you provide remote_addr!
        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM formatted SSL certificate.
        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM formatted SSL key.
        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert. This is True by default, but in most
        cases you want to set it to False, as LXD normally uses
        self-signed certificates.
    '''
    if profiles is None:
        profiles = ['default']

    if source is None:
        source = {}

    ret = {
        'name': name,
        'running': running,
        'profiles': profiles,
        'source': source,
        'config': config,
        'devices': devices,
        'architecture': architecture,
        'ephemeral': ephemeral,
        'restart_on_change': restart_on_change,
        'remote_addr': remote_addr,
        'cert': cert,
        'key': key,
        'verify_cert': verify_cert,
        'changes': {}
    }

    container = None
    try:
        container = __salt__['lxd.container_get'](
            name, remote_addr, cert, key, verify_cert, _raw=True
        )
    except CommandExecutionError as e:
        return _error(ret, six.text_type(e))
    except SaltInvocationError as e:
        # Profile not found
        pass

    if container is None:
        if __opts__['test']:
            # Test is on, just return that we would create the container
            msg = 'Would create the container "{0}"'.format(name)
            ret['changes'] = {
                'created': msg
            }
            if running is True:
                msg = msg + ' and start it.'
                ret['changes']['started'] = (
                    'Would start the container "{0}"'.format(name)
                )

            ret['changes'] = {'created': msg}
            return _unchanged(ret, msg)

        # create the container
        try:
            __salt__['lxd.container_create'](
                name,
                source,
                profiles,
                config,
                devices,
                architecture,
                ephemeral,
                True,  # Wait
                remote_addr,
                cert,
                key,
                verify_cert
            )
        except CommandExecutionError as e:
            return _error(ret, six.text_type(e))

        msg = 'Created the container "{0}"'.format(name)
        ret['changes'] = {
            'created': msg
        }

        if running is True:
            try:
                __salt__['lxd.container_start'](
                    name, remote_addr, cert, key, verify_cert
                )
            except CommandExecutionError as e:
                return _error(ret, six.text_type(e))

            msg = msg + ' and started it.'
            ret['changes'] = {
                'started': 'Started the container "{0}"'.format(name)
            }

        return _success(ret, msg)

    # Container exists, lets check for differences
    new_profiles = set(map(six.text_type, profiles))
    old_profiles = set(map(six.text_type, container.profiles))

    container_changed = False

    profile_changes = []
    # Removed profiles
    for k in old_profiles.difference(new_profiles):
        if not __opts__['test']:
            profile_changes.append('Removed profile "{0}"'.format(k))
            old_profiles.discard(k)
        else:
            profile_changes.append('Would remove profile "{0}"'.format(k))

    # Added profiles
    for k in new_profiles.difference(old_profiles):
        if not __opts__['test']:
            profile_changes.append('Added profile "{0}"'.format(k))
            old_profiles.add(k)
        else:
            profile_changes.append('Would add profile "{0}"'.format(k))

    if profile_changes:
        container_changed = True
        ret['changes']['profiles'] = profile_changes
        container.profiles = list(old_profiles)

    # Config and devices changes
    config, devices = __salt__['lxd.normalize_input_values'](
        config, devices
    )
    changes = __salt__['lxd.sync_config_devices'](
        container, config, devices, __opts__['test']
    )
    if changes:
        container_changed = True
        ret['changes'].update(changes)

    is_running = \
        container.status_code == CONTAINER_STATUS_RUNNING

    if not __opts__['test']:
        try:
            __salt__['lxd.pylxd_save_object'](container)
        except CommandExecutionError as e:
            return _error(ret, six.text_type(e))

    if running != is_running:
        if running is True:
            if __opts__['test']:
                changes['running'] = 'Would start the container'
                return _unchanged(
                    ret,
                    ('Container "{0}" would get changed '
                     'and started.').format(name)
                )
            else:
                container.start(wait=True)
                changes['running'] = 'Started the container'

        elif running is False:
            if __opts__['test']:
                changes['stopped'] = 'Would stopped the container'
                return _unchanged(
                    ret,
                    ('Container "{0}" would get changed '
                     'and stopped.').format(name)
                )
            else:
                container.stop(wait=True)
                changes['stopped'] = 'Stopped the container'

    if ((running is True or running is None) and
            is_running and
            restart_on_change and
            container_changed):
        if __opts__['test']:
            changes['restarted'] = 'Would restart the container'
            return _unchanged(
                ret,
                'Would restart the container "{0}"'.format(name)
            )
        else:
            container.restart(wait=True)
            changes['restarted'] = (
                'Container "{0}" has been restarted'.format(name)
            )
            return _success(
                ret,
                'Container "{0}" has been restarted'.format(name)
            )

    if not container_changed:
        return _success(ret, 'No changes')

    if __opts__['test']:
        return _unchanged(
            ret,
            'Container "{0}" would get changed.'.format(name)
        )

    return _success(ret, '{0} changes'.format(len(ret['changes'].keys())))
['def', 'present', '(', 'name', ',', 'running', '=', 'None', ',', 'source', '=', 'None', ',', 'profiles', '=', 'None', ',', 'config', '=', 'None', ',', 'devices', '=', 'None', ',', 'architecture', '=', "'x86_64'", ',', 'ephemeral', '=', 'False', ',', 'restart_on_change', '=', 'False', ',', 'remote_addr', '=', 'None', ',', 'cert', '=', 'None', ',', 'key', '=', 'None', ',', 'verify_cert', '=', 'True', ')', ':', 'if', 'profiles', 'is', 'None', ':', 'profiles', '=', '[', "'default'", ']', 'if', 'source', 'is', 'None', ':', 'source', '=', '{', '}', 'ret', '=', '{', "'name'", ':', 'name', ',', "'running'", ':', 'running', ',', "'profiles'", ':', 'profiles', ',', "'source'", ':', 'source', ',', "'config'", ':', 'config', ',', "'devices'", ':', 'devices', ',', "'architecture'", ':', 'architecture', ',', "'ephemeral'", ':', 'ephemeral', ',', "'restart_on_change'", ':', 'restart_on_change', ',', "'remote_addr'", ':', 'remote_addr', ',', "'cert'", ':', 'cert', ',', "'key'", ':', 'key', ',', "'verify_cert'", ':', 'verify_cert', ',', "'changes'", ':', '{', '}', '}', 'container', '=', 'None', 'try', ':', 'container', '=', '__salt__', '[', "'lxd.container_get'", ']', '(', 'name', ',', 'remote_addr', ',', 'cert', ',', 'key', ',', 'verify_cert', ',', '_raw', '=', 'True', ')', 'except', 'CommandExecutionError', 'as', 'e', ':', 'return', '_error', '(', 'ret', ',', 'six', '.', 'text_type', '(', 'e', ')', ')', 'except', 'SaltInvocationError', 'as', 'e', ':', '# Profile not found', 'pass', 'if', 'container', 'is', 'None', ':', 'if', '__opts__', '[', "'test'", ']', ':', '# Test is on, just return that we would create the container', 'msg', '=', '\'Would create the container "{0}"\'', '.', 'format', '(', 'name', ')', 'ret', '[', "'changes'", ']', '=', '{', "'created'", ':', 'msg', '}', 'if', 'running', 'is', 'True', ':', 'msg', '=', 'msg', '+', "' and start it.'", 'ret', '[', "'changes'", ']', '[', "'started'", ']', '=', '(', '\'Would start the container "{0}"\'', '.', 'format', '(', 'name', ')', ')', 'ret', '[', "'changes'", ']', '=', '{', "'created'", ':', 'msg', '}', 'return', '_unchanged', '(', 'ret', ',', 'msg', ')', '# create the container', 'try', ':', '__salt__', '[', "'lxd.container_create'", ']', '(', 'name', ',', 'source', ',', 'profiles', ',', 'config', ',', 'devices', ',', 'architecture', ',', 'ephemeral', ',', 'True', ',', '# Wait', 'remote_addr', ',', 'cert', ',', 'key', ',', 'verify_cert', ')', 'except', 'CommandExecutionError', 'as', 'e', ':', 'return', '_error', '(', 'ret', ',', 'six', '.', 'text_type', '(', 'e', ')', ')', 'msg', '=', '\'Created the container "{0}"\'', '.', 'format', '(', 'name', ')', 'ret', '[', "'changes'", ']', '=', '{', "'created'", ':', 'msg', '}', 'if', 'running', 'is', 'True', ':', 'try', ':', '__salt__', '[', "'lxd.container_start'", ']', '(', 'name', ',', 'remote_addr', ',', 'cert', ',', 'key', ',', 'verify_cert', ')', 'except', 'CommandExecutionError', 'as', 'e', ':', 'return', '_error', '(', 'ret', ',', 'six', '.', 'text_type', '(', 'e', ')', ')', 'msg', '=', 'msg', '+', "' and started it.'", 'ret', '[', "'changes'", ']', '=', '{', "'started'", ':', '\'Started the container "{0}"\'', '.', 'format', '(', 'name', ')', '}', 'return', '_success', '(', 'ret', ',', 'msg', ')', '# Container exists, lets check for differences', 'new_profiles', '=', 'set', '(', 'map', '(', 'six', '.', 'text_type', ',', 'profiles', ')', ')', 'old_profiles', '=', 'set', '(', 'map', '(', 'six', '.', 'text_type', ',', 'container', '.', 'profiles', ')', ')', 'container_changed', '=', 'False', 
'profile_changes', '=', '[', ']', '# Removed profiles', 'for', 'k', 'in', 'old_profiles', '.', 'difference', '(', 'new_profiles', ')', ':', 'if', 'not', '__opts__', '[', "'test'", ']', ':', 'profile_changes', '.', 'append', '(', '\'Removed profile "{0}"\'', '.', 'format', '(', 'k', ')', ')', 'old_profiles', '.', 'discard', '(', 'k', ')', 'else', ':', 'profile_changes', '.', 'append', '(', '\'Would remove profile "{0}"\'', '.', 'format', '(', 'k', ')', ')', '# Added profiles', 'for', 'k', 'in', 'new_profiles', '.', 'difference', '(', 'old_profiles', ')', ':', 'if', 'not', '__opts__', '[', "'test'", ']', ':', 'profile_changes', '.', 'append', '(', '\'Added profile "{0}"\'', '.', 'format', '(', 'k', ')', ')', 'old_profiles', '.', 'add', '(', 'k', ')', 'else', ':', 'profile_changes', '.', 'append', '(', '\'Would add profile "{0}"\'', '.', 'format', '(', 'k', ')', ')', 'if', 'profile_changes', ':', 'container_changed', '=', 'True', 'ret', '[', "'changes'", ']', '[', "'profiles'", ']', '=', 'profile_changes', 'container', '.', 'profiles', '=', 'list', '(', 'old_profiles', ')', '# Config and devices changes', 'config', ',', 'devices', '=', '__salt__', '[', "'lxd.normalize_input_values'", ']', '(', 'config', ',', 'devices', ')', 'changes', '=', '__salt__', '[', "'lxd.sync_config_devices'", ']', '(', 'container', ',', 'config', ',', 'devices', ',', '__opts__', '[', "'test'", ']', ')', 'if', 'changes', ':', 'container_changed', '=', 'True', 'ret', '[', "'changes'", ']', '.', 'update', '(', 'changes', ')', 'is_running', '=', 'container', '.', 'status_code', '==', 'CONTAINER_STATUS_RUNNING', 'if', 'not', '__opts__', '[', "'test'", ']', ':', 'try', ':', '__salt__', '[', "'lxd.pylxd_save_object'", ']', '(', 'container', ')', 'except', 'CommandExecutionError', 'as', 'e', ':', 'return', '_error', '(', 'ret', ',', 'six', '.', 'text_type', '(', 'e', ')', ')', 'if', 'running', '!=', 'is_running', ':', 'if', 'running', 'is', 'True', ':', 'if', '__opts__', '[', "'test'", ']', ':', 'changes', '[', "'running'", ']', '=', "'Would start the container'", 'return', '_unchanged', '(', 'ret', ',', '(', '\'Container "{0}" would get changed \'', "'and started.'", ')', '.', 'format', '(', 'name', ')', ')', 'else', ':', 'container', '.', 'start', '(', 'wait', '=', 'True', ')', 'changes', '[', "'running'", ']', '=', "'Started the container'", 'elif', 'running', 'is', 'False', ':', 'if', '__opts__', '[', "'test'", ']', ':', 'changes', '[', "'stopped'", ']', '=', "'Would stopped the container'", 'return', '_unchanged', '(', 'ret', ',', '(', '\'Container "{0}" would get changed \'', "'and stopped.'", ')', '.', 'format', '(', 'name', ')', ')', 'else', ':', 'container', '.', 'stop', '(', 'wait', '=', 'True', ')', 'changes', '[', "'stopped'", ']', '=', "'Stopped the container'", 'if', '(', '(', 'running', 'is', 'True', 'or', 'running', 'is', 'None', ')', 'and', 'is_running', 'and', 'restart_on_change', 'and', 'container_changed', ')', ':', 'if', '__opts__', '[', "'test'", ']', ':', 'changes', '[', "'restarted'", ']', '=', "'Would restart the container'", 'return', '_unchanged', '(', 'ret', ',', '\'Would restart the container "{0}"\'', '.', 'format', '(', 'name', ')', ')', 'else', ':', 'container', '.', 'restart', '(', 'wait', '=', 'True', ')', 'changes', '[', "'restarted'", ']', '=', '(', '\'Container "{0}" has been restarted\'', '.', 'format', '(', 'name', ')', ')', 'return', '_success', '(', 'ret', ',', '\'Container "{0}" has been restarted\'', '.', 'format', '(', 'name', ')', ')', 'if', 'not', 'container_changed', ':', 
'return', '_success', '(', 'ret', ',', "'No changes'", ')', 'if', '__opts__', '[', "'test'", ']', ':', 'return', '_unchanged', '(', 'ret', ',', '\'Container "{0}" would get changed.\'', '.', 'format', '(', 'name', ')', ')', 'return', '_success', '(', 'ret', ',', "'{0} changes'", '.', 'format', '(', 'len', '(', 'ret', '[', "'changes'", ']', '.', 'keys', '(', ')', ')', ')', ')']
Create the named container if it does not exist name The name of the container to be created running : None * If ``True``, ensure that the container is running * If ``False``, ensure that the container is stopped * If ``None``, do nothing with regards to the running state of the container source : None Can be either a string containing an image alias: "xenial/amd64" or a dict with type "image" with alias: {"type": "image", "alias": "xenial/amd64"} or image with "fingerprint": {"type": "image", "fingerprint": "SHA-256"} or image with "properties": {"type": "image", "properties": { "os": "ubuntu", "release": "14.04", "architecture": "x86_64" }} or none: {"type": "none"} or copy: {"type": "copy", "source": "my-old-container"} profiles : ['default'] List of profiles to apply on this container config : A config dict or None (None = unset). Can also be a list: [{'key': 'boot.autostart', 'value': 1}, {'key': 'security.privileged', 'value': '1'}] devices : A device dict or None (None = unset). architecture : 'x86_64' Can be one of the following: * unknown * i686 * x86_64 * armv7l * aarch64 * ppc * ppc64 * ppc64le * s390x ephemeral : False Destroy this container after stop? restart_on_change : False Restart the container when we detect changes on the config or its devices? remote_addr : A URL to a remote server; you also have to give cert and key if you provide remote_addr! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM formatted SSL certificate. Examples: ~/.config/lxc/client.crt key : PEM formatted SSL key. Examples: ~/.config/lxc/client.key verify_cert : True Whether to verify the cert. This is True by default, but in most cases you want to set it to False, as LXD normally uses self-signed certificates.
['Create', 'the', 'named', 'container', 'if', 'it', 'does', 'not', 'exist']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/lxd_container.py#L57-L360
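A hedged sketch of applying this state from Python on a minion via salt's Caller client, rather than through an SLS file; the container name and image alias are placeholders.

import salt.client

caller = salt.client.Caller()
result = caller.cmd(
    'state.single', 'lxd_container.present',
    name='web01',
    source={'type': 'image', 'alias': 'xenial/amd64'},
    running=True,
)
print(result)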
2,111
MrYsLab/pymata-aio
pymata_aio/pymata_core.py
PymataCore.start
def start(self):
    """
    This method must be called immediately after the class is instantiated.
    It instantiates the serial interface and then performs auto pin
    discovery.
    It is intended for use by pymata3 applications that do not use asyncio
    coroutines directly.

    :returns: No return value.
    """
    # check if user specified a socket transport
    if self.ip_address:
        self.socket = PymataSocket(self.ip_address, self.ip_port, self.loop)
        self.loop.run_until_complete((self.socket.start()))
        # set the read and write handles
        self.read = self.socket.read
        self.write = self.socket.write
        for i in range(0, len(self.ip_handshake)):
            self.loop.run_until_complete((self.read()))
    else:
        try:
            self.serial_port = PymataSerial(self.com_port, 57600,
                                            self.sleep_tune,
                                            self.log_output)
            # set the read and write handles
            self.read = self.serial_port.read
            self.write = self.serial_port.write
        except serial.SerialException:
            if self.log_output:
                log_string = 'Cannot instantiate serial interface: ' \
                             + self.com_port
                logging.exception(log_string)
            else:
                print(
                    'Cannot instantiate serial interface: ' + self.com_port)
                print('To see a list of serial ports, type: "list_serial_ports" in your console.')
            sys.exit(0)

    # wait for arduino to go through a reset cycle if need be
    time.sleep(self.arduino_wait)

    # register the get_command method with the event loop
    # self.loop = asyncio.get_event_loop()
    self.the_task = self.loop.create_task(self._command_dispatcher())

    # get arduino firmware version and print it
    try:
        firmware_version = self.loop.run_until_complete(self.get_firmware_version())
        if self.log_output:
            log_string = "\nArduino Firmware ID: " + firmware_version
            logging.exception(log_string)
        else:
            print("\nArduino Firmware ID: " + firmware_version)
    except TypeError:
        print('\nIs your serial cable plugged in and do you have the correct Firmata sketch loaded?')
        print('Is the COM port correct?')
        print('To see a list of serial ports, type: "list_serial_ports" in your console.')
        sys.exit(0)

    # try to get an analog pin map. if it comes back as none - shutdown
    report = self.loop.run_until_complete(self.get_analog_map())
    if not report:
        if self.log_output:
            log_string = '*** Analog map retrieval timed out. ***'
            logging.exception(log_string)
            log_string = '\nDo you have Arduino connectivity and do you ' \
                         'have a Firmata sketch uploaded to the board?'
            logging.exception(log_string)
        else:
            print('*** Analog map retrieval timed out. ***')
            print('\nDo you have Arduino connectivity and do you have a '
                  'Firmata sketch uploaded to the board?')
        try:
            loop = self.loop
            for t in asyncio.Task.all_tasks(loop):
                t.cancel()
            loop.run_until_complete(asyncio.sleep(.1))
            loop.close()
            loop.stop()
            sys.exit(0)
        except RuntimeError:
            # this suppresses the Event Loop Is Running message, which may
            # be a bug in python 3
            sys.exit(0)
        except TypeError:
            sys.exit(0)

    # custom assemble the pin lists
    for pin in report:
        digital_data = PinData()
        self.digital_pins.append(digital_data)
        if pin != Constants.IGNORE:
            analog_data = PinData()
            self.analog_pins.append(analog_data)

    if self.log_output:
        log_string = 'Auto-discovery complete. Found ' + \
                     str(len(self.digital_pins)) + ' Digital Pins and ' + \
                     str(len(self.analog_pins)) + ' Analog Pins'
        logging.info(log_string)
    else:
        print('{} {} {} {} {}'.format('Auto-discovery complete. Found',
                                      len(self.digital_pins),
                                      'Digital Pins and',
                                      len(self.analog_pins),
                                      'Analog Pins\n\n'))

    self.first_analog_pin = len(self.digital_pins) - len(self.analog_pins)
python
def start(self): """ This method must be called immediately after the class is instantiated. It instantiates the serial interface and then performs auto pin discovery. It is intended for use by pymata3 applications that do not use asyncio coroutines directly. :returns: No return value. """ # check if user specified a socket transport if self.ip_address: self.socket = PymataSocket(self.ip_address, self.ip_port, self.loop) self.loop.run_until_complete((self.socket.start())) # set the read and write handles self.read = self.socket.read self.write = self.socket.write for i in range(0, len(self.ip_handshake)): self.loop.run_until_complete((self.read())) else: try: self.serial_port = PymataSerial(self.com_port, 57600, self.sleep_tune, self.log_output) # set the read and write handles self.read = self.serial_port.read self.write = self.serial_port.write except serial.SerialException: if self.log_output: log_string = 'Cannot instantiate serial interface: ' \ + self.com_port logging.exception(log_string) else: print( 'Cannot instantiate serial interface: ' + self.com_port) print('To see a list of serial ports, type: "list_serial_ports" in your console.') sys.exit(0) # wait for arduino to go through a reset cycle if need be time.sleep(self.arduino_wait) # register the get_command method with the event loop # self.loop = asyncio.get_event_loop() self.the_task = self.loop.create_task(self._command_dispatcher()) # get arduino firmware version and print it try: firmware_version = self.loop.run_until_complete(self.get_firmware_version()) if self.log_output: log_string = "\nArduino Firmware ID: " + firmware_version logging.exception(log_string) else: print("\nArduino Firmware ID: " + firmware_version) except TypeError: print('\nIs your serial cable plugged in and do you have the correct Firmata sketch loaded?') print('Is the COM port correct?') print('To see a list of serial ports, type: "list_serial_ports" in your console.') sys.exit(0) # try to get an analog pin map. if it comes back as none - shutdown report = self.loop.run_until_complete(self.get_analog_map()) if not report: if self.log_output: log_string = '*** Analog map retrieval timed out. ***' logging.exception(log_string) log_string = '\nDo you have Arduino connectivity and do you ' \ 'have a Firmata sketch uploaded to the board?' logging.exception(log_string) else: print('*** Analog map retrieval timed out. ***') print('\nDo you have Arduino connectivity and do you have a ' 'Firmata sketch uploaded to the board?') try: loop = self.loop for t in asyncio.Task.all_tasks(loop): t.cancel() loop.run_until_complete(asyncio.sleep(.1)) loop.close() loop.stop() sys.exit(0) except RuntimeError: # this suppresses the Event Loop Is Running message, which may # be a bug in python 3 sys.exit(0) except TypeError: sys.exit(0) # custom assemble the pin lists for pin in report: digital_data = PinData() self.digital_pins.append(digital_data) if pin != Constants.IGNORE: analog_data = PinData() self.analog_pins.append(analog_data) if self.log_output: log_string = 'Auto-discovery complete. Found ' + \ str(len(self.digital_pins)) + ' Digital Pins and ' + \ str(len(self.analog_pins)) + ' Analog Pins' logging.info(log_string) else: print('{} {} {} {} {}'.format('Auto-discovery complete. Found', len(self.digital_pins), 'Digital Pins and', len(self.analog_pins), 'Analog Pins\n\n')) self.first_analog_pin = len(self.digital_pins) - len(self.analog_pins)
['def', 'start', '(', 'self', ')', ':', '# check if user specified a socket transport', 'if', 'self', '.', 'ip_address', ':', 'self', '.', 'socket', '=', 'PymataSocket', '(', 'self', '.', 'ip_address', ',', 'self', '.', 'ip_port', ',', 'self', '.', 'loop', ')', 'self', '.', 'loop', '.', 'run_until_complete', '(', '(', 'self', '.', 'socket', '.', 'start', '(', ')', ')', ')', '# set the read and write handles', 'self', '.', 'read', '=', 'self', '.', 'socket', '.', 'read', 'self', '.', 'write', '=', 'self', '.', 'socket', '.', 'write', 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'self', '.', 'ip_handshake', ')', ')', ':', 'self', '.', 'loop', '.', 'run_until_complete', '(', '(', 'self', '.', 'read', '(', ')', ')', ')', 'else', ':', 'try', ':', 'self', '.', 'serial_port', '=', 'PymataSerial', '(', 'self', '.', 'com_port', ',', '57600', ',', 'self', '.', 'sleep_tune', ',', 'self', '.', 'log_output', ')', '# set the read and write handles', 'self', '.', 'read', '=', 'self', '.', 'serial_port', '.', 'read', 'self', '.', 'write', '=', 'self', '.', 'serial_port', '.', 'write', 'except', 'serial', '.', 'SerialException', ':', 'if', 'self', '.', 'log_output', ':', 'log_string', '=', "'Cannot instantiate serial interface: '", '+', 'self', '.', 'com_port', 'logging', '.', 'exception', '(', 'log_string', ')', 'else', ':', 'print', '(', "'Cannot instantiate serial interface: '", '+', 'self', '.', 'com_port', ')', 'print', '(', '\'To see a list of serial ports, type: "list_serial_ports" in your console.\'', ')', 'sys', '.', 'exit', '(', '0', ')', '# wait for arduino to go through a reset cycle if need be', 'time', '.', 'sleep', '(', 'self', '.', 'arduino_wait', ')', '# register the get_command method with the event loop', '# self.loop = asyncio.get_event_loop()', 'self', '.', 'the_task', '=', 'self', '.', 'loop', '.', 'create_task', '(', 'self', '.', '_command_dispatcher', '(', ')', ')', '# get arduino firmware version and print it', 'try', ':', 'firmware_version', '=', 'self', '.', 'loop', '.', 'run_until_complete', '(', 'self', '.', 'get_firmware_version', '(', ')', ')', 'if', 'self', '.', 'log_output', ':', 'log_string', '=', '"\\nArduino Firmware ID: "', '+', 'firmware_version', 'logging', '.', 'exception', '(', 'log_string', ')', 'else', ':', 'print', '(', '"\\nArduino Firmware ID: "', '+', 'firmware_version', ')', 'except', 'TypeError', ':', 'print', '(', "'\\nIs your serial cable plugged in and do you have the correct Firmata sketch loaded?'", ')', 'print', '(', "'Is the COM port correct?'", ')', 'print', '(', '\'To see a list of serial ports, type: "list_serial_ports" in your console.\'', ')', 'sys', '.', 'exit', '(', '0', ')', '# try to get an analog pin map. if it comes back as none - shutdown', 'report', '=', 'self', '.', 'loop', '.', 'run_until_complete', '(', 'self', '.', 'get_analog_map', '(', ')', ')', 'if', 'not', 'report', ':', 'if', 'self', '.', 'log_output', ':', 'log_string', '=', "'*** Analog map retrieval timed out. ***'", 'logging', '.', 'exception', '(', 'log_string', ')', 'log_string', '=', "'\\nDo you have Arduino connectivity and do you '", "'have a Firmata sketch uploaded to the board?'", 'logging', '.', 'exception', '(', 'log_string', ')', 'else', ':', 'print', '(', "'*** Analog map retrieval timed out. ***'", ')', 'print', '(', "'\\nDo you have Arduino connectivity and do you have a '", "'Firmata sketch uploaded to the board?'", ')', 'try', ':', 'loop', '=', 'self', '.', 'loop', 'for', 't', 'in', 'asyncio', '.', 'Task', '.', 'all_tasks', '(', 'loop', ')', ':', 't', '.', 'cancel', '(', ')', 'loop', '.', 'run_until_complete', '(', 'asyncio', '.', 'sleep', '(', '.1', ')', ')', 'loop', '.', 'close', '(', ')', 'loop', '.', 'stop', '(', ')', 'sys', '.', 'exit', '(', '0', ')', 'except', 'RuntimeError', ':', '# this suppresses the Event Loop Is Running message, which may', '# be a bug in python 3', 'sys', '.', 'exit', '(', '0', ')', 'except', 'TypeError', ':', 'sys', '.', 'exit', '(', '0', ')', '# custom assemble the pin lists', 'for', 'pin', 'in', 'report', ':', 'digital_data', '=', 'PinData', '(', ')', 'self', '.', 'digital_pins', '.', 'append', '(', 'digital_data', ')', 'if', 'pin', '!=', 'Constants', '.', 'IGNORE', ':', 'analog_data', '=', 'PinData', '(', ')', 'self', '.', 'analog_pins', '.', 'append', '(', 'analog_data', ')', 'if', 'self', '.', 'log_output', ':', 'log_string', '=', "'Auto-discovery complete. Found '", '+', 'str', '(', 'len', '(', 'self', '.', 'digital_pins', ')', ')', '+', "' Digital Pins and '", '+', 'str', '(', 'len', '(', 'self', '.', 'analog_pins', ')', ')', '+', "' Analog Pins'", 'logging', '.', 'info', '(', 'log_string', ')', 'else', ':', 'print', '(', "'{} {} {} {} {}'", '.', 'format', '(', "'Auto-discovery complete. Found'", ',', 'len', '(', 'self', '.', 'digital_pins', ')', ',', "'Digital Pins and'", ',', 'len', '(', 'self', '.', 'analog_pins', ')', ',', "'Analog Pins\\n\\n'", ')', ')', 'self', '.', 'first_analog_pin', '=', 'len', '(', 'self', '.', 'digital_pins', ')', '-', 'len', '(', 'self', '.', 'analog_pins', ')']
This method must be called immediately after the class is instantiated. It instantiates the serial interface and then performs auto pin discovery. It is intended for use by pymata3 applications that do not use asyncio coroutines directly. :returns: No return value.
['This', 'method', 'must', 'be', 'called', 'immediately', 'after', 'the', 'class', 'is', 'instantiated', '.', 'It', 'instantiates', 'the', 'serial', 'interface', 'and', 'then', 'performs', 'auto', 'pin', 'discovery', '.', 'It', 'is', 'intended', 'for', 'use', 'by', 'pymata3', 'applications', 'that', 'do', 'not', 'use', 'asyncio', 'coroutines', 'directly', '.']
train
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata_core.py#L260-L369
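Note: a minimal usage sketch for the start() record above; the serial device path is a hypothetical example, not taken from the record.
from pymata_aio.pymata_core import PymataCore

# Hypothetical serial device path; PymataCore also accepts ip_address/ip_port
# keyword arguments for the socket transport branch shown above.
board = PymataCore(com_port='/dev/ttyACM0')
board.start()  # blocks until auto pin discovery completes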
2,112
Aula13/poloniex
poloniex/poloniex.py
PoloniexPublic.returnTradeHistory
def returnTradeHistory(self, currencyPair, start=None, end=None): """Returns the past 200 trades for a given market, or up to 50,000 trades between a range specified in UNIX timestamps by the "start" and "end" GET parameters.""" return self._public('returnTradeHistory', currencyPair=currencyPair, start=start, end=end)
python
def returnTradeHistory(self, currencyPair, start=None, end=None): """Returns the past 200 trades for a given market, or up to 50,000 trades between a range specified in UNIX timestamps by the "start" and "end" GET parameters.""" return self._public('returnTradeHistory', currencyPair=currencyPair, start=start, end=end)
['def', 'returnTradeHistory', '(', 'self', ',', 'currencyPair', ',', 'start', '=', 'None', ',', 'end', '=', 'None', ')', ':', 'return', 'self', '.', '_public', '(', "'returnTradeHistory'", ',', 'currencyPair', '=', 'currencyPair', ',', 'start', '=', 'start', ',', 'end', '=', 'end', ')']
Returns the past 200 trades for a given market, or up to 50,000 trades between a range specified in UNIX timestamps by the "start" and "end" GET parameters.
['Returns', 'the', 'past', '200', 'trades', 'for', 'a', 'given', 'market', 'or', 'up', 'to', '50', '000', 'trades', 'between', 'a', 'range', 'specified', 'in', 'UNIX', 'timestamps', 'by', 'the', 'start', 'and', 'end', 'GET', 'parameters', '.']
train
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L103-L108
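Note: a usage sketch for returnTradeHistory, assuming the PoloniexPublic client is importable from the module path shown in the record; the market and timestamps are hypothetical.
from poloniex.poloniex import PoloniexPublic

client = PoloniexPublic()
# Up to 50,000 trades inside a one-hour UNIX-timestamp window.
trades = client.returnTradeHistory('BTC_ETH', start=1500000000, end=1500003600)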
2,113
bokeh/bokeh
bokeh/embed/server.py
_get_app_path
def _get_app_path(url): ''' Extract the app path from a Bokeh server URL Args: url (str) : Returns: str ''' app_path = urlparse(url).path.rstrip("/") if not app_path.startswith("/"): app_path = "/" + app_path return app_path
python
def _get_app_path(url): ''' Extract the app path from a Bokeh server URL Args: url (str) : Returns: str ''' app_path = urlparse(url).path.rstrip("/") if not app_path.startswith("/"): app_path = "/" + app_path return app_path
['def', '_get_app_path', '(', 'url', ')', ':', 'app_path', '=', 'urlparse', '(', 'url', ')', '.', 'path', '.', 'rstrip', '(', '"/"', ')', 'if', 'not', 'app_path', '.', 'startswith', '(', '"/"', ')', ':', 'app_path', '=', '"/"', '+', 'app_path', 'return', 'app_path']
Extract the app path from a Bokeh server URL Args: url (str) : Returns: str
['Extract', 'the', 'app', 'path', 'from', 'a', 'Bokeh', 'server', 'URL']
train
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/embed/server.py#L256-L269
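Note: the behaviour of _get_app_path is easy to reproduce standalone; this illustrative copy mirrors the function in the record above.
from urllib.parse import urlparse

def get_app_path(url):
    # Strip the trailing slash and guarantee a leading one.
    app_path = urlparse(url).path.rstrip("/")
    if not app_path.startswith("/"):
        app_path = "/" + app_path
    return app_path

print(get_app_path("http://localhost:5006/sliders/"))  # -> /sliders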
2,114
svenevs/exhale
exhale/graph.py
ExhaleRoot.fileRefDiscovery
def fileRefDiscovery(self): ''' Finds the missing components for file nodes by parsing the Doxygen xml (which is just the ``doxygen_output_dir/node.refid``). Additional items parsed include adding items whose ``refid`` tag are used in this file, the <programlisting> for the file, what it includes and what includes it, as well as the location of the file (with respsect to the *Doxygen* root). Care must be taken to only include a refid found with specific tags. The parsing of the xml file was done by just looking at some example outputs. It seems to be working correctly, but there may be some subtle use cases that break it. .. warning:: Some enums, classes, variables, etc declared in the file will not have their associated refid in the declaration of the file, but will be present in the <programlisting>. These are added to the files' list of children when they are found, but this parental relationship cannot be formed if you set ``XML_PROGRAMLISTING = NO`` with Doxygen. An example of such an enum would be an enum declared inside of a namespace within this file. ''' if not os.path.isdir(configs._doxygen_xml_output_directory): utils.fancyError("The doxygen xml output directory [{0}] is not valid!".format( configs._doxygen_xml_output_directory )) # parse the doxygen xml file and extract all refid's put in it # keys: file object, values: list of refid's doxygen_xml_file_ownerships = {} # innerclass, innernamespace, etc ref_regex = re.compile(r'.*<inner.*refid="(\w+)".*') # what files this file includes inc_regex = re.compile(r'.*<includes.*>(.+)</includes>') # what files include this file inc_by_regex = re.compile(r'.*<includedby refid="(\w+)".*>(.*)</includedby>') # the actual location of the file loc_regex = re.compile(r'.*<location file="(.*)"/>') for f in self.files: doxygen_xml_file_ownerships[f] = [] try: doxy_xml_path = os.path.join(configs._doxygen_xml_output_directory, "{0}.xml".format(f.refid)) with codecs.open(doxy_xml_path, "r", "utf-8") as doxy_file: processing_code_listing = False # shows up at bottom of xml for line in doxy_file: # see if this line represents the location tag match = loc_regex.match(line) if match is not None: f.location = os.path.normpath(match.groups()[0]) continue if not processing_code_listing: # gather included by references match = inc_by_regex.match(line) if match is not None: ref, name = match.groups() f.included_by.append((ref, name)) continue # gather includes lines match = inc_regex.match(line) if match is not None: inc = match.groups()[0] f.includes.append(inc) continue # gather any classes, namespaces, etc declared in the file match = ref_regex.match(line) if match is not None: match_refid = match.groups()[0] if match_refid in self.node_by_refid: doxygen_xml_file_ownerships[f].append(match_refid) continue # lastly, see if we are starting the code listing if "<programlisting>" in line: processing_code_listing = True elif processing_code_listing: if "</programlisting>" in line: processing_code_listing = False else: f.program_listing.append(line) except: utils.fancyError( "Unable to process doxygen xml for file [{0}].\n".format(f.name) ) # # IMPORTANT: do not set the parent field of anything being added as a child to the file # # hack to make things work right on RTD # TODO: do this at construction rather than as a post process! if configs.doxygenStripFromPath is not None: for node in itertools.chain(self.files, self.dirs): if node.kind == "file": manip = node.location else: # node.kind == "dir" manip = node.name abs_strip_path = os.path.normpath(os.path.abspath( configs.doxygenStripFromPath )) if manip.startswith(abs_strip_path): manip = os.path.relpath(manip, abs_strip_path) manip = os.path.normpath(manip) if node.kind == "file": node.location = manip else: # node.kind == "dir" node.name = manip # now that we have parsed all the listed refid's in the doxygen xml, reparent # the nodes that we care about allowable_child_kinds = ["struct", "class", "function", "typedef", "define", "enum", "union"] for f in self.files: for match_refid in doxygen_xml_file_ownerships[f]: child = self.node_by_refid[match_refid] if child.kind in allowable_child_kinds: if child not in f.children: f.children.append(child) elif child.kind == "namespace": if child not in f.namespaces_used: f.namespaces_used.append(child) # last but not least, some different kinds declared in the file that are scoped # in a namespace they will show up in the programlisting, but not at the toplevel. for f in self.files: potential_orphans = [] for n in f.namespaces_used: for child in n.children: if child.kind == "enum" or child.kind == "variable" or \ child.kind == "function" or child.kind == "typedef" or \ child.kind == "union": potential_orphans.append(child) # now that we have a list of potential orphans, see if this doxygen xml had # the refid of a given child present. for orphan in potential_orphans: unresolved_name = orphan.name.split("::")[-1] if f.refid in orphan.refid and any(unresolved_name in line for line in f.program_listing): if orphan not in f.children: f.children.append(orphan) # Last but not least, make sure all children know where they were defined. for f in self.files: for child in f.children: if child.def_in_file is None: child.def_in_file = f elif child.def_in_file != f: # << verboseBuild utils.verbose_log( "Conflicting file definition for [{0}]: both [{1}] and [{2}] found.".format( child.name, child.def_in_file.name, f.name ), utils.AnsiColors.BOLD_RED )
python
def fileRefDiscovery(self): ''' Finds the missing components for file nodes by parsing the Doxygen xml (which is just the ``doxygen_output_dir/node.refid``). Additional items parsed include adding items whose ``refid`` tag are used in this file, the <programlisting> for the file, what it includes and what includes it, as well as the location of the file (with respsect to the *Doxygen* root). Care must be taken to only include a refid found with specific tags. The parsing of the xml file was done by just looking at some example outputs. It seems to be working correctly, but there may be some subtle use cases that break it. .. warning:: Some enums, classes, variables, etc declared in the file will not have their associated refid in the declaration of the file, but will be present in the <programlisting>. These are added to the files' list of children when they are found, but this parental relationship cannot be formed if you set ``XML_PROGRAMLISTING = NO`` with Doxygen. An example of such an enum would be an enum declared inside of a namespace within this file. ''' if not os.path.isdir(configs._doxygen_xml_output_directory): utils.fancyError("The doxygen xml output directory [{0}] is not valid!".format( configs._doxygen_xml_output_directory )) # parse the doxygen xml file and extract all refid's put in it # keys: file object, values: list of refid's doxygen_xml_file_ownerships = {} # innerclass, innernamespace, etc ref_regex = re.compile(r'.*<inner.*refid="(\w+)".*') # what files this file includes inc_regex = re.compile(r'.*<includes.*>(.+)</includes>') # what files include this file inc_by_regex = re.compile(r'.*<includedby refid="(\w+)".*>(.*)</includedby>') # the actual location of the file loc_regex = re.compile(r'.*<location file="(.*)"/>') for f in self.files: doxygen_xml_file_ownerships[f] = [] try: doxy_xml_path = os.path.join(configs._doxygen_xml_output_directory, "{0}.xml".format(f.refid)) with codecs.open(doxy_xml_path, "r", "utf-8") as doxy_file: processing_code_listing = False # shows up at bottom of xml for line in doxy_file: # see if this line represents the location tag match = loc_regex.match(line) if match is not None: f.location = os.path.normpath(match.groups()[0]) continue if not processing_code_listing: # gather included by references match = inc_by_regex.match(line) if match is not None: ref, name = match.groups() f.included_by.append((ref, name)) continue # gather includes lines match = inc_regex.match(line) if match is not None: inc = match.groups()[0] f.includes.append(inc) continue # gather any classes, namespaces, etc declared in the file match = ref_regex.match(line) if match is not None: match_refid = match.groups()[0] if match_refid in self.node_by_refid: doxygen_xml_file_ownerships[f].append(match_refid) continue # lastly, see if we are starting the code listing if "<programlisting>" in line: processing_code_listing = True elif processing_code_listing: if "</programlisting>" in line: processing_code_listing = False else: f.program_listing.append(line) except: utils.fancyError( "Unable to process doxygen xml for file [{0}].\n".format(f.name) ) # # IMPORTANT: do not set the parent field of anything being added as a child to the file # # hack to make things work right on RTD # TODO: do this at construction rather than as a post process! if configs.doxygenStripFromPath is not None: for node in itertools.chain(self.files, self.dirs): if node.kind == "file": manip = node.location else: # node.kind == "dir" manip = node.name abs_strip_path = os.path.normpath(os.path.abspath( configs.doxygenStripFromPath )) if manip.startswith(abs_strip_path): manip = os.path.relpath(manip, abs_strip_path) manip = os.path.normpath(manip) if node.kind == "file": node.location = manip else: # node.kind == "dir" node.name = manip # now that we have parsed all the listed refid's in the doxygen xml, reparent # the nodes that we care about allowable_child_kinds = ["struct", "class", "function", "typedef", "define", "enum", "union"] for f in self.files: for match_refid in doxygen_xml_file_ownerships[f]: child = self.node_by_refid[match_refid] if child.kind in allowable_child_kinds: if child not in f.children: f.children.append(child) elif child.kind == "namespace": if child not in f.namespaces_used: f.namespaces_used.append(child) # last but not least, some different kinds declared in the file that are scoped # in a namespace they will show up in the programlisting, but not at the toplevel. for f in self.files: potential_orphans = [] for n in f.namespaces_used: for child in n.children: if child.kind == "enum" or child.kind == "variable" or \ child.kind == "function" or child.kind == "typedef" or \ child.kind == "union": potential_orphans.append(child) # now that we have a list of potential orphans, see if this doxygen xml had # the refid of a given child present. for orphan in potential_orphans: unresolved_name = orphan.name.split("::")[-1] if f.refid in orphan.refid and any(unresolved_name in line for line in f.program_listing): if orphan not in f.children: f.children.append(orphan) # Last but not least, make sure all children know where they were defined. for f in self.files: for child in f.children: if child.def_in_file is None: child.def_in_file = f elif child.def_in_file != f: # << verboseBuild utils.verbose_log( "Conflicting file definition for [{0}]: both [{1}] and [{2}] found.".format( child.name, child.def_in_file.name, f.name ), utils.AnsiColors.BOLD_RED )
['def', 'fileRefDiscovery', '(', 'self', ')', ':', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'configs', '.', '_doxygen_xml_output_directory', ')', ':', 'utils', '.', 'fancyError', '(', '"The doxygen xml output directory [{0}] is not valid!"', '.', 'format', '(', 'configs', '.', '_doxygen_xml_output_directory', ')', ')', "# parse the doxygen xml file and extract all refid's put in it", "# keys: file object, values: list of refid's", 'doxygen_xml_file_ownerships', '=', '{', '}', '# innerclass, innernamespace, etc', 'ref_regex', '=', 're', '.', 'compile', '(', 'r\'.*<inner.*refid="(\\w+)".*\'', ')', '# what files this file includes', 'inc_regex', '=', 're', '.', 'compile', '(', "r'.*<includes.*>(.+)</includes>'", ')', '# what files include this file', 'inc_by_regex', '=', 're', '.', 'compile', '(', 'r\'.*<includedby refid="(\\w+)".*>(.*)</includedby>\'', ')', '# the actual location of the file', 'loc_regex', '=', 're', '.', 'compile', '(', 'r\'.*<location file="(.*)"/>\'', ')', 'for', 'f', 'in', 'self', '.', 'files', ':', 'doxygen_xml_file_ownerships', '[', 'f', ']', '=', '[', ']', 'try', ':', 'doxy_xml_path', '=', 'os', '.', 'path', '.', 'join', '(', 'configs', '.', '_doxygen_xml_output_directory', ',', '"{0}.xml"', '.', 'format', '(', 'f', '.', 'refid', ')', ')', 'with', 'codecs', '.', 'open', '(', 'doxy_xml_path', ',', '"r"', ',', '"utf-8"', ')', 'as', 'doxy_file', ':', 'processing_code_listing', '=', 'False', '# shows up at bottom of xml', 'for', 'line', 'in', 'doxy_file', ':', '# see if this line represents the location tag', 'match', '=', 'loc_regex', '.', 'match', '(', 'line', ')', 'if', 'match', 'is', 'not', 'None', ':', 'f', '.', 'location', '=', 'os', '.', 'path', '.', 'normpath', '(', 'match', '.', 'groups', '(', ')', '[', '0', ']', ')', 'continue', 'if', 'not', 'processing_code_listing', ':', '# gather included by references', 'match', '=', 'inc_by_regex', '.', 'match', '(', 'line', ')', 'if', 'match', 'is', 'not', 'None', ':', 'ref', ',', 'name', '=', 'match', '.', 'groups', '(', ')', 'f', '.', 'included_by', '.', 'append', '(', '(', 'ref', ',', 'name', ')', ')', 'continue', '# gather includes lines', 'match', '=', 'inc_regex', '.', 'match', '(', 'line', ')', 'if', 'match', 'is', 'not', 'None', ':', 'inc', '=', 'match', '.', 'groups', '(', ')', '[', '0', ']', 'f', '.', 'includes', '.', 'append', '(', 'inc', ')', 'continue', '# gather any classes, namespaces, etc declared in the file', 'match', '=', 'ref_regex', '.', 'match', '(', 'line', ')', 'if', 'match', 'is', 'not', 'None', ':', 'match_refid', '=', 'match', '.', 'groups', '(', ')', '[', '0', ']', 'if', 'match_refid', 'in', 'self', '.', 'node_by_refid', ':', 'doxygen_xml_file_ownerships', '[', 'f', ']', '.', 'append', '(', 'match_refid', ')', 'continue', '# lastly, see if we are starting the code listing', 'if', '"<programlisting>"', 'in', 'line', ':', 'processing_code_listing', '=', 'True', 'elif', 'processing_code_listing', ':', 'if', '"</programlisting>"', 'in', 'line', ':', 'processing_code_listing', '=', 'False', 'else', ':', 'f', '.', 'program_listing', '.', 'append', '(', 'line', ')', 'except', ':', 'utils', '.', 'fancyError', '(', '"Unable to process doxygen xml for file [{0}].\\n"', '.', 'format', '(', 'f', '.', 'name', ')', ')', '#', '# IMPORTANT: do not set the parent field of anything being added as a child to the file', '#', '# hack to make things work right on RTD', '# TODO: do this at construction rather than as a post process!', 'if', 'configs', '.', 'doxygenStripFromPath', 'is', 'not', 'None', ':', 'for', 'node', 'in', 'itertools', '.', 'chain', '(', 'self', '.', 'files', ',', 'self', '.', 'dirs', ')', ':', 'if', 'node', '.', 'kind', '==', '"file"', ':', 'manip', '=', 'node', '.', 'location', 'else', ':', '# node.kind == "dir"', 'manip', '=', 'node', '.', 'name', 'abs_strip_path', '=', 'os', '.', 'path', '.', 'normpath', '(', 'os', '.', 'path', '.', 'abspath', '(', 'configs', '.', 'doxygenStripFromPath', ')', ')', 'if', 'manip', '.', 'startswith', '(', 'abs_strip_path', ')', ':', 'manip', '=', 'os', '.', 'path', '.', 'relpath', '(', 'manip', ',', 'abs_strip_path', ')', 'manip', '=', 'os', '.', 'path', '.', 'normpath', '(', 'manip', ')', 'if', 'node', '.', 'kind', '==', '"file"', ':', 'node', '.', 'location', '=', 'manip', 'else', ':', '# node.kind == "dir"', 'node', '.', 'name', '=', 'manip', "# now that we have parsed all the listed refid's in the doxygen xml, reparent", '# the nodes that we care about', 'allowable_child_kinds', '=', '[', '"struct"', ',', '"class"', ',', '"function"', ',', '"typedef"', ',', '"define"', ',', '"enum"', ',', '"union"', ']', 'for', 'f', 'in', 'self', '.', 'files', ':', 'for', 'match_refid', 'in', 'doxygen_xml_file_ownerships', '[', 'f', ']', ':', 'child', '=', 'self', '.', 'node_by_refid', '[', 'match_refid', ']', 'if', 'child', '.', 'kind', 'in', 'allowable_child_kinds', ':', 'if', 'child', 'not', 'in', 'f', '.', 'children', ':', 'f', '.', 'children', '.', 'append', '(', 'child', ')', 'elif', 'child', '.', 'kind', '==', '"namespace"', ':', 'if', 'child', 'not', 'in', 'f', '.', 'namespaces_used', ':', 'f', '.', 'namespaces_used', '.', 'append', '(', 'child', ')', '# last but not least, some different kinds declared in the file that are scoped', '# in a namespace they will show up in the programlisting, but not at the toplevel.', 'for', 'f', 'in', 'self', '.', 'files', ':', 'potential_orphans', '=', '[', ']', 'for', 'n', 'in', 'f', '.', 'namespaces_used', ':', 'for', 'child', 'in', 'n', '.', 'children', ':', 'if', 'child', '.', 'kind', '==', '"enum"', 'or', 'child', '.', 'kind', '==', '"variable"', 'or', 'child', '.', 'kind', '==', '"function"', 'or', 'child', '.', 'kind', '==', '"typedef"', 'or', 'child', '.', 'kind', '==', '"union"', ':', 'potential_orphans', '.', 'append', '(', 'child', ')', '# now that we have a list of potential orphans, see if this doxygen xml had', '# the refid of a given child present.', 'for', 'orphan', 'in', 'potential_orphans', ':', 'unresolved_name', '=', 'orphan', '.', 'name', '.', 'split', '(', '"::"', ')', '[', '-', '1', ']', 'if', 'f', '.', 'refid', 'in', 'orphan', '.', 'refid', 'and', 'any', '(', 'unresolved_name', 'in', 'line', 'for', 'line', 'in', 'f', '.', 'program_listing', ')', ':', 'if', 'orphan', 'not', 'in', 'f', '.', 'children', ':', 'f', '.', 'children', '.', 'append', '(', 'orphan', ')', '# Last but not least, make sure all children know where they were defined.', 'for', 'f', 'in', 'self', '.', 'files', ':', 'for', 'child', 'in', 'f', '.', 'children', ':', 'if', 'child', '.', 'def_in_file', 'is', 'None', ':', 'child', '.', 'def_in_file', '=', 'f', 'elif', 'child', '.', 'def_in_file', '!=', 'f', ':', '# << verboseBuild', 'utils', '.', 'verbose_log', '(', '"Conflicting file definition for [{0}]: both [{1}] and [{2}] found."', '.', 'format', '(', 'child', '.', 'name', ',', 'child', '.', 'def_in_file', '.', 'name', ',', 'f', '.', 'name', ')', ',', 'utils', '.', 'AnsiColors', '.', 'BOLD_RED', ')']
Finds the missing components for file nodes by parsing the Doxygen xml (which is just the ``doxygen_output_dir/node.refid``). Additional items parsed include adding items whose ``refid`` tag are used in this file, the <programlisting> for the file, what it includes and what includes it, as well as the location of the file (with respsect to the *Doxygen* root). Care must be taken to only include a refid found with specific tags. The parsing of the xml file was done by just looking at some example outputs. It seems to be working correctly, but there may be some subtle use cases that break it. .. warning:: Some enums, classes, variables, etc declared in the file will not have their associated refid in the declaration of the file, but will be present in the <programlisting>. These are added to the files' list of children when they are found, but this parental relationship cannot be formed if you set ``XML_PROGRAMLISTING = NO`` with Doxygen. An example of such an enum would be an enum declared inside of a namespace within this file.
['Finds', 'the', 'missing', 'components', 'for', 'file', 'nodes', 'by', 'parsing', 'the', 'Doxygen', 'xml', '(', 'which', 'is', 'just', 'the', 'doxygen_output_dir', '/', 'node', '.', 'refid', ')', '.', 'Additional', 'items', 'parsed', 'include', 'adding', 'items', 'whose', 'refid', 'tag', 'are', 'used', 'in', 'this', 'file', 'the', '<programlisting', '>', 'for', 'the', 'file', 'what', 'it', 'includes', 'and', 'what', 'includes', 'it', 'as', 'well', 'as', 'the', 'location', 'of', 'the', 'file', '(', 'with', 'respsect', 'to', 'the', '*', 'Doxygen', '*', 'root', ')', '.']
train
https://github.com/svenevs/exhale/blob/fe7644829057af622e467bb529db6c03a830da99/exhale/graph.py#L1708-L1861
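Note: the XML scraping in the record above is plain line-by-line regex matching; a small standalone demonstration using the same location pattern (the file name is a hypothetical example).
import re

loc_regex = re.compile(r'.*<location file="(.*)"/>')
line = '        <location file="include/widget.hpp"/>'
match = loc_regex.match(line)
if match is not None:
    print(match.groups()[0])  # include/widget.hpp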
2,115
UCL-INGI/INGInious
inginious/frontend/template_helper.py
TemplateHelper._json_safe_dump
def _json_safe_dump(self, data): """ Make a json dump of `data`, that can be used directly in a `<script>` tag. Available as json() inside templates """ return json.dumps(data).replace(u'<', u'\\u003c') \ .replace(u'>', u'\\u003e') \ .replace(u'&', u'\\u0026') \ .replace(u"'", u'\\u0027')
python
def _json_safe_dump(self, data): """ Make a json dump of `data`, that can be used directly in a `<script>` tag. Available as json() inside templates """ return json.dumps(data).replace(u'<', u'\\u003c') \ .replace(u'>', u'\\u003e') \ .replace(u'&', u'\\u0026') \ .replace(u"'", u'\\u0027')
['def', '_json_safe_dump', '(', 'self', ',', 'data', ')', ':', 'return', 'json', '.', 'dumps', '(', 'data', ')', '.', 'replace', '(', "u'<'", ',', "u'\\\\u003c'", ')', '.', 'replace', '(', "u'>'", ',', "u'\\\\u003e'", ')', '.', 'replace', '(', "u'&'", ',', "u'\\\\u0026'", ')', '.', 'replace', '(', 'u"\'"', ',', "u'\\\\u0027'", ')']
Make a json dump of `data`, that can be used directly in a `<script>` tag. Available as json() inside templates
['Make', 'a', 'json', 'dump', 'of', 'data', 'that', 'can', 'be', 'used', 'directly', 'in', 'a', '<script', '>', 'tag', '.', 'Available', 'as', 'json', '()', 'inside', 'templates']
train
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/template_helper.py#L153-L158
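Note: an illustrative standalone version of the escaping above, showing why it matters: a literal '</script>' inside the data can no longer terminate an enclosing script tag once '<' and '>' are escaped.
import json

def json_safe_dump(data):
    # Escape the characters that could break out of a <script> context.
    return (json.dumps(data)
            .replace('<', '\\u003c')
            .replace('>', '\\u003e')
            .replace('&', '\\u0026')
            .replace("'", '\\u0027'))

print(json_safe_dump({'html': '</script>'}))  # {"html": "\u003c/script\u003e"}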
2,116
72squared/redpipe
redpipe/keyspaces.py
Keyspace.delete
def delete(self, *names): """ Remove the key from redis :param names: tuple of strings - The keys to remove from redis. :return: Future() """ names = [self.redis_key(n) for n in names] with self.pipe as pipe: return pipe.delete(*names)
python
def delete(self, *names): """ Remove the key from redis :param names: tuple of strings - The keys to remove from redis. :return: Future() """ names = [self.redis_key(n) for n in names] with self.pipe as pipe: return pipe.delete(*names)
['def', 'delete', '(', 'self', ',', '*', 'names', ')', ':', 'names', '=', '[', 'self', '.', 'redis_key', '(', 'n', ')', 'for', 'n', 'in', 'names', ']', 'with', 'self', '.', 'pipe', 'as', 'pipe', ':', 'return', 'pipe', '.', 'delete', '(', '*', 'names', ')']
Remove the key from redis :param names: tuple of strings - The keys to remove from redis. :return: Future()
['Remove', 'the', 'key', 'from', 'redis']
train
https://github.com/72squared/redpipe/blob/e6ee518bc9f3e2fee323c8c53d08997799bd9b1b/redpipe/keyspaces.py#L176-L185
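Note: the key-prefixing pattern above is illustrated with a toy stand-in below; this is not redpipe's actual key scheme or pipeline, just the shape of redis_key plus a variadic delete.
class ToyKeyspace:
    keyspace = 'user'

    def redis_key(self, name):
        # Illustrative prefixing only; redpipe's real scheme may differ.
        return '%s:%s' % (self.keyspace, name)

    def delete(self, *names):
        names = [self.redis_key(n) for n in names]
        print('DEL', *names)  # a real pipeline would send this to redis

ToyKeyspace().delete('1', '2')  # DEL user:1 user:2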
2,117
raphaelm/python-sepaxml
sepaxml/transfer.py
SepaTransfer._finalize_batch
def _finalize_batch(self): """ Method to finalize the batch, this will iterate over the _batches dict and create a PmtInf node for each batch. The correct information (from the batch_key and batch_totals) will be inserted and the batch transaction nodes will be folded. Finally, the batches will be added to the main XML. """ for batch_meta, batch_nodes in self._batches.items(): PmtInf_nodes = self._create_PmtInf_node() PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name']) PmtInf_nodes['PmtMtdNode'].text = "TRF" PmtInf_nodes['BtchBookgNode'].text = "true" PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA" if batch_meta: PmtInf_nodes['ReqdExctnDtNode'].text = batch_meta else: del PmtInf_nodes['ReqdExctnDtNode'] PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name'] PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN'] if 'BIC' in self._config: PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC'] PmtInf_nodes['ChrgBrNode'].text = "SLEV" PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes)) PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta]) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode']) PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node']) PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode']) if 'ReqdExctnDtNode' in PmtInf_nodes: PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode']) PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode']) PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node']) PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode']) if 'BIC' in self._config: PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node']) PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode']) for txnode in batch_nodes: PmtInf_nodes['PmtInfNode'].append(txnode) CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn') CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
python
def _finalize_batch(self): """ Method to finalize the batch, this will iterate over the _batches dict and create a PmtInf node for each batch. The correct information (from the batch_key and batch_totals) will be inserted and the batch transaction nodes will be folded. Finally, the batches will be added to the main XML. """ for batch_meta, batch_nodes in self._batches.items(): PmtInf_nodes = self._create_PmtInf_node() PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name']) PmtInf_nodes['PmtMtdNode'].text = "TRF" PmtInf_nodes['BtchBookgNode'].text = "true" PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA" if batch_meta: PmtInf_nodes['ReqdExctnDtNode'].text = batch_meta else: del PmtInf_nodes['ReqdExctnDtNode'] PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name'] PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN'] if 'BIC' in self._config: PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC'] PmtInf_nodes['ChrgBrNode'].text = "SLEV" PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes)) PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta]) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode']) PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node']) PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode']) if 'ReqdExctnDtNode' in PmtInf_nodes: PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode']) PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode']) PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node']) PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode']) if 'BIC' in self._config: PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node']) PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode']) for txnode in batch_nodes: PmtInf_nodes['PmtInfNode'].append(txnode) CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn') CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
['def', '_finalize_batch', '(', 'self', ')', ':', 'for', 'batch_meta', ',', 'batch_nodes', 'in', 'self', '.', '_batches', '.', 'items', '(', ')', ':', 'PmtInf_nodes', '=', 'self', '.', '_create_PmtInf_node', '(', ')', 'PmtInf_nodes', '[', "'PmtInfIdNode'", ']', '.', 'text', '=', 'make_id', '(', 'self', '.', '_config', '[', "'name'", ']', ')', 'PmtInf_nodes', '[', "'PmtMtdNode'", ']', '.', 'text', '=', '"TRF"', 'PmtInf_nodes', '[', "'BtchBookgNode'", ']', '.', 'text', '=', '"true"', 'PmtInf_nodes', '[', "'Cd_SvcLvl_Node'", ']', '.', 'text', '=', '"SEPA"', 'if', 'batch_meta', ':', 'PmtInf_nodes', '[', "'ReqdExctnDtNode'", ']', '.', 'text', '=', 'batch_meta', 'else', ':', 'del', 'PmtInf_nodes', '[', "'ReqdExctnDtNode'", ']', 'PmtInf_nodes', '[', "'Nm_Dbtr_Node'", ']', '.', 'text', '=', 'self', '.', '_config', '[', "'name'", ']', 'PmtInf_nodes', '[', "'IBAN_DbtrAcct_Node'", ']', '.', 'text', '=', 'self', '.', '_config', '[', "'IBAN'", ']', 'if', "'BIC'", 'in', 'self', '.', '_config', ':', 'PmtInf_nodes', '[', "'BIC_DbtrAgt_Node'", ']', '.', 'text', '=', 'self', '.', '_config', '[', "'BIC'", ']', 'PmtInf_nodes', '[', "'ChrgBrNode'", ']', '.', 'text', '=', '"SLEV"', 'PmtInf_nodes', '[', "'NbOfTxsNode'", ']', '.', 'text', '=', 'str', '(', 'len', '(', 'batch_nodes', ')', ')', 'PmtInf_nodes', '[', "'CtrlSumNode'", ']', '.', 'text', '=', 'int_to_decimal_str', '(', 'self', '.', '_batch_totals', '[', 'batch_meta', ']', ')', 'PmtInf_nodes', '[', "'PmtInfNode'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'PmtInfIdNode'", ']', ')', 'PmtInf_nodes', '[', "'PmtInfNode'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'PmtMtdNode'", ']', ')', 'PmtInf_nodes', '[', "'PmtInfNode'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'BtchBookgNode'", ']', ')', 'PmtInf_nodes', '[', "'PmtInfNode'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'NbOfTxsNode'", ']', ')', 'PmtInf_nodes', '[', "'PmtInfNode'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'CtrlSumNode'", ']', ')', 'PmtInf_nodes', '[', "'SvcLvlNode'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'Cd_SvcLvl_Node'", ']', ')', 'PmtInf_nodes', '[', "'PmtTpInfNode'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'SvcLvlNode'", ']', ')', 'PmtInf_nodes', '[', "'PmtInfNode'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'PmtTpInfNode'", ']', ')', 'if', "'ReqdExctnDtNode'", 'in', 'PmtInf_nodes', ':', 'PmtInf_nodes', '[', "'PmtInfNode'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'ReqdExctnDtNode'", ']', ')', 'PmtInf_nodes', '[', "'DbtrNode'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'Nm_Dbtr_Node'", ']', ')', 'PmtInf_nodes', '[', "'PmtInfNode'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'DbtrNode'", ']', ')', 'PmtInf_nodes', '[', "'Id_DbtrAcct_Node'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'IBAN_DbtrAcct_Node'", ']', ')', 'PmtInf_nodes', '[', "'DbtrAcctNode'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'Id_DbtrAcct_Node'", ']', ')', 'PmtInf_nodes', '[', "'PmtInfNode'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'DbtrAcctNode'", ']', ')', 'if', "'BIC'", 'in', 'self', '.', '_config', ':', 'PmtInf_nodes', '[', "'FinInstnId_DbtrAgt_Node'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'BIC_DbtrAgt_Node'", ']', ')', 'PmtInf_nodes', '[', "'DbtrAgtNode'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'FinInstnId_DbtrAgt_Node'", ']', ')', 'PmtInf_nodes', '[', "'PmtInfNode'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'DbtrAgtNode'", ']', ')', 'PmtInf_nodes', '[', "'PmtInfNode'", ']', '.', 'append', '(', 'PmtInf_nodes', '[', "'ChrgBrNode'", ']', ')', 'for', 'txnode', 'in', 'batch_nodes', ':', 'PmtInf_nodes', '[', "'PmtInfNode'", ']', '.', 'append', '(', 'txnode', ')', 'CstmrCdtTrfInitn_node', '=', 'self', '.', '_xml', '.', 'find', '(', "'CstmrCdtTrfInitn'", ')', 'CstmrCdtTrfInitn_node', '.', 'append', '(', 'PmtInf_nodes', '[', "'PmtInfNode'", ']', ')']
Method to finalize the batch, this will iterate over the _batches dict and create a PmtInf node for each batch. The correct information (from the batch_key and batch_totals) will be inserted and the batch transaction nodes will be folded. Finally, the batches will be added to the main XML.
['Method', 'to', 'finalize', 'the', 'batch', 'this', 'will', 'iterate', 'over', 'the', '_batches', 'dict', 'and', 'create', 'a', 'PmtInf', 'node', 'for', 'each', 'batch', '.', 'The', 'correct', 'information', '(', 'from', 'the', 'batch_key', 'and', 'batch_totals', ')', 'will', 'be', 'inserted', 'and', 'the', 'batch', 'transaction', 'nodes', 'will', 'be', 'folded', '.', 'Finally', 'the', 'batches', 'will', 'be', 'added', 'to', 'the', 'main', 'XML', '.']
train
https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/transfer.py#L309-L369
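Note: the node assembly in the record above is ordinary ElementTree-style tree building; a toy sketch of the same append pattern (element values are hypothetical, and the real code uses its own node dictionary rather than SubElement).
import xml.etree.ElementTree as ET

pmt_inf = ET.Element('PmtInf')
ET.SubElement(pmt_inf, 'PmtInfId').text = 'BATCH-0001'
ET.SubElement(pmt_inf, 'PmtMtd').text = 'TRF'
ET.SubElement(pmt_inf, 'NbOfTxs').text = '2'
ET.SubElement(pmt_inf, 'CtrlSum').text = '150.00'
print(ET.tostring(pmt_inf).decode())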
2,118
JIC-CSB/jicimagelib
jicimagelib/io.py
BFConvertWrapper.manifest
def manifest(self, entry): """Returns manifest as a list. :param entry: :class:`jicimagelib.image.FileBackend.Entry` :returns: list """ entries = [] for fname in self._sorted_nicely(os.listdir(entry.directory)): if fname == 'manifest.json': continue fpath = os.path.abspath(os.path.join(entry.directory, fname)) metadata = self.metadata_from_fname(fname) entries.append({"filename": fpath, "series": metadata.s, "channel": metadata.c, "zslice": metadata.z, "timepoint": metadata.t}) return entries
python
def manifest(self, entry): """Returns manifest as a list. :param entry: :class:`jicimagelib.image.FileBackend.Entry` :returns: list """ entries = [] for fname in self._sorted_nicely(os.listdir(entry.directory)): if fname == 'manifest.json': continue fpath = os.path.abspath(os.path.join(entry.directory, fname)) metadata = self.metadata_from_fname(fname) entries.append({"filename": fpath, "series": metadata.s, "channel": metadata.c, "zslice": metadata.z, "timepoint": metadata.t}) return entries
['def', 'manifest', '(', 'self', ',', 'entry', ')', ':', 'entries', '=', '[', ']', 'for', 'fname', 'in', 'self', '.', '_sorted_nicely', '(', 'os', '.', 'listdir', '(', 'entry', '.', 'directory', ')', ')', ':', 'if', 'fname', '==', "'manifest.json'", ':', 'continue', 'fpath', '=', 'os', '.', 'path', '.', 'abspath', '(', 'os', '.', 'path', '.', 'join', '(', 'entry', '.', 'directory', ',', 'fname', ')', ')', 'metadata', '=', 'self', '.', 'metadata_from_fname', '(', 'fname', ')', 'entries', '.', 'append', '(', '{', '"filename"', ':', 'fpath', ',', '"series"', ':', 'metadata', '.', 's', ',', '"channel"', ':', 'metadata', '.', 'c', ',', '"zslice"', ':', 'metadata', '.', 'z', ',', '"timepoint"', ':', 'metadata', '.', 't', '}', ')', 'return', 'entries']
Returns manifest as a list. :param entry: :class:`jicimagelib.image.FileBackend.Entry` :returns: list
['Returns', 'manifest', 'as', 'a', 'list', '.', ':', 'param', 'entry', ':', ':', 'class', ':', 'jicimagelib', '.', 'image', '.', 'FileBackend', '.', 'Entry', ':', 'returns', ':', 'list']
train
https://github.com/JIC-CSB/jicimagelib/blob/fbd67accb2e6d55969c6d4ed7e8b4bb4ab65cd44/jicimagelib/io.py#L129-L146
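Note: _sorted_nicely is not shown in the record; a common natural-sort implementation that would give the intended Z2-before-Z10 ordering looks like this (an assumption for illustration, not the project's code).
import re

def sorted_nicely(names):
    def key(s):
        # Split digit runs out so they compare numerically, not lexically.
        return [int(t) if t.isdigit() else t.lower()
                for t in re.split('([0-9]+)', s)]
    return sorted(names, key=key)

print(sorted_nicely(['S0_C0_Z10_T0.png', 'S0_C0_Z2_T0.png']))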
2,119
johnnoone/aioconsul
aioconsul/client/operator_endpoint.py
OperatorEndpoint.peer_delete
async def peer_delete(self, *, dc=None, address): """Remove the server with given address from the Raft configuration Parameters: dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. address (str): "IP:port" of the server to remove. Returns: bool: ``True`` on success There are rare cases where a peer may be left behind in the Raft configuration even though the server is no longer present and known to the cluster. This endpoint can be used to remove the failed server so that it is no longer affects the Raft quorum. """ address = extract_attr(address, keys=["Address"]) params = {"dc": dc, "address": address} response = await self._api.delete("/v1/operator/raft/peer", params=params) return response.status < 400
python
async def peer_delete(self, *, dc=None, address): """Remove the server with given address from the Raft configuration Parameters: dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. address (str): "IP:port" of the server to remove. Returns: bool: ``True`` on success There are rare cases where a peer may be left behind in the Raft configuration even though the server is no longer present and known to the cluster. This endpoint can be used to remove the failed server so that it is no longer affects the Raft quorum. """ address = extract_attr(address, keys=["Address"]) params = {"dc": dc, "address": address} response = await self._api.delete("/v1/operator/raft/peer", params=params) return response.status < 400
['async', 'def', 'peer_delete', '(', 'self', ',', '*', ',', 'dc', '=', 'None', ',', 'address', ')', ':', 'address', '=', 'extract_attr', '(', 'address', ',', 'keys', '=', '[', '"Address"', ']', ')', 'params', '=', '{', '"dc"', ':', 'dc', ',', '"address"', ':', 'address', '}', 'response', '=', 'await', 'self', '.', '_api', '.', 'delete', '(', '"/v1/operator/raft/peer"', ',', 'params', '=', 'params', ')', 'return', 'response', '.', 'status', '<', '400']
Remove the server with given address from the Raft configuration Parameters: dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. address (str): "IP:port" of the server to remove. Returns: bool: ``True`` on success There are rare cases where a peer may be left behind in the Raft configuration even though the server is no longer present and known to the cluster. This endpoint can be used to remove the failed server so that it is no longer affects the Raft quorum.
['Remove', 'the', 'server', 'with', 'given', 'address', 'from', 'the', 'Raft', 'configuration']
train
https://github.com/johnnoone/aioconsul/blob/02f7a529d7dc2e49bed942111067aa5faf320e90/aioconsul/client/operator_endpoint.py#L74-L93
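Note: a hedged usage sketch for peer_delete; the client construction and the 'operator' attribute name are assumptions based on the endpoint class above, and the peer address is hypothetical.
import asyncio
from aioconsul import Consul  # assumed top-level client

async def main():
    client = Consul()
    # Remove a stale Raft peer by its "IP:port" address.
    ok = await client.operator.peer_delete(address='10.0.0.5:8300')
    print(ok)

asyncio.get_event_loop().run_until_complete(main())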
2,120
tomduck/pandoc-xnos
pandocxnos/pandocattributes.py
PandocAttributes.to_dict
def to_dict(self): """Returns attributes formatted as a dictionary.""" d = {'id': self.id, 'classes': self.classes} d.update(self.kvs) return d
python
def to_dict(self): """Returns attributes formatted as a dictionary.""" d = {'id': self.id, 'classes': self.classes} d.update(self.kvs) return d
['def', 'to_dict', '(', 'self', ')', ':', 'd', '=', '{', "'id'", ':', 'self', '.', 'id', ',', "'classes'", ':', 'self', '.', 'classes', '}', 'd', '.', 'update', '(', 'self', '.', 'kvs', ')', 'return', 'd']
Returns attributes formatted as a dictionary.
['Returns', 'attributes', 'formatted', 'as', 'a', 'dictionary', '.']
train
https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/pandocattributes.py#L182-L186
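Note: to_dict simply merges the id, classes and key-value pairs; an illustrative equivalent with hypothetical attribute values.
d = {'id': 'fig:plot', 'classes': ['unnumbered']}  # stand-ins for self.id / self.classes
d.update({'width': '50%'})                         # stand-in for self.kvs
print(d)  # {'id': 'fig:plot', 'classes': ['unnumbered'], 'width': '50%'}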
2,121
Spinmob/spinmob
_data.py
fitter.degrees_of_freedom
def degrees_of_freedom(self): """ Returns the number of degrees of freedom. """ if len(self._set_xdata)==0 or len(self._set_ydata)==0: return None # Temporary hack: get the studentized residuals, which uses the massaged data # This should later be changed to get_massaged_data() r = self.studentized_residuals() # Happens if data / functions not defined if r == None: return # calculate the number of points N = 0.0 for i in range(len(r)): N += len(r[i]) return N-len(self._pnames)
python
def degrees_of_freedom(self): """ Returns the number of degrees of freedom. """ if len(self._set_xdata)==0 or len(self._set_ydata)==0: return None # Temporary hack: get the studentized residuals, which uses the massaged data # This should later be changed to get_massaged_data() r = self.studentized_residuals() # Happens if data / functions not defined if r == None: return # calculate the number of points N = 0.0 for i in range(len(r)): N += len(r[i]) return N-len(self._pnames)
['def', 'degrees_of_freedom', '(', 'self', ')', ':', 'if', 'len', '(', 'self', '.', '_set_xdata', ')', '==', '0', 'or', 'len', '(', 'self', '.', '_set_ydata', ')', '==', '0', ':', 'return', 'None', '# Temporary hack: get the studentized residuals, which uses the massaged data', '# This should later be changed to get_massaged_data()', 'r', '=', 'self', '.', 'studentized_residuals', '(', ')', '# Happens if data / functions not defined', 'if', 'r', '==', 'None', ':', 'return', '# calculate the number of points', 'N', '=', '0.0', 'for', 'i', 'in', 'range', '(', 'len', '(', 'r', ')', ')', ':', 'N', '+=', 'len', '(', 'r', '[', 'i', ']', ')', 'return', 'N', '-', 'len', '(', 'self', '.', '_pnames', ')']
Returns the number of degrees of freedom.
['Returns', 'the', 'number', 'of', 'degrees', 'of', 'freedom', '.']
train
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_data.py#L2481-L2498
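Note: the arithmetic above is just total points minus fitted parameters; a worked example with hypothetical residual lists.
r = [range(40), range(60)]    # stand-in for studentized residuals of two data sets
N = sum(len(ri) for ri in r)  # 100 data points in total
print(N - 3)                  # 97 degrees of freedom for a 3-parameter fit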
2,122
blockcypher/blockcypher-python
blockcypher/utils.py
to_satoshis
def to_satoshis(input_quantity, input_type): ''' convert to satoshis, no rounding ''' assert input_type in UNIT_CHOICES, input_type # convert to satoshis if input_type in ('btc', 'mbtc', 'bit'): satoshis = float(input_quantity) * float(UNIT_MAPPINGS[input_type]['satoshis_per']) elif input_type == 'satoshi': satoshis = input_quantity else: raise Exception('Invalid Unit Choice: %s' % input_type) return int(satoshis)
python
def to_satoshis(input_quantity, input_type): ''' convert to satoshis, no rounding ''' assert input_type in UNIT_CHOICES, input_type # convert to satoshis if input_type in ('btc', 'mbtc', 'bit'): satoshis = float(input_quantity) * float(UNIT_MAPPINGS[input_type]['satoshis_per']) elif input_type == 'satoshi': satoshis = input_quantity else: raise Exception('Invalid Unit Choice: %s' % input_type) return int(satoshis)
['def', 'to_satoshis', '(', 'input_quantity', ',', 'input_type', ')', ':', 'assert', 'input_type', 'in', 'UNIT_CHOICES', ',', 'input_type', '# convert to satoshis', 'if', 'input_type', 'in', '(', "'btc'", ',', "'mbtc'", ',', "'bit'", ')', ':', 'satoshis', '=', 'float', '(', 'input_quantity', ')', '*', 'float', '(', 'UNIT_MAPPINGS', '[', 'input_type', ']', '[', "'satoshis_per'", ']', ')', 'elif', 'input_type', '==', "'satoshi'", ':', 'satoshis', '=', 'input_quantity', 'else', ':', 'raise', 'Exception', '(', "'Invalid Unit Choice: %s'", '%', 'input_type', ')', 'return', 'int', '(', 'satoshis', ')']
convert to satoshis, no rounding
['convert', 'to', 'satoshis', 'no', 'rounding']
train
https://github.com/blockcypher/blockcypher-python/blob/7601ea21916957ff279384fd699527ff9c28a56e/blockcypher/utils.py#L27-L39
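Note: a self-contained re-creation of the conversion using the standard unit ratios; the real UNIT_MAPPINGS table is not shown in the record, so the mapping below is an assumption.
SATOSHIS_PER = {'btc': 1e8, 'mbtc': 1e5, 'bit': 1e2}  # standard ratios

def to_satoshis(quantity, unit):
    if unit in SATOSHIS_PER:
        return int(float(quantity) * SATOSHIS_PER[unit])
    if unit == 'satoshi':
        return int(quantity)
    raise ValueError('Invalid Unit Choice: %s' % unit)

print(to_satoshis('0.5', 'btc'))  # 50000000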
2,123
jonhadfield/creds
lib/creds/ssh.py
PublicKey.raw
def raw(self): """Return raw key. returns: str: raw key """ if self._raw: return text_type(self._raw).strip("\r\n") else: return text_type(base64decode(self._b64encoded)).strip("\r\n")
python
def raw(self): """Return raw key. returns: str: raw key """ if self._raw: return text_type(self._raw).strip("\r\n") else: return text_type(base64decode(self._b64encoded)).strip("\r\n")
['def', 'raw', '(', 'self', ')', ':', 'if', 'self', '.', '_raw', ':', 'return', 'text_type', '(', 'self', '.', '_raw', ')', '.', 'strip', '(', '"\\r\\n"', ')', 'else', ':', 'return', 'text_type', '(', 'base64decode', '(', 'self', '.', '_b64encoded', ')', ')', '.', 'strip', '(', '"\\r\\n"', ')']
Return raw key. returns: str: raw key
['Return', 'raw', 'key', '.']
train
https://github.com/jonhadfield/creds/blob/b2053b43516cf742c6e4c2b79713bc625592f47c/lib/creds/ssh.py#L43-L52
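Note: the base64 branch above is standard-library decoding plus a trailing-newline strip; an illustrative round trip with a truncated, hypothetical key.
import base64

raw_key = 'ssh-rsa AAAAB3Nza... user@host\n'  # truncated, hypothetical key
encoded = base64.b64encode(raw_key.encode('utf-8'))
print(base64.b64decode(encoded).decode('utf-8').strip('\r\n'))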
2,124
joaopcanario/imports
imports/imports.py
check
def check(path_dir, requirements_name='requirements.txt'): '''Look for unused packages listed on project requirements''' requirements = _load_requirements(requirements_name, path_dir) imported_modules = _iter_modules(path_dir) installed_packages = _list_installed_packages() imported_modules.update(_excluded_imports()) diff = {lib for lib in installed_packages if lib not in imported_modules} with_dependencies, _ = _list_dependencies(diff) unused_dependencies = sorted([d for d in diff if d in requirements]) for unused_dependency in unused_dependencies: if with_dependencies.get(unused_dependency): print(' - {}'.format(unused_dependency)) for dependency in with_dependencies.get(unused_dependency): print('\t - {}'.format(dependency)) else: print(' - {}'.format(unused_dependency))
python
def check(path_dir, requirements_name='requirements.txt'): '''Look for unused packages listed on project requirements''' requirements = _load_requirements(requirements_name, path_dir) imported_modules = _iter_modules(path_dir) installed_packages = _list_installed_packages() imported_modules.update(_excluded_imports()) diff = {lib for lib in installed_packages if lib not in imported_modules} with_dependencies, _ = _list_dependencies(diff) unused_dependencies = sorted([d for d in diff if d in requirements]) for unused_dependency in unused_dependencies: if with_dependencies.get(unused_dependency): print(' - {}'.format(unused_dependency)) for dependency in with_dependencies.get(unused_dependency): print('\t - {}'.format(dependency)) else: print(' - {}'.format(unused_dependency))
['def', 'check', '(', 'path_dir', ',', 'requirements_name', '=', "'requirements.txt'", ')', ':', 'requirements', '=', '_load_requirements', '(', 'requirements_name', ',', 'path_dir', ')', 'imported_modules', '=', '_iter_modules', '(', 'path_dir', ')', 'installed_packages', '=', '_list_installed_packages', '(', ')', 'imported_modules', '.', 'update', '(', '_excluded_imports', '(', ')', ')', 'diff', '=', '{', 'lib', 'for', 'lib', 'in', 'installed_packages', 'if', 'lib', 'not', 'in', 'imported_modules', '}', 'with_dependencies', ',', '_', '=', '_list_dependencies', '(', 'diff', ')', 'unused_dependencies', '=', 'sorted', '(', '[', 'd', 'for', 'd', 'in', 'diff', 'if', 'd', 'in', 'requirements', ']', ')', 'for', 'unused_dependency', 'in', 'unused_dependencies', ':', 'if', 'with_dependencies', '.', 'get', '(', 'unused_dependency', ')', ':', 'print', '(', "' - {}'", '.', 'format', '(', 'unused_dependency', ')', ')', 'for', 'dependency', 'in', 'with_dependencies', '.', 'get', '(', 'unused_dependency', ')', ':', 'print', '(', "'\\t - {}'", '.', 'format', '(', 'dependency', ')', ')', 'else', ':', 'print', '(', "' - {}'", '.', 'format', '(', 'unused_dependency', ')', ')']
Look for unused packages listed on project requirements
['Look', 'for', 'unused', 'packages', 'listed', 'on', 'project', 'requirements']
train
https://github.com/joaopcanario/imports/blob/46db0d3d2aa55427027bf0e91d61a24d52730337/imports/imports.py#L94-L112
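A hedged usage sketch: pointed at a project directory, check() prints requirements entries that are installed but never imported, indenting each one's own dependencies. The import path and directory below are assumptions, not taken from the record.

from imports.imports import check  # assumed import path, per the file location above

check('/path/to/my_project', requirements_name='requirements.txt')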
2,125
openstack/networking-cisco
networking_cisco/plugins/cisco/db/l3/ha_db.py
HA_db_mixin._delete_redundancy_routers
def _delete_redundancy_routers(self, context, router_db):
    """To be called in delete_router() BEFORE router has been
    deleted in DB.

    The router should have no interfaces.
    """
    e_context = context.elevated()
    for binding in router_db.redundancy_bindings:
        self.delete_router(e_context, binding.redundancy_router_id)
        LOG.debug("Deleted redundancy router %s",
                  binding.redundancy_router_id)
    if router_db.gw_port_id:
        # delete ha settings and extra port for gateway (VIP) port
        self._delete_ha_group(e_context, router_db.gw_port_id)
python
def _delete_redundancy_routers(self, context, router_db):
    """To be called in delete_router() BEFORE router has been
    deleted in DB.

    The router should have no interfaces.
    """
    e_context = context.elevated()
    for binding in router_db.redundancy_bindings:
        self.delete_router(e_context, binding.redundancy_router_id)
        LOG.debug("Deleted redundancy router %s",
                  binding.redundancy_router_id)
    if router_db.gw_port_id:
        # delete ha settings and extra port for gateway (VIP) port
        self._delete_ha_group(e_context, router_db.gw_port_id)
['def', '_delete_redundancy_routers', '(', 'self', ',', 'context', ',', 'router_db', ')', ':', 'e_context', '=', 'context', '.', 'elevated', '(', ')', 'for', 'binding', 'in', 'router_db', '.', 'redundancy_bindings', ':', 'self', '.', 'delete_router', '(', 'e_context', ',', 'binding', '.', 'redundancy_router_id', ')', 'LOG', '.', 'debug', '(', '"Deleted redundancy router %s"', ',', 'binding', '.', 'redundancy_router_id', ')', 'if', 'router_db', '.', 'gw_port_id', ':', '# delete ha settings and extra port for gateway (VIP) port', 'self', '.', '_delete_ha_group', '(', 'e_context', ',', 'router_db', '.', 'gw_port_id', ')']
To be called in delete_router() BEFORE router has been deleted in DB.

The router should have no interfaces.
['To', 'be', 'called', 'in', 'delete_router', '()', 'BEFORE', 'router', 'has', 'been', 'deleted', 'in', 'DB', '.', 'The', 'router', 'should', 'have', 'not', 'interfaces', '.']
train
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/db/l3/ha_db.py#L539-L550
2,126
f3at/feat
src/feat/extern/log/log.py
infoObject
def infoObject(object, cat, format, *args):
    """
    Log an informational message in the given category.
    """
    doLog(INFO, object, cat, format, args)
python
def infoObject(object, cat, format, *args):
    """
    Log an informational message in the given category.
    """
    doLog(INFO, object, cat, format, args)
['def', 'infoObject', '(', 'object', ',', 'cat', ',', 'format', ',', '*', 'args', ')', ':', 'doLog', '(', 'INFO', ',', 'object', ',', 'cat', ',', 'format', ',', 'args', ')']
Log an informational message in the given category.
['Log', 'an', 'informational', 'message', 'in', 'the', 'given', 'category', '.']
train
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/extern/log/log.py#L385-L389
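A small usage sketch, assuming the module is importable under the path shown above and that feat's logging has been initialised; the object argument is typically the caller, and the trailing arguments are printf-style.

from feat.extern.log.log import infoObject  # assumed import path

class Connection(object):
    pass

# logs at INFO level in the 'net' category
infoObject(Connection(), 'net', 'connected to %s:%d', 'example.com', 80)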
2,127
DataDog/integrations-core
spark/datadog_checks/spark/spark.py
SparkCheck._standalone_init
def _standalone_init(self, spark_master_address, pre_20_mode, requests_config, tags):
    """
    Return a dictionary of {app_id: (app_name, tracking_url)} for the running Spark applications
    """
    metrics_json = self._rest_request_to_json(
        spark_master_address, SPARK_MASTER_STATE_PATH, SPARK_STANDALONE_SERVICE_CHECK, requests_config, tags
    )

    running_apps = {}

    if metrics_json.get('activeapps'):
        for app in metrics_json['activeapps']:
            app_id = app.get('id')
            app_name = app.get('name')

            # Parse through the HTML to grab the application driver's link
            try:
                app_url = self._get_standalone_app_url(app_id, spark_master_address, requests_config, tags)

                if app_id and app_name and app_url:
                    if pre_20_mode:
                        self.log.debug('Getting application list in pre-20 mode')
                        applist = self._rest_request_to_json(
                            app_url, SPARK_APPS_PATH, SPARK_STANDALONE_SERVICE_CHECK, requests_config, tags
                        )
                        for appl in applist:
                            aid = appl.get('id')
                            aname = appl.get('name')
                            running_apps[aid] = (aname, app_url)
                    else:
                        running_apps[app_id] = (app_name, app_url)
            except Exception:
                # it's possible for the requests to fail if the job
                # completed since we got the list of apps. Just continue
                pass

    # Report success after gathering metrics from Spark master
    self.service_check(
        SPARK_STANDALONE_SERVICE_CHECK,
        AgentCheck.OK,
        tags=['url:%s' % spark_master_address] + tags,
        message='Connection to Spark master "%s" was successful' % spark_master_address,
    )
    self.log.info("Returning running apps %s" % running_apps)

    return running_apps
python
def _standalone_init(self, spark_master_address, pre_20_mode, requests_config, tags):
    """
    Return a dictionary of {app_id: (app_name, tracking_url)} for the running Spark applications
    """
    metrics_json = self._rest_request_to_json(
        spark_master_address, SPARK_MASTER_STATE_PATH, SPARK_STANDALONE_SERVICE_CHECK, requests_config, tags
    )

    running_apps = {}

    if metrics_json.get('activeapps'):
        for app in metrics_json['activeapps']:
            app_id = app.get('id')
            app_name = app.get('name')

            # Parse through the HTML to grab the application driver's link
            try:
                app_url = self._get_standalone_app_url(app_id, spark_master_address, requests_config, tags)

                if app_id and app_name and app_url:
                    if pre_20_mode:
                        self.log.debug('Getting application list in pre-20 mode')
                        applist = self._rest_request_to_json(
                            app_url, SPARK_APPS_PATH, SPARK_STANDALONE_SERVICE_CHECK, requests_config, tags
                        )
                        for appl in applist:
                            aid = appl.get('id')
                            aname = appl.get('name')
                            running_apps[aid] = (aname, app_url)
                    else:
                        running_apps[app_id] = (app_name, app_url)
            except Exception:
                # it's possible for the requests to fail if the job
                # completed since we got the list of apps. Just continue
                pass

    # Report success after gathering metrics from Spark master
    self.service_check(
        SPARK_STANDALONE_SERVICE_CHECK,
        AgentCheck.OK,
        tags=['url:%s' % spark_master_address] + tags,
        message='Connection to Spark master "%s" was successful' % spark_master_address,
    )
    self.log.info("Returning running apps %s" % running_apps)

    return running_apps
['def', '_standalone_init', '(', 'self', ',', 'spark_master_address', ',', 'pre_20_mode', ',', 'requests_config', ',', 'tags', ')', ':', 'metrics_json', '=', 'self', '.', '_rest_request_to_json', '(', 'spark_master_address', ',', 'SPARK_MASTER_STATE_PATH', ',', 'SPARK_STANDALONE_SERVICE_CHECK', ',', 'requests_config', ',', 'tags', ')', 'running_apps', '=', '{', '}', 'if', 'metrics_json', '.', 'get', '(', "'activeapps'", ')', ':', 'for', 'app', 'in', 'metrics_json', '[', "'activeapps'", ']', ':', 'app_id', '=', 'app', '.', 'get', '(', "'id'", ')', 'app_name', '=', 'app', '.', 'get', '(', "'name'", ')', "# Parse through the HTML to grab the application driver's link", 'try', ':', 'app_url', '=', 'self', '.', '_get_standalone_app_url', '(', 'app_id', ',', 'spark_master_address', ',', 'requests_config', ',', 'tags', ')', 'if', 'app_id', 'and', 'app_name', 'and', 'app_url', ':', 'if', 'pre_20_mode', ':', 'self', '.', 'log', '.', 'debug', '(', "'Getting application list in pre-20 mode'", ')', 'applist', '=', 'self', '.', '_rest_request_to_json', '(', 'app_url', ',', 'SPARK_APPS_PATH', ',', 'SPARK_STANDALONE_SERVICE_CHECK', ',', 'requests_config', ',', 'tags', ')', 'for', 'appl', 'in', 'applist', ':', 'aid', '=', 'appl', '.', 'get', '(', "'id'", ')', 'aname', '=', 'appl', '.', 'get', '(', "'name'", ')', 'running_apps', '[', 'aid', ']', '=', '(', 'aname', ',', 'app_url', ')', 'else', ':', 'running_apps', '[', 'app_id', ']', '=', '(', 'app_name', ',', 'app_url', ')', 'except', 'Exception', ':', "# it's possible for the requests to fail if the job", '# completed since we got the list of apps. Just continue', 'pass', '# Report success after gathering metrics from Spark master', 'self', '.', 'service_check', '(', 'SPARK_STANDALONE_SERVICE_CHECK', ',', 'AgentCheck', '.', 'OK', ',', 'tags', '=', '[', "'url:%s'", '%', 'spark_master_address', ']', '+', 'tags', ',', 'message', '=', '\'Connection to Spark master "%s" was successful\'', '%', 'spark_master_address', ',', ')', 'self', '.', 'log', '.', 'info', '(', '"Returning running apps %s"', '%', 'running_apps', ')', 'return', 'running_apps']
Return a dictionary of {app_id: (app_name, tracking_url)} for the running Spark applications
['Return', 'a', 'dictionary', 'of', '{', 'app_id', ':', '(', 'app_name', 'tracking_url', ')', '}', 'for', 'the', 'running', 'Spark', 'applications']
train
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/spark/datadog_checks/spark/spark.py#L304-L348
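The shape of the mapping this method returns, with made-up IDs and URLs; callers can unpack each entry as (app_name, tracking_url).

running_apps = {  # illustrative return value of _standalone_init
    'app-20190101120000-0001': ('nightly-etl', 'http://driver-host:4040'),
    'app-20190101130000-0002': ('ad-hoc-query', 'http://driver-host:4041'),
}
for app_id, (app_name, tracking_url) in running_apps.items():
    print('%s -> %s (%s)' % (app_id, app_name, tracking_url))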
2,128
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py
brocade_interface_ext.get_interface_detail_output_interface_configured_line_speed
def get_interface_detail_output_interface_configured_line_speed(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_interface_detail = ET.Element("get_interface_detail")
    config = get_interface_detail
    output = ET.SubElement(get_interface_detail, "output")
    interface = ET.SubElement(output, "interface")
    interface_type_key = ET.SubElement(interface, "interface-type")
    interface_type_key.text = kwargs.pop('interface_type')
    interface_name_key = ET.SubElement(interface, "interface-name")
    interface_name_key.text = kwargs.pop('interface_name')
    configured_line_speed = ET.SubElement(interface, "configured-line-speed")
    configured_line_speed.text = kwargs.pop('configured_line_speed')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
def get_interface_detail_output_interface_configured_line_speed(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_interface_detail = ET.Element("get_interface_detail")
    config = get_interface_detail
    output = ET.SubElement(get_interface_detail, "output")
    interface = ET.SubElement(output, "interface")
    interface_type_key = ET.SubElement(interface, "interface-type")
    interface_type_key.text = kwargs.pop('interface_type')
    interface_name_key = ET.SubElement(interface, "interface-name")
    interface_name_key.text = kwargs.pop('interface_name')
    configured_line_speed = ET.SubElement(interface, "configured-line-speed")
    configured_line_speed.text = kwargs.pop('configured_line_speed')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
['def', 'get_interface_detail_output_interface_configured_line_speed', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'get_interface_detail', '=', 'ET', '.', 'Element', '(', '"get_interface_detail"', ')', 'config', '=', 'get_interface_detail', 'output', '=', 'ET', '.', 'SubElement', '(', 'get_interface_detail', ',', '"output"', ')', 'interface', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"interface"', ')', 'interface_type_key', '=', 'ET', '.', 'SubElement', '(', 'interface', ',', '"interface-type"', ')', 'interface_type_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'interface_type'", ')', 'interface_name_key', '=', 'ET', '.', 'SubElement', '(', 'interface', ',', '"interface-name"', ')', 'interface_name_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'interface_name'", ')', 'configured_line_speed', '=', 'ET', '.', 'SubElement', '(', 'interface', ',', '"configured-line-speed"', ')', 'configured_line_speed', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'configured_line_speed'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py#L1024-L1040
2,129
pandeylab/pythomics
pythomics/proteomics/parsers.py
MQIterator.parseFullScan
def parseFullScan(self, i, modifications=True):
    """
    Parses scan info to give a Spectrum object for plotting.
    Takes significantly longer since it has to unzip/parse xml.
    """
    scanObj = PeptideObject()
    peptide = str(i[1])
    pid = i[2]
    if modifications:
        sql = 'select aam.ModificationName,pam.Position,aam.DeltaMass from peptidesaminoacidmodifications pam left join aminoacidmodifications aam on (aam.AminoAcidModificationID=pam.AminoAcidModificationID) where pam.PeptideID=%s' % pid
        for row in self.conn.execute(sql):
            scanObj.addModification(peptide[row[1]], str(row[1]), str(row[2]), row[0])
    scanObj.peptide = peptide
    if self.decompressScanInfo(scanObj, i[0]):
        return scanObj
    return None
python
def parseFullScan(self, i, modifications=True):
    """
    Parses scan info to give a Spectrum object for plotting.
    Takes significantly longer since it has to unzip/parse xml.
    """
    scanObj = PeptideObject()
    peptide = str(i[1])
    pid = i[2]
    if modifications:
        sql = 'select aam.ModificationName,pam.Position,aam.DeltaMass from peptidesaminoacidmodifications pam left join aminoacidmodifications aam on (aam.AminoAcidModificationID=pam.AminoAcidModificationID) where pam.PeptideID=%s' % pid
        for row in self.conn.execute(sql):
            scanObj.addModification(peptide[row[1]], str(row[1]), str(row[2]), row[0])
    scanObj.peptide = peptide
    if self.decompressScanInfo(scanObj, i[0]):
        return scanObj
    return None
['def', 'parseFullScan', '(', 'self', ',', 'i', ',', 'modifications', '=', 'True', ')', ':', 'scanObj', '=', 'PeptideObject', '(', ')', 'peptide', '=', 'str', '(', 'i', '[', '1', ']', ')', 'pid', '=', 'i', '[', '2', ']', 'if', 'modifications', ':', 'sql', '=', "'select aam.ModificationName,pam.Position,aam.DeltaMass from peptidesaminoacidmodifications pam left join aminoacidmodifications aam on (aam.AminoAcidModificationID=pam.AminoAcidModificationID) where pam.PeptideID=%s'", '%', 'pid', 'for', 'row', 'in', 'self', '.', 'conn', '.', 'execute', '(', 'sql', ')', ':', 'scanObj', '.', 'addModification', '(', 'peptide', '[', 'row', '[', '1', ']', ']', ',', 'str', '(', 'row', '[', '1', ']', ')', ',', 'str', '(', 'row', '[', '2', ']', ')', ',', 'row', '[', '0', ']', ')', 'scanObj', '.', 'peptide', '=', 'peptide', 'if', 'self', '.', 'decompressScanInfo', '(', 'scanObj', ',', 'i', '[', '0', ']', ')', ':', 'return', 'scanObj', 'return', 'None']
Parses scan info to give a Spectrum object for plotting.
Takes significantly longer since it has to unzip/parse xml.
['parses', 'scan', 'info', 'for', 'giving', 'a', 'Spectrum', 'Obj', 'for', 'plotting', '.', 'takes', 'significantly', 'longer', 'since', 'it', 'has', 'to', 'unzip', '/', 'parse', 'xml']
train
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/proteomics/parsers.py#L1901-L1915
2,130
SethMMorton/natsort
natsort/utils.py
natsort_key
def natsort_key(val, key, string_func, bytes_func, num_func):
    """
    Key to sort strings and numbers naturally.

    It works by splitting the string into components of strings and numbers,
    and then converting the numbers into actual ints or floats.

    Parameters
    ----------
    val : str | unicode | bytes | int | float | iterable
    key : callable | None
        A key to apply to the *val* before any other operations are performed.
    string_func : callable
        If *val* (or the output of *key* if given) is of type *str*, this
        function will be applied to it. The function must return a tuple.
    bytes_func : callable
        If *val* (or the output of *key* if given) is of type *bytes*, this
        function will be applied to it. The function must return a tuple.
    num_func : callable
        If *val* (or the output of *key* if given) is not of type *bytes*,
        *str*, nor is iterable, this function will be applied to it. The
        function must return a tuple.

    Returns
    -------
    out : tuple
        The string split into its string and numeric components.
        It *always* starts with a string, and then alternates between numbers
        and strings (unless it was applied recursively, in which case it will
        return tuples of tuples, but the lowest-level tuples will then
        *always* start with a string etc.).

    See Also
    --------
    parse_string_factory
    parse_bytes_factory
    parse_number_factory

    """
    # Apply key if needed
    if key is not None:
        val = key(val)

    # Assume the input are strings, which is the most common case
    try:
        return string_func(val)
    except (TypeError, AttributeError):

        # If bytes type, use the bytes_func
        if type(val) in (bytes,):
            return bytes_func(val)

        # Otherwise, assume it is an iterable that must be parsed recursively.
        # Do not apply the key recursively.
        try:
            return tuple(
                natsort_key(x, None, string_func, bytes_func, num_func)
                for x in val
            )

        # If that failed, it must be a number.
        except TypeError:
            return num_func(val)
python
def natsort_key(val, key, string_func, bytes_func, num_func):
    """
    Key to sort strings and numbers naturally.

    It works by splitting the string into components of strings and numbers,
    and then converting the numbers into actual ints or floats.

    Parameters
    ----------
    val : str | unicode | bytes | int | float | iterable
    key : callable | None
        A key to apply to the *val* before any other operations are performed.
    string_func : callable
        If *val* (or the output of *key* if given) is of type *str*, this
        function will be applied to it. The function must return a tuple.
    bytes_func : callable
        If *val* (or the output of *key* if given) is of type *bytes*, this
        function will be applied to it. The function must return a tuple.
    num_func : callable
        If *val* (or the output of *key* if given) is not of type *bytes*,
        *str*, nor is iterable, this function will be applied to it. The
        function must return a tuple.

    Returns
    -------
    out : tuple
        The string split into its string and numeric components.
        It *always* starts with a string, and then alternates between numbers
        and strings (unless it was applied recursively, in which case it will
        return tuples of tuples, but the lowest-level tuples will then
        *always* start with a string etc.).

    See Also
    --------
    parse_string_factory
    parse_bytes_factory
    parse_number_factory

    """
    # Apply key if needed
    if key is not None:
        val = key(val)

    # Assume the input are strings, which is the most common case
    try:
        return string_func(val)
    except (TypeError, AttributeError):

        # If bytes type, use the bytes_func
        if type(val) in (bytes,):
            return bytes_func(val)

        # Otherwise, assume it is an iterable that must be parsed recursively.
        # Do not apply the key recursively.
        try:
            return tuple(
                natsort_key(x, None, string_func, bytes_func, num_func)
                for x in val
            )

        # If that failed, it must be a number.
        except TypeError:
            return num_func(val)
['def', 'natsort_key', '(', 'val', ',', 'key', ',', 'string_func', ',', 'bytes_func', ',', 'num_func', ')', ':', '# Apply key if needed', 'if', 'key', 'is', 'not', 'None', ':', 'val', '=', 'key', '(', 'val', ')', '# Assume the input are strings, which is the most common case', 'try', ':', 'return', 'string_func', '(', 'val', ')', 'except', '(', 'TypeError', ',', 'AttributeError', ')', ':', '# If bytes type, use the bytes_func', 'if', 'type', '(', 'val', ')', 'in', '(', 'bytes', ',', ')', ':', 'return', 'bytes_func', '(', 'val', ')', '# Otherwise, assume it is an iterable that must be parsed recursively.', '# Do not apply the key recursively.', 'try', ':', 'return', 'tuple', '(', 'natsort_key', '(', 'x', ',', 'None', ',', 'string_func', ',', 'bytes_func', ',', 'num_func', ')', 'for', 'x', 'in', 'val', ')', '# If that failed, it must be a number.', 'except', 'TypeError', ':', 'return', 'num_func', '(', 'val', ')']
Key to sort strings and numbers naturally.

It works by splitting the string into components of strings and numbers,
and then converting the numbers into actual ints or floats.

Parameters
----------
val : str | unicode | bytes | int | float | iterable
key : callable | None
    A key to apply to the *val* before any other operations are performed.
string_func : callable
    If *val* (or the output of *key* if given) is of type *str*, this
    function will be applied to it. The function must return a tuple.
bytes_func : callable
    If *val* (or the output of *key* if given) is of type *bytes*, this
    function will be applied to it. The function must return a tuple.
num_func : callable
    If *val* (or the output of *key* if given) is not of type *bytes*,
    *str*, nor is iterable, this function will be applied to it. The
    function must return a tuple.

Returns
-------
out : tuple
    The string split into its string and numeric components.
    It *always* starts with a string, and then alternates between numbers
    and strings (unless it was applied recursively, in which case it will
    return tuples of tuples, but the lowest-level tuples will then
    *always* start with a string etc.).

See Also
--------
parse_string_factory
parse_bytes_factory
parse_number_factory
['Key', 'to', 'sort', 'strings', 'and', 'numbers', 'naturally', '.']
train
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L186-L251
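A toy demonstration of the splitting contract, with hand-rolled stand-ins for the three factory-produced callables named under See Also; real callers would pass the outputs of parse_string_factory and friends, and the import path is assumed from the file location above.

import re
from natsort.utils import natsort_key  # assumed import path

def toy_string_func(s):
    # stand-in for parse_string_factory: alternate text and int chunks
    return tuple(int(p) if p.isdigit() else p for p in re.split(r'(\d+)', s))

def sort_key(v):
    return natsort_key(v, None, toy_string_func,
                       lambda b: (b,),     # stand-in bytes_func
                       lambda n: ('', n))  # stand-in num_func

print(sorted(['a10', 'a2', 'a1'], key=sort_key))  # ['a1', 'a2', 'a10']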
2,131
StackStorm/pybind
pybind/slxos/v17s_1_02/routing_system/interface/ve/ipv6/ipv6_vrrp_extended/__init__.py
ipv6_vrrp_extended._set_auth_type
def _set_auth_type(self, v, load=False):
    """
    Setter method for auth_type, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_vrrp_extended/auth_type (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_auth_type is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_auth_type() directly.

    YANG Description: Authentication type
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=auth_type.auth_type, is_container='container', presence=False, yang_name="auth-type", rest_name="auth-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Authentication type'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """auth_type must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=auth_type.auth_type, is_container='container', presence=False, yang_name="auth-type", rest_name="auth-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Authentication type'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)""",
        })

    self.__auth_type = t
    if hasattr(self, '_set'):
        self._set()
python
def _set_auth_type(self, v, load=False):
    """
    Setter method for auth_type, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_vrrp_extended/auth_type (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_auth_type is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_auth_type() directly.

    YANG Description: Authentication type
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=auth_type.auth_type, is_container='container', presence=False, yang_name="auth-type", rest_name="auth-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Authentication type'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """auth_type must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=auth_type.auth_type, is_container='container', presence=False, yang_name="auth-type", rest_name="auth-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Authentication type'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)""",
        })

    self.__auth_type = t
    if hasattr(self, '_set'):
        self._set()
['def', '_set_auth_type', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'auth_type', '.', 'auth_type', ',', 'is_container', '=', "'container'", ',', 'presence', '=', 'False', ',', 'yang_name', '=', '"auth-type"', ',', 'rest_name', '=', '"auth-type"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Authentication type'", '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-vrrp'", ',', 'defining_module', '=', "'brocade-vrrp'", ',', 'yang_type', '=', "'container'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""auth_type must be of a type compatible with container"""', ',', "'defined-type'", ':', '"container"', ',', "'generated-type'", ':', '"""YANGDynClass(base=auth_type.auth_type, is_container=\'container\', presence=False, yang_name="auth-type", rest_name="auth-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Authentication type\'}}, namespace=\'urn:brocade.com:mgmt:brocade-vrrp\', defining_module=\'brocade-vrrp\', yang_type=\'container\', is_config=True)"""', ',', '}', ')', 'self', '.', '__auth_type', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for auth_type, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_vrrp_extended/auth_type (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_auth_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_auth_type() directly.

YANG Description: Authentication type
['Setter', 'method', 'for', 'auth_type', 'mapped', 'from', 'YANG', 'variable', '/', 'routing_system', '/', 'interface', '/', 've', '/', 'ipv6', '/', 'ipv6_vrrp_extended', '/', 'auth_type', '(', 'container', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_auth_type', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_auth_type', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/routing_system/interface/ve/ipv6/ipv6_vrrp_extended/__init__.py#L96-L119
2,132
langloisjp/tstore
tstore/pgtablestorage.py
sqlwhere
def sqlwhere(criteria=None):
    """Generates SQL where clause. Returns (sql, values).
    Criteria is a dictionary of {field: value}.

    >>> sqlwhere()
    ('', [])
    >>> sqlwhere({'id': 5})
    ('id=%s', [5])
    >>> sqlwhere({'id': 3, 'name': 'toto'})
    ('id=%s and name=%s', [3, 'toto'])
    >>> sqlwhere({'id': 3, 'name': 'toto', 'createdon': '2013-12-02'})
    ('createdon=%s and id=%s and name=%s', ['2013-12-02', 3, 'toto'])
    """
    if not criteria:
        return ('', [])
    fields = sorted(criteria.keys())
    validate_names(fields)
    values = [criteria[field] for field in fields]
    parts = [field + '=%s' for field in fields]
    sql = ' and '.join(parts)
    return (sql, values)
python
def sqlwhere(criteria=None):
    """Generates SQL where clause. Returns (sql, values).
    Criteria is a dictionary of {field: value}.

    >>> sqlwhere()
    ('', [])
    >>> sqlwhere({'id': 5})
    ('id=%s', [5])
    >>> sqlwhere({'id': 3, 'name': 'toto'})
    ('id=%s and name=%s', [3, 'toto'])
    >>> sqlwhere({'id': 3, 'name': 'toto', 'createdon': '2013-12-02'})
    ('createdon=%s and id=%s and name=%s', ['2013-12-02', 3, 'toto'])
    """
    if not criteria:
        return ('', [])
    fields = sorted(criteria.keys())
    validate_names(fields)
    values = [criteria[field] for field in fields]
    parts = [field + '=%s' for field in fields]
    sql = ' and '.join(parts)
    return (sql, values)
['def', 'sqlwhere', '(', 'criteria', '=', 'None', ')', ':', 'if', 'not', 'criteria', ':', 'return', '(', "''", ',', '[', ']', ')', 'fields', '=', 'sorted', '(', 'criteria', '.', 'keys', '(', ')', ')', 'validate_names', '(', 'fields', ')', 'values', '=', '[', 'criteria', '[', 'field', ']', 'for', 'field', 'in', 'fields', ']', 'parts', '=', '[', 'field', '+', "'=%s'", 'for', 'field', 'in', 'fields', ']', 'sql', '=', "' and '", '.', 'join', '(', 'parts', ')', 'return', '(', 'sql', ',', 'values', ')']
Generates SQL where clause. Returns (sql, values).
Criteria is a dictionary of {field: value}.

>>> sqlwhere()
('', [])
>>> sqlwhere({'id': 5})
('id=%s', [5])
>>> sqlwhere({'id': 3, 'name': 'toto'})
('id=%s and name=%s', [3, 'toto'])
>>> sqlwhere({'id': 3, 'name': 'toto', 'createdon': '2013-12-02'})
('createdon=%s and id=%s and name=%s', ['2013-12-02', 3, 'toto'])
['Generates', 'SQL', 'where', 'clause', '.', 'Returns', '(', 'sql', 'values', ')', '.', 'Criteria', 'is', 'a', 'dictionary', 'of', '{', 'field', ':', 'value', '}', '.']
train
https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/pgtablestorage.py#L412-L432
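How the returned pair slots into a parameterized query; the import path, table name, and wrapper below are illustrative, for any DB-API driver that uses %s placeholders (e.g. psycopg2).

from tstore.pgtablestorage import sqlwhere  # assumed import path

def build_select(table, criteria=None):
    # compose a full statement from sqlwhere()'s (sql, values) pair
    sql, values = sqlwhere(criteria)
    query = 'select * from ' + table
    if sql:
        query += ' where ' + sql
    return query, values

print(build_select('users', {'id': 3, 'name': 'toto'}))
# ('select * from users where id=%s and name=%s', [3, 'toto'])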
2,133
deschler/django-modeltranslation
modeltranslation/utils.py
auto_populate
def auto_populate(mode='all'):
    """
    Overrides translation fields population mode (population mode decides which
    unprovided translations will be filled during model construction / loading).

    Example:

        with auto_populate('all'):
            s = Slugged.objects.create(title='foo')
        s.title_en == 'foo' // True
        s.title_de == 'foo' // True

    This method may be used to ensure consistency loading untranslated fixtures,
    with non-default language active:

        with auto_populate('required'):
            call_command('loaddata', 'fixture.json')
    """
    current_population_mode = settings.AUTO_POPULATE
    settings.AUTO_POPULATE = mode
    try:
        yield
    finally:
        settings.AUTO_POPULATE = current_population_mode
python
def auto_populate(mode='all'):
    """
    Overrides translation fields population mode (population mode decides which
    unprovided translations will be filled during model construction / loading).

    Example:

        with auto_populate('all'):
            s = Slugged.objects.create(title='foo')
        s.title_en == 'foo' // True
        s.title_de == 'foo' // True

    This method may be used to ensure consistency loading untranslated fixtures,
    with non-default language active:

        with auto_populate('required'):
            call_command('loaddata', 'fixture.json')
    """
    current_population_mode = settings.AUTO_POPULATE
    settings.AUTO_POPULATE = mode
    try:
        yield
    finally:
        settings.AUTO_POPULATE = current_population_mode
['def', 'auto_populate', '(', 'mode', '=', "'all'", ')', ':', 'current_population_mode', '=', 'settings', '.', 'AUTO_POPULATE', 'settings', '.', 'AUTO_POPULATE', '=', 'mode', 'try', ':', 'yield', 'finally', ':', 'settings', '.', 'AUTO_POPULATE', '=', 'current_population_mode']
Overrides translation fields population mode (population mode decides which
unprovided translations will be filled during model construction / loading).

Example:

    with auto_populate('all'):
        s = Slugged.objects.create(title='foo')
    s.title_en == 'foo' // True
    s.title_de == 'foo' // True

This method may be used to ensure consistency loading untranslated fixtures,
with non-default language active:

    with auto_populate('required'):
        call_command('loaddata', 'fixture.json')
['Overrides', 'translation', 'fields', 'population', 'mode', '(', 'population', 'mode', 'decides', 'which', 'unprovided', 'translations', 'will', 'be', 'filled', 'during', 'model', 'construction', '/', 'loading', ')', '.']
train
https://github.com/deschler/django-modeltranslation/blob/18fec04a5105cbd83fc3759f4fda20135b3a848c/modeltranslation/utils.py#L126-L149
2,134
bokeh/bokeh
bokeh/embed/util.py
submodel_has_python_callbacks
def submodel_has_python_callbacks(models):
    ''' Traverses submodels to check for Python (event) callbacks

    '''
    has_python_callback = False
    for model in collect_models(models):
        if len(model._callbacks) > 0 or len(model._event_callbacks) > 0:
            has_python_callback = True
            break

    return has_python_callback
python
def submodel_has_python_callbacks(models):
    ''' Traverses submodels to check for Python (event) callbacks

    '''
    has_python_callback = False
    for model in collect_models(models):
        if len(model._callbacks) > 0 or len(model._event_callbacks) > 0:
            has_python_callback = True
            break

    return has_python_callback
['def', 'submodel_has_python_callbacks', '(', 'models', ')', ':', 'has_python_callback', '=', 'False', 'for', 'model', 'in', 'collect_models', '(', 'models', ')', ':', 'if', 'len', '(', 'model', '.', '_callbacks', ')', '>', '0', 'or', 'len', '(', 'model', '.', '_event_callbacks', ')', '>', '0', ':', 'has_python_callback', '=', 'True', 'break', 'return', 'has_python_callback']
Traverses submodels to check for Python (event) callbacks
['Traverses', 'submodels', 'to', 'check', 'for', 'Python', '(', 'event', ')', 'callbacks']
train
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/embed/util.py#L305-L315
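A hedged sketch of what trips the check: attaching a Python-side on_change handler to a widget makes its _callbacks non-empty, so the helper returns True. Serving the callback would need a Bokeh server; the import path is assumed from the file location above.

from bokeh.embed.util import submodel_has_python_callbacks  # assumed path
from bokeh.models import Slider

def on_value(attr, old, new):
    print(attr, old, new)

slider = Slider(start=0, end=10, value=5, step=1)
print(submodel_has_python_callbacks([slider]))  # False: no callbacks yet
slider.on_change('value', on_value)
print(submodel_has_python_callbacks([slider]))  # True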
2,135
twilio/twilio-python
twilio/base/version.py
Version.request
def request(self, method, uri, params=None, data=None, headers=None,
            auth=None, timeout=None, allow_redirects=False):
    """
    Make an HTTP request.
    """
    url = self.relative_uri(uri)
    return self.domain.request(
        method,
        url,
        params=params,
        data=data,
        headers=headers,
        auth=auth,
        timeout=timeout,
        allow_redirects=allow_redirects
    )
python
def request(self, method, uri, params=None, data=None, headers=None,
            auth=None, timeout=None, allow_redirects=False):
    """
    Make an HTTP request.
    """
    url = self.relative_uri(uri)
    return self.domain.request(
        method,
        url,
        params=params,
        data=data,
        headers=headers,
        auth=auth,
        timeout=timeout,
        allow_redirects=allow_redirects
    )
['def', 'request', '(', 'self', ',', 'method', ',', 'uri', ',', 'params', '=', 'None', ',', 'data', '=', 'None', ',', 'headers', '=', 'None', ',', 'auth', '=', 'None', ',', 'timeout', '=', 'None', ',', 'allow_redirects', '=', 'False', ')', ':', 'url', '=', 'self', '.', 'relative_uri', '(', 'uri', ')', 'return', 'self', '.', 'domain', '.', 'request', '(', 'method', ',', 'url', ',', 'params', '=', 'params', ',', 'data', '=', 'data', ',', 'headers', '=', 'headers', ',', 'auth', '=', 'auth', ',', 'timeout', '=', 'timeout', ',', 'allow_redirects', '=', 'allow_redirects', ')']
Make an HTTP request.
['Make', 'an', 'HTTP', 'request', '.']
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/base/version.py#L33-L48
2,136
mnkhouri/news_scraper
news_scraper/scrape.py
fetch_and_parse
def fetch_and_parse(url, bodyLines):
    """Takes a url, and returns a dictionary of data with 'bodyLines' lines"""
    pageHtml = fetch_page(url)
    return parse(url, pageHtml, bodyLines)
python
def fetch_and_parse(url, bodyLines):
    """Takes a url, and returns a dictionary of data with 'bodyLines' lines"""
    pageHtml = fetch_page(url)
    return parse(url, pageHtml, bodyLines)
['def', 'fetch_and_parse', '(', 'url', ',', 'bodyLines', ')', ':', 'pageHtml', '=', 'fetch_page', '(', 'url', ')', 'return', 'parse', '(', 'url', ',', 'pageHtml', ',', 'bodyLines', ')']
Takes a url, and returns a dictionary of data with 'bodyLines' lines
['Takes', 'a', 'url', 'and', 'returns', 'a', 'dictionary', 'of', 'data', 'with', 'bodyLines', 'lines']
train
https://github.com/mnkhouri/news_scraper/blob/7fd3487c587281a4816f0761f0c4d2196ae05702/news_scraper/scrape.py#L68-L72
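A one-line usage sketch; the URL is illustrative, and the returned dictionary holds whatever fields the module's parse() extracts, with the body capped at the requested number of lines.

from news_scraper.scrape import fetch_and_parse  # assumed import path

article = fetch_and_parse('https://example.com/news/story.html', 5)
print(article)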
2,137
PyCQA/pydocstyle
src/pydocstyle/parser.py
Function.is_public
def is_public(self):
    """Return True iff this function should be considered public."""
    if self.dunder_all is not None:
        return self.name in self.dunder_all
    else:
        return not self.name.startswith('_')
python
def is_public(self):
    """Return True iff this function should be considered public."""
    if self.dunder_all is not None:
        return self.name in self.dunder_all
    else:
        return not self.name.startswith('_')
['def', 'is_public', '(', 'self', ')', ':', 'if', 'self', '.', 'dunder_all', 'is', 'not', 'None', ':', 'return', 'self', '.', 'name', 'in', 'self', '.', 'dunder_all', 'else', ':', 'return', 'not', 'self', '.', 'name', '.', 'startswith', '(', "'_'", ')']
Return True iff this function should be considered public.
['Return', 'True', 'iff', 'this', 'function', 'should', 'be', 'considered', 'public', '.']
train
https://github.com/PyCQA/pydocstyle/blob/2549847f9efad225789f931e83dfe782418ca13e/src/pydocstyle/parser.py#L131-L136
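A toy stand-in that mirrors the rule (not pydocstyle's real class): __all__ wins when present, otherwise a leading underscore marks the function private.

class ToyFunction:
    def __init__(self, name, dunder_all=None):
        self.name = name
        self.dunder_all = dunder_all

    @property
    def is_public(self):
        if self.dunder_all is not None:
            return self.name in self.dunder_all
        return not self.name.startswith('_')

print(ToyFunction('_helper').is_public)               # False
print(ToyFunction('_helper', ['_helper']).is_public)  # True: exported wins
print(ToyFunction('run', ['main']).is_public)         # False: not exported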
2,138
ccubed/PyMoe
Pymoe/Mal/__init__.py
Mal._anime_add
def _anime_add(self, data):
    """
    Adds an anime to a user's list.

    :param data: A :class:`Pymoe.Mal.Objects.Anime` object with the anime data
    :raises: SyntaxError on invalid data type
    :raises: ServerError on failure to add
    :rtype: Bool
    :return: True on success
    """
    if isinstance(data, Anime):
        xmlstr = data.to_xml()
        r = requests.get(self.apiurl + "animelist/add/{}.xml".format(data.id),
                         params={'data': xmlstr},
                         auth=HTTPBasicAuth(self._username, self._password),
                         headers=self.header)
        if r.status_code != 201:
            raise ServerError(r.text, r.status_code)
        return True
    else:
        raise SyntaxError(
            "Invalid type: data should be a Pymoe.Mal.Objects.Anime object. Got a {}".format(type(data)))
python
def _anime_add(self, data):
    """
    Adds an anime to a user's list.

    :param data: A :class:`Pymoe.Mal.Objects.Anime` object with the anime data
    :raises: SyntaxError on invalid data type
    :raises: ServerError on failure to add
    :rtype: Bool
    :return: True on success
    """
    if isinstance(data, Anime):
        xmlstr = data.to_xml()
        r = requests.get(self.apiurl + "animelist/add/{}.xml".format(data.id),
                         params={'data': xmlstr},
                         auth=HTTPBasicAuth(self._username, self._password),
                         headers=self.header)
        if r.status_code != 201:
            raise ServerError(r.text, r.status_code)
        return True
    else:
        raise SyntaxError(
            "Invalid type: data should be a Pymoe.Mal.Objects.Anime object. Got a {}".format(type(data)))
['def', '_anime_add', '(', 'self', ',', 'data', ')', ':', 'if', 'isinstance', '(', 'data', ',', 'Anime', ')', ':', 'xmlstr', '=', 'data', '.', 'to_xml', '(', ')', 'r', '=', 'requests', '.', 'get', '(', 'self', '.', 'apiurl', '+', '"animelist/add/{}.xml"', '.', 'format', '(', 'data', '.', 'id', ')', ',', 'params', '=', '{', "'data'", ':', 'xmlstr', '}', ',', 'auth', '=', 'HTTPBasicAuth', '(', 'self', '.', '_username', ',', 'self', '.', '_password', ')', ',', 'headers', '=', 'self', '.', 'header', ')', 'if', 'r', '.', 'status_code', '!=', '201', ':', 'raise', 'ServerError', '(', 'r', '.', 'text', ',', 'r', '.', 'status_code', ')', 'return', 'True', 'else', ':', 'raise', 'SyntaxError', '(', '"Invalid type: data should be a Pymoe.Mal.Objects.Anime object. Got a {}"', '.', 'format', '(', 'type', '(', 'data', ')', ')', ')']
Adds an anime to a user's list.

:param data: A :class:`Pymoe.Mal.Objects.Anime` object with the anime data
:raises: SyntaxError on invalid data type
:raises: ServerError on failure to add
:rtype: Bool
:return: True on success
['Adds', 'an', 'anime', 'to', 'a', 'user', 's', 'list', '.']
train
https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Mal/__init__.py#L136-L157
2,139
AmesCornish/buttersink
buttersink/ButterStore.py
ButterStore._keepVol
def _keepVol(self, vol):
    """ Mark this volume to be kept in path. """
    if vol is None:
        return

    if vol in self.extraVolumes:
        del self.extraVolumes[vol]
        return

    if vol not in self.paths:
        raise Exception("%s not in %s" % (vol, self))

    paths = [os.path.basename(path) for path in self.paths[vol]]
    newPath = self.selectReceivePath(paths)

    if self._skipDryRun(logger, 'INFO')("Copy %s to %s", vol, newPath):
        return

    self.butterVolumes[vol.uuid].copy(newPath)
python
def _keepVol(self, vol):
    """ Mark this volume to be kept in path. """
    if vol is None:
        return

    if vol in self.extraVolumes:
        del self.extraVolumes[vol]
        return

    if vol not in self.paths:
        raise Exception("%s not in %s" % (vol, self))

    paths = [os.path.basename(path) for path in self.paths[vol]]
    newPath = self.selectReceivePath(paths)

    if self._skipDryRun(logger, 'INFO')("Copy %s to %s", vol, newPath):
        return

    self.butterVolumes[vol.uuid].copy(newPath)
['def', '_keepVol', '(', 'self', ',', 'vol', ')', ':', 'if', 'vol', 'is', 'None', ':', 'return', 'if', 'vol', 'in', 'self', '.', 'extraVolumes', ':', 'del', 'self', '.', 'extraVolumes', '[', 'vol', ']', 'return', 'if', 'vol', 'not', 'in', 'self', '.', 'paths', ':', 'raise', 'Exception', '(', '"%s not in %s"', '%', '(', 'vol', ',', 'self', ')', ')', 'paths', '=', '[', 'os', '.', 'path', '.', 'basename', '(', 'path', ')', 'for', 'path', 'in', 'self', '.', 'paths', '[', 'vol', ']', ']', 'newPath', '=', 'self', '.', 'selectReceivePath', '(', 'paths', ')', 'if', 'self', '.', '_skipDryRun', '(', 'logger', ',', "'INFO'", ')', '(', '"Copy %s to %s"', ',', 'vol', ',', 'newPath', ')', ':', 'return', 'self', '.', 'butterVolumes', '[', 'vol', '.', 'uuid', ']', '.', 'copy', '(', 'newPath', ')']
Mark this volume to be kept in path.
['Mark', 'this', 'volume', 'to', 'be', 'kept', 'in', 'path', '.']
train
https://github.com/AmesCornish/buttersink/blob/5cc37e30d9f8071fcf3497dca8b8a91b910321ea/buttersink/ButterStore.py#L308-L326
2,140
StackStorm/pybind
pybind/slxos/v17s_1_02/qos/map_/dscp_cos/__init__.py
dscp_cos._set_dscp_to_cos_mapping
def _set_dscp_to_cos_mapping(self, v, load=False):
    """
    Setter method for dscp_to_cos_mapping, mapped from YANG variable /qos/map/dscp_cos/dscp_to_cos_mapping (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_dscp_to_cos_mapping is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_dscp_to_cos_mapping() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGListType("dscp_in_values",dscp_to_cos_mapping.dscp_to_cos_mapping, yang_name="dscp-to-cos-mapping", rest_name="map", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-in-values', extensions={u'tailf-common': {u'info': u'Map DSCP values to CoS value', u'cli-suppress-mode': None, u'cli-suppress-no': None, u'alt-name': u'map', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'dscp_mark_list_cos'}}), is_container='list', yang_name="dscp-to-cos-mapping", rest_name="map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map DSCP values to CoS value', u'cli-suppress-mode': None, u'cli-suppress-no': None, u'alt-name': u'map', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'dscp_mark_list_cos'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """dscp_to_cos_mapping must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("dscp_in_values",dscp_to_cos_mapping.dscp_to_cos_mapping, yang_name="dscp-to-cos-mapping", rest_name="map", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-in-values', extensions={u'tailf-common': {u'info': u'Map DSCP values to CoS value', u'cli-suppress-mode': None, u'cli-suppress-no': None, u'alt-name': u'map', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'dscp_mark_list_cos'}}), is_container='list', yang_name="dscp-to-cos-mapping", rest_name="map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map DSCP values to CoS value', u'cli-suppress-mode': None, u'cli-suppress-no': None, u'alt-name': u'map', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'dscp_mark_list_cos'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)""",
        })

    self.__dscp_to_cos_mapping = t
    if hasattr(self, '_set'):
        self._set()
python
def _set_dscp_to_cos_mapping(self, v, load=False):
    """
    Setter method for dscp_to_cos_mapping, mapped from YANG variable /qos/map/dscp_cos/dscp_to_cos_mapping (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_dscp_to_cos_mapping is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_dscp_to_cos_mapping() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGListType("dscp_in_values",dscp_to_cos_mapping.dscp_to_cos_mapping, yang_name="dscp-to-cos-mapping", rest_name="map", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-in-values', extensions={u'tailf-common': {u'info': u'Map DSCP values to CoS value', u'cli-suppress-mode': None, u'cli-suppress-no': None, u'alt-name': u'map', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'dscp_mark_list_cos'}}), is_container='list', yang_name="dscp-to-cos-mapping", rest_name="map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map DSCP values to CoS value', u'cli-suppress-mode': None, u'cli-suppress-no': None, u'alt-name': u'map', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'dscp_mark_list_cos'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """dscp_to_cos_mapping must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("dscp_in_values",dscp_to_cos_mapping.dscp_to_cos_mapping, yang_name="dscp-to-cos-mapping", rest_name="map", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-in-values', extensions={u'tailf-common': {u'info': u'Map DSCP values to CoS value', u'cli-suppress-mode': None, u'cli-suppress-no': None, u'alt-name': u'map', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'dscp_mark_list_cos'}}), is_container='list', yang_name="dscp-to-cos-mapping", rest_name="map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map DSCP values to CoS value', u'cli-suppress-mode': None, u'cli-suppress-no': None, u'alt-name': u'map', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'dscp_mark_list_cos'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)""",
        })

    self.__dscp_to_cos_mapping = t
    if hasattr(self, '_set'):
        self._set()
['def', '_set_dscp_to_cos_mapping', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'YANGListType', '(', '"dscp_in_values"', ',', 'dscp_to_cos_mapping', '.', 'dscp_to_cos_mapping', ',', 'yang_name', '=', '"dscp-to-cos-mapping"', ',', 'rest_name', '=', '"map"', ',', 'parent', '=', 'self', ',', 'is_container', '=', "'list'", ',', 'user_ordered', '=', 'False', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'yang_keys', '=', "'dscp-in-values'", ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Map DSCP values to CoS value'", ',', "u'cli-suppress-mode'", ':', 'None', ',', "u'cli-suppress-no'", ':', 'None', ',', "u'alt-name'", ':', "u'map'", ',', "u'cli-sequence-commands'", ':', 'None', ',', "u'cli-incomplete-command'", ':', 'None', ',', "u'callpoint'", ':', "u'dscp_mark_list_cos'", '}', '}', ')', ',', 'is_container', '=', "'list'", ',', 'yang_name', '=', '"dscp-to-cos-mapping"', ',', 'rest_name', '=', '"map"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Map DSCP values to CoS value'", ',', "u'cli-suppress-mode'", ':', 'None', ',', "u'cli-suppress-no'", ':', 'None', ',', "u'alt-name'", ':', "u'map'", ',', "u'cli-sequence-commands'", ':', 'None', ',', "u'cli-incomplete-command'", ':', 'None', ',', "u'callpoint'", ':', "u'dscp_mark_list_cos'", '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-qos-mls'", ',', 'defining_module', '=', "'brocade-qos-mls'", ',', 'yang_type', '=', "'list'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""dscp_to_cos_mapping must be of a type compatible with list"""', ',', "'defined-type'", ':', '"list"', ',', "'generated-type'", ':', '"""YANGDynClass(base=YANGListType("dscp_in_values",dscp_to_cos_mapping.dscp_to_cos_mapping, yang_name="dscp-to-cos-mapping", rest_name="map", parent=self, is_container=\'list\', user_ordered=False, path_helper=self._path_helper, yang_keys=\'dscp-in-values\', extensions={u\'tailf-common\': {u\'info\': u\'Map DSCP values to CoS value\', u\'cli-suppress-mode\': None, u\'cli-suppress-no\': None, u\'alt-name\': u\'map\', u\'cli-sequence-commands\': None, u\'cli-incomplete-command\': None, u\'callpoint\': u\'dscp_mark_list_cos\'}}), is_container=\'list\', yang_name="dscp-to-cos-mapping", rest_name="map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Map DSCP values to CoS value\', u\'cli-suppress-mode\': None, u\'cli-suppress-no\': None, u\'alt-name\': u\'map\', u\'cli-sequence-commands\': None, u\'cli-incomplete-command\': None, u\'callpoint\': u\'dscp_mark_list_cos\'}}, namespace=\'urn:brocade.com:mgmt:brocade-qos-mls\', defining_module=\'brocade-qos-mls\', yang_type=\'list\', is_config=True)"""', ',', '}', ')', 'self', '.', '__dscp_to_cos_mapping', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for dscp_to_cos_mapping, mapped from YANG variable /qos/map/dscp_cos/dscp_to_cos_mapping (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_dscp_to_cos_mapping is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dscp_to_cos_mapping() directly.
['Setter', 'method', 'for', 'dscp_to_cos_mapping', 'mapped', 'from', 'YANG', 'variable', '/', 'qos', '/', 'map', '/', 'dscp_cos', '/', 'dscp_to_cos_mapping', '(', 'list', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_dscp_to_cos_mapping', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_dscp_to_cos_mapping', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/qos/map_/dscp_cos/__init__.py#L131-L152
2,141
nerdvegas/rez
src/rez/vendor/amqp/serialization.py
AMQPWriter.write_shortstr
def write_shortstr(self, s):
    """Write a string up to 255 bytes long (after any encoding).

    If passed a unicode string, encode with UTF-8.
    """
    self._flushbits()
    if isinstance(s, string):
        s = s.encode('utf-8')
    if len(s) > 255:
        raise FrameSyntaxError(
            'Shortstring overflow ({0} > 255)'.format(len(s)))
    self.write_octet(len(s))
    self.out.write(s)
python
def write_shortstr(self, s):
    """Write a string up to 255 bytes long (after any encoding).

    If passed a unicode string, encode with UTF-8.
    """
    self._flushbits()
    if isinstance(s, string):
        s = s.encode('utf-8')
    if len(s) > 255:
        raise FrameSyntaxError(
            'Shortstring overflow ({0} > 255)'.format(len(s)))
    self.write_octet(len(s))
    self.out.write(s)
['def', 'write_shortstr', '(', 'self', ',', 's', ')', ':', 'self', '.', '_flushbits', '(', ')', 'if', 'isinstance', '(', 's', ',', 'string', ')', ':', 's', '=', 's', '.', 'encode', '(', "'utf-8'", ')', 'if', 'len', '(', 's', ')', '>', '255', ':', 'raise', 'FrameSyntaxError', '(', "'Shortstring overflow ({0} > 255)'", '.', 'format', '(', 'len', '(', 's', ')', ')', ')', 'self', '.', 'write_octet', '(', 'len', '(', 's', ')', ')', 'self', '.', 'out', '.', 'write', '(', 's', ')']
Write a string up to 255 bytes long (after any encoding).

If passed a unicode string, encode with UTF-8.
['Write', 'a', 'string', 'up', 'to', '255', 'bytes', 'long', '(', 'after', 'any', 'encoding', ')', '.']
train
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/serialization.py#L315-L328
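What actually lands on the wire: one length octet followed by the UTF-8 payload. A standalone sketch of that framing (not the AMQPWriter API itself):

import io

def shortstr_bytes(s):
    # one length octet, then the UTF-8 payload, mirroring the method above
    data = s.encode('utf-8')
    if len(data) > 255:
        raise ValueError('Shortstring overflow ({0} > 255)'.format(len(data)))
    return bytes([len(data)]) + data

buf = io.BytesIO()
buf.write(shortstr_bytes('héllo'))
print(buf.getvalue())  # b'\x06h\xc3\xa9llo': 6 payload bytes after encoding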
2,142
Miserlou/Zappa
zappa/cli.py
ZappaCLI.colorize_invoke_command
def colorize_invoke_command(self, string):
    """
    Apply various heuristics to return a colorized version of the invoke
    command string. If these fail, simply return the string in plaintext.

    Inspired by colorize_log_entry().
    """

    final_string = string

    try:

        # Line headers
        try:
            for token in ['START', 'END', 'REPORT', '[DEBUG]']:
                if token in final_string:
                    format_string = '[{}]'
                    # match whole words only
                    pattern = r'\b{}\b'
                    if token == '[DEBUG]':
                        format_string = '{}'
                        pattern = re.escape(token)
                    repl = click.style(
                        format_string.format(token),
                        bold=True,
                        fg='cyan'
                    )
                    final_string = re.sub(
                        pattern.format(token), repl, final_string
                    )
        except Exception:  # pragma: no cover
            pass

        # Green bold Tokens
        try:
            for token in [
                'Zappa Event:',
                'RequestId:',
                'Version:',
                'Duration:',
                'Billed',
                'Memory Size:',
                'Max Memory Used:'
            ]:
                if token in final_string:
                    final_string = final_string.replace(token, click.style(
                        token,
                        bold=True,
                        fg='green'
                    ))
        except Exception:  # pragma: no cover
            pass

        # UUIDs
        for token in final_string.replace('\t', ' ').split(' '):
            try:
                if token.count('-') == 4 and token.replace('-', '').isalnum():
                    final_string = final_string.replace(
                        token,
                        click.style(token, fg='magenta')
                    )
            except Exception:  # pragma: no cover
                pass

        return final_string
    except Exception:
        return string
python
def colorize_invoke_command(self, string):
    """
    Apply various heuristics to return a colorized version of the invoke
    command string. If these fail, simply return the string in plaintext.

    Inspired by colorize_log_entry().
    """

    final_string = string

    try:

        # Line headers
        try:
            for token in ['START', 'END', 'REPORT', '[DEBUG]']:
                if token in final_string:
                    format_string = '[{}]'
                    # match whole words only
                    pattern = r'\b{}\b'
                    if token == '[DEBUG]':
                        format_string = '{}'
                        pattern = re.escape(token)
                    repl = click.style(
                        format_string.format(token),
                        bold=True,
                        fg='cyan'
                    )
                    final_string = re.sub(
                        pattern.format(token), repl, final_string
                    )
        except Exception:  # pragma: no cover
            pass

        # Green bold Tokens
        try:
            for token in [
                'Zappa Event:',
                'RequestId:',
                'Version:',
                'Duration:',
                'Billed',
                'Memory Size:',
                'Max Memory Used:'
            ]:
                if token in final_string:
                    final_string = final_string.replace(token, click.style(
                        token,
                        bold=True,
                        fg='green'
                    ))
        except Exception:  # pragma: no cover
            pass

        # UUIDs
        for token in final_string.replace('\t', ' ').split(' '):
            try:
                if token.count('-') == 4 and token.replace('-', '').isalnum():
                    final_string = final_string.replace(
                        token,
                        click.style(token, fg='magenta')
                    )
            except Exception:  # pragma: no cover
                pass

        return final_string
    except Exception:
        return string
['def', 'colorize_invoke_command', '(', 'self', ',', 'string', ')', ':', 'final_string', '=', 'string', 'try', ':', '# Line headers', 'try', ':', 'for', 'token', 'in', '[', "'START'", ',', "'END'", ',', "'REPORT'", ',', "'[DEBUG]'", ']', ':', 'if', 'token', 'in', 'final_string', ':', 'format_string', '=', "'[{}]'", '# match whole words only', 'pattern', '=', "r'\\b{}\\b'", 'if', 'token', '==', "'[DEBUG]'", ':', 'format_string', '=', "'{}'", 'pattern', '=', 're', '.', 'escape', '(', 'token', ')', 'repl', '=', 'click', '.', 'style', '(', 'format_string', '.', 'format', '(', 'token', ')', ',', 'bold', '=', 'True', ',', 'fg', '=', "'cyan'", ')', 'final_string', '=', 're', '.', 'sub', '(', 'pattern', '.', 'format', '(', 'token', ')', ',', 'repl', ',', 'final_string', ')', 'except', 'Exception', ':', '# pragma: no cover', 'pass', '# Green bold Tokens', 'try', ':', 'for', 'token', 'in', '[', "'Zappa Event:'", ',', "'RequestId:'", ',', "'Version:'", ',', "'Duration:'", ',', "'Billed'", ',', "'Memory Size:'", ',', "'Max Memory Used:'", ']', ':', 'if', 'token', 'in', 'final_string', ':', 'final_string', '=', 'final_string', '.', 'replace', '(', 'token', ',', 'click', '.', 'style', '(', 'token', ',', 'bold', '=', 'True', ',', 'fg', '=', "'green'", ')', ')', 'except', 'Exception', ':', '# pragma: no cover', 'pass', '# UUIDs', 'for', 'token', 'in', 'final_string', '.', 'replace', '(', "'\\t'", ',', "' '", ')', '.', 'split', '(', "' '", ')', ':', 'try', ':', 'if', 'token', '.', 'count', '(', "'-'", ')', 'is', '4', 'and', 'token', '.', 'replace', '(', "'-'", ',', "''", ')', '.', 'isalnum', '(', ')', ':', 'final_string', '=', 'final_string', '.', 'replace', '(', 'token', ',', 'click', '.', 'style', '(', 'token', ',', 'fg', '=', "'magenta'", ')', ')', 'except', 'Exception', ':', '# pragma: no cover', 'pass', 'return', 'final_string', 'except', 'Exception', ':', 'return', 'string']
Apply various heuristics to return a colorized version the invoke command string. If these fail, simply return the string in plaintext. Inspired by colorize_log_entry().
['Apply', 'various', 'heuristics', 'to', 'return', 'a', 'colorized', 'version', 'the', 'invoke', 'command', 'string', '.', 'If', 'these', 'fail', 'simply', 'return', 'the', 'string', 'in', 'plaintext', '.']
train
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/cli.py#L1321-L1387
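A minimal, self-contained sketch of the same colorizing approach (click is assumed to be installed; the sample log line is invented):

import re
import click

def colorize_line(line):
    # Bracket whole-word headers and style them cyan, as above.
    for token in ['START', 'END', 'REPORT']:
        line = re.sub(r'\b{}\b'.format(token),
                      click.style('[{}]'.format(token), bold=True, fg='cyan'),
                      line)
    # Highlight UUID-shaped tokens. Note '==' rather than 'is' for the
    # integer comparison; identity checks on small ints only work by
    # CPython's caching and are not a correctness guarantee.
    for token in line.split(' '):
        if token.count('-') == 4 and token.replace('-', '').isalnum():
            line = line.replace(token, click.style(token, fg='magenta'))
    return line

print(colorize_line('START RequestId: 8f5fa9f7-9b2a-4b9e-a1c3-0d2f6f9e1a2b'))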
2,143
SergeySatskiy/cdm-pythonparser
cdmpyparser.py
BriefModuleInfo._onFunction
def _onFunction(self, name, line, pos, absPosition, keywordLine, keywordPos, colonLine, colonPos, level, isAsync, returnAnnotation): """Memorizes a function""" self.__flushLevel(level) f = Function(name, line, pos, absPosition, keywordLine, keywordPos, colonLine, colonPos, isAsync, returnAnnotation) if self.__lastDecorators is not None: f.decorators = self.__lastDecorators self.__lastDecorators = None self.objectsStack.append(f)
python
def _onFunction(self, name, line, pos, absPosition, keywordLine, keywordPos, colonLine, colonPos, level, isAsync, returnAnnotation): """Memorizes a function""" self.__flushLevel(level) f = Function(name, line, pos, absPosition, keywordLine, keywordPos, colonLine, colonPos, isAsync, returnAnnotation) if self.__lastDecorators is not None: f.decorators = self.__lastDecorators self.__lastDecorators = None self.objectsStack.append(f)
['def', '_onFunction', '(', 'self', ',', 'name', ',', 'line', ',', 'pos', ',', 'absPosition', ',', 'keywordLine', ',', 'keywordPos', ',', 'colonLine', ',', 'colonPos', ',', 'level', ',', 'isAsync', ',', 'returnAnnotation', ')', ':', 'self', '.', '__flushLevel', '(', 'level', ')', 'f', '=', 'Function', '(', 'name', ',', 'line', ',', 'pos', ',', 'absPosition', ',', 'keywordLine', ',', 'keywordPos', ',', 'colonLine', ',', 'colonPos', ',', 'isAsync', ',', 'returnAnnotation', ')', 'if', 'self', '.', '__lastDecorators', 'is', 'not', 'None', ':', 'f', '.', 'decorators', '=', 'self', '.', '__lastDecorators', 'self', '.', '__lastDecorators', '=', 'None', 'self', '.', 'objectsStack', '.', 'append', '(', 'f', ')']
Memorizes a function
['Memorizes', 'a', 'function']
train
https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/cdmpyparser.py#L512-L523
2,144
gem/oq-engine
openquake/hazardlib/gsim/climent_1994.py
ClimentEtAl1994._compute_mean
def _compute_mean(self, C, rup, dists, sites, imt): """ Compute mean value for PGA and pseudo-velocity response spectrum, as given in equation 1. Also converts pseudo-velocity response spectrum values to SA, using: SA = (PSV * W)/ratio(SA_larger/SA_geo_mean) W = (2 * pi / T) T = period (sec) """ mean = (self._compute_term_1_2(rup, C) + self._compute_term_3_4(dists, C) + self._get_site_amplification(sites, imt, C)) # convert from m/s**2 to g for PGA and from m/s to g for PSV # and divide this value by the ratio(SA_larger/SA_geo_mean) if imt.name == "PGA": mean = (np.exp(mean) / g) / C['r_SA'] else: W = (2. * np.pi)/imt.period mean = ((np.exp(mean) * W) / g) / C['r_SA'] return np.log(mean)
python
def _compute_mean(self, C, rup, dists, sites, imt): """ Compute mean value for PGA and pseudo-velocity response spectrum, as given in equation 1. Also converts pseudo-velocity response spectrum values to SA, using: SA = (PSV * W)/ratio(SA_larger/SA_geo_mean) W = (2 * pi / T) T = period (sec) """ mean = (self._compute_term_1_2(rup, C) + self._compute_term_3_4(dists, C) + self._get_site_amplification(sites, imt, C)) # convert from m/s**2 to g for PGA and from m/s to g for PSV # and divide this value by the ratio(SA_larger/SA_geo_mean) if imt.name == "PGA": mean = (np.exp(mean) / g) / C['r_SA'] else: W = (2. * np.pi)/imt.period mean = ((np.exp(mean) * W) / g) / C['r_SA'] return np.log(mean)
['def', '_compute_mean', '(', 'self', ',', 'C', ',', 'rup', ',', 'dists', ',', 'sites', ',', 'imt', ')', ':', 'mean', '=', '(', 'self', '.', '_compute_term_1_2', '(', 'rup', ',', 'C', ')', '+', 'self', '.', '_compute_term_3_4', '(', 'dists', ',', 'C', ')', '+', 'self', '.', '_get_site_amplification', '(', 'sites', ',', 'imt', ',', 'C', ')', ')', '# convert from m/s**2 to g for PGA and from m/s to g for PSV', '# and divide this value by the ratio(SA_larger/SA_geo_mean)', 'if', 'imt', '.', 'name', '==', '"PGA"', ':', 'mean', '=', '(', 'np', '.', 'exp', '(', 'mean', ')', '/', 'g', ')', '/', 'C', '[', "'r_SA'", ']', 'else', ':', 'W', '=', '(', '2.', '*', 'np', '.', 'pi', ')', '/', 'imt', '.', 'period', 'mean', '=', '(', '(', 'np', '.', 'exp', '(', 'mean', ')', '*', 'W', ')', '/', 'g', ')', '/', 'C', '[', "'r_SA'", ']', 'return', 'np', '.', 'log', '(', 'mean', ')']
Compute mean value for PGA and pseudo-velocity response spectrum, as given in equation 1. Also converts pseudo-velocity response spectrum values to SA, using: SA = (PSV * W)/ratio(SA_larger/SA_geo_mean) W = (2 * pi / T) T = period (sec)
['Compute', 'mean', 'value', 'for', 'PGA', 'and', 'pseudo', '-', 'velocity', 'response', 'spectrum', 'as', 'given', 'in', 'equation', '1', '.', 'Also', 'converts', 'pseudo', '-', 'velocity', 'response', 'spectrum', 'values', 'to', 'SA', 'using', ':']
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/climent_1994.py#L150-L172
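A worked version of the PSV-to-SA conversion described in the docstring (plain numpy/scipy; the numbers are illustrative, not values from the GSIM's coefficient table):

import numpy as np
from scipy.constants import g  # 9.80665 m/s^2

period = 1.0                 # T, in seconds
psv = 0.30                   # pseudo-velocity in m/s, already exponentiated
r_sa = 1.10                  # illustrative SA_larger/SA_geo_mean ratio

w = 2.0 * np.pi / period     # W = 2 * pi / T
sa_g = (psv * w) / g / r_sa  # SA in units of g, as in the 'else' branch
print(round(sa_g, 4))        # ~0.1747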
2,145
dcaune/perseus-lib-python-common
majormode/perseus/model/place.py
Place._eq__
def _eq__(self, other): """ Compare the current place object to another passed to the comparison method. The two place objects must have the same identification, even if some of their attributes might be different. @param other: a ``Place`` instance to compare with the current place object. @return: ``True`` if the given place corresponds to the current place; ``False`` otherwise. """ return self.place_id and other.place_id \ and self.place_id == other.place_id
python
def _eq__(self, other): """ Compare the current place object to another passed to the comparison method. The two place objects must have the same identification, even if some of their attributes might be different. @param other: a ``Place`` instance to compare with the current place object. @return: ``True`` if the given place corresponds to the current place; ``False`` otherwise. """ return self.place_id and other.place_id \ and self.place_id == other.place_id
['def', '_eq__', '(', 'self', ',', 'other', ')', ':', 'return', 'self', '.', 'place_id', 'and', 'other', '.', 'place_id', 'and', 'self', '.', 'place_id', '==', 'other', '.', 'place_id']
Compare the current place object to another passed to the comparison method. The two place objects must have the same identification, even if some of their attributes might be different. @param other: a ``Place`` instance to compare with the current place object. @return: ``True`` if the given place corresponds to the current place; ``False`` otherwise.
['Compare', 'the', 'current', 'place', 'object', 'to', 'another', 'passed', 'to', 'the', 'comparison', 'method', '.', 'The', 'two', 'place', 'objects', 'must', 'have', 'the', 'same', 'identification', 'even', 'if', 'some', 'of', 'their', 'attributes', 'might', 'be', 'different', '.']
train
https://github.com/dcaune/perseus-lib-python-common/blob/ba48fe0fd9bb4a75b53e7d10c41ada36a72d4496/majormode/perseus/model/place.py#L138-L151
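For reference, hooking this identity check into Python's == operator requires the dunder name __eq__; the single-underscore _eq__ above is never invoked by ==. A minimal sketch:

class Place:
    def __init__(self, place_id):
        self.place_id = place_id

    def __eq__(self, other):
        # bool(...) so the result is True/False, not a possibly-falsy id.
        return bool(self.place_id and other.place_id
                    and self.place_id == other.place_id)

print(Place('abc') == Place('abc'))  # True
print(Place('abc') == Place('xyz'))  # False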
2,146
vals/umis
umis/umis.py
guess_depth_cutoff
def guess_depth_cutoff(cb_histogram): ''' Guesses at an appropriate barcode cutoff ''' with read_cbhistogram(cb_histogram) as fh: cb_vals = [int(p.strip().split()[1]) for p in fh] histo = np.histogram(np.log10(cb_vals), bins=50) vals = histo[0] edges = histo[1] mids = np.array([(edges[i] + edges[i+1])/2 for i in range(edges.size - 1)]) wdensity = vals * (10**mids) / sum(vals * (10**mids)) baseline = np.median(wdensity) wdensity = list(wdensity) # find highest density in upper half of barcode distribution peak = wdensity.index(max(wdensity[len(wdensity)//2:])) cutoff = None for index, dens in reversed(list(enumerate(wdensity[1:peak]))): if dens < 2 * baseline: cutoff = index break if not cutoff: return None else: cutoff = 10**mids[cutoff] logger.info('Setting barcode cutoff to %d' % cutoff) return cutoff
python
def guess_depth_cutoff(cb_histogram): ''' Guesses at an appropriate barcode cutoff ''' with read_cbhistogram(cb_histogram) as fh: cb_vals = [int(p.strip().split()[1]) for p in fh] histo = np.histogram(np.log10(cb_vals), bins=50) vals = histo[0] edges = histo[1] mids = np.array([(edges[i] + edges[i+1])/2 for i in range(edges.size - 1)]) wdensity = vals * (10**mids) / sum(vals * (10**mids)) baseline = np.median(wdensity) wdensity = list(wdensity) # find highest density in upper half of barcode distribution peak = wdensity.index(max(wdensity[len(wdensity)//2:])) cutoff = None for index, dens in reversed(list(enumerate(wdensity[1:peak]))): if dens < 2 * baseline: cutoff = index break if not cutoff: return None else: cutoff = 10**mids[cutoff] logger.info('Setting barcode cutoff to %d' % cutoff) return cutoff
['def', 'guess_depth_cutoff', '(', 'cb_histogram', ')', ':', 'with', 'read_cbhistogram', '(', 'cb_histogram', ')', 'as', 'fh', ':', 'cb_vals', '=', '[', 'int', '(', 'p', '.', 'strip', '(', ')', '.', 'split', '(', ')', '[', '1', ']', ')', 'for', 'p', 'in', 'fh', ']', 'histo', '=', 'np', '.', 'histogram', '(', 'np', '.', 'log10', '(', 'cb_vals', ')', ',', 'bins', '=', '50', ')', 'vals', '=', 'histo', '[', '0', ']', 'edges', '=', 'histo', '[', '1', ']', 'mids', '=', 'np', '.', 'array', '(', '[', '(', 'edges', '[', 'i', ']', '+', 'edges', '[', 'i', '+', '1', ']', ')', '/', '2', 'for', 'i', 'in', 'range', '(', 'edges', '.', 'size', '-', '1', ')', ']', ')', 'wdensity', '=', 'vals', '*', '(', '10', '**', 'mids', ')', '/', 'sum', '(', 'vals', '*', '(', '10', '**', 'mids', ')', ')', 'baseline', '=', 'np', '.', 'median', '(', 'wdensity', ')', 'wdensity', '=', 'list', '(', 'wdensity', ')', '# find highest density in upper half of barcode distribution', 'peak', '=', 'wdensity', '.', 'index', '(', 'max', '(', 'wdensity', '[', 'len', '(', 'wdensity', ')', '//', '2', ':', ']', ')', ')', 'cutoff', '=', 'None', 'for', 'index', ',', 'dens', 'in', 'reversed', '(', 'list', '(', 'enumerate', '(', 'wdensity', '[', '1', ':', 'peak', ']', ')', ')', ')', ':', 'if', 'dens', '<', '2', '*', 'baseline', ':', 'cutoff', '=', 'index', 'break', 'if', 'not', 'cutoff', ':', 'return', 'None', 'else', ':', 'cutoff', '=', '10', '**', 'mids', '[', 'cutoff', ']', 'logger', '.', 'info', '(', "'Setting barcode cutoff to %d'", '%', 'cutoff', ')', 'return', 'cutoff']
Guesses at an appropriate barcode cutoff
['Guesses', 'at', 'an', 'appropriate', 'barcode', 'cutoff']
train
https://github.com/vals/umis/blob/e8adb8486d9e9134ab8a6cad9811a7e74dcc4a2c/umis/umis.py#L1020-L1044
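A self-contained rerun of the weighted-density heuristic on synthetic counts (numpy only; note the floor division len(wdensity) // 2, since a plain '/' would produce a float slice index under Python 3):

import numpy as np

rng = np.random.default_rng(0)
# Synthetic per-barcode read counts: many shallow barcodes, a few deep ones.
cb_vals = np.concatenate([rng.integers(1, 50, 5000),
                          rng.integers(5000, 20000, 500)])

vals, edges = np.histogram(np.log10(cb_vals), bins=50)
mids = (edges[:-1] + edges[1:]) / 2
wdensity = vals * (10 ** mids) / np.sum(vals * (10 ** mids))
baseline = np.median(wdensity)
wdensity = list(wdensity)

# Highest weighted density in the upper half, then walk left until the
# density falls below twice the baseline; that bin becomes the cutoff.
peak = wdensity.index(max(wdensity[len(wdensity) // 2:]))
cutoff = None
for index, dens in reversed(list(enumerate(wdensity[1:peak]))):
    if dens < 2 * baseline:
        cutoff = index
        break
print(10 ** mids[cutoff] if cutoff else None)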
2,147
google/grr
grr/server/grr_response_server/flows/general/collectors.py
ArtifactExpander._ExpandArtifactFilesSource
def _ExpandArtifactFilesSource(self, source, requested): """Recursively expands an artifact files source.""" expanded_source = rdf_artifacts.ExpandedSource(base_source=source) sub_sources = [] artifact_list = [] if "artifact_list" in source.attributes: artifact_list = source.attributes["artifact_list"] for artifact_name in artifact_list: if artifact_name in self.processed_artifacts: continue artifact_obj = artifact_registry.REGISTRY.GetArtifact(artifact_name) for expanded_artifact in self.Expand(artifact_obj, requested): sub_sources.extend(expanded_artifact.sources) expanded_source.artifact_sources = sub_sources expanded_source.path_type = self._path_type return [expanded_source]
python
def _ExpandArtifactFilesSource(self, source, requested): """Recursively expands an artifact files source.""" expanded_source = rdf_artifacts.ExpandedSource(base_source=source) sub_sources = [] artifact_list = [] if "artifact_list" in source.attributes: artifact_list = source.attributes["artifact_list"] for artifact_name in artifact_list: if artifact_name in self.processed_artifacts: continue artifact_obj = artifact_registry.REGISTRY.GetArtifact(artifact_name) for expanded_artifact in self.Expand(artifact_obj, requested): sub_sources.extend(expanded_artifact.sources) expanded_source.artifact_sources = sub_sources expanded_source.path_type = self._path_type return [expanded_source]
['def', '_ExpandArtifactFilesSource', '(', 'self', ',', 'source', ',', 'requested', ')', ':', 'expanded_source', '=', 'rdf_artifacts', '.', 'ExpandedSource', '(', 'base_source', '=', 'source', ')', 'sub_sources', '=', '[', ']', 'artifact_list', '=', '[', ']', 'if', '"artifact_list"', 'in', 'source', '.', 'attributes', ':', 'artifact_list', '=', 'source', '.', 'attributes', '[', '"artifact_list"', ']', 'for', 'artifact_name', 'in', 'artifact_list', ':', 'if', 'artifact_name', 'in', 'self', '.', 'processed_artifacts', ':', 'continue', 'artifact_obj', '=', 'artifact_registry', '.', 'REGISTRY', '.', 'GetArtifact', '(', 'artifact_name', ')', 'for', 'expanded_artifact', 'in', 'self', '.', 'Expand', '(', 'artifact_obj', ',', 'requested', ')', ':', 'sub_sources', '.', 'extend', '(', 'expanded_artifact', '.', 'sources', ')', 'expanded_source', '.', 'artifact_sources', '=', 'sub_sources', 'expanded_source', '.', 'path_type', '=', 'self', '.', '_path_type', 'return', '[', 'expanded_source', ']']
Recursively expands an artifact files source.
['Recursively', 'expands', 'an', 'artifact', 'files', 'source', '.']
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flows/general/collectors.py#L1080-L1095
2,148
vertica/vertica-python
vertica_python/vertica/cursor.py
Cursor.copy
def copy(self, sql, data, **kwargs): """ EXAMPLE: >> with open("/tmp/file.csv", "rb") as fs: >> cursor.copy("COPY table(field1,field2) FROM STDIN DELIMITER ',' ENCLOSED BY ''''", >> fs, buffer_size=65536) """ sql = as_text(sql) if self.closed(): raise errors.InterfaceError('Cursor is closed') self.flush_to_query_ready() if isinstance(data, binary_type): stream = BytesIO(data) elif isinstance(data, text_type): stream = StringIO(data) elif isinstance(data, file_type): stream = data else: raise TypeError("Not valid type of data {0}".format(type(data))) self.connection.write(messages.Query(sql)) while True: message = self.connection.read_message() self._message = message if isinstance(message, messages.ErrorResponse): raise errors.QueryError.from_error_response(message, sql) self.connection.process_message(message=message) if isinstance(message, messages.ReadyForQuery): break elif isinstance(message, messages.CopyInResponse): self.connection.write(messages.CopyStream(stream, **kwargs)) self.connection.write(messages.CopyDone()) if self.error is not None: raise self.error
python
def copy(self, sql, data, **kwargs): """ EXAMPLE: >> with open("/tmp/file.csv", "rb") as fs: >> cursor.copy("COPY table(field1,field2) FROM STDIN DELIMITER ',' ENCLOSED BY ''''", >> fs, buffer_size=65536) """ sql = as_text(sql) if self.closed(): raise errors.InterfaceError('Cursor is closed') self.flush_to_query_ready() if isinstance(data, binary_type): stream = BytesIO(data) elif isinstance(data, text_type): stream = StringIO(data) elif isinstance(data, file_type): stream = data else: raise TypeError("Not valid type of data {0}".format(type(data))) self.connection.write(messages.Query(sql)) while True: message = self.connection.read_message() self._message = message if isinstance(message, messages.ErrorResponse): raise errors.QueryError.from_error_response(message, sql) self.connection.process_message(message=message) if isinstance(message, messages.ReadyForQuery): break elif isinstance(message, messages.CopyInResponse): self.connection.write(messages.CopyStream(stream, **kwargs)) self.connection.write(messages.CopyDone()) if self.error is not None: raise self.error
['def', 'copy', '(', 'self', ',', 'sql', ',', 'data', ',', '*', '*', 'kwargs', ')', ':', 'sql', '=', 'as_text', '(', 'sql', ')', 'if', 'self', '.', 'closed', '(', ')', ':', 'raise', 'errors', '.', 'InterfaceError', '(', "'Cursor is closed'", ')', 'self', '.', 'flush_to_query_ready', '(', ')', 'if', 'isinstance', '(', 'data', ',', 'binary_type', ')', ':', 'stream', '=', 'BytesIO', '(', 'data', ')', 'elif', 'isinstance', '(', 'data', ',', 'text_type', ')', ':', 'stream', '=', 'StringIO', '(', 'data', ')', 'elif', 'isinstance', '(', 'data', ',', 'file_type', ')', ':', 'stream', '=', 'data', 'else', ':', 'raise', 'TypeError', '(', '"Not valid type of data {0}"', '.', 'format', '(', 'type', '(', 'data', ')', ')', ')', 'self', '.', 'connection', '.', 'write', '(', 'messages', '.', 'Query', '(', 'sql', ')', ')', 'while', 'True', ':', 'message', '=', 'self', '.', 'connection', '.', 'read_message', '(', ')', 'self', '.', '_message', '=', 'message', 'if', 'isinstance', '(', 'message', ',', 'messages', '.', 'ErrorResponse', ')', ':', 'raise', 'errors', '.', 'QueryError', '.', 'from_error_response', '(', 'message', ',', 'sql', ')', 'self', '.', 'connection', '.', 'process_message', '(', 'message', '=', 'message', ')', 'if', 'isinstance', '(', 'message', ',', 'messages', '.', 'ReadyForQuery', ')', ':', 'break', 'elif', 'isinstance', '(', 'message', ',', 'messages', '.', 'CopyInResponse', ')', ':', 'self', '.', 'connection', '.', 'write', '(', 'messages', '.', 'CopyStream', '(', 'stream', ',', '*', '*', 'kwargs', ')', ')', 'self', '.', 'connection', '.', 'write', '(', 'messages', '.', 'CopyDone', '(', ')', ')', 'if', 'self', '.', 'error', 'is', 'not', 'None', ':', 'raise', 'self', '.', 'error']
EXAMPLE: >> with open("/tmp/file.csv", "rb") as fs: >> cursor.copy("COPY table(field1,field2) FROM STDIN DELIMITER ',' ENCLOSED BY ''''", >> fs, buffer_size=65536)
['EXAMPLE', ':', '>>', 'with', 'open', '(', '/', 'tmp', '/', 'file', '.', 'csv', 'rb', ')', 'as', 'fs', ':', '>>', 'cursor', '.', 'copy', '(', 'COPY', 'table', '(', 'field1', 'field2', ')', 'FROM', 'STDIN', 'DELIMITER', 'ENCLOSED', 'BY', '>>', 'fs', 'buffer_size', '=', '65536', ')']
train
https://github.com/vertica/vertica-python/blob/5619c1b2b2eb5ea751c684b28648fc376b5be29c/vertica_python/vertica/cursor.py#L337-L380
2,149
scikit-umfpack/scikit-umfpack
scikits/umfpack/umfpack.py
UmfpackContext.free_numeric
def free_numeric(self): """Free numeric data""" if self._numeric is not None: self.funs.free_numeric(self._numeric) self._numeric = None self.free_symbolic()
python
def free_numeric(self): """Free numeric data""" if self._numeric is not None: self.funs.free_numeric(self._numeric) self._numeric = None self.free_symbolic()
['def', 'free_numeric', '(', 'self', ')', ':', 'if', 'self', '.', '_numeric', 'is', 'not', 'None', ':', 'self', '.', 'funs', '.', 'free_numeric', '(', 'self', '.', '_numeric', ')', 'self', '.', '_numeric', '=', 'None', 'self', '.', 'free_symbolic', '(', ')']
Free numeric data
['Free', 'numeric', 'data']
train
https://github.com/scikit-umfpack/scikit-umfpack/blob/a2102ef92f4dd060138e72bb5d7c444f8ec49cbc/scikits/umfpack/umfpack.py#L625-L630
2,150
lehins/python-wepay
wepay/calls/withdrawal.py
Withdrawal.__modify
def __modify(self, withdrawal_id, **kwargs): """Call documentation: `/withdrawal/modify <https://www.wepay.com/developer/reference/withdrawal#modify>`_, plus extra keyword parameters: :keyword str access_token: will be used instead of instance's ``access_token``, with ``batch_mode=True`` will set `authorization` param to its value. :keyword bool batch_mode: turn on/off the batch_mode, see :class:`wepay.api.WePay` :keyword str batch_reference_id: `reference_id` param for batch call, see :class:`wepay.api.WePay` :keyword str api_version: WePay API version, see :class:`wepay.api.WePay` """ params = { 'withdrawal_id': withdrawal_id } return self.make_call(self.__modify, params, kwargs)
python
def __modify(self, withdrawal_id, **kwargs): """Call documentation: `/withdrawal/modify <https://www.wepay.com/developer/reference/withdrawal#modify>`_, plus extra keyword parameters: :keyword str access_token: will be used instead of instance's ``access_token``, with ``batch_mode=True`` will set `authorization` param to its value. :keyword bool batch_mode: turn on/off the batch_mode, see :class:`wepay.api.WePay` :keyword str batch_reference_id: `reference_id` param for batch call, see :class:`wepay.api.WePay` :keyword str api_version: WePay API version, see :class:`wepay.api.WePay` """ params = { 'withdrawal_id': withdrawal_id } return self.make_call(self.__modify, params, kwargs)
['def', '__modify', '(', 'self', ',', 'withdrawal_id', ',', '*', '*', 'kwargs', ')', ':', 'params', '=', '{', "'withdrawal_id'", ':', 'withdrawal_id', '}', 'return', 'self', '.', 'make_call', '(', 'self', '.', '__modify', ',', 'params', ',', 'kwargs', ')']
Call documentation: `/withdrawal/modify <https://www.wepay.com/developer/reference/withdrawal#modify>`_, plus extra keyword parameters: :keyword str access_token: will be used instead of instance's ``access_token``, with ``batch_mode=True`` will set `authorization` param to its value. :keyword bool batch_mode: turn on/off the batch_mode, see :class:`wepay.api.WePay` :keyword str batch_reference_id: `reference_id` param for batch call, see :class:`wepay.api.WePay` :keyword str api_version: WePay API version, see :class:`wepay.api.WePay`
['Call', 'documentation', ':', '/', 'withdrawal', '/', 'modify', '<https', ':', '//', 'www', '.', 'wepay', '.', 'com', '/', 'developer', '/', 'reference', '/', 'withdrawal#modify', '>', '_', 'plus', 'extra', 'keyword', 'parameters', ':', ':', 'keyword', 'str', 'access_token', ':', 'will', 'be', 'used', 'instead', 'of', 'instance', 's', 'access_token', 'with', 'batch_mode', '=', 'True', 'will', 'set', 'authorization', 'param', 'to', 'its', 'value', '.']
train
https://github.com/lehins/python-wepay/blob/414d25a1a8d0ecb22a3ddd1f16c60b805bb52a1f/wepay/calls/withdrawal.py#L59-L81
2,151
steenzout/python-object
setup.py
requirements
def requirements(requirements_file): """Return packages mentioned in the given file. Args: requirements_file (str): path to the requirements file to be parsed. Returns: (list): 3rd-party package dependencies contained in the file. """ return [ str(pkg.req) for pkg in parse_requirements( requirements_file, session=pip_download.PipSession()) if pkg.req is not None]
python
def requirements(requirements_file): """Return packages mentioned in the given file. Args: requirements_file (str): path to the requirements file to be parsed. Returns: (list): 3rd-party package dependencies contained in the file. """ return [ str(pkg.req) for pkg in parse_requirements( requirements_file, session=pip_download.PipSession()) if pkg.req is not None]
['def', 'requirements', '(', 'requirements_file', ')', ':', 'return', '[', 'str', '(', 'pkg', '.', 'req', ')', 'for', 'pkg', 'in', 'parse_requirements', '(', 'requirements_file', ',', 'session', '=', 'pip_download', '.', 'PipSession', '(', ')', ')', 'if', 'pkg', '.', 'req', 'is', 'not', 'None', ']']
Return packages mentioned in the given file. Args: requirements_file (str): path to the requirements file to be parsed. Returns: (list): 3rd-party package dependencies contained in the file.
['Return', 'packages', 'mentioned', 'in', 'the', 'given', 'file', '.']
train
https://github.com/steenzout/python-object/blob/b865e3eeb4c2435923cf900d3ef2a89c1b35fe18/setup.py#L19-L30
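Typical use in a setup.py, assuming an older pip (roughly pre-10) where pip.req.parse_requirements and pip.download.PipSession still live at those import paths:

from setuptools import setup

setup(
    name='example-package',  # illustrative metadata
    version='0.1.0',
    install_requires=requirements('requirements.txt'),
)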
2,152
SeleniumHQ/selenium
py/selenium/webdriver/support/select.py
Select.all_selected_options
def all_selected_options(self): """Returns a list of all selected options belonging to this select tag""" ret = [] for opt in self.options: if opt.is_selected(): ret.append(opt) return ret
python
def all_selected_options(self): """Returns a list of all selected options belonging to this select tag""" ret = [] for opt in self.options: if opt.is_selected(): ret.append(opt) return ret
['def', 'all_selected_options', '(', 'self', ')', ':', 'ret', '=', '[', ']', 'for', 'opt', 'in', 'self', '.', 'options', ':', 'if', 'opt', '.', 'is_selected', '(', ')', ':', 'ret', '.', 'append', '(', 'opt', ')', 'return', 'ret']
Returns a list of all selected options belonging to this select tag
['Returns', 'a', 'list', 'of', 'all', 'selected', 'options', 'belonging', 'to', 'this', 'select', 'tag']
train
https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/support/select.py#L50-L56
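Example use against a multi-select element, Selenium 3 style (the page URL and element id are invented; all_selected_options is exposed as a property in Selenium's released API):

from selenium import webdriver
from selenium.webdriver.support.select import Select

driver = webdriver.Firefox()
driver.get('https://example.com/form')               # hypothetical page
select = Select(driver.find_element_by_id('cars'))   # hypothetical element id
select.select_by_visible_text('Volvo')
print([opt.text for opt in select.all_selected_options])
driver.quit()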
2,153
benmack/eo-box
eobox/raster/cube.py
EOCubeChunk.from_eocube
def from_eocube(eocube, ji): """Create an EOCubeChunk object from an EOCube object.""" eocubewin = EOCubeChunk(ji, eocube.df_layers, eocube.chunksize, eocube.wdir) return eocubewin
python
def from_eocube(eocube, ji): """Create an EOCubeChunk object from an EOCube object.""" eocubewin = EOCubeChunk(ji, eocube.df_layers, eocube.chunksize, eocube.wdir) return eocubewin
['def', 'from_eocube', '(', 'eocube', ',', 'ji', ')', ':', 'eocubewin', '=', 'EOCubeChunk', '(', 'ji', ',', 'eocube', '.', 'df_layers', ',', 'eocube', '.', 'chunksize', ',', 'eocube', '.', 'wdir', ')', 'return', 'eocubewin']
Create an EOCubeChunk object from an EOCube object.
['Create', 'an', 'EOCubeChunk', 'object', 'from', 'an', 'EOCube', 'object', '.']
train
https://github.com/benmack/eo-box/blob/a291450c766bf50ea06adcdeb5729a4aad790ed5/eobox/raster/cube.py#L346-L349
2,154
xflr6/graphviz
graphviz/backend.py
render
def render(engine, format, filepath, renderer=None, formatter=None, quiet=False): """Render file with Graphviz ``engine`` into ``format``, return result filename. Args: engine: The layout command used for rendering (``'dot'``, ``'neato'``, ...). format: The output format used for rendering (``'pdf'``, ``'png'``, ...). filepath: Path to the DOT source file to render. renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...). formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...). quiet (bool): Suppress ``stderr`` output. Returns: The (possibly relative) path of the rendered file. Raises: ValueError: If ``engine``, ``format``, ``renderer``, or ``formatter`` are not known. graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None. graphviz.ExecutableNotFound: If the Graphviz executable is not found. subprocess.CalledProcessError: If the exit status is non-zero. """ cmd, rendered = command(engine, format, filepath, renderer, formatter) run(cmd, capture_output=True, check=True, quiet=quiet) return rendered
python
def render(engine, format, filepath, renderer=None, formatter=None, quiet=False): """Render file with Graphviz ``engine`` into ``format``, return result filename. Args: engine: The layout command used for rendering (``'dot'``, ``'neato'``, ...). format: The output format used for rendering (``'pdf'``, ``'png'``, ...). filepath: Path to the DOT source file to render. renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...). formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...). quiet (bool): Suppress ``stderr`` output. Returns: The (possibly relative) path of the rendered file. Raises: ValueError: If ``engine``, ``format``, ``renderer``, or ``formatter`` are not known. graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None. graphviz.ExecutableNotFound: If the Graphviz executable is not found. subprocess.CalledProcessError: If the exit status is non-zero. """ cmd, rendered = command(engine, format, filepath, renderer, formatter) run(cmd, capture_output=True, check=True, quiet=quiet) return rendered
['def', 'render', '(', 'engine', ',', 'format', ',', 'filepath', ',', 'renderer', '=', 'None', ',', 'formatter', '=', 'None', ',', 'quiet', '=', 'False', ')', ':', 'cmd', ',', 'rendered', '=', 'command', '(', 'engine', ',', 'format', ',', 'filepath', ',', 'renderer', ',', 'formatter', ')', 'run', '(', 'cmd', ',', 'capture_output', '=', 'True', ',', 'check', '=', 'True', ',', 'quiet', '=', 'quiet', ')', 'return', 'rendered']
Render file with Graphviz ``engine`` into ``format``, return result filename. Args: engine: The layout command used for rendering (``'dot'``, ``'neato'``, ...). format: The output format used for rendering (``'pdf'``, ``'png'``, ...). filepath: Path to the DOT source file to render. renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...). formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...). quiet (bool): Suppress ``stderr`` output. Returns: The (possibly relative) path of the rendered file. Raises: ValueError: If ``engine``, ``format``, ``renderer``, or ``formatter`` are not known. graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None. graphviz.ExecutableNotFound: If the Graphviz executable is not found. subprocess.CalledProcessError: If the exit status is non-zero.
['Render', 'file', 'with', 'Graphviz', 'engine', 'into', 'format', 'return', 'result', 'filename', '.']
train
https://github.com/xflr6/graphviz/blob/7376095ef1e47abad7e0b0361b6c9720b706e7a0/graphviz/backend.py#L164-L184
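Usage sketch, assuming a Graphviz installation on PATH and a DOT source file on disk:

# example.gv is a hypothetical DOT file, e.g. 'digraph { a -> b }'
rendered = render('dot', 'png', 'example.gv')
print(rendered)  # typically 'example.gv.png': the input path plus a format suffix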
2,155
RedHatInsights/insights-core
examples/rules/sample_script.py
report
def report(rel): """Fires if the machine is running Fedora.""" if "Fedora" in rel.product: return make_pass("IS_FEDORA", product=rel.product) else: return make_fail("IS_NOT_FEDORA", product=rel.product)
python
def report(rel): """Fires if the machine is running Fedora.""" if "Fedora" in rel.product: return make_pass("IS_FEDORA", product=rel.product) else: return make_fail("IS_NOT_FEDORA", product=rel.product)
['def', 'report', '(', 'rel', ')', ':', 'if', '"Fedora"', 'in', 'rel', '.', 'product', ':', 'return', 'make_pass', '(', '"IS_FEDORA"', ',', 'product', '=', 'rel', '.', 'product', ')', 'else', ':', 'return', 'make_fail', '(', '"IS_NOT_FEDORA"', ',', 'product', '=', 'rel', '.', 'product', ')']
Fires if the machine is running Fedora.
['Fires', 'if', 'the', 'machine', 'is', 'running', 'Fedora', '.']
train
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/examples/rules/sample_script.py#L24-L30
2,156
projectatomic/osbs-client
osbs/api.py
OSBS.get_compression_extension
def get_compression_extension(self): """ Find the filename extension for the 'docker save' output, which may or may not be compressed. Raises OsbsValidationException if the extension cannot be determined due to a configuration error. :returns: str including leading dot, or else None if no compression """ build_request = BuildRequest(build_json_store=self.os_conf.get_build_json_store()) inner = build_request.inner_template postbuild_plugins = inner.get('postbuild_plugins', []) for plugin in postbuild_plugins: if plugin.get('name') == 'compress': args = plugin.get('args', {}) method = args.get('method', 'gzip') if method == 'gzip': return '.gz' elif method == 'lzma': return '.xz' raise OsbsValidationException("unknown compression method '%s'" % method) return None
python
def get_compression_extension(self): """ Find the filename extension for the 'docker save' output, which may or may not be compressed. Raises OsbsValidationException if the extension cannot be determined due to a configuration error. :returns: str including leading dot, or else None if no compression """ build_request = BuildRequest(build_json_store=self.os_conf.get_build_json_store()) inner = build_request.inner_template postbuild_plugins = inner.get('postbuild_plugins', []) for plugin in postbuild_plugins: if plugin.get('name') == 'compress': args = plugin.get('args', {}) method = args.get('method', 'gzip') if method == 'gzip': return '.gz' elif method == 'lzma': return '.xz' raise OsbsValidationException("unknown compression method '%s'" % method) return None
['def', 'get_compression_extension', '(', 'self', ')', ':', 'build_request', '=', 'BuildRequest', '(', 'build_json_store', '=', 'self', '.', 'os_conf', '.', 'get_build_json_store', '(', ')', ')', 'inner', '=', 'build_request', '.', 'inner_template', 'postbuild_plugins', '=', 'inner', '.', 'get', '(', "'postbuild_plugins'", ',', '[', ']', ')', 'for', 'plugin', 'in', 'postbuild_plugins', ':', 'if', 'plugin', '.', 'get', '(', "'name'", ')', '==', "'compress'", ':', 'args', '=', 'plugin', '.', 'get', '(', "'args'", ',', '{', '}', ')', 'method', '=', 'args', '.', 'get', '(', "'method'", ',', "'gzip'", ')', 'if', 'method', '==', "'gzip'", ':', 'return', "'.gz'", 'elif', 'method', '==', "'lzma'", ':', 'return', "'.xz'", 'raise', 'OsbsValidationException', '(', '"unknown compression method \'%s\'"', '%', 'method', ')', 'return', 'None']
Find the filename extension for the 'docker save' output, which may or may not be compressed. Raises OsbsValidationException if the extension cannot be determined due to a configuration error. :returns: str including leading dot, or else None if no compression
['Find', 'the', 'filename', 'extension', 'for', 'the', 'docker', 'save', 'output', 'which', 'may', 'or', 'may', 'not', 'be', 'compressed', '.']
train
https://github.com/projectatomic/osbs-client/blob/571fe035dab3a7c02e1dccd5d65ffd75be750458/osbs/api.py#L1267-L1292
2,157
oceanprotocol/oceandb-bigchaindb-driver
oceandb_bigchaindb_driver/instance.py
ed25519_generate_key_pair_from_secret
def ed25519_generate_key_pair_from_secret(secret): """ Generate a new key pair. Args: secret (:class:`string`): A secret that serves as a seed Returns: A tuple of (private_key, public_key) encoded in base58. """ # if you want to do this correctly, use a key derivation function! if not isinstance(secret, bytes): secret = secret.encode() hash_bytes = sha3.keccak_256(secret).digest() sk = Ed25519SigningKeyFromHash.generate(hash_bytes=hash_bytes) # Private key private_value_base58 = sk.encode(encoding='base58') # Public key public_value_compressed_base58 = sk.get_verifying_key().encode(encoding='base58') return private_value_base58, public_value_compressed_base58
python
def ed25519_generate_key_pair_from_secret(secret): """ Generate a new key pair. Args: secret (:class:`string`): A secret that serves as a seed Returns: A tuple of (private_key, public_key) encoded in base58. """ # if you want to do this correctly, use a key derivation function! if not isinstance(secret, bytes): secret = secret.encode() hash_bytes = sha3.keccak_256(secret).digest() sk = Ed25519SigningKeyFromHash.generate(hash_bytes=hash_bytes) # Private key private_value_base58 = sk.encode(encoding='base58') # Public key public_value_compressed_base58 = sk.get_verifying_key().encode(encoding='base58') return private_value_base58, public_value_compressed_base58
['def', 'ed25519_generate_key_pair_from_secret', '(', 'secret', ')', ':', '# if you want to do this correctly, use a key derivation function!', 'if', 'not', 'isinstance', '(', 'secret', ',', 'bytes', ')', ':', 'secret', '=', 'secret', '.', 'encode', '(', ')', 'hash_bytes', '=', 'sha3', '.', 'keccak_256', '(', 'secret', ')', '.', 'digest', '(', ')', 'sk', '=', 'Ed25519SigningKeyFromHash', '.', 'generate', '(', 'hash_bytes', '=', 'hash_bytes', ')', '# Private key', 'private_value_base58', '=', 'sk', '.', 'encode', '(', 'encoding', '=', "'base58'", ')', '# Public key', 'public_value_compressed_base58', '=', 'sk', '.', 'get_verifying_key', '(', ')', '.', 'encode', '(', 'encoding', '=', "'base58'", ')', 'return', 'private_value_base58', ',', 'public_value_compressed_base58']
Generate a new key pair. Args: secret (:class:`string`): A secret that serves as a seed Returns: A tuple of (private_key, public_key) encoded in base58.
['Generate', 'a', 'new', 'key', 'pair', '.', 'Args', ':', 'secret', '(', ':', 'class', ':', 'string', ')', ':', 'A', 'secret', 'that', 'serves', 'as', 'a', 'seed', 'Returns', ':', 'A', 'tuple', 'of', '(', 'private_key', 'public_key', ')', 'encoded', 'in', 'base58', '.']
train
https://github.com/oceanprotocol/oceandb-bigchaindb-driver/blob/82315bcc9f7ba8b01beb08014bdeb541546c6671/oceandb_bigchaindb_driver/instance.py#L48-L69
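Usage sketch (assumes the pysha3 package providing the sha3 module with keccak_256, plus the driver's Ed25519SigningKeyFromHash helper):

private_b58, public_b58 = ed25519_generate_key_pair_from_secret('my secret seed')
print(private_b58)  # base58 signing key, deterministic for a given seed
print(public_b58)   # matching base58 verifying key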
2,158
djgagne/hagelslag
hagelslag/processing/TrackProcessing.py
TrackProcessor.find_mrms_tracks
def find_mrms_tracks(self): """ Identify objects from MRMS timesteps and link them together with object matching. Returns: List of STObjects containing MESH track information. """ obs_objects = [] tracked_obs_objects = [] if self.mrms_ew is not None: self.mrms_grid.load_data() if len(self.mrms_grid.data) != len(self.hours): print('Less than 24 hours of observation data found') return tracked_obs_objects for h, hour in enumerate(self.hours): mrms_data = np.zeros(self.mrms_grid.data[h].shape) mrms_data[:] = np.array(self.mrms_grid.data[h]) mrms_data[mrms_data < 0] = 0 hour_labels = self.mrms_ew.size_filter(self.mrms_ew.label(gaussian_filter(mrms_data, self.gaussian_window)), self.size_filter) hour_labels[mrms_data < self.mrms_ew.min_thresh] = 0 obj_slices = find_objects(hour_labels) num_slices = len(obj_slices) obs_objects.append([]) if num_slices > 0: for sl in obj_slices: obs_objects[-1].append(STObject(mrms_data[sl], np.where(hour_labels[sl] > 0, 1, 0), self.model_grid.x[sl], self.model_grid.y[sl], self.model_grid.i[sl], self.model_grid.j[sl], hour, hour, dx=self.model_grid.dx)) if h > 0: dims = obs_objects[-1][-1].timesteps[0].shape obs_objects[-1][-1].estimate_motion(hour, self.mrms_grid.data[h-1], dims[1], dims[0]) for h, hour in enumerate(self.hours): past_time_objs = [] for obj in tracked_obs_objects: if obj.end_time == hour - 1: past_time_objs.append(obj) if len(past_time_objs) == 0: tracked_obs_objects.extend(obs_objects[h]) elif len(past_time_objs) > 0 and len(obs_objects[h]) > 0: assignments = self.object_matcher.match_objects(past_time_objs, obs_objects[h], hour - 1, hour) unpaired = list(range(len(obs_objects[h]))) for pair in assignments: past_time_objs[pair[0]].extend(obs_objects[h][pair[1]]) unpaired.remove(pair[1]) if len(unpaired) > 0: for up in unpaired: tracked_obs_objects.append(obs_objects[h][up]) print("Tracked Obs Objects: {0:03d} Hour: {1:02d}".format(len(tracked_obs_objects), hour)) return tracked_obs_objects
python
def find_mrms_tracks(self): """ Identify objects from MRMS timesteps and link them together with object matching. Returns: List of STObjects containing MESH track information. """ obs_objects = [] tracked_obs_objects = [] if self.mrms_ew is not None: self.mrms_grid.load_data() if len(self.mrms_grid.data) != len(self.hours): print('Less than 24 hours of observation data found') return tracked_obs_objects for h, hour in enumerate(self.hours): mrms_data = np.zeros(self.mrms_grid.data[h].shape) mrms_data[:] = np.array(self.mrms_grid.data[h]) mrms_data[mrms_data < 0] = 0 hour_labels = self.mrms_ew.size_filter(self.mrms_ew.label(gaussian_filter(mrms_data, self.gaussian_window)), self.size_filter) hour_labels[mrms_data < self.mrms_ew.min_thresh] = 0 obj_slices = find_objects(hour_labels) num_slices = len(obj_slices) obs_objects.append([]) if num_slices > 0: for sl in obj_slices: obs_objects[-1].append(STObject(mrms_data[sl], np.where(hour_labels[sl] > 0, 1, 0), self.model_grid.x[sl], self.model_grid.y[sl], self.model_grid.i[sl], self.model_grid.j[sl], hour, hour, dx=self.model_grid.dx)) if h > 0: dims = obs_objects[-1][-1].timesteps[0].shape obs_objects[-1][-1].estimate_motion(hour, self.mrms_grid.data[h-1], dims[1], dims[0]) for h, hour in enumerate(self.hours): past_time_objs = [] for obj in tracked_obs_objects: if obj.end_time == hour - 1: past_time_objs.append(obj) if len(past_time_objs) == 0: tracked_obs_objects.extend(obs_objects[h]) elif len(past_time_objs) > 0 and len(obs_objects[h]) > 0: assignments = self.object_matcher.match_objects(past_time_objs, obs_objects[h], hour - 1, hour) unpaired = list(range(len(obs_objects[h]))) for pair in assignments: past_time_objs[pair[0]].extend(obs_objects[h][pair[1]]) unpaired.remove(pair[1]) if len(unpaired) > 0: for up in unpaired: tracked_obs_objects.append(obs_objects[h][up]) print("Tracked Obs Objects: {0:03d} Hour: {1:02d}".format(len(tracked_obs_objects), hour)) return tracked_obs_objects
['def', 'find_mrms_tracks', '(', 'self', ')', ':', 'obs_objects', '=', '[', ']', 'tracked_obs_objects', '=', '[', ']', 'if', 'self', '.', 'mrms_ew', 'is', 'not', 'None', ':', 'self', '.', 'mrms_grid', '.', 'load_data', '(', ')', 'if', 'len', '(', 'self', '.', 'mrms_grid', '.', 'data', ')', '!=', 'len', '(', 'self', '.', 'hours', ')', ':', 'print', '(', "'Less than 24 hours of observation data found'", ')', 'return', 'tracked_obs_objects', 'for', 'h', ',', 'hour', 'in', 'enumerate', '(', 'self', '.', 'hours', ')', ':', 'mrms_data', '=', 'np', '.', 'zeros', '(', 'self', '.', 'mrms_grid', '.', 'data', '[', 'h', ']', '.', 'shape', ')', 'mrms_data', '[', ':', ']', '=', 'np', '.', 'array', '(', 'self', '.', 'mrms_grid', '.', 'data', '[', 'h', ']', ')', 'mrms_data', '[', 'mrms_data', '<', '0', ']', '=', '0', 'hour_labels', '=', 'self', '.', 'mrms_ew', '.', 'size_filter', '(', 'self', '.', 'mrms_ew', '.', 'label', '(', 'gaussian_filter', '(', 'mrms_data', ',', 'self', '.', 'gaussian_window', ')', ')', ',', 'self', '.', 'size_filter', ')', 'hour_labels', '[', 'mrms_data', '<', 'self', '.', 'mrms_ew', '.', 'min_thresh', ']', '=', '0', 'obj_slices', '=', 'find_objects', '(', 'hour_labels', ')', 'num_slices', '=', 'len', '(', 'obj_slices', ')', 'obs_objects', '.', 'append', '(', '[', ']', ')', 'if', 'num_slices', '>', '0', ':', 'for', 'sl', 'in', 'obj_slices', ':', 'obs_objects', '[', '-', '1', ']', '.', 'append', '(', 'STObject', '(', 'mrms_data', '[', 'sl', ']', ',', 'np', '.', 'where', '(', 'hour_labels', '[', 'sl', ']', '>', '0', ',', '1', ',', '0', ')', ',', 'self', '.', 'model_grid', '.', 'x', '[', 'sl', ']', ',', 'self', '.', 'model_grid', '.', 'y', '[', 'sl', ']', ',', 'self', '.', 'model_grid', '.', 'i', '[', 'sl', ']', ',', 'self', '.', 'model_grid', '.', 'j', '[', 'sl', ']', ',', 'hour', ',', 'hour', ',', 'dx', '=', 'self', '.', 'model_grid', '.', 'dx', ')', ')', 'if', 'h', '>', '0', ':', 'dims', '=', 'obs_objects', '[', '-', '1', ']', '[', '-', '1', ']', '.', 'timesteps', '[', '0', ']', '.', 'shape', 'obs_objects', '[', '-', '1', ']', '[', '-', '1', ']', '.', 'estimate_motion', '(', 'hour', ',', 'self', '.', 'mrms_grid', '.', 'data', '[', 'h', '-', '1', ']', ',', 'dims', '[', '1', ']', ',', 'dims', '[', '0', ']', ')', 'for', 'h', ',', 'hour', 'in', 'enumerate', '(', 'self', '.', 'hours', ')', ':', 'past_time_objs', '=', '[', ']', 'for', 'obj', 'in', 'tracked_obs_objects', ':', 'if', 'obj', '.', 'end_time', '==', 'hour', '-', '1', ':', 'past_time_objs', '.', 'append', '(', 'obj', ')', 'if', 'len', '(', 'past_time_objs', ')', '==', '0', ':', 'tracked_obs_objects', '.', 'extend', '(', 'obs_objects', '[', 'h', ']', ')', 'elif', 'len', '(', 'past_time_objs', ')', '>', '0', 'and', 'len', '(', 'obs_objects', '[', 'h', ']', ')', '>', '0', ':', 'assignments', '=', 'self', '.', 'object_matcher', '.', 'match_objects', '(', 'past_time_objs', ',', 'obs_objects', '[', 'h', ']', ',', 'hour', '-', '1', ',', 'hour', ')', 'unpaired', '=', 'list', '(', 'range', '(', 'len', '(', 'obs_objects', '[', 'h', ']', ')', ')', ')', 'for', 'pair', 'in', 'assignments', ':', 'past_time_objs', '[', 'pair', '[', '0', ']', ']', '.', 'extend', '(', 'obs_objects', '[', 'h', ']', '[', 'pair', '[', '1', ']', ']', ')', 'unpaired', '.', 'remove', '(', 'pair', '[', '1', ']', ')', 'if', 'len', '(', 'unpaired', ')', '>', '0', ':', 'for', 'up', 'in', 'unpaired', ':', 'tracked_obs_objects', '.', 'append', '(', 'obs_objects', '[', 'h', ']', '[', 'up', ']', ')', 'print', '(', '"Tracked Obs Objects: {0:03d} Hour: {1:02d}"', '.', 'format', '(', 'len', '(', 'tracked_obs_objects', ')', ',', 'hour', ')', ')', 'return', 'tracked_obs_objects']
Identify objects from MRMS timesteps and link them together with object matching. Returns: List of STObjects containing MESH track information.
['Identify', 'objects', 'from', 'MRMS', 'timesteps', 'and', 'link', 'them', 'together', 'with', 'object', 'matching', '.']
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackProcessing.py#L267-L328
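The hour-by-hour linking reduces to a generic extend-or-spawn pattern; a stripped-down sketch with integer ids standing in for STObjects (the matcher here is a trivial same-id pairing, not hagelslag's cost-based ObjectMatcher):

def link_tracks(objects_by_hour, match):
    tracks = []
    for hour, objs in enumerate(objects_by_hour):
        live = [t for t in tracks if t['end'] == hour - 1]
        if not live:
            tracks.extend({'ids': [o], 'end': hour} for o in objs)
            continue
        unpaired = list(range(len(objs)))
        for ti, oi in match(live, objs):
            live[ti]['ids'].append(objs[oi])  # extend an existing track
            live[ti]['end'] = hour
            unpaired.remove(oi)
        tracks.extend({'ids': [objs[i]], 'end': hour} for i in unpaired)
    return tracks

def same_id(live, objs):
    # Trivial matcher: pair a live track with the object sharing its last id.
    return [(t, o) for t, track in enumerate(live)
            for o, obj in enumerate(objs) if track['ids'][-1] == obj]

print(link_tracks([[1, 2], [1, 3], [1]], same_id))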
2,159
foremast/foremast
src/foremast/awslambda/awslambda.py
LambdaFunction.update_function_configuration
def update_function_configuration(self, vpc_config): """Update existing Lambda function configuration. Args: vpc_config (dict): Dictionary of SubnetIds and SecurityGroupIds for using a VPC in lambda """ LOG.info('Updating configuration for lambda function: %s', self.app_name) try: self.lambda_client.update_function_configuration( Environment=self.lambda_environment, FunctionName=self.app_name, Runtime=self.runtime, Role=self.role_arn, Handler=self.handler, Description=self.description, Timeout=int(self.timeout), MemorySize=int(self.memory), VpcConfig=vpc_config) if self.concurrency_limit: self.lambda_client.put_function_concurrency( FunctionName=self.app_name, ReservedConcurrentExecutions=self.concurrency_limit ) else: self.lambda_client.delete_function_concurrency(FunctionName=self.app_name) except boto3.exceptions.botocore.exceptions.ClientError as error: if 'CreateNetworkInterface' in error.response['Error']['Message']: message = '{0} is missing "ec2:CreateNetworkInterface"'.format(self.role_arn) LOG.debug(message) raise SystemExit(message) raise LOG.info('Updating Lambda function tags') lambda_arn = get_lambda_arn(self.app_name, self.env, self.region) self.lambda_client.tag_resource(Resource=lambda_arn, Tags={'app_group': self.group, 'app_name': self.app_name}) LOG.info("Successfully updated Lambda configuration.")
python
def update_function_configuration(self, vpc_config): """Update existing Lambda function configuration. Args: vpc_config (dict): Dictionary of SubnetIds and SecurityGroupIds for using a VPC in lambda """ LOG.info('Updating configuration for lambda function: %s', self.app_name) try: self.lambda_client.update_function_configuration( Environment=self.lambda_environment, FunctionName=self.app_name, Runtime=self.runtime, Role=self.role_arn, Handler=self.handler, Description=self.description, Timeout=int(self.timeout), MemorySize=int(self.memory), VpcConfig=vpc_config) if self.concurrency_limit: self.lambda_client.put_function_concurrency( FunctionName=self.app_name, ReservedConcurrentExecutions=self.concurrency_limit ) else: self.lambda_client.delete_function_concurrency(FunctionName=self.app_name) except boto3.exceptions.botocore.exceptions.ClientError as error: if 'CreateNetworkInterface' in error.response['Error']['Message']: message = '{0} is missing "ec2:CreateNetworkInterface"'.format(self.role_arn) LOG.debug(message) raise SystemExit(message) raise LOG.info('Updating Lambda function tags') lambda_arn = get_lambda_arn(self.app_name, self.env, self.region) self.lambda_client.tag_resource(Resource=lambda_arn, Tags={'app_group': self.group, 'app_name': self.app_name}) LOG.info("Successfully updated Lambda configuration.")
['def', 'update_function_configuration', '(', 'self', ',', 'vpc_config', ')', ':', 'LOG', '.', 'info', '(', "'Updating configuration for lambda function: %s'", ',', 'self', '.', 'app_name', ')', 'try', ':', 'self', '.', 'lambda_client', '.', 'update_function_configuration', '(', 'Environment', '=', 'self', '.', 'lambda_environment', ',', 'FunctionName', '=', 'self', '.', 'app_name', ',', 'Runtime', '=', 'self', '.', 'runtime', ',', 'Role', '=', 'self', '.', 'role_arn', ',', 'Handler', '=', 'self', '.', 'handler', ',', 'Description', '=', 'self', '.', 'description', ',', 'Timeout', '=', 'int', '(', 'self', '.', 'timeout', ')', ',', 'MemorySize', '=', 'int', '(', 'self', '.', 'memory', ')', ',', 'VpcConfig', '=', 'vpc_config', ')', 'if', 'self', '.', 'concurrency_limit', ':', 'self', '.', 'lambda_client', '.', 'put_function_concurrency', '(', 'FunctionName', '=', 'self', '.', 'app_name', ',', 'ReservedConcurrentExecutions', '=', 'self', '.', 'concurrency_limit', ')', 'else', ':', 'self', '.', 'lambda_client', '.', 'delete_function_concurrency', '(', 'FunctionName', '=', 'self', '.', 'app_name', ')', 'except', 'boto3', '.', 'exceptions', '.', 'botocore', '.', 'exceptions', '.', 'ClientError', 'as', 'error', ':', 'if', "'CreateNetworkInterface'", 'in', 'error', '.', 'response', '[', "'Error'", ']', '[', "'Message'", ']', ':', 'message', '=', '\'{0} is missing "ec2:CreateNetworkInterface"\'', '.', 'format', '(', 'self', '.', 'role_arn', ')', 'LOG', '.', 'debug', '(', 'message', ')', 'raise', 'SystemExit', '(', 'message', ')', 'raise', 'LOG', '.', 'info', '(', "'Updating Lambda function tags'", ')', 'lambda_arn', '=', 'get_lambda_arn', '(', 'self', '.', 'app_name', ',', 'self', '.', 'env', ',', 'self', '.', 'region', ')', 'self', '.', 'lambda_client', '.', 'tag_resource', '(', 'Resource', '=', 'lambda_arn', ',', 'Tags', '=', '{', "'app_group'", ':', 'self', '.', 'group', ',', "'app_name'", ':', 'self', '.', 'app_name', '}', ')', 'LOG', '.', 'info', '(', '"Successfully updated Lambda configuration."', ')']
Update existing Lambda function configuration. Args: vpc_config (dict): Dictionary of SubnetIds and SecurityGroupIds for using a VPC in lambda
['Update', 'existing', 'Lambda', 'function', 'configuration', '.']
train
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/awslambda/awslambda.py#L165-L206
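The concurrency toggle mirrors plain boto3 calls; a minimal standalone sketch (the function name and limit are placeholders):

import boto3

client = boto3.client('lambda')
limit = 25  # a falsy value would mean 'remove the reservation'

if limit:
    client.put_function_concurrency(
        FunctionName='my-function',  # hypothetical
        ReservedConcurrentExecutions=limit)
else:
    client.delete_function_concurrency(FunctionName='my-function')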
2,160
tanghaibao/jcvi
jcvi/apps/phylo.py
run_gblocks
def run_gblocks(align_fasta_file, **kwargs): """ remove poorly aligned positions and divergent regions with Gblocks """ cl = GblocksCommandline(aln_file=align_fasta_file, **kwargs) r, e = cl.run() print("Gblocks:", cl, file=sys.stderr) if e: print("***Gblocks could not run", file=sys.stderr) return None else: print(r, file=sys.stderr) alignp = re.sub(r'.*Gblocks alignment:.*\(([0-9]{1,3}) %\).*', \ r'\1', r, flags=re.DOTALL) alignp = int(alignp) if alignp <= 10: print("** WARNING ** Only %s %% positions retained by Gblocks. " \ "Results aborted. Using original alignment instead.\n" % alignp, file=sys.stderr) return None else: return align_fasta_file+"-gb"
python
def run_gblocks(align_fasta_file, **kwargs): """ remove poorly aligned positions and divergent regions with Gblocks """ cl = GblocksCommandline(aln_file=align_fasta_file, **kwargs) r, e = cl.run() print("Gblocks:", cl, file=sys.stderr) if e: print("***Gblocks could not run", file=sys.stderr) return None else: print(r, file=sys.stderr) alignp = re.sub(r'.*Gblocks alignment:.*\(([0-9]{1,3}) %\).*', \ r'\1', r, flags=re.DOTALL) alignp = int(alignp) if alignp <= 10: print("** WARNING ** Only %s %% positions retained by Gblocks. " \ "Results aborted. Using original alignment instead.\n" % alignp, file=sys.stderr) return None else: return align_fasta_file+"-gb"
['def', 'run_gblocks', '(', 'align_fasta_file', ',', '*', '*', 'kwargs', ')', ':', 'cl', '=', 'GblocksCommandline', '(', 'aln_file', '=', 'align_fasta_file', ',', '*', '*', 'kwargs', ')', 'r', ',', 'e', '=', 'cl', '.', 'run', '(', ')', 'print', '(', '"Gblocks:"', ',', 'cl', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'if', 'e', ':', 'print', '(', '"***Gblocks could not run"', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'return', 'None', 'else', ':', 'print', '(', 'r', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'alignp', '=', 're', '.', 'sub', '(', "r'.*Gblocks alignment:.*\\(([0-9]{1,3}) %\\).*'", ',', "r'\\1'", ',', 'r', ',', 'flags', '=', 're', '.', 'DOTALL', ')', 'alignp', '=', 'int', '(', 'alignp', ')', 'if', 'alignp', '<=', '10', ':', 'print', '(', '"** WARNING ** Only %s %% positions retained by Gblocks. "', '"Results aborted. Using original alignment instead.\\n"', '%', 'alignp', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'return', 'None', 'else', ':', 'return', 'align_fasta_file', '+', '"-gb"']
remove poorly aligned positions and divergent regions with Gblocks
['remove', 'poorly', 'aligned', 'positions', 'and', 'divergent', 'regions', 'with', 'Gblocks']
train
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/phylo.py#L152-L174
2,161
10gen/mongo-orchestration
mongo_orchestration/process.py
kill_mprocess
def kill_mprocess(process): """kill process Args: process - Popen object for process """ if process and proc_alive(process): process.terminate() process.communicate() return not proc_alive(process)
python
def kill_mprocess(process): """kill process Args: process - Popen object for process """ if process and proc_alive(process): process.terminate() process.communicate() return not proc_alive(process)
['def', 'kill_mprocess', '(', 'process', ')', ':', 'if', 'process', 'and', 'proc_alive', '(', 'process', ')', ':', 'process', '.', 'terminate', '(', ')', 'process', '.', 'communicate', '(', ')', 'return', 'not', 'proc_alive', '(', 'process', ')']
kill process Args: process - Popen object for process
['kill', 'process', 'Args', ':', 'process', '-', 'Popen', 'object', 'for', 'process']
train
https://github.com/10gen/mongo-orchestration/blob/81fd2224205922ea2178b08190b53a33aec47261/mongo_orchestration/process.py#L263-L271
2,162
Aula13/poloniex
poloniex/poloniex.py
Poloniex.createLoanOffer
def createLoanOffer(self, currency, amount, duration, autoRenew, lendingRate): """Creates a loan offer for a given currency. Required POST parameters are "currency", "amount", "duration", "autoRenew" (0 or 1), and "lendingRate". """ return self._private('createLoanOffer', currency=currency, amount=amount, duration=duration, autoRenew=autoRenew, lendingRate=lendingRate)
python
def createLoanOffer(self, currency, amount, duration, autoRenew, lendingRate): """Creates a loan offer for a given currency. Required POST parameters are "currency", "amount", "duration", "autoRenew" (0 or 1), and "lendingRate". """ return self._private('createLoanOffer', currency=currency, amount=amount, duration=duration, autoRenew=autoRenew, lendingRate=lendingRate)
['def', 'createLoanOffer', '(', 'self', ',', 'currency', ',', 'amount', ',', 'duration', ',', 'autoRenew', ',', 'lendingRate', ')', ':', 'return', 'self', '.', '_private', '(', "'createLoanOffer'", ',', 'currency', '=', 'currency', ',', 'amount', '=', 'amount', ',', 'duration', '=', 'duration', ',', 'autoRenew', '=', 'autoRenew', ',', 'lendingRate', '=', 'lendingRate', ')']
Creates a loan offer for a given currency. Required POST parameters are "currency", "amount", "duration", "autoRenew" (0 or 1), and "lendingRate".
['Creates', 'a', 'loan', 'offer', 'for', 'a', 'given', 'currency', '.', 'Required', 'POST', 'parameters', 'are', 'currency', 'amount', 'duration', 'autoRenew', '(', '0', 'or', '1', ')', 'and', 'lendingRate', '.']
train
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L369-L376
2,163
jmgilman/Neolib
neolib/pyamf/amf3.py
Context.addProxyObject
def addProxyObject(self, obj, proxied): """ Stores a reference to the unproxied and proxied versions of C{obj} for later retrieval. @since: 0.6 """ self.proxied_objects[id(obj)] = proxied self.proxied_objects[id(proxied)] = obj
python
def addProxyObject(self, obj, proxied): """ Stores a reference to the unproxied and proxied versions of C{obj} for later retrieval. @since: 0.6 """ self.proxied_objects[id(obj)] = proxied self.proxied_objects[id(proxied)] = obj
['def', 'addProxyObject', '(', 'self', ',', 'obj', ',', 'proxied', ')', ':', 'self', '.', 'proxied_objects', '[', 'id', '(', 'obj', ')', ']', '=', 'proxied', 'self', '.', 'proxied_objects', '[', 'id', '(', 'proxied', ')', ']', '=', 'obj']
Stores a reference to the unproxied and proxied versions of C{obj} for later retrieval. @since: 0.6
['Stores', 'a', 'reference', 'to', 'the', 'unproxied', 'and', 'proxied', 'versions', 'of', 'C', '{', 'obj', '}', 'for', 'later', 'retrieval', '.']
train
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/amf3.py#L713-L721
2,164
rapidpro/expressions
python/temba_expressions/conversions.py
to_repr
def to_repr(value, ctx): """ Converts a value back to its representation form, e.g. x -> "x" """ as_string = to_string(value, ctx) if isinstance(value, str) or isinstance(value, datetime.date) or isinstance(value, datetime.time): as_string = as_string.replace('"', '""') # escape quotes by doubling as_string = '"%s"' % as_string return as_string
python
def to_repr(value, ctx): """ Converts a value back to its representation form, e.g. x -> "x" """ as_string = to_string(value, ctx) if isinstance(value, str) or isinstance(value, datetime.date) or isinstance(value, datetime.time): as_string = as_string.replace('"', '""') # escape quotes by doubling as_string = '"%s"' % as_string return as_string
['def', 'to_repr', '(', 'value', ',', 'ctx', ')', ':', 'as_string', '=', 'to_string', '(', 'value', ',', 'ctx', ')', 'if', 'isinstance', '(', 'value', ',', 'str', ')', 'or', 'isinstance', '(', 'value', ',', 'datetime', '.', 'date', ')', 'or', 'isinstance', '(', 'value', ',', 'datetime', '.', 'time', ')', ':', 'as_string', '=', 'as_string', '.', 'replace', '(', '\'"\'', ',', '\'""\'', ')', '# escape quotes by doubling', 'as_string', '=', '\'"%s"\'', '%', 'as_string', 'return', 'as_string']
Converts a value back to its representation form, e.g. x -> "x"
['Converts', 'a', 'value', 'back', 'to', 'its', 'representation', 'form', 'e', '.', 'g', '.', 'x', '-', '>', 'x']
train
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/conversions.py#L187-L197
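The quote-doubling rule above can be illustrated in plain Python, without the evaluation context object that to_string needs:
value = 'say "hi"'
escaped = value.replace('"', '""')   # escape quotes by doubling, as in to_repr
print('"%s"' % escaped)              # -> "say ""hi"""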
2,165
koszullab/metaTOR
metator/scripts/hicstuff.py
bin_matrix
def bin_matrix(M, subsampling_factor=3): """Bin either sparse or dense matrices. """ try: from scipy.sparse import issparse if issparse(M): return bin_sparse(M, subsampling_factor=subsampling_factor) else: raise ImportError except ImportError: return bin_dense(M, subsampling_factor=subsampling_factor)
python
def bin_matrix(M, subsampling_factor=3): """Bin either sparse or dense matrices. """ try: from scipy.sparse import issparse if issparse(M): return bin_sparse(M, subsampling_factor=subsampling_factor) else: raise ImportError except ImportError: return bin_dense(M, subsampling_factor=subsampling_factor)
['def', 'bin_matrix', '(', 'M', ',', 'subsampling_factor', '=', '3', ')', ':', 'try', ':', 'from', 'scipy', '.', 'sparse', 'import', 'issparse', 'if', 'issparse', '(', 'M', ')', ':', 'return', 'bin_sparse', '(', 'M', ',', 'subsampling_factor', '=', 'subsampling_factor', ')', 'else', ':', 'raise', 'ImportError', 'except', 'ImportError', ':', 'return', 'bin_dense', '(', 'M', ',', 'subsampling_factor', '=', 'subsampling_factor', ')']
Bin either sparse or dense matrices.
['Bin', 'either', 'sparse', 'or', 'dense', 'matrices', '.']
train
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L208-L219
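A short usage sketch exercising the dispatching wrapper; bin_dense and bin_sparse are sibling helpers in the same hicstuff module, and the output shape comment is an expectation, not verified here.
import numpy as np
from metator.scripts import hicstuff

M = np.arange(81, dtype=float).reshape(9, 9)
binned = hicstuff.bin_matrix(M, subsampling_factor=3)  # dense input, so bin_dense runs
print(binned.shape)  # expected to be (3, 3) for a 9x9 input binned by 3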
2,166
google/budou
budou/chunk.py
ChunkList.get_overlaps
def get_overlaps(self, offset, length): """Returns chunks overlapped with the given range. Args: offset (int): Begin offset of the range. length (int): Length of the range. Returns: Overlapped chunks. (:obj:`budou.chunk.ChunkList`) """ # In case entity's offset points to a space just before the entity. if ''.join([chunk.word for chunk in self])[offset] == ' ': offset += 1 index = 0 result = ChunkList() for chunk in self: if offset < index + len(chunk.word) and index < offset + length: result.append(chunk) index += len(chunk.word) return result
python
def get_overlaps(self, offset, length): """Returns chunks overlapped with the given range. Args: offset (int): Begin offset of the range. length (int): Length of the range. Returns: Overlapped chunks. (:obj:`budou.chunk.ChunkList`) """ # In case entity's offset points to a space just before the entity. if ''.join([chunk.word for chunk in self])[offset] == ' ': offset += 1 index = 0 result = ChunkList() for chunk in self: if offset < index + len(chunk.word) and index < offset + length: result.append(chunk) index += len(chunk.word) return result
['def', 'get_overlaps', '(', 'self', ',', 'offset', ',', 'length', ')', ':', "# In case entity's offset points to a space just before the entity.", 'if', "''", '.', 'join', '(', '[', 'chunk', '.', 'word', 'for', 'chunk', 'in', 'self', ']', ')', '[', 'offset', ']', '==', "' '", ':', 'offset', '+=', '1', 'index', '=', '0', 'result', '=', 'ChunkList', '(', ')', 'for', 'chunk', 'in', 'self', ':', 'if', 'offset', '<', 'index', '+', 'len', '(', 'chunk', '.', 'word', ')', 'and', 'index', '<', 'offset', '+', 'length', ':', 'result', '.', 'append', '(', 'chunk', ')', 'index', '+=', 'len', '(', 'chunk', '.', 'word', ')', 'return', 'result']
Returns chunks overlapped with the given range. Args: offset (int): Begin offset of the range. length (int): Length of the range. Returns: Overlapped chunks. (:obj:`budou.chunk.ChunkList`)
['Returns', 'chunks', 'overlapped', 'with', 'the', 'given', 'range', '.']
train
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/chunk.py#L189-L208
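The loop above reduces to an interval-intersection test; here is the same logic in plain Python, without budou's Chunk objects:
words = ['Tokyo', ' ', 'Tower']
offset, length = 6, 5  # the range covering "Tower"
index = 0
for word in words:
    if offset < index + len(word) and index < offset + length:
        print('overlaps:', word)   # prints only "Tower"
    index += len(word)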
2,167
tango-controls/pytango
tango/tango_object.py
Server.get_devices
def get_devices(self): """ Helper that returns a dict of devices for this server. :return: Returns a tuple of two elements: - dict<tango class name : list of device names> - dict<device names : tango class name> :rtype: tuple<dict, dict> """ if self.__util is None: import tango db = tango.Database() else: db = self.__util.get_database() server = self.server_instance dev_list = db.get_device_class_list(server) class_map, dev_map = {}, {} for class_name, dev_name in zip(dev_list[1::2], dev_list[::2]): dev_names = class_map.get(class_name) if dev_names is None: class_map[class_name] = dev_names = [] dev_name = dev_name.lower() dev_names.append(dev_name) dev_map[dev_name] = class_name return class_map, dev_map
python
def get_devices(self): """ Helper that returns a dict of devices for this server. :return: Returns a tuple of two elements: - dict<tango class name : list of device names> - dict<device names : tango class name> :rtype: tuple<dict, dict> """ if self.__util is None: import tango db = tango.Database() else: db = self.__util.get_database() server = self.server_instance dev_list = db.get_device_class_list(server) class_map, dev_map = {}, {} for class_name, dev_name in zip(dev_list[1::2], dev_list[::2]): dev_names = class_map.get(class_name) if dev_names is None: class_map[class_name] = dev_names = [] dev_name = dev_name.lower() dev_names.append(dev_name) dev_map[dev_name] = class_name return class_map, dev_map
['def', 'get_devices', '(', 'self', ')', ':', 'if', 'self', '.', '__util', 'is', 'None', ':', 'import', 'tango', 'db', '=', 'tango', '.', 'Database', '(', ')', 'else', ':', 'db', '=', 'self', '.', '__util', '.', 'get_database', '(', ')', 'server', '=', 'self', '.', 'server_instance', 'dev_list', '=', 'db', '.', 'get_device_class_list', '(', 'server', ')', 'class_map', ',', 'dev_map', '=', '{', '}', ',', '{', '}', 'for', 'class_name', ',', 'dev_name', 'in', 'zip', '(', 'dev_list', '[', '1', ':', ':', '2', ']', ',', 'dev_list', '[', ':', ':', '2', ']', ')', ':', 'dev_names', '=', 'class_map', '.', 'get', '(', 'class_name', ')', 'if', 'dev_names', 'is', 'None', ':', 'class_map', '[', 'class_name', ']', '=', 'dev_names', '=', '[', ']', 'dev_name', '=', 'dev_name', '.', 'lower', '(', ')', 'dev_names', '.', 'append', '(', 'dev_name', ')', 'dev_map', '[', 'dev_name', ']', '=', 'class_name', 'return', 'class_map', ',', 'dev_map']
Helper that returns a dict of devices for this server. :return: Returns a tuple of two elements: - dict<tango class name : list of device names> - dict<device names : tango class name> :rtype: tuple<dict, dict>
['Helper', 'that', 'returns', 'a', 'dict', 'of', 'devices', 'for', 'this', 'server', '.']
train
https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/tango_object.py#L476-L501
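The pairing logic above interleaves device and class names from a flat list; a plain illustration of the same zip-based reshuffle on a fake get_device_class_list result:
dev_list = ['sys/tg_test/1', 'TangoTest', 'sys/tg_test/2', 'TangoTest']
class_map, dev_map = {}, {}
for class_name, dev_name in zip(dev_list[1::2], dev_list[::2]):
    class_map.setdefault(class_name, []).append(dev_name.lower())
    dev_map[dev_name.lower()] = class_name
print(class_map)  # {'TangoTest': ['sys/tg_test/1', 'sys/tg_test/2']}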
2,168
saltstack/salt
salt/proxy/cimc.py
init
def init(opts): ''' This function gets called when the proxy starts up. ''' if 'host' not in opts['proxy']: log.critical('No \'host\' key found in pillar for this proxy.') return False if 'username' not in opts['proxy']: log.critical('No \'username\' key found in pillar for this proxy.') return False if 'password' not in opts['proxy']: log.critical('No \'password\' key found in pillar for this proxy.') return False DETAILS['url'] = 'https://{0}/nuova'.format(opts['proxy']['host']) DETAILS['headers'] = {'Content-Type': 'application/x-www-form-urlencoded', 'Content-Length': 62, 'USER-Agent': 'lwp-request/2.06'} # Set configuration details DETAILS['host'] = opts['proxy']['host'] DETAILS['username'] = opts['proxy'].get('username') DETAILS['password'] = opts['proxy'].get('password') # Ensure connectivity to the device log.debug("Attempting to connect to cimc proxy host.") get_config_resolver_class("computeRackUnit") log.debug("Successfully connected to cimc proxy host.") DETAILS['initialized'] = True
python
def init(opts): ''' This function gets called when the proxy starts up. ''' if 'host' not in opts['proxy']: log.critical('No \'host\' key found in pillar for this proxy.') return False if 'username' not in opts['proxy']: log.critical('No \'username\' key found in pillar for this proxy.') return False if 'password' not in opts['proxy']: log.critical('No \'password\' key found in pillar for this proxy.') return False DETAILS['url'] = 'https://{0}/nuova'.format(opts['proxy']['host']) DETAILS['headers'] = {'Content-Type': 'application/x-www-form-urlencoded', 'Content-Length': 62, 'USER-Agent': 'lwp-request/2.06'} # Set configuration details DETAILS['host'] = opts['proxy']['host'] DETAILS['username'] = opts['proxy'].get('username') DETAILS['password'] = opts['proxy'].get('password') # Ensure connectivity to the device log.debug("Attempting to connect to cimc proxy host.") get_config_resolver_class("computeRackUnit") log.debug("Successfully connected to cimc proxy host.") DETAILS['initialized'] = True
['def', 'init', '(', 'opts', ')', ':', 'if', "'host'", 'not', 'in', 'opts', '[', "'proxy'", ']', ':', 'log', '.', 'critical', '(', "'No \\'host\\' key found in pillar for this proxy.'", ')', 'return', 'False', 'if', "'username'", 'not', 'in', 'opts', '[', "'proxy'", ']', ':', 'log', '.', 'critical', '(', "'No \\'username\\' key found in pillar for this proxy.'", ')', 'return', 'False', 'if', "'password'", 'not', 'in', 'opts', '[', "'proxy'", ']', ':', 'log', '.', 'critical', '(', "'No \\'password\\' key found in pillar for this proxy.'", ')', 'return', 'False', 'DETAILS', '[', "'url'", ']', '=', "'https://{0}/nuova'", '.', 'format', '(', 'opts', '[', "'proxy'", ']', '[', "'host'", ']', ')', 'DETAILS', '[', "'headers'", ']', '=', '{', "'Content-Type'", ':', "'application/x-www-form-urlencoded'", ',', "'Content-Length'", ':', '62', ',', "'USER-Agent'", ':', "'lwp-request/2.06'", '}', '# Set configuration details', 'DETAILS', '[', "'host'", ']', '=', 'opts', '[', "'proxy'", ']', '[', "'host'", ']', 'DETAILS', '[', "'username'", ']', '=', 'opts', '[', "'proxy'", ']', '.', 'get', '(', "'username'", ')', 'DETAILS', '[', "'password'", ']', '=', 'opts', '[', "'proxy'", ']', '.', 'get', '(', "'password'", ')', '# Ensure connectivity to the device', 'log', '.', 'debug', '(', '"Attempting to connect to cimc proxy host."', ')', 'get_config_resolver_class', '(', '"computeRackUnit"', ')', 'log', '.', 'debug', '(', '"Successfully connected to cimc proxy host."', ')', 'DETAILS', '[', "'initialized'", ']', '=', 'True']
This function gets called when the proxy starts up.
['This', 'function', 'gets', 'called', 'when', 'the', 'proxy', 'starts', 'up', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/cimc.py#L110-L139
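A minimal sketch of the opts mapping this proxy expects; the host and credentials below are placeholders, and the 'proxytype' key is an assumed Salt convention rather than something this record documents.
opts = {
    'proxy': {
        'proxytype': 'cimc',   # assumed key for selecting this proxy module
        'host': '192.0.2.10',
        'username': 'admin',
        'password': 'secret',
    }
}
init(opts)  # validates the keys, builds DETAILS['url'], and probes the device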
2,169
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
xmlDoc.nodeDumpOutput
def nodeDumpOutput(self, buf, cur, level, format, encoding): """Dump an XML node, recursive behaviour, children are printed too. Note that @format = 1 provides node indenting only if xmlIndentTreeOutput = 1 or xmlKeepBlanksDefault(0) was called """ if buf is None: buf__o = None else: buf__o = buf._o if cur is None: cur__o = None else: cur__o = cur._o libxml2mod.xmlNodeDumpOutput(buf__o, self._o, cur__o, level, format, encoding)
python
def nodeDumpOutput(self, buf, cur, level, format, encoding): """Dump an XML node, recursive behaviour, children are printed too. Note that @format = 1 provides node indenting only if xmlIndentTreeOutput = 1 or xmlKeepBlanksDefault(0) was called """ if buf is None: buf__o = None else: buf__o = buf._o if cur is None: cur__o = None else: cur__o = cur._o libxml2mod.xmlNodeDumpOutput(buf__o, self._o, cur__o, level, format, encoding)
['def', 'nodeDumpOutput', '(', 'self', ',', 'buf', ',', 'cur', ',', 'level', ',', 'format', ',', 'encoding', ')', ':', 'if', 'buf', 'is', 'None', ':', 'buf__o', '=', 'None', 'else', ':', 'buf__o', '=', 'buf', '.', '_o', 'if', 'cur', 'is', 'None', ':', 'cur__o', '=', 'None', 'else', ':', 'cur__o', '=', 'cur', '.', '_o', 'libxml2mod', '.', 'xmlNodeDumpOutput', '(', 'buf__o', ',', 'self', '.', '_o', ',', 'cur__o', ',', 'level', ',', 'format', ',', 'encoding', ')']
Dump an XML node, recursive behaviour, children are printed too. Note that @format = 1 provides node indenting only if xmlIndentTreeOutput = 1 or xmlKeepBlanksDefault(0) was called
['Dump', 'an', 'XML', 'node', 'recursive', 'behaviour', 'children', 'are', 'printed', 'too', '.', 'Note', 'that']
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L4418-L4427
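A hedged sketch of driving nodeDumpOutput from the Python bindings; createOutputBuffer wrapping a file object is believed to exist in the same bindings, and buffer flushing/closing semantics are left out of this sketch.
import sys
import libxml2

doc = libxml2.parseDoc("<root><child/></root>")
buf = libxml2.createOutputBuffer(sys.stdout, "UTF-8")         # output buffer over stdout
doc.nodeDumpOutput(buf, doc.getRootElement(), 0, 1, "UTF-8")  # format=1 requests indenting
doc.freeDoc()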
2,170
echinopsii/net.echinopsii.ariane.community.cli.python3
ariane_clip3/directory.py
OSInstance.add_application
def add_application(self, application, sync=True): """ add an application to this OS instance. :param application: the application to add on this OS instance :param sync: If sync=True(default) synchronize with Ariane server. If sync=False, add the application object on list to be added on next save(). :return: """ LOGGER.debug("OSInstance.add_application") if not sync: self.application_2_add.append(application) else: if application.id is None: application.save() if self.id is not None and application.id is not None: params = { 'id': self.id, 'applicationID': application.id } args = {'http_operation': 'GET', 'operation_path': 'update/applications/add', 'parameters': params} response = OSInstanceService.requester.call(args) if response.rc != 0: LOGGER.warning( 'OSInstance.add_application - Problem while updating OS instance ' + self.name + '. Reason: ' + str(response.response_content) + '-' + str(response.error_message) + " (" + str(response.rc) + ")" ) else: self.application_ids.append(application.id) application.osi_ids.append(self.id) else: LOGGER.warning( 'OSInstance.add_application - Problem while updating OS instance ' + self.name + '. Reason: application ' + application.name + ' id is None' )
python
def add_application(self, application, sync=True): """ add an application to this OS instance. :param application: the application to add on this OS instance :param sync: If sync=True(default) synchronize with Ariane server. If sync=False, add the application object on list to be added on next save(). :return: """ LOGGER.debug("OSInstance.add_application") if not sync: self.application_2_add.append(application) else: if application.id is None: application.save() if self.id is not None and application.id is not None: params = { 'id': self.id, 'applicationID': application.id } args = {'http_operation': 'GET', 'operation_path': 'update/applications/add', 'parameters': params} response = OSInstanceService.requester.call(args) if response.rc != 0: LOGGER.warning( 'OSInstance.add_application - Problem while updating OS instance ' + self.name + '. Reason: ' + str(response.response_content) + '-' + str(response.error_message) + " (" + str(response.rc) + ")" ) else: self.application_ids.append(application.id) application.osi_ids.append(self.id) else: LOGGER.warning( 'OSInstance.add_application - Problem while updating OS instance ' + self.name + '. Reason: application ' + application.name + ' id is None' )
['def', 'add_application', '(', 'self', ',', 'application', ',', 'sync', '=', 'True', ')', ':', 'LOGGER', '.', 'debug', '(', '"OSInstance.add_application"', ')', 'if', 'not', 'sync', ':', 'self', '.', 'application_2_add', '.', 'append', '(', 'application', ')', 'else', ':', 'if', 'application', '.', 'id', 'is', 'None', ':', 'application', '.', 'save', '(', ')', 'if', 'self', '.', 'id', 'is', 'not', 'None', 'and', 'application', '.', 'id', 'is', 'not', 'None', ':', 'params', '=', '{', "'id'", ':', 'self', '.', 'id', ',', "'applicationID'", ':', 'application', '.', 'id', '}', 'args', '=', '{', "'http_operation'", ':', "'GET'", ',', "'operation_path'", ':', "'update/applications/add'", ',', "'parameters'", ':', 'params', '}', 'response', '=', 'OSInstanceService', '.', 'requester', '.', 'call', '(', 'args', ')', 'if', 'response', '.', 'rc', '!=', '0', ':', 'LOGGER', '.', 'warning', '(', "'OSInstance.add_application - Problem while updating OS instance '", '+', 'self', '.', 'name', '+', "'. Reason: '", '+', 'str', '(', 'response', '.', 'response_content', ')', '+', "'-'", '+', 'str', '(', 'response', '.', 'error_message', ')', '+', '" ("', '+', 'str', '(', 'response', '.', 'rc', ')', '+', '")"', ')', 'else', ':', 'self', '.', 'application_ids', '.', 'append', '(', 'application', '.', 'id', ')', 'application', '.', 'osi_ids', '.', 'append', '(', 'self', '.', 'id', ')', 'else', ':', 'LOGGER', '.', 'warning', '(', "'OSInstance.add_application - Problem while updating OS instance '", '+', 'self', '.', 'name', '+', "'. Reason: application '", '+', 'application', '.', 'name', '+', "' id is None'", ')']
add an application to this OS instance. :param application: the application to add on this OS instance :param sync: If sync=True(default) synchronize with Ariane server. If sync=False, add the application object on list to be added on next save(). :return:
['add', 'an', 'application', 'to', 'this', 'OS', 'instance', '.', ':', 'param', 'application', ':', 'the', 'application', 'to', 'add', 'on', 'this', 'OS', 'instance', ':', 'param', 'sync', ':', 'If', 'sync', '=', 'True', '(', 'default', ')', 'synchronize', 'with', 'Ariane', 'server', '.', 'If', 'sync', '=', 'False', 'add', 'the', 'application', 'object', 'on', 'list', 'to', 'be', 'added', 'on', 'next', 'save', '()', '.', ':', 'return', ':']
train
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/directory.py#L2317-L2351
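A sketch of the deferred path described in the docstring; the OSInstance and Application constructor arguments below are hypothetical placeholders, not the documented ariane_clip3 signatures.
osi = OSInstance(name='srv01', admin_gate_uri='ssh://srv01')   # hypothetical arguments
app = Application(name='billing')                              # hypothetical arguments
osi.add_application(app, sync=False)   # only queued in application_2_add for now
osi.save()                             # the link is pushed to the Ariane server here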
2,171
YosaiProject/yosai
yosai/core/subject/subject.py
Yosai.requires_user
def requires_user(fn): """ Requires that the calling Subject be *either* authenticated *or* remembered via RememberMe services before allowing access. This method essentially ensures that subject.identifiers IS NOT None :raises UnauthenticatedException: indicating that the decorated method is not allowed to be executed because the Subject attempted to perform a user-only operation """ @functools.wraps(fn) def wrap(*args, **kwargs): subject = Yosai.get_current_subject() if subject.identifiers is None: msg = ("Attempting to perform a user-only operation. The " "current Subject is NOT a user (they haven't been " "authenticated or remembered from a previous login). " "ACCESS DENIED.") raise UnauthenticatedException(msg) return fn(*args, **kwargs) return wrap
python
def requires_user(fn): """ Requires that the calling Subject be *either* authenticated *or* remembered via RememberMe services before allowing access. This method essentially ensures that subject.identifiers IS NOT None :raises UnauthenticatedException: indicating that the decorated method is not allowed to be executed because the Subject attempted to perform a user-only operation """ @functools.wraps(fn) def wrap(*args, **kwargs): subject = Yosai.get_current_subject() if subject.identifiers is None: msg = ("Attempting to perform a user-only operation. The " "current Subject is NOT a user (they haven't been " "authenticated or remembered from a previous login). " "ACCESS DENIED.") raise UnauthenticatedException(msg) return fn(*args, **kwargs) return wrap
['def', 'requires_user', '(', 'fn', ')', ':', '@', 'functools', '.', 'wraps', '(', 'fn', ')', 'def', 'wrap', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'subject', '=', 'Yosai', '.', 'get_current_subject', '(', ')', 'if', 'subject', '.', 'identifiers', 'is', 'None', ':', 'msg', '=', '(', '"Attempting to perform a user-only operation. The "', '"current Subject is NOT a user (they haven\'t been "', '"authenticated or remembered from a previous login). "', '"ACCESS DENIED."', ')', 'raise', 'UnauthenticatedException', '(', 'msg', ')', 'return', 'fn', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'wrap']
Requires that the calling Subject be *either* authenticated *or* remembered via RememberMe services before allowing access. This method essentially ensures that subject.identifiers IS NOT None :raises UnauthenticatedException: indicating that the decorated method is not allowed to be executed because the Subject attempted to perform a user-only operation
['Requires', 'that', 'the', 'calling', 'Subject', 'be', '*', 'either', '*', 'authenticated', '*', 'or', '*', 'remembered', 'via', 'RememberMe', 'services', 'before', 'allowing', 'access', '.']
train
https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/subject/subject.py#L861-L886
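Typical placement implied by the docstring, assuming requires_user is exposed as a static method on Yosai and that a subject context is already bound when the wrapped function runs (setup omitted):
@Yosai.requires_user
def view_profile(profile_id):
    return load_profile(profile_id)    # hypothetical protected operation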
2,172
neo4j-contrib/neomodel
neomodel/core.py
StructuredNode.create
def create(cls, *props, **kwargs): """ Call to CREATE with parameters map. A new instance will be created and saved. :param props: dict of properties to create the nodes. :type props: tuple :param lazy: False by default, specify True to get nodes with id only without the parameters. :type: bool :rtype: list """ if 'streaming' in kwargs: warnings.warn('streaming is not supported by bolt, please remove the kwarg', category=DeprecationWarning, stacklevel=1) lazy = kwargs.get('lazy', False) # create mapped query query = "CREATE (n:{0} {{create_params}})".format(':'.join(cls.inherited_labels())) # close query if lazy: query += " RETURN id(n)" else: query += " RETURN n" results = [] for item in [cls.deflate(p, obj=_UnsavedNode(), skip_empty=True) for p in props]: node, _ = db.cypher_query(query, {'create_params': item}) results.extend(node[0]) nodes = [cls.inflate(node) for node in results] if not lazy and hasattr(cls, 'post_create'): for node in nodes: node.post_create() return nodes
python
def create(cls, *props, **kwargs): """ Call to CREATE with parameters map. A new instance will be created and saved. :param props: dict of properties to create the nodes. :type props: tuple :param lazy: False by default, specify True to get nodes with id only without the parameters. :type: bool :rtype: list """ if 'streaming' in kwargs: warnings.warn('streaming is not supported by bolt, please remove the kwarg', category=DeprecationWarning, stacklevel=1) lazy = kwargs.get('lazy', False) # create mapped query query = "CREATE (n:{0} {{create_params}})".format(':'.join(cls.inherited_labels())) # close query if lazy: query += " RETURN id(n)" else: query += " RETURN n" results = [] for item in [cls.deflate(p, obj=_UnsavedNode(), skip_empty=True) for p in props]: node, _ = db.cypher_query(query, {'create_params': item}) results.extend(node[0]) nodes = [cls.inflate(node) for node in results] if not lazy and hasattr(cls, 'post_create'): for node in nodes: node.post_create() return nodes
['def', 'create', '(', 'cls', ',', '*', 'props', ',', '*', '*', 'kwargs', ')', ':', 'if', "'streaming'", 'in', 'kwargs', ':', 'warnings', '.', 'warn', '(', "'streaming is not supported by bolt, please remove the kwarg'", ',', 'category', '=', 'DeprecationWarning', ',', 'stacklevel', '=', '1', ')', 'lazy', '=', 'kwargs', '.', 'get', '(', "'lazy'", ',', 'False', ')', '# create mapped query', 'query', '=', '"CREATE (n:{0} {{create_params}})"', '.', 'format', '(', "':'", '.', 'join', '(', 'cls', '.', 'inherited_labels', '(', ')', ')', ')', '# close query', 'if', 'lazy', ':', 'query', '+=', '" RETURN id(n)"', 'else', ':', 'query', '+=', '" RETURN n"', 'results', '=', '[', ']', 'for', 'item', 'in', '[', 'cls', '.', 'deflate', '(', 'p', ',', 'obj', '=', '_UnsavedNode', '(', ')', ',', 'skip_empty', '=', 'True', ')', 'for', 'p', 'in', 'props', ']', ':', 'node', ',', '_', '=', 'db', '.', 'cypher_query', '(', 'query', ',', '{', "'create_params'", ':', 'item', '}', ')', 'results', '.', 'extend', '(', 'node', '[', '0', ']', ')', 'nodes', '=', '[', 'cls', '.', 'inflate', '(', 'node', ')', 'for', 'node', 'in', 'results', ']', 'if', 'not', 'lazy', 'and', 'hasattr', '(', 'cls', ',', "'post_create'", ')', ':', 'for', 'node', 'in', 'nodes', ':', 'node', '.', 'post_create', '(', ')', 'return', 'nodes']
Call to CREATE with parameters map. A new instance will be created and saved. :param props: dict of properties to create the nodes. :type props: tuple :param lazy: False by default, specify True to get nodes with id only without the parameters. :type: bool :rtype: list
['Call', 'to', 'CREATE', 'with', 'parameters', 'map', '.', 'A', 'new', 'instance', 'will', 'be', 'created', 'and', 'saved', '.']
train
https://github.com/neo4j-contrib/neomodel/blob/cca5de4c4e90998293558b871b1b529095c91a38/neomodel/core.py#L303-L339
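A short sketch of batch creation against the classmethod above; the Person model is illustrative, and a live Neo4j connection is assumed at call time.
from neomodel import StructuredNode, StringProperty

class Person(StructuredNode):
    name = StringProperty()

# Two nodes created in one call; lazy=True yields id-only instances.
people = Person.create({'name': 'Ada'}, {'name': 'Grace'}, lazy=True)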
2,173
GoogleCloudPlatform/cloud-debug-python
src/googleclouddebugger/module_utils2.py
IsPathSuffix
def IsPathSuffix(mod_path, path): """Checks whether path is a full path suffix of mod_path. Args: mod_path: Must be an absolute path to a source file. Must not have file extension. path: A relative path. Must not have file extension. Returns: True if path is a full path suffix of mod_path. False otherwise. """ return (mod_path.endswith(path) and (len(mod_path) == len(path) or mod_path[:-len(path)].endswith(os.sep)))
python
def IsPathSuffix(mod_path, path): """Checks whether path is a full path suffix of mod_path. Args: mod_path: Must be an absolute path to a source file. Must not have file extension. path: A relative path. Must not have file extension. Returns: True if path is a full path suffix of mod_path. False otherwise. """ return (mod_path.endswith(path) and (len(mod_path) == len(path) or mod_path[:-len(path)].endswith(os.sep)))
['def', 'IsPathSuffix', '(', 'mod_path', ',', 'path', ')', ':', 'return', '(', 'mod_path', '.', 'endswith', '(', 'path', ')', 'and', '(', 'len', '(', 'mod_path', ')', '==', 'len', '(', 'path', ')', 'or', 'mod_path', '[', ':', '-', 'len', '(', 'path', ')', ']', '.', 'endswith', '(', 'os', '.', 'sep', ')', ')', ')']
Checks whether path is a full path suffix of mod_path. Args: mod_path: Must be an absolute path to a source file. Must not have file extension. path: A relative path. Must not have file extension. Returns: True if path is a full path suffix of mod_path. False otherwise.
['Checks', 'whether', 'path', 'is', 'a', 'full', 'path', 'suffix', 'of', 'mod_path', '.']
train
https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/module_utils2.py#L21-L34
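The os.sep comparison is what forces component alignment; a quick illustration on a POSIX system, with the import path taken from the record's file location:
from googleclouddebugger.module_utils2 import IsPathSuffix

print(IsPathSuffix('/srv/app/utils', 'app/utils'))    # True: suffix begins at a separator
print(IsPathSuffix('/srv/myapp/utils', 'app/utils'))  # False: 'myapp' is not the component 'app'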
2,174
nerdvegas/rez
src/rez/status.py
Status.print_tools
def print_tools(self, pattern=None, buf=sys.stdout): """Print a list of visible tools. Args: pattern (str): Only list tools that match this glob pattern. """ seen = set() rows = [] context = self.context if context: data = context.get_tools() conflicts = set(context.get_conflicting_tools().keys()) for _, (variant, tools) in sorted(data.items()): pkg_str = variant.qualified_package_name for tool in tools: if pattern and not fnmatch(tool, pattern): continue if tool in conflicts: label = "(in conflict)" color = critical else: label = '' color = None rows.append([tool, '-', pkg_str, "active context", label, color]) seen.add(tool) for suite in self.suites: for tool, d in suite.get_tools().iteritems(): if tool in seen: continue if pattern and not fnmatch(tool, pattern): continue label = [] color = None path = which(tool) if path: path_ = os.path.join(suite.tools_path, tool) if path != path_: label.append("(hidden by unknown tool '%s')" % path) color = warning variant = d["variant"] if isinstance(variant, set): pkg_str = ", ".join(variant) label.append("(in conflict)") color = critical else: pkg_str = variant.qualified_package_name orig_tool = d["tool_name"] if orig_tool == tool: orig_tool = '-' label = ' '.join(label) source = ("context '%s' in suite '%s'" % (d["context_name"], suite.load_path)) rows.append([tool, orig_tool, pkg_str, source, label, color]) seen.add(tool) _pr = Printer(buf) if not rows: _pr("No matching tools.") return False headers = [["TOOL", "ALIASING", "PACKAGE", "SOURCE", "", None], ["----", "--------", "-------", "------", "", None]] rows = headers + sorted(rows, key=lambda x: x[0].lower()) print_colored_columns(_pr, rows) return True
python
def print_tools(self, pattern=None, buf=sys.stdout): """Print a list of visible tools. Args: pattern (str): Only list tools that match this glob pattern. """ seen = set() rows = [] context = self.context if context: data = context.get_tools() conflicts = set(context.get_conflicting_tools().keys()) for _, (variant, tools) in sorted(data.items()): pkg_str = variant.qualified_package_name for tool in tools: if pattern and not fnmatch(tool, pattern): continue if tool in conflicts: label = "(in conflict)" color = critical else: label = '' color = None rows.append([tool, '-', pkg_str, "active context", label, color]) seen.add(tool) for suite in self.suites: for tool, d in suite.get_tools().iteritems(): if tool in seen: continue if pattern and not fnmatch(tool, pattern): continue label = [] color = None path = which(tool) if path: path_ = os.path.join(suite.tools_path, tool) if path != path_: label.append("(hidden by unknown tool '%s')" % path) color = warning variant = d["variant"] if isinstance(variant, set): pkg_str = ", ".join(variant) label.append("(in conflict)") color = critical else: pkg_str = variant.qualified_package_name orig_tool = d["tool_name"] if orig_tool == tool: orig_tool = '-' label = ' '.join(label) source = ("context '%s' in suite '%s'" % (d["context_name"], suite.load_path)) rows.append([tool, orig_tool, pkg_str, source, label, color]) seen.add(tool) _pr = Printer(buf) if not rows: _pr("No matching tools.") return False headers = [["TOOL", "ALIASING", "PACKAGE", "SOURCE", "", None], ["----", "--------", "-------", "------", "", None]] rows = headers + sorted(rows, key=lambda x: x[0].lower()) print_colored_columns(_pr, rows) return True
['def', 'print_tools', '(', 'self', ',', 'pattern', '=', 'None', ',', 'buf', '=', 'sys', '.', 'stdout', ')', ':', 'seen', '=', 'set', '(', ')', 'rows', '=', '[', ']', 'context', '=', 'self', '.', 'context', 'if', 'context', ':', 'data', '=', 'context', '.', 'get_tools', '(', ')', 'conflicts', '=', 'set', '(', 'context', '.', 'get_conflicting_tools', '(', ')', '.', 'keys', '(', ')', ')', 'for', '_', ',', '(', 'variant', ',', 'tools', ')', 'in', 'sorted', '(', 'data', '.', 'items', '(', ')', ')', ':', 'pkg_str', '=', 'variant', '.', 'qualified_package_name', 'for', 'tool', 'in', 'tools', ':', 'if', 'pattern', 'and', 'not', 'fnmatch', '(', 'tool', ',', 'pattern', ')', ':', 'continue', 'if', 'tool', 'in', 'conflicts', ':', 'label', '=', '"(in conflict)"', 'color', '=', 'critical', 'else', ':', 'label', '=', "''", 'color', '=', 'None', 'rows', '.', 'append', '(', '[', 'tool', ',', "'-'", ',', 'pkg_str', ',', '"active context"', ',', 'label', ',', 'color', ']', ')', 'seen', '.', 'add', '(', 'tool', ')', 'for', 'suite', 'in', 'self', '.', 'suites', ':', 'for', 'tool', ',', 'd', 'in', 'suite', '.', 'get_tools', '(', ')', '.', 'iteritems', '(', ')', ':', 'if', 'tool', 'in', 'seen', ':', 'continue', 'if', 'pattern', 'and', 'not', 'fnmatch', '(', 'tool', ',', 'pattern', ')', ':', 'continue', 'label', '=', '[', ']', 'color', '=', 'None', 'path', '=', 'which', '(', 'tool', ')', 'if', 'path', ':', 'path_', '=', 'os', '.', 'path', '.', 'join', '(', 'suite', '.', 'tools_path', ',', 'tool', ')', 'if', 'path', '!=', 'path_', ':', 'label', '.', 'append', '(', '"(hidden by unknown tool \'%s\')"', '%', 'path', ')', 'color', '=', 'warning', 'variant', '=', 'd', '[', '"variant"', ']', 'if', 'isinstance', '(', 'variant', ',', 'set', ')', ':', 'pkg_str', '=', '", "', '.', 'join', '(', 'variant', ')', 'label', '.', 'append', '(', '"(in conflict)"', ')', 'color', '=', 'critical', 'else', ':', 'pkg_str', '=', 'variant', '.', 'qualified_package_name', 'orig_tool', '=', 'd', '[', '"tool_name"', ']', 'if', 'orig_tool', '==', 'tool', ':', 'orig_tool', '=', "'-'", 'label', '=', "' '", '.', 'join', '(', 'label', ')', 'source', '=', '(', '"context \'%s\' in suite \'%s\'"', '%', '(', 'd', '[', '"context_name"', ']', ',', 'suite', '.', 'load_path', ')', ')', 'rows', '.', 'append', '(', '[', 'tool', ',', 'orig_tool', ',', 'pkg_str', ',', 'source', ',', 'label', ',', 'color', ']', ')', 'seen', '.', 'add', '(', 'tool', ')', '_pr', '=', 'Printer', '(', 'buf', ')', 'if', 'not', 'rows', ':', '_pr', '(', '"No matching tools."', ')', 'return', 'False', 'headers', '=', '[', '[', '"TOOL"', ',', '"ALIASING"', ',', '"PACKAGE"', ',', '"SOURCE"', ',', '""', ',', 'None', ']', ',', '[', '"----"', ',', '"--------"', ',', '"-------"', ',', '"------"', ',', '""', ',', 'None', ']', ']', 'rows', '=', 'headers', '+', 'sorted', '(', 'rows', ',', 'key', '=', 'lambda', 'x', ':', 'x', '[', '0', ']', '.', 'lower', '(', ')', ')', 'print_colored_columns', '(', '_pr', ',', 'rows', ')', 'return', 'True']
Print a list of visible tools. Args: pattern (str): Only list tools that match this glob pattern.
['Print', 'a', 'list', 'of', 'visible', 'tools', '.']
train
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/status.py#L120-L193
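A hedged call sketch; rez conventionally exposes a module-level Status singleton in rez/status.py, which is an assumption here rather than something this record shows.
from rez.status import status

status.print_tools(pattern='maya*')   # writes the TOOL/ALIASING/PACKAGE/SOURCE table to stdout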
2,175
dmlc/gluon-nlp
scripts/word_embeddings/data.py
transform_data_fasttext
def transform_data_fasttext(data, vocab, idx_to_counts, cbow, ngram_buckets, ngrams, batch_size, window_size, frequent_token_subsampling=1E-4, dtype='float32', index_dtype='int64'): """Transform a DataStream of coded DataSets to a DataStream of batches. Parameters ---------- data : gluonnlp.data.DataStream DataStream where each sample is a valid input to gluonnlp.data.EmbeddingCenterContextBatchify. vocab : gluonnlp.Vocab Vocabulary containing all tokens whose indices occur in data. For each token, its associated subwords will be computed and used for constructing the batches. No subwords are used if ngram_buckets is 0. idx_to_counts : list of int List of integers such that idx_to_counts[idx] represents the count of vocab.idx_to_token[idx] in the underlying dataset. The count information is used to subsample frequent words in the dataset. Each token is independently dropped with probability 1 - sqrt(t / (count / sum_counts)) where t is the hyperparameter frequent_token_subsampling. cbow : boolean If True, batches for CBOW are returned. ngram_buckets : int Number of hash buckets to consider for the fastText nlp.vocab.NGramHashes subword function. ngrams : list of int For each integer n in the list, all ngrams of length n will be considered by the nlp.vocab.NGramHashes subword function. batch_size : int The returned data stream iterates over batches of batch_size. window_size : int The context window size for gluonnlp.data.EmbeddingCenterContextBatchify. frequent_token_subsampling : float Hyperparameter for subsampling. See idx_to_counts above for more information. dtype : str or np.dtype, default 'float32' Data type of data array. index_dtype : str or np.dtype, default 'int64' Data type of index arrays. Returns ------- gluonnlp.data.DataStream Stream over batches. Each returned element is a list corresponding to the arguments for the forward pass of model.SG or model.CBOW respectively based on if cbow is False or True. If ngram_buckets > 0, the returned sample will contain ngrams. Both model.SG and model.CBOW will handle them correctly as long as they are initialized with the subword_function returned as second argument by this function (see below). gluonnlp.vocab.NGramHashes The subword_function used for obtaining the subwords in the returned batches. """ if ngram_buckets <= 0: raise ValueError('Invalid ngram_buckets. Use Word2Vec training ' 'pipeline if not interested in ngrams.') sum_counts = float(sum(idx_to_counts)) idx_to_pdiscard = [ 1 - math.sqrt(frequent_token_subsampling / (count / sum_counts)) for count in idx_to_counts] def subsample(shard): return [[ t for t, r in zip(sentence, np.random.uniform(0, 1, size=len(sentence))) if r > idx_to_pdiscard[t]] for sentence in shard] data = data.transform(subsample) batchify = nlp.data.batchify.EmbeddingCenterContextBatchify( batch_size=batch_size, window_size=window_size, cbow=cbow, weight_dtype=dtype, index_dtype=index_dtype) data = data.transform(batchify) with print_time('prepare subwords'): subword_function = nlp.vocab.create_subword_function( 'NGramHashes', ngrams=ngrams, num_subwords=ngram_buckets) # Store subword indices for all words in vocabulary idx_to_subwordidxs = list(subword_function(vocab.idx_to_token)) subwordidxs = np.concatenate(idx_to_subwordidxs) subwordidxsptr = np.cumsum([ len(subwordidxs) for subwordidxs in idx_to_subwordidxs]) subwordidxsptr = np.concatenate([ np.zeros(1, dtype=np.int64), subwordidxsptr]) if cbow: subword_lookup = functools.partial( cbow_lookup, subwordidxs=subwordidxs, subwordidxsptr=subwordidxsptr, offset=len(vocab)) else: subword_lookup = functools.partial( skipgram_lookup, subwordidxs=subwordidxs, subwordidxsptr=subwordidxsptr, offset=len(vocab)) max_subwordidxs_len = max(len(s) for s in idx_to_subwordidxs) if max_subwordidxs_len > 500: warnings.warn( 'The word with largest number of subwords ' 'has {} subwords, suggesting there are ' 'some noisy words in your vocabulary. ' 'You should filter out very long words ' 'to avoid memory issues.'.format(max_subwordidxs_len)) data = UnchainStream(data) if cbow: batchify_fn = cbow_fasttext_batch else: batchify_fn = skipgram_fasttext_batch batchify_fn = functools.partial( batchify_fn, num_tokens=len(vocab) + len(subword_function), subword_lookup=subword_lookup, dtype=dtype, index_dtype=index_dtype) return data, batchify_fn, subword_function
python
def transform_data_fasttext(data, vocab, idx_to_counts, cbow, ngram_buckets, ngrams, batch_size, window_size, frequent_token_subsampling=1E-4, dtype='float32', index_dtype='int64'): """Transform a DataStream of coded DataSets to a DataStream of batches. Parameters ---------- data : gluonnlp.data.DataStream DataStream where each sample is a valid input to gluonnlp.data.EmbeddingCenterContextBatchify. vocab : gluonnlp.Vocab Vocabulary containing all tokens whose indices occur in data. For each token, its associated subwords will be computed and used for constructing the batches. No subwords are used if ngram_buckets is 0. idx_to_counts : list of int List of integers such that idx_to_counts[idx] represents the count of vocab.idx_to_token[idx] in the underlying dataset. The count information is used to subsample frequent words in the dataset. Each token is independently dropped with probability 1 - sqrt(t / (count / sum_counts)) where t is the hyperparameter frequent_token_subsampling. cbow : boolean If True, batches for CBOW are returned. ngram_buckets : int Number of hash buckets to consider for the fastText nlp.vocab.NGramHashes subword function. ngrams : list of int For each integer n in the list, all ngrams of length n will be considered by the nlp.vocab.NGramHashes subword function. batch_size : int The returned data stream iterates over batches of batch_size. window_size : int The context window size for gluonnlp.data.EmbeddingCenterContextBatchify. frequent_token_subsampling : float Hyperparameter for subsampling. See idx_to_counts above for more information. dtype : str or np.dtype, default 'float32' Data type of data array. index_dtype : str or np.dtype, default 'int64' Data type of index arrays. Returns ------- gluonnlp.data.DataStream Stream over batches. Each returned element is a list corresponding to the arguments for the forward pass of model.SG or model.CBOW respectively based on if cbow is False or True. If ngram_buckets > 0, the returned sample will contain ngrams. Both model.SG and model.CBOW will handle them correctly as long as they are initialized with the subword_function returned as second argument by this function (see below). gluonnlp.vocab.NGramHashes The subword_function used for obtaining the subwords in the returned batches. """ if ngram_buckets <= 0: raise ValueError('Invalid ngram_buckets. Use Word2Vec training ' 'pipeline if not interested in ngrams.') sum_counts = float(sum(idx_to_counts)) idx_to_pdiscard = [ 1 - math.sqrt(frequent_token_subsampling / (count / sum_counts)) for count in idx_to_counts] def subsample(shard): return [[ t for t, r in zip(sentence, np.random.uniform(0, 1, size=len(sentence))) if r > idx_to_pdiscard[t]] for sentence in shard] data = data.transform(subsample) batchify = nlp.data.batchify.EmbeddingCenterContextBatchify( batch_size=batch_size, window_size=window_size, cbow=cbow, weight_dtype=dtype, index_dtype=index_dtype) data = data.transform(batchify) with print_time('prepare subwords'): subword_function = nlp.vocab.create_subword_function( 'NGramHashes', ngrams=ngrams, num_subwords=ngram_buckets) # Store subword indices for all words in vocabulary idx_to_subwordidxs = list(subword_function(vocab.idx_to_token)) subwordidxs = np.concatenate(idx_to_subwordidxs) subwordidxsptr = np.cumsum([ len(subwordidxs) for subwordidxs in idx_to_subwordidxs]) subwordidxsptr = np.concatenate([ np.zeros(1, dtype=np.int64), subwordidxsptr]) if cbow: subword_lookup = functools.partial( cbow_lookup, subwordidxs=subwordidxs, subwordidxsptr=subwordidxsptr, offset=len(vocab)) else: subword_lookup = functools.partial( skipgram_lookup, subwordidxs=subwordidxs, subwordidxsptr=subwordidxsptr, offset=len(vocab)) max_subwordidxs_len = max(len(s) for s in idx_to_subwordidxs) if max_subwordidxs_len > 500: warnings.warn( 'The word with largest number of subwords ' 'has {} subwords, suggesting there are ' 'some noisy words in your vocabulary. ' 'You should filter out very long words ' 'to avoid memory issues.'.format(max_subwordidxs_len)) data = UnchainStream(data) if cbow: batchify_fn = cbow_fasttext_batch else: batchify_fn = skipgram_fasttext_batch batchify_fn = functools.partial( batchify_fn, num_tokens=len(vocab) + len(subword_function), subword_lookup=subword_lookup, dtype=dtype, index_dtype=index_dtype) return data, batchify_fn, subword_function
['def', 'transform_data_fasttext', '(', 'data', ',', 'vocab', ',', 'idx_to_counts', ',', 'cbow', ',', 'ngram_buckets', ',', 'ngrams', ',', 'batch_size', ',', 'window_size', ',', 'frequent_token_subsampling', '=', '1E-4', ',', 'dtype', '=', "'float32'", ',', 'index_dtype', '=', "'int64'", ')', ':', 'if', 'ngram_buckets', '<=', '0', ':', 'raise', 'ValueError', '(', "'Invalid ngram_buckets. Use Word2Vec training '", "'pipeline if not interested in ngrams.'", ')', 'sum_counts', '=', 'float', '(', 'sum', '(', 'idx_to_counts', ')', ')', 'idx_to_pdiscard', '=', '[', '1', '-', 'math', '.', 'sqrt', '(', 'frequent_token_subsampling', '/', '(', 'count', '/', 'sum_counts', ')', ')', 'for', 'count', 'in', 'idx_to_counts', ']', 'def', 'subsample', '(', 'shard', ')', ':', 'return', '[', '[', 't', 'for', 't', ',', 'r', 'in', 'zip', '(', 'sentence', ',', 'np', '.', 'random', '.', 'uniform', '(', '0', ',', '1', ',', 'size', '=', 'len', '(', 'sentence', ')', ')', ')', 'if', 'r', '>', 'idx_to_pdiscard', '[', 't', ']', ']', 'for', 'sentence', 'in', 'shard', ']', 'data', '=', 'data', '.', 'transform', '(', 'subsample', ')', 'batchify', '=', 'nlp', '.', 'data', '.', 'batchify', '.', 'EmbeddingCenterContextBatchify', '(', 'batch_size', '=', 'batch_size', ',', 'window_size', '=', 'window_size', ',', 'cbow', '=', 'cbow', ',', 'weight_dtype', '=', 'dtype', ',', 'index_dtype', '=', 'index_dtype', ')', 'data', '=', 'data', '.', 'transform', '(', 'batchify', ')', 'with', 'print_time', '(', "'prepare subwords'", ')', ':', 'subword_function', '=', 'nlp', '.', 'vocab', '.', 'create_subword_function', '(', "'NGramHashes'", ',', 'ngrams', '=', 'ngrams', ',', 'num_subwords', '=', 'ngram_buckets', ')', '# Store subword indices for all words in vocabulary', 'idx_to_subwordidxs', '=', 'list', '(', 'subword_function', '(', 'vocab', '.', 'idx_to_token', ')', ')', 'subwordidxs', '=', 'np', '.', 'concatenate', '(', 'idx_to_subwordidxs', ')', 'subwordidxsptr', '=', 'np', '.', 'cumsum', '(', '[', 'len', '(', 'subwordidxs', ')', 'for', 'subwordidxs', 'in', 'idx_to_subwordidxs', ']', ')', 'subwordidxsptr', '=', 'np', '.', 'concatenate', '(', '[', 'np', '.', 'zeros', '(', '1', ',', 'dtype', '=', 'np', '.', 'int64', ')', ',', 'subwordidxsptr', ']', ')', 'if', 'cbow', ':', 'subword_lookup', '=', 'functools', '.', 'partial', '(', 'cbow_lookup', ',', 'subwordidxs', '=', 'subwordidxs', ',', 'subwordidxsptr', '=', 'subwordidxsptr', ',', 'offset', '=', 'len', '(', 'vocab', ')', ')', 'else', ':', 'subword_lookup', '=', 'functools', '.', 'partial', '(', 'skipgram_lookup', ',', 'subwordidxs', '=', 'subwordidxs', ',', 'subwordidxsptr', '=', 'subwordidxsptr', ',', 'offset', '=', 'len', '(', 'vocab', ')', ')', 'max_subwordidxs_len', '=', 'max', '(', 'len', '(', 's', ')', 'for', 's', 'in', 'idx_to_subwordidxs', ')', 'if', 'max_subwordidxs_len', '>', '500', ':', 'warnings', '.', 'warn', '(', "'The word with largest number of subwords '", "'has {} subwords, suggesting there are '", "'some noisy words in your vocabulary. '", "'You should filter out very long words '", "'to avoid memory issues.'", '.', 'format', '(', 'max_subwordidxs_len', ')', ')', 'data', '=', 'UnchainStream', '(', 'data', ')', 'if', 'cbow', ':', 'batchify_fn', '=', 'cbow_fasttext_batch', 'else', ':', 'batchify_fn', '=', 'skipgram_fasttext_batch', 'batchify_fn', '=', 'functools', '.', 'partial', '(', 'batchify_fn', ',', 'num_tokens', '=', 'len', '(', 'vocab', ')', '+', 'len', '(', 'subword_function', ')', ',', 'subword_lookup', '=', 'subword_lookup', ',', 'dtype', '=', 'dtype', ',', 'index_dtype', '=', 'index_dtype', ')', 'return', 'data', ',', 'batchify_fn', ',', 'subword_function']
Transform a DataStream of coded DataSets to a DataStream of batches. Parameters ---------- data : gluonnlp.data.DataStream DataStream where each sample is a valid input to gluonnlp.data.EmbeddingCenterContextBatchify. vocab : gluonnlp.Vocab Vocabulary containing all tokens whose indices occur in data. For each token, its associated subwords will be computed and used for constructing the batches. No subwords are used if ngram_buckets is 0. idx_to_counts : list of int List of integers such that idx_to_counts[idx] represents the count of vocab.idx_to_token[idx] in the underlying dataset. The count information is used to subsample frequent words in the dataset. Each token is independently dropped with probability 1 - sqrt(t / (count / sum_counts)) where t is the hyperparameter frequent_token_subsampling. cbow : boolean If True, batches for CBOW are returned. ngram_buckets : int Number of hash buckets to consider for the fastText nlp.vocab.NGramHashes subword function. ngrams : list of int For each integer n in the list, all ngrams of length n will be considered by the nlp.vocab.NGramHashes subword function. batch_size : int The returned data stream iterates over batches of batch_size. window_size : int The context window size for gluonnlp.data.EmbeddingCenterContextBatchify. frequent_token_subsampling : float Hyperparameter for subsampling. See idx_to_counts above for more information. dtype : str or np.dtype, default 'float32' Data type of data array. index_dtype : str or np.dtype, default 'int64' Data type of index arrays. Returns ------- gluonnlp.data.DataStream Stream over batches. Each returned element is a list corresponding to the arguments for the forward pass of model.SG or model.CBOW respectively based on if cbow is False or True. If ngram_buckets > 0, the returned sample will contain ngrams. Both model.SG and model.CBOW will handle them correctly as long as they are initialized with the subword_function returned as second argument by this function (see below). gluonnlp.vocab.NGramHashes The subword_function used for obtaining the subwords in the returned batches.
['Transform', 'a', 'DataStream', 'of', 'coded', 'DataSets', 'to', 'a', 'DataStream', 'of', 'batches', '.']
train
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/word_embeddings/data.py#L134-L252
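A call sketch with skip-gram settings close to the fastText defaults; building the coded data stream, vocab, and counts is elided here (the repository's training scripts construct them), so this shows only the argument shape.
data_stream, batchify_fn, subword_function = transform_data_fasttext(
    data, vocab, idx_to_counts, cbow=False, ngram_buckets=2000000,
    ngrams=[3, 4, 5, 6], batch_size=1024, window_size=5)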
2,176
twilio/twilio-python
twilio/rest/messaging/v1/session/webhook.py
WebhookList.create
def create(self, target, configuration_url=values.unset, configuration_method=values.unset, configuration_filters=values.unset, configuration_triggers=values.unset, configuration_flow_sid=values.unset, configuration_retry_count=values.unset, configuration_replay_after=values.unset, configuration_buffer_messages=values.unset, configuration_buffer_window=values.unset): """ Create a new WebhookInstance :param WebhookInstance.Target target: The target of this webhook. :param unicode configuration_url: The absolute url the webhook request should be sent to. :param WebhookInstance.Method configuration_method: The HTTP method to be used when sending a webhook request. :param unicode configuration_filters: The list of events that fire a webhook event for this Session. :param unicode configuration_triggers: The list of keywords that fire a webhook event for this Session. :param unicode configuration_flow_sid: The Studio Flow SID where the webhook should be sent. :param unicode configuration_retry_count: The number of retries in case of webhook request failures. :param unicode configuration_replay_after: The message index for which, and for whose successors, the webhook will be replayed. :param bool configuration_buffer_messages: Whether buffering should be applied to messages. :param unicode configuration_buffer_window: The period for which messages are buffered. :returns: Newly created WebhookInstance :rtype: twilio.rest.messaging.v1.session.webhook.WebhookInstance """ data = values.of({ 'Target': target, 'Configuration.Url': configuration_url, 'Configuration.Method': configuration_method, 'Configuration.Filters': serialize.map(configuration_filters, lambda e: e), 'Configuration.Triggers': serialize.map(configuration_triggers, lambda e: e), 'Configuration.FlowSid': configuration_flow_sid, 'Configuration.RetryCount': configuration_retry_count, 'Configuration.ReplayAfter': configuration_replay_after, 'Configuration.BufferMessages': configuration_buffer_messages, 'Configuration.BufferWindow': configuration_buffer_window, }) payload = self._version.create( 'POST', self._uri, data=data, ) return WebhookInstance(self._version, payload, session_sid=self._solution['session_sid'], )
python
def create(self, target, configuration_url=values.unset, configuration_method=values.unset, configuration_filters=values.unset, configuration_triggers=values.unset, configuration_flow_sid=values.unset, configuration_retry_count=values.unset, configuration_replay_after=values.unset, configuration_buffer_messages=values.unset, configuration_buffer_window=values.unset): """ Create a new WebhookInstance :param WebhookInstance.Target target: The target of this webhook. :param unicode configuration_url: The absolute url the webhook request should be sent to. :param WebhookInstance.Method configuration_method: The HTTP method to be used when sending a webhook request. :param unicode configuration_filters: The list of events that fire a webhook event for this Session. :param unicode configuration_triggers: The list of keywords that fire a webhook event for this Session. :param unicode configuration_flow_sid: The Studio Flow SID where the webhook should be sent. :param unicode configuration_retry_count: The number of retries in case of webhook request failures. :param unicode configuration_replay_after: The message index for which, and for whose successors, the webhook will be replayed. :param bool configuration_buffer_messages: Whether buffering should be applied to messages. :param unicode configuration_buffer_window: The period for which messages are buffered. :returns: Newly created WebhookInstance :rtype: twilio.rest.messaging.v1.session.webhook.WebhookInstance """ data = values.of({ 'Target': target, 'Configuration.Url': configuration_url, 'Configuration.Method': configuration_method, 'Configuration.Filters': serialize.map(configuration_filters, lambda e: e), 'Configuration.Triggers': serialize.map(configuration_triggers, lambda e: e), 'Configuration.FlowSid': configuration_flow_sid, 'Configuration.RetryCount': configuration_retry_count, 'Configuration.ReplayAfter': configuration_replay_after, 'Configuration.BufferMessages': configuration_buffer_messages, 'Configuration.BufferWindow': configuration_buffer_window, }) payload = self._version.create( 'POST', self._uri, data=data, ) return WebhookInstance(self._version, payload, session_sid=self._solution['session_sid'], )
['def', 'create', '(', 'self', ',', 'target', ',', 'configuration_url', '=', 'values', '.', 'unset', ',', 'configuration_method', '=', 'values', '.', 'unset', ',', 'configuration_filters', '=', 'values', '.', 'unset', ',', 'configuration_triggers', '=', 'values', '.', 'unset', ',', 'configuration_flow_sid', '=', 'values', '.', 'unset', ',', 'configuration_retry_count', '=', 'values', '.', 'unset', ',', 'configuration_replay_after', '=', 'values', '.', 'unset', ',', 'configuration_buffer_messages', '=', 'values', '.', 'unset', ',', 'configuration_buffer_window', '=', 'values', '.', 'unset', ')', ':', 'data', '=', 'values', '.', 'of', '(', '{', "'Target'", ':', 'target', ',', "'Configuration.Url'", ':', 'configuration_url', ',', "'Configuration.Method'", ':', 'configuration_method', ',', "'Configuration.Filters'", ':', 'serialize', '.', 'map', '(', 'configuration_filters', ',', 'lambda', 'e', ':', 'e', ')', ',', "'Configuration.Triggers'", ':', 'serialize', '.', 'map', '(', 'configuration_triggers', ',', 'lambda', 'e', ':', 'e', ')', ',', "'Configuration.FlowSid'", ':', 'configuration_flow_sid', ',', "'Configuration.RetryCount'", ':', 'configuration_retry_count', ',', "'Configuration.ReplayAfter'", ':', 'configuration_replay_after', ',', "'Configuration.BufferMessages'", ':', 'configuration_buffer_messages', ',', "'Configuration.BufferWindow'", ':', 'configuration_buffer_window', ',', '}', ')', 'payload', '=', 'self', '.', '_version', '.', 'create', '(', "'POST'", ',', 'self', '.', '_uri', ',', 'data', '=', 'data', ',', ')', 'return', 'WebhookInstance', '(', 'self', '.', '_version', ',', 'payload', ',', 'session_sid', '=', 'self', '.', '_solution', '[', "'session_sid'", ']', ',', ')']
Create a new WebhookInstance :param WebhookInstance.Target target: The target of this webhook. :param unicode configuration_url: The absolute url the webhook request should be sent to. :param WebhookInstance.Method configuration_method: The HTTP method to be used when sending a webhook request. :param unicode configuration_filters: The list of events that fire a webhook event for this Session. :param unicode configuration_triggers: The list of keywords that fire a webhook event for this Session. :param unicode configuration_flow_sid: The Studio Flow SID where the webhook should be sent. :param unicode configuration_retry_count: The number of retries in case of webhook request failures. :param unicode configuration_replay_after: The message index for which, and for whose successors, the webhook will be replayed. :param bool configuration_buffer_messages: Whether buffering should be applied to messages. :param unicode configuration_buffer_window: The period for which messages are buffered. :returns: Newly created WebhookInstance :rtype: twilio.rest.messaging.v1.session.webhook.WebhookInstance
['Create', 'a', 'new', 'WebhookInstance']
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/messaging/v1/session/webhook.py#L120-L165
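A hedged client-side sketch; the fluent path below is inferred from the file location (messaging/v1/session/webhook.py), the session SID is a placeholder, and 'webhook' as a target value is an assumption about the WebhookInstance.Target enum.
from twilio.rest import Client

client = Client('ACCOUNT_SID', 'AUTH_TOKEN')
webhook = client.messaging.v1 \
    .sessions('KCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
    .webhooks.create(target='webhook',
                     configuration_url='https://example.com/hook',
                     configuration_method='POST')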
2,177
twilio/twilio-python
twilio/rest/__init__.py
Client.taskrouter
def taskrouter(self): """ Access the Taskrouter Twilio Domain :returns: Taskrouter Twilio Domain :rtype: twilio.rest.taskrouter.Taskrouter """ if self._taskrouter is None: from twilio.rest.taskrouter import Taskrouter self._taskrouter = Taskrouter(self) return self._taskrouter
python
def taskrouter(self): """ Access the Taskrouter Twilio Domain :returns: Taskrouter Twilio Domain :rtype: twilio.rest.taskrouter.Taskrouter """ if self._taskrouter is None: from twilio.rest.taskrouter import Taskrouter self._taskrouter = Taskrouter(self) return self._taskrouter
['def', 'taskrouter', '(', 'self', ')', ':', 'if', 'self', '.', '_taskrouter', 'is', 'None', ':', 'from', 'twilio', '.', 'rest', '.', 'taskrouter', 'import', 'Taskrouter', 'self', '.', '_taskrouter', '=', 'Taskrouter', '(', 'self', ')', 'return', 'self', '.', '_taskrouter']
Access the Taskrouter Twilio Domain :returns: Taskrouter Twilio Domain :rtype: twilio.rest.taskrouter.Taskrouter
['Access', 'the', 'Taskrouter', 'Twilio', 'Domain']
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/__init__.py#L380-L390
2,178
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/wiki/wiki_client.py
WikiClient.create_page_move
def create_page_move(self, page_move_parameters, project, wiki_identifier, comment=None): """CreatePageMove. Creates a page move operation that updates the path and order of the page as provided in the parameters. :param :class:`<WikiPageMoveParameters> <azure.devops.v5_0.wiki.models.WikiPageMoveParameters>` page_move_parameters: Page move operation parameters. :param str project: Project ID or project name :param str wiki_identifier: Wiki Id or name. :param str comment: Comment that is to be associated with this page move. :rtype: :class:`<WikiPageMoveResponse> <azure.devops.v5_0.wiki.models.WikiPageMoveResponse>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if wiki_identifier is not None: route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str') query_parameters = {} if comment is not None: query_parameters['comment'] = self._serialize.query('comment', comment, 'str') content = self._serialize.body(page_move_parameters, 'WikiPageMoveParameters') response = self._send(http_method='POST', location_id='e37bbe71-cbae-49e5-9a4e-949143b9d910', version='5.0', route_values=route_values, query_parameters=query_parameters, content=content) response_object = models.WikiPageMoveResponse() response_object.page_move = self._deserialize('WikiPageMove', response) response_object.eTag = response.headers.get('ETag') return response_object
python
def create_page_move(self, page_move_parameters, project, wiki_identifier, comment=None): """CreatePageMove. Creates a page move operation that updates the path and order of the page as provided in the parameters. :param :class:`<WikiPageMoveParameters> <azure.devops.v5_0.wiki.models.WikiPageMoveParameters>` page_move_parameters: Page more operation parameters. :param str project: Project ID or project name :param str wiki_identifier: Wiki Id or name. :param str comment: Comment that is to be associated with this page move. :rtype: :class:`<WikiPageMoveResponse> <azure.devops.v5_0.wiki.models.WikiPageMoveResponse>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if wiki_identifier is not None: route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str') query_parameters = {} if comment is not None: query_parameters['comment'] = self._serialize.query('comment', comment, 'str') content = self._serialize.body(page_move_parameters, 'WikiPageMoveParameters') response = self._send(http_method='POST', location_id='e37bbe71-cbae-49e5-9a4e-949143b9d910', version='5.0', route_values=route_values, query_parameters=query_parameters, content=content) response_object = models.WikiPageMoveResponse() response_object.page_move = self._deserialize('WikiPageMove', response) response_object.eTag = response.headers.get('ETag') return response_object
['def', 'create_page_move', '(', 'self', ',', 'page_move_parameters', ',', 'project', ',', 'wiki_identifier', ',', 'comment', '=', 'None', ')', ':', 'route_values', '=', '{', '}', 'if', 'project', 'is', 'not', 'None', ':', 'route_values', '[', "'project'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'project'", ',', 'project', ',', "'str'", ')', 'if', 'wiki_identifier', 'is', 'not', 'None', ':', 'route_values', '[', "'wikiIdentifier'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'wiki_identifier'", ',', 'wiki_identifier', ',', "'str'", ')', 'query_parameters', '=', '{', '}', 'if', 'comment', 'is', 'not', 'None', ':', 'query_parameters', '[', "'comment'", ']', '=', 'self', '.', '_serialize', '.', 'query', '(', "'comment'", ',', 'comment', ',', "'str'", ')', 'content', '=', 'self', '.', '_serialize', '.', 'body', '(', 'page_move_parameters', ',', "'WikiPageMoveParameters'", ')', 'response', '=', 'self', '.', '_send', '(', 'http_method', '=', "'POST'", ',', 'location_id', '=', "'e37bbe71-cbae-49e5-9a4e-949143b9d910'", ',', 'version', '=', "'5.0'", ',', 'route_values', '=', 'route_values', ',', 'query_parameters', '=', 'query_parameters', ',', 'content', '=', 'content', ')', 'response_object', '=', 'models', '.', 'WikiPageMoveResponse', '(', ')', 'response_object', '.', 'page_move', '=', 'self', '.', '_deserialize', '(', "'WikiPageMove'", ',', 'response', ')', 'response_object', '.', 'eTag', '=', 'response', '.', 'headers', '.', 'get', '(', "'ETag'", ')', 'return', 'response_object']
CreatePageMove. Creates a page move operation that updates the path and order of the page as provided in the parameters. :param :class:`<WikiPageMoveParameters> <azure.devops.v5_0.wiki.models.WikiPageMoveParameters>` page_move_parameters: Page move operation parameters. :param str project: Project ID or project name :param str wiki_identifier: Wiki Id or name. :param str comment: Comment that is to be associated with this page move. :rtype: :class:`<WikiPageMoveResponse> <azure.devops.v5_0.wiki.models.WikiPageMoveResponse>`
['CreatePageMove', '.', 'Creates', 'a', 'page', 'move', 'operation', 'that', 'updates', 'the', 'path', 'and', 'order', 'of', 'the', 'page', 'as', 'provided', 'in', 'the', 'parameters', '.', ':', 'param', ':', 'class', ':', '<WikiPageMoveParameters', '>', '<azure', '.', 'devops', '.', 'v5_0', '.', 'wiki', '.', 'models', '.', 'WikiPageMoveParameters', '>', 'page_move_parameters', ':', 'Page', 'move', 'operation', 'parameters', '.', ':', 'param', 'str', 'project', ':', 'Project', 'ID', 'or', 'project', 'name', ':', 'param', 'str', 'wiki_identifier', ':', 'Wiki', 'Id', 'or', 'name', '.', ':', 'param', 'str', 'comment', ':', 'Comment', 'that', 'is', 'to', 'be', 'associated', 'with', 'this', 'page', 'move', '.', ':', 'rtype', ':', ':', 'class', ':', '<WikiPageMoveResponse', '>', '<azure', '.', 'devops', '.', 'v5_0', '.', 'wiki', '.', 'models', '.', 'WikiPageMoveResponse', '>']
train
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/wiki/wiki_client.py#L62-L89
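A hedged usage sketch of the call above using the usual azure-devops connection setup; the get_wiki_client factory name and the WikiPageMoveParameters field names (path, new_path) are assumptions based on this SDK's conventions:

from azure.devops.connection import Connection
from azure.devops.v5_0.wiki.models import WikiPageMoveParameters
from msrest.authentication import BasicAuthentication

connection = Connection(
    base_url='https://dev.azure.com/your-org',              # placeholder organization URL
    creds=BasicAuthentication('', 'personal_access_token')  # placeholder PAT
)
wiki_client = connection.clients.get_wiki_client()           # assumed client factory name
params = WikiPageMoveParameters(path='/Setup', new_path='/Docs/Setup')  # assumed field names
resp = wiki_client.create_page_move(params, project='MyProject',
                                    wiki_identifier='MyWiki', comment='Reorganize docs')
print(resp.eTag)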
2,179
spacetelescope/drizzlepac
drizzlepac/staticMask.py
staticMask.close
def close(self): """ Deletes all static mask objects. """ for key in self.masklist.keys(): self.masklist[key] = None self.masklist = {}
python
def close(self): """ Deletes all static mask objects. """ for key in self.masklist.keys(): self.masklist[key] = None self.masklist = {}
['def', 'close', '(', 'self', ')', ':', 'for', 'key', 'in', 'self', '.', 'masklist', '.', 'keys', '(', ')', ':', 'self', '.', 'masklist', '[', 'key', ']', '=', 'None', 'self', '.', 'masklist', '=', '{', '}']
Deletes all static mask objects.
['Deletes', 'all', 'static', 'mask', 'objects', '.']
train
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/staticMask.py#L241-L246
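A minimal stand-in showing the release pattern close() implements: each cached mask is dropped, then the registry itself is cleared so the arrays can be garbage-collected. The holder class here is illustrative only, not drizzlepac's real constructor:

class _MaskHolder:
    """Illustrative stand-in with the same close() contract as staticMask."""
    def __init__(self, masks):
        self.masklist = dict(masks)

    def close(self):
        # Assigning None to existing keys is safe while iterating: the dict size never changes.
        for key in self.masklist.keys():
            self.masklist[key] = None
        self.masklist = {}

holder = _MaskHolder({'sci1': [1, 0, 1], 'sci2': [0, 0, 1]})
holder.close()
assert holder.masklist == {}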
2,180
tonioo/sievelib
sievelib/commands.py
Command.check_next_arg
def check_next_arg(self, atype, avalue, add=True, check_extension=True): """Argument validity checking This method is usually used by the parser to check if detected argument is allowed for this command. We make a distinction between required and optional arguments. Optional (or tagged) arguments can be provided unordered but not the required ones. A special handling is also done for arguments that require an argument (example: the :comparator argument expects a string argument). The "testlist" type is checked separately as we can't know in advance how many arguments will be provided. If the argument is incorrect, the method raises the appropriate exception, or return False to let the parser handle the exception. :param atype: the argument's type :param avalue: the argument's value :param add: indicates if this argument should be recorded on success :param check_extension: raise ExtensionNotLoaded if extension not loaded :return: True on success, False otherwise """ if not self.has_arguments(): return False if self.iscomplete(atype, avalue): return False if self.curarg is not None and "extra_arg" in self.curarg: condition = ( atype in self.curarg["extra_arg"]["type"] and ("values" not in self.curarg["extra_arg"] or avalue in self.curarg["extra_arg"]["values"]) ) if condition: if add: self.extra_arguments[self.curarg["name"]] = avalue self.curarg = None return True raise BadValue(self.curarg["name"], avalue) failed = False pos = self.nextargpos while pos < len(self.args_definition): curarg = self.args_definition[pos] if curarg.get("required", False): if curarg["type"] == ["testlist"]: if atype != "test": failed = True elif add: if not curarg["name"] in self.arguments: self.arguments[curarg["name"]] = [] self.arguments[curarg["name"]] += [avalue] elif not self.__is_valid_type(atype, curarg["type"]) or \ not self.__is_valid_value_for_arg( curarg, avalue, check_extension): failed = True else: self.curarg = curarg self.rargs_cnt += 1 self.nextargpos = pos + 1 if add: self.arguments[curarg["name"]] = avalue break condition = ( atype in curarg["type"] and self.__is_valid_value_for_arg(curarg, avalue, check_extension) ) if condition: ext = curarg.get("extension") condition = ( check_extension and ext and ext not in RequireCommand.loaded_extensions) if condition: raise ExtensionNotLoaded(ext) condition = ( "extra_arg" in curarg and ("valid_for" not in curarg["extra_arg"] or avalue in curarg["extra_arg"]["valid_for"]) ) if condition: self.curarg = curarg if add: self.arguments[curarg["name"]] = avalue break pos += 1 if failed: raise BadArgument(self.name, avalue, self.args_definition[pos]["type"]) return True
python
def check_next_arg(self, atype, avalue, add=True, check_extension=True): """Argument validity checking This method is usually used by the parser to check if detected argument is allowed for this command. We make a distinction between required and optional arguments. Optional (or tagged) arguments can be provided unordered but not the required ones. A special handling is also done for arguments that require an argument (example: the :comparator argument expects a string argument). The "testlist" type is checked separately as we can't know in advance how many arguments will be provided. If the argument is incorrect, the method raises the appropriate exception, or return False to let the parser handle the exception. :param atype: the argument's type :param avalue: the argument's value :param add: indicates if this argument should be recorded on success :param check_extension: raise ExtensionNotLoaded if extension not loaded :return: True on success, False otherwise """ if not self.has_arguments(): return False if self.iscomplete(atype, avalue): return False if self.curarg is not None and "extra_arg" in self.curarg: condition = ( atype in self.curarg["extra_arg"]["type"] and ("values" not in self.curarg["extra_arg"] or avalue in self.curarg["extra_arg"]["values"]) ) if condition: if add: self.extra_arguments[self.curarg["name"]] = avalue self.curarg = None return True raise BadValue(self.curarg["name"], avalue) failed = False pos = self.nextargpos while pos < len(self.args_definition): curarg = self.args_definition[pos] if curarg.get("required", False): if curarg["type"] == ["testlist"]: if atype != "test": failed = True elif add: if not curarg["name"] in self.arguments: self.arguments[curarg["name"]] = [] self.arguments[curarg["name"]] += [avalue] elif not self.__is_valid_type(atype, curarg["type"]) or \ not self.__is_valid_value_for_arg( curarg, avalue, check_extension): failed = True else: self.curarg = curarg self.rargs_cnt += 1 self.nextargpos = pos + 1 if add: self.arguments[curarg["name"]] = avalue break condition = ( atype in curarg["type"] and self.__is_valid_value_for_arg(curarg, avalue, check_extension) ) if condition: ext = curarg.get("extension") condition = ( check_extension and ext and ext not in RequireCommand.loaded_extensions) if condition: raise ExtensionNotLoaded(ext) condition = ( "extra_arg" in curarg and ("valid_for" not in curarg["extra_arg"] or avalue in curarg["extra_arg"]["valid_for"]) ) if condition: self.curarg = curarg if add: self.arguments[curarg["name"]] = avalue break pos += 1 if failed: raise BadArgument(self.name, avalue, self.args_definition[pos]["type"]) return True
['def', 'check_next_arg', '(', 'self', ',', 'atype', ',', 'avalue', ',', 'add', '=', 'True', ',', 'check_extension', '=', 'True', ')', ':', 'if', 'not', 'self', '.', 'has_arguments', '(', ')', ':', 'return', 'False', 'if', 'self', '.', 'iscomplete', '(', 'atype', ',', 'avalue', ')', ':', 'return', 'False', 'if', 'self', '.', 'curarg', 'is', 'not', 'None', 'and', '"extra_arg"', 'in', 'self', '.', 'curarg', ':', 'condition', '=', '(', 'atype', 'in', 'self', '.', 'curarg', '[', '"extra_arg"', ']', '[', '"type"', ']', 'and', '(', '"values"', 'not', 'in', 'self', '.', 'curarg', '[', '"extra_arg"', ']', 'or', 'avalue', 'in', 'self', '.', 'curarg', '[', '"extra_arg"', ']', '[', '"values"', ']', ')', ')', 'if', 'condition', ':', 'if', 'add', ':', 'self', '.', 'extra_arguments', '[', 'self', '.', 'curarg', '[', '"name"', ']', ']', '=', 'avalue', 'self', '.', 'curarg', '=', 'None', 'return', 'True', 'raise', 'BadValue', '(', 'self', '.', 'curarg', '[', '"name"', ']', ',', 'avalue', ')', 'failed', '=', 'False', 'pos', '=', 'self', '.', 'nextargpos', 'while', 'pos', '<', 'len', '(', 'self', '.', 'args_definition', ')', ':', 'curarg', '=', 'self', '.', 'args_definition', '[', 'pos', ']', 'if', 'curarg', '.', 'get', '(', '"required"', ',', 'False', ')', ':', 'if', 'curarg', '[', '"type"', ']', '==', '[', '"testlist"', ']', ':', 'if', 'atype', '!=', '"test"', ':', 'failed', '=', 'True', 'elif', 'add', ':', 'if', 'not', 'curarg', '[', '"name"', ']', 'in', 'self', '.', 'arguments', ':', 'self', '.', 'arguments', '[', 'curarg', '[', '"name"', ']', ']', '=', '[', ']', 'self', '.', 'arguments', '[', 'curarg', '[', '"name"', ']', ']', '+=', '[', 'avalue', ']', 'elif', 'not', 'self', '.', '__is_valid_type', '(', 'atype', ',', 'curarg', '[', '"type"', ']', ')', 'or', 'not', 'self', '.', '__is_valid_value_for_arg', '(', 'curarg', ',', 'avalue', ',', 'check_extension', ')', ':', 'failed', '=', 'True', 'else', ':', 'self', '.', 'curarg', '=', 'curarg', 'self', '.', 'rargs_cnt', '+=', '1', 'self', '.', 'nextargpos', '=', 'pos', '+', '1', 'if', 'add', ':', 'self', '.', 'arguments', '[', 'curarg', '[', '"name"', ']', ']', '=', 'avalue', 'break', 'condition', '=', '(', 'atype', 'in', 'curarg', '[', '"type"', ']', 'and', 'self', '.', '__is_valid_value_for_arg', '(', 'curarg', ',', 'avalue', ',', 'check_extension', ')', ')', 'if', 'condition', ':', 'ext', '=', 'curarg', '.', 'get', '(', '"extension"', ')', 'condition', '=', '(', 'check_extension', 'and', 'ext', 'and', 'ext', 'not', 'in', 'RequireCommand', '.', 'loaded_extensions', ')', 'if', 'condition', ':', 'raise', 'ExtensionNotLoaded', '(', 'ext', ')', 'condition', '=', '(', '"extra_arg"', 'in', 'curarg', 'and', '(', '"valid_for"', 'not', 'in', 'curarg', '[', '"extra_arg"', ']', 'or', 'avalue', 'in', 'curarg', '[', '"extra_arg"', ']', '[', '"valid_for"', ']', ')', ')', 'if', 'condition', ':', 'self', '.', 'curarg', '=', 'curarg', 'if', 'add', ':', 'self', '.', 'arguments', '[', 'curarg', '[', '"name"', ']', ']', '=', 'avalue', 'break', 'pos', '+=', '1', 'if', 'failed', ':', 'raise', 'BadArgument', '(', 'self', '.', 'name', ',', 'avalue', ',', 'self', '.', 'args_definition', '[', 'pos', ']', '[', '"type"', ']', ')', 'return', 'True']
Argument validity checking This method is usually used by the parser to check if a detected argument is allowed for this command. We make a distinction between required and optional arguments. Optional (or tagged) arguments can be provided unordered, but not the required ones. Special handling is also done for arguments that require an argument (example: the :comparator argument expects a string argument). The "testlist" type is checked separately as we can't know in advance how many arguments will be provided. If the argument is incorrect, the method raises the appropriate exception, or returns False to let the parser handle the failure. :param atype: the argument's type :param avalue: the argument's value :param add: indicates if this argument should be recorded on success :param check_extension: raise ExtensionNotLoaded if extension not loaded :return: True on success, False otherwise
['Argument', 'validity', 'checking']
train
https://github.com/tonioo/sievelib/blob/88822d1f1daf30ef3dd9ac74911301b0773ef3c8/sievelib/commands.py#L402-L499
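check_next_arg() is driven by sievelib's parser rather than called directly; a sketch of the round trip that exercises it, assuming the Parser API accepts the script as bytes and exposes an error attribute on failure:

from sievelib.parser import Parser

script = b'''require ["fileinto"];
if header :contains "subject" "money" {
    fileinto "spam";
}
'''
p = Parser()
if p.parse(script):
    print('script parsed; each argument passed check_next_arg validation')
else:
    print('parse error:', p.error)  # assumed error attribute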
2,181
zxylvlp/PingPHP
pingphp/grammar.py
p_ArrayLiteralContentList
def p_ArrayLiteralContentList(p): ''' ArrayLiteralContentList : ArrayLiteralContent | ArrayLiteralContentList COMMA ArrayLiteralContent ''' if len(p) < 3: p[0] = ArrayLiteralContentList(None, p[1]) else: p[0] = ArrayLiteralContentList(p[1], p[3])
python
def p_ArrayLiteralContentList(p): ''' ArrayLiteralContentList : ArrayLiteralContent | ArrayLiteralContentList COMMA ArrayLiteralContent ''' if len(p) < 3: p[0] = ArrayLiteralContentList(None, p[1]) else: p[0] = ArrayLiteralContentList(p[1], p[3])
['def', 'p_ArrayLiteralContentList', '(', 'p', ')', ':', 'if', 'len', '(', 'p', ')', '<', '3', ':', 'p', '[', '0', ']', '=', 'ArrayLiteralContentList', '(', 'None', ',', 'p', '[', '1', ']', ')', 'else', ':', 'p', '[', '0', ']', '=', 'ArrayLiteralContentList', '(', 'p', '[', '1', ']', ',', 'p', '[', '3', ']', ')']
ArrayLiteralContentList : ArrayLiteralContent | ArrayLiteralContentList COMMA ArrayLiteralContent
['ArrayLiteralContentList', ':', 'ArrayLiteralContent', '|', 'ArrayLiteralContentList', 'COMMA', 'ArrayLiteralContent']
train
https://github.com/zxylvlp/PingPHP/blob/2e9a5f1ef4b5b13310e3f8ff350fa91032357bc5/pingphp/grammar.py#L588-L596
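A self-contained sketch of the left-recursive accumulation pattern this yacc rule encodes: each reduction wraps the previous list node plus one new element, so a comma-separated sequence builds up left to right. ContentList is a simplified stand-in for ArrayLiteralContentList:

class ContentList:
    """Simplified stand-in: keeps a flat list instead of a linked node chain."""
    def __init__(self, prev, item):
        self.items = ([] if prev is None else prev.items) + [item]

# Mimic the reductions the parser performs for the input "a, b, c":
lst = ContentList(None, 'a')   # ArrayLiteralContentList : ArrayLiteralContent
lst = ContentList(lst, 'b')    # ArrayLiteralContentList COMMA ArrayLiteralContent
lst = ContentList(lst, 'c')
assert lst.items == ['a', 'b', 'c']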
2,182
smarie/python-parsyfiles
parsyfiles/parsing_core_api.py
ParsingPlan.execute
def execute(self, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """ Called to parse the object as described in this parsing plan, using the provided arguments for the parser. * Exceptions are caught and wrapped into ParsingException * If result does not match expected type, an error is thrown :param logger: the logger to use during parsing (optional: None is supported) :param options: a dictionary of option sets. Each option set is identified with an id in the dictionary. :return: """ try: res = self._execute(logger, options) except Exception as e: raise ParsingException.create_for_caught_error(self.parser, self.obj_type, self.obj_on_fs_to_parse, e, options) # Check that the returned parsed object has the correct type if res is not None: if robust_isinstance(res, self.obj_type): return res # wrong type : error raise WrongTypeCreatedError.create_for_wrong_result_type(self.parser, self.obj_type, self.obj_on_fs_to_parse, res, options)
python
def execute(self, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """ Called to parse the object as described in this parsing plan, using the provided arguments for the parser. * Exceptions are caught and wrapped into ParsingException * If result does not match expected type, an error is thrown :param logger: the logger to use during parsing (optional: None is supported) :param options: a dictionary of option sets. Each option set is identified with an id in the dictionary. :return: """ try: res = self._execute(logger, options) except Exception as e: raise ParsingException.create_for_caught_error(self.parser, self.obj_type, self.obj_on_fs_to_parse, e, options) # Check that the returned parsed object has the correct type if res is not None: if robust_isinstance(res, self.obj_type): return res # wrong type : error raise WrongTypeCreatedError.create_for_wrong_result_type(self.parser, self.obj_type, self.obj_on_fs_to_parse, res, options)
['def', 'execute', '(', 'self', ',', 'logger', ':', 'Logger', ',', 'options', ':', 'Dict', '[', 'str', ',', 'Dict', '[', 'str', ',', 'Any', ']', ']', ')', '->', 'T', ':', 'try', ':', 'res', '=', 'self', '.', '_execute', '(', 'logger', ',', 'options', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'ParsingException', '.', 'create_for_caught_error', '(', 'self', '.', 'parser', ',', 'self', '.', 'obj_type', ',', 'self', '.', 'obj_on_fs_to_parse', ',', 'e', ',', 'options', ')', '# Check that the returned parsed object has the correct type', 'if', 'res', 'is', 'not', 'None', ':', 'if', 'robust_isinstance', '(', 'res', ',', 'self', '.', 'obj_type', ')', ':', 'return', 'res', '# wrong type : error', 'raise', 'WrongTypeCreatedError', '.', 'create_for_wrong_result_type', '(', 'self', '.', 'parser', ',', 'self', '.', 'obj_type', ',', 'self', '.', 'obj_on_fs_to_parse', ',', 'res', ',', 'options', ')']
Called to parse the object as described in this parsing plan, using the provided arguments for the parser. * Exceptions are caught and wrapped into ParsingException * If result does not match expected type, an error is thrown :param logger: the logger to use during parsing (optional: None is supported) :param options: a dictionary of option sets. Each option set is identified with an id in the dictionary. :return:
['Called', 'to', 'parse', 'the', 'object', 'as', 'described', 'in', 'this', 'parsing', 'plan', 'using', 'the', 'provided', 'arguments', 'for', 'the', 'parser', '.', '*', 'Exceptions', 'are', 'caught', 'and', 'wrapped', 'into', 'ParsingException', '*', 'If', 'result', 'does', 'not', 'match', 'expected', 'type', 'an', 'error', 'is', 'thrown']
train
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_core_api.py#L394-L417
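The contract is small enough to restate as a standalone sketch: run the inner parser, wrap any exception, and accept the result only if it is a non-None instance of the expected type. Generic exceptions stand in for parsyfiles' ParsingException and WrongTypeCreatedError:

from logging import Logger, getLogger
from typing import Any, Callable, Dict, Optional, Type

def execute_checked(parse_fn: Callable, expected_type: Type,
                    logger: Optional[Logger], options: Dict[str, Dict[str, Any]]):
    try:
        res = parse_fn(logger, options)
    except Exception as e:
        raise RuntimeError('parsing failed') from e   # parsyfiles raises ParsingException here
    if res is not None and isinstance(res, expected_type):
        return res
    raise TypeError('parser returned %r, expected %s' % (res, expected_type))

print(execute_checked(lambda lg, op: 42, int, getLogger(__name__), {}))  # -> 42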
2,183
scikit-hep/root_pandas
root_pandas/readwrite.py
filter_noexpand_columns
def filter_noexpand_columns(columns): """Return columns not containing and containing the noexpand prefix. Parameters ---------- columns: sequence of str A sequence of strings to be split Returns ------- Two lists, the first containing strings without the noexpand prefix, the second containing those that do with the prefix filtered out. """ prefix_len = len(NOEXPAND_PREFIX) noexpand = [c[prefix_len:] for c in columns if c.startswith(NOEXPAND_PREFIX)] other = [c for c in columns if not c.startswith(NOEXPAND_PREFIX)] return other, noexpand
python
def filter_noexpand_columns(columns): """Return columns not containing and containing the noexpand prefix. Parameters ---------- columns: sequence of str A sequence of strings to be split Returns ------- Two lists, the first containing strings without the noexpand prefix, the second containing those that do with the prefix filtered out. """ prefix_len = len(NOEXPAND_PREFIX) noexpand = [c[prefix_len:] for c in columns if c.startswith(NOEXPAND_PREFIX)] other = [c for c in columns if not c.startswith(NOEXPAND_PREFIX)] return other, noexpand
['def', 'filter_noexpand_columns', '(', 'columns', ')', ':', 'prefix_len', '=', 'len', '(', 'NOEXPAND_PREFIX', ')', 'noexpand', '=', '[', 'c', '[', 'prefix_len', ':', ']', 'for', 'c', 'in', 'columns', 'if', 'c', '.', 'startswith', '(', 'NOEXPAND_PREFIX', ')', ']', 'other', '=', '[', 'c', 'for', 'c', 'in', 'columns', 'if', 'not', 'c', '.', 'startswith', '(', 'NOEXPAND_PREFIX', ')', ']', 'return', 'other', ',', 'noexpand']
Return columns not containing and containing the noexpand prefix. Parameters ---------- columns: sequence of str A sequence of strings to be split Returns ------- Two lists, the first containing strings without the noexpand prefix, the second containing those that do with the prefix filtered out.
['Return', 'columns', 'not', 'containing', 'and', 'containing', 'the', 'noexpand', 'prefix', '.']
train
https://github.com/scikit-hep/root_pandas/blob/57991a4feaeb9213575cfba7a369fc05cc0d846b/root_pandas/readwrite.py#L117-L133
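A worked example of the split, assuming NOEXPAND_PREFIX is the 'noexpand:' string used elsewhere in root_pandas; the function body is reproduced so the snippet runs standalone:

NOEXPAND_PREFIX = 'noexpand:'  # assumed value of the module-level constant

def filter_noexpand_columns(columns):
    prefix_len = len(NOEXPAND_PREFIX)
    noexpand = [c[prefix_len:] for c in columns if c.startswith(NOEXPAND_PREFIX)]
    other = [c for c in columns if not c.startswith(NOEXPAND_PREFIX)]
    return other, noexpand

print(filter_noexpand_columns(['pt', 'noexpand:cov_matrix', 'eta']))
# -> (['pt', 'eta'], ['cov_matrix'])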
2,184
pywbem/pywbem
pywbem/cim_obj.py
_qualifiers_tomof
def _qualifiers_tomof(qualifiers, indent, maxline=MAX_MOF_LINE): """ Return a MOF string with the qualifier values, including the surrounding square brackets. The qualifiers are ordered by their name. Return empty string if no qualifiers. Normally multiline output and may fold qualifiers into multiple lines. The order of qualifiers is preserved. Parameters: qualifiers (NocaseDict): Qualifiers to format. indent (:term:`integer`): Number of spaces to indent each line of the returned string, counted to the opening bracket in the first line. Returns: :term:`unicode string`: MOF string. """ if not qualifiers: return u'' mof = [] mof.append(_indent_str(indent)) mof.append(u'[') line_pos = indent + 1 mof_quals = [] for q in qualifiers.itervalues(): mof_quals.append(q.tomof(indent + 1 + MOF_INDENT, maxline, line_pos)) delim = ',\n' + _indent_str(indent + 1) mof.append(delim.join(mof_quals)) mof.append(u']\n') return u''.join(mof)
python
def _qualifiers_tomof(qualifiers, indent, maxline=MAX_MOF_LINE): """ Return a MOF string with the qualifier values, including the surrounding square brackets. The qualifiers are ordered by their name. Return empty string if no qualifiers. Normally multiline output and may fold qualifiers into multiple lines. The order of qualifiers is preserved. Parameters: qualifiers (NocaseDict): Qualifiers to format. indent (:term:`integer`): Number of spaces to indent each line of the returned string, counted to the opening bracket in the first line. Returns: :term:`unicode string`: MOF string. """ if not qualifiers: return u'' mof = [] mof.append(_indent_str(indent)) mof.append(u'[') line_pos = indent + 1 mof_quals = [] for q in qualifiers.itervalues(): mof_quals.append(q.tomof(indent + 1 + MOF_INDENT, maxline, line_pos)) delim = ',\n' + _indent_str(indent + 1) mof.append(delim.join(mof_quals)) mof.append(u']\n') return u''.join(mof)
['def', '_qualifiers_tomof', '(', 'qualifiers', ',', 'indent', ',', 'maxline', '=', 'MAX_MOF_LINE', ')', ':', 'if', 'not', 'qualifiers', ':', 'return', "u''", 'mof', '=', '[', ']', 'mof', '.', 'append', '(', '_indent_str', '(', 'indent', ')', ')', 'mof', '.', 'append', '(', "u'['", ')', 'line_pos', '=', 'indent', '+', '1', 'mof_quals', '=', '[', ']', 'for', 'q', 'in', 'qualifiers', '.', 'itervalues', '(', ')', ':', 'mof_quals', '.', 'append', '(', 'q', '.', 'tomof', '(', 'indent', '+', '1', '+', 'MOF_INDENT', ',', 'maxline', ',', 'line_pos', ')', ')', 'delim', '=', "',\\n'", '+', '_indent_str', '(', 'indent', '+', '1', ')', 'mof', '.', 'append', '(', 'delim', '.', 'join', '(', 'mof_quals', ')', ')', 'mof', '.', 'append', '(', "u']\\n'", ')', 'return', "u''", '.', 'join', '(', 'mof', ')']
Return a MOF string with the qualifier values, including the surrounding square brackets. The qualifiers are ordered by their name. Return empty string if no qualifiers. Normally multiline output and may fold qualifiers into multiple lines. The order of qualifiers is preserved. Parameters: qualifiers (NocaseDict): Qualifiers to format. indent (:term:`integer`): Number of spaces to indent each line of the returned string, counted to the opening bracket in the first line. Returns: :term:`unicode string`: MOF string.
['Return', 'a', 'MOF', 'string', 'with', 'the', 'qualifier', 'values', 'including', 'the', 'surrounding', 'square', 'brackets', '.', 'The', 'qualifiers', 'are', 'ordered', 'by', 'their', 'name', '.']
train
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/cim_obj.py#L415-L455
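The bracketing and joining logic can be sketched without CIM objects by pre-rendering each qualifier to a string; this mirrors the indent arithmetic above, where continuation lines get one extra space so qualifiers after the first align under the character following '[':

def qualifiers_tomof_sketch(qualifier_mofs, indent):
    """Simplified stand-in: qualifier_mofs are already-rendered qualifier strings."""
    if not qualifier_mofs:
        return ''
    delim = ',\n' + ' ' * (indent + 1)   # continuation lines align just past '['
    return ' ' * indent + '[' + delim.join(qualifier_mofs) + ']\n'

# Prints a two-line bracketed block at indent 3:
print(qualifiers_tomof_sketch(['Key', 'Description ( "A sample property" )'], 3), end='')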
2,185
almarklein/pyelastix
pyelastix.py
_write_parameter_file
def _write_parameter_file(params): """ Write the parameter file in the format that elaxtix likes. """ # Get path path = os.path.join(get_tempdir(), 'params.txt') # Define helper function def valToStr(val): if val in [True, False]: return '"%s"' % str(val).lower() elif isinstance(val, int): return str(val) elif isinstance(val, float): tmp = str(val) if not '.' in tmp: tmp += '.0' return tmp elif isinstance(val, str): return '"%s"' % val # Compile text text = '' for key in params: val = params[key] # Make a string of the values if isinstance(val, (list, tuple)): vals = [valToStr(v) for v in val] val_ = ' '.join(vals) else: val_ = valToStr(val) # Create line and add line = '(%s %s)' % (key, val_) text += line + '\n' # Write text f = open(path, 'wb') try: f.write(text.encode('utf-8')) finally: f.close() # Done return path
python
def _write_parameter_file(params): """ Write the parameter file in the format that elaxtix likes. """ # Get path path = os.path.join(get_tempdir(), 'params.txt') # Define helper function def valToStr(val): if val in [True, False]: return '"%s"' % str(val).lower() elif isinstance(val, int): return str(val) elif isinstance(val, float): tmp = str(val) if not '.' in tmp: tmp += '.0' return tmp elif isinstance(val, str): return '"%s"' % val # Compile text text = '' for key in params: val = params[key] # Make a string of the values if isinstance(val, (list, tuple)): vals = [valToStr(v) for v in val] val_ = ' '.join(vals) else: val_ = valToStr(val) # Create line and add line = '(%s %s)' % (key, val_) text += line + '\n' # Write text f = open(path, 'wb') try: f.write(text.encode('utf-8')) finally: f.close() # Done return path
['def', '_write_parameter_file', '(', 'params', ')', ':', '# Get path', 'path', '=', 'os', '.', 'path', '.', 'join', '(', 'get_tempdir', '(', ')', ',', "'params.txt'", ')', '# Define helper function', 'def', 'valToStr', '(', 'val', ')', ':', 'if', 'val', 'in', '[', 'True', ',', 'False', ']', ':', 'return', '\'"%s"\'', '%', 'str', '(', 'val', ')', '.', 'lower', '(', ')', 'elif', 'isinstance', '(', 'val', ',', 'int', ')', ':', 'return', 'str', '(', 'val', ')', 'elif', 'isinstance', '(', 'val', ',', 'float', ')', ':', 'tmp', '=', 'str', '(', 'val', ')', 'if', 'not', "'.'", 'in', 'tmp', ':', 'tmp', '+=', "'.0'", 'return', 'tmp', 'elif', 'isinstance', '(', 'val', ',', 'str', ')', ':', 'return', '\'"%s"\'', '%', 'val', '# Compile text', 'text', '=', "''", 'for', 'key', 'in', 'params', ':', 'val', '=', 'params', '[', 'key', ']', '# Make a string of the values', 'if', 'isinstance', '(', 'val', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'vals', '=', '[', 'valToStr', '(', 'v', ')', 'for', 'v', 'in', 'val', ']', 'val_', '=', "' '", '.', 'join', '(', 'vals', ')', 'else', ':', 'val_', '=', 'valToStr', '(', 'val', ')', '# Create line and add', 'line', '=', "'(%s %s)'", '%', '(', 'key', ',', 'val_', ')', 'text', '+=', 'line', '+', "'\\n'", '# Write text', 'f', '=', 'open', '(', 'path', ',', "'wb'", ')', 'try', ':', 'f', '.', 'write', '(', 'text', '.', 'encode', '(', "'utf-8'", ')', ')', 'finally', ':', 'f', '.', 'close', '(', ')', '# Done', 'return', 'path']
Write the parameter file in the format that elastix likes.
['Write', 'the', 'parameter', 'file', 'in', 'the', 'format', 'that', 'elastix', 'likes', '.']
train
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L970-L1013
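The serialization rules are easy to demonstrate standalone. This sketch reuses the same valToStr logic with example elastix-style parameter names; note that the original `val in [True, False]` test also matches the integers 0 and 1 by equality, a quirk kept here for fidelity:

def val_to_str(val):
    # Same rules as the helper above; bools render quoted and lowercase.
    if val in [True, False]:             # NB: 0 and 1 also match this, since 1 == True
        return '"%s"' % str(val).lower()
    elif isinstance(val, int):
        return str(val)
    elif isinstance(val, float):
        tmp = str(val)
        if '.' not in tmp:
            tmp += '.0'
        return tmp
    elif isinstance(val, str):
        return '"%s"' % val

params = {'Transform': 'BSplineTransform', 'NumberOfResolutions': 4,
          'FinalGridSpacingInPhysicalUnits': 10.0}
for key, val in params.items():
    parts = [val_to_str(v) for v in val] if isinstance(val, (list, tuple)) else [val_to_str(val)]
    print('(%s %s)' % (key, ' '.join(parts)))
# (Transform "BSplineTransform")
# (NumberOfResolutions 4)
# (FinalGridSpacingInPhysicalUnits 10.0)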
2,186
saltstack/salt
salt/states/boto_secgroup.py
_get_rule_changes
def _get_rule_changes(rules, _rules): ''' given a list of desired rules (rules) and existing rules (_rules) return a list of rules to delete (to_delete) and to create (to_create) ''' to_delete = [] to_create = [] # for each rule in state file # 1. validate rule # 2. determine if rule exists in existing security group rules for rule in rules: try: ip_protocol = six.text_type(rule.get('ip_protocol')) except KeyError: raise SaltInvocationError('ip_protocol, to_port, and from_port are' ' required arguments for security group' ' rules.') supported_protocols = ['tcp', '6', 6, 'udp', '17', 17, 'icmp', '1', 1, 'all', '-1', -1] if ip_protocol not in supported_protocols and (not '{0}'.format(ip_protocol).isdigit() or int(ip_protocol) > 255): raise SaltInvocationError( 'Invalid ip_protocol {0} specified in security group rule.'.format(ip_protocol)) # For the 'all' case, we need to change the protocol name to '-1'. if ip_protocol == 'all': rule['ip_protocol'] = '-1' cidr_ip = rule.get('cidr_ip', None) group_name = rule.get('source_group_name', None) group_id = rule.get('source_group_group_id', None) if cidr_ip and (group_id or group_name): raise SaltInvocationError('cidr_ip and source groups can not both' ' be specified in security group rules.') if group_id and group_name: raise SaltInvocationError('Either source_group_group_id or' ' source_group_name can be specified in' ' security group rules, but not both.') if not (cidr_ip or group_id or group_name): raise SaltInvocationError('cidr_ip, source_group_group_id, or' ' source_group_name must be provided for' ' security group rules.') rule_found = False # for each rule in existing security group ruleset determine if # new rule exists for _rule in _rules: if _check_rule(rule, _rule): rule_found = True break if not rule_found: to_create.append(rule) # for each rule in existing security group configuration # 1. determine if rules needed to be deleted for _rule in _rules: rule_found = False for rule in rules: if _check_rule(rule, _rule): rule_found = True break if not rule_found: # Can only supply name or id, not both. Since we're deleting # entries, it doesn't matter which we pick. _rule.pop('source_group_name', None) to_delete.append(_rule) log.debug('Rules to be deleted: %s', to_delete) log.debug('Rules to be created: %s', to_create) return (to_delete, to_create)
python
def _get_rule_changes(rules, _rules): ''' given a list of desired rules (rules) and existing rules (_rules) return a list of rules to delete (to_delete) and to create (to_create) ''' to_delete = [] to_create = [] # for each rule in state file # 1. validate rule # 2. determine if rule exists in existing security group rules for rule in rules: try: ip_protocol = six.text_type(rule.get('ip_protocol')) except KeyError: raise SaltInvocationError('ip_protocol, to_port, and from_port are' ' required arguments for security group' ' rules.') supported_protocols = ['tcp', '6', 6, 'udp', '17', 17, 'icmp', '1', 1, 'all', '-1', -1] if ip_protocol not in supported_protocols and (not '{0}'.format(ip_protocol).isdigit() or int(ip_protocol) > 255): raise SaltInvocationError( 'Invalid ip_protocol {0} specified in security group rule.'.format(ip_protocol)) # For the 'all' case, we need to change the protocol name to '-1'. if ip_protocol == 'all': rule['ip_protocol'] = '-1' cidr_ip = rule.get('cidr_ip', None) group_name = rule.get('source_group_name', None) group_id = rule.get('source_group_group_id', None) if cidr_ip and (group_id or group_name): raise SaltInvocationError('cidr_ip and source groups can not both' ' be specified in security group rules.') if group_id and group_name: raise SaltInvocationError('Either source_group_group_id or' ' source_group_name can be specified in' ' security group rules, but not both.') if not (cidr_ip or group_id or group_name): raise SaltInvocationError('cidr_ip, source_group_group_id, or' ' source_group_name must be provided for' ' security group rules.') rule_found = False # for each rule in existing security group ruleset determine if # new rule exists for _rule in _rules: if _check_rule(rule, _rule): rule_found = True break if not rule_found: to_create.append(rule) # for each rule in existing security group configuration # 1. determine if rules needed to be deleted for _rule in _rules: rule_found = False for rule in rules: if _check_rule(rule, _rule): rule_found = True break if not rule_found: # Can only supply name or id, not both. Since we're deleting # entries, it doesn't matter which we pick. _rule.pop('source_group_name', None) to_delete.append(_rule) log.debug('Rules to be deleted: %s', to_delete) log.debug('Rules to be created: %s', to_create) return (to_delete, to_create)
['def', '_get_rule_changes', '(', 'rules', ',', '_rules', ')', ':', 'to_delete', '=', '[', ']', 'to_create', '=', '[', ']', '# for each rule in state file', '# 1. validate rule', '# 2. determine if rule exists in existing security group rules', 'for', 'rule', 'in', 'rules', ':', 'try', ':', 'ip_protocol', '=', 'six', '.', 'text_type', '(', 'rule', '.', 'get', '(', "'ip_protocol'", ')', ')', 'except', 'KeyError', ':', 'raise', 'SaltInvocationError', '(', "'ip_protocol, to_port, and from_port are'", "' required arguments for security group'", "' rules.'", ')', 'supported_protocols', '=', '[', "'tcp'", ',', "'6'", ',', '6', ',', "'udp'", ',', "'17'", ',', '17', ',', "'icmp'", ',', "'1'", ',', '1', ',', "'all'", ',', "'-1'", ',', '-', '1', ']', 'if', 'ip_protocol', 'not', 'in', 'supported_protocols', 'and', '(', 'not', "'{0}'", '.', 'format', '(', 'ip_protocol', ')', '.', 'isdigit', '(', ')', 'or', 'int', '(', 'ip_protocol', ')', '>', '255', ')', ':', 'raise', 'SaltInvocationError', '(', "'Invalid ip_protocol {0} specified in security group rule.'", '.', 'format', '(', 'ip_protocol', ')', ')', "# For the 'all' case, we need to change the protocol name to '-1'.", 'if', 'ip_protocol', '==', "'all'", ':', 'rule', '[', "'ip_protocol'", ']', '=', "'-1'", 'cidr_ip', '=', 'rule', '.', 'get', '(', "'cidr_ip'", ',', 'None', ')', 'group_name', '=', 'rule', '.', 'get', '(', "'source_group_name'", ',', 'None', ')', 'group_id', '=', 'rule', '.', 'get', '(', "'source_group_group_id'", ',', 'None', ')', 'if', 'cidr_ip', 'and', '(', 'group_id', 'or', 'group_name', ')', ':', 'raise', 'SaltInvocationError', '(', "'cidr_ip and source groups can not both'", "' be specified in security group rules.'", ')', 'if', 'group_id', 'and', 'group_name', ':', 'raise', 'SaltInvocationError', '(', "'Either source_group_group_id or'", "' source_group_name can be specified in'", "' security group rules, but not both.'", ')', 'if', 'not', '(', 'cidr_ip', 'or', 'group_id', 'or', 'group_name', ')', ':', 'raise', 'SaltInvocationError', '(', "'cidr_ip, source_group_group_id, or'", "' source_group_name must be provided for'", "' security group rules.'", ')', 'rule_found', '=', 'False', '# for each rule in existing security group ruleset determine if', '# new rule exists', 'for', '_rule', 'in', '_rules', ':', 'if', '_check_rule', '(', 'rule', ',', '_rule', ')', ':', 'rule_found', '=', 'True', 'break', 'if', 'not', 'rule_found', ':', 'to_create', '.', 'append', '(', 'rule', ')', '# for each rule in existing security group configuration', '# 1. determine if rules needed to be deleted', 'for', '_rule', 'in', '_rules', ':', 'rule_found', '=', 'False', 'for', 'rule', 'in', 'rules', ':', 'if', '_check_rule', '(', 'rule', ',', '_rule', ')', ':', 'rule_found', '=', 'True', 'break', 'if', 'not', 'rule_found', ':', "# Can only supply name or id, not both. Since we're deleting", "# entries, it doesn't matter which we pick.", '_rule', '.', 'pop', '(', "'source_group_name'", ',', 'None', ')', 'to_delete', '.', 'append', '(', '_rule', ')', 'log', '.', 'debug', '(', "'Rules to be deleted: %s'", ',', 'to_delete', ')', 'log', '.', 'debug', '(', "'Rules to be created: %s'", ',', 'to_create', ')', 'return', '(', 'to_delete', ',', 'to_create', ')']
given a list of desired rules (rules) and existing rules (_rules) return a list of rules to delete (to_delete) and to create (to_create)
['given', 'a', 'list', 'of', 'desired', 'rules', '(', 'rules', ')', 'and', 'existing', 'rules', '(', '_rules', ')', 'return', 'a', 'list', 'of', 'rules', 'to', 'delete', '(', 'to_delete', ')', 'and', 'to', 'create', '(', 'to_create', ')']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_secgroup.py#L341-L405
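A hedged usage sketch of the diff the helper computes; importing it requires a Salt installation, and the rule dicts follow the boto security-group shape this state module uses:

from salt.states.boto_secgroup import _get_rule_changes  # needs Salt installed

desired = [{'ip_protocol': 'tcp', 'from_port': 443, 'to_port': 443,
            'cidr_ip': '0.0.0.0/0'}]
existing = [{'ip_protocol': 'tcp', 'from_port': 22, 'to_port': 22,
             'cidr_ip': '10.0.0.0/8'}]
to_delete, to_create = _get_rule_changes(desired, existing)
# to_delete -> the existing port-22 rule (no longer desired)
# to_create -> the desired port-443 rule (not present yet)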
2,187
DarkEnergySurvey/ugali
ugali/utils/idl.py
bprecess
def bprecess(ra0, dec0, mu_radec=None, parallax=None, rad_vel=None, epoch=None): """ NAME: BPRECESS PURPOSE: Precess positions from J2000.0 (FK5) to B1950.0 (FK4) EXPLANATION: Calculates the mean place of a star at B1950.0 on the FK4 system from the mean place at J2000.0 on the FK5 system. CALLING SEQUENCE: bprecess, ra, dec, ra_1950, dec_1950, [ MU_RADEC = , PARALLAX = RAD_VEL =, EPOCH = ] INPUTS: RA,DEC - Input J2000 right ascension and declination in *degrees*. Scalar or N element vector OUTPUTS: RA_1950, DEC_1950 - The corresponding B1950 right ascension and declination in *degrees*. Same number of elements as RA,DEC but always double precision. OPTIONAL INPUT-OUTPUT KEYWORDS MU_RADEC - 2xN element double precision vector containing the proper motion in seconds of arc per tropical *century* in right ascension and declination. PARALLAX - N_element vector giving stellar parallax (seconds of arc) RAD_VEL - N_element vector giving radial velocity in km/s The values of MU_RADEC, PARALLAX, and RADVEL will all be modified upon output to contain the values of these quantities in the B1950 system. The parallax and radial velocity will have a very minor influence on the B1950 position. EPOCH - scalar giving epoch of original observations, default 2000.0d This keyword value is only used if the MU_RADEC keyword is not set. NOTES: The algorithm is taken from the Explanatory Supplement to the Astronomical Almanac 1992, page 186. Also see Aoki et al (1983), A&A, 128,263 BPRECESS distinguishes between the following two cases: (1) The proper motion is known and non-zero (2) the proper motion is unknown or known to be exactly zero (i.e. extragalactic radio sources). In this case, the reverse of the algorithm in Appendix 2 of Aoki et al. (1983) is used to ensure that the output proper motion is exactly zero. Better precision can be achieved in this case by inputting the EPOCH of the original observations. The error in using the IDL procedure PRECESS for converting between B1950 and J1950 can be up to 12", mainly in right ascension. If better accuracy than this is needed then BPRECESS should be used. An unsystematic comparison of BPRECESS with the IPAC precession routine (http://nedwww.ipac.caltech.edu/forms/calculator.html) always gives differences less than 0.15". EXAMPLE: The SAO2000 catalogue gives the J2000 position and proper motion for the star HD 119288. Find the B1950 position. RA(2000) = 13h 42m 12.740s Dec(2000) = 8d 23' 17.69'' Mu(RA) = -.0257 s/yr Mu(Dec) = -.090 ''/yr IDL> mu_radec = 100D* [ -15D*.0257, -0.090 ] IDL> ra = ten(13, 42, 12.740)*15.D IDL> dec = ten(8, 23, 17.69) IDL> bprecess, ra, dec, ra1950, dec1950, mu_radec = mu_radec IDL> print, adstring(ra1950, dec1950,2) ===> 13h 39m 44.526s +08d 38' 28.63" REVISION HISTORY: Written, W. Landsman October, 1992 Vectorized, W. Landsman February, 1994 Treat case where proper motion not known or exactly zero November 1994 Handling of arrays larger than 32767 Lars L. Christensen, march, 1995 Converted to IDL V5.0 W. Landsman September 1997 Fixed bug where A term not initialized for vector input W. Landsman February 2000 Converted to python Sergey Koposov july 2010 """ scal = True if isinstance(ra0, ndarray): ra = ra0 dec = dec0 n = ra.size scal = False else: n = 1 ra = array([ra0]) dec = array([dec0]) if rad_vel is None: rad_vel = zeros(n) else: if not isinstance(rad_vel, ndarray): rad_vel = array([rad_vel],dtype=float) if rad_vel.size != n: raise Exception('ERROR - RAD_VEL keyword vector must be of the same length as RA and DEC') if (mu_radec is not None): if (array(mu_radec).size != 2 * n): raise Exception('ERROR - MU_RADEC keyword (proper motion) be dimensioned (2,' + strtrim(n, 2) + ')') mu_radec = mu_radec * 1. if parallax is None: parallax = zeros(n) else: if not isinstance(parallax, ndarray): parallax = array([parallax],dtype=float) if epoch is None: epoch = 2000.0e0 radeg = 180.e0 / pi sec_to_radian = lambda x : deg2rad(x/3600.) m = array([array([+0.9999256795e0, -0.0111814828e0, -0.0048590040e0, -0.000551e0, -0.238560e0, +0.435730e0]), array([+0.0111814828e0, +0.9999374849e0, -0.0000271557e0, +0.238509e0, -0.002667e0, -0.008541e0]), array([+0.0048590039e0, -0.0000271771e0, +0.9999881946e0, -0.435614e0, +0.012254e0, +0.002117e0]), array([-0.00000242389840e0, +0.00000002710544e0, +0.00000001177742e0, +0.99990432e0, -0.01118145e0, -0.00485852e0]), array([-0.00000002710544e0, -0.00000242392702e0, +0.00000000006585e0, +0.01118145e0, +0.99991613e0, -0.00002716e0]), array([-0.00000001177742e0, +0.00000000006585e0, -0.00000242404995e0, +0.00485852e0, -0.00002717e0, +0.99996684e0])]) a_dot = 1e-3 * array([1.244e0, -1.579e0, -0.660e0]) #in arc seconds per century ra_rad = deg2rad(ra) dec_rad = deg2rad(dec) cosra = cos(ra_rad) sinra = sin(ra_rad) cosdec = cos(dec_rad) sindec = sin(dec_rad) dec_1950 = dec * 0. ra_1950 = ra * 0. for i in range(n): # Following statement moved inside loop in Feb 2000. a = 1e-6 * array([-1.62557e0, -0.31919e0, -0.13843e0]) #in radians r0 = array([cosra[i] * cosdec[i], sinra[i] * cosdec[i], sindec[i]]) if (mu_radec is not None): mu_a = mu_radec[i,0] mu_d = mu_radec[i,1] r0_dot = array([-mu_a * sinra[i] * cosdec[i] - mu_d * cosra[i] * sindec[i], mu_a * cosra[i] * cosdec[i] - mu_d * sinra[i] * sindec[i], mu_d * cosdec[i]]) + 21.095e0 * rad_vel[i] * parallax[i] * r0 else: r0_dot = array([0.0e0, 0.0e0, 0.0e0]) r_0 = concatenate((r0, r0_dot)) r_1 = transpose(dot(transpose(m), transpose(r_0))) # Include the effects of the E-terms of aberration to form r and r_dot. r1 = r_1[0:3] r1_dot = r_1[3:6] if mu_radec is None: r1 = r1 + sec_to_radian ( r1_dot * (epoch - 1950.0e0) / 100. ) a = a + sec_to_radian ( a_dot * (epoch - 1950.0e0) / 100. ) x1 = r_1[0] ; y1 = r_1[1] ; z1 = r_1[2] rmag = sqrt(x1 ** 2 + y1 ** 2 + z1 ** 2) s1 = r1 / rmag ; s1_dot = r1_dot / rmag s = s1 for j in arange(0, 3): r = s1 + a - ((s * a).sum()) * s s = r / rmag x = r[0] ; y = r[1] ; z = r[2] r2 = x ** 2 + y ** 2 + z ** 2 rmag = sqrt(r2) if mu_radec is not None: r_dot = s1_dot + a_dot - ((s * a_dot).sum()) * s x_dot = r_dot[0] ; y_dot = r_dot[1] ; z_dot = r_dot[2] mu_radec[i,0] = (x * y_dot - y * x_dot) / (x ** 2 + y ** 2) mu_radec[i,1] = (z_dot * (x ** 2 + y ** 2) - z * (x * x_dot + y * y_dot)) / (r2 * sqrt(x ** 2 + y ** 2)) dec_1950[i] = arcsin(z / rmag) ra_1950[i] = arctan2(y, x) if parallax[i] > 0.: rad_vel[i] = (x * x_dot + y * y_dot + z * z_dot) / (21.095 * parallax[i] * rmag) parallax[i] = parallax[i] / rmag neg = (ra_1950 < 0) if neg.any() > 0: ra_1950[neg] = ra_1950[neg] + 2.e0 * pi ra_1950 = rad2deg(ra_1950) dec_1950 = rad2deg(dec_1950) # Make output scalar if input was scalar if scal: return ra_1950[0],dec_1950[0] else: return ra_1950, dec_1950
python
def bprecess(ra0, dec0, mu_radec=None, parallax=None, rad_vel=None, epoch=None): """ NAME: BPRECESS PURPOSE: Precess positions from J2000.0 (FK5) to B1950.0 (FK4) EXPLANATION: Calculates the mean place of a star at B1950.0 on the FK4 system from the mean place at J2000.0 on the FK5 system. CALLING SEQUENCE: bprecess, ra, dec, ra_1950, dec_1950, [ MU_RADEC = , PARALLAX = RAD_VEL =, EPOCH = ] INPUTS: RA,DEC - Input J2000 right ascension and declination in *degrees*. Scalar or N element vector OUTPUTS: RA_1950, DEC_1950 - The corresponding B1950 right ascension and declination in *degrees*. Same number of elements as RA,DEC but always double precision. OPTIONAL INPUT-OUTPUT KEYWORDS MU_RADEC - 2xN element double precision vector containing the proper motion in seconds of arc per tropical *century* in right ascension and declination. PARALLAX - N_element vector giving stellar parallax (seconds of arc) RAD_VEL - N_element vector giving radial velocity in km/s The values of MU_RADEC, PARALLAX, and RADVEL will all be modified upon output to contain the values of these quantities in the B1950 system. The parallax and radial velocity will have a very minor influence on the B1950 position. EPOCH - scalar giving epoch of original observations, default 2000.0d This keyword value is only used if the MU_RADEC keyword is not set. NOTES: The algorithm is taken from the Explanatory Supplement to the Astronomical Almanac 1992, page 186. Also see Aoki et al (1983), A&A, 128,263 BPRECESS distinguishes between the following two cases: (1) The proper motion is known and non-zero (2) the proper motion is unknown or known to be exactly zero (i.e. extragalactic radio sources). In this case, the reverse of the algorithm in Appendix 2 of Aoki et al. (1983) is used to ensure that the output proper motion is exactly zero. Better precision can be achieved in this case by inputting the EPOCH of the original observations. The error in using the IDL procedure PRECESS for converting between B1950 and J1950 can be up to 12", mainly in right ascension. If better accuracy than this is needed then BPRECESS should be used. An unsystematic comparison of BPRECESS with the IPAC precession routine (http://nedwww.ipac.caltech.edu/forms/calculator.html) always gives differences less than 0.15". EXAMPLE: The SAO2000 catalogue gives the J2000 position and proper motion for the star HD 119288. Find the B1950 position. RA(2000) = 13h 42m 12.740s Dec(2000) = 8d 23' 17.69'' Mu(RA) = -.0257 s/yr Mu(Dec) = -.090 ''/yr IDL> mu_radec = 100D* [ -15D*.0257, -0.090 ] IDL> ra = ten(13, 42, 12.740)*15.D IDL> dec = ten(8, 23, 17.69) IDL> bprecess, ra, dec, ra1950, dec1950, mu_radec = mu_radec IDL> print, adstring(ra1950, dec1950,2) ===> 13h 39m 44.526s +08d 38' 28.63" REVISION HISTORY: Written, W. Landsman October, 1992 Vectorized, W. Landsman February, 1994 Treat case where proper motion not known or exactly zero November 1994 Handling of arrays larger than 32767 Lars L. Christensen, march, 1995 Converted to IDL V5.0 W. Landsman September 1997 Fixed bug where A term not initialized for vector input W. Landsman February 2000 Converted to python Sergey Koposov july 2010 """ scal = True if isinstance(ra0, ndarray): ra = ra0 dec = dec0 n = ra.size scal = False else: n = 1 ra = array([ra0]) dec = array([dec0]) if rad_vel is None: rad_vel = zeros(n) else: if not isinstance(rad_vel, ndarray): rad_vel = array([rad_vel],dtype=float) if rad_vel.size != n: raise Exception('ERROR - RAD_VEL keyword vector must be of the same length as RA and DEC') if (mu_radec is not None): if (array(mu_radec).size != 2 * n): raise Exception('ERROR - MU_RADEC keyword (proper motion) be dimensioned (2,' + strtrim(n, 2) + ')') mu_radec = mu_radec * 1. if parallax is None: parallax = zeros(n) else: if not isinstance(parallax, ndarray): parallax = array([parallax],dtype=float) if epoch is None: epoch = 2000.0e0 radeg = 180.e0 / pi sec_to_radian = lambda x : deg2rad(x/3600.) m = array([array([+0.9999256795e0, -0.0111814828e0, -0.0048590040e0, -0.000551e0, -0.238560e0, +0.435730e0]), array([+0.0111814828e0, +0.9999374849e0, -0.0000271557e0, +0.238509e0, -0.002667e0, -0.008541e0]), array([+0.0048590039e0, -0.0000271771e0, +0.9999881946e0, -0.435614e0, +0.012254e0, +0.002117e0]), array([-0.00000242389840e0, +0.00000002710544e0, +0.00000001177742e0, +0.99990432e0, -0.01118145e0, -0.00485852e0]), array([-0.00000002710544e0, -0.00000242392702e0, +0.00000000006585e0, +0.01118145e0, +0.99991613e0, -0.00002716e0]), array([-0.00000001177742e0, +0.00000000006585e0, -0.00000242404995e0, +0.00485852e0, -0.00002717e0, +0.99996684e0])]) a_dot = 1e-3 * array([1.244e0, -1.579e0, -0.660e0]) #in arc seconds per century ra_rad = deg2rad(ra) dec_rad = deg2rad(dec) cosra = cos(ra_rad) sinra = sin(ra_rad) cosdec = cos(dec_rad) sindec = sin(dec_rad) dec_1950 = dec * 0. ra_1950 = ra * 0. for i in range(n): # Following statement moved inside loop in Feb 2000. a = 1e-6 * array([-1.62557e0, -0.31919e0, -0.13843e0]) #in radians r0 = array([cosra[i] * cosdec[i], sinra[i] * cosdec[i], sindec[i]]) if (mu_radec is not None): mu_a = mu_radec[i,0] mu_d = mu_radec[i,1] r0_dot = array([-mu_a * sinra[i] * cosdec[i] - mu_d * cosra[i] * sindec[i], mu_a * cosra[i] * cosdec[i] - mu_d * sinra[i] * sindec[i], mu_d * cosdec[i]]) + 21.095e0 * rad_vel[i] * parallax[i] * r0 else: r0_dot = array([0.0e0, 0.0e0, 0.0e0]) r_0 = concatenate((r0, r0_dot)) r_1 = transpose(dot(transpose(m), transpose(r_0))) # Include the effects of the E-terms of aberration to form r and r_dot. r1 = r_1[0:3] r1_dot = r_1[3:6] if mu_radec is None: r1 = r1 + sec_to_radian ( r1_dot * (epoch - 1950.0e0) / 100. ) a = a + sec_to_radian ( a_dot * (epoch - 1950.0e0) / 100. ) x1 = r_1[0] ; y1 = r_1[1] ; z1 = r_1[2] rmag = sqrt(x1 ** 2 + y1 ** 2 + z1 ** 2) s1 = r1 / rmag ; s1_dot = r1_dot / rmag s = s1 for j in arange(0, 3): r = s1 + a - ((s * a).sum()) * s s = r / rmag x = r[0] ; y = r[1] ; z = r[2] r2 = x ** 2 + y ** 2 + z ** 2 rmag = sqrt(r2) if mu_radec is not None: r_dot = s1_dot + a_dot - ((s * a_dot).sum()) * s x_dot = r_dot[0] ; y_dot = r_dot[1] ; z_dot = r_dot[2] mu_radec[i,0] = (x * y_dot - y * x_dot) / (x ** 2 + y ** 2) mu_radec[i,1] = (z_dot * (x ** 2 + y ** 2) - z * (x * x_dot + y * y_dot)) / (r2 * sqrt(x ** 2 + y ** 2)) dec_1950[i] = arcsin(z / rmag) ra_1950[i] = arctan2(y, x) if parallax[i] > 0.: rad_vel[i] = (x * x_dot + y * y_dot + z * z_dot) / (21.095 * parallax[i] * rmag) parallax[i] = parallax[i] / rmag neg = (ra_1950 < 0) if neg.any() > 0: ra_1950[neg] = ra_1950[neg] + 2.e0 * pi ra_1950 = rad2deg(ra_1950) dec_1950 = rad2deg(dec_1950) # Make output scalar if input was scalar if scal: return ra_1950[0],dec_1950[0] else: return ra_1950, dec_1950
['def', 'bprecess', '(', 'ra0', ',', 'dec0', ',', 'mu_radec', '=', 'None', ',', 'parallax', '=', 'None', ',', 'rad_vel', '=', 'None', ',', 'epoch', '=', 'None', ')', ':', 'scal', '=', 'True', 'if', 'isinstance', '(', 'ra0', ',', 'ndarray', ')', ':', 'ra', '=', 'ra0', 'dec', '=', 'dec0', 'n', '=', 'ra', '.', 'size', 'scal', '=', 'False', 'else', ':', 'n', '=', '1', 'ra', '=', 'array', '(', '[', 'ra0', ']', ')', 'dec', '=', 'array', '(', '[', 'dec0', ']', ')', 'if', 'rad_vel', 'is', 'None', ':', 'rad_vel', '=', 'zeros', '(', 'n', ')', 'else', ':', 'if', 'not', 'isinstance', '(', 'rad_vel', ',', 'ndarray', ')', ':', 'rad_vel', '=', 'array', '(', '[', 'rad_vel', ']', ',', 'dtype', '=', 'float', ')', 'if', 'rad_vel', '.', 'size', '!=', 'n', ':', 'raise', 'Exception', '(', "'ERROR - RAD_VEL keyword vector must be of the same length as RA and DEC'", ')', 'if', '(', 'mu_radec', 'is', 'not', 'None', ')', ':', 'if', '(', 'array', '(', 'mu_radec', ')', '.', 'size', '!=', '2', '*', 'n', ')', ':', 'raise', 'Exception', '(', "'ERROR - MU_RADEC keyword (proper motion) be dimensioned (2,'", '+', 'strtrim', '(', 'n', ',', '2', ')', '+', "')'", ')', 'mu_radec', '=', 'mu_radec', '*', '1.', 'if', 'parallax', 'is', 'None', ':', 'parallax', '=', 'zeros', '(', 'n', ')', 'else', ':', 'if', 'not', 'isinstance', '(', 'parallax', ',', 'ndarray', ')', ':', 'parallax', '=', 'array', '(', '[', 'parallax', ']', ',', 'dtype', '=', 'float', ')', 'if', 'epoch', 'is', 'None', ':', 'epoch', '=', '2000.0e0', 'radeg', '=', '180.e0', '/', 'pi', 'sec_to_radian', '=', 'lambda', 'x', ':', 'deg2rad', '(', 'x', '/', '3600.', ')', 'm', '=', 'array', '(', '[', 'array', '(', '[', '+', '0.9999256795e0', ',', '-', '0.0111814828e0', ',', '-', '0.0048590040e0', ',', '-', '0.000551e0', ',', '-', '0.238560e0', ',', '+', '0.435730e0', ']', ')', ',', 'array', '(', '[', '+', '0.0111814828e0', ',', '+', '0.9999374849e0', ',', '-', '0.0000271557e0', ',', '+', '0.238509e0', ',', '-', '0.002667e0', ',', '-', '0.008541e0', ']', ')', ',', 'array', '(', '[', '+', '0.0048590039e0', ',', '-', '0.0000271771e0', ',', '+', '0.9999881946e0', ',', '-', '0.435614e0', ',', '+', '0.012254e0', ',', '+', '0.002117e0', ']', ')', ',', 'array', '(', '[', '-', '0.00000242389840e0', ',', '+', '0.00000002710544e0', ',', '+', '0.00000001177742e0', ',', '+', '0.99990432e0', ',', '-', '0.01118145e0', ',', '-', '0.00485852e0', ']', ')', ',', 'array', '(', '[', '-', '0.00000002710544e0', ',', '-', '0.00000242392702e0', ',', '+', '0.00000000006585e0', ',', '+', '0.01118145e0', ',', '+', '0.99991613e0', ',', '-', '0.00002716e0', ']', ')', ',', 'array', '(', '[', '-', '0.00000001177742e0', ',', '+', '0.00000000006585e0', ',', '-', '0.00000242404995e0', ',', '+', '0.00485852e0', ',', '-', '0.00002717e0', ',', '+', '0.99996684e0', ']', ')', ']', ')', 'a_dot', '=', '1e-3', '*', 'array', '(', '[', '1.244e0', ',', '-', '1.579e0', ',', '-', '0.660e0', ']', ')', '#in arc seconds per century', 'ra_rad', '=', 'deg2rad', '(', 'ra', ')', 'dec_rad', '=', 'deg2rad', '(', 'dec', ')', 'cosra', '=', 'cos', '(', 'ra_rad', ')', 'sinra', '=', 'sin', '(', 'ra_rad', ')', 'cosdec', '=', 'cos', '(', 'dec_rad', ')', 'sindec', '=', 'sin', '(', 'dec_rad', ')', 'dec_1950', '=', 'dec', '*', '0.', 'ra_1950', '=', 'ra', '*', '0.', 'for', 'i', 'in', 'range', '(', 'n', ')', ':', '# Following statement moved inside loop in Feb 2000.', 'a', '=', '1e-6', '*', 'array', '(', '[', '-', '1.62557e0', ',', '-', '0.31919e0', ',', '-', '0.13843e0', ']', ')', '#in radians', 'r0', '=', 'array', '(', '[', 'cosra', '[', 'i', ']', '*', 'cosdec', '[', 'i', ']', ',', 'sinra', '[', 'i', ']', '*', 'cosdec', '[', 'i', ']', ',', 'sindec', '[', 'i', ']', ']', ')', 'if', '(', 'mu_radec', 'is', 'not', 'None', ')', ':', 'mu_a', '=', 'mu_radec', '[', 'i', ',', '0', ']', 'mu_d', '=', 'mu_radec', '[', 'i', ',', '1', ']', 'r0_dot', '=', 'array', '(', '[', '-', 'mu_a', '*', 'sinra', '[', 'i', ']', '*', 'cosdec', '[', 'i', ']', '-', 'mu_d', '*', 'cosra', '[', 'i', ']', '*', 'sindec', '[', 'i', ']', ',', 'mu_a', '*', 'cosra', '[', 'i', ']', '*', 'cosdec', '[', 'i', ']', '-', 'mu_d', '*', 'sinra', '[', 'i', ']', '*', 'sindec', '[', 'i', ']', ',', 'mu_d', '*', 'cosdec', '[', 'i', ']', ']', ')', '+', '21.095e0', '*', 'rad_vel', '[', 'i', ']', '*', 'parallax', '[', 'i', ']', '*', 'r0', 'else', ':', 'r0_dot', '=', 'array', '(', '[', '0.0e0', ',', '0.0e0', ',', '0.0e0', ']', ')', 'r_0', '=', 'concatenate', '(', '(', 'r0', ',', 'r0_dot', ')', ')', 'r_1', '=', 'transpose', '(', 'dot', '(', 'transpose', '(', 'm', ')', ',', 'transpose', '(', 'r_0', ')', ')', ')', '# Include the effects of the E-terms of aberration to form r and r_dot.', 'r1', '=', 'r_1', '[', '0', ':', '3', ']', 'r1_dot', '=', 'r_1', '[', '3', ':', '6', ']', 'if', 'mu_radec', 'is', 'None', ':', 'r1', '=', 'r1', '+', 'sec_to_radian', '(', 'r1_dot', '*', '(', 'epoch', '-', '1950.0e0', ')', '/', '100.', ')', 'a', '=', 'a', '+', 'sec_to_radian', '(', 'a_dot', '*', '(', 'epoch', '-', '1950.0e0', ')', '/', '100.', ')', 'x1', '=', 'r_1', '[', '0', ']', 'y1', '=', 'r_1', '[', '1', ']', 'z1', '=', 'r_1', '[', '2', ']', 'rmag', '=', 'sqrt', '(', 'x1', '**', '2', '+', 'y1', '**', '2', '+', 'z1', '**', '2', ')', 's1', '=', 'r1', '/', 'rmag', 's1_dot', '=', 'r1_dot', '/', 'rmag', 's', '=', 's1', 'for', 'j', 'in', 'arange', '(', '0', ',', '3', ')', ':', 'r', '=', 's1', '+', 'a', '-', '(', '(', 's', '*', 'a', ')', '.', 'sum', '(', ')', ')', '*', 's', 's', '=', 'r', '/', 'rmag', 'x', '=', 'r', '[', '0', ']', 'y', '=', 'r', '[', '1', ']', 'z', '=', 'r', '[', '2', ']', 'r2', '=', 'x', '**', '2', '+', 'y', '**', '2', '+', 'z', '**', '2', 'rmag', '=', 'sqrt', '(', 'r2', ')', 'if', 'mu_radec', 'is', 'not', 'None', ':', 'r_dot', '=', 's1_dot', '+', 'a_dot', '-', '(', '(', 's', '*', 'a_dot', ')', '.', 'sum', '(', ')', ')', '*', 's', 'x_dot', '=', 'r_dot', '[', '0', ']', 'y_dot', '=', 'r_dot', '[', '1', ']', 'z_dot', '=', 'r_dot', '[', '2', ']', 'mu_radec', '[', 'i', ',', '0', ']', '=', '(', 'x', '*', 'y_dot', '-', 'y', '*', 'x_dot', ')', '/', '(', 'x', '**', '2', '+', 'y', '**', '2', ')', 'mu_radec', '[', 'i', ',', '1', ']', '=', '(', 'z_dot', '*', '(', 'x', '**', '2', '+', 'y', '**', '2', ')', '-', 'z', '*', '(', 'x', '*', 'x_dot', '+', 'y', '*', 'y_dot', ')', ')', '/', '(', 'r2', '*', 'sqrt', '(', 'x', '**', '2', '+', 'y', '**', '2', ')', ')', 'dec_1950', '[', 'i', ']', '=', 'arcsin', '(', 'z', '/', 'rmag', ')', 'ra_1950', '[', 'i', ']', '=', 'arctan2', '(', 'y', ',', 'x', ')', 'if', 'parallax', '[', 'i', ']', '>', '0.', ':', 'rad_vel', '[', 'i', ']', '=', '(', 'x', '*', 'x_dot', '+', 'y', '*', 'y_dot', '+', 'z', '*', 'z_dot', ')', '/', '(', '21.095', '*', 'parallax', '[', 'i', ']', '*', 'rmag', ')', 'parallax', '[', 'i', ']', '=', 'parallax', '[', 'i', ']', '/', 'rmag', 'neg', '=', '(', 'ra_1950', '<', '0', ')', 'if', 'neg', '.', 'any', '(', ')', '>', '0', ':', 'ra_1950', '[', 'neg', ']', '=', 'ra_1950', '[', 'neg', ']', '+', '2.e0', '*', 'pi', 'ra_1950', '=', 'rad2deg', '(', 'ra_1950', ')', 'dec_1950', '=', 'rad2deg', '(', 'dec_1950', ')', '# Make output scalar if input was scalar', 'if', 'scal', ':', 'return', 'ra_1950', '[', '0', ']', ',', 'dec_1950', '[', '0', ']', 'else', ':', 'return', 'ra_1950', ',', 'dec_1950']
NAME: BPRECESS PURPOSE: Precess positions from J2000.0 (FK5) to B1950.0 (FK4) EXPLANATION: Calculates the mean place of a star at B1950.0 on the FK4 system from the mean place at J2000.0 on the FK5 system. CALLING SEQUENCE: bprecess, ra, dec, ra_1950, dec_1950, [ MU_RADEC = , PARALLAX = RAD_VEL =, EPOCH = ] INPUTS: RA,DEC - Input J2000 right ascension and declination in *degrees*. Scalar or N element vector OUTPUTS: RA_1950, DEC_1950 - The corresponding B1950 right ascension and declination in *degrees*. Same number of elements as RA,DEC but always double precision. OPTIONAL INPUT-OUTPUT KEYWORDS MU_RADEC - 2xN element double precision vector containing the proper motion in seconds of arc per tropical *century* in right ascension and declination. PARALLAX - N_element vector giving stellar parallax (seconds of arc) RAD_VEL - N_element vector giving radial velocity in km/s The values of MU_RADEC, PARALLAX, and RADVEL will all be modified upon output to contain the values of these quantities in the B1950 system. The parallax and radial velocity will have a very minor influence on the B1950 position. EPOCH - scalar giving epoch of original observations, default 2000.0d This keyword value is only used if the MU_RADEC keyword is not set. NOTES: The algorithm is taken from the Explanatory Supplement to the Astronomical Almanac 1992, page 186. Also see Aoki et al (1983), A&A, 128,263 BPRECESS distinguishes between the following two cases: (1) The proper motion is known and non-zero (2) the proper motion is unknown or known to be exactly zero (i.e. extragalactic radio sources). In this case, the reverse of the algorithm in Appendix 2 of Aoki et al. (1983) is used to ensure that the output proper motion is exactly zero. Better precision can be achieved in this case by inputting the EPOCH of the original observations. The error in using the IDL procedure PRECESS for converting between B1950 and J1950 can be up to 12", mainly in right ascension. If better accuracy than this is needed then BPRECESS should be used. An unsystematic comparison of BPRECESS with the IPAC precession routine (http://nedwww.ipac.caltech.edu/forms/calculator.html) always gives differences less than 0.15". EXAMPLE: The SAO2000 catalogue gives the J2000 position and proper motion for the star HD 119288. Find the B1950 position. RA(2000) = 13h 42m 12.740s Dec(2000) = 8d 23' 17.69'' Mu(RA) = -.0257 s/yr Mu(Dec) = -.090 ''/yr IDL> mu_radec = 100D* [ -15D*.0257, -0.090 ] IDL> ra = ten(13, 42, 12.740)*15.D IDL> dec = ten(8, 23, 17.69) IDL> bprecess, ra, dec, ra1950, dec1950, mu_radec = mu_radec IDL> print, adstring(ra1950, dec1950,2) ===> 13h 39m 44.526s +08d 38' 28.63" REVISION HISTORY: Written, W. Landsman October, 1992 Vectorized, W. Landsman February, 1994 Treat case where proper motion not known or exactly zero November 1994 Handling of arrays larger than 32767 Lars L. Christensen, march, 1995 Converted to IDL V5.0 W. Landsman September 1997 Fixed bug where A term not initialized for vector input W. Landsman February 2000 Converted to python Sergey Koposov july 2010
['NAME', ':', 'BPRECESS', 'PURPOSE', ':', 'Precess', 'positions', 'from', 'J2000', '.', '0', '(', 'FK5', ')', 'to', 'B1950', '.', '0', '(', 'FK4', ')', 'EXPLANATION', ':', 'Calculates', 'the', 'mean', 'place', 'of', 'a', 'star', 'at', 'B1950', '.', '0', 'on', 'the', 'FK4', 'system', 'from', 'the', 'mean', 'place', 'at', 'J2000', '.', '0', 'on', 'the', 'FK5', 'system', '.', 'CALLING', 'SEQUENCE', ':', 'bprecess', 'ra', 'dec', 'ra_1950', 'dec_1950', '[', 'MU_RADEC', '=', 'PARALLAX', '=', 'RAD_VEL', '=', 'EPOCH', '=', ']', 'INPUTS', ':', 'RA', 'DEC', '-', 'Input', 'J2000', 'right', 'ascension', 'and', 'declination', 'in', '*', 'degrees', '*', '.', 'Scalar', 'or', 'N', 'element', 'vector', 'OUTPUTS', ':', 'RA_1950', 'DEC_1950', '-', 'The', 'corresponding', 'B1950', 'right', 'ascension', 'and', 'declination', 'in', '*', 'degrees', '*', '.', 'Same', 'number', 'of', 'elements', 'as', 'RA', 'DEC', 'but', 'always', 'double', 'precision', '.', 'OPTIONAL', 'INPUT', '-', 'OUTPUT', 'KEYWORDS', 'MU_RADEC', '-', '2xN', 'element', 'double', 'precision', 'vector', 'containing', 'the', 'proper', 'motion', 'in', 'seconds', 'of', 'arc', 'per', 'tropical', '*', 'century', '*', 'in', 'right', 'ascension', 'and', 'declination', '.', 'PARALLAX', '-', 'N_element', 'vector', 'giving', 'stellar', 'parallax', '(', 'seconds', 'of', 'arc', ')', 'RAD_VEL', '-', 'N_element', 'vector', 'giving', 'radial', 'velocity', 'in', 'km', '/', 's', 'The', 'values', 'of', 'MU_RADEC', 'PARALLAX', 'and', 'RADVEL', 'will', 'all', 'be', 'modified', 'upon', 'output', 'to', 'contain', 'the', 'values', 'of', 'these', 'quantities', 'in', 'the', 'B1950', 'system', '.', 'The', 'parallax', 'and', 'radial', 'velocity', 'will', 'have', 'a', 'very', 'minor', 'influence', 'on', 'the', 'B1950', 'position', '.', 'EPOCH', '-', 'scalar', 'giving', 'epoch', 'of', 'original', 'observations', 'default', '2000', '.', '0d', 'This', 'keyword', 'value', 'is', 'only', 'used', 'if', 'the', 'MU_RADEC', 'keyword', 'is', 'not', 'set', '.', 'NOTES', ':', 'The', 'algorithm', 'is', 'taken', 'from', 'the', 'Explanatory', 'Supplement', 'to', 'the', 'Astronomical', 'Almanac', '1992', 'page', '186', '.', 'Also', 'see', 'Aoki', 'et', 'al', '(', '1983', ')', 'A&A', '128', '263', 'BPRECESS', 'distinguishes', 'between', 'the', 'following', 'two', 'cases', ':', '(', '1', ')', 'The', 'proper', 'motion', 'is', 'known', 'and', 'non', '-', 'zero', '(', '2', ')', 'the', 'proper', 'motion', 'is', 'unknown', 'or', 'known', 'to', 'be', 'exactly', 'zero', '(', 'i', '.', 'e', '.', 'extragalactic', 'radio', 'sources', ')', '.', 'In', 'this', 'case', 'the', 'reverse', 'of', 'the', 'algorithm', 'in', 'Appendix', '2', 'of', 'Aoki', 'et', 'al', '.', '(', '1983', ')', 'is', 'used', 'to', 'ensure', 'that', 'the', 'output', 'proper', 'motion', 'is', 'exactly', 'zero', '.', 'Better', 'precision', 'can', 'be', 'achieved', 'in', 'this', 'case', 'by', 'inputting', 'the', 'EPOCH', 'of', 'the', 'original', 'observations', '.', 'The', 'error', 'in', 'using', 'the', 'IDL', 'procedure', 'PRECESS', 'for', 'converting', 'between', 'B1950', 'and', 'J1950', 'can', 'be', 'up', 'to', '12', 'mainly', 'in', 'right', 'ascension', '.', 'If', 'better', 'accuracy', 'than', 'this', 'is', 'needed', 'then', 'BPRECESS', 'should', 'be', 'used', '.', 'An', 'unsystematic', 'comparison', 'of', 'BPRECESS', 'with', 'the', 'IPAC', 'precession', 'routine', '(', 'http', ':', '//', 'nedwww', '.', 'ipac', '.', 'caltech', '.', 'edu', '/', 'forms', '/', 'calculator', '.', 'html', ')', 'always', 'gives', 'differences', 'less', 
'than', '0', '.', '15', '.', 'EXAMPLE', ':', 'The', 'SAO2000', 'catalogue', 'gives', 'the', 'J2000', 'position', 'and', 'proper', 'motion', 'for', 'the', 'star', 'HD', '119288', '.', 'Find', 'the', 'B1950', 'position', '.', 'RA', '(', '2000', ')', '=', '13h', '42m', '12', '.', '740s', 'Dec', '(', '2000', ')', '=', '8d', '23', '17', '.', '69', 'Mu', '(', 'RA', ')', '=', '-', '.', '0257', 's', '/', 'yr', 'Mu', '(', 'Dec', ')', '=', '-', '.', '090', '/', 'yr', 'IDL', '>', 'mu_radec', '=', '100D', '*', '[', '-', '15D', '*', '.', '0257', '-', '0', '.', '090', ']', 'IDL', '>', 'ra', '=', 'ten', '(', '13', '42', '12', '.', '740', ')', '*', '15', '.', 'D', 'IDL', '>', 'dec', '=', 'ten', '(', '8', '23', '17', '.', '69', ')', 'IDL', '>', 'bprecess', 'ra', 'dec', 'ra1950', 'dec1950', 'mu_radec', '=', 'mu_radec', 'IDL', '>', 'print', 'adstring', '(', 'ra1950', 'dec1950', '2', ')', '===', '>', '13h', '39m', '44', '.', '526s', '+', '08d', '38', '28', '.', '63', 'REVISION', 'HISTORY', ':', 'Written', 'W', '.', 'Landsman', 'October', '1992', 'Vectorized', 'W', '.', 'Landsman', 'February', '1994', 'Treat', 'case', 'where', 'proper', 'motion', 'not', 'known', 'or', 'exactly', 'zero', 'November', '1994', 'Handling', 'of', 'arrays', 'larger', 'than', '32767', 'Lars', 'L', '.', 'Christensen', 'march', '1995', 'Converted', 'to', 'IDL', 'V5', '.', '0', 'W', '.', 'Landsman', 'September', '1997', 'Fixed', 'bug', 'where', 'A', 'term', 'not', 'initialized', 'for', 'vector', 'input', 'W', '.', 'Landsman', 'February', '2000', 'Converted', 'to', 'python', 'Sergey', 'Koposov', 'july', '2010']
train
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/idl.py#L214-L418
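A usage sketch for this record's function, mirroring the docstring's IDL example for HD 119288. The import path is inferred from this record's file location, and proper motion is omitted here, so the result differs slightly from the quoted IDL output.

from ugali.utils.idl import bprecess  # import path assumed from this record

# J2000 position of HD 119288 (from the docstring's example), in degrees.
ra2000 = (13 + 42 / 60. + 12.740 / 3600.) * 15.
dec2000 = 8 + 23 / 60. + 17.69 / 3600.

# Scalar inputs yield scalar outputs; NumPy arrays are also accepted.
ra1950, dec1950 = bprecess(ra2000, dec2000)
print(ra1950, dec1950)  # roughly 204.94 and 8.64 (B1950, degrees)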
2,188
hazelcast/hazelcast-python-client
hazelcast/proxy/map.py
Map.remove
def remove(self, key): """ Removes the mapping for a key from this map if it is present. The map will not contain a mapping for the specified key once the call returns. **Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations of __hash__ and __eq__ defined in key's class.** :param key: (object), key of the mapping to be deleted. :return: (object), the previous value associated with key, or ``None`` if there was no mapping for key. """ check_not_none(key, "key can't be None") key_data = self._to_data(key) return self._remove_internal(key_data)
python
def remove(self, key): """ Removes the mapping for a key from this map if it is present. The map will not contain a mapping for the specified key once the call returns. **Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations of __hash__ and __eq__ defined in key's class.** :param key: (object), key of the mapping to be deleted. :return: (object), the previous value associated with key, or ``None`` if there was no mapping for key. """ check_not_none(key, "key can't be None") key_data = self._to_data(key) return self._remove_internal(key_data)
['def', 'remove', '(', 'self', ',', 'key', ')', ':', 'check_not_none', '(', 'key', ',', '"key can\'t be None"', ')', 'key_data', '=', 'self', '.', '_to_data', '(', 'key', ')', 'return', 'self', '.', '_remove_internal', '(', 'key_data', ')']
Removes the mapping for a key from this map if it is present. The map will not contain a mapping for the specified key once the call returns. **Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations of __hash__ and __eq__ defined in key's class.** :param key: (object), key of the mapping to be deleted. :return: (object), the previous value associated with key, or ``None`` if there was no mapping for key.
['Removes', 'the', 'mapping', 'for', 'a', 'key', 'from', 'this', 'map', 'if', 'it', 'is', 'present', '.', 'The', 'map', 'will', 'not', 'contain', 'a', 'mapping', 'for', 'the', 'specified', 'key', 'once', 'the', 'call', 'returns', '.']
train
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/map.py#L595-L608
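A hypothetical client sketch for this record's method. Cluster configuration is elided, the map name and entries are made up, and calling .result() to block on the returned future is an assumption based on this client's asynchronous proxy style.

import hazelcast

client = hazelcast.HazelcastClient()
customers = client.get_map('customers')

customers.put('42', 'Ada Lovelace').result()
print(customers.remove('42').result())  # 'Ada Lovelace': the previous value
print(customers.remove('42').result())  # None: no mapping left for the key
client.shutdown()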
2,189
JukeboxPipeline/jukebox-core
src/jukeboxcore/addons/guerilla/guerillamgmt.py
DepCreatorDialog.create_dep
def create_dep(self, ): """Create a dep and store it in self.dep :returns: None :rtype: None :raises: None """ name = self.name_le.text() short = self.short_le.text() assetflag = self.asset_rb.isChecked() ordervalue = self.ordervalue_sb.value() desc = self.desc_pte.toPlainText() try: dep = djadapter.models.Department(name=name, short=short, assetflag=assetflag, ordervalue=ordervalue, description=desc) dep.save() for prj in self.projects: dep.projects.add(prj) self.dep = dep self.accept() except: log.exception("Could not create new department.")
python
def create_dep(self, ): """Create a dep and store it in self.dep :returns: None :rtype: None :raises: None """ name = self.name_le.text() short = self.short_le.text() assetflag = self.asset_rb.isChecked() ordervalue = self.ordervalue_sb.value() desc = self.desc_pte.toPlainText() try: dep = djadapter.models.Department(name=name, short=short, assetflag=assetflag, ordervalue=ordervalue, description=desc) dep.save() for prj in self.projects: dep.projects.add(prj) self.dep = dep self.accept() except: log.exception("Could not create new department.")
['def', 'create_dep', '(', 'self', ',', ')', ':', 'name', '=', 'self', '.', 'name_le', '.', 'text', '(', ')', 'short', '=', 'self', '.', 'short_le', '.', 'text', '(', ')', 'assetflag', '=', 'self', '.', 'asset_rb', '.', 'isChecked', '(', ')', 'ordervalue', '=', 'self', '.', 'ordervalue_sb', '.', 'value', '(', ')', 'desc', '=', 'self', '.', 'desc_pte', '.', 'toPlainText', '(', ')', 'try', ':', 'dep', '=', 'djadapter', '.', 'models', '.', 'Department', '(', 'name', '=', 'name', ',', 'short', '=', 'short', ',', 'assetflag', '=', 'assetflag', ',', 'ordervalue', '=', 'ordervalue', ',', 'description', '=', 'desc', ')', 'dep', '.', 'save', '(', ')', 'for', 'prj', 'in', 'self', '.', 'projects', ':', 'dep', '.', 'projects', '.', 'add', '(', 'prj', ')', 'self', '.', 'dep', '=', 'dep', 'self', '.', 'accept', '(', ')', 'except', ':', 'log', '.', 'exception', '(', '"Could not create new department."', ')']
Create a dep and store it in self.dep :returns: None :rtype: None :raises: None
['Create', 'a', 'dep', 'and', 'store', 'it', 'in', 'self', '.', 'dep']
train
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/guerilla/guerillamgmt.py#L272-L292
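A minimal sketch of the same creation flow without the Qt dialog, showing what the slot does with the collected form values. The djadapter import path is an assumption, and all field values are placeholders.

from jukeboxcore import djadapter  # import path assumed

def create_department(projects, name='Modeling', short='mod'):
    # Mirrors DepCreatorDialog.create_dep: create, save, then link projects.
    dep = djadapter.models.Department(name=name, short=short, assetflag=True,
                                      ordervalue=10, description='')
    dep.save()
    for prj in projects:
        dep.projects.add(prj)
    return dep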
2,190
OLC-Bioinformatics/ConFindr
confindr_src/wrappers/bbtools.py
bbduk_trim
def bbduk_trim(forward_in, forward_out, reverse_in='NA', reverse_out='NA', returncmd=False, **kwargs): """ Wrapper for using bbduk to quality trim reads. Contains arguments used in OLC Assembly Pipeline, but these can be overwritten by using keyword parameters. :param forward_in: Forward reads you want to quality trim. :param returncmd: If set to true, function will return the cmd string passed to subprocess as a third value. :param forward_out: Output forward reads. :param reverse_in: Reverse input reads. Don't need to be specified if _R1/_R2 naming convention is used. :param reverse_out: Reverse output reads. Don't need to be specified if _R1/_R2 convention is used. :param kwargs: Other arguments to give to bbduk in parameter=argument format. See bbduk documentation for full list. :return: out and err: stdout string and stderr string from running bbduk. """ options = kwargs_to_string(kwargs) cmd = 'which bbduk.sh' try: subprocess.check_output(cmd.split()).decode('utf-8') except subprocess.CalledProcessError: print('ERROR: Could not find bbduk. Plase check that the bbtools package is installed and on your $PATH.\n\n') raise FileNotFoundError if os.path.isfile(forward_in.replace('_R1', '_R2')) and reverse_in == 'NA' and '_R1' in forward_in: reverse_in = forward_in.replace('_R1', '_R2') if reverse_out == 'NA': if '_R1' in forward_out: reverse_out = forward_out.replace('_R1', '_R2') else: raise ValueError('If you do not specify reverse_out, forward_out must contain R1.\n\n') cmd = 'bbduk.sh in1={f_in} in2={r_in} out1={f_out} out2={r_out} qtrim=w trimq=20 k=25 minlength=50 ' \ 'forcetrimleft=15 ref=adapters overwrite hdist=1 tpe tbo{optn}'\ .format(f_in=forward_in, r_in=reverse_in, f_out=forward_out, r_out=reverse_out, optn=options) elif reverse_in == 'NA': cmd = 'bbduk.sh in={f_in} out={f_out} qtrim=w trimq=20 k=25 minlength=50 forcetrimleft=15' \ ' ref=adapters overwrite hdist=1 tpe tbo{optn}'\ .format(f_in=forward_in, f_out=forward_out, optn=options) else: if reverse_out == 'NA': raise ValueError('Reverse output reads must be specified.') cmd = 'bbduk.sh in1={f_in} in2={r_in} out1={f_out} out2={r_out} qtrim=w trimq=20 k=25 minlength=50 ' \ 'forcetrimleft=15 ref=adapters overwrite hdist=1 tpe tbo{optn}'\ .format(f_in=forward_in, r_in=reverse_in, f_out=forward_out, r_out=reverse_out, optn=options) out, err = run_subprocess(cmd) if returncmd: return out, err, cmd else: return out, err
python
def bbduk_trim(forward_in, forward_out, reverse_in='NA', reverse_out='NA', returncmd=False, **kwargs): """ Wrapper for using bbduk to quality trim reads. Contains arguments used in OLC Assembly Pipeline, but these can be overwritten by using keyword parameters. :param forward_in: Forward reads you want to quality trim. :param returncmd: If set to true, function will return the cmd string passed to subprocess as a third value. :param forward_out: Output forward reads. :param reverse_in: Reverse input reads. Don't need to be specified if _R1/_R2 naming convention is used. :param reverse_out: Reverse output reads. Don't need to be specified if _R1/_R2 convention is used. :param kwargs: Other arguments to give to bbduk in parameter=argument format. See bbduk documentation for full list. :return: out and err: stdout string and stderr string from running bbduk. """ options = kwargs_to_string(kwargs) cmd = 'which bbduk.sh' try: subprocess.check_output(cmd.split()).decode('utf-8') except subprocess.CalledProcessError: print('ERROR: Could not find bbduk. Plase check that the bbtools package is installed and on your $PATH.\n\n') raise FileNotFoundError if os.path.isfile(forward_in.replace('_R1', '_R2')) and reverse_in == 'NA' and '_R1' in forward_in: reverse_in = forward_in.replace('_R1', '_R2') if reverse_out == 'NA': if '_R1' in forward_out: reverse_out = forward_out.replace('_R1', '_R2') else: raise ValueError('If you do not specify reverse_out, forward_out must contain R1.\n\n') cmd = 'bbduk.sh in1={f_in} in2={r_in} out1={f_out} out2={r_out} qtrim=w trimq=20 k=25 minlength=50 ' \ 'forcetrimleft=15 ref=adapters overwrite hdist=1 tpe tbo{optn}'\ .format(f_in=forward_in, r_in=reverse_in, f_out=forward_out, r_out=reverse_out, optn=options) elif reverse_in == 'NA': cmd = 'bbduk.sh in={f_in} out={f_out} qtrim=w trimq=20 k=25 minlength=50 forcetrimleft=15' \ ' ref=adapters overwrite hdist=1 tpe tbo{optn}'\ .format(f_in=forward_in, f_out=forward_out, optn=options) else: if reverse_out == 'NA': raise ValueError('Reverse output reads must be specified.') cmd = 'bbduk.sh in1={f_in} in2={r_in} out1={f_out} out2={r_out} qtrim=w trimq=20 k=25 minlength=50 ' \ 'forcetrimleft=15 ref=adapters overwrite hdist=1 tpe tbo{optn}'\ .format(f_in=forward_in, r_in=reverse_in, f_out=forward_out, r_out=reverse_out, optn=options) out, err = run_subprocess(cmd) if returncmd: return out, err, cmd else: return out, err
['def', 'bbduk_trim', '(', 'forward_in', ',', 'forward_out', ',', 'reverse_in', '=', "'NA'", ',', 'reverse_out', '=', "'NA'", ',', 'returncmd', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'options', '=', 'kwargs_to_string', '(', 'kwargs', ')', 'cmd', '=', "'which bbduk.sh'", 'try', ':', 'subprocess', '.', 'check_output', '(', 'cmd', '.', 'split', '(', ')', ')', '.', 'decode', '(', "'utf-8'", ')', 'except', 'subprocess', '.', 'CalledProcessError', ':', 'print', '(', "'ERROR: Could not find bbduk. Plase check that the bbtools package is installed and on your $PATH.\\n\\n'", ')', 'raise', 'FileNotFoundError', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'forward_in', '.', 'replace', '(', "'_R1'", ',', "'_R2'", ')', ')', 'and', 'reverse_in', '==', "'NA'", 'and', "'_R1'", 'in', 'forward_in', ':', 'reverse_in', '=', 'forward_in', '.', 'replace', '(', "'_R1'", ',', "'_R2'", ')', 'if', 'reverse_out', '==', "'NA'", ':', 'if', "'_R1'", 'in', 'forward_out', ':', 'reverse_out', '=', 'forward_out', '.', 'replace', '(', "'_R1'", ',', "'_R2'", ')', 'else', ':', 'raise', 'ValueError', '(', "'If you do not specify reverse_out, forward_out must contain R1.\\n\\n'", ')', 'cmd', '=', "'bbduk.sh in1={f_in} in2={r_in} out1={f_out} out2={r_out} qtrim=w trimq=20 k=25 minlength=50 '", "'forcetrimleft=15 ref=adapters overwrite hdist=1 tpe tbo{optn}'", '.', 'format', '(', 'f_in', '=', 'forward_in', ',', 'r_in', '=', 'reverse_in', ',', 'f_out', '=', 'forward_out', ',', 'r_out', '=', 'reverse_out', ',', 'optn', '=', 'options', ')', 'elif', 'reverse_in', '==', "'NA'", ':', 'cmd', '=', "'bbduk.sh in={f_in} out={f_out} qtrim=w trimq=20 k=25 minlength=50 forcetrimleft=15'", "' ref=adapters overwrite hdist=1 tpe tbo{optn}'", '.', 'format', '(', 'f_in', '=', 'forward_in', ',', 'f_out', '=', 'forward_out', ',', 'optn', '=', 'options', ')', 'else', ':', 'if', 'reverse_out', '==', "'NA'", ':', 'raise', 'ValueError', '(', "'Reverse output reads must be specified.'", ')', 'cmd', '=', "'bbduk.sh in1={f_in} in2={r_in} out1={f_out} out2={r_out} qtrim=w trimq=20 k=25 minlength=50 '", "'forcetrimleft=15 ref=adapters overwrite hdist=1 tpe tbo{optn}'", '.', 'format', '(', 'f_in', '=', 'forward_in', ',', 'r_in', '=', 'reverse_in', ',', 'f_out', '=', 'forward_out', ',', 'r_out', '=', 'reverse_out', ',', 'optn', '=', 'options', ')', 'out', ',', 'err', '=', 'run_subprocess', '(', 'cmd', ')', 'if', 'returncmd', ':', 'return', 'out', ',', 'err', ',', 'cmd', 'else', ':', 'return', 'out', ',', 'err']
Wrapper for using bbduk to quality trim reads. Contains arguments used in OLC Assembly Pipeline, but these can be overwritten by using keyword parameters. :param forward_in: Forward reads you want to quality trim. :param returncmd: If set to true, function will return the cmd string passed to subprocess as a third value. :param forward_out: Output forward reads. :param reverse_in: Reverse input reads. Don't need to be specified if _R1/_R2 naming convention is used. :param reverse_out: Reverse output reads. Don't need to be specified if _R1/_R2 convention is used. :param kwargs: Other arguments to give to bbduk in parameter=argument format. See bbduk documentation for full list. :return: out and err: stdout string and stderr string from running bbduk.
['Wrapper', 'for', 'using', 'bbduk', 'to', 'quality', 'trim', 'reads', '.', 'Contains', 'arguments', 'used', 'in', 'OLC', 'Assembly', 'Pipeline', 'but', 'these', 'can', 'be', 'overwritten', 'by', 'using', 'keyword', 'parameters', '.', ':', 'param', 'forward_in', ':', 'Forward', 'reads', 'you', 'want', 'to', 'quality', 'trim', '.', ':', 'param', 'returncmd', ':', 'If', 'set', 'to', 'true', 'function', 'will', 'return', 'the', 'cmd', 'string', 'passed', 'to', 'subprocess', 'as', 'a', 'third', 'value', '.', ':', 'param', 'forward_out', ':', 'Output', 'forward', 'reads', '.', ':', 'param', 'reverse_in', ':', 'Reverse', 'input', 'reads', '.', 'Don', 't', 'need', 'to', 'be', 'specified', 'if', '_R1', '/', '_R2', 'naming', 'convention', 'is', 'used', '.', ':', 'param', 'reverse_out', ':', 'Reverse', 'output', 'reads', '.', 'Don', 't', 'need', 'to', 'be', 'specified', 'if', '_R1', '/', '_R2', 'convention', 'is', 'used', '.', ':', 'param', 'kwargs', ':', 'Other', 'arguments', 'to', 'give', 'to', 'bbduk', 'in', 'parameter', '=', 'argument', 'format', '.', 'See', 'bbduk', 'documentation', 'for', 'full', 'list', '.', ':', 'return', ':', 'out', 'and', 'err', ':', 'stdout', 'string', 'and', 'stderr', 'string', 'from', 'running', 'bbduk', '.']
train
https://github.com/OLC-Bioinformatics/ConFindr/blob/4c292617c3f270ebd5ff138cbc5a107f6d01200d/confindr_src/wrappers/bbtools.py#L59-L112
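A hypothetical call for this record's wrapper. The paths are placeholders, the import path follows this record's file location, and the trailing keyword is forwarded to bbduk verbatim via kwargs_to_string; reverse reads are inferred from the _R1/_R2 naming convention.

from confindr_src.wrappers.bbtools import bbduk_trim  # import path assumed

out, err, cmd = bbduk_trim(forward_in='raw/sample_R1.fastq.gz',
                           forward_out='clean/sample_R1.fastq.gz',
                           returncmd=True,
                           ziplevel=2)  # extra bbduk option, passed as ziplevel=2
print(cmd)  # inspect the exact bbduk.sh command that was run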
2,191
dwavesystems/dwave_networkx
dwave_networkx/drawing/chimera_layout.py
draw_chimera_embedding
def draw_chimera_embedding(G, *args, **kwargs): """Draws an embedding onto the chimera graph G, according to layout. If interaction_edges is not None, then only display the couplers in that list. If embedded_graph is not None, then only display the couplers between chains with intended couplings according to embedded_graph. Parameters ---------- G : NetworkX graph Should be a Chimera graph or a subgraph of a Chimera graph. emb : dict A dict of chains associated with each node in G. Should be of the form {node: chain, ...}. Chains should be iterables of qubit labels (qubits are nodes in G). embedded_graph : NetworkX graph (optional, default None) A graph which contains all keys of emb as nodes. If specified, edges of G will be considered interactions if and only if they exist between two chains of emb if their keys are connected by an edge in embedded_graph interaction_edges : list (optional, default None) A list of edges which will be used as interactions. show_labels: boolean (optional, default False) If show_labels is True, then each chain in emb is labelled with its key. chain_color : dict (optional, default None) A dict of colors associated with each key in emb. Should be of the form {node: rgba_color, ...}. Colors should be length-4 tuples of floats between 0 and 1 inclusive. If chain_color is None, each chain will be assigned a different color. unused_color : tuple (optional, default (0.9,0.9,0.9,1.0)) The color to use for nodes and edges of G which are not involved in chains, and edges which are neither chain edges nor interactions. If unused_color is None, these nodes and edges will not be shown at all. kwargs : optional keywords See networkx.draw_networkx() for a description of optional keywords, with the exception of the `pos` parameter which is not used by this function. If `linear_biases` or `quadratic_biases` are provided, any provided `node_color` or `edge_color` arguments are ignored. """ draw_embedding(G, chimera_layout(G), *args, **kwargs)
python
def draw_chimera_embedding(G, *args, **kwargs): """Draws an embedding onto the chimera graph G, according to layout. If interaction_edges is not None, then only display the couplers in that list. If embedded_graph is not None, then only display the couplers between chains with intended couplings according to embedded_graph. Parameters ---------- G : NetworkX graph Should be a Chimera graph or a subgraph of a Chimera graph. emb : dict A dict of chains associated with each node in G. Should be of the form {node: chain, ...}. Chains should be iterables of qubit labels (qubits are nodes in G). embedded_graph : NetworkX graph (optional, default None) A graph which contains all keys of emb as nodes. If specified, edges of G will be considered interactions if and only if they exist between two chains of emb if their keys are connected by an edge in embedded_graph interaction_edges : list (optional, default None) A list of edges which will be used as interactions. show_labels: boolean (optional, default False) If show_labels is True, then each chain in emb is labelled with its key. chain_color : dict (optional, default None) A dict of colors associated with each key in emb. Should be of the form {node: rgba_color, ...}. Colors should be length-4 tuples of floats between 0 and 1 inclusive. If chain_color is None, each chain will be assigned a different color. unused_color : tuple (optional, default (0.9,0.9,0.9,1.0)) The color to use for nodes and edges of G which are not involved in chains, and edges which are neither chain edges nor interactions. If unused_color is None, these nodes and edges will not be shown at all. kwargs : optional keywords See networkx.draw_networkx() for a description of optional keywords, with the exception of the `pos` parameter which is not used by this function. If `linear_biases` or `quadratic_biases` are provided, any provided `node_color` or `edge_color` arguments are ignored. """ draw_embedding(G, chimera_layout(G), *args, **kwargs)
['def', 'draw_chimera_embedding', '(', 'G', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'draw_embedding', '(', 'G', ',', 'chimera_layout', '(', 'G', ')', ',', '*', 'args', ',', '*', '*', 'kwargs', ')']
Draws an embedding onto the chimera graph G, according to layout. If interaction_edges is not None, then only display the couplers in that list. If embedded_graph is not None, then only display the couplers between chains with intended couplings according to embedded_graph. Parameters ---------- G : NetworkX graph Should be a Chimera graph or a subgraph of a Chimera graph. emb : dict A dict of chains associated with each node in G. Should be of the form {node: chain, ...}. Chains should be iterables of qubit labels (qubits are nodes in G). embedded_graph : NetworkX graph (optional, default None) A graph which contains all keys of emb as nodes. If specified, edges of G will be considered interactions if and only if they exist between two chains of emb if their keys are connected by an edge in embedded_graph interaction_edges : list (optional, default None) A list of edges which will be used as interactions. show_labels: boolean (optional, default False) If show_labels is True, then each chain in emb is labelled with its key. chain_color : dict (optional, default None) A dict of colors associated with each key in emb. Should be of the form {node: rgba_color, ...}. Colors should be length-4 tuples of floats between 0 and 1 inclusive. If chain_color is None, each chain will be assigned a different color. unused_color : tuple (optional, default (0.9,0.9,0.9,1.0)) The color to use for nodes and edges of G which are not involved in chains, and edges which are neither chain edges nor interactions. If unused_color is None, these nodes and edges will not be shown at all. kwargs : optional keywords See networkx.draw_networkx() for a description of optional keywords, with the exception of the `pos` parameter which is not used by this function. If `linear_biases` or `quadratic_biases` are provided, any provided `node_color` or `edge_color` arguments are ignored.
['Draws', 'an', 'embedding', 'onto', 'the', 'chimera', 'graph', 'G', 'according', 'to', 'layout', '.']
train
https://github.com/dwavesystems/dwave_networkx/blob/9ea1223ddbc7e86db2f90b8b23e250e6642c3d68/dwave_networkx/drawing/chimera_layout.py#L246-L292
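A runnable sketch for this record's function on a single Chimera unit cell; the embedded two-node problem is made up, with chains chosen so each is a valid coupler in the cell.

import matplotlib.pyplot as plt
import dwave_networkx as dnx

G = dnx.chimera_graph(1, 1, 4)    # one 4+4 bipartite unit cell
emb = {'a': [0, 4], 'b': [1, 5]}  # two chains of physical qubits
dnx.draw_chimera_embedding(G, emb, show_labels=True)
plt.show()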
2,192
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/exmaralda.py
ExmaraldaDocumentGraph.__add_token_annotation_tier
def __add_token_annotation_tier(self, tier): """ adds a tier to the document graph, in which each event annotates exactly one token. """ for i, event in enumerate(tier.iter('event')): anno_key = '{0}:{1}'.format(self.ns, tier.attrib['category']) anno_val = event.text if event.text else '' self.node[event.attrib['start']][anno_key] = anno_val
python
def __add_token_annotation_tier(self, tier): """ adds a tier to the document graph, in which each event annotates exactly one token. """ for i, event in enumerate(tier.iter('event')): anno_key = '{0}:{1}'.format(self.ns, tier.attrib['category']) anno_val = event.text if event.text else '' self.node[event.attrib['start']][anno_key] = anno_val
['def', '__add_token_annotation_tier', '(', 'self', ',', 'tier', ')', ':', 'for', 'i', ',', 'event', 'in', 'enumerate', '(', 'tier', '.', 'iter', '(', "'event'", ')', ')', ':', 'anno_key', '=', "'{0}:{1}'", '.', 'format', '(', 'self', '.', 'ns', ',', 'tier', '.', 'attrib', '[', "'category'", ']', ')', 'anno_val', '=', 'event', '.', 'text', 'if', 'event', '.', 'text', 'else', "''", 'self', '.', 'node', '[', 'event', '.', 'attrib', '[', "'start'", ']', ']', '[', 'anno_key', ']', '=', 'anno_val']
adds a tier to the document graph, in which each event annotates exactly one token.
['adds', 'a', 'tier', 'to', 'the', 'document', 'graph', 'in', 'which', 'each', 'event', 'annotates', 'exactly', 'one', 'token', '.']
train
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/exmaralda.py#L366-L374
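A standalone sketch of the annotation step this private helper performs, using only the stdlib; the tier XML, token ids, and namespace value are illustrative assumptions.

from xml.etree import ElementTree

tier = ElementTree.fromstring(
    '<tier category="pos">'
    '<event start="T0" end="T1">NN</event>'
    '<event start="T1" end="T2">VB</event>'
    '</tier>')

ns = 'exmaralda'
nodes = {'T0': {}, 'T1': {}}  # stand-in for the graph's token nodes
for event in tier.iter('event'):
    anno_key = '{0}:{1}'.format(ns, tier.attrib['category'])
    nodes[event.attrib['start']][anno_key] = event.text or ''

print(nodes)  # {'T0': {'exmaralda:pos': 'NN'}, 'T1': {'exmaralda:pos': 'VB'}}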
2,193
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/widgets/browser.py
CBLevel.current_changed
def current_changed(self, i): """Slot for when the current index changes. Emits the :data:`AbstractLevel.new_root` signal. :param i: the new current index :type i: int :returns: None :rtype: None :raises: None """ m = self.model() ri = self.rootModelIndex() index = m.index(i, 0, ri) self.new_root.emit(index)
python
def current_changed(self, i): """Slot for when the current index changes. Emits the :data:`AbstractLevel.new_root` signal. :param i: the new current index :type i: int :returns: None :rtype: None :raises: None """ m = self.model() ri = self.rootModelIndex() index = m.index(i, 0, ri) self.new_root.emit(index)
['def', 'current_changed', '(', 'self', ',', 'i', ')', ':', 'm', '=', 'self', '.', 'model', '(', ')', 'ri', '=', 'self', '.', 'rootModelIndex', '(', ')', 'index', '=', 'm', '.', 'index', '(', 'i', ',', '0', ',', 'ri', ')', 'self', '.', 'new_root', '.', 'emit', '(', 'index', ')']
Slot for when the current index changes. Emits the :data:`AbstractLevel.new_root` signal. :param i: the new current index :type i: int :returns: None :rtype: None :raises: None
['Slot', 'for', 'when', 'the', 'current', 'index', 'changes', '.', 'Emits', 'the', ':', 'data', ':', 'AbstractLevel', '.', 'new_root', 'signal', '.']
train
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/browser.py#L345-L358
2,194
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
namePop
def namePop(ctxt): """Pops the top element name from the name stack """ if ctxt is None: ctxt__o = None else: ctxt__o = ctxt._o ret = libxml2mod.namePop(ctxt__o) return ret
python
def namePop(ctxt): """Pops the top element name from the name stack """ if ctxt is None: ctxt__o = None else: ctxt__o = ctxt._o ret = libxml2mod.namePop(ctxt__o) return ret
['def', 'namePop', '(', 'ctxt', ')', ':', 'if', 'ctxt', 'is', 'None', ':', 'ctxt__o', '=', 'None', 'else', ':', 'ctxt__o', '=', 'ctxt', '.', '_o', 'ret', '=', 'libxml2mod', '.', 'namePop', '(', 'ctxt__o', ')', 'return', 'ret']
Pops the top element name from the name stack
['Pops', 'the', 'top', 'element', 'name', 'from', 'the', 'name', 'stack']
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L1517-L1522
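A minimal sketch, assuming the classic libxml2 Python bindings. namePop is a low-level parser helper; whether a name is on the stack mid-parse is an implementation detail, so the printed value may be an element name or None.

import libxml2

# Push-parse an unclosed fragment so the parser has open elements.
ctxt = libxml2.createPushParser(None, '<root><child>', 13, 'sketch.xml')
print(libxml2.namePop(ctxt))  # innermost open element name, or None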
2,195
polyaxon/rhea
rhea/manager.py
Rhea.get_string
def get_string(self, key, is_list=False, is_optional=False, is_secret=False, is_local=False, default=None, options=None): """ Get the value corresponding to the key and convert it to `str`/`list(str)`. Args: key: the dict key. is_list: If this is one element or a list of elements. is_optional: To raise an error if key was not found. is_secret: If the key is a secret. is_local: If the key is local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. Returns: `str`: value corresponding to the key. """ if is_list: return self._get_typed_list_value(key=key, target_type=str, type_convert=str, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) return self._get_typed_value(key=key, target_type=str, type_convert=str, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options)
python
def get_string(self, key, is_list=False, is_optional=False, is_secret=False, is_local=False, default=None, options=None): """ Get the value corresponding to the key and convert it to `str`/`list(str)`. Args: key: the dict key. is_list: If this is one element or a list of elements. is_optional: To raise an error if key was not found. is_secret: If the key is a secret. is_local: If the key is local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. Returns: `str`: value corresponding to the key. """ if is_list: return self._get_typed_list_value(key=key, target_type=str, type_convert=str, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) return self._get_typed_value(key=key, target_type=str, type_convert=str, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options)
['def', 'get_string', '(', 'self', ',', 'key', ',', 'is_list', '=', 'False', ',', 'is_optional', '=', 'False', ',', 'is_secret', '=', 'False', ',', 'is_local', '=', 'False', ',', 'default', '=', 'None', ',', 'options', '=', 'None', ')', ':', 'if', 'is_list', ':', 'return', 'self', '.', '_get_typed_list_value', '(', 'key', '=', 'key', ',', 'target_type', '=', 'str', ',', 'type_convert', '=', 'str', ',', 'is_optional', '=', 'is_optional', ',', 'is_secret', '=', 'is_secret', ',', 'is_local', '=', 'is_local', ',', 'default', '=', 'default', ',', 'options', '=', 'options', ')', 'return', 'self', '.', '_get_typed_value', '(', 'key', '=', 'key', ',', 'target_type', '=', 'str', ',', 'type_convert', '=', 'str', ',', 'is_optional', '=', 'is_optional', ',', 'is_secret', '=', 'is_secret', ',', 'is_local', '=', 'is_local', ',', 'default', '=', 'default', ',', 'options', '=', 'options', ')']
Get the value corresponding to the key and convert it to `str`/`list(str)`. Args: key: the dict key. is_list: If this is one element or a list of elements. is_optional: To raise an error if key was not found. is_secret: If the key is a secret. is_local: If the key is local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. Returns: `str`: value corresponding to the key.
['Get', 'the', 'value', 'corresponding', 'to', 'the', 'key', 'and', 'convert', 'it', 'to', 'str', '/', 'list', '(', 'str', ')', '.']
train
https://github.com/polyaxon/rhea/blob/f47b59777cd996d834a0497a1ab442541aaa8a62/rhea/manager.py#L189-L229
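A usage sketch for this record's method; constructing the manager from an in-memory dict via Rhea.read_configs is an assumption about the public API, and the key names are made up.

from rhea import Rhea

config = Rhea.read_configs([{'SERVICE_ENV': 'staging'}])  # assumed constructor
env = config.get_string('SERVICE_ENV', options=('staging', 'production'))
fallback = config.get_string('NOT_SET', is_optional=True, default='local')
print(env, fallback)  # staging local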
2,196
tanghaibao/jcvi
jcvi/utils/brewer2mpl.py
get_map
def get_map(name, map_type, number, reverse=False): """ Return a `BrewerMap` representation of the specified color map. Parameters ---------- name : str Name of color map. Use `print_maps` to see available color maps. map_type : {'Sequential', 'Diverging', 'Qualitative'} Select color map type. number : int Number of defined colors in color map. reverse : bool, optional Set to True to get the reversed color map. """ number = str(number) map_type = map_type.lower().capitalize() # check for valid type if map_type not in MAP_TYPES: s = 'Invalid map type, must be one of {0}'.format(MAP_TYPES) raise ValueError(s) # make a dict of lower case map name to map name so this can be # insensitive to case. # this would be a perfect spot for a dict comprehension but going to # wait on that to preserve 2.6 compatibility. # map_names = {k.lower(): k for k in COLOR_MAPS[map_type].iterkeys()} map_names = dict((k.lower(), k) for k in COLOR_MAPS[map_type].keys()) # check for valid name if name.lower() not in map_names: s = 'Invalid color map name {0!r} for type {1!r}.\n' s = s.format(name, map_type) valid_names = [str(k) for k in COLOR_MAPS[map_type].keys()] valid_names.sort() s += 'Valid names are: {0}'.format(valid_names) raise ValueError(s) name = map_names[name.lower()] # check for valid number if number not in COLOR_MAPS[map_type][name]: s = 'Invalid number for map type {0!r} and name {1!r}.\n' s = s.format(map_type, str(name)) valid_numbers = [int(k) for k in COLOR_MAPS[map_type][name].keys()] valid_numbers.sort() s += 'Valid numbers are : {0}'.format(valid_numbers) raise ValueError(s) colors = COLOR_MAPS[map_type][name][number]['Colors'] if reverse: name += '_r' colors = [x for x in reversed(colors)] return BrewerMap(name, map_type, colors)
python
def get_map(name, map_type, number, reverse=False): """ Return a `BrewerMap` representation of the specified color map. Parameters ---------- name : str Name of color map. Use `print_maps` to see available color maps. map_type : {'Sequential', 'Diverging', 'Qualitative'} Select color map type. number : int Number of defined colors in color map. reverse : bool, optional Set to True to get the reversed color map. """ number = str(number) map_type = map_type.lower().capitalize() # check for valid type if map_type not in MAP_TYPES: s = 'Invalid map type, must be one of {0}'.format(MAP_TYPES) raise ValueError(s) # make a dict of lower case map name to map name so this can be # insensitive to case. # this would be a perfect spot for a dict comprehension but going to # wait on that to preserve 2.6 compatibility. # map_names = {k.lower(): k for k in COLOR_MAPS[map_type].iterkeys()} map_names = dict((k.lower(), k) for k in COLOR_MAPS[map_type].keys()) # check for valid name if name.lower() not in map_names: s = 'Invalid color map name {0!r} for type {1!r}.\n' s = s.format(name, map_type) valid_names = [str(k) for k in COLOR_MAPS[map_type].keys()] valid_names.sort() s += 'Valid names are: {0}'.format(valid_names) raise ValueError(s) name = map_names[name.lower()] # check for valid number if number not in COLOR_MAPS[map_type][name]: s = 'Invalid number for map type {0!r} and name {1!r}.\n' s = s.format(map_type, str(name)) valid_numbers = [int(k) for k in COLOR_MAPS[map_type][name].keys()] valid_numbers.sort() s += 'Valid numbers are : {0}'.format(valid_numbers) raise ValueError(s) colors = COLOR_MAPS[map_type][name][number]['Colors'] if reverse: name += '_r' colors = [x for x in reversed(colors)] return BrewerMap(name, map_type, colors)
['def', 'get_map', '(', 'name', ',', 'map_type', ',', 'number', ',', 'reverse', '=', 'False', ')', ':', 'number', '=', 'str', '(', 'number', ')', 'map_type', '=', 'map_type', '.', 'lower', '(', ')', '.', 'capitalize', '(', ')', '# check for valid type', 'if', 'map_type', 'not', 'in', 'MAP_TYPES', ':', 's', '=', "'Invalid map type, must be one of {0}'", '.', 'format', '(', 'MAP_TYPES', ')', 'raise', 'ValueError', '(', 's', ')', '# make a dict of lower case map name to map name so this can be', '# insensitive to case.', '# this would be a perfect spot for a dict comprehension but going to', '# wait on that to preserve 2.6 compatibility.', '# map_names = {k.lower(): k for k in COLOR_MAPS[map_type].iterkeys()}', 'map_names', '=', 'dict', '(', '(', 'k', '.', 'lower', '(', ')', ',', 'k', ')', 'for', 'k', 'in', 'COLOR_MAPS', '[', 'map_type', ']', '.', 'keys', '(', ')', ')', '# check for valid name', 'if', 'name', '.', 'lower', '(', ')', 'not', 'in', 'map_names', ':', 's', '=', "'Invalid color map name {0!r} for type {1!r}.\\n'", 's', '=', 's', '.', 'format', '(', 'name', ',', 'map_type', ')', 'valid_names', '=', '[', 'str', '(', 'k', ')', 'for', 'k', 'in', 'COLOR_MAPS', '[', 'map_type', ']', '.', 'keys', '(', ')', ']', 'valid_names', '.', 'sort', '(', ')', 's', '+=', "'Valid names are: {0}'", '.', 'format', '(', 'valid_names', ')', 'raise', 'ValueError', '(', 's', ')', 'name', '=', 'map_names', '[', 'name', '.', 'lower', '(', ')', ']', '# check for valid number', 'if', 'number', 'not', 'in', 'COLOR_MAPS', '[', 'map_type', ']', '[', 'name', ']', ':', 's', '=', "'Invalid number for map type {0!r} and name {1!r}.\\n'", 's', '=', 's', '.', 'format', '(', 'map_type', ',', 'str', '(', 'name', ')', ')', 'valid_numbers', '=', '[', 'int', '(', 'k', ')', 'for', 'k', 'in', 'COLOR_MAPS', '[', 'map_type', ']', '[', 'name', ']', '.', 'keys', '(', ')', ']', 'valid_numbers', '.', 'sort', '(', ')', 's', '+=', "'Valid numbers are : {0}'", '.', 'format', '(', 'valid_numbers', ')', 'raise', 'ValueError', '(', 's', ')', 'colors', '=', 'COLOR_MAPS', '[', 'map_type', ']', '[', 'name', ']', '[', 'number', ']', '[', "'Colors'", ']', 'if', 'reverse', ':', 'name', '+=', "'_r'", 'colors', '=', '[', 'x', 'for', 'x', 'in', 'reversed', '(', 'colors', ')', ']', 'return', 'BrewerMap', '(', 'name', ',', 'map_type', ',', 'colors', ')']
Return a `BrewerMap` representation of the specified color map. Parameters ---------- name : str Name of color map. Use `print_maps` to see available color maps. map_type : {'Sequential', 'Diverging', 'Qualitative'} Select color map type. number : int Number of defined colors in color map. reverse : bool, optional Set to True to get the reversed color map.
['Return', 'a', 'BrewerMap', 'representation', 'of', 'the', 'specified', 'color', 'map', '.']
train
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/brewer2mpl.py#L240-L297
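A usage sketch; the import path follows this record's file location, and mpl_colors is the attribute the upstream brewer2mpl BrewerMap exposes for matplotlib-ready colors (assumed to hold for this vendored copy).

from jcvi.utils.brewer2mpl import get_map  # import path assumed

bmap = get_map('RdBu', 'diverging', 9, reverse=True)  # lookup is case-insensitive
print(bmap.name)        # 'RdBu_r'
print(bmap.mpl_colors)  # nine RGB tuples in reversed order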
2,197
spotify/ulogger
ulogger/stackdriver.py
CloudLoggingHandlerBuilder.get_handler
def get_handler(self): """Create a fully configured CloudLoggingHandler. Returns: (obj): Instance of `google.cloud.logging.handlers. CloudLoggingHandler` """ gcl_client = gcl_logging.Client( project=self.project_id, credentials=self.credentials) handler = gcl_handlers.CloudLoggingHandler( gcl_client, resource=self.resource, labels={ 'resource_id': self.instance_id, 'resource_project': self.project_id, 'resource_zone': self.zone, 'resource_host': self.hostname }) handler.setFormatter(self.get_formatter()) self._set_worker_thread_level() return handler
python
def get_handler(self): """Create a fully configured CloudLoggingHandler. Returns: (obj): Instance of `google.cloud.logging.handlers. CloudLoggingHandler` """ gcl_client = gcl_logging.Client( project=self.project_id, credentials=self.credentials) handler = gcl_handlers.CloudLoggingHandler( gcl_client, resource=self.resource, labels={ 'resource_id': self.instance_id, 'resource_project': self.project_id, 'resource_zone': self.zone, 'resource_host': self.hostname }) handler.setFormatter(self.get_formatter()) self._set_worker_thread_level() return handler
['def', 'get_handler', '(', 'self', ')', ':', 'gcl_client', '=', 'gcl_logging', '.', 'Client', '(', 'project', '=', 'self', '.', 'project_id', ',', 'credentials', '=', 'self', '.', 'credentials', ')', 'handler', '=', 'gcl_handlers', '.', 'CloudLoggingHandler', '(', 'gcl_client', ',', 'resource', '=', 'self', '.', 'resource', ',', 'labels', '=', '{', "'resource_id'", ':', 'self', '.', 'instance_id', ',', "'resource_project'", ':', 'self', '.', 'project_id', ',', "'resource_zone'", ':', 'self', '.', 'zone', ',', "'resource_host'", ':', 'self', '.', 'hostname', '}', ')', 'handler', '.', 'setFormatter', '(', 'self', '.', 'get_formatter', '(', ')', ')', 'self', '.', '_set_worker_thread_level', '(', ')', 'return', 'handler']
Create a fully configured CloudLoggingHandler. Returns: (obj): Instance of `google.cloud.logging.handlers. CloudLoggingHandler`
['Create', 'a', 'fully', 'configured', 'CloudLoggingHandler', '.']
train
https://github.com/spotify/ulogger/blob/c59ced69e55b400e9c7a3688145fe3e8cb89db13/ulogger/stackdriver.py#L173-L194
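A minimal sketch, assuming `builder` is an already-constructed CloudLoggingHandlerBuilder (its constructor arguments are not shown in this record); attaching the returned handler is plain stdlib logging.

import logging

def attach_stackdriver(builder, name='my-service'):
    # get_handler() returns a fully configured CloudLoggingHandler.
    logger = logging.getLogger(name)
    logger.addHandler(builder.get_handler())
    logger.setLevel(logging.INFO)
    return logger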
2,198
ralphhaygood/sklearn-gbmi
sklearn_gbmi/sklearn_gbmi.py
h_all_pairs
def h_all_pairs(gbm, array_or_frame, indices_or_columns = 'all'): """ PURPOSE Compute Friedman and Popescu's two-variable H statistic, in order to look for an interaction in the passed gradient- boosting model between each pair of variables represented by the elements of the passed array or frame and specified by the passed indices or columns. See Jerome H. Friedman and Bogdan E. Popescu, 2008, "Predictive learning via rule ensembles", Ann. Appl. Stat. 2:916-954, http://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908046, s. 8.1. ARGUMENTS gbm should be a scikit-learn gradient-boosting model (instance of sklearn.ensemble.GradientBoostingClassifier or sklearn.ensemble.GradientBoostingRegressor) that has been fitted to array_or_frame (and a target, not used here). array_or_frame should be a two-dimensional NumPy array or a pandas data frame (instance of numpy.ndarray or pandas .DataFrame). indices_or_columns is optional, with default value 'all'. It should be 'all' or a list of indices of columns of array_or_frame if array_or_frame is a NumPy array or a list of columns of array_or_frame if array_or_frame is a pandas data frame. If it is 'all', then all columns of array_or_frame are used. RETURNS A dict whose keys are pairs (2-tuples) of indices or columns and whose values are the H statistic of the pairs of variables or NaN if a computation is spoiled by weak main effects and rounding errors. H varies from 0 to 1. The larger H, the stronger the evidence for an interaction between a pair of variables. EXAMPLE Friedman and Popescu's (2008) formula (44) for every j and k corresponds to h_all_pairs(F, x) NOTES 1. Per Friedman and Popescu, only variables with strong main effects should be examined for interactions. Strengths of main effects are available as gbm.feature_importances_ once gbm has been fitted. 2. Per Friedman and Popescu, collinearity among variables can lead to interactions in gbm that are not present in the target function. To forestall such spurious interactions, check for strong correlations among variables before fitting gbm. """ if gbm.max_depth < 2: raise Exception("gbm.max_depth must be at least 2.") check_args_contd(array_or_frame, indices_or_columns) arr, model_inds = get_arr_and_model_inds(array_or_frame, indices_or_columns) width = arr.shape[1] f_vals = {} for n in [2, 1]: for inds in itertools.combinations(range(width), n): f_vals[inds] = compute_f_vals(gbm, model_inds, arr, inds) h_vals = {} for inds in itertools.combinations(range(width), 2): h_vals[inds] = compute_h_val(f_vals, arr, inds) if indices_or_columns != 'all': h_vals = {tuple(model_inds[(inds,)]): h_vals[inds] for inds in h_vals.keys()} if not isinstance(array_or_frame, np.ndarray): all_cols = array_or_frame.columns.values h_vals = {tuple(all_cols[(inds,)]): h_vals[inds] for inds in h_vals.keys()} return h_vals
python
def h_all_pairs(gbm, array_or_frame, indices_or_columns = 'all'): """ PURPOSE Compute Friedman and Popescu's two-variable H statistic, in order to look for an interaction in the passed gradient- boosting model between each pair of variables represented by the elements of the passed array or frame and specified by the passed indices or columns. See Jerome H. Friedman and Bogdan E. Popescu, 2008, "Predictive learning via rule ensembles", Ann. Appl. Stat. 2:916-954, http://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908046, s. 8.1. ARGUMENTS gbm should be a scikit-learn gradient-boosting model (instance of sklearn.ensemble.GradientBoostingClassifier or sklearn.ensemble.GradientBoostingRegressor) that has been fitted to array_or_frame (and a target, not used here). array_or_frame should be a two-dimensional NumPy array or a pandas data frame (instance of numpy.ndarray or pandas .DataFrame). indices_or_columns is optional, with default value 'all'. It should be 'all' or a list of indices of columns of array_or_frame if array_or_frame is a NumPy array or a list of columns of array_or_frame if array_or_frame is a pandas data frame. If it is 'all', then all columns of array_or_frame are used. RETURNS A dict whose keys are pairs (2-tuples) of indices or columns and whose values are the H statistic of the pairs of variables or NaN if a computation is spoiled by weak main effects and rounding errors. H varies from 0 to 1. The larger H, the stronger the evidence for an interaction between a pair of variables. EXAMPLE Friedman and Popescu's (2008) formula (44) for every j and k corresponds to h_all_pairs(F, x) NOTES 1. Per Friedman and Popescu, only variables with strong main effects should be examined for interactions. Strengths of main effects are available as gbm.feature_importances_ once gbm has been fitted. 2. Per Friedman and Popescu, collinearity among variables can lead to interactions in gbm that are not present in the target function. To forestall such spurious interactions, check for strong correlations among variables before fitting gbm. """ if gbm.max_depth < 2: raise Exception("gbm.max_depth must be at least 2.") check_args_contd(array_or_frame, indices_or_columns) arr, model_inds = get_arr_and_model_inds(array_or_frame, indices_or_columns) width = arr.shape[1] f_vals = {} for n in [2, 1]: for inds in itertools.combinations(range(width), n): f_vals[inds] = compute_f_vals(gbm, model_inds, arr, inds) h_vals = {} for inds in itertools.combinations(range(width), 2): h_vals[inds] = compute_h_val(f_vals, arr, inds) if indices_or_columns != 'all': h_vals = {tuple(model_inds[(inds,)]): h_vals[inds] for inds in h_vals.keys()} if not isinstance(array_or_frame, np.ndarray): all_cols = array_or_frame.columns.values h_vals = {tuple(all_cols[(inds,)]): h_vals[inds] for inds in h_vals.keys()} return h_vals
['def', 'h_all_pairs', '(', 'gbm', ',', 'array_or_frame', ',', 'indices_or_columns', '=', "'all'", ')', ':', 'if', 'gbm', '.', 'max_depth', '<', '2', ':', 'raise', 'Exception', '(', '"gbm.max_depth must be at least 2."', ')', 'check_args_contd', '(', 'array_or_frame', ',', 'indices_or_columns', ')', 'arr', ',', 'model_inds', '=', 'get_arr_and_model_inds', '(', 'array_or_frame', ',', 'indices_or_columns', ')', 'width', '=', 'arr', '.', 'shape', '[', '1', ']', 'f_vals', '=', '{', '}', 'for', 'n', 'in', '[', '2', ',', '1', ']', ':', 'for', 'inds', 'in', 'itertools', '.', 'combinations', '(', 'range', '(', 'width', ')', ',', 'n', ')', ':', 'f_vals', '[', 'inds', ']', '=', 'compute_f_vals', '(', 'gbm', ',', 'model_inds', ',', 'arr', ',', 'inds', ')', 'h_vals', '=', '{', '}', 'for', 'inds', 'in', 'itertools', '.', 'combinations', '(', 'range', '(', 'width', ')', ',', '2', ')', ':', 'h_vals', '[', 'inds', ']', '=', 'compute_h_val', '(', 'f_vals', ',', 'arr', ',', 'inds', ')', 'if', 'indices_or_columns', '!=', "'all'", ':', 'h_vals', '=', '{', 'tuple', '(', 'model_inds', '[', '(', 'inds', ',', ')', ']', ')', ':', 'h_vals', '[', 'inds', ']', 'for', 'inds', 'in', 'h_vals', '.', 'keys', '(', ')', '}', 'if', 'not', 'isinstance', '(', 'array_or_frame', ',', 'np', '.', 'ndarray', ')', ':', 'all_cols', '=', 'array_or_frame', '.', 'columns', '.', 'values', 'h_vals', '=', '{', 'tuple', '(', 'all_cols', '[', '(', 'inds', ',', ')', ']', ')', ':', 'h_vals', '[', 'inds', ']', 'for', 'inds', 'in', 'h_vals', '.', 'keys', '(', ')', '}', 'return', 'h_vals']
PURPOSE Compute Friedman and Popescu's two-variable H statistic, in order to look for an interaction in the passed gradient- boosting model between each pair of variables represented by the elements of the passed array or frame and specified by the passed indices or columns. See Jerome H. Friedman and Bogdan E. Popescu, 2008, "Predictive learning via rule ensembles", Ann. Appl. Stat. 2:916-954, http://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908046, s. 8.1. ARGUMENTS gbm should be a scikit-learn gradient-boosting model (instance of sklearn.ensemble.GradientBoostingClassifier or sklearn.ensemble.GradientBoostingRegressor) that has been fitted to array_or_frame (and a target, not used here). array_or_frame should be a two-dimensional NumPy array or a pandas data frame (instance of numpy.ndarray or pandas .DataFrame). indices_or_columns is optional, with default value 'all'. It should be 'all' or a list of indices of columns of array_or_frame if array_or_frame is a NumPy array or a list of columns of array_or_frame if array_or_frame is a pandas data frame. If it is 'all', then all columns of array_or_frame are used. RETURNS A dict whose keys are pairs (2-tuples) of indices or columns and whose values are the H statistic of the pairs of variables or NaN if a computation is spoiled by weak main effects and rounding errors. H varies from 0 to 1. The larger H, the stronger the evidence for an interaction between a pair of variables. EXAMPLE Friedman and Popescu's (2008) formula (44) for every j and k corresponds to h_all_pairs(F, x) NOTES 1. Per Friedman and Popescu, only variables with strong main effects should be examined for interactions. Strengths of main effects are available as gbm.feature_importances_ once gbm has been fitted. 2. Per Friedman and Popescu, collinearity among variables can lead to interactions in gbm that are not present in the target function. To forestall such spurious interactions, check for strong correlations among variables before fitting gbm.
['PURPOSE']
train
https://github.com/ralphhaygood/sklearn-gbmi/blob/23a1e7fd50e53d6261379f22a337d8fa4ee6aabe/sklearn_gbmi/sklearn_gbmi.py#L98-L169
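A minimal end-to-end sketch for this record's function: fit a GBM on synthetic data with one real pairwise interaction, then screen every pair. The data and hyperparameters are made up; note that max_depth must be at least 2.

import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn_gbmi import h_all_pairs

rng = np.random.RandomState(0)
X = rng.uniform(size=(500, 3))
y = X[:, 0] * X[:, 1] + X[:, 2] + 0.1 * rng.normal(size=500)

gbm = GradientBoostingRegressor(max_depth=3, random_state=0).fit(X, y)
print(h_all_pairs(gbm, X))  # H for (0, 1) should dominate (0, 2) and (1, 2)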
2,199
jamieleshaw/lurklib
lurklib/optional.py
_Optional.away
def away(self, msg=''): """ Sets/unsets your away status. Optional arguments: * msg='' - Away reason. """ with self.lock: self.send('AWAY :%s' % msg) if self.readable(): msg = self._recv(expected_replies=('306', '305')) if msg[0] == '306': self.is_away = True elif msg[0] == '305': self.is_away = False
python
def away(self, msg=''): """ Sets/unsets your away status. Optional arguments: * msg='' - Away reason. """ with self.lock: self.send('AWAY :%s' % msg) if self.readable(): msg = self._recv(expected_replies=('306', '305')) if msg[0] == '306': self.is_away = True elif msg[0] == '305': self.is_away = False
['def', 'away', '(', 'self', ',', 'msg', '=', "''", ')', ':', 'with', 'self', '.', 'lock', ':', 'self', '.', 'send', '(', "'AWAY :%s'", '%', 'msg', ')', 'if', 'self', '.', 'readable', '(', ')', ':', 'msg', '=', 'self', '.', '_recv', '(', 'expected_replies', '=', '(', "'306'", ',', "'305'", ')', ')', 'if', 'msg', '[', '0', ']', '==', "'306'", ':', 'self', '.', 'is_away', '=', 'True', 'elif', 'msg', '[', '0', ']', '==', "'305'", ':', 'self', '.', 'is_away', '=', 'False']
Sets/unsets your away status. Optional arguments: * msg='' - Away reason.
['Sets', '/', 'unsets', 'your', 'away', 'status', '.', 'Optional', 'arguments', ':', '*', 'msg', '=', '-', 'Away', 'reason', '.']
train
https://github.com/jamieleshaw/lurklib/blob/a861f35d880140422103dd78ec3239814e85fd7e/lurklib/optional.py#L24-L37
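A hypothetical session sketch for this record's method; the Client constructor arguments are assumptions based on lurklib's connect-on-init style.

import lurklib

irc = lurklib.Client(server='irc.example.org', nick='lurkbot',
                     user='lurkbot', real_name='Lurklib Bot')
irc.away('stepping out')  # server reply 306 sets irc.is_away to True
irc.away()                # empty reason unsets away status (reply 305)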