Dataset schema (one record per function):

  repo              string, length 7 to 55
  path              string, length 4 to 223
  url               string, length 87 to 315
  code              string, length 75 to 104k
  code_tokens       list
  docstring         string, length 1 to 46.9k
  docstring_tokens  list
  language          string, 1 distinct value
  partition         string, 3 distinct values
  avg_line_len      float64, 7.91 to 980
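Records with this schema can be consumed straight from a JSON Lines export. A minimal sketch follows, assuming a hypothetical file name records.jsonl and assuming avg_line_len is the mean character length of the code's lines (that definition is a guess, not documented above):

import json

def avg_line_len(code):
    # Mean character length over the snippet's lines (assumed definition).
    lines = code.splitlines() or [""]
    return sum(len(line) for line in lines) / len(lines)

with open("records.jsonl", encoding="utf-8") as handle:
    for raw in handle:
        record = json.loads(raw)
        print(record["repo"], record["partition"],
              round(avg_line_len(record["code"]), 2))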
zeth/inputs
inputs.py
https://github.com/zeth/inputs/blob/a46681dbf77d6ab07834f550e5855c1f50701f99/inputs.py#L1525-L1556
def emulate_wheel(self, data, direction, timeval):
    """Emulate rel values for the mouse wheel.

    In evdev, a single click forwards of the mouse wheel is 1 and
    a click back is -1. Windows uses 120 and -120. We floor divide
    the Windows number by 120. This is fine for the digital scroll
    wheels found on the vast majority of mice. It also works on the
    analogue ball on the top of the Apple mouse.

    What do the analogue scroll wheels found on 200 quid high end
    gaming mice do? If the lowest unit is 120 then we are okay. If
    they report changes of less than 120 units in Windows, then this
    might be an unacceptable loss of precision. Needless to say, I
    don't have such a mouse to test one way or the other.
    """
    if direction == 'x':
        code = 0x06
    elif direction == 'z':
        # Not entirely sure if this exists
        code = 0x07
    else:
        code = 0x08

    if WIN:
        data = data // 120

    return self.create_event_object(
        "Relative",
        code,
        data,
        timeval)
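The normalisation the docstring describes is easy to see in isolation. A minimal sketch with made-up delta values (not library code):

# Windows reports wheel motion in multiples of 120; evdev uses single units.
for win_delta in (120, -120, 240, 60):
    print(win_delta, "->", win_delta // 120)
# 120 -> 1, -120 -> -1, 240 -> 2, 60 -> 0 (a sub-120 delta floors to zero,
# which is the precision loss the docstring worries about)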
[ "def", "emulate_wheel", "(", "self", ",", "data", ",", "direction", ",", "timeval", ")", ":", "if", "direction", "==", "'x'", ":", "code", "=", "0x06", "elif", "direction", "==", "'z'", ":", "# Not enitely sure if this exists", "code", "=", "0x07", "else", ":", "code", "=", "0x08", "if", "WIN", ":", "data", "=", "data", "//", "120", "return", "self", ".", "create_event_object", "(", "\"Relative\"", ",", "code", ",", "data", ",", "timeval", ")" ]
Emulate rel values for the mouse wheel.

In evdev, a single click forwards of the mouse wheel is 1 and a click back is -1. Windows uses 120 and -120. We floor divide the Windows number by 120. This is fine for the digital scroll wheels found on the vast majority of mice. It also works on the analogue ball on the top of the Apple mouse.

What do the analogue scroll wheels found on 200 quid high end gaming mice do? If the lowest unit is 120 then we are okay. If they report changes of less than 120 units in Windows, then this might be an unacceptable loss of precision. Needless to say, I don't have such a mouse to test one way or the other.
[ "Emulate", "rel", "values", "for", "the", "mouse", "wheel", "." ]
python
train
35.28125
SergeySatskiy/cdm-gc-plugin
cdmplugins/gc/__init__.py
https://github.com/SergeySatskiy/cdm-gc-plugin/blob/f6dd59d5dc80d3f8f5ca5bcf49bad86c88be38df/cdmplugins/gc/__init__.py#L140-L161
def populateBufferContextMenu(self, parentMenu):
    """Populates the editing buffer context menu.

    The buffer context menu shown for the current edited/viewed file
    will have an item with a plugin name and subitems which are
    populated here. If no items were populated then the plugin menu
    item will not be shown.

    Note: when a buffer context menu is selected by the user it always
    refers to the current widget. To get access to the current editing
    widget the plugin can use:
    self.ide.currentEditorWidget
    The widget could be of different types and some circumstances
    should be considered, e.g.:
    - it could be a new file which has not been saved yet
    - it could be modified
    - it could be that the disk file has already been deleted
    - etc.
    Having the current widget reference, the plugin is able to
    retrieve the information it needs.
    """
    parentMenu.addAction("Configure", self.configure)
    parentMenu.addAction("Collect garbage", self.__collectGarbage)
[ "def", "populateBufferContextMenu", "(", "self", ",", "parentMenu", ")", ":", "parentMenu", ".", "addAction", "(", "\"Configure\"", ",", "self", ".", "configure", ")", "parentMenu", ".", "addAction", "(", "\"Collect garbage\"", ",", "self", ".", "__collectGarbage", ")" ]
Populates the editing buffer context menu.

The buffer context menu shown for the current edited/viewed file will have an item with a plugin name and subitems which are populated here. If no items were populated then the plugin menu item will not be shown.

Note: when a buffer context menu is selected by the user it always refers to the current widget. To get access to the current editing widget the plugin can use:
self.ide.currentEditorWidget
The widget could be of different types and some circumstances should be considered, e.g.:
- it could be a new file which has not been saved yet
- it could be modified
- it could be that the disk file has already been deleted
- etc.
Having the current widget reference, the plugin is able to retrieve the information it needs.
[ "Populates", "the", "editing", "buffer", "context", "menu", "." ]
python
train
51.454545
nion-software/nionswift
nion/swift/Thumbnails.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/Thumbnails.py#L56-L60
def mark_data_dirty(self):
    """ Called from item to indicate its data or metadata has changed."""
    self.__cache.set_cached_value_dirty(self.__display_item, self.__cache_property_name)
    self.__initialize_cache()
    self.__cached_value_dirty = True
[ "def", "mark_data_dirty", "(", "self", ")", ":", "self", ".", "__cache", ".", "set_cached_value_dirty", "(", "self", ".", "__display_item", ",", "self", ".", "__cache_property_name", ")", "self", ".", "__initialize_cache", "(", ")", "self", ".", "__cached_value_dirty", "=", "True" ]
Called from item to indicate its data or metadata has changed.
[ "Called", "from", "item", "to", "indicate", "its", "data", "or", "metadata", "has", "changed", "." ]
python
train
53.6
PMBio/limix-backup
limix/deprecated/archive/FastVDMM.py
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/deprecated/archive/FastVDMM.py#L145-L199
def fit(self, Params0=None, grad_threshold=1e-2):
    """
    fit a variance component model with the predefined design and
    the initialization and returns all the results
    """
    # GPVD initialization
    lik = limix.CLikNormalNULL()

    # Initial Params
    if Params0 == None:
        n_params = self.C1.getNumberParams()
        n_params += self.C2.getNumberParams()
        Params0 = SP.rand(n_params)

    # MultiGP Framework
    covar = []
    gp = []
    mean = []
    for i in range(self.N):
        covar.append(limix.CLinCombCF())
        covar[i].addCovariance(self.C1)
        covar[i].addCovariance(self.C2)
        coeff = SP.array([self.eigen[i], 1])
        covar[i].setCoeff(coeff)
        mean.append(limix.CLinearMean(self.Yt[:, i], SP.eye(self.P)))
    gpVD = limix.CGPvarDecomp(covar[0], lik, mean[0], SP.ones(self.N), self.P, self.Yt, Params0)
    for i in range(self.N):
        gp.append(limix.CGPbase(covar[i], lik, mean[i]))
        gp[i].setY(self.Yt[:, i])
        gpVD.addGP(gp[i])

    # Optimization
    gpVD.initGPs()
    gpopt = limix.CGPopt(gpVD)
    LML0 = -1.0 * gpVD.LML()
    start_time = time.time()
    conv = gpopt.opt()
    time_train = time.time() - start_time
    LML = -1.0 * gpVD.LML()
    LMLgrad = SP.linalg.norm(gpVD.LMLgrad()['covar'])
    Params = gpVD.getParams()['covar']

    # Check whether limix::CVarianceDecomposition.train() has converged
    if conv != True or LMLgrad > grad_threshold or Params.max() > 10:
        print('limix::CVarianceDecomposition::train has not converged')
        res = None
    else:
        res = {
            'Params0': Params0,
            'Params': Params,
            'LML': SP.array([LML]),
            'LML0': SP.array([LML0]),
            'LMLgrad': SP.array([LMLgrad]),
            'time_train': SP.array([time_train]),
        }
    return res
    pass
[ "def", "fit", "(", "self", ",", "Params0", "=", "None", ",", "grad_threshold", "=", "1e-2", ")", ":", "# GPVD initialization", "lik", "=", "limix", ".", "CLikNormalNULL", "(", ")", "# Initial Params", "if", "Params0", "==", "None", ":", "n_params", "=", "self", ".", "C1", ".", "getNumberParams", "(", ")", "n_params", "+=", "self", ".", "C2", ".", "getNumberParams", "(", ")", "Params0", "=", "SP", ".", "rand", "(", "n_params", ")", "# MultiGP Framework", "covar", "=", "[", "]", "gp", "=", "[", "]", "mean", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "N", ")", ":", "covar", ".", "append", "(", "limix", ".", "CLinCombCF", "(", ")", ")", "covar", "[", "i", "]", ".", "addCovariance", "(", "self", ".", "C1", ")", "covar", "[", "i", "]", ".", "addCovariance", "(", "self", ".", "C2", ")", "coeff", "=", "SP", ".", "array", "(", "[", "self", ".", "eigen", "[", "i", "]", ",", "1", "]", ")", "covar", "[", "i", "]", ".", "setCoeff", "(", "coeff", ")", "mean", ".", "append", "(", "limix", ".", "CLinearMean", "(", "self", ".", "Yt", "[", ":", ",", "i", "]", ",", "SP", ".", "eye", "(", "self", ".", "P", ")", ")", ")", "gpVD", "=", "limix", ".", "CGPvarDecomp", "(", "covar", "[", "0", "]", ",", "lik", ",", "mean", "[", "0", "]", ",", "SP", ".", "ones", "(", "self", ".", "N", ")", ",", "self", ".", "P", ",", "self", ".", "Yt", ",", "Params0", ")", "for", "i", "in", "range", "(", "self", ".", "N", ")", ":", "gp", ".", "append", "(", "limix", ".", "CGPbase", "(", "covar", "[", "i", "]", ",", "lik", ",", "mean", "[", "i", "]", ")", ")", "gp", "[", "i", "]", ".", "setY", "(", "self", ".", "Yt", "[", ":", ",", "i", "]", ")", "gpVD", ".", "addGP", "(", "gp", "[", "i", "]", ")", "# Optimization", "gpVD", ".", "initGPs", "(", ")", "gpopt", "=", "limix", ".", "CGPopt", "(", "gpVD", ")", "LML0", "=", "-", "1.0", "*", "gpVD", ".", "LML", "(", ")", "start_time", "=", "time", ".", "time", "(", ")", "conv", "=", "gpopt", ".", "opt", "(", ")", "time_train", "=", "time", ".", "time", "(", ")", "-", "start_time", "LML", "=", "-", "1.0", "*", "gpVD", ".", "LML", "(", ")", "LMLgrad", "=", "SP", ".", "linalg", ".", "norm", "(", "gpVD", ".", "LMLgrad", "(", ")", "[", "'covar'", "]", ")", "Params", "=", "gpVD", ".", "getParams", "(", ")", "[", "'covar'", "]", "# Check whether limix::CVarianceDecomposition.train() has converged", "if", "conv", "!=", "True", "or", "LMLgrad", ">", "grad_threshold", "or", "Params", ".", "max", "(", ")", ">", "10", ":", "print", "(", "'limix::CVarianceDecomposition::train has not converged'", ")", "res", "=", "None", "else", ":", "res", "=", "{", "'Params0'", ":", "Params0", ",", "'Params'", ":", "Params", ",", "'LML'", ":", "SP", ".", "array", "(", "[", "LML", "]", ")", ",", "'LML0'", ":", "SP", ".", "array", "(", "[", "LML0", "]", ")", ",", "'LMLgrad'", ":", "SP", ".", "array", "(", "[", "LMLgrad", "]", ")", ",", "'time_train'", ":", "SP", ".", "array", "(", "[", "time_train", "]", ")", ",", "}", "return", "res", "pass" ]
fit a variance component model with the predefined design and the initialization and returns all the results
[ "fit", "a", "variance", "component", "model", "with", "the", "predefined", "design", "and", "the", "initialization", "and", "returns", "all", "the", "results" ]
python
train
37.381818
census-instrumentation/opencensus-python
opencensus/common/monitored_resource/k8s_utils.py
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/common/monitored_resource/k8s_utils.py#L50-L64
def get_k8s_metadata():
    """Get kubernetes container metadata, as on GCP GKE."""
    k8s_metadata = {}

    gcp_cluster = (gcp_metadata_config.GcpMetadataConfig
                   .get_attribute(gcp_metadata_config.CLUSTER_NAME_KEY))
    if gcp_cluster is not None:
        k8s_metadata[CLUSTER_NAME_KEY] = gcp_cluster

    for attribute_key, attribute_env in _K8S_ENV_ATTRIBUTES.items():
        attribute_value = os.environ.get(attribute_env)
        if attribute_value is not None:
            k8s_metadata[attribute_key] = attribute_value

    return k8s_metadata
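The environment-variable half of this lookup is a reusable pattern on its own. A standalone sketch follows; the attribute map below is illustrative, not the real _K8S_ENV_ATTRIBUTES mapping from the module:

import os

EXAMPLE_ENV_ATTRIBUTES = {  # hypothetical keys and variable names
    'pod_name': 'HOSTNAME',
    'namespace_name': 'NAMESPACE',
}

metadata = {}
for attribute_key, attribute_env in EXAMPLE_ENV_ATTRIBUTES.items():
    attribute_value = os.environ.get(attribute_env)
    if attribute_value is not None:
        metadata[attribute_key] = attribute_value
print(metadata)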
[ "def", "get_k8s_metadata", "(", ")", ":", "k8s_metadata", "=", "{", "}", "gcp_cluster", "=", "(", "gcp_metadata_config", ".", "GcpMetadataConfig", ".", "get_attribute", "(", "gcp_metadata_config", ".", "CLUSTER_NAME_KEY", ")", ")", "if", "gcp_cluster", "is", "not", "None", ":", "k8s_metadata", "[", "CLUSTER_NAME_KEY", "]", "=", "gcp_cluster", "for", "attribute_key", ",", "attribute_env", "in", "_K8S_ENV_ATTRIBUTES", ".", "items", "(", ")", ":", "attribute_value", "=", "os", ".", "environ", ".", "get", "(", "attribute_env", ")", "if", "attribute_value", "is", "not", "None", ":", "k8s_metadata", "[", "attribute_key", "]", "=", "attribute_value", "return", "k8s_metadata" ]
Get kubernetes container metadata, as on GCP GKE.
[ "Get", "kubernetes", "container", "metadata", "as", "on", "GCP", "GKE", "." ]
python
train
37.066667
cuihantao/andes
andes/models/pq.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/models/pq.py#L62-L65
def init0(self, dae):
    """Set initial p and q for power flow"""
    self.p0 = matrix(self.p, (self.n, 1), 'd')
    self.q0 = matrix(self.q, (self.n, 1), 'd')
[ "def", "init0", "(", "self", ",", "dae", ")", ":", "self", ".", "p0", "=", "matrix", "(", "self", ".", "p", ",", "(", "self", ".", "n", ",", "1", ")", ",", "'d'", ")", "self", ".", "q0", "=", "matrix", "(", "self", ".", "q", ",", "(", "self", ".", "n", ",", "1", ")", ",", "'d'", ")" ]
Set initial p and q for power flow
[ "Set", "initial", "p", "and", "q", "for", "power", "flow" ]
python
train
42.25
Spinmob/spinmob
egg/_gui.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/egg/_gui.py#L949-L959
def click(self):
    """
    Pretends the user clicked it, sending the signal and everything.
    """
    if self.is_checkable():
        if self.is_checked():
            self.set_checked(False)
        else:
            self.set_checked(True)
        self.signal_clicked.emit(self.is_checked())
    else:
        self.signal_clicked.emit(True)

    return self
[ "def", "click", "(", "self", ")", ":", "if", "self", ".", "is_checkable", "(", ")", ":", "if", "self", ".", "is_checked", "(", ")", ":", "self", ".", "set_checked", "(", "False", ")", "else", ":", "self", ".", "set_checked", "(", "True", ")", "self", ".", "signal_clicked", ".", "emit", "(", "self", ".", "is_checked", "(", ")", ")", "else", ":", "self", ".", "signal_clicked", ".", "emit", "(", "True", ")", "return", "self" ]
Pretends the user clicked it, sending the signal and everything.
[ "Pretends", "to", "user", "clicked", "it", "sending", "the", "signal", "and", "everything", "." ]
python
train
35.454545
nerdvegas/rez
src/rez/vendor/pyparsing/pyparsing.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pyparsing/pyparsing.py#L570-L589
def dump(self, indent='', depth=0):
    """Diagnostic method for listing out the contents of a C{ParseResults}.
       Accepts an optional C{indent} argument so that this string can be embedded
       in a nested display of other data."""
    out = []
    out.append(indent + _ustr(self.asList()))
    keys = self.items()
    keys.sort()
    for k, v in keys:
        if out:
            out.append('\n')
        out.append("%s%s- %s: " % (indent, (' ' * depth), k))
        if isinstance(v, ParseResults):
            if v.keys():
                out.append(v.dump(indent, depth + 1))
            else:
                out.append(_ustr(v))
        else:
            out.append(_ustr(v))
    return "".join(out)
[ "def", "dump", "(", "self", ",", "indent", "=", "''", ",", "depth", "=", "0", ")", ":", "out", "=", "[", "]", "out", ".", "append", "(", "indent", "+", "_ustr", "(", "self", ".", "asList", "(", ")", ")", ")", "keys", "=", "self", ".", "items", "(", ")", "keys", ".", "sort", "(", ")", "for", "k", ",", "v", "in", "keys", ":", "if", "out", ":", "out", ".", "append", "(", "'\\n'", ")", "out", ".", "append", "(", "\"%s%s- %s: \"", "%", "(", "indent", ",", "(", "' '", "*", "depth", ")", ",", "k", ")", ")", "if", "isinstance", "(", "v", ",", "ParseResults", ")", ":", "if", "v", ".", "keys", "(", ")", ":", "out", ".", "append", "(", "v", ".", "dump", "(", "indent", ",", "depth", "+", "1", ")", ")", "else", ":", "out", ".", "append", "(", "_ustr", "(", "v", ")", ")", "else", ":", "out", ".", "append", "(", "_ustr", "(", "v", ")", ")", "return", "\"\"", ".", "join", "(", "out", ")" ]
Diagnostic method for listing out the contents of a C{ParseResults}. Accepts an optional C{indent} argument so that this string can be embedded in a nested display of other data.
[ "Diagnostic", "method", "for", "listing", "out", "the", "contents", "of", "a", "C", "{", "ParseResults", "}", ".", "Accepts", "an", "optional", "C", "{", "indent", "}", "argument", "so", "that", "this", "string", "can", "be", "embedded", "in", "a", "nested", "display", "of", "other", "data", "." ]
python
train
39.1
centralniak/py-raildriver
raildriver/library.py
https://github.com/centralniak/py-raildriver/blob/c7f5f551e0436451b9507fc63a62e49a229282b9/raildriver/library.py#L50-L65
def get_controller_list(self):
    """
    Returns an iterable of tuples containing (index, controller_name) pairs.

    Controller indexes start at 0.

    You may easily transform this to a {name: index} mapping by using:

    >>> controllers = {name: index for index, name in raildriver.get_controller_list()}

    :return enumerate
    """
    ret_str = self.dll.GetControllerList().decode()
    if not ret_str:
        return []
    return enumerate(ret_str.split('::'))
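The {name: index} transform the docstring suggests works on any '::'-separated string. A quick sketch with an invented controller string:

ret_str = "Regulator::Reverser::TrainBrakeControl"  # illustrative value
controllers = {name: index for index, name in enumerate(ret_str.split('::'))}
print(controllers)  # {'Regulator': 0, 'Reverser': 1, 'TrainBrakeControl': 2}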
[ "def", "get_controller_list", "(", "self", ")", ":", "ret_str", "=", "self", ".", "dll", ".", "GetControllerList", "(", ")", ".", "decode", "(", ")", "if", "not", "ret_str", ":", "return", "[", "]", "return", "enumerate", "(", "ret_str", ".", "split", "(", "'::'", ")", ")" ]
Returns an iterable of tuples containing (index, controller_name) pairs.

Controller indexes start at 0.

You may easily transform this to a {name: index} mapping by using:

>>> controllers = {name: index for index, name in raildriver.get_controller_list()}

:return enumerate
[ "Returns", "an", "iterable", "of", "tuples", "containing", "(", "index", "controller_name", ")", "pairs", "." ]
python
train
31.5
mitsei/dlkit
dlkit/json_/learning/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/sessions.py#L5796-L5835
def update_objective_bank(self, objective_bank_form):
    """Updates an existing objective bank.

    arg:    objective_bank_form (osid.learning.ObjectiveBankForm):
            the form containing the elements to be updated
    raise:  IllegalState - ``objective_bank_form`` already used in an
            update transaction
    raise:  InvalidArgument - the form contains an invalid value
    raise:  NullArgument - ``objective_bank_form`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    raise:  Unsupported - ``objective_bank_form did not originate
            from get_objective_bank_form_for_update()``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinAdminSession.update_bin_template
    if self._catalog_session is not None:
        return self._catalog_session.update_catalog(catalog_form=objective_bank_form)
    collection = JSONClientValidated('learning',
                                     collection='ObjectiveBank',
                                     runtime=self._runtime)
    if not isinstance(objective_bank_form, ABCObjectiveBankForm):
        raise errors.InvalidArgument('argument type is not an ObjectiveBankForm')
    if not objective_bank_form.is_for_update():
        raise errors.InvalidArgument('the ObjectiveBankForm is for update only, not create')
    try:
        if self._forms[objective_bank_form.get_id().get_identifier()] == UPDATED:
            raise errors.IllegalState('objective_bank_form already used in an update transaction')
    except KeyError:
        raise errors.Unsupported('objective_bank_form did not originate from this session')
    if not objective_bank_form.is_valid():
        raise errors.InvalidArgument('one or more of the form elements is invalid')
    collection.save(objective_bank_form._my_map)  # save is deprecated - change to replace_one

    self._forms[objective_bank_form.get_id().get_identifier()] = UPDATED

    # Note: this is out of spec. The OSIDs don't require an object to be returned
    return objects.ObjectiveBank(
        osid_object_map=objective_bank_form._my_map,
        runtime=self._runtime,
        proxy=self._proxy)
[ "def", "update_objective_bank", "(", "self", ",", "objective_bank_form", ")", ":", "# Implemented from template for", "# osid.resource.BinAdminSession.update_bin_template", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "update_catalog", "(", "catalog_form", "=", "objective_bank_form", ")", "collection", "=", "JSONClientValidated", "(", "'learning'", ",", "collection", "=", "'ObjectiveBank'", ",", "runtime", "=", "self", ".", "_runtime", ")", "if", "not", "isinstance", "(", "objective_bank_form", ",", "ABCObjectiveBankForm", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'argument type is not an ObjectiveBankForm'", ")", "if", "not", "objective_bank_form", ".", "is_for_update", "(", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'the ObjectiveBankForm is for update only, not create'", ")", "try", ":", "if", "self", ".", "_forms", "[", "objective_bank_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "==", "UPDATED", ":", "raise", "errors", ".", "IllegalState", "(", "'objective_bank_form already used in an update transaction'", ")", "except", "KeyError", ":", "raise", "errors", ".", "Unsupported", "(", "'objective_bank_form did not originate from this session'", ")", "if", "not", "objective_bank_form", ".", "is_valid", "(", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'one or more of the form elements is invalid'", ")", "collection", ".", "save", "(", "objective_bank_form", ".", "_my_map", ")", "# save is deprecated - change to replace_one", "self", ".", "_forms", "[", "objective_bank_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "=", "UPDATED", "# Note: this is out of spec. The OSIDs don't require an object to be returned", "return", "objects", ".", "ObjectiveBank", "(", "osid_object_map", "=", "objective_bank_form", ".", "_my_map", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")" ]
Updates an existing objective bank.

arg:    objective_bank_form (osid.learning.ObjectiveBankForm): the
        form containing the elements to be updated
raise:  IllegalState - ``objective_bank_form`` already used in an
        update transaction
raise:  InvalidArgument - the form contains an invalid value
raise:  NullArgument - ``objective_bank_form`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
raise:  Unsupported - ``objective_bank_form did not originate from
        get_objective_bank_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
[ "Updates", "an", "existing", "objective", "bank", "." ]
python
train
58.575
openstack/networking-cisco
networking_cisco/plugins/cisco/cpnr/cpnr_client.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/cpnr/cpnr_client.py#L140-L143
def get_scopes(self, vpnid='.*'):
    """Returns a list of all the scopes from CPNR server."""
    request_url = self._build_url(['Scope'], vpn=vpnid)
    return self._do_request('GET', request_url)
[ "def", "get_scopes", "(", "self", ",", "vpnid", "=", "'.*'", ")", ":", "request_url", "=", "self", ".", "_build_url", "(", "[", "'Scope'", "]", ",", "vpn", "=", "vpnid", ")", "return", "self", ".", "_do_request", "(", "'GET'", ",", "request_url", ")" ]
Returns a list of all the scopes from CPNR server.
[ "Returns", "a", "list", "of", "all", "the", "scopes", "from", "CPNR", "server", "." ]
python
train
51.75
globocom/GloboNetworkAPI-client-python
networkapiclient/ClientFactory.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ClientFactory.py#L602-L608
def create_usuario(self):
    """Get an instance of usuario services facade."""
    return Usuario(
        self.networkapi_url,
        self.user,
        self.password,
        self.user_ldap)
[ "def", "create_usuario", "(", "self", ")", ":", "return", "Usuario", "(", "self", ".", "networkapi_url", ",", "self", ".", "user", ",", "self", ".", "password", ",", "self", ".", "user_ldap", ")" ]
Get an instance of usuario services facade.
[ "Get", "an", "instance", "of", "usuario", "services", "facade", "." ]
python
train
30.285714
Galarzaa90/tibia.py
tibiapy/utils.py
https://github.com/Galarzaa90/tibia.py/blob/02ba1a8f1e18177ef5c7dcd44affc8d761d59e12/tibiapy/utils.py#L210-L239
def try_date(obj) -> Optional[datetime.date]:
    """Attempts to convert an object into a date.

    If the date format is known, it's recommended to use the corresponding function.
    This is meant to be used in constructors.

    Parameters
    ----------
    obj: :class:`str`, :class:`datetime.datetime`, :class:`datetime.date`
        The object to convert.

    Returns
    -------
    :class:`datetime.date`, optional
        The represented date.
    """
    if obj is None:
        return None
    if isinstance(obj, datetime.datetime):
        return obj.date()
    if isinstance(obj, datetime.date):
        return obj
    res = parse_tibia_date(obj)
    if res is not None:
        return res
    res = parse_tibia_full_date(obj)
    if res is not None:
        return res
    res = parse_tibiadata_date(obj)
    return res
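The parse_tibia_* helpers are not shown in this excerpt, but the cascade itself (try each known format in turn, fall through on failure) can be sketched with stdlib parsing alone; the format list below is an assumption for illustration:

import datetime
from typing import Optional

def try_date_formats(value: str,
                     formats=("%b %d %Y", "%Y-%m-%d")) -> Optional[datetime.date]:
    # Try each candidate format in turn; return None if none of them match.
    for fmt in formats:
        try:
            return datetime.datetime.strptime(value, fmt).date()
        except ValueError:
            continue
    return None

print(try_date_formats("Jul 10 2018"))  # 2018-07-10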
[ "def", "try_date", "(", "obj", ")", "->", "Optional", "[", "datetime", ".", "date", "]", ":", "if", "obj", "is", "None", ":", "return", "None", "if", "isinstance", "(", "obj", ",", "datetime", ".", "datetime", ")", ":", "return", "obj", ".", "date", "(", ")", "if", "isinstance", "(", "obj", ",", "datetime", ".", "date", ")", ":", "return", "obj", "res", "=", "parse_tibia_date", "(", "obj", ")", "if", "res", "is", "not", "None", ":", "return", "res", "res", "=", "parse_tibia_full_date", "(", "obj", ")", "if", "res", "is", "not", "None", ":", "return", "res", "res", "=", "parse_tibiadata_date", "(", "obj", ")", "return", "res" ]
Attempts to convert an object into a date.

If the date format is known, it's recommended to use the corresponding function.
This is meant to be used in constructors.

Parameters
----------
obj: :class:`str`, :class:`datetime.datetime`, :class:`datetime.date`
    The object to convert.

Returns
-------
:class:`datetime.date`, optional
    The represented date.
[ "Attempts", "to", "convert", "an", "object", "into", "a", "date", "." ]
python
train
26.866667
watson-developer-cloud/python-sdk
ibm_watson/assistant_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/assistant_v1.py#L2294-L2419
def create_dialog_node(self,
                       workspace_id,
                       dialog_node,
                       description=None,
                       conditions=None,
                       parent=None,
                       previous_sibling=None,
                       output=None,
                       context=None,
                       metadata=None,
                       next_step=None,
                       title=None,
                       node_type=None,
                       event_name=None,
                       variable=None,
                       actions=None,
                       digress_in=None,
                       digress_out=None,
                       digress_out_slots=None,
                       user_label=None,
                       **kwargs):
    """
    Create dialog node.

    Create a new dialog node.
    This operation is limited to 500 requests per 30 minutes. For more
    information, see **Rate limiting**.

    :param str workspace_id: Unique identifier of the workspace.
    :param str dialog_node: The dialog node ID. This string must conform to the following restrictions:
        - It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot characters.
        - It must be no longer than 1024 characters.
    :param str description: The description of the dialog node. This string cannot contain carriage return, newline, or tab characters, and it must be no longer than 128 characters.
    :param str conditions: The condition that will trigger the dialog node. This string cannot contain carriage return, newline, or tab characters, and it must be no longer than 2048 characters.
    :param str parent: The ID of the parent dialog node. This property is omitted if the dialog node has no parent.
    :param str previous_sibling: The ID of the previous sibling dialog node. This property is omitted if the dialog node has no previous sibling.
    :param DialogNodeOutput output: The output of the dialog node. For more information about how to specify dialog node output, see the [documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#dialog-overview-responses).
    :param dict context: The context for the dialog node.
    :param dict metadata: The metadata for the dialog node.
    :param DialogNodeNextStep next_step: The next step to execute following this dialog node.
    :param str title: The alias used to identify the dialog node. This string must conform to the following restrictions:
        - It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot characters.
        - It must be no longer than 64 characters.
    :param str node_type: How the dialog node is processed.
    :param str event_name: How an `event_handler` node is processed.
    :param str variable: The location in the dialog context where output is stored.
    :param list[DialogNodeAction] actions: An array of objects describing any actions to be invoked by the dialog node.
    :param str digress_in: Whether this top-level dialog node can be digressed into.
    :param str digress_out: Whether this dialog node can be returned to after a digression.
    :param str digress_out_slots: Whether the user can digress to top-level nodes while filling out slots.
    :param str user_label: A label that can be displayed externally to describe the purpose of the node to users. This string must be no longer than 512 characters.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse
    """

    if workspace_id is None:
        raise ValueError('workspace_id must be provided')
    if dialog_node is None:
        raise ValueError('dialog_node must be provided')
    if output is not None:
        output = self._convert_model(output, DialogNodeOutput)
    if next_step is not None:
        next_step = self._convert_model(next_step, DialogNodeNextStep)
    if actions is not None:
        actions = [
            self._convert_model(x, DialogNodeAction) for x in actions
        ]

    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    sdk_headers = get_sdk_headers('conversation', 'V1', 'create_dialog_node')
    headers.update(sdk_headers)

    params = {'version': self.version}

    data = {
        'dialog_node': dialog_node,
        'description': description,
        'conditions': conditions,
        'parent': parent,
        'previous_sibling': previous_sibling,
        'output': output,
        'context': context,
        'metadata': metadata,
        'next_step': next_step,
        'title': title,
        'type': node_type,
        'event_name': event_name,
        'variable': variable,
        'actions': actions,
        'digress_in': digress_in,
        'digress_out': digress_out,
        'digress_out_slots': digress_out_slots,
        'user_label': user_label
    }

    url = '/v1/workspaces/{0}/dialog_nodes'.format(
        *self._encode_path_vars(workspace_id))
    response = self.request(
        method='POST',
        url=url,
        headers=headers,
        params=params,
        json=data,
        accept_json=True)
    return response
[ "def", "create_dialog_node", "(", "self", ",", "workspace_id", ",", "dialog_node", ",", "description", "=", "None", ",", "conditions", "=", "None", ",", "parent", "=", "None", ",", "previous_sibling", "=", "None", ",", "output", "=", "None", ",", "context", "=", "None", ",", "metadata", "=", "None", ",", "next_step", "=", "None", ",", "title", "=", "None", ",", "node_type", "=", "None", ",", "event_name", "=", "None", ",", "variable", "=", "None", ",", "actions", "=", "None", ",", "digress_in", "=", "None", ",", "digress_out", "=", "None", ",", "digress_out_slots", "=", "None", ",", "user_label", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "workspace_id", "is", "None", ":", "raise", "ValueError", "(", "'workspace_id must be provided'", ")", "if", "dialog_node", "is", "None", ":", "raise", "ValueError", "(", "'dialog_node must be provided'", ")", "if", "output", "is", "not", "None", ":", "output", "=", "self", ".", "_convert_model", "(", "output", ",", "DialogNodeOutput", ")", "if", "next_step", "is", "not", "None", ":", "next_step", "=", "self", ".", "_convert_model", "(", "next_step", ",", "DialogNodeNextStep", ")", "if", "actions", "is", "not", "None", ":", "actions", "=", "[", "self", ".", "_convert_model", "(", "x", ",", "DialogNodeAction", ")", "for", "x", "in", "actions", "]", "headers", "=", "{", "}", "if", "'headers'", "in", "kwargs", ":", "headers", ".", "update", "(", "kwargs", ".", "get", "(", "'headers'", ")", ")", "sdk_headers", "=", "get_sdk_headers", "(", "'conversation'", ",", "'V1'", ",", "'create_dialog_node'", ")", "headers", ".", "update", "(", "sdk_headers", ")", "params", "=", "{", "'version'", ":", "self", ".", "version", "}", "data", "=", "{", "'dialog_node'", ":", "dialog_node", ",", "'description'", ":", "description", ",", "'conditions'", ":", "conditions", ",", "'parent'", ":", "parent", ",", "'previous_sibling'", ":", "previous_sibling", ",", "'output'", ":", "output", ",", "'context'", ":", "context", ",", "'metadata'", ":", "metadata", ",", "'next_step'", ":", "next_step", ",", "'title'", ":", "title", ",", "'type'", ":", "node_type", ",", "'event_name'", ":", "event_name", ",", "'variable'", ":", "variable", ",", "'actions'", ":", "actions", ",", "'digress_in'", ":", "digress_in", ",", "'digress_out'", ":", "digress_out", ",", "'digress_out_slots'", ":", "digress_out_slots", ",", "'user_label'", ":", "user_label", "}", "url", "=", "'/v1/workspaces/{0}/dialog_nodes'", ".", "format", "(", "*", "self", ".", "_encode_path_vars", "(", "workspace_id", ")", ")", "response", "=", "self", ".", "request", "(", "method", "=", "'POST'", ",", "url", "=", "url", ",", "headers", "=", "headers", ",", "params", "=", "params", ",", "json", "=", "data", ",", "accept_json", "=", "True", ")", "return", "response" ]
Create dialog node.

Create a new dialog node.
This operation is limited to 500 requests per 30 minutes. For more information, see **Rate limiting**.

:param str workspace_id: Unique identifier of the workspace.
:param str dialog_node: The dialog node ID. This string must conform to the following restrictions:
    - It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot characters.
    - It must be no longer than 1024 characters.
:param str description: The description of the dialog node. This string cannot contain carriage return, newline, or tab characters, and it must be no longer than 128 characters.
:param str conditions: The condition that will trigger the dialog node. This string cannot contain carriage return, newline, or tab characters, and it must be no longer than 2048 characters.
:param str parent: The ID of the parent dialog node. This property is omitted if the dialog node has no parent.
:param str previous_sibling: The ID of the previous sibling dialog node. This property is omitted if the dialog node has no previous sibling.
:param DialogNodeOutput output: The output of the dialog node. For more information about how to specify dialog node output, see the [documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#dialog-overview-responses).
:param dict context: The context for the dialog node.
:param dict metadata: The metadata for the dialog node.
:param DialogNodeNextStep next_step: The next step to execute following this dialog node.
:param str title: The alias used to identify the dialog node. This string must conform to the following restrictions:
    - It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot characters.
    - It must be no longer than 64 characters.
:param str node_type: How the dialog node is processed.
:param str event_name: How an `event_handler` node is processed.
:param str variable: The location in the dialog context where output is stored.
:param list[DialogNodeAction] actions: An array of objects describing any actions to be invoked by the dialog node.
:param str digress_in: Whether this top-level dialog node can be digressed into.
:param str digress_out: Whether this dialog node can be returned to after a digression.
:param str digress_out_slots: Whether the user can digress to top-level nodes while filling out slots.
:param str user_label: A label that can be displayed externally to describe the purpose of the node to users. This string must be no longer than 512 characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
[ "Create", "dialog", "node", "." ]
python
train
45.238095
automl/HpBandSter
hpbandster/optimizers/config_generators/bohb.py
https://github.com/automl/HpBandSter/blob/841db4b827f342e5eb7f725723ea6461ac52d45a/hpbandster/optimizers/config_generators/bohb.py#L99-L234
def get_config(self, budget):
    """
    Function to sample a new configuration

    This function is called inside Hyperband to query a new configuration

    Parameters:
    -----------
    budget: float
        the budget for which this configuration is scheduled

    returns: config
        should return a valid configuration
    """
    self.logger.debug('start sampling a new configuration.')

    sample = None
    info_dict = {}

    # If no model is available, sample from prior
    # also mix in a fraction of random configs
    if len(self.kde_models.keys()) == 0 or np.random.rand() < self.random_fraction:
        sample = self.configspace.sample_configuration()
        info_dict['model_based_pick'] = False

    best = np.inf
    best_vector = None

    if sample is None:
        try:
            # sample from largest budget
            budget = max(self.kde_models.keys())

            l = self.kde_models[budget]['good'].pdf
            g = self.kde_models[budget]['bad'].pdf

            minimize_me = lambda x: max(1e-32, g(x)) / max(l(x), 1e-32)

            kde_good = self.kde_models[budget]['good']
            kde_bad = self.kde_models[budget]['bad']

            for i in range(self.num_samples):
                idx = np.random.randint(0, len(kde_good.data))
                datum = kde_good.data[idx]
                vector = []

                for m, bw, t in zip(datum, kde_good.bw, self.vartypes):
                    bw = max(bw, self.min_bandwidth)
                    if t == 0:
                        bw = self.bw_factor * bw
                        try:
                            vector.append(sps.truncnorm.rvs(-m/bw, (1-m)/bw, loc=m, scale=bw))
                        except:
                            self.logger.warning("Truncated Normal failed for:\ndatum=%s\nbandwidth=%s\nfor entry with value %s" % (datum, kde_good.bw, m))
                            self.logger.warning("data in the KDE:\n%s" % kde_good.data)
                    else:
                        if np.random.rand() < (1 - bw):
                            vector.append(int(m))
                        else:
                            vector.append(np.random.randint(t))

                val = minimize_me(vector)

                if not np.isfinite(val):
                    self.logger.warning('sampled vector: %s has EI value %s' % (vector, val))
                    self.logger.warning("data in the KDEs:\n%s\n%s" % (kde_good.data, kde_bad.data))
                    self.logger.warning("bandwidth of the KDEs:\n%s\n%s" % (kde_good.bw, kde_bad.bw))
                    self.logger.warning("l(x) = %s" % (l(vector)))
                    self.logger.warning("g(x) = %s" % (g(vector)))

                    # right now, this happens because a KDE does not contain all values for a categorical parameter
                    # this cannot be fixed with the statsmodels KDE, so for now, we are just going to evaluate this one
                    # if the good_kde has a finite value, i.e. there is no config with that value in the bad kde, so it shouldn't be terrible.
                    if np.isfinite(l(vector)):
                        best_vector = vector
                        break

                if val < best:
                    best = val
                    best_vector = vector

            if best_vector is None:
                self.logger.debug("Sampling based optimization with %i samples failed -> using random configuration" % self.num_samples)
                sample = self.configspace.sample_configuration().get_dictionary()
                info_dict['model_based_pick'] = False
            else:
                self.logger.debug('best_vector: {}, {}, {}, {}'.format(best_vector, best, l(best_vector), g(best_vector)))
                for i, hp_value in enumerate(best_vector):
                    if isinstance(
                        self.configspace.get_hyperparameter(
                            self.configspace.get_hyperparameter_by_idx(i)
                        ),
                        ConfigSpace.hyperparameters.CategoricalHyperparameter
                    ):
                        best_vector[i] = int(np.rint(best_vector[i]))
                sample = ConfigSpace.Configuration(self.configspace, vector=best_vector).get_dictionary()

                try:
                    sample = ConfigSpace.util.deactivate_inactive_hyperparameters(
                        configuration_space=self.configspace,
                        configuration=sample
                    )
                    info_dict['model_based_pick'] = True
                except Exception as e:
                    self.logger.warning(("=" * 50 + "\n") * 3 +
                                        "Error converting configuration:\n%s" % sample +
                                        "\n here is a traceback:" +
                                        traceback.format_exc())
                    raise(e)

        except:
            self.logger.warning("Sampling based optimization with %i samples failed\n %s \nUsing random configuration" % (self.num_samples, traceback.format_exc()))
            sample = self.configspace.sample_configuration()
            info_dict['model_based_pick'] = False

    try:
        sample = ConfigSpace.util.deactivate_inactive_hyperparameters(
            configuration_space=self.configspace,
            configuration=sample.get_dictionary()
        ).get_dictionary()
    except Exception as e:
        self.logger.warning("Error (%s) converting configuration: %s -> "
                            "using random configuration!", e, sample)
        sample = self.configspace.sample_configuration().get_dictionary()

    self.logger.debug('done sampling a new configuration.')
    return sample, info_dict
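The acquisition at the heart of this loop is a TPE-style density ratio: draw candidates, score each by g(x)/l(x), and keep the minimiser. A toy 1-d sketch using SciPy KDEs in place of the statsmodels KDEs BOHB actually builds (the data points are invented):

import numpy as np
from scipy.stats import gaussian_kde

good = gaussian_kde([0.10, 0.15, 0.20])  # configs that scored well
bad = gaussian_kde([0.70, 0.80, 0.90])   # configs that scored badly

candidates = np.random.uniform(0, 1, size=256)
# Minimize g(x)/l(x): prefer points likely under "good", unlikely under "bad".
scores = (np.maximum(bad.pdf(candidates), 1e-32)
          / np.maximum(good.pdf(candidates), 1e-32))
print("suggested config:", candidates[np.argmin(scores)])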
[ "def", "get_config", "(", "self", ",", "budget", ")", ":", "self", ".", "logger", ".", "debug", "(", "'start sampling a new configuration.'", ")", "sample", "=", "None", "info_dict", "=", "{", "}", "# If no model is available, sample from prior", "# also mix in a fraction of random configs", "if", "len", "(", "self", ".", "kde_models", ".", "keys", "(", ")", ")", "==", "0", "or", "np", ".", "random", ".", "rand", "(", ")", "<", "self", ".", "random_fraction", ":", "sample", "=", "self", ".", "configspace", ".", "sample_configuration", "(", ")", "info_dict", "[", "'model_based_pick'", "]", "=", "False", "best", "=", "np", ".", "inf", "best_vector", "=", "None", "if", "sample", "is", "None", ":", "try", ":", "#sample from largest budget", "budget", "=", "max", "(", "self", ".", "kde_models", ".", "keys", "(", ")", ")", "l", "=", "self", ".", "kde_models", "[", "budget", "]", "[", "'good'", "]", ".", "pdf", "g", "=", "self", ".", "kde_models", "[", "budget", "]", "[", "'bad'", "]", ".", "pdf", "minimize_me", "=", "lambda", "x", ":", "max", "(", "1e-32", ",", "g", "(", "x", ")", ")", "/", "max", "(", "l", "(", "x", ")", ",", "1e-32", ")", "kde_good", "=", "self", ".", "kde_models", "[", "budget", "]", "[", "'good'", "]", "kde_bad", "=", "self", ".", "kde_models", "[", "budget", "]", "[", "'bad'", "]", "for", "i", "in", "range", "(", "self", ".", "num_samples", ")", ":", "idx", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "len", "(", "kde_good", ".", "data", ")", ")", "datum", "=", "kde_good", ".", "data", "[", "idx", "]", "vector", "=", "[", "]", "for", "m", ",", "bw", ",", "t", "in", "zip", "(", "datum", ",", "kde_good", ".", "bw", ",", "self", ".", "vartypes", ")", ":", "bw", "=", "max", "(", "bw", ",", "self", ".", "min_bandwidth", ")", "if", "t", "==", "0", ":", "bw", "=", "self", ".", "bw_factor", "*", "bw", "try", ":", "vector", ".", "append", "(", "sps", ".", "truncnorm", ".", "rvs", "(", "-", "m", "/", "bw", ",", "(", "1", "-", "m", ")", "/", "bw", ",", "loc", "=", "m", ",", "scale", "=", "bw", ")", ")", "except", ":", "self", ".", "logger", ".", "warning", "(", "\"Truncated Normal failed for:\\ndatum=%s\\nbandwidth=%s\\nfor entry with value %s\"", "%", "(", "datum", ",", "kde_good", ".", "bw", ",", "m", ")", ")", "self", ".", "logger", ".", "warning", "(", "\"data in the KDE:\\n%s\"", "%", "kde_good", ".", "data", ")", "else", ":", "if", "np", ".", "random", ".", "rand", "(", ")", "<", "(", "1", "-", "bw", ")", ":", "vector", ".", "append", "(", "int", "(", "m", ")", ")", "else", ":", "vector", ".", "append", "(", "np", ".", "random", ".", "randint", "(", "t", ")", ")", "val", "=", "minimize_me", "(", "vector", ")", "if", "not", "np", ".", "isfinite", "(", "val", ")", ":", "self", ".", "logger", ".", "warning", "(", "'sampled vector: %s has EI value %s'", "%", "(", "vector", ",", "val", ")", ")", "self", ".", "logger", ".", "warning", "(", "\"data in the KDEs:\\n%s\\n%s\"", "%", "(", "kde_good", ".", "data", ",", "kde_bad", ".", "data", ")", ")", "self", ".", "logger", ".", "warning", "(", "\"bandwidth of the KDEs:\\n%s\\n%s\"", "%", "(", "kde_good", ".", "bw", ",", "kde_bad", ".", "bw", ")", ")", "self", ".", "logger", ".", "warning", "(", "\"l(x) = %s\"", "%", "(", "l", "(", "vector", ")", ")", ")", "self", ".", "logger", ".", "warning", "(", "\"g(x) = %s\"", "%", "(", "g", "(", "vector", ")", ")", ")", "# right now, this happens because a KDE does not contain all values for a categorical parameter", "# this cannot be fixed with the statsmodels KDE, so for now, we are 
just going to evaluate this one", "# if the good_kde has a finite value, i.e. there is no config with that value in the bad kde, so it shouldn't be terrible.", "if", "np", ".", "isfinite", "(", "l", "(", "vector", ")", ")", ":", "best_vector", "=", "vector", "break", "if", "val", "<", "best", ":", "best", "=", "val", "best_vector", "=", "vector", "if", "best_vector", "is", "None", ":", "self", ".", "logger", ".", "debug", "(", "\"Sampling based optimization with %i samples failed -> using random configuration\"", "%", "self", ".", "num_samples", ")", "sample", "=", "self", ".", "configspace", ".", "sample_configuration", "(", ")", ".", "get_dictionary", "(", ")", "info_dict", "[", "'model_based_pick'", "]", "=", "False", "else", ":", "self", ".", "logger", ".", "debug", "(", "'best_vector: {}, {}, {}, {}'", ".", "format", "(", "best_vector", ",", "best", ",", "l", "(", "best_vector", ")", ",", "g", "(", "best_vector", ")", ")", ")", "for", "i", ",", "hp_value", "in", "enumerate", "(", "best_vector", ")", ":", "if", "isinstance", "(", "self", ".", "configspace", ".", "get_hyperparameter", "(", "self", ".", "configspace", ".", "get_hyperparameter_by_idx", "(", "i", ")", ")", ",", "ConfigSpace", ".", "hyperparameters", ".", "CategoricalHyperparameter", ")", ":", "best_vector", "[", "i", "]", "=", "int", "(", "np", ".", "rint", "(", "best_vector", "[", "i", "]", ")", ")", "sample", "=", "ConfigSpace", ".", "Configuration", "(", "self", ".", "configspace", ",", "vector", "=", "best_vector", ")", ".", "get_dictionary", "(", ")", "try", ":", "sample", "=", "ConfigSpace", ".", "util", ".", "deactivate_inactive_hyperparameters", "(", "configuration_space", "=", "self", ".", "configspace", ",", "configuration", "=", "sample", ")", "info_dict", "[", "'model_based_pick'", "]", "=", "True", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "warning", "(", "(", "\"=\"", "*", "50", "+", "\"\\n\"", ")", "*", "3", "+", "\"Error converting configuration:\\n%s\"", "%", "sample", "+", "\"\\n here is a traceback:\"", "+", "traceback", ".", "format_exc", "(", ")", ")", "raise", "(", "e", ")", "except", ":", "self", ".", "logger", ".", "warning", "(", "\"Sampling based optimization with %i samples failed\\n %s \\nUsing random configuration\"", "%", "(", "self", ".", "num_samples", ",", "traceback", ".", "format_exc", "(", ")", ")", ")", "sample", "=", "self", ".", "configspace", ".", "sample_configuration", "(", ")", "info_dict", "[", "'model_based_pick'", "]", "=", "False", "try", ":", "sample", "=", "ConfigSpace", ".", "util", ".", "deactivate_inactive_hyperparameters", "(", "configuration_space", "=", "self", ".", "configspace", ",", "configuration", "=", "sample", ".", "get_dictionary", "(", ")", ")", ".", "get_dictionary", "(", ")", "except", "Exception", "as", "e", ":", "self", ".", "logger", ".", "warning", "(", "\"Error (%s) converting configuration: %s -> \"", "\"using random configuration!\"", ",", "e", ",", "sample", ")", "sample", "=", "self", ".", "configspace", ".", "sample_configuration", "(", ")", ".", "get_dictionary", "(", ")", "self", ".", "logger", ".", "debug", "(", "'done sampling a new configuration.'", ")", "return", "sample", ",", "info_dict" ]
Function to sample a new configuration

This function is called inside Hyperband to query a new configuration

Parameters:
-----------
budget: float
    the budget for which this configuration is scheduled

returns: config
    should return a valid configuration
[ "Function", "to", "sample", "a", "new", "configuration" ]
python
train
33.551471
ajenhl/tacl
tacl/__main__.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/__main__.py#L527-L539
def prepare_xml(args, parser):
    """Prepares XML files for stripping.

    This process creates a single, normalised TEI XML file for each
    work.
    """
    if args.source == constants.TEI_SOURCE_CBETA_GITHUB:
        corpus_class = tacl.TEICorpusCBETAGitHub
    else:
        raise Exception('Unsupported TEI source option provided')
    corpus = corpus_class(args.input, args.output)
    corpus.tidy()
[ "def", "prepare_xml", "(", "args", ",", "parser", ")", ":", "if", "args", ".", "source", "==", "constants", ".", "TEI_SOURCE_CBETA_GITHUB", ":", "corpus_class", "=", "tacl", ".", "TEICorpusCBETAGitHub", "else", ":", "raise", "Exception", "(", "'Unsupported TEI source option provided'", ")", "corpus", "=", "corpus_class", "(", "args", ".", "input", ",", "args", ".", "output", ")", "corpus", ".", "tidy", "(", ")" ]
Prepares XML files for stripping. This process creates a single, normalised TEI XML file for each work.
[ "Prepares", "XML", "files", "for", "stripping", "." ]
python
train
30.615385
scrapinghub/dateparser
dateparser/languages/locale.py
https://github.com/scrapinghub/dateparser/blob/11a761c99d3ee522a3c63756b70c106a579e8b5c/dateparser/languages/locale.py#L114-L149
def translate(self, date_string, keep_formatting=False, settings=None):
    """
    Translate the date string to its English equivalent.

    :param date_string:
        A string representing date and/or time in a recognizably valid format.
    :type date_string: str|unicode

    :param keep_formatting:
        If True, retain formatting of the date string after translation.
    :type keep_formatting: bool

    :return: translated date string.
    """
    date_string = self._translate_numerals(date_string)
    if settings.NORMALIZE:
        date_string = normalize_unicode(date_string)
    date_string = self._simplify(date_string, settings=settings)
    dictionary = self._get_dictionary(settings)
    date_string_tokens = dictionary.split(date_string, keep_formatting)

    relative_translations = self._get_relative_translations(settings=settings)

    for i, word in enumerate(date_string_tokens):
        word = word.lower()
        for pattern, replacement in relative_translations.items():
            if pattern.match(word):
                date_string_tokens[i] = pattern.sub(replacement, word)
        else:
            if word in dictionary:
                date_string_tokens[i] = dictionary[word] or ''

    if "in" in date_string_tokens:
        date_string_tokens = self._clear_future_words(date_string_tokens)

    return self._join(list(filter(bool, date_string_tokens)),
                      separator="" if keep_formatting else " ", settings=settings)
[ "def", "translate", "(", "self", ",", "date_string", ",", "keep_formatting", "=", "False", ",", "settings", "=", "None", ")", ":", "date_string", "=", "self", ".", "_translate_numerals", "(", "date_string", ")", "if", "settings", ".", "NORMALIZE", ":", "date_string", "=", "normalize_unicode", "(", "date_string", ")", "date_string", "=", "self", ".", "_simplify", "(", "date_string", ",", "settings", "=", "settings", ")", "dictionary", "=", "self", ".", "_get_dictionary", "(", "settings", ")", "date_string_tokens", "=", "dictionary", ".", "split", "(", "date_string", ",", "keep_formatting", ")", "relative_translations", "=", "self", ".", "_get_relative_translations", "(", "settings", "=", "settings", ")", "for", "i", ",", "word", "in", "enumerate", "(", "date_string_tokens", ")", ":", "word", "=", "word", ".", "lower", "(", ")", "for", "pattern", ",", "replacement", "in", "relative_translations", ".", "items", "(", ")", ":", "if", "pattern", ".", "match", "(", "word", ")", ":", "date_string_tokens", "[", "i", "]", "=", "pattern", ".", "sub", "(", "replacement", ",", "word", ")", "else", ":", "if", "word", "in", "dictionary", ":", "date_string_tokens", "[", "i", "]", "=", "dictionary", "[", "word", "]", "or", "''", "if", "\"in\"", "in", "date_string_tokens", ":", "date_string_tokens", "=", "self", ".", "_clear_future_words", "(", "date_string_tokens", ")", "return", "self", ".", "_join", "(", "list", "(", "filter", "(", "bool", ",", "date_string_tokens", ")", ")", ",", "separator", "=", "\"\"", "if", "keep_formatting", "else", "\" \"", ",", "settings", "=", "settings", ")" ]
Translate the date string to its English equivalent.

:param date_string:
    A string representing date and/or time in a recognizably valid format.
:type date_string: str|unicode

:param keep_formatting:
    If True, retain formatting of the date string after translation.
:type keep_formatting: bool

:return: translated date string.
[ "Translate", "the", "date", "string", "to", "its", "English", "equivalent", "." ]
python
test
43.25
bunq/sdk_python
bunq/sdk/security.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/security.py#L254-L271
def _generate_response_head_bytes(status_code, headers):
    """
    :type status_code: int
    :type headers: dict[str, str]

    :rtype: bytes
    """
    head_string = str(status_code) + _DELIMITER_NEWLINE
    header_tuples = sorted((k, headers[k]) for k in headers)

    for name, value in header_tuples:
        name = _get_header_correctly_cased(name)

        if _should_sign_response_header(name):
            head_string += _FORMAT_HEADER_STRING.format(name, value)

    return (head_string + _DELIMITER_NEWLINE).encode()
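A self-contained sketch of the same canonicalisation, with the newline delimiter inlined and a header-prefix filter standing in for _should_sign_response_header (the X-Bunq- prefix is an assumption, not confirmed by the excerpt):

def build_head(status_code, headers, signed_prefix="X-Bunq-"):
    # Status line, then the signed headers in sorted order, then a blank line.
    head = str(status_code) + "\n"
    for name, value in sorted(headers.items()):
        if name.startswith(signed_prefix):  # stand-in filter (assumption)
            head += "{}: {}\n".format(name, value)
    return (head + "\n").encode()

print(build_head(200, {"X-Bunq-Server-Signature": "abc", "Date": "today"}))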
[ "def", "_generate_response_head_bytes", "(", "status_code", ",", "headers", ")", ":", "head_string", "=", "str", "(", "status_code", ")", "+", "_DELIMITER_NEWLINE", "header_tuples", "=", "sorted", "(", "(", "k", ",", "headers", "[", "k", "]", ")", "for", "k", "in", "headers", ")", "for", "name", ",", "value", "in", "header_tuples", ":", "name", "=", "_get_header_correctly_cased", "(", "name", ")", "if", "_should_sign_response_header", "(", "name", ")", ":", "head_string", "+=", "_FORMAT_HEADER_STRING", ".", "format", "(", "name", ",", "value", ")", "return", "(", "head_string", "+", "_DELIMITER_NEWLINE", ")", ".", "encode", "(", ")" ]
:type status_code: int
:type headers: dict[str, str]

:rtype: bytes
[ ":", "type", "status_code", ":", "int", ":", "type", "headers", ":", "dict", "[", "str", "str", "]" ]
python
train
28.555556
lago-project/lago
lago/utils.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/utils.py#L850-L865
def ver_cmp(ver1, ver2):
    """
    Compare lago versions

    Args:
        ver1(str): version string
        ver2(str): version string

    Returns:
        Return negative if ver1<ver2, zero if ver1==ver2, positive if
        ver1>ver2.
    """
    return cmp(
        pkg_resources.parse_version(ver1),
        pkg_resources.parse_version(ver2)
    )
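Note that the built-in cmp() used here exists only in Python 2. A Python 3 sketch with the same contract, still leaning on pkg_resources:

import pkg_resources

def ver_cmp_py3(ver1, ver2):
    a = pkg_resources.parse_version(ver1)
    b = pkg_resources.parse_version(ver2)
    return (a > b) - (a < b)  # -1, 0 or 1, like Python 2's cmp()

print(ver_cmp_py3("1.2.0", "1.10.0"))  # -1: versions compare numerically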
[ "def", "ver_cmp", "(", "ver1", ",", "ver2", ")", ":", "return", "cmp", "(", "pkg_resources", ".", "parse_version", "(", "ver1", ")", ",", "pkg_resources", ".", "parse_version", "(", "ver2", ")", ")" ]
Compare lago versions

Args:
    ver1(str): version string
    ver2(str): version string

Returns:
    Return negative if ver1<ver2, zero if ver1==ver2, positive if
    ver1>ver2.
[ "Compare", "lago", "versions" ]
python
train
20.8125
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/utils/winresource.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/utils/winresource.py#L267-L278
def UpdateResourcesFromResFile(dstpath, srcpath, types=None, names=None,
                               languages=None):
    """
    Update or add resources from dll/exe file srcpath in dll/exe file dstpath.

    types = a list of resource types to update (None = all)
    names = a list of resource names to update (None = all)
    languages = a list of resource languages to update (None = all)
    """
    res = GetResources(srcpath, types, names, languages)
    UpdateResourcesFromDict(dstpath, res)
[ "def", "UpdateResourcesFromResFile", "(", "dstpath", ",", "srcpath", ",", "types", "=", "None", ",", "names", "=", "None", ",", "languages", "=", "None", ")", ":", "res", "=", "GetResources", "(", "srcpath", ",", "types", ",", "names", ",", "languages", ")", "UpdateResourcesFromDict", "(", "dstpath", ",", "res", ")" ]
Update or add resources from dll/exe file srcpath in dll/exe file dstpath.

types = a list of resource types to update (None = all)
names = a list of resource names to update (None = all)
languages = a list of resource languages to update (None = all)
[ "Update", "or", "add", "resources", "from", "dll", "/", "exe", "file", "srcpath", "in", "dll", "/", "exe", "file", "dstpath", ".", "types", "=", "a", "list", "of", "resource", "types", "to", "update", "(", "None", "=", "all", ")", "names", "=", "a", "list", "of", "resource", "names", "to", "update", "(", "None", "=", "all", ")", "languages", "=", "a", "list", "of", "resource", "languages", "to", "update", "(", "None", "=", "all", ")" ]
python
train
41.833333
MrYsLab/pymata-aio
pymata_aio/pymata_core.py
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata_core.py#L610-L622
async def disable_digital_reporting(self, pin):
    """
    Disables digital reporting. By turning reporting off for this pin,
    reporting is disabled for all 8 bits in the "port"

    :param pin: Pin and all pins for this port

    :returns: No return value
    """
    port = pin // 8
    command = [PrivateConstants.REPORT_DIGITAL + port,
               PrivateConstants.REPORTING_DISABLE]
    await self._send_command(command)
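The port arithmetic in isolation: Firmata-style protocols group digital pins into 8-bit ports, so the pin number floor-divided by 8 selects the port (pin values below are illustrative):

for pin in (0, 7, 8, 11, 15, 16):
    print("pin", pin, "-> port", pin // 8)
# pins 0-7 land in port 0, pins 8-15 in port 1, and so on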
[ "async", "def", "disable_digital_reporting", "(", "self", ",", "pin", ")", ":", "port", "=", "pin", "//", "8", "command", "=", "[", "PrivateConstants", ".", "REPORT_DIGITAL", "+", "port", ",", "PrivateConstants", ".", "REPORTING_DISABLE", "]", "await", "self", ".", "_send_command", "(", "command", ")" ]
Disables digital reporting. By turning reporting off for this pin,
reporting is disabled for all 8 bits in the "port"

:param pin: Pin and all pins for this port

:returns: No return value
[ "Disables", "digital", "reporting", ".", "By", "turning", "reporting", "off", "for", "this", "pin", "Reporting", "is", "disabled", "for", "all", "8", "bits", "in", "the", "port" ]
python
train
35.384615
pandas-dev/pandas
pandas/core/groupby/categorical.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/categorical.py#L77-L100
def recode_from_groupby(c, sort, ci): """ Reverse the codes_to_groupby to account for sort / observed. Parameters ---------- c : Categorical sort : boolean The value of the sort parameter groupby was called with. ci : CategoricalIndex The codes / categories to recode Returns ------- CategoricalIndex """ # we re-order to the original category orderings if sort: return ci.set_categories(c.categories) # we are not sorting, so add unobserved to the end return ci.add_categories( c.categories[~c.categories.isin(ci.categories)])
[ "def", "recode_from_groupby", "(", "c", ",", "sort", ",", "ci", ")", ":", "# we re-order to the original category orderings", "if", "sort", ":", "return", "ci", ".", "set_categories", "(", "c", ".", "categories", ")", "# we are not sorting, so add unobserved to the end", "return", "ci", ".", "add_categories", "(", "c", ".", "categories", "[", "~", "c", ".", "categories", ".", "isin", "(", "ci", ".", "categories", ")", "]", ")" ]
Reverse the codes_to_groupby to account for sort / observed. Parameters ---------- c : Categorical sort : boolean The value of the sort parameter groupby was called with. ci : CategoricalIndex The codes / categories to recode Returns ------- CategoricalIndex
[ "Reverse", "the", "codes_to_groupby", "to", "account", "for", "sort", "/", "observed", "." ]
python
train
25
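A hypothetical pandas illustration of the two branches above: a sorted groupby restores the original category order, while an unsorted one appends the unobserved categories at the end.

import pandas as pd

c = pd.Categorical(['b', 'a', 'b'], categories=['c', 'b', 'a'])  # original orderings
ci = pd.CategoricalIndex(['a', 'b'])                             # observed groupby result

# sort=True branch: re-order to the original category orderings
print(ci.set_categories(c.categories))
# sort=False branch: append the unobserved category 'c' at the end
print(ci.add_categories(c.categories[~c.categories.isin(ci.categories)]))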
MillionIntegrals/vel
vel/models/imagenet/resnet34.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/models/imagenet/resnet34.py#L102-L107
def create(fc_layers=None, dropout=None, pretrained=True): """ Vel factory function """ def instantiate(**_): return Resnet34(fc_layers, dropout, pretrained) return ModelFactory.generic(instantiate)
[ "def", "create", "(", "fc_layers", "=", "None", ",", "dropout", "=", "None", ",", "pretrained", "=", "True", ")", ":", "def", "instantiate", "(", "*", "*", "_", ")", ":", "return", "Resnet34", "(", "fc_layers", ",", "dropout", ",", "pretrained", ")", "return", "ModelFactory", ".", "generic", "(", "instantiate", ")" ]
Vel factory function
[ "Vel", "factory", "function" ]
python
train
35.666667
mmp2/megaman
megaman/utils/analyze_dimension_and_radius.py
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/utils/analyze_dimension_and_radius.py#L90-L139
def run_analyze_dimension_and_radius(data, rmin, rmax, nradii, adjacency_method='brute', adjacency_kwds = {}, fit_range=None, savefig=False, plot_name = 'dimension_plot.png'): """ This function is used to estimate the doubling dimension (approximately equal to the intrinsic dimension) by computing a graph of neighborhood radius versus average number of neighbors. The "radius" refers to the truncation constant where all distances greater than a specified radius are taken to be infinite. This is used for example in the truncated Gaussian kernel in estimate_radius.py Parameters ---------- data : numpy array, Original data set for which we are estimating the bandwidth rmin : float, smallest radius to consider rmax : float, largest radius to consider nradii : int, number of radii between rmin and rmax to consider adjacency_method : string, megaman adjacency method to use, default 'brute' see geometry.py for details adjacency_kwds : dict, dictionary of keywords for adjacency method fit_range : list of ints, range of radii to consider default is range(nradii), i.e. all of them savefig: bool, whether to save the radius vs. neighbors figure plot_name: string, filename of the figure to be saved as. Returns ------- results : dictionary contains the radii, average neighbors, min and max number of neighbors and number of points with no neighbors. dim : float, estimated doubling dimension (used as an estimate of the intrinsic dimension) """ n, D = data.shape radii = 10**(np.linspace(np.log10(rmin), np.log10(rmax), nradii)) dists = compute_largest_radius_distance(data, rmax, adjacency_method, adjacency_kwds) results = neighborhood_analysis(dists, radii) avg_neighbors = results['avg_neighbors'].flatten() radii = results['radii'].flatten() if fit_range is None: fit_range = range(len(radii)) dim = find_dimension_plot(avg_neighbors, radii, fit_range, savefig, plot_name) return(results, dim)
[ "def", "run_analyze_dimension_and_radius", "(", "data", ",", "rmin", ",", "rmax", ",", "nradii", ",", "adjacency_method", "=", "'brute'", ",", "adjacency_kwds", "=", "{", "}", ",", "fit_range", "=", "None", ",", "savefig", "=", "False", ",", "plot_name", "=", "'dimension_plot.png'", ")", ":", "n", ",", "D", "=", "data", ".", "shape", "radii", "=", "10", "**", "(", "np", ".", "linspace", "(", "np", ".", "log10", "(", "rmin", ")", ",", "np", ".", "log10", "(", "rmax", ")", ",", "nradii", ")", ")", "dists", "=", "compute_largest_radius_distance", "(", "data", ",", "rmax", ",", "adjacency_method", ",", "adjacency_kwds", ")", "results", "=", "neighborhood_analysis", "(", "dists", ",", "radii", ")", "avg_neighbors", "=", "results", "[", "'avg_neighbors'", "]", ".", "flatten", "(", ")", "radii", "=", "results", "[", "'radii'", "]", ".", "flatten", "(", ")", "if", "fit_range", "is", "None", ":", "fit_range", "=", "range", "(", "len", "(", "radii", ")", ")", "dim", "=", "find_dimension_plot", "(", "avg_neighbors", ",", "radii", ",", "fit_range", ",", "savefig", ",", "plot_name", ")", "return", "(", "results", ",", "dim", ")" ]
This function is used to estimate the doubling dimension (approximately equal to the intrinsic dimension) by computing a graph of neighborhood radius versus average number of neighbors. The "radius" refers to the truncation constant where all distances greater than a specified radius are taken to be infinite. This is used for example in the truncated Gaussian kernel in estimate_radius.py Parameters ---------- data : numpy array, Original data set for which we are estimating the bandwidth rmin : float, smallest radius to consider rmax : float, largest radius to consider nradii : int, number of radii between rmin and rmax to consider adjacency_method : string, megaman adjacency method to use, default 'brute' see geometry.py for details adjacency_kwds : dict, dictionary of keywords for adjacency method fit_range : list of ints, range of radii to consider default is range(nradii), i.e. all of them savefig: bool, whether to save the radius vs. neighbors figure plot_name: string, filename of the figure to be saved as. Returns ------- results : dictionary contains the radii, average neighbors, min and max number of neighbors and number of points with no neighbors. dim : float, estimated doubling dimension (used as an estimate of the intrinsic dimension)
[ "This", "function", "is", "used", "to", "estimate", "the", "doubling", "dimension", "(", "approximately", "equal", "to", "the", "intrinsic", "dimension", ")", "by", "computing", "a", "graph", "of", "neighborhood", "radius", "versus", "average", "number", "of", "neighbors", "." ]
python
train
42.48
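Two steps of the routine above as a standalone numpy sketch: the log-spaced radii grid, and the dimension estimate itself. find_dimension_plot is not shown in this record, so the slope fit below is an assumption about what it computes (for d-dimensional data, avg_neighbors grows roughly like radius**d):

import numpy as np

rmin, rmax, nradii = 0.1, 10.0, 5
radii = 10 ** np.linspace(np.log10(rmin), np.log10(rmax), nradii)
print(radii)  # [ 0.1  0.3162...  1.  3.1622...  10. ]

# Fake neighbor counts from a 2-d manifold: avg_neighbors ~ C * r**2,
# so the slope of the log-log fit recovers the dimension.
avg_neighbors = 3.0 * radii ** 2
dim = np.polyfit(np.log(radii), np.log(avg_neighbors), 1)[0]
print(round(dim, 2))  # 2.0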
dwavesystems/minorminer
examples/fourcolor.py
https://github.com/dwavesystems/minorminer/blob/05cac6db180adf8223a613dff808248e3048b07d/examples/fourcolor.py#L43-L70
def graph_coloring_qubo(graph, k): """ the QUBO for k-coloring a graph A is as follows: variables: x_{v,c} = 1 if vertex v of A gets color c; x_{v,c} = 0 otherwise constraints: 1) each v in A gets exactly one color. This constraint is enforced by including the term (\sum_c x_{v,c} - 1)^2 in the QUBO, which is minimized when \sum_c x_{v,c} = 1. 2) If u and v in A are adjacent, then they get different colors. This constraint is enforced by including terms x_{v,c} x_{u,c} in the QUBO, which is minimized when at most one of u and v gets color c. Total QUBO: Q(x) = \sum_v (\sum_c x_{v,c} - 1)^2 + \sum_{u ~ v} \sum_c x_{v,c} x_{u,c} The graph of interactions for this QUBO consists of cliques of size k (with vertices {x_{v,c} for c = 0,...,k-1}) plus k disjoint copies of the graph A (one for each color). """ K = nx.complete_graph(k) g1 = nx.cartesian_product(nx.create_empty_copy(graph), K) g2 = nx.cartesian_product(graph, nx.create_empty_copy(K)) return nx.compose(g1, g2)
[ "def", "graph_coloring_qubo", "(", "graph", ",", "k", ")", ":", "K", "=", "nx", ".", "complete_graph", "(", "k", ")", "g1", "=", "nx", ".", "cartesian_product", "(", "nx", ".", "create_empty_copy", "(", "graph", ")", ",", "K", ")", "g2", "=", "nx", ".", "cartesian_product", "(", "graph", ",", "nx", ".", "create_empty_copy", "(", "K", ")", ")", "return", "nx", ".", "compose", "(", "g1", ",", "g2", ")" ]
the QUBO for k-coloring a graph A is as follows: variables: x_{v,c} = 1 if vertex v of A gets color c; x_{v,c} = 0 otherwise constraints: 1) each v in A gets exactly one color. This constraint is enforced by including the term (\sum_c x_{v,c} - 1)^2 in the QUBO, which is minimized when \sum_c x_{v,c} = 1. 2) If u and v in A are adjacent, then they get different colors. This constraint is enforced by including terms x_{v,c} x_{u,c} in the QUBO, which is minimized when at most one of u and v gets color c. Total QUBO: Q(x) = \sum_v (\sum_c x_{v,c} - 1)^2 + \sum_{u ~ v} \sum_c x_{v,c} x_{u,c} The graph of interactions for this QUBO consists of cliques of size k (with vertices {x_{v,c} for c = 0,...,k-1}) plus k disjoint copies of the graph A (one for each color).
[ "the", "QUBO", "for", "k", "-", "coloring", "a", "graph", "A", "is", "as", "follows", ":" ]
python
test
37.535714
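Running the construction above on the smallest interesting case, 3-coloring a triangle: each of the 3 vertices contributes a K_3 clique (one color per vertex) and each of the 3 colors contributes a copy of the triangle, giving 9 QUBO variables and 9 + 9 = 18 interaction edges.

import networkx as nx

def graph_coloring_qubo(graph, k):
    K = nx.complete_graph(k)
    g1 = nx.cartesian_product(nx.create_empty_copy(graph), K)  # per-vertex color cliques
    g2 = nx.cartesian_product(graph, nx.create_empty_copy(K))  # per-color copies of the graph
    return nx.compose(g1, g2)

qubo = graph_coloring_qubo(nx.cycle_graph(3), 3)       # a triangle
print(qubo.number_of_nodes(), qubo.number_of_edges())  # 9 18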
genialis/resolwe
resolwe/flow/engine.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/engine.py#L29-L86
def load_engines(manager, class_name, base_module, engines, class_key='ENGINE', engine_type='engine'): """Load engines.""" loaded_engines = {} for module_name_or_dict in engines: if not isinstance(module_name_or_dict, dict): module_name_or_dict = { class_key: module_name_or_dict } try: module_name = module_name_or_dict[class_key] engine_settings = module_name_or_dict except KeyError: raise ImproperlyConfigured("If {} specification is a dictionary, it must define {}".format( engine_type, class_key)) try: engine_module = import_module(module_name) try: engine = getattr(engine_module, class_name)(manager=manager, settings=engine_settings) if not isinstance(engine, BaseEngine): raise ImproperlyConfigured("{} module {} class {} must extend BaseEngine".format( engine_type.capitalize(), module_name, class_name)) except AttributeError: raise ImproperlyConfigured("{} module {} is missing a {} class".format( engine_type.capitalize(), module_name, class_name)) if engine.get_name() in loaded_engines: raise ImproperlyConfigured("Duplicated {} {}".format(engine_type, engine.get_name())) loaded_engines[engine.get_name()] = engine except ImportError as ex: # The engine wasn't found. Display a helpful error message listing all possible # (built-in) engines. engine_dir = os.path.join(os.path.dirname(upath(__file__)), base_module) try: builtin_engines = [name for _, name, _ in pkgutil.iter_modules([engine_dir])] except EnvironmentError: builtin_engines = [] if module_name not in ['resolwe.flow.{}.{}'.format(base_module, builtin_engine) for builtin_engine in builtin_engines]: engine_reprs = map(repr, sorted(builtin_engines)) error_msg = ("{} isn't an available dataflow {}.\n" "Try using 'resolwe.flow.{}.XXX', where XXX is one of:\n" " {}\n" "Error was: {}".format( module_name, engine_type, base_module, ", ".join(engine_reprs), ex )) raise ImproperlyConfigured(error_msg) else: # If there's some other error, this must be an error in Django raise return loaded_engines
[ "def", "load_engines", "(", "manager", ",", "class_name", ",", "base_module", ",", "engines", ",", "class_key", "=", "'ENGINE'", ",", "engine_type", "=", "'engine'", ")", ":", "loaded_engines", "=", "{", "}", "for", "module_name_or_dict", "in", "engines", ":", "if", "not", "isinstance", "(", "module_name_or_dict", ",", "dict", ")", ":", "module_name_or_dict", "=", "{", "class_key", ":", "module_name_or_dict", "}", "try", ":", "module_name", "=", "module_name_or_dict", "[", "class_key", "]", "engine_settings", "=", "module_name_or_dict", "except", "KeyError", ":", "raise", "ImproperlyConfigured", "(", "\"If {} specification is a dictionary, it must define {}\"", ".", "format", "(", "engine_type", ",", "class_key", ")", ")", "try", ":", "engine_module", "=", "import_module", "(", "module_name", ")", "try", ":", "engine", "=", "getattr", "(", "engine_module", ",", "class_name", ")", "(", "manager", "=", "manager", ",", "settings", "=", "engine_settings", ")", "if", "not", "isinstance", "(", "engine", ",", "BaseEngine", ")", ":", "raise", "ImproperlyConfigured", "(", "\"{} module {} class {} must extend BaseEngine\"", ".", "format", "(", "engine_type", ".", "capitalize", "(", ")", ",", "module_name", ",", "class_name", ")", ")", "except", "AttributeError", ":", "raise", "ImproperlyConfigured", "(", "\"{} module {} is missing a {} class\"", ".", "format", "(", "engine_type", ".", "capitalize", "(", ")", ",", "module_name", ",", "class_name", ")", ")", "if", "engine", ".", "get_name", "(", ")", "in", "loaded_engines", ":", "raise", "ImproperlyConfigured", "(", "\"Duplicated {} {}\"", ".", "format", "(", "engine_type", ",", "engine", ".", "get_name", "(", ")", ")", ")", "loaded_engines", "[", "engine", ".", "get_name", "(", ")", "]", "=", "engine", "except", "ImportError", "as", "ex", ":", "# The engine wasn't found. Display a helpful error message listing all possible", "# (built-in) engines.", "engine_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "upath", "(", "__file__", ")", ")", ",", "base_module", ")", "try", ":", "builtin_engines", "=", "[", "name", "for", "_", ",", "name", ",", "_", "in", "pkgutil", ".", "iter_modules", "(", "[", "engine_dir", "]", ")", "]", "except", "EnvironmentError", ":", "builtin_engines", "=", "[", "]", "if", "module_name", "not", "in", "[", "'resolwe.flow.{}.{}'", ".", "format", "(", "base_module", ",", "builtin_engine", ")", "for", "builtin_engine", "in", "builtin_engines", "]", ":", "engine_reprs", "=", "map", "(", "repr", ",", "sorted", "(", "builtin_engines", ")", ")", "error_msg", "=", "(", "\"{} isn't an available dataflow {}.\\n\"", "\"Try using 'resolwe.flow.{}.XXX', where XXX is one of:\\n\"", "\" {}\\n\"", "\"Error was: {}\"", ".", "format", "(", "module_name", ",", "engine_type", ",", "base_module", ",", "\", \"", ".", "join", "(", "engine_reprs", ")", ",", "ex", ")", ")", "raise", "ImproperlyConfigured", "(", "error_msg", ")", "else", ":", "# If there's some other error, this must be an error in Django", "raise", "return", "loaded_engines" ]
Load engines.
[ "Load", "engines", "." ]
python
train
45.689655
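The first normalization step of the loader above, in isolation: an entry may be a bare module path or a dict keyed by ENGINE carrying extra settings. The module names below are hypothetical placeholders.

engines = [
    'resolwe.flow.executors.local',                              # plain string form
    {'ENGINE': 'resolwe.flow.executors.docker', 'DEBUG': True},  # dict form with settings
]

for entry in engines:
    if not isinstance(entry, dict):
        entry = {'ENGINE': entry}
    print(entry['ENGINE'], entry)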
NASA-AMMOS/AIT-Core
ait/core/bin/ait_create_dirs.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/bin/ait_create_dirs.py#L129-L159
def createDirStruct(paths, verbose=True): '''Loops ait.config._datapaths from AIT_CONFIG and creates a directory. Replaces year and doy with the respective year and day-of-year. If neither are given as arguments, current UTC day and year are used. Args: paths: [optional] list of directory paths you would like to create. doy and year will be replaced by the datetime day and year, respectively. datetime: UTC Datetime string in ISO 8601 Format YYYY-MM-DDTHH:mm:ssZ ''' for k, path in paths.items(): p = None try: pathlist = path if type(path) is list else [ path ] for p in pathlist: os.makedirs(p) if verbose: log.info('Creating directory: ' + p) except OSError, e: #print path if e.errno == errno.EEXIST and os.path.isdir(p): pass else: raise return True
[ "def", "createDirStruct", "(", "paths", ",", "verbose", "=", "True", ")", ":", "for", "k", ",", "path", "in", "paths", ".", "items", "(", ")", ":", "p", "=", "None", "try", ":", "pathlist", "=", "path", "if", "type", "(", "path", ")", "is", "list", "else", "[", "path", "]", "for", "p", "in", "pathlist", ":", "os", ".", "makedirs", "(", "p", ")", "if", "verbose", ":", "log", ".", "info", "(", "'Creating directory: '", "+", "p", ")", "except", "OSError", ",", "e", ":", "#print path", "if", "e", ".", "errno", "==", "errno", ".", "EEXIST", "and", "os", ".", "path", ".", "isdir", "(", "p", ")", ":", "pass", "else", ":", "raise", "return", "True" ]
Loops ait.config._datapaths from AIT_CONFIG and creates a directory. Replaces year and doy with the respective year and day-of-year. If neither are given as arguments, current UTC day and year are used. Args: paths: [optional] list of directory paths you would like to create. doy and year will be replaced by the datetime day and year, respectively. datetime: UTC Datetime string in ISO 8601 Format YYYY-MM-DDTHH:mm:ssZ
[ "Loops", "ait", ".", "config", ".", "_datapaths", "from", "AIT_CONFIG", "and", "creates", "a", "directory", "." ]
python
train
31.612903
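The try/except errno.EEXIST dance above is the Python 2 idiom (note the Python 2 `except OSError, e:` syntax); on Python 3 the same create-if-missing behaviour is a single call. A sketch, not the project's actual code:

import os

def create_dir_struct_py3(paths, verbose=True):
    for path in paths.values():
        for p in (path if isinstance(path, list) else [path]):
            os.makedirs(p, exist_ok=True)  # no error if the directory already exists
            if verbose:
                print('Creating directory: ' + p)
    return True

create_dir_struct_py3({'data': ['/tmp/ait/2024', '/tmp/ait/2024/001']})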
pybel/pybel
src/pybel/io/web.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/io/web.py#L52-L100
def to_web(graph: BELGraph, host: Optional[str] = None, user: Optional[str] = None, password: Optional[str] = None, public: bool = False, ) -> requests.Response: """Send a graph to the receiver service and returns the :mod:`requests` response object. :param graph: A BEL graph :param host: The location of the BEL Commons server. Alternatively, looks up in PyBEL config with ``PYBEL_REMOTE_HOST`` or the environment as ``PYBEL_REMOTE_HOST`` Defaults to :data:`pybel.constants.DEFAULT_SERVICE_URL` :param user: Username for BEL Commons. Alternatively, looks up in PyBEL config with ``PYBEL_REMOTE_USER`` or the environment as ``PYBEL_REMOTE_USER`` :param password: Password for BEL Commons. Alternatively, looks up in PyBEL config with ``PYBEL_REMOTE_PASSWORD`` or the environment as ``PYBEL_REMOTE_PASSWORD`` :return: The response object from :mod:`requests` """ if host is None: host = _get_host() log.debug('using host: %s', host) if user is None: user = _get_user() if user is None: raise ValueError('no user found') if password is None: password = _get_password() if password is None: raise ValueError('no password found') url = host.rstrip('/') + RECIEVE_ENDPOINT response = requests.post( url, json=to_json(graph), headers={ 'content-type': 'application/json', 'User-Agent': 'PyBEL v{}'.format(get_version()), 'bel-commons-public': 'true' if public else 'false', }, auth=(user, password), ) log.debug('received response: %s', response) return response
[ "def", "to_web", "(", "graph", ":", "BELGraph", ",", "host", ":", "Optional", "[", "str", "]", "=", "None", ",", "user", ":", "Optional", "[", "str", "]", "=", "None", ",", "password", ":", "Optional", "[", "str", "]", "=", "None", ",", "public", ":", "bool", "=", "False", ",", ")", "->", "requests", ".", "Response", ":", "if", "host", "is", "None", ":", "host", "=", "_get_host", "(", ")", "log", ".", "debug", "(", "'using host: %s'", ",", "host", ")", "if", "user", "is", "None", ":", "user", "=", "_get_user", "(", ")", "if", "user", "is", "None", ":", "raise", "ValueError", "(", "'no user found'", ")", "if", "password", "is", "None", ":", "password", "=", "_get_password", "(", ")", "if", "password", "is", "None", ":", "raise", "ValueError", "(", "'no password found'", ")", "url", "=", "host", ".", "rstrip", "(", "'/'", ")", "+", "RECIEVE_ENDPOINT", "response", "=", "requests", ".", "post", "(", "url", ",", "json", "=", "to_json", "(", "graph", ")", ",", "headers", "=", "{", "'content-type'", ":", "'application/json'", ",", "'User-Agent'", ":", "'PyBEL v{}'", ".", "format", "(", "get_version", "(", ")", ")", ",", "'bel-commons-public'", ":", "'true'", "if", "public", "else", "'false'", ",", "}", ",", "auth", "=", "(", "user", ",", "password", ")", ",", ")", "log", ".", "debug", "(", "'received response: %s'", ",", "response", ")", "return", "response" ]
Send a graph to the receiver service and returns the :mod:`requests` response object. :param graph: A BEL graph :param host: The location of the BEL Commons server. Alternatively, looks up in PyBEL config with ``PYBEL_REMOTE_HOST`` or the environment as ``PYBEL_REMOTE_HOST`` Defaults to :data:`pybel.constants.DEFAULT_SERVICE_URL` :param user: Username for BEL Commons. Alternatively, looks up in PyBEL config with ``PYBEL_REMOTE_USER`` or the environment as ``PYBEL_REMOTE_USER`` :param password: Password for BEL Commons. Alternatively, looks up in PyBEL config with ``PYBEL_REMOTE_PASSWORD`` or the environment as ``PYBEL_REMOTE_PASSWORD`` :return: The response object from :mod:`requests`
[ "Send", "a", "graph", "to", "the", "receiver", "service", "and", "returns", "the", ":", "mod", ":", "requests", "response", "object", "." ]
python
train
34.693878
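A hypothetical call to the sender above; the host, user, and password are placeholders, and this assumes to_web is re-exported at the pybel package level like the library's other I/O helpers:

import pybel

graph = pybel.BELGraph(name='example graph', version='0.0.1')
response = pybel.to_web(
    graph,
    host='https://bel-commons.example.org',   # placeholder server
    user='alice@example.org',                 # placeholder credentials
    password='s3cret',
)
print(response.status_code)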
Scifabric/pybossa-client
pbclient/__init__.py
https://github.com/Scifabric/pybossa-client/blob/998d7cb0207ff5030dc800f0c2577c5692316c2c/pbclient/__init__.py#L260-L277
def update_project(project): """Update a project instance. :param project: PYBOSSA project :type project: PYBOSSA Project :returns: True -- the response status code """ try: project_id = project.id project = _forbidden_attributes(project) res = _pybossa_req('put', 'project', project_id, payload=project.data) if res.get('id'): return Project(res) else: return res except: # pragma: no cover raise
[ "def", "update_project", "(", "project", ")", ":", "try", ":", "project_id", "=", "project", ".", "id", "project", "=", "_forbidden_attributes", "(", "project", ")", "res", "=", "_pybossa_req", "(", "'put'", ",", "'project'", ",", "project_id", ",", "payload", "=", "project", ".", "data", ")", "if", "res", ".", "get", "(", "'id'", ")", ":", "return", "Project", "(", "res", ")", "else", ":", "return", "res", "except", ":", "# pragma: no cover", "raise" ]
Update a project instance. :param project: PYBOSSA project :type project: PYBOSSA Project :returns: True -- the response status code
[ "Update", "a", "project", "instance", "." ]
python
valid
26.833333
noahbenson/pimms
pimms/immutable.py
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/immutable.py#L247-L255
def _imm_dir(self): ''' An immutable object's dir function should list not only its attributes, but also its un-cached lazy values. ''' dir0 = set(dir(self.__class__)) dir0.update(self.__dict__.keys()) dir0.update(six.iterkeys(_imm_value_data(self))) return sorted(list(dir0))
[ "def", "_imm_dir", "(", "self", ")", ":", "dir0", "=", "set", "(", "dir", "(", "self", ".", "__class__", ")", ")", "dir0", ".", "update", "(", "self", ".", "__dict__", ".", "keys", "(", ")", ")", "dir0", ".", "update", "(", "six", ".", "iterkeys", "(", "_imm_value_data", "(", "self", ")", ")", ")", "return", "sorted", "(", "list", "(", "dir0", ")", ")" ]
An immutable object's dir function should list not only its attributes, but also its un-cached lazy values.
[ "An", "immutable", "object", "s", "dir", "function", "should", "list", "not", "only", "its", "attributes", "but", "also", "its", "un", "-", "cached", "lazy", "values", "." ]
python
train
33.333333
ianmiell/shutit
shutit_class.py
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L1785-L1796
def step_through(self, msg='', shutit_pexpect_child=None, level=1, print_input=True, value=True): """Implements a step-through function, using pause_point. """ shutit_global.shutit_global_object.yield_to_draw() shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child) if (not shutit_global.shutit_global_object.determine_interactive() or not shutit_global.shutit_global_object.interactive or shutit_global.shutit_global_object.interactive < level): return True self.build['step_through'] = value shutit_pexpect_session.pause_point(msg, print_input=print_input, level=level) return True
[ "def", "step_through", "(", "self", ",", "msg", "=", "''", ",", "shutit_pexpect_child", "=", "None", ",", "level", "=", "1", ",", "print_input", "=", "True", ",", "value", "=", "True", ")", ":", "shutit_global", ".", "shutit_global_object", ".", "yield_to_draw", "(", ")", "shutit_pexpect_child", "=", "shutit_pexpect_child", "or", "self", ".", "get_current_shutit_pexpect_session", "(", ")", ".", "pexpect_child", "shutit_pexpect_session", "=", "self", ".", "get_shutit_pexpect_session_from_child", "(", "shutit_pexpect_child", ")", "if", "(", "not", "shutit_global", ".", "shutit_global_object", ".", "determine_interactive", "(", ")", "or", "not", "shutit_global", ".", "shutit_global_object", ".", "interactive", "or", "shutit_global", ".", "shutit_global_object", ".", "interactive", "<", "level", ")", ":", "return", "True", "self", ".", "build", "[", "'step_through'", "]", "=", "value", "shutit_pexpect_session", ".", "pause_point", "(", "msg", ",", "print_input", "=", "print_input", ",", "level", "=", "level", ")", "return", "True" ]
Implements a step-through function, using pause_point.
[ "Implements", "a", "step", "-", "through", "function", "using", "pause_point", "." ]
python
train
61.166667
hollenstein/maspy
maspy/xml.py
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/xml.py#L491-L501
def parseSpectra(self): """ #TODO: docstring :returns: #TODO: docstring """ #Note: the spectra need to be iterated completely to save the #metadataNode if self._parsed: raise TypeError('Mzml file already parsed.') self._parsed = True return self._parseMzml()
[ "def", "parseSpectra", "(", "self", ")", ":", "#Note: the spectra need to be iterated completely to save the", "#metadataNode", "if", "self", ".", "_parsed", ":", "raise", "TypeError", "(", "'Mzml file already parsed.'", ")", "self", ".", "_parsed", "=", "True", "return", "self", ".", "_parseMzml", "(", ")" ]
#TODO: docstring :returns: #TODO: docstring
[ "#TODO", ":", "docstring" ]
python
train
29.545455
phaethon/kamene
kamene/layers/inet6.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/layers/inet6.py#L1899-L1926
def names2dnsrepr(x): """ Take as input a list of DNS names or a single DNS name and encode it in DNS format (with possible compression) If a string that is already a DNS name in DNS format is passed, it is returned unmodified. Result is a string. !!! At the moment, compression is not implemented !!! """ if type(x) is str: if x and x[-1] == '\x00': # stupid heuristic return x.encode('ascii') x = [x.encode('ascii')] elif type(x) is bytes: if x and x[-1] == 0: return x x = [x] res = [] for n in x: if type(n) is str: n = n.encode('ascii') termin = b"\x00" if n.count(b'.') == 0: # single-component gets one more termin += bytes([0]) n = b"".join(map(lambda y: chr(len(y)).encode('ascii')+y, n.split(b"."))) + termin res.append(n) return b"".join(res)
[ "def", "names2dnsrepr", "(", "x", ")", ":", "if", "type", "(", "x", ")", "is", "str", ":", "if", "x", "and", "x", "[", "-", "1", "]", "==", "'\\x00'", ":", "# stupid heuristic", "return", "x", ".", "encode", "(", "'ascii'", ")", "x", "=", "[", "x", ".", "encode", "(", "'ascii'", ")", "]", "elif", "type", "(", "x", ")", "is", "bytes", ":", "if", "x", "and", "x", "[", "-", "1", "]", "==", "0", ":", "return", "x", "x", "=", "[", "x", "]", "res", "=", "[", "]", "for", "n", "in", "x", ":", "if", "type", "(", "n", ")", "is", "str", ":", "n", "=", "n", ".", "encode", "(", "'ascii'", ")", "termin", "=", "b\"\\x00\"", "if", "n", ".", "count", "(", "b'.'", ")", "==", "0", ":", "# single-component gets one more", "termin", "+=", "bytes", "(", "[", "0", "]", ")", "n", "=", "b\"\"", ".", "join", "(", "map", "(", "lambda", "y", ":", "chr", "(", "len", "(", "y", ")", ")", ".", "encode", "(", "'ascii'", ")", "+", "y", ",", "n", ".", "split", "(", "b\".\"", ")", ")", ")", "+", "termin", "res", ".", "append", "(", "n", ")", "return", "b\"\"", ".", "join", "(", "res", ")" ]
Take as input a list of DNS names or a single DNS name and encode it in DNS format (with possible compression) If a string that is already a DNS name in DNS format is passed, it is returned unmodified. Result is a string. !!! At the moment, compression is not implemented !!!
[ "Take", "as", "input", "a", "list", "of", "DNS", "names", "or", "a", "single", "DNS", "name", "and", "encode", "it", "in", "DNS", "format", "(", "with", "possible", "compression", ")", "If", "a", "string", "that", "is", "already", "a", "DNS", "name", "in", "DNS", "format", "is", "passed", "it", "is", "returned", "unmodified", ".", "Result", "is", "a", "string", ".", "!!!", "At", "the", "moment", "compression", "is", "not", "implemented", "!!!" ]
python
train
32.25
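The wire format produced above, as a minimal standalone encoder: each label is prefixed with its length byte and the name ends with a zero byte. This sketch skips the single-component quirk handled in the function, where a name with no dots gets one extra terminator byte.

def dns_encode(name):
    return b''.join(
        bytes([len(label)]) + label.encode('ascii')
        for label in name.split('.')
    ) + b'\x00'

print(dns_encode('www.example.com'))  # b'\x03www\x07example\x03com\x00'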
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/completer.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/completer.py#L642-L665
def python_matches(self,text): """Match attributes or global python names""" #io.rprint('Completer->python_matches, txt=%r' % text) # dbg if "." in text: try: matches = self.attr_matches(text) if text.endswith('.') and self.omit__names: if self.omit__names == 1: # true if txt is _not_ a __ name, false otherwise: no__name = (lambda txt: re.match(r'.*\.__.*?__',txt) is None) else: # true if txt is _not_ a _ name, false otherwise: no__name = (lambda txt: re.match(r'.*\._.*?',txt) is None) matches = filter(no__name, matches) except NameError: # catches <undefined attributes>.<tab> matches = [] else: matches = self.global_matches(text) return matches
[ "def", "python_matches", "(", "self", ",", "text", ")", ":", "#io.rprint('Completer->python_matches, txt=%r' % text) # dbg", "if", "\".\"", "in", "text", ":", "try", ":", "matches", "=", "self", ".", "attr_matches", "(", "text", ")", "if", "text", ".", "endswith", "(", "'.'", ")", "and", "self", ".", "omit__names", ":", "if", "self", ".", "omit__names", "==", "1", ":", "# true if txt is _not_ a __ name, false otherwise:", "no__name", "=", "(", "lambda", "txt", ":", "re", ".", "match", "(", "r'.*\\.__.*?__'", ",", "txt", ")", "is", "None", ")", "else", ":", "# true if txt is _not_ a _ name, false otherwise:", "no__name", "=", "(", "lambda", "txt", ":", "re", ".", "match", "(", "r'.*\\._.*?'", ",", "txt", ")", "is", "None", ")", "matches", "=", "filter", "(", "no__name", ",", "matches", ")", "except", "NameError", ":", "# catches <undefined attributes>.<tab>", "matches", "=", "[", "]", "else", ":", "matches", "=", "self", ".", "global_matches", "(", "text", ")", "return", "matches" ]
Match attributes or global python names
[ "Match", "attributes", "or", "global", "python", "names" ]
python
test
42
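The omit__names filtering above in isolation: level 1 drops only dunder attributes after the dot, while the level-2 branch drops every underscore-prefixed one.

import re

matches = ['obj.__init__', 'obj._private', 'obj.public']
no_dunder = lambda txt: re.match(r'.*\.__.*?__', txt) is None   # omit__names == 1
no_under = lambda txt: re.match(r'.*\._.*?', txt) is None       # omit__names == 2
print(list(filter(no_dunder, matches)))  # ['obj._private', 'obj.public']
print(list(filter(no_under, matches)))   # ['obj.public']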
mar10/wsgidav
wsgidav/lock_storage.py
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/lock_storage.py#L356-L363
def _flush(self): """Write persistent dictionary to disc.""" _logger.debug("_flush()") self._lock.acquire_write() # TODO: read access is enough? try: self._dict.sync() finally: self._lock.release()
[ "def", "_flush", "(", "self", ")", ":", "_logger", ".", "debug", "(", "\"_flush()\"", ")", "self", ".", "_lock", ".", "acquire_write", "(", ")", "# TODO: read access is enough?", "try", ":", "self", ".", "_dict", ".", "sync", "(", ")", "finally", ":", "self", ".", "_lock", ".", "release", "(", ")" ]
Write persistent dictionary to disc.
[ "Write", "persistent", "dictionary", "to", "disc", "." ]
python
valid
31.875
pmacosta/peng
peng/functions.py
https://github.com/pmacosta/peng/blob/976935377adaa3de26fc5677aceb2cdfbd6f93a7/peng/functions.py#L571-L595
def peng_mant(snum): r""" Return the mantissa of a number represented in engineering notation. :param snum: Number :type snum: :ref:`EngineeringNotationNumber` :rtype: float .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]] .. Auto-generated exceptions documentation for .. peng.functions.peng_mant :raises: RuntimeError (Argument \`snum\` is not valid) .. [[[end]]] For example: >>> import peng >>> peng.peng_mant(peng.peng(1235.6789E3, 3, False)) 1.236 """ snum = snum.rstrip() return float(snum if snum[-1].isdigit() else snum[:-1])
[ "def", "peng_mant", "(", "snum", ")", ":", "snum", "=", "snum", ".", "rstrip", "(", ")", "return", "float", "(", "snum", "if", "snum", "[", "-", "1", "]", ".", "isdigit", "(", ")", "else", "snum", "[", ":", "-", "1", "]", ")" ]
r""" Return the mantissa of a number represented in engineering notation. :param snum: Number :type snum: :ref:`EngineeringNotationNumber` :rtype: float .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]] .. Auto-generated exceptions documentation for .. peng.functions.peng_mant :raises: RuntimeError (Argument \`snum\` is not valid) .. [[[end]]] For example: >>> import peng >>> peng.peng_mant(peng.peng(1235.6789E3, 3, False)) 1.236
[ "r", "Return", "the", "mantissa", "of", "a", "number", "represented", "in", "engineering", "notation", "." ]
python
test
24.12
nuagenetworks/monolithe
monolithe/generators/lang/csharp/writers/apiversionwriter.py
https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/csharp/writers/apiversionwriter.py#L142-L173
def _write_model(self, specification, specification_set): """ Write autogenerate specification file """ filename = "vspk/%s%s.cs" % (self._class_prefix, specification.entity_name) override_content = self._extract_override_content(specification.entity_name) superclass_name = "RestObject" defaults = {} section = specification.entity_name if self.attrs_defaults.has_section(section): for attribute in self.attrs_defaults.options(section): defaults[attribute] = self.attrs_defaults.get(section, attribute) self.write(destination=self.output_directory, filename=filename, template_name="model.cs.tpl", specification=specification, specification_set=specification_set, version=self.api_version, name=self._name, class_prefix=self._class_prefix, product_accronym=self._product_accronym, override_content=override_content, superclass_name=superclass_name, header=self.header_content, version_string=self._api_version_string, package_name=self._package_name, attribute_defaults=defaults) return (filename, specification.entity_name)
[ "def", "_write_model", "(", "self", ",", "specification", ",", "specification_set", ")", ":", "filename", "=", "\"vspk/%s%s.cs\"", "%", "(", "self", ".", "_class_prefix", ",", "specification", ".", "entity_name", ")", "override_content", "=", "self", ".", "_extract_override_content", "(", "specification", ".", "entity_name", ")", "superclass_name", "=", "\"RestObject\"", "defaults", "=", "{", "}", "section", "=", "specification", ".", "entity_name", "if", "self", ".", "attrs_defaults", ".", "has_section", "(", "section", ")", ":", "for", "attribute", "in", "self", ".", "attrs_defaults", ".", "options", "(", "section", ")", ":", "defaults", "[", "attribute", "]", "=", "self", ".", "attrs_defaults", ".", "get", "(", "section", ",", "attribute", ")", "self", ".", "write", "(", "destination", "=", "self", ".", "output_directory", ",", "filename", "=", "filename", ",", "template_name", "=", "\"model.cs.tpl\"", ",", "specification", "=", "specification", ",", "specification_set", "=", "specification_set", ",", "version", "=", "self", ".", "api_version", ",", "name", "=", "self", ".", "_name", ",", "class_prefix", "=", "self", ".", "_class_prefix", ",", "product_accronym", "=", "self", ".", "_product_accronym", ",", "override_content", "=", "override_content", ",", "superclass_name", "=", "superclass_name", ",", "header", "=", "self", ".", "header_content", ",", "version_string", "=", "self", ".", "_api_version_string", ",", "package_name", "=", "self", ".", "_package_name", ",", "attribute_defaults", "=", "defaults", ")", "return", "(", "filename", ",", "specification", ".", "entity_name", ")" ]
Write autogenerate specification file
[ "Write", "autogenerate", "specification", "file" ]
python
train
42.90625
pydata/xarray
xarray/backends/common.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/backends/common.py#L307-L338
def set_dimensions(self, variables, unlimited_dims=None): """ This provides a centralized method to set the dimensions on the data store. Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs unlimited_dims : list-like List of dimension names that should be treated as unlimited dimensions. """ if unlimited_dims is None: unlimited_dims = set() existing_dims = self.get_dimensions() dims = OrderedDict() for v in unlimited_dims: # put unlimited_dims first dims[v] = None for v in variables.values(): dims.update(dict(zip(v.dims, v.shape))) for dim, length in dims.items(): if dim in existing_dims and length != existing_dims[dim]: raise ValueError( "Unable to update size for existing dimension" "%r (%d != %d)" % (dim, length, existing_dims[dim])) elif dim not in existing_dims: is_unlimited = dim in unlimited_dims self.set_dimension(dim, length, is_unlimited)
[ "def", "set_dimensions", "(", "self", ",", "variables", ",", "unlimited_dims", "=", "None", ")", ":", "if", "unlimited_dims", "is", "None", ":", "unlimited_dims", "=", "set", "(", ")", "existing_dims", "=", "self", ".", "get_dimensions", "(", ")", "dims", "=", "OrderedDict", "(", ")", "for", "v", "in", "unlimited_dims", ":", "# put unlimited_dims first", "dims", "[", "v", "]", "=", "None", "for", "v", "in", "variables", ".", "values", "(", ")", ":", "dims", ".", "update", "(", "dict", "(", "zip", "(", "v", ".", "dims", ",", "v", ".", "shape", ")", ")", ")", "for", "dim", ",", "length", "in", "dims", ".", "items", "(", ")", ":", "if", "dim", "in", "existing_dims", "and", "length", "!=", "existing_dims", "[", "dim", "]", ":", "raise", "ValueError", "(", "\"Unable to update size for existing dimension\"", "\"%r (%d != %d)\"", "%", "(", "dim", ",", "length", ",", "existing_dims", "[", "dim", "]", ")", ")", "elif", "dim", "not", "in", "existing_dims", ":", "is_unlimited", "=", "dim", "in", "unlimited_dims", "self", ".", "set_dimension", "(", "dim", ",", "length", ",", "is_unlimited", ")" ]
This provides a centralized method to set the dimensions on the data store. Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs unlimited_dims : list-like List of dimension names that should be treated as unlimited dimensions.
[ "This", "provides", "a", "centralized", "method", "to", "set", "the", "dimensions", "on", "the", "data", "store", "." ]
python
train
36.9375
pyQode/pyqode.core
pyqode/core/widgets/menu_recents.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/menu_recents.py#L74-L83
def set_value(self, key, value): """ Set the recent files value in QSettings. :param key: value key :param value: new value """ if value is None: value = [] value = [os.path.normpath(pth) for pth in value] self._settings.setValue('recent_files/%s' % key, value)
[ "def", "set_value", "(", "self", ",", "key", ",", "value", ")", ":", "if", "value", "is", "None", ":", "value", "=", "[", "]", "value", "=", "[", "os", ".", "path", ".", "normpath", "(", "pth", ")", "for", "pth", "in", "value", "]", "self", ".", "_settings", ".", "setValue", "(", "'recent_files/%s'", "%", "key", ",", "value", ")" ]
Set the recent files value in QSettings. :param key: value key :param value: new value
[ "Set", "the", "recent", "files", "value", "in", "QSettings", ".", ":", "param", "key", ":", "value", "key", ":", "param", "value", ":", "new", "value" ]
python
train
32.8
treethought/flask-assistant
flask_assistant/response.py
https://github.com/treethought/flask-assistant/blob/9331b9796644dfa987bcd97a13e78e9ab62923d3/flask_assistant/response.py#L161-L192
def build_list(self, title=None, items=None): """Presents the user with a vertical list of multiple items. Allows the user to select a single item. Selection generates a user query containing the title of the list item *Note* Returns a completely new object, and does not modify the existing response object Therefore, to add items, must be assigned to new variable or call the method directly after initializing list example usage: simple = ask('I speak this text') mylist = simple.build_list('List Title') mylist.add_item('Item1', 'key1') mylist.add_item('Item2', 'key2') return mylist Arguments: title {str} -- Title displayed at top of list card Returns: _ListSelector -- [_Response object exposing the add_item method] """ list_card = _ListSelector( self._speech, display_text=self._display_text, title=title, items=items ) return list_card
[ "def", "build_list", "(", "self", ",", "title", "=", "None", ",", "items", "=", "None", ")", ":", "list_card", "=", "_ListSelector", "(", "self", ".", "_speech", ",", "display_text", "=", "self", ".", "_display_text", ",", "title", "=", "title", ",", "items", "=", "items", ")", "return", "list_card" ]
Presents the user with a vertical list of multiple items. Allows the user to select a single item. Selection generates a user query containing the title of the list item *Note* Returns a completely new object, and does not modify the existing response object Therefore, to add items, must be assigned to new variable or call the method directly after initializing list example usage: simple = ask('I speak this text') mylist = simple.build_list('List Title') mylist.add_item('Item1', 'key1') mylist.add_item('Item2', 'key2') return mylist Arguments: title {str} -- Title displayed at top of list card Returns: _ListSelector -- [_Response object exposing the add_item method]
[ "Presents", "the", "user", "with", "a", "vertical", "list", "of", "multiple", "items", "." ]
python
train
32.21875
IDSIA/sacred
sacred/utils.py
https://github.com/IDSIA/sacred/blob/72633776bed9b5bddf93ae7d215188e61970973a/sacred/utils.py#L372-L395
def set_by_dotted_path(d, path, value): """ Set an entry in a nested dict using a dotted path. Will create dictionaries as needed. Examples -------- >>> d = {'foo': {'bar': 7}} >>> set_by_dotted_path(d, 'foo.bar', 10) >>> d {'foo': {'bar': 10}} >>> set_by_dotted_path(d, 'foo.d.baz', 3) >>> d {'foo': {'bar': 10, 'd': {'baz': 3}}} """ split_path = path.split('.') current_option = d for p in split_path[:-1]: if p not in current_option: current_option[p] = dict() current_option = current_option[p] current_option[split_path[-1]] = value
[ "def", "set_by_dotted_path", "(", "d", ",", "path", ",", "value", ")", ":", "split_path", "=", "path", ".", "split", "(", "'.'", ")", "current_option", "=", "d", "for", "p", "in", "split_path", "[", ":", "-", "1", "]", ":", "if", "p", "not", "in", "current_option", ":", "current_option", "[", "p", "]", "=", "dict", "(", ")", "current_option", "=", "current_option", "[", "p", "]", "current_option", "[", "split_path", "[", "-", "1", "]", "]", "=", "value" ]
Set an entry in a nested dict using a dotted path. Will create dictionaries as needed. Examples -------- >>> d = {'foo': {'bar': 7}} >>> set_by_dotted_path(d, 'foo.bar', 10) >>> d {'foo': {'bar': 10}} >>> set_by_dotted_path(d, 'foo.d.baz', 3) >>> d {'foo': {'bar': 10, 'd': {'baz': 3}}}
[ "Set", "an", "entry", "in", "a", "nested", "dict", "using", "a", "dotted", "path", "." ]
python
train
25.541667
saltstack/salt
salt/runners/state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/state.py#L43-L52
def soft_kill(jid, state_id=None): ''' Set up a state run to die before executing the given state id, this instructs a running state to safely exit at a given state id. This needs to pass in the jid of the running state. If a state_id is not passed then the jid referenced will be safely exited at the beginning of the next state run. ''' minion = salt.minion.MasterMinion(__opts__) minion.functions['state.soft_kill'](jid, state_id)
[ "def", "soft_kill", "(", "jid", ",", "state_id", "=", "None", ")", ":", "minion", "=", "salt", ".", "minion", ".", "MasterMinion", "(", "__opts__", ")", "minion", ".", "functions", "[", "'state.soft_kill'", "]", "(", "jid", ",", "state_id", ")" ]
Set up a state run to die before executing the given state id, this instructs a running state to safely exit at a given state id. This needs to pass in the jid of the running state. If a state_id is not passed then the jid referenced will be safely exited at the beginning of the next state run.
[ "Set", "up", "a", "state", "run", "to", "die", "before", "executing", "the", "given", "state", "id", "this", "instructs", "a", "running", "state", "to", "safely", "exit", "at", "a", "given", "state", "id", ".", "This", "needs", "to", "pass", "in", "the", "jid", "of", "the", "running", "state", ".", "If", "a", "state_id", "is", "not", "passed", "then", "the", "jid", "referenced", "will", "be", "safely", "exited", "at", "the", "beginning", "of", "the", "next", "state", "run", "." ]
python
train
46
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L40-L63
def _create_get_request(self, resource, billomat_id='', command=None, params=None): """ Creates a get request and returns the response data """ if not params: params = {} if not command: command = '' else: command = '/' + command assert (isinstance(resource, str)) if billomat_id: assert (isinstance(billomat_id, int) or isinstance(billomat_id, str)) if isinstance(billomat_id, int): billomat_id = str(billomat_id) response = self.session.get( url=self.api_url + resource + ('/' + billomat_id if billomat_id else '') + command, params=params, ) return self._handle_response(response)
[ "def", "_create_get_request", "(", "self", ",", "resource", ",", "billomat_id", "=", "''", ",", "command", "=", "None", ",", "params", "=", "None", ")", ":", "if", "not", "params", ":", "params", "=", "{", "}", "if", "not", "command", ":", "command", "=", "''", "else", ":", "command", "=", "'/'", "+", "command", "assert", "(", "isinstance", "(", "resource", ",", "str", ")", ")", "if", "billomat_id", ":", "assert", "(", "isinstance", "(", "billomat_id", ",", "int", ")", "or", "isinstance", "(", "billomat_id", ",", "str", ")", ")", "if", "isinstance", "(", "billomat_id", ",", "int", ")", ":", "billomat_id", "=", "str", "(", "billomat_id", ")", "response", "=", "self", ".", "session", ".", "get", "(", "url", "=", "self", ".", "api_url", "+", "resource", "+", "(", "'/'", "+", "billomat_id", "if", "billomat_id", "else", "''", ")", "+", "command", ",", "params", "=", "params", ",", ")", "return", "self", ".", "_handle_response", "(", "response", ")" ]
Creates a get request and returns the response data
[ "Creates", "a", "get", "request", "and", "returns", "the", "response", "data" ]
python
train
31.291667
all-umass/graphs
graphs/base/base.py
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/base/base.py#L94-L110
def reweight_by_distance(self, coords, metric='l2', copy=False): '''Replaces existing edge weights by distances between connected vertices. The new weight of edge (i,j) is given by: metric(coords[i], coords[j]). coords : (num_vertices x d) array of coordinates, in vertex order metric : str or callable, see sklearn.metrics.pairwise.paired_distances''' if not self.is_weighted(): warnings.warn('Cannot supply weights for unweighted graph; ' 'ignoring call to reweight_by_distance') return self # TODO: take advantage of symmetry of metric function ii, jj = self.pairs().T if metric == 'precomputed': assert coords.ndim == 2 and coords.shape[0] == coords.shape[1] d = coords[ii,jj] else: d = paired_distances(coords[ii], coords[jj], metric=metric) return self._update_edges(d, copy=copy)
[ "def", "reweight_by_distance", "(", "self", ",", "coords", ",", "metric", "=", "'l2'", ",", "copy", "=", "False", ")", ":", "if", "not", "self", ".", "is_weighted", "(", ")", ":", "warnings", ".", "warn", "(", "'Cannot supply weights for unweighted graph; '", "'ignoring call to reweight_by_distance'", ")", "return", "self", "# TODO: take advantage of symmetry of metric function", "ii", ",", "jj", "=", "self", ".", "pairs", "(", ")", ".", "T", "if", "metric", "==", "'precomputed'", ":", "assert", "coords", ".", "ndim", "==", "2", "and", "coords", ".", "shape", "[", "0", "]", "==", "coords", ".", "shape", "[", "1", "]", "d", "=", "coords", "[", "ii", ",", "jj", "]", "else", ":", "d", "=", "paired_distances", "(", "coords", "[", "ii", "]", ",", "coords", "[", "jj", "]", ",", "metric", "=", "metric", ")", "return", "self", ".", "_update_edges", "(", "d", ",", "copy", "=", "copy", ")" ]
Replaces existing edge weights by distances between connected vertices. The new weight of edge (i,j) is given by: metric(coords[i], coords[j]). coords : (num_vertices x d) array of coordinates, in vertex order metric : str or callable, see sklearn.metrics.pairwise.paired_distances
[ "Replaces", "existing", "edge", "weights", "by", "distances", "between", "connected", "vertices", ".", "The", "new", "weight", "of", "edge", "(", "i", "j", ")", "is", "given", "by", ":", "metric", "(", "coords", "[", "i", "]", "coords", "[", "j", "]", ")", ".", "coords", ":", "(", "num_vertices", "x", "d", ")", "array", "of", "coordinates", "in", "vertex", "order", "metric", ":", "str", "or", "callable", "see", "sklearn", ".", "metrics", ".", "pairwise", ".", "paired_distances" ]
python
train
50.588235
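The non-precomputed branch above in isolation: paired_distances computes one distance per edge by pairing coords[ii] with coords[jj] row by row (requires scikit-learn).

import numpy as np
from sklearn.metrics.pairwise import paired_distances

coords = np.array([[0.0, 0.0], [3.0, 4.0], [3.0, 0.0]])
ii = np.array([0, 0])   # edge endpoints: (0, 1) and (0, 2)
jj = np.array([1, 2])
print(paired_distances(coords[ii], coords[jj], metric='l2'))  # [5. 3.]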
googleapis/google-cloud-python
dataproc/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/dataproc/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py#L202-L268
def create_workflow_template( self, parent, template, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Creates new workflow template. Example: >>> from google.cloud import dataproc_v1beta2 >>> >>> client = dataproc_v1beta2.WorkflowTemplateServiceClient() >>> >>> parent = client.region_path('[PROJECT]', '[REGION]') >>> >>> # TODO: Initialize `template`: >>> template = {} >>> >>> response = client.create_workflow_template(parent, template) Args: parent (str): Required. The "resource name" of the region, as described in https://cloud.google.com/apis/design/resource\_names of the form ``projects/{project_id}/regions/{region}`` template (Union[dict, ~google.cloud.dataproc_v1beta2.types.WorkflowTemplate]): Required. The Dataproc workflow template to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "create_workflow_template" not in self._inner_api_calls: self._inner_api_calls[ "create_workflow_template" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_workflow_template, default_retry=self._method_configs["CreateWorkflowTemplate"].retry, default_timeout=self._method_configs["CreateWorkflowTemplate"].timeout, client_info=self._client_info, ) request = workflow_templates_pb2.CreateWorkflowTemplateRequest( parent=parent, template=template ) return self._inner_api_calls["create_workflow_template"]( request, retry=retry, timeout=timeout, metadata=metadata )
[ "def", "create_workflow_template", "(", "self", ",", "parent", ",", "template", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadata", "=", "None", ",", ")", ":", "# Wrap the transport method to add retry and timeout logic.", "if", "\"create_workflow_template\"", "not", "in", "self", ".", "_inner_api_calls", ":", "self", ".", "_inner_api_calls", "[", "\"create_workflow_template\"", "]", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "wrap_method", "(", "self", ".", "transport", ".", "create_workflow_template", ",", "default_retry", "=", "self", ".", "_method_configs", "[", "\"CreateWorkflowTemplate\"", "]", ".", "retry", ",", "default_timeout", "=", "self", ".", "_method_configs", "[", "\"CreateWorkflowTemplate\"", "]", ".", "timeout", ",", "client_info", "=", "self", ".", "_client_info", ",", ")", "request", "=", "workflow_templates_pb2", ".", "CreateWorkflowTemplateRequest", "(", "parent", "=", "parent", ",", "template", "=", "template", ")", "return", "self", ".", "_inner_api_calls", "[", "\"create_workflow_template\"", "]", "(", "request", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")" ]
Creates new workflow template. Example: >>> from google.cloud import dataproc_v1beta2 >>> >>> client = dataproc_v1beta2.WorkflowTemplateServiceClient() >>> >>> parent = client.region_path('[PROJECT]', '[REGION]') >>> >>> # TODO: Initialize `template`: >>> template = {} >>> >>> response = client.create_workflow_template(parent, template) Args: parent (str): Required. The "resource name" of the region, as described in https://cloud.google.com/apis/design/resource\_names of the form ``projects/{project_id}/regions/{region}`` template (Union[dict, ~google.cloud.dataproc_v1beta2.types.WorkflowTemplate]): Required. The Dataproc workflow template to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Creates", "new", "workflow", "template", "." ]
python
train
45.164179
openfisca/openfisca-survey-manager
openfisca_survey_manager/scenarios.py
https://github.com/openfisca/openfisca-survey-manager/blob/bed6c65dc5e4ec2bdc9cda5b865fefd9e3d0c358/openfisca_survey_manager/scenarios.py#L1210-L1217
def _set_id_variable_by_entity_key(self) -> Dict[str, str]: '''Identify and set the good ids for the different entities''' if self.id_variable_by_entity_key is None: self.id_variable_by_entity_key = dict( (entity.key, entity.key + '_id') for entity in self.tax_benefit_system.entities) log.debug("Use default id_variable names:\n {}".format(self.id_variable_by_entity_key)) return self.id_variable_by_entity_key
[ "def", "_set_id_variable_by_entity_key", "(", "self", ")", "->", "Dict", "[", "str", ",", "str", "]", ":", "if", "self", ".", "id_variable_by_entity_key", "is", "None", ":", "self", ".", "id_variable_by_entity_key", "=", "dict", "(", "(", "entity", ".", "key", ",", "entity", ".", "key", "+", "'_id'", ")", "for", "entity", "in", "self", ".", "tax_benefit_system", ".", "entities", ")", "log", ".", "debug", "(", "\"Use default id_variable names:\\n {}\"", ".", "format", "(", "self", ".", "id_variable_by_entity_key", ")", ")", "return", "self", ".", "id_variable_by_entity_key" ]
Identify and set the good ids for the different entities
[ "Identify", "and", "set", "the", "good", "ids", "for", "the", "different", "entities" ]
python
train
58.75
Jajcus/pyxmpp2
pyxmpp2/ext/muc/muccore.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/muc/muccore.py#L635-L644
def make_muc_userinfo(self): """ Create <x xmlns="...muc#user"/> element in the stanza. :return: the element created. :returntype: `MucUserX` """ self.clear_muc_child() self.muc_child=MucUserX(parent=self.xmlnode) return self.muc_child
[ "def", "make_muc_userinfo", "(", "self", ")", ":", "self", ".", "clear_muc_child", "(", ")", "self", ".", "muc_child", "=", "MucUserX", "(", "parent", "=", "self", ".", "xmlnode", ")", "return", "self", ".", "muc_child" ]
Create <x xmlns="...muc#user"/> element in the stanza. :return: the element created. :returntype: `MucUserX`
[ "Create", "<x", "xmlns", "=", "...", "muc#user", "/", ">", "element", "in", "the", "stanza", "." ]
python
valid
29.1
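The clear-then-attach pattern above in miniature, with a plain dict standing in for the pyxmpp2 XML node (the real MucUserX needs a libxml2-backed stanza):

class Stanza:
    def __init__(self):
        self.muc_child = None

    def clear_muc_child(self):
        # Drop any previously attached muc#user / muc#admin payload.
        self.muc_child = None

    def make_muc_userinfo(self):
        self.clear_muc_child()
        self.muc_child = {'xmlns': 'http://jabber.org/protocol/muc#user'}
        return self.muc_child

print(Stanza().make_muc_userinfo())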
thusoy/python-crypt
pcrypt.py
https://github.com/thusoy/python-crypt/blob/0835f7568c14762890cea70b7605d04b3459e4a0/pcrypt.py#L81-L216
def sha2_crypt(key, salt, hashfunc, rounds=_ROUNDS_DEFAULT): """ This algorithm is insane. History can be found at https://en.wikipedia.org/wiki/Crypt_%28C%29 """ key = key.encode('utf-8') h = hashfunc() alt_h = hashfunc() digest_size = h.digest_size key_len = len(key) # First, feed key, salt and then key again to the alt hash alt_h.update(key) alt_h.update(salt.encode('utf-8')) alt_h.update(key) alt_result = alt_h.digest() # Feed key and salt to the primary hash h.update(key) h.update(salt.encode('utf-8')) # Feed as many (loopping) bytes from alt digest as the length of the key for i in range(key_len//digest_size): h.update(alt_result) h.update(alt_result[:(key_len % digest_size)]) # Take the binary representation of the length of the key and for every # 1 add the alternate digest, for every 0 the key bits = key_len while bits > 0: if bits & 1 == 0: h.update(key) else: h.update(alt_result) bits >>= 1 # Store the results from the primary hash alt_result = h.digest() h = hashfunc() # Add password for each character in the password for i in range(key_len): h.update(key) temp_result = h.digest() # Compute a P array of the bytes in temp repeated for the length of the key p_bytes = temp_result * (key_len // digest_size) p_bytes += temp_result[:(key_len % digest_size)] alt_h = hashfunc() # Add the salt 16 + arbitrary amount decided by first byte in alt digest for i in range(16 + byte2int(alt_result[0])): alt_h.update(salt.encode('utf-8')) temp_result = alt_h.digest() # Compute a S array of the bytes in temp_result repeated for the length # of the salt s_bytes = temp_result * (len(salt) // digest_size) s_bytes += temp_result[:(len(salt) % digest_size)] # Do the actual iterations for i in range(rounds): h = hashfunc() # Alternate adding either the P array or the alt digest if i & 1 != 0: h.update(p_bytes) else: h.update(alt_result) # If the round is divisible by 3, add the S array if i % 3 != 0: h.update(s_bytes) # If the round is divisible by 7, add the P array if i % 7 != 0: h.update(p_bytes) # Alternate adding either the P array or the alt digest, opposite # of first step if i & 1 != 0: h.update(alt_result) else: h.update(p_bytes) alt_result = h.digest() # Compute the base64-ish representation of the hash ret = [] if digest_size == 64: # SHA-512 ret.append(b64_from_24bit(alt_result[0], alt_result[21], alt_result[42], 4)) ret.append(b64_from_24bit(alt_result[22], alt_result[43], alt_result[1], 4)) ret.append(b64_from_24bit(alt_result[44], alt_result[2], alt_result[23], 4)) ret.append(b64_from_24bit(alt_result[3], alt_result[24], alt_result[45], 4)) ret.append(b64_from_24bit(alt_result[25], alt_result[46], alt_result[4], 4)) ret.append(b64_from_24bit(alt_result[47], alt_result[5], alt_result[26], 4)) ret.append(b64_from_24bit(alt_result[6], alt_result[27], alt_result[48], 4)) ret.append(b64_from_24bit(alt_result[28], alt_result[49], alt_result[7], 4)) ret.append(b64_from_24bit(alt_result[50], alt_result[8], alt_result[29], 4)) ret.append(b64_from_24bit(alt_result[9], alt_result[30], alt_result[51], 4)) ret.append(b64_from_24bit(alt_result[31], alt_result[52], alt_result[10], 4)) ret.append(b64_from_24bit(alt_result[53], alt_result[11], alt_result[32], 4)) ret.append(b64_from_24bit(alt_result[12], alt_result[33], alt_result[54], 4)) ret.append(b64_from_24bit(alt_result[34], alt_result[55], alt_result[13], 4)) ret.append(b64_from_24bit(alt_result[56], alt_result[14], alt_result[35], 4)) ret.append(b64_from_24bit(alt_result[15], alt_result[36], alt_result[57], 4)) 
ret.append(b64_from_24bit(alt_result[37], alt_result[58], alt_result[16], 4)) ret.append(b64_from_24bit(alt_result[59], alt_result[17], alt_result[38], 4)) ret.append(b64_from_24bit(alt_result[18], alt_result[39], alt_result[60], 4)) ret.append(b64_from_24bit(alt_result[40], alt_result[61], alt_result[19], 4)) ret.append(b64_from_24bit(alt_result[62], alt_result[20], alt_result[41], 4)) ret.append(b64_from_24bit(int2byte(0), int2byte(0), alt_result[63], 2)) else: # SHA-256 ret.append(b64_from_24bit(alt_result[0], alt_result[10], alt_result[20], 4)) ret.append(b64_from_24bit(alt_result[21], alt_result[1], alt_result[11], 4)) ret.append(b64_from_24bit(alt_result[12], alt_result[22], alt_result[2], 4)) ret.append(b64_from_24bit(alt_result[3], alt_result[13], alt_result[23], 4)) ret.append(b64_from_24bit(alt_result[24], alt_result[4], alt_result[14], 4)) ret.append(b64_from_24bit(alt_result[15], alt_result[25], alt_result[5], 4)) ret.append(b64_from_24bit(alt_result[6], alt_result[16], alt_result[26], 4)) ret.append(b64_from_24bit(alt_result[27], alt_result[7], alt_result[17], 4)) ret.append(b64_from_24bit(alt_result[18], alt_result[28], alt_result[8], 4)) ret.append(b64_from_24bit(alt_result[9], alt_result[19], alt_result[29], 4)) ret.append(b64_from_24bit(int2byte(0), alt_result[31], alt_result[30], 3)) algo = 6 if digest_size == 64 else 5 if rounds == _ROUNDS_DEFAULT: return '${0}${1}${2}'.format(algo, salt, ''.join(ret)) else: return '${0}$rounds={1}${2}${3}'.format(algo, rounds, salt, ''.join(ret))
[ "def", "sha2_crypt", "(", "key", ",", "salt", ",", "hashfunc", ",", "rounds", "=", "_ROUNDS_DEFAULT", ")", ":", "key", "=", "key", ".", "encode", "(", "'utf-8'", ")", "h", "=", "hashfunc", "(", ")", "alt_h", "=", "hashfunc", "(", ")", "digest_size", "=", "h", ".", "digest_size", "key_len", "=", "len", "(", "key", ")", "# First, feed key, salt and then key again to the alt hash", "alt_h", ".", "update", "(", "key", ")", "alt_h", ".", "update", "(", "salt", ".", "encode", "(", "'utf-8'", ")", ")", "alt_h", ".", "update", "(", "key", ")", "alt_result", "=", "alt_h", ".", "digest", "(", ")", "# Feed key and salt to the primary hash", "h", ".", "update", "(", "key", ")", "h", ".", "update", "(", "salt", ".", "encode", "(", "'utf-8'", ")", ")", "# Feed as many (loopping) bytes from alt digest as the length of the key", "for", "i", "in", "range", "(", "key_len", "//", "digest_size", ")", ":", "h", ".", "update", "(", "alt_result", ")", "h", ".", "update", "(", "alt_result", "[", ":", "(", "key_len", "%", "digest_size", ")", "]", ")", "# Take the binary representation of the length of the key and for every", "# 1 add the alternate digest, for every 0 the key", "bits", "=", "key_len", "while", "bits", ">", "0", ":", "if", "bits", "&", "1", "==", "0", ":", "h", ".", "update", "(", "key", ")", "else", ":", "h", ".", "update", "(", "alt_result", ")", "bits", ">>=", "1", "# Store the results from the primary hash", "alt_result", "=", "h", ".", "digest", "(", ")", "h", "=", "hashfunc", "(", ")", "# Add password for each character in the password", "for", "i", "in", "range", "(", "key_len", ")", ":", "h", ".", "update", "(", "key", ")", "temp_result", "=", "h", ".", "digest", "(", ")", "# Compute a P array of the bytes in temp repeated for the length of the key", "p_bytes", "=", "temp_result", "*", "(", "key_len", "//", "digest_size", ")", "p_bytes", "+=", "temp_result", "[", ":", "(", "key_len", "%", "digest_size", ")", "]", "alt_h", "=", "hashfunc", "(", ")", "# Add the salt 16 + arbitrary amount decided by first byte in alt digest", "for", "i", "in", "range", "(", "16", "+", "byte2int", "(", "alt_result", "[", "0", "]", ")", ")", ":", "alt_h", ".", "update", "(", "salt", ".", "encode", "(", "'utf-8'", ")", ")", "temp_result", "=", "alt_h", ".", "digest", "(", ")", "# Compute a S array of the bytes in temp_result repeated for the length", "# of the salt", "s_bytes", "=", "temp_result", "*", "(", "len", "(", "salt", ")", "//", "digest_size", ")", "s_bytes", "+=", "temp_result", "[", ":", "(", "len", "(", "salt", ")", "%", "digest_size", ")", "]", "# Do the actual iterations", "for", "i", "in", "range", "(", "rounds", ")", ":", "h", "=", "hashfunc", "(", ")", "# Alternate adding either the P array or the alt digest", "if", "i", "&", "1", "!=", "0", ":", "h", ".", "update", "(", "p_bytes", ")", "else", ":", "h", ".", "update", "(", "alt_result", ")", "# If the round is divisible by 3, add the S array", "if", "i", "%", "3", "!=", "0", ":", "h", ".", "update", "(", "s_bytes", ")", "# If the round is divisible by 7, add the P array", "if", "i", "%", "7", "!=", "0", ":", "h", ".", "update", "(", "p_bytes", ")", "# Alternate adding either the P array or the alt digest, opposite", "# of first step", "if", "i", "&", "1", "!=", "0", ":", "h", ".", "update", "(", "alt_result", ")", "else", ":", "h", ".", "update", "(", "p_bytes", ")", "alt_result", "=", "h", ".", "digest", "(", ")", "# Compute the base64-ish representation of the hash", "ret", "=", "[", "]", "if", "digest_size", "==", "64", ":", "# SHA-512", 
"ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "0", "]", ",", "alt_result", "[", "21", "]", ",", "alt_result", "[", "42", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "22", "]", ",", "alt_result", "[", "43", "]", ",", "alt_result", "[", "1", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "44", "]", ",", "alt_result", "[", "2", "]", ",", "alt_result", "[", "23", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "3", "]", ",", "alt_result", "[", "24", "]", ",", "alt_result", "[", "45", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "25", "]", ",", "alt_result", "[", "46", "]", ",", "alt_result", "[", "4", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "47", "]", ",", "alt_result", "[", "5", "]", ",", "alt_result", "[", "26", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "6", "]", ",", "alt_result", "[", "27", "]", ",", "alt_result", "[", "48", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "28", "]", ",", "alt_result", "[", "49", "]", ",", "alt_result", "[", "7", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "50", "]", ",", "alt_result", "[", "8", "]", ",", "alt_result", "[", "29", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "9", "]", ",", "alt_result", "[", "30", "]", ",", "alt_result", "[", "51", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "31", "]", ",", "alt_result", "[", "52", "]", ",", "alt_result", "[", "10", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "53", "]", ",", "alt_result", "[", "11", "]", ",", "alt_result", "[", "32", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "12", "]", ",", "alt_result", "[", "33", "]", ",", "alt_result", "[", "54", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "34", "]", ",", "alt_result", "[", "55", "]", ",", "alt_result", "[", "13", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "56", "]", ",", "alt_result", "[", "14", "]", ",", "alt_result", "[", "35", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "15", "]", ",", "alt_result", "[", "36", "]", ",", "alt_result", "[", "57", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "37", "]", ",", "alt_result", "[", "58", "]", ",", "alt_result", "[", "16", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "59", "]", ",", "alt_result", "[", "17", "]", ",", "alt_result", "[", "38", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "18", "]", ",", "alt_result", "[", "39", "]", ",", "alt_result", "[", "60", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "40", "]", ",", "alt_result", "[", "61", "]", ",", "alt_result", "[", "19", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "62", "]", ",", "alt_result", "[", "20", "]", ",", "alt_result", "[", "41", "]", ",", "4", ")", 
")", "ret", ".", "append", "(", "b64_from_24bit", "(", "int2byte", "(", "0", ")", ",", "int2byte", "(", "0", ")", ",", "alt_result", "[", "63", "]", ",", "2", ")", ")", "else", ":", "# SHA-256", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "0", "]", ",", "alt_result", "[", "10", "]", ",", "alt_result", "[", "20", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "21", "]", ",", "alt_result", "[", "1", "]", ",", "alt_result", "[", "11", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "12", "]", ",", "alt_result", "[", "22", "]", ",", "alt_result", "[", "2", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "3", "]", ",", "alt_result", "[", "13", "]", ",", "alt_result", "[", "23", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "24", "]", ",", "alt_result", "[", "4", "]", ",", "alt_result", "[", "14", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "15", "]", ",", "alt_result", "[", "25", "]", ",", "alt_result", "[", "5", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "6", "]", ",", "alt_result", "[", "16", "]", ",", "alt_result", "[", "26", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "27", "]", ",", "alt_result", "[", "7", "]", ",", "alt_result", "[", "17", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "18", "]", ",", "alt_result", "[", "28", "]", ",", "alt_result", "[", "8", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "alt_result", "[", "9", "]", ",", "alt_result", "[", "19", "]", ",", "alt_result", "[", "29", "]", ",", "4", ")", ")", "ret", ".", "append", "(", "b64_from_24bit", "(", "int2byte", "(", "0", ")", ",", "alt_result", "[", "31", "]", ",", "alt_result", "[", "30", "]", ",", "3", ")", ")", "algo", "=", "6", "if", "digest_size", "==", "64", "else", "5", "if", "rounds", "==", "_ROUNDS_DEFAULT", ":", "return", "'${0}${1}${2}'", ".", "format", "(", "algo", ",", "salt", ",", "''", ".", "join", "(", "ret", ")", ")", "else", ":", "return", "'${0}$rounds={1}${2}${3}'", ".", "format", "(", "algo", ",", "rounds", ",", "salt", ",", "''", ".", "join", "(", "ret", ")", ")" ]
This algorithm is insane. History can be found at https://en.wikipedia.org/wiki/Crypt_%28C%29
[ "This", "algorithm", "is", "insane", ".", "History", "can", "be", "found", "at", "https", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Crypt_%28C%29" ]
python
train
41.713235
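A short usage sketch for the function above, assuming it is importable from the record's module:

import hashlib
from pcrypt import sha2_crypt  # assumed import path from the record above

# '$6$' marks SHA-512 crypt and '$5$' SHA-256; the output format matches glibc's crypt(3).
print(sha2_crypt('secret', 'saltsalt', hashlib.sha512))
print(sha2_crypt('secret', 'saltsalt', hashlib.sha256, rounds=10000))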
seung-lab/cloud-volume
cloudvolume/cloudvolume.py
https://github.com/seung-lab/cloud-volume/blob/d2fd4500333f1bc3cd3e3919a8b649cec5d8e214/cloudvolume/cloudvolume.py#L940-L1019
def transfer_to(self, cloudpath, bbox, block_size=None, compress=True): """ Transfer files from one storage location to another, bypassing volume painting. This enables using a single CloudVolume instance to transfer big volumes. In some cases, gsutil or aws s3 cli tools may be more appropriate. This method is provided for convenience. It may be optimized for better performance over time as demand requires. cloudpath (str): path to storage layer bbox (Bbox object): ROI to transfer block_size (int): number of file chunks to transfer per I/O batch. compress (bool): Set to False to upload as uncompressed """ if type(bbox) is Bbox: requested_bbox = bbox else: (requested_bbox, _, _) = self.__interpret_slices(bbox) realized_bbox = self.__realized_bbox(requested_bbox) if requested_bbox != realized_bbox: raise exceptions.AlignmentError( "Unable to transfer non-chunk aligned bounding boxes. Requested: {}, Realized: {}".format( requested_bbox, realized_bbox )) default_block_size_MB = 50 # MB chunk_MB = self.underlying.rectVolume() * np.dtype(self.dtype).itemsize * self.num_channels if self.layer_type == 'image': # kind of an average guess for some EM datasets, have seen up to 1.9x and as low as 1.1 # affinites are also images, but have very different compression ratios. e.g. 3x for kempressed chunk_MB /= 1.3 else: # segmentation chunk_MB /= 100.0 # compression ratios between 80 and 800.... chunk_MB /= 1024.0 * 1024.0 if block_size: step = block_size else: step = int(default_block_size_MB // chunk_MB) + 1 try: destvol = CloudVolume(cloudpath, mip=self.mip) except exceptions.InfoUnavailableError: destvol = CloudVolume(cloudpath, mip=self.mip, info=self.info, provenance=self.provenance.serialize()) destvol.commit_info() destvol.commit_provenance() except exceptions.ScaleUnavailableError: destvol = CloudVolume(cloudpath) for i in range(len(destvol.scales) + 1, len(self.scales)): destvol.scales.append( self.scales[i] ) destvol.commit_info() destvol.commit_provenance() num_blocks = np.ceil(self.bounds.volume() / self.underlying.rectVolume()) / step num_blocks = int(np.ceil(num_blocks)) cloudpaths = txrx.chunknames(realized_bbox, self.bounds, self.key, self.underlying) pbar = tqdm( desc='Transferring Blocks of {} Chunks'.format(step), unit='blocks', disable=(not self.progress), total=num_blocks, ) with pbar: with Storage(self.layer_cloudpath) as src_stor: with Storage(cloudpath) as dest_stor: for _ in range(num_blocks, 0, -1): srcpaths = list(itertools.islice(cloudpaths, step)) files = src_stor.get_files(srcpaths) files = [ (f['filename'], f['content']) for f in files ] dest_stor.put_files( files=files, compress=compress, content_type=txrx.content_type(destvol), ) pbar.update()
[ "def", "transfer_to", "(", "self", ",", "cloudpath", ",", "bbox", ",", "block_size", "=", "None", ",", "compress", "=", "True", ")", ":", "if", "type", "(", "bbox", ")", "is", "Bbox", ":", "requested_bbox", "=", "bbox", "else", ":", "(", "requested_bbox", ",", "_", ",", "_", ")", "=", "self", ".", "__interpret_slices", "(", "bbox", ")", "realized_bbox", "=", "self", ".", "__realized_bbox", "(", "requested_bbox", ")", "if", "requested_bbox", "!=", "realized_bbox", ":", "raise", "exceptions", ".", "AlignmentError", "(", "\"Unable to transfer non-chunk aligned bounding boxes. Requested: {}, Realized: {}\"", ".", "format", "(", "requested_bbox", ",", "realized_bbox", ")", ")", "default_block_size_MB", "=", "50", "# MB", "chunk_MB", "=", "self", ".", "underlying", ".", "rectVolume", "(", ")", "*", "np", ".", "dtype", "(", "self", ".", "dtype", ")", ".", "itemsize", "*", "self", ".", "num_channels", "if", "self", ".", "layer_type", "==", "'image'", ":", "# kind of an average guess for some EM datasets, have seen up to 1.9x and as low as 1.1", "# affinites are also images, but have very different compression ratios. e.g. 3x for kempressed", "chunk_MB", "/=", "1.3", "else", ":", "# segmentation", "chunk_MB", "/=", "100.0", "# compression ratios between 80 and 800....", "chunk_MB", "/=", "1024.0", "*", "1024.0", "if", "block_size", ":", "step", "=", "block_size", "else", ":", "step", "=", "int", "(", "default_block_size_MB", "//", "chunk_MB", ")", "+", "1", "try", ":", "destvol", "=", "CloudVolume", "(", "cloudpath", ",", "mip", "=", "self", ".", "mip", ")", "except", "exceptions", ".", "InfoUnavailableError", ":", "destvol", "=", "CloudVolume", "(", "cloudpath", ",", "mip", "=", "self", ".", "mip", ",", "info", "=", "self", ".", "info", ",", "provenance", "=", "self", ".", "provenance", ".", "serialize", "(", ")", ")", "destvol", ".", "commit_info", "(", ")", "destvol", ".", "commit_provenance", "(", ")", "except", "exceptions", ".", "ScaleUnavailableError", ":", "destvol", "=", "CloudVolume", "(", "cloudpath", ")", "for", "i", "in", "range", "(", "len", "(", "destvol", ".", "scales", ")", "+", "1", ",", "len", "(", "self", ".", "scales", ")", ")", ":", "destvol", ".", "scales", ".", "append", "(", "self", ".", "scales", "[", "i", "]", ")", "destvol", ".", "commit_info", "(", ")", "destvol", ".", "commit_provenance", "(", ")", "num_blocks", "=", "np", ".", "ceil", "(", "self", ".", "bounds", ".", "volume", "(", ")", "/", "self", ".", "underlying", ".", "rectVolume", "(", ")", ")", "/", "step", "num_blocks", "=", "int", "(", "np", ".", "ceil", "(", "num_blocks", ")", ")", "cloudpaths", "=", "txrx", ".", "chunknames", "(", "realized_bbox", ",", "self", ".", "bounds", ",", "self", ".", "key", ",", "self", ".", "underlying", ")", "pbar", "=", "tqdm", "(", "desc", "=", "'Transferring Blocks of {} Chunks'", ".", "format", "(", "step", ")", ",", "unit", "=", "'blocks'", ",", "disable", "=", "(", "not", "self", ".", "progress", ")", ",", "total", "=", "num_blocks", ",", ")", "with", "pbar", ":", "with", "Storage", "(", "self", ".", "layer_cloudpath", ")", "as", "src_stor", ":", "with", "Storage", "(", "cloudpath", ")", "as", "dest_stor", ":", "for", "_", "in", "range", "(", "num_blocks", ",", "0", ",", "-", "1", ")", ":", "srcpaths", "=", "list", "(", "itertools", ".", "islice", "(", "cloudpaths", ",", "step", ")", ")", "files", "=", "src_stor", ".", "get_files", "(", "srcpaths", ")", "files", "=", "[", "(", "f", "[", "'filename'", "]", ",", "f", "[", "'content'", "]", ")", "for", "f", "in", "files", 
"]", "dest_stor", ".", "put_files", "(", "files", "=", "files", ",", "compress", "=", "compress", ",", "content_type", "=", "txrx", ".", "content_type", "(", "destvol", ")", ",", ")", "pbar", ".", "update", "(", ")" ]
Transfer files from one storage location to another, bypassing volume painting. This enables using a single CloudVolume instance to transfer big volumes. In some cases, gsutil or aws s3 cli tools may be more appropriate. This method is provided for convenience. It may be optimized for better performance over time as demand requires. cloudpath (str): path to storage layer bbox (Bbox object): ROI to transfer block_size (int): number of file chunks to transfer per I/O batch. compress (bool): Set to False to upload as uncompressed
[ "Transfer", "files", "from", "one", "storage", "location", "to", "another", "bypassing", "volume", "painting", ".", "This", "enables", "using", "a", "single", "CloudVolume", "instance", "to", "transfer", "big", "volumes", ".", "In", "some", "cases", "gsutil", "or", "aws", "s3", "cli", "tools", "may", "be", "more", "appropriate", ".", "This", "method", "is", "provided", "for", "convenience", ".", "It", "may", "be", "optimized", "for", "better", "performance", "over", "time", "as", "demand", "requires", "." ]
python
train
38.5625
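A hedged usage sketch; the bucket paths are placeholders, and the region must be chunk-aligned, as the AlignmentError branch above enforces:

from cloudvolume import CloudVolume

vol = CloudVolume('gs://example-bucket/source-layer', mip=0, progress=True)
# Copy the whole mip-0 volume in batches of 64 chunk files per round trip.
vol.transfer_to('gs://example-bucket/dest-layer', vol.bounds, block_size=64)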
Garee/pytodoist
pytodoist/api.py
https://github.com/Garee/pytodoist/blob/3359cbff485ebdbbb4ffbd58d71e21a817874dd7/pytodoist/api.py#L415-L427
def _get(self, end_point, params=None, **kwargs): """Send an HTTP GET request to a Todoist API end-point. :param end_point: The Todoist API end-point. :type end_point: str :param params: The required request parameters. :type params: dict :param kwargs: Any optional parameters. :type kwargs: dict :return: The HTTP response to the request. :rtype: :class:`requests.Response` """ return self._request(requests.get, end_point, params, **kwargs)
[ "def", "_get", "(", "self", ",", "end_point", ",", "params", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_request", "(", "requests", ".", "get", ",", "end_point", ",", "params", ",", "*", "*", "kwargs", ")" ]
Send an HTTP GET request to a Todoist API end-point. :param end_point: The Todoist API end-point. :type end_point: str :param params: The required request parameters. :type params: dict :param kwargs: Any optional parameters. :type kwargs: dict :return: The HTTP response to the request. :rtype: :class:`requests.Response`
[ "Send", "a", "HTTP", "GET", "request", "to", "a", "Todoist", "API", "end", "-", "point", "." ]
python
train
39.923077
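The wrapper pattern above, reduced to a runnable stand-alone sketch (the real method delegates to an internal _request helper in the same way):

import requests

def _request(method, end_point, params=None, **kwargs):
    # All verb-specific helpers funnel through one place, so headers,
    # retries, etc. can later be added centrally.
    return method(end_point, params=params, **kwargs)

def _get(end_point, params=None, **kwargs):
    return _request(requests.get, end_point, params, **kwargs)

response = _get('https://httpbin.org/get', params={'q': 'todoist'})
print(response.status_code)  # 200 when the network is reachable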
tanghaibao/jcvi
jcvi/variation/cnv.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/cnv.py#L849-L888
def hmm(args): """ %prog hmm workdir sample_key Run CNV segmentation caller. The workdir must contain a subfolder called `sample_key-cn` that contains CN for each chromosome. A `beta` directory that contains scaler for each bin must also be present in the current directory. """ p = OptionParser(hmm.__doc__) p.add_option("--mu", default=.003, type="float", help="Transition probability") p.add_option("--sigma", default=.1, type="float", help="Standard deviation of Gaussian emission distribution") p.add_option("--threshold", default=1, type="float", help="Standard deviation must be < this " "in the baseline population") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) workdir, sample_key = args model = CopyNumberHMM(workdir=workdir, mu=opts.mu, sigma=opts.sigma, threshold=opts.threshold) events = model.run(sample_key) params = ".mu-{}.sigma-{}.threshold-{}"\ .format(opts.mu, opts.sigma, opts.threshold) hmmfile = op.join(workdir, sample_key + params + ".seg") fw = open(hmmfile, "w") nevents = 0 for mean_cn, rr, event in events: if event is None: continue print(" ".join((event.bedline, sample_key)), file=fw) nevents += 1 fw.close() logging.debug("A total of {} aberrant events written to `{}`" .format(nevents, hmmfile)) return hmmfile
[ "def", "hmm", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "hmm", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--mu\"", ",", "default", "=", ".003", ",", "type", "=", "\"float\"", ",", "help", "=", "\"Transition probability\"", ")", "p", ".", "add_option", "(", "\"--sigma\"", ",", "default", "=", ".1", ",", "type", "=", "\"float\"", ",", "help", "=", "\"Standard deviation of Gaussian emission distribution\"", ")", "p", ".", "add_option", "(", "\"--threshold\"", ",", "default", "=", "1", ",", "type", "=", "\"float\"", ",", "help", "=", "\"Standard deviation must be < this \"", "\"in the baseline population\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "workdir", ",", "sample_key", "=", "args", "model", "=", "CopyNumberHMM", "(", "workdir", "=", "workdir", ",", "mu", "=", "opts", ".", "mu", ",", "sigma", "=", "opts", ".", "sigma", ",", "threshold", "=", "opts", ".", "threshold", ")", "events", "=", "model", ".", "run", "(", "sample_key", ")", "params", "=", "\".mu-{}.sigma-{}.threshold-{}\"", ".", "format", "(", "opts", ".", "mu", ",", "opts", ".", "sigma", ",", "opts", ".", "threshold", ")", "hmmfile", "=", "op", ".", "join", "(", "workdir", ",", "sample_key", "+", "params", "+", "\".seg\"", ")", "fw", "=", "open", "(", "hmmfile", ",", "\"w\"", ")", "nevents", "=", "0", "for", "mean_cn", ",", "rr", ",", "event", "in", "events", ":", "if", "event", "is", "None", ":", "continue", "print", "(", "\" \"", ".", "join", "(", "(", "event", ".", "bedline", ",", "sample_key", ")", ")", ",", "file", "=", "fw", ")", "nevents", "+=", "1", "fw", ".", "close", "(", ")", "logging", ".", "debug", "(", "\"A total of {} aberrant events written to `{}`\"", ".", "format", "(", "nevents", ",", "hmmfile", ")", ")", "return", "hmmfile" ]
%prog hmm workdir sample_key Run CNV segmentation caller. The workdir must contain a subfolder called `sample_key-cn` that contains CN for each chromosome. A `beta` directory that contains scaler for each bin must also be present in the current directory.
[ "%prog", "hmm", "workdir", "sample_key" ]
python
train
37.825
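A hedged call sketch; the import path is assumed from the record's URL, and the working directory must contain the `sample-cn` subfolder and `beta` directory that the docstring requires:

from jcvi.variation.cnv import hmm  # assumed import path

# Roughly equivalent to the command-line form: hmm workdir sample --mu 0.003 --sigma 0.1
segfile = hmm(['workdir', 'sample', '--mu', '0.003', '--sigma', '0.1'])
print(segfile)  # workdir/sample.mu-0.003.sigma-0.1.threshold-1.seg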
hydpy-dev/hydpy
hydpy/models/hland/hland_model.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/hland/hland_model.py#L13-L57
def calc_tc_v1(self): """Adjust the measured air temperature to the altitude of the individual zones. Required control parameters: |NmbZones| |TCAlt| |ZoneZ| |ZRelT| Required input sequence: |hland_inputs.T| Calculated flux sequences: |TC| Basic equation: :math:`TC = T - TCAlt \\cdot (ZoneZ-ZRelT)` Examples: Prepare two zones, the first one lying at the reference height and the second one 200 meters above: >>> from hydpy.models.hland import * >>> parameterstep('1d') >>> nmbzones(2) >>> zrelt(2.0) >>> zonez(2.0, 4.0) Applying the usual temperature lapse rate of 0.6°C/100m does not affect the temperature of the first zone but reduces the temperature of the second zone by 1.2°C: >>> tcalt(0.6) >>> inputs.t = 5.0 >>> model.calc_tc_v1() >>> fluxes.tc tc(5.0, 3.8) """ con = self.parameters.control.fastaccess inp = self.sequences.inputs.fastaccess flu = self.sequences.fluxes.fastaccess for k in range(con.nmbzones): flu.tc[k] = inp.t-con.tcalt[k]*(con.zonez[k]-con.zrelt)
[ "def", "calc_tc_v1", "(", "self", ")", ":", "con", "=", "self", ".", "parameters", ".", "control", ".", "fastaccess", "inp", "=", "self", ".", "sequences", ".", "inputs", ".", "fastaccess", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".", "fastaccess", "for", "k", "in", "range", "(", "con", ".", "nmbzones", ")", ":", "flu", ".", "tc", "[", "k", "]", "=", "inp", ".", "t", "-", "con", ".", "tcalt", "[", "k", "]", "*", "(", "con", ".", "zonez", "[", "k", "]", "-", "con", ".", "zrelt", ")" ]
Adjust the measured air temperature to the altitude of the individual zones. Required control parameters: |NmbZones| |TCAlt| |ZoneZ| |ZRelT| Required input sequence: |hland_inputs.T| Calculated flux sequences: |TC| Basic equation: :math:`TC = T - TCAlt \\cdot (ZoneZ-ZRelT)` Examples: Prepare two zones, the first one lying at the reference height and the second one 200 meters above: >>> from hydpy.models.hland import * >>> parameterstep('1d') >>> nmbzones(2) >>> zrelt(2.0) >>> zonez(2.0, 4.0) Applying the usual temperature lapse rate of 0.6°C/100m does not affect the temperature of the first zone but reduces the temperature of the second zone by 1.2°C: >>> tcalt(0.6) >>> inputs.t = 5.0 >>> model.calc_tc_v1() >>> fluxes.tc tc(5.0, 3.8)
[ "Adjust", "the", "measured", "air", "temperature", "to", "the", "altitude", "of", "the", "individual", "zones", "." ]
python
train
25.777778
spyder-ide/spyder
spyder/plugins/ipythonconsole/widgets/client.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/widgets/client.py#L305-L312
def stop_button_click_handler(self): """Method to handle what to do when the stop button is pressed""" self.stop_button.setDisabled(True) # Interrupt computations or stop debugging if not self.shellwidget._reading: self.interrupt_kernel() else: self.shellwidget.write_to_stdin('exit')
[ "def", "stop_button_click_handler", "(", "self", ")", ":", "self", ".", "stop_button", ".", "setDisabled", "(", "True", ")", "# Interrupt computations or stop debugging\r", "if", "not", "self", ".", "shellwidget", ".", "_reading", ":", "self", ".", "interrupt_kernel", "(", ")", "else", ":", "self", ".", "shellwidget", ".", "write_to_stdin", "(", "'exit'", ")" ]
Method to handle what to do when the stop button is pressed
[ "Method", "to", "handle", "what", "to", "do", "when", "the", "stop", "button", "is", "pressed" ]
python
train
43.5
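The interrupt-vs-exit branch above, runnable with a stand-in shell widget instead of Spyder's Qt objects:

class FakeShell:
    _reading = False  # True would mean the kernel sits at a pdb prompt

    def interrupt_kernel(self):
        print('interrupt sent')

    def write_to_stdin(self, text):
        print('stdin <-', text)

def stop_clicked(shell):
    if not shell._reading:
        shell.interrupt_kernel()      # running code: break into the kernel
    else:
        shell.write_to_stdin('exit')  # debugging: quit pdb instead

stop_clicked(FakeShell())  # prints 'interrupt sent'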
CalebBell/fluids
fluids/numerics/__init__.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/numerics/__init__.py#L620-L640
def polyder(c, m=1, scl=1, axis=0): '''not quite a copy of numpy's version because this was faster to implement. ''' c = list(c) cnt = int(m) if cnt == 0: return c n = len(c) if cnt >= n: c = [0.0] else: for i in range(cnt): n = n - 1 c = [scl*ci for ci in c] der = [0.0 for _ in range(n)] for j in range(n, 0, -1): der[j - 1] = j*c[j] c = der return c
[ "def", "polyder", "(", "c", ",", "m", "=", "1", ",", "scl", "=", "1", ",", "axis", "=", "0", ")", ":", "c", "=", "list", "(", "c", ")", "cnt", "=", "int", "(", "m", ")", "if", "cnt", "==", "0", ":", "return", "c", "n", "=", "len", "(", "c", ")", "if", "cnt", ">=", "n", ":", "c", "=", "c", "[", ":", "1", "]", "*", "0", "else", ":", "for", "i", "in", "range", "(", "cnt", ")", ":", "n", "=", "n", "-", "1", "c", "*=", "scl", "der", "=", "[", "0.0", "for", "_", "in", "range", "(", "n", ")", "]", "for", "j", "in", "range", "(", "n", ",", "0", ",", "-", "1", ")", ":", "der", "[", "j", "-", "1", "]", "=", "j", "*", "c", "[", "j", "]", "c", "=", "der", "return", "c" ]
not quite a copy of numpy's version because this was faster to implement.
[ "not", "quite", "a", "copy", "of", "numpy", "s", "version", "because", "this", "was", "faster", "to", "implement", "." ]
python
train
21.714286
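A worked check for the function above (assuming it is in scope): d/dx of 1 + 2x + 3x**2 is 2 + 6x, and the second derivative is the constant 6:

print(polyder([1.0, 2.0, 3.0]))       # [2.0, 6.0]
print(polyder([1.0, 2.0, 3.0], m=2))  # [6.0]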
bububa/pyTOP
pyTOP/packages/requests/models.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/packages/requests/models.py#L569-L600
def content(self): """Content of the response, in bytes or unicode (if available). """ if self._content is not None: return self._content if self._content_consumed: raise RuntimeError('The content for this response was ' 'already consumed') # Read the contents. try: self._content = self.raw.read() except AttributeError: return None # Decode GZip'd content. if 'gzip' in self.headers.get('content-encoding', ''): try: self._content = decode_gzip(self._content) except zlib.error: pass # Decode unicode content. if self.config.get('decode_unicode'): self._content = get_unicode_from_response(self) self._content_consumed = True return self._content
[ "def", "content", "(", "self", ")", ":", "if", "self", ".", "_content", "is", "not", "None", ":", "return", "self", ".", "_content", "if", "self", ".", "_content_consumed", ":", "raise", "RuntimeError", "(", "'The content for this response was '", "'already consumed'", ")", "# Read the contents.", "try", ":", "self", ".", "_content", "=", "self", ".", "raw", ".", "read", "(", ")", "except", "AttributeError", ":", "return", "None", "# Decode GZip'd content.", "if", "'gzip'", "in", "self", ".", "headers", ".", "get", "(", "'content-encoding'", ",", "''", ")", ":", "try", ":", "self", ".", "_content", "=", "decode_gzip", "(", "self", ".", "_content", ")", "except", "zlib", ".", "error", ":", "pass", "# Decode unicode content.", "if", "self", ".", "config", ".", "get", "(", "'decode_unicode'", ")", ":", "self", ".", "_content", "=", "get_unicode_from_response", "(", "self", ")", "self", ".", "_content_consumed", "=", "True", "return", "self", ".", "_content" ]
Content of the response, in bytes or unicode (if available).
[ "Content", "of", "the", "response", "in", "bytes", "or", "unicode", "(", "if", "available", ")", "." ]
python
train
27.59375
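The gzip branch above in a self-contained pair of lines; zlib with wbits=16+MAX_WBITS undoes a gzip body, which is presumably what decode_gzip wraps:

import gzip
import zlib

body = gzip.compress(b'hello')
print(zlib.decompress(body, 16 + zlib.MAX_WBITS))  # b'hello'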
kgori/treeCl
treeCl/utils/ambiguate.py
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/utils/ambiguate.py#L83-L90
def remove_empty(rec): """ Deletes sequences that were marked for deletion by convert_to_IUPAC """ for header, sequence in rec.mapping.items(): if all(char == 'X' for char in sequence): rec.headers.remove(header) rec.sequences.remove(sequence) rec.update() return rec
[ "def", "remove_empty", "(", "rec", ")", ":", "for", "header", ",", "sequence", "in", "rec", ".", "mapping", ".", "items", "(", ")", ":", "if", "all", "(", "char", "==", "'X'", "for", "char", "in", "sequence", ")", ":", "rec", ".", "headers", ".", "remove", "(", "header", ")", "rec", ".", "sequences", ".", "remove", "(", "sequence", ")", "rec", ".", "update", "(", ")", "return", "rec" ]
Deletes sequences that were marked for deletion by convert_to_IUPAC
[ "Deletes", "sequences", "that", "were", "marked", "for", "deletion", "by", "convert_to_IUPAC" ]
python
train
38.5
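A runnable check with a stand-in record object carrying the same attributes the function touches:

class Rec:
    def __init__(self, mapping):
        self.mapping = mapping
        self.headers = list(mapping)
        self.sequences = list(mapping.values())

    def update(self):
        pass  # the real record rebuilds derived state here

rec = Rec({'seq1': 'ACGT', 'seq2': 'XXXX'})
remove_empty(rec)   # assumes the function above is in scope
print(rec.headers)  # ['seq1'] -- the all-'X' sequence was dropped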
keras-rl/keras-rl
rl/callbacks.py
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L267-L279
def on_step_end(self, step, logs): """ Update progress bar at the end of each step """ if self.info_names is None: self.info_names = logs['info'].keys() values = [('reward', logs['reward'])] if KERAS_VERSION > '2.1.3': self.progbar.update((self.step % self.interval) + 1, values=values) else: self.progbar.update((self.step % self.interval) + 1, values=values, force=True) self.step += 1 self.metrics.append(logs['metrics']) if len(self.info_names) > 0: self.infos.append([logs['info'][k] for k in self.info_names])
[ "def", "on_step_end", "(", "self", ",", "step", ",", "logs", ")", ":", "if", "self", ".", "info_names", "is", "None", ":", "self", ".", "info_names", "=", "logs", "[", "'info'", "]", ".", "keys", "(", ")", "values", "=", "[", "(", "'reward'", ",", "logs", "[", "'reward'", "]", ")", "]", "if", "KERAS_VERSION", ">", "'2.1.3'", ":", "self", ".", "progbar", ".", "update", "(", "(", "self", ".", "step", "%", "self", ".", "interval", ")", "+", "1", ",", "values", "=", "values", ")", "else", ":", "self", ".", "progbar", ".", "update", "(", "(", "self", ".", "step", "%", "self", ".", "interval", ")", "+", "1", ",", "values", "=", "values", ",", "force", "=", "True", ")", "self", ".", "step", "+=", "1", "self", ".", "metrics", ".", "append", "(", "logs", "[", "'metrics'", "]", ")", "if", "len", "(", "self", ".", "info_names", ")", ">", "0", ":", "self", ".", "infos", ".", "append", "(", "[", "logs", "[", "'info'", "]", "[", "k", "]", "for", "k", "in", "self", ".", "info_names", "]", ")" ]
Update progress bar at the end of each step
[ "Update", "progression", "bar", "at", "the", "end", "of", "each", "step" ]
python
train
47.538462
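One caveat worth a worked example: KERAS_VERSION > '2.1.3' above is a lexicographic string comparison, which happens to work for 2.1.x but misorders longer components:

print('2.1.4' > '2.1.3')   # True, as intended
print('2.10.0' > '2.9.0')  # False -- string order, not version order
print(tuple(map(int, '2.10.0'.split('.'))) > (2, 9, 0))  # True, tuple compare is safe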
instaloader/instaloader
instaloader/instaloader.py
https://github.com/instaloader/instaloader/blob/87d877e650cd8020b04b8b51be120599a441fd5b/instaloader/instaloader.py#L603-L636
def download_highlights(self, user: Union[int, Profile], fast_update: bool = False, filename_target: Optional[str] = None, storyitem_filter: Optional[Callable[[StoryItem], bool]] = None) -> None: """ Download available highlights from a user whose ID is given. To use this, one needs to be logged in. .. versionadded:: 4.1 :param user: ID or Profile of the user whose highlights should get downloaded. :param fast_update: If true, abort when first already-downloaded picture is encountered :param filename_target: Replacement for {target} in dirname_pattern and filename_pattern or None if profile name and the highlights' titles should be used instead :param storyitem_filter: function(storyitem), which returns True if given StoryItem should be downloaded """ for user_highlight in self.get_highlights(user): name = user_highlight.owner_username self.context.log("Retrieving highlights \"{}\" from profile {}".format(user_highlight.title, name)) totalcount = user_highlight.itemcount count = 1 for item in user_highlight.get_items(): if storyitem_filter is not None and not storyitem_filter(item): self.context.log("<{} skipped>".format(item), flush=True) continue self.context.log("[%3i/%3i] " % (count, totalcount), end="", flush=True) count += 1 with self.context.error_catcher('Download highlights \"{}\" from user {}'.format(user_highlight.title, name)): downloaded = self.download_storyitem(item, filename_target if filename_target else '{}/{}'.format(name, user_highlight.title)) if fast_update and not downloaded: break
[ "def", "download_highlights", "(", "self", ",", "user", ":", "Union", "[", "int", ",", "Profile", "]", ",", "fast_update", ":", "bool", "=", "False", ",", "filename_target", ":", "Optional", "[", "str", "]", "=", "None", ",", "storyitem_filter", ":", "Optional", "[", "Callable", "[", "[", "StoryItem", "]", ",", "bool", "]", "]", "=", "None", ")", "->", "None", ":", "for", "user_highlight", "in", "self", ".", "get_highlights", "(", "user", ")", ":", "name", "=", "user_highlight", ".", "owner_username", "self", ".", "context", ".", "log", "(", "\"Retrieving highlights \\\"{}\\\" from profile {}\"", ".", "format", "(", "user_highlight", ".", "title", ",", "name", ")", ")", "totalcount", "=", "user_highlight", ".", "itemcount", "count", "=", "1", "for", "item", "in", "user_highlight", ".", "get_items", "(", ")", ":", "if", "storyitem_filter", "is", "not", "None", "and", "not", "storyitem_filter", "(", "item", ")", ":", "self", ".", "context", ".", "log", "(", "\"<{} skipped>\"", ".", "format", "(", "item", ")", ",", "flush", "=", "True", ")", "continue", "self", ".", "context", ".", "log", "(", "\"[%3i/%3i] \"", "%", "(", "count", ",", "totalcount", ")", ",", "end", "=", "\"\"", ",", "flush", "=", "True", ")", "count", "+=", "1", "with", "self", ".", "context", ".", "error_catcher", "(", "'Download highlights \\\"{}\\\" from user {}'", ".", "format", "(", "user_highlight", ".", "title", ",", "name", ")", ")", ":", "downloaded", "=", "self", ".", "download_storyitem", "(", "item", ",", "filename_target", "if", "filename_target", "else", "'{}/{}'", ".", "format", "(", "name", ",", "user_highlight", ".", "title", ")", ")", "if", "fast_update", "and", "not", "downloaded", ":", "break" ]
Download available highlights from a user whose ID is given. To use this, one needs to be logged in. .. versionadded:: 4.1 :param user: ID or Profile of the user whose highlights should get downloaded. :param fast_update: If true, abort when first already-downloaded picture is encountered :param filename_target: Replacement for {target} in dirname_pattern and filename_pattern or None if profile name and the highlights' titles should be used instead :param storyitem_filter: function(storyitem), which returns True if given StoryItem should be downloaded
[ "Download", "available", "highlights", "from", "a", "user", "whose", "ID", "is", "given", ".", "To", "use", "this", "one", "needs", "to", "be", "logged", "in", "." ]
python
train
60.205882
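A hedged usage sketch; 'natgeo' and the session file name are placeholders, and a logged-in session is required, as the docstring states:

import instaloader

L = instaloader.Instaloader()
L.load_session_from_file('my_username')  # reuse a saved login session
profile = instaloader.Profile.from_username(L.context, 'natgeo')
# Only keep video story items, and stop early on already-seen files.
L.download_highlights(profile, fast_update=True,
                      storyitem_filter=lambda item: item.is_video)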
a1ezzz/wasp-general
wasp_general/task/registry.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/registry.py#L180-L192
def remove(self, task_cls): """ Remove a task from the storage. If a task class is stored multiple times (if :attr:`.WTaskRegistryStorage.__multiple_tasks_per_tag__` is True), all of them are removed. :param task_cls: task to remove :return: None """ registry_tag = task_cls.__registry_tag__ if registry_tag in self.__registry.keys(): self.__registry[registry_tag] = \ list(filter(lambda x: x != task_cls, self.__registry[registry_tag])) if len(self.__registry[registry_tag]) == 0: self.__registry.pop(registry_tag)
[ "def", "remove", "(", "self", ",", "task_cls", ")", ":", "registry_tag", "=", "task_cls", ".", "__registry_tag__", "if", "registry_tag", "in", "self", ".", "__registry", ".", "keys", "(", ")", ":", "self", ".", "__registry", "[", "registry_tag", "]", "=", "list", "(", "filter", "(", "lambda", "x", ":", "x", "!=", "task_cls", ",", "self", ".", "__registry", "[", "registry_tag", "]", ")", ")", "if", "len", "(", "self", ".", "__registry", "[", "registry_tag", "]", ")", "==", "0", ":", "self", ".", "__registry", ".", "pop", "(", "registry_tag", ")" ]
Remove a task from the storage. If a task class is stored multiple times (if :attr:`.WTaskRegistryStorage.__multiple_tasks_per_tag__` is True), all of them are removed. :param task_cls: task to remove :return: None
[ "Remove", "task", "from", "the", "storage", ".", "If", "task", "class", "are", "stored", "multiple", "times", "(", "if", ":", "attr", ":", ".", "WTaskRegistryStorage", ".", "__multiple_tasks_per_tag__", "is", "True", ")", "-", "removes", "all", "of", "them", "." ]
python
train
40.461538
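The tag-keyed removal in miniature; the real attribute is name-mangled (self.__registry), so a plain dict stands in:

registry = {'tag-a': ['TaskA', 'TaskA', 'TaskB']}

def remove(task_cls, tag):
    if tag in registry:
        registry[tag] = [t for t in registry[tag] if t != task_cls]
        if not registry[tag]:
            registry.pop(tag)

remove('TaskA', 'tag-a')
print(registry)  # {'tag-a': ['TaskB']} -- both stored copies removed in one call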
saltstack/salt
salt/modules/keystone.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/keystone.py#L542-L558
def service_delete(service_id=None, name=None, profile=None, **connection_args): ''' Delete a service from Keystone service catalog CLI Examples: .. code-block:: bash salt '*' keystone.service_delete c965f79c4f864eaaa9c3b41904e67082 salt '*' keystone.service_delete name=nova ''' kstone = auth(profile, **connection_args) if name: service_id = service_get(name=name, profile=profile, **connection_args)[name]['id'] kstone.services.delete(service_id) return 'Keystone service ID "{0}" deleted'.format(service_id)
[ "def", "service_delete", "(", "service_id", "=", "None", ",", "name", "=", "None", ",", "profile", "=", "None", ",", "*", "*", "connection_args", ")", ":", "kstone", "=", "auth", "(", "profile", ",", "*", "*", "connection_args", ")", "if", "name", ":", "service_id", "=", "service_get", "(", "name", "=", "name", ",", "profile", "=", "profile", ",", "*", "*", "connection_args", ")", "[", "name", "]", "[", "'id'", "]", "kstone", ".", "services", ".", "delete", "(", "service_id", ")", "return", "'Keystone service ID \"{0}\" deleted'", ".", "format", "(", "service_id", ")" ]
Delete a service from Keystone service catalog CLI Examples: .. code-block:: bash salt '*' keystone.service_delete c965f79c4f864eaaa9c3b41904e67082 salt '*' keystone.service_delete name=nova
[ "Delete", "a", "service", "from", "Keystone", "service", "catalog" ]
python
train
34.764706
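The Python-side counterpart of the CLI examples above, assuming a configured Salt master and reachable minions:

import salt.client

local = salt.client.LocalClient()
# Same effect as: salt '*' keystone.service_delete name=nova
print(local.cmd('*', 'keystone.service_delete', kwarg={'name': 'nova'}))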
gem/oq-engine
openquake/hazardlib/mfd/youngs_coppersmith_1985.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/mfd/youngs_coppersmith_1985.py#L289-L308
def _get_rate(self, mag): """ Calculate and return the annual occurrence rate for a specific bin. :param mag: Magnitude value corresponding to the center of the bin of interest. :returns: Float number, the annual occurrence rate for the given ``mag`` value. """ mag_lo = mag - self.bin_width / 2.0 mag_hi = mag + self.bin_width / 2.0 if mag >= self.min_mag and mag < self.char_mag - DELTA_CHAR / 2: # return rate according to exponential distribution return (10 ** (self.a_val - self.b_val * mag_lo) - 10 ** (self.a_val - self.b_val * mag_hi)) else: # return characteristic rate (distributed over the characteristic # range) for the given bin width return (self.char_rate / DELTA_CHAR) * self.bin_width
[ "def", "_get_rate", "(", "self", ",", "mag", ")", ":", "mag_lo", "=", "mag", "-", "self", ".", "bin_width", "/", "2.0", "mag_hi", "=", "mag", "+", "self", ".", "bin_width", "/", "2.0", "if", "mag", ">=", "self", ".", "min_mag", "and", "mag", "<", "self", ".", "char_mag", "-", "DELTA_CHAR", "/", "2", ":", "# return rate according to exponential distribution", "return", "(", "10", "**", "(", "self", ".", "a_val", "-", "self", ".", "b_val", "*", "mag_lo", ")", "-", "10", "**", "(", "self", ".", "a_val", "-", "self", ".", "b_val", "*", "mag_hi", ")", ")", "else", ":", "# return characteristic rate (distributed over the characteristic", "# range) for the given bin width", "return", "(", "self", ".", "char_rate", "/", "DELTA_CHAR", ")", "*", "self", ".", "bin_width" ]
Calculate and return the annual occurrence rate for a specific bin. :param mag: Magnitude value corresponding to the center of the bin of interest. :returns: Float number, the annual occurrence rate for the given ``mag`` value.
[ "Calculate", "and", "return", "the", "annual", "occurrence", "rate", "for", "a", "specific", "bin", "." ]
python
train
42.9
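The exponential branch as a worked number; a=4.0, b=1.0 and a 0.1-wide bin centred on magnitude 5.0 are values chosen only for illustration:

a_val, b_val, bin_width, mag = 4.0, 1.0, 0.1, 5.0
mag_lo = mag - bin_width / 2.0
mag_hi = mag + bin_width / 2.0
rate = 10 ** (a_val - b_val * mag_lo) - 10 ** (a_val - b_val * mag_hi)
print(round(rate, 4))  # 0.0231 events per year in this bin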
phac-nml/sistr_cmd
sistr/src/serovar_prediction/__init__.py
https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/serovar_prediction/__init__.py#L434-L540
def overall_serovar_call(serovar_prediction, antigen_predictor): """ Predict serovar from cgMLST cluster membership analysis and antigen BLAST results. SerovarPrediction object is assigned H1, H2 and Serogroup from the antigen BLAST results. Antigen BLAST results will predict a particular serovar or list of serovars, however, the cgMLST membership may be able to help narrow down the list of potential serovars. Notes: If the cgMLST predicted serovar is within the list of antigen BLAST predicted serovars, then the serovar is assigned the cgMLST predicted serovar. If all antigens are found, but an antigen serovar is not found then the serovar is assigned a pseudo-antigenic formula (Serogroup:H1:H2), otherwise the serovar is assigned the cgMLST prediction. If the antigen predicted serovar does not match the cgMLST predicted serovar, - the serovar is the cgMLST serovar if the cgMLST cluster level is <= 0.1 (10% or less) - otherwise, the serovar is antigen predicted serovar(s) Args: serovar_prediction (src.serovar_prediction.SerovarPrediction): Serovar prediction results (antigen+cgMLST[+Mash]) antigen_predictor (src.serovar_prediction.SerovarPredictor): Antigen search results Returns: src.serovar_prediction.SerovarPrediction: Serovar prediction results with overall prediction from antigen + cgMLST """ assert isinstance(serovar_prediction, SerovarPrediction) assert isinstance(antigen_predictor, SerovarPredictor) h1 = antigen_predictor.h1 h2 = antigen_predictor.h2 sg = antigen_predictor.serogroup spp = serovar_prediction.cgmlst_subspecies if spp is None: if 'mash_match' in serovar_prediction.__dict__: spp = serovar_prediction.__dict__['mash_subspecies'] serovar_prediction.serovar_antigen = antigen_predictor.serovar cgmlst_serovar = serovar_prediction.serovar_cgmlst cgmlst_distance = float(serovar_prediction.cgmlst_distance) null_result = '-:-:-' try: spp_roman = spp_name_to_roman[spp] except: spp_roman = None is_antigen_null = lambda x: (x is None or x == '' or x == '-') if antigen_predictor.serovar is None: if is_antigen_null(sg) and is_antigen_null(h1) and is_antigen_null(h2): if spp_roman is not None: serovar_prediction.serovar = '{} {}:{}:{}'.format(spp_roman, sg, h1, h2) else: serovar_prediction.serovar = '{}:{}:{}'.format(spp_roman, sg, h1, h2) elif cgmlst_serovar is not None and cgmlst_distance <= CGMLST_DISTANCE_THRESHOLD: serovar_prediction.serovar = cgmlst_serovar else: serovar_prediction.serovar = null_result if 'mash_match' in serovar_prediction.__dict__: spd = serovar_prediction.__dict__ mash_dist = float(spd['mash_distance']) if mash_dist <= MASH_DISTANCE_THRESHOLD: serovar_prediction.serovar = spd['mash_serovar'] else: serovars_from_antigen = antigen_predictor.serovar.split('|') if not isinstance(serovars_from_antigen, list): serovars_from_antigen = [serovars_from_antigen] if cgmlst_serovar is not None: if cgmlst_serovar in serovars_from_antigen: serovar_prediction.serovar = cgmlst_serovar else: if float(cgmlst_distance) <= CGMLST_DISTANCE_THRESHOLD: serovar_prediction.serovar = cgmlst_serovar elif 'mash_match' in serovar_prediction.__dict__: spd = serovar_prediction.__dict__ mash_serovar = spd['mash_serovar'] mash_dist = float(spd['mash_distance']) if mash_serovar in serovars_from_antigen: serovar_prediction.serovar = mash_serovar else: if mash_dist <= MASH_DISTANCE_THRESHOLD: serovar_prediction.serovar = mash_serovar if serovar_prediction.serovar is None: serovar_prediction.serovar = serovar_prediction.serovar_antigen if serovar_prediction.h1 is None: serovar_prediction.h1 = '-' 
if serovar_prediction.h2 is None: serovar_prediction.h2 = '-' if serovar_prediction.serogroup is None: serovar_prediction.serogroup = '-' if serovar_prediction.serovar_antigen is None: if spp_roman is not None: serovar_prediction.serovar_antigen = '{} -:-:-'.format(spp_roman) else: serovar_prediction.serovar_antigen = '-:-:-' if serovar_prediction.serovar is None: serovar_prediction.serovar = serovar_prediction.serovar_antigen return serovar_prediction
[ "def", "overall_serovar_call", "(", "serovar_prediction", ",", "antigen_predictor", ")", ":", "assert", "isinstance", "(", "serovar_prediction", ",", "SerovarPrediction", ")", "assert", "isinstance", "(", "antigen_predictor", ",", "SerovarPredictor", ")", "h1", "=", "antigen_predictor", ".", "h1", "h2", "=", "antigen_predictor", ".", "h2", "sg", "=", "antigen_predictor", ".", "serogroup", "spp", "=", "serovar_prediction", ".", "cgmlst_subspecies", "if", "spp", "is", "None", ":", "if", "'mash_match'", "in", "serovar_prediction", ".", "__dict__", ":", "spp", "=", "serovar_prediction", ".", "__dict__", "[", "'mash_subspecies'", "]", "serovar_prediction", ".", "serovar_antigen", "=", "antigen_predictor", ".", "serovar", "cgmlst_serovar", "=", "serovar_prediction", ".", "serovar_cgmlst", "cgmlst_distance", "=", "float", "(", "serovar_prediction", ".", "cgmlst_distance", ")", "null_result", "=", "'-:-:-'", "try", ":", "spp_roman", "=", "spp_name_to_roman", "[", "spp", "]", "except", ":", "spp_roman", "=", "None", "is_antigen_null", "=", "lambda", "x", ":", "(", "x", "is", "None", "or", "x", "==", "''", "or", "x", "==", "'-'", ")", "if", "antigen_predictor", ".", "serovar", "is", "None", ":", "if", "is_antigen_null", "(", "sg", ")", "and", "is_antigen_null", "(", "h1", ")", "and", "is_antigen_null", "(", "h2", ")", ":", "if", "spp_roman", "is", "not", "None", ":", "serovar_prediction", ".", "serovar", "=", "'{} {}:{}:{}'", ".", "format", "(", "spp_roman", ",", "sg", ",", "h1", ",", "h2", ")", "else", ":", "serovar_prediction", ".", "serovar", "=", "'{}:{}:{}'", ".", "format", "(", "spp_roman", ",", "sg", ",", "h1", ",", "h2", ")", "elif", "cgmlst_serovar", "is", "not", "None", "and", "cgmlst_distance", "<=", "CGMLST_DISTANCE_THRESHOLD", ":", "serovar_prediction", ".", "serovar", "=", "cgmlst_serovar", "else", ":", "serovar_prediction", ".", "serovar", "=", "null_result", "if", "'mash_match'", "in", "serovar_prediction", ".", "__dict__", ":", "spd", "=", "serovar_prediction", ".", "__dict__", "mash_dist", "=", "float", "(", "spd", "[", "'mash_distance'", "]", ")", "if", "mash_dist", "<=", "MASH_DISTANCE_THRESHOLD", ":", "serovar_prediction", ".", "serovar", "=", "spd", "[", "'mash_serovar'", "]", "else", ":", "serovars_from_antigen", "=", "antigen_predictor", ".", "serovar", ".", "split", "(", "'|'", ")", "if", "not", "isinstance", "(", "serovars_from_antigen", ",", "list", ")", ":", "serovars_from_antigen", "=", "[", "serovars_from_antigen", "]", "if", "cgmlst_serovar", "is", "not", "None", ":", "if", "cgmlst_serovar", "in", "serovars_from_antigen", ":", "serovar_prediction", ".", "serovar", "=", "cgmlst_serovar", "else", ":", "if", "float", "(", "cgmlst_distance", ")", "<=", "CGMLST_DISTANCE_THRESHOLD", ":", "serovar_prediction", ".", "serovar", "=", "cgmlst_serovar", "elif", "'mash_match'", "in", "serovar_prediction", ".", "__dict__", ":", "spd", "=", "serovar_prediction", ".", "__dict__", "mash_serovar", "=", "spd", "[", "'mash_serovar'", "]", "mash_dist", "=", "float", "(", "spd", "[", "'mash_distance'", "]", ")", "if", "mash_serovar", "in", "serovars_from_antigen", ":", "serovar_prediction", ".", "serovar", "=", "mash_serovar", "else", ":", "if", "mash_dist", "<=", "MASH_DISTANCE_THRESHOLD", ":", "serovar_prediction", ".", "serovar", "=", "mash_serovar", "if", "serovar_prediction", ".", "serovar", "is", "None", ":", "serovar_prediction", ".", "serovar", "=", "serovar_prediction", ".", "serovar_antigen", "if", "serovar_prediction", ".", "h1", "is", "None", ":", "serovar_prediction", ".", 
"h1", "=", "'-'", "if", "serovar_prediction", ".", "h2", "is", "None", ":", "serovar_prediction", ".", "h2", "=", "'-'", "if", "serovar_prediction", ".", "serogroup", "is", "None", ":", "serovar_prediction", ".", "serogroup", "=", "'-'", "if", "serovar_prediction", ".", "serovar_antigen", "is", "None", ":", "if", "spp_roman", "is", "not", "None", ":", "serovar_prediction", ".", "serovar_antigen", "=", "'{} -:-:-'", ".", "format", "(", "spp_roman", ")", "else", ":", "serovar_prediction", ".", "serovar_antigen", "=", "'-:-:-'", "if", "serovar_prediction", ".", "serovar", "is", "None", ":", "serovar_prediction", ".", "serovar", "=", "serovar_prediction", ".", "serovar_antigen", "return", "serovar_prediction" ]
Predict serovar from cgMLST cluster membership analysis and antigen BLAST results. SerovarPrediction object is assigned H1, H2 and Serogroup from the antigen BLAST results. Antigen BLAST results will predict a particular serovar or list of serovars, however, the cgMLST membership may be able to help narrow down the list of potential serovars. Notes: If the cgMLST predicted serovar is within the list of antigen BLAST predicted serovars, then the serovar is assigned the cgMLST predicted serovar. If all antigens are found, but an antigen serovar is not found then the serovar is assigned a pseudo-antigenic formula (Serogroup:H1:H2), otherwise the serovar is assigned the cgMLST prediction. If the antigen predicted serovar does not match the cgMLST predicted serovar, - the serovar is the cgMLST serovar if the cgMLST cluster level is <= 0.1 (10% or less) - otherwise, the serovar is antigen predicted serovar(s) Args: serovar_prediction (src.serovar_prediction.SerovarPrediction): Serovar prediction results (antigen+cgMLST[+Mash]) antigen_predictor (src.serovar_prediction.SerovarPredictor): Antigen search results Returns: src.serovar_prediction.SerovarPrediction: Serovar prediction results with overall prediction from antigen + cgMLST
[ "Predict", "serovar", "from", "cgMLST", "cluster", "membership", "analysis", "and", "antigen", "BLAST", "results", ".", "SerovarPrediction", "object", "is", "assigned", "H1", "H2", "and", "Serogroup", "from", "the", "antigen", "BLAST", "results", ".", "Antigen", "BLAST", "results", "will", "predict", "a", "particular", "serovar", "or", "list", "of", "serovars", "however", "the", "cgMLST", "membership", "may", "be", "able", "to", "help", "narrow", "down", "the", "list", "of", "potential", "serovars", "." ]
python
train
43.654206
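A minimal sketch of the antigen-versus-cgMLST tie-breaking rule described in the docstring above. Only the 0.1 distance threshold and the precedence order come from the record; the helper name, inputs and serovar strings are illustrative.

.. code-block:: python

    CGMLST_DISTANCE_THRESHOLD = 0.1  # cgMLST cluster level (10%)

    def pick_serovar(antigen_serovars, cgmlst_serovar, cgmlst_distance):
        # cgMLST wins outright when it is one of the antigen candidates.
        if cgmlst_serovar in antigen_serovars:
            return cgmlst_serovar
        # Otherwise trust cgMLST only when the cluster distance is small enough.
        if cgmlst_distance <= CGMLST_DISTANCE_THRESHOLD:
            return cgmlst_serovar
        # Fall back to the (possibly multi-valued) antigen prediction.
        return '|'.join(antigen_serovars)

    assert pick_serovar(['Enteritidis'], 'Enteritidis', 0.5) == 'Enteritidis'
    assert pick_serovar(['Typhimurium'], 'Enteritidis', 0.05) == 'Enteritidis'
    assert pick_serovar(['Typhimurium', 'Lagos'], 'Enteritidis', 0.5) == 'Typhimurium|Lagos'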
Qiskit/qiskit-terra
qiskit/quantum_info/operators/channel/superop.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/quantum_info/operators/channel/superop.py#L416-L425
def _instruction_to_superop(cls, instruction): """Convert a QuantumCircuit or Instruction to a SuperOp.""" # Convert circuit to an instruction if isinstance(instruction, QuantumCircuit): instruction = instruction.to_instruction() # Initialize an identity superoperator of the correct size # of the circuit op = SuperOp(np.eye(4 ** instruction.num_qubits)) op._append_instruction(instruction) return op
[ "def", "_instruction_to_superop", "(", "cls", ",", "instruction", ")", ":", "# Convert circuit to an instruction", "if", "isinstance", "(", "instruction", ",", "QuantumCircuit", ")", ":", "instruction", "=", "instruction", ".", "to_instruction", "(", ")", "# Initialize an identity superoperator of the correct size", "# of the circuit", "op", "=", "SuperOp", "(", "np", ".", "eye", "(", "4", "**", "instruction", ".", "num_qubits", ")", ")", "op", ".", "_append_instruction", "(", "instruction", ")", "return", "op" ]
Convert a QuantumCircuit or Instruction to a SuperOp.
[ "Convert", "a", "QuantumCircuit", "or", "Instruction", "to", "a", "SuperOp", "." ]
python
test
46.8
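A hedged usage sketch for the record above: the public entry point is the SuperOp constructor, which dispatches circuit and instruction inputs through _instruction_to_superop. The one-qubit circuit is illustrative.

.. code-block:: python

    import numpy as np
    from qiskit import QuantumCircuit
    from qiskit.quantum_info import SuperOp

    qc = QuantumCircuit(1)
    qc.h(0)  # one Hadamard gate

    op = SuperOp(qc)             # builds eye(4**n) and appends the circuit, as above
    print(op.dim)                # (2, 2): input/output dimensions
    print(np.round(op.data, 3))  # the 4x4 superoperator matrix of the Hadamard channel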
openstack/horizon
openstack_dashboard/utils/settings.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/utils/settings.py#L41-L61
def import_dashboard_config(modules): """Imports configuration from all the modules and merges it.""" config = collections.defaultdict(dict) for module in modules: for submodule in import_submodules(module).values(): if hasattr(submodule, 'DASHBOARD'): dashboard = submodule.DASHBOARD config[dashboard].update(submodule.__dict__) elif (hasattr(submodule, 'PANEL') or hasattr(submodule, 'PANEL_GROUP') or hasattr(submodule, 'FEATURE')): # If enabled and local.enabled contains a same filename, # the file loaded later (i.e., local.enabled) will be used. name = submodule.__name__.rsplit('.', 1)[1] config[name] = submodule.__dict__ else: logging.warning("Skipping %s because it doesn't have DASHBOARD" ", PANEL, PANEL_GROUP, or FEATURE defined.", submodule.__name__) return sorted(config.items(), key=lambda c: c[1]['__name__'].rsplit('.', 1)[1])
[ "def", "import_dashboard_config", "(", "modules", ")", ":", "config", "=", "collections", ".", "defaultdict", "(", "dict", ")", "for", "module", "in", "modules", ":", "for", "submodule", "in", "import_submodules", "(", "module", ")", ".", "values", "(", ")", ":", "if", "hasattr", "(", "submodule", ",", "'DASHBOARD'", ")", ":", "dashboard", "=", "submodule", ".", "DASHBOARD", "config", "[", "dashboard", "]", ".", "update", "(", "submodule", ".", "__dict__", ")", "elif", "(", "hasattr", "(", "submodule", ",", "'PANEL'", ")", "or", "hasattr", "(", "submodule", ",", "'PANEL_GROUP'", ")", "or", "hasattr", "(", "submodule", ",", "'FEATURE'", ")", ")", ":", "# If enabled and local.enabled contains a same filename,", "# the file loaded later (i.e., local.enabled) will be used.", "name", "=", "submodule", ".", "__name__", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "1", "]", "config", "[", "name", "]", "=", "submodule", ".", "__dict__", "else", ":", "logging", ".", "warning", "(", "\"Skipping %s because it doesn't have DASHBOARD\"", "\", PANEL, PANEL_GROUP, or FEATURE defined.\"", ",", "submodule", ".", "__name__", ")", "return", "sorted", "(", "config", ".", "items", "(", ")", ",", "key", "=", "lambda", "c", ":", "c", "[", "1", "]", "[", "'__name__'", "]", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "1", "]", ")" ]
Imports configuration from all the modules and merges it.
[ "Imports", "configuration", "from", "all", "the", "modules", "and", "merges", "it", "." ]
python
train
53.095238
juztin/flask-tracy
flask_tracy/base.py
https://github.com/juztin/flask-tracy/blob/8a43094f0fced3c216f7b65ad6c5c7a22c14ea25/flask_tracy/base.py#L48-L57
def init_app(self, app): """Setup before_request, after_request handlers for tracing. """ app.config.setdefault("TRACY_REQUIRE_CLIENT", False) if not hasattr(app, 'extensions'): app.extensions = {} app.extensions['restpoints'] = self app.before_request(self._before) app.after_request(self._after)
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "app", ".", "config", ".", "setdefault", "(", "\"TRACY_REQUIRE_CLIENT\"", ",", "False", ")", "if", "not", "hasattr", "(", "app", ",", "'extensions'", ")", ":", "app", ".", "extensions", "=", "{", "}", "app", ".", "extensions", "[", "'restpoints'", "]", "=", "self", "app", ".", "before_request", "(", "self", ".", "_before", ")", "app", ".", "after_request", "(", "self", ".", "_after", ")" ]
Setup before_request, after_request handlers for tracing.
[ "Setup", "before_request", "after_request", "handlers", "for", "tracing", "." ]
python
valid
35.7
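A hedged wiring sketch for the extension above. The Tracy class name and import path are assumptions following the usual Flask-extension pattern; only init_app and the TRACY_REQUIRE_CLIENT default are confirmed by the record.

.. code-block:: python

    from flask import Flask
    from flask_tracy import Tracy  # assumed class name and import path

    app = Flask(__name__)
    app.config['TRACY_REQUIRE_CLIENT'] = True  # overrides the default set by init_app

    tracy = Tracy()
    tracy.init_app(app)  # registers the _before/_after request hooks shown above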
Crunch-io/crunch-cube
src/cr/cube/cube_slice.py
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/cube_slice.py#L588-L609
def _array_type_std_res(self, counts, total, colsum, rowsum): """Return ndarray containing standard residuals for array values. The shape of the return value is the same as that of *counts*. Array variables require special processing because of the underlying math. Essentially, it boils down to the fact that the variable dimensions are mutually independent, and standard residuals are calculated for each of them separately, and then stacked together in the resulting array. """ if self.mr_dim_ind == 0: # --This is a special case where broadcasting cannot be # --automatically done. We need to "inflate" the single dimensional # --ndarrays, to be able to treat them as "columns" (essentially a # --Nx1 ndarray). This is needed for subsequent multiplication # --that needs to happen column wise (rowsum * colsum) / total. total = total[:, np.newaxis] rowsum = rowsum[:, np.newaxis] expected_counts = rowsum * colsum / total variance = rowsum * colsum * (total - rowsum) * (total - colsum) / total ** 3 return (counts - expected_counts) / np.sqrt(variance)
[ "def", "_array_type_std_res", "(", "self", ",", "counts", ",", "total", ",", "colsum", ",", "rowsum", ")", ":", "if", "self", ".", "mr_dim_ind", "==", "0", ":", "# --This is a special case where broadcasting cannot be", "# --automatically done. We need to \"inflate\" the single dimensional", "# --ndarrays, to be able to treat them as \"columns\" (essentially a", "# --Nx1 ndarray). This is needed for subsequent multiplication", "# --that needs to happen column wise (rowsum * colsum) / total.", "total", "=", "total", "[", ":", ",", "np", ".", "newaxis", "]", "rowsum", "=", "rowsum", "[", ":", ",", "np", ".", "newaxis", "]", "expected_counts", "=", "rowsum", "*", "colsum", "/", "total", "variance", "=", "rowsum", "*", "colsum", "*", "(", "total", "-", "rowsum", ")", "*", "(", "total", "-", "colsum", ")", "/", "total", "**", "3", "return", "(", "counts", "-", "expected_counts", ")", "/", "np", ".", "sqrt", "(", "variance", ")" ]
Return ndarray containing standard residuals for array values. The shape of the return value is the same as that of *counts*. Array variables require special processing because of the underlying math. Essentially, it boils down to the fact that the variable dimensions are mutually independent, and standard residuals are calculated for each of them separately, and then stacked together in the resulting array.
[ "Return", "ndarray", "containing", "standard", "residuals", "for", "array", "values", "." ]
python
train
55.363636
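A self-contained NumPy sketch of the standardized-residual formula used above, on a plain 2-D contingency table (no MR-dimension special case); the counts are invented.

.. code-block:: python

    import numpy as np

    counts = np.array([[20., 30.],
                       [10., 40.]])
    total = counts.sum()
    rowsum = counts.sum(axis=1, keepdims=True)  # Nx1 column, like the "inflated" ndarray above
    colsum = counts.sum(axis=0)

    expected = rowsum * colsum / total
    variance = rowsum * colsum * (total - rowsum) * (total - colsum) / total ** 3
    std_res = (counts - expected) / np.sqrt(variance)
    print(np.round(std_res, 3))  # positive where observed exceeds expected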
bitly/asyncmongo
asyncmongo/pool.py
https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/pool.py#L41-L54
def close_idle_connections(self, pool_id=None): """close idle connections to mongo""" if not hasattr(self, '_pools'): return if pool_id: if pool_id not in self._pools: raise ProgrammingError("pool %r does not exist" % pool_id) else: pool = self._pools[pool_id] pool.close() else: for pool_id, pool in self._pools.items(): pool.close()
[ "def", "close_idle_connections", "(", "self", ",", "pool_id", "=", "None", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_pools'", ")", ":", "return", "if", "pool_id", ":", "if", "pool_id", "not", "in", "self", ".", "_pools", ":", "raise", "ProgrammingError", "(", "\"pool %r does not exist\"", "%", "pool_id", ")", "else", ":", "pool", "=", "self", ".", "_pools", "[", "pool_id", "]", "pool", ".", "close", "(", ")", "else", ":", "for", "pool_id", ",", "pool", "in", "self", ".", "_pools", ".", "items", "(", ")", ":", "pool", ".", "close", "(", ")" ]
close idle connections to mongo
[ "close", "idle", "connections", "to", "mongo" ]
python
train
33.285714
gwastro/pycbc
pycbc/inference/sampler/base_mcmc.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/sampler/base_mcmc.py#L358-L369
def pos(self): """A dictionary of the current walker positions. If the sampler hasn't been run yet, returns p0. """ pos = self._pos if pos is None: return self.p0 # convert to dict pos = {param: self._pos[..., k] for (k, param) in enumerate(self.sampling_params)} return pos
[ "def", "pos", "(", "self", ")", ":", "pos", "=", "self", ".", "_pos", "if", "pos", "is", "None", ":", "return", "self", ".", "p0", "# convert to dict", "pos", "=", "{", "param", ":", "self", ".", "_pos", "[", "...", ",", "k", "]", "for", "(", "k", ",", "param", ")", "in", "enumerate", "(", "self", ".", "sampling_params", ")", "}", "return", "pos" ]
A dictionary of the current walker positions. If the sampler hasn't been run yet, returns p0.
[ "A", "dictionary", "of", "the", "current", "walker", "positions", "." ]
python
train
29.583333
eirannejad/Revit-Journal-Maker
rjm/__init__.py
https://github.com/eirannejad/Revit-Journal-Maker/blob/09a4f27da6d183f63a2c93ed99dca8a8590d5241/rjm/__init__.py#L59-L73
def _init_journal(self, permissive=True): """Add the initialization lines to the journal. By default adds JrnObj variable and timestamp to the journal contents. Args: permissive (bool): if True most errors in journal will not cause Revit to stop journal execution. Some still do. """ nowstamp = datetime.now().strftime("%d-%b-%Y %H:%M:%S.%f")[:-3] self._add_entry(templates.INIT .format(time_stamp=nowstamp)) if permissive: self._add_entry(templates.INIT_DEBUG)
[ "def", "_init_journal", "(", "self", ",", "permissive", "=", "True", ")", ":", "nowstamp", "=", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%d-%b-%Y %H:%M:%S.%f\"", ")", "[", ":", "-", "3", "]", "self", ".", "_add_entry", "(", "templates", ".", "INIT", ".", "format", "(", "time_stamp", "=", "nowstamp", ")", ")", "if", "permissive", ":", "self", ".", "_add_entry", "(", "templates", ".", "INIT_DEBUG", ")" ]
Add the initialization lines to the journal. By default adds JrnObj variable and timestamp to the journal contents. Args: permissive (bool): if True most errors in journal will not cause Revit to stop journal execution. Some still do.
[ "Add", "the", "initialization", "lines", "to", "the", "journal", "." ]
python
train
41.6
pysathq/pysat
pysat/card.py
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/card.py#L463-L531
def increase(self, ubound=1, top_id=None): """ Increases a potential upper bound that can be imposed on the literals in the sum of an existing :class:`ITotalizer` object to a new value. :param ubound: a new upper bound. :param top_id: a new top variable identifier. :type ubound: int :type top_id: integer or None The top identifier ``top_id`` is applied only if it is greater than the one used in ``self``. This method creates additional clauses encoding the existing totalizer tree up to the new upper bound given and appends them to the list of clauses of :class:`.CNF` ``self.cnf``. The number of newly created clauses is stored in variable ``self.nof_new``. Also, a list of bounds ``self.rhs`` gets increased and its length becomes ``ubound+1``. The method can be used in the following way: .. code-block:: python >>> from pysat.card import ITotalizer >>> t = ITotalizer(lits=[1, 2, 3], ubound=1) >>> print t.cnf.clauses [[-2, 4], [-1, 4], [-1, -2, 5], [-4, 6], [-5, 7], [-3, 6], [-3, -4, 7]] >>> print t.rhs [6, 7] >>> >>> t.increase(ubound=2) >>> print t.cnf.clauses [[-2, 4], [-1, 4], [-1, -2, 5], [-4, 6], [-5, 7], [-3, 6], [-3, -4, 7], [-3, -5, 8]] >>> print t.cnf.clauses[-t.nof_new:] [[-3, -5, 8]] >>> print t.rhs [6, 7, 8] >>> t.delete() """ self.top_id = max(self.top_id, top_id if top_id != None else 0) # do nothing if the bound is set incorrectly if ubound <= self.ubound or self.ubound >= len(self.lits): self.nof_new = 0 return else: self.ubound = ubound # saving default SIGINT handler def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL) # updating the object and adding more variables and clauses clauses, self.rhs, self.top_id = pycard.itot_inc(self.tobj, self.ubound, self.top_id) # recovering default SIGINT handler def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler) # saving the result self.cnf.clauses.extend(clauses) self.cnf.nv = self.top_id # keeping the number of newly added clauses self.nof_new = len(clauses)
[ "def", "increase", "(", "self", ",", "ubound", "=", "1", ",", "top_id", "=", "None", ")", ":", "self", ".", "top_id", "=", "max", "(", "self", ".", "top_id", ",", "top_id", "if", "top_id", "!=", "None", "else", "0", ")", "# do nothing if the bound is set incorrectly", "if", "ubound", "<=", "self", ".", "ubound", "or", "self", ".", "ubound", ">=", "len", "(", "self", ".", "lits", ")", ":", "self", ".", "nof_new", "=", "0", "return", "else", ":", "self", ".", "ubound", "=", "ubound", "# saving default SIGINT handler", "def_sigint_handler", "=", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "signal", ".", "SIG_DFL", ")", "# updating the object and adding more variables and clauses", "clauses", ",", "self", ".", "rhs", ",", "self", ".", "top_id", "=", "pycard", ".", "itot_inc", "(", "self", ".", "tobj", ",", "self", ".", "ubound", ",", "self", ".", "top_id", ")", "# recovering default SIGINT handler", "def_sigint_handler", "=", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "def_sigint_handler", ")", "# saving the result", "self", ".", "cnf", ".", "clauses", ".", "extend", "(", "clauses", ")", "self", ".", "cnf", ".", "nv", "=", "self", ".", "top_id", "# keeping the number of newly added clauses", "self", ".", "nof_new", "=", "len", "(", "clauses", ")" ]
Increases a potential upper bound that can be imposed on the literals in the sum of an existing :class:`ITotalizer` object to a new value. :param ubound: a new upper bound. :param top_id: a new top variable identifier. :type ubound: int :type top_id: integer or None The top identifier ``top_id`` is applied only if it is greater than the one used in ``self``. This method creates additional clauses encoding the existing totalizer tree up to the new upper bound given and appends them to the list of clauses of :class:`.CNF` ``self.cnf``. The number of newly created clauses is stored in variable ``self.nof_new``. Also, a list of bounds ``self.rhs`` gets increased and its length becomes ``ubound+1``. The method can be used in the following way: .. code-block:: python >>> from pysat.card import ITotalizer >>> t = ITotalizer(lits=[1, 2, 3], ubound=1) >>> print t.cnf.clauses [[-2, 4], [-1, 4], [-1, -2, 5], [-4, 6], [-5, 7], [-3, 6], [-3, -4, 7]] >>> print t.rhs [6, 7] >>> >>> t.increase(ubound=2) >>> print t.cnf.clauses [[-2, 4], [-1, 4], [-1, -2, 5], [-4, 6], [-5, 7], [-3, 6], [-3, -4, 7], [-3, -5, 8]] >>> print t.cnf.clauses[-t.nof_new:] [[-3, -5, 8]] >>> print t.rhs [6, 7, 8] >>> t.delete()
[ "Increases", "a", "potential", "upper", "bound", "that", "can", "be", "imposed", "on", "the", "literals", "in", "the", "sum", "of", "an", "existing", ":", "class", ":", "ITotalizer", "object", "to", "a", "new", "value", "." ]
python
train
36.956522
esheldon/fitsio
fitsio/header.py
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/header.py#L372-L438
def _record2card(self, record): """ when we add new records they don't have a card, this sort of fakes it up similar to what cfitsio does, just for display purposes. e.g. DBL = 23.299843 LNG = 3423432 KEYSNC = 'hello ' KEYSC = 'hello ' / a comment for string KEYDC = 3.14159265358979 / a comment for pi KEYLC = 323423432 / a comment for long basically, - 8 chars, left aligned, for the keyword name - a space - 20 chars for value, left aligned for strings, right aligned for numbers - if there is a comment, one space followed by / then another space then the comment out to 80 chars """ name = record['name'] value = record['value'] v_isstring = isstring(value) if name == 'COMMENT': # card = 'COMMENT %s' % value card = 'COMMENT %s' % value elif name == 'CONTINUE': card = 'CONTINUE %s' % value elif name == 'HISTORY': card = 'HISTORY %s' % value else: if len(name) > 8: card = 'HIERARCH %s= ' % name else: card = '%-8s= ' % name[0:8] # these may be string representations of data, or actual strings if v_isstring: value = str(value) if len(value) > 0: if value[0] != "'": # this is a string representing a string header field # make it look like it will look in the header value = "'" + value + "'" vstr = '%-20s' % value else: vstr = "%20s" % value else: vstr = "''" else: vstr = '%20s' % value card += vstr if 'comment' in record: card += ' / %s' % record['comment'] if v_isstring and len(card) > 80: card = card[0:79] + "'" else: card = card[0:80] return card
[ "def", "_record2card", "(", "self", ",", "record", ")", ":", "name", "=", "record", "[", "'name'", "]", "value", "=", "record", "[", "'value'", "]", "v_isstring", "=", "isstring", "(", "value", ")", "if", "name", "==", "'COMMENT'", ":", "# card = 'COMMENT %s' % value", "card", "=", "'COMMENT %s'", "%", "value", "elif", "name", "==", "'CONTINUE'", ":", "card", "=", "'CONTINUE %s'", "%", "value", "elif", "name", "==", "'HISTORY'", ":", "card", "=", "'HISTORY %s'", "%", "value", "else", ":", "if", "len", "(", "name", ")", ">", "8", ":", "card", "=", "'HIERARCH %s= '", "%", "name", "else", ":", "card", "=", "'%-8s= '", "%", "name", "[", "0", ":", "8", "]", "# these may be string representations of data, or actual strings", "if", "v_isstring", ":", "value", "=", "str", "(", "value", ")", "if", "len", "(", "value", ")", ">", "0", ":", "if", "value", "[", "0", "]", "!=", "\"'\"", ":", "# this is a string representing a string header field", "# make it look like it will look in the header", "value", "=", "\"'\"", "+", "value", "+", "\"'\"", "vstr", "=", "'%-20s'", "%", "value", "else", ":", "vstr", "=", "\"%20s\"", "%", "value", "else", ":", "vstr", "=", "\"''\"", "else", ":", "vstr", "=", "'%20s'", "%", "value", "card", "+=", "vstr", "if", "'comment'", "in", "record", ":", "card", "+=", "' / %s'", "%", "record", "[", "'comment'", "]", "if", "v_isstring", "and", "len", "(", "card", ")", ">", "80", ":", "card", "=", "card", "[", "0", ":", "79", "]", "+", "\"'\"", "else", ":", "card", "=", "card", "[", "0", ":", "80", "]", "return", "card" ]
when we add new records they don't have a card, this sort of fakes it up similar to what cfitsio does, just for display purposes. e.g. DBL = 23.299843 LNG = 3423432 KEYSNC = 'hello ' KEYSC = 'hello ' / a comment for string KEYDC = 3.14159265358979 / a comment for pi KEYLC = 323423432 / a comment for long basically, - 8 chars, left aligned, for the keyword name - a space - 20 chars for value, left aligned for strings, right aligned for numbers - if there is a comment, one space followed by / then another space then the comment out to 80 chars
[ "when", "we", "add", "new", "records", "they", "don", "t", "have", "a", "card", "this", "sort", "of", "fakes", "it", "up", "similar", "to", "what", "cfitsio", "does", "just", "for", "display", "purposes", ".", "e", ".", "g", "." ]
python
train
32.955224
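A pure-Python sketch of the card layout rules the docstring above describes (8-character keyword, '= ', 20-character value field, optional ' / comment', 80-character cap). It mirrors only the formatting, not the full method.

.. code-block:: python

    def fake_card(name, value, comment=None):
        if isinstance(value, str):
            vstr = '%-20s' % ("'%s'" % value)  # strings: quoted, left aligned
        else:
            vstr = '%20s' % value              # numbers: right aligned
        card = '%-8s= %s' % (name[:8], vstr)
        if comment is not None:
            card += ' / %s' % comment
        return card[:80]                       # hard 80-character cap

    print(fake_card('KEYDC', 3.14159265358979, 'a comment for pi'))
    print(fake_card('KEYSNC', 'hello'))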
ga4gh/ga4gh-server
ga4gh/server/datamodel/__init__.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/__init__.py#L250-L257
def obfuscate(cls, idStr): """ Mildly obfuscates the specified ID string in an easily reversible fashion. This is not intended for security purposes, but rather to dissuade users from depending on our internal ID structures. """ return unicode(base64.urlsafe_b64encode( idStr.encode('utf-8')).replace(b'=', b''))
[ "def", "obfuscate", "(", "cls", ",", "idStr", ")", ":", "return", "unicode", "(", "base64", ".", "urlsafe_b64encode", "(", "idStr", ".", "encode", "(", "'utf-8'", ")", ")", ".", "replace", "(", "b'='", ",", "b''", ")", ")" ]
Mildly obfuscates the specified ID string in an easily reversible fashion. This is not intended for security purposes, but rather to dissuade users from depending on our internal ID structures.
[ "Mildly", "obfuscates", "the", "specified", "ID", "string", "in", "an", "easily", "reversible", "fashion", ".", "This", "is", "not", "intended", "for", "security", "purposes", "but", "rather", "to", "dissuade", "users", "from", "depending", "on", "our", "internal", "ID", "structures", "." ]
python
train
45.625
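Because the obfuscation above is just unpadded URL-safe base64, reversing it takes a few lines of stdlib code. A sketch of the round trip; the deobfuscate helper is illustrative and not part of the record.

.. code-block:: python

    import base64

    def obfuscate(id_str):
        return base64.urlsafe_b64encode(id_str.encode('utf-8')).replace(b'=', b'')

    def deobfuscate(blob):
        padded = blob + b'=' * (-len(blob) % 4)  # restore the stripped '=' padding
        return base64.urlsafe_b64decode(padded).decode('utf-8')

    token = obfuscate('dataset1:variantset3')
    assert deobfuscate(token) == 'dataset1:variantset3'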
saltstack/salt
salt/grains/core.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/core.py#L2087-L2112
def locale_info(): ''' Provides defaultlanguage defaultencoding ''' grains = {} grains['locale_info'] = {} if salt.utils.platform.is_proxy(): return grains try: ( grains['locale_info']['defaultlanguage'], grains['locale_info']['defaultencoding'] ) = locale.getdefaultlocale() except Exception: # locale.getdefaultlocale can ValueError!! Catch anything else it # might do, per #2205 grains['locale_info']['defaultlanguage'] = 'unknown' grains['locale_info']['defaultencoding'] = 'unknown' grains['locale_info']['detectedencoding'] = __salt_system_encoding__ if _DATEUTIL_TZ: grains['locale_info']['timezone'] = datetime.datetime.now(dateutil.tz.tzlocal()).tzname() return grains
[ "def", "locale_info", "(", ")", ":", "grains", "=", "{", "}", "grains", "[", "'locale_info'", "]", "=", "{", "}", "if", "salt", ".", "utils", ".", "platform", ".", "is_proxy", "(", ")", ":", "return", "grains", "try", ":", "(", "grains", "[", "'locale_info'", "]", "[", "'defaultlanguage'", "]", ",", "grains", "[", "'locale_info'", "]", "[", "'defaultencoding'", "]", ")", "=", "locale", ".", "getdefaultlocale", "(", ")", "except", "Exception", ":", "# locale.getdefaultlocale can ValueError!! Catch anything else it", "# might do, per #2205", "grains", "[", "'locale_info'", "]", "[", "'defaultlanguage'", "]", "=", "'unknown'", "grains", "[", "'locale_info'", "]", "[", "'defaultencoding'", "]", "=", "'unknown'", "grains", "[", "'locale_info'", "]", "[", "'detectedencoding'", "]", "=", "__salt_system_encoding__", "if", "_DATEUTIL_TZ", ":", "grains", "[", "'locale_info'", "]", "[", "'timezone'", "]", "=", "datetime", ".", "datetime", ".", "now", "(", "dateutil", ".", "tz", ".", "tzlocal", "(", ")", ")", ".", "tzname", "(", ")", "return", "grains" ]
Provides defaultlanguage defaultencoding
[ "Provides", "defaultlanguage", "defaultencoding" ]
python
train
30.846154
eqcorrscan/EQcorrscan
eqcorrscan/core/match_filter.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L366-L375
def select(self, template_name): """ Select a specific family from the party. :type template_name: str :param template_name: Template name of Family to select from a party. :returns: Family """ return [fam for fam in self.families if fam.template.name == template_name][0]
[ "def", "select", "(", "self", ",", "template_name", ")", ":", "return", "[", "fam", "for", "fam", "in", "self", ".", "families", "if", "fam", ".", "template", ".", "name", "==", "template_name", "]", "[", "0", "]" ]
Select a specific family from the party. :type template_name: str :param template_name: Template name of Family to select from a party. :returns: Family
[ "Select", "a", "specific", "family", "from", "the", "party", "." ]
python
train
33.6
hydpy-dev/hydpy
hydpy/models/lland/lland_model.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/lland/lland_model.py#L47-L78
def calc_tkor_v1(self): """Adjust the given air temperature values. Required control parameters: |NHRU| |KT| Required input sequence: |TemL| Calculated flux sequence: |TKor| Basic equation: :math:`TKor = KT + TemL` Example: >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(3) >>> kt(-2.0, 0.0, 2.0) >>> inputs.teml(1.) >>> model.calc_tkor_v1() >>> fluxes.tkor tkor(-1.0, 1.0, 3.0) """ con = self.parameters.control.fastaccess inp = self.sequences.inputs.fastaccess flu = self.sequences.fluxes.fastaccess for k in range(con.nhru): flu.tkor[k] = con.kt[k] + inp.teml
[ "def", "calc_tkor_v1", "(", "self", ")", ":", "con", "=", "self", ".", "parameters", ".", "control", ".", "fastaccess", "inp", "=", "self", ".", "sequences", ".", "inputs", ".", "fastaccess", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".", "fastaccess", "for", "k", "in", "range", "(", "con", ".", "nhru", ")", ":", "flu", ".", "tkor", "[", "k", "]", "=", "con", ".", "kt", "[", "k", "]", "+", "inp", ".", "teml" ]
Adjust the given air temperature values. Required control parameters: |NHRU| |KT| Required input sequence: |TemL| Calculated flux sequence: |TKor| Basic equation: :math:`TKor = KT + TemL` Example: >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(3) >>> kt(-2.0, 0.0, 2.0) >>> inputs.teml(1.) >>> model.calc_tkor_v1() >>> fluxes.tkor tkor(-1.0, 1.0, 3.0)
[ "Adjust", "the", "given", "air", "temperature", "values", "." ]
python
train
22.0625
gem/oq-engine
openquake/hazardlib/site.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/site.py#L256-L262
def make_complete(self): """ Turns the site collection into a complete one, if needed """ # reset the site indices from 0 to N-1 and set self.complete to self self.array['sids'] = numpy.arange(len(self), dtype=numpy.uint32) self.complete = self
[ "def", "make_complete", "(", "self", ")", ":", "# reset the site indices from 0 to N-1 and set self.complete to self", "self", ".", "array", "[", "'sids'", "]", "=", "numpy", ".", "arange", "(", "len", "(", "self", ")", ",", "dtype", "=", "numpy", ".", "uint32", ")", "self", ".", "complete", "=", "self" ]
Turns the site collection into a complete one, if needed
[ "Turns", "the", "site", "collection", "into", "a", "complete", "one", "if", "needed" ]
python
train
40.857143
toumorokoshi/transmute-core
transmute_core/function/transmute_function.py
https://github.com/toumorokoshi/transmute-core/blob/a2c26625d5d8bab37e00038f9d615a26167fc7f4/transmute_core/function/transmute_function.py#L152-L165
def _parse_response_types(argspec, attrs): """ From the given parameters, return the response type dictionaries. """ return_type = argspec.annotations.get("return") or None type_description = attrs.parameter_descriptions.get("return", "") response_types = attrs.response_types.copy() if return_type or len(response_types) == 0: response_types[attrs.success_code] = ResponseType( type=return_type, type_description=type_description, description="success", ) return response_types
[ "def", "_parse_response_types", "(", "argspec", ",", "attrs", ")", ":", "return_type", "=", "argspec", ".", "annotations", ".", "get", "(", "\"return\"", ")", "or", "None", "type_description", "=", "attrs", ".", "parameter_descriptions", ".", "get", "(", "\"return\"", ",", "\"\"", ")", "response_types", "=", "attrs", ".", "response_types", ".", "copy", "(", ")", "if", "return_type", "or", "len", "(", "response_types", ")", "==", "0", ":", "response_types", "[", "attrs", ".", "success_code", "]", "=", "ResponseType", "(", "type", "=", "return_type", ",", "type_description", "=", "type_description", ",", "description", "=", "\"success\"", ",", ")", "return", "response_types" ]
From the given parameters, return the response type dictionaries.
[ "from", "the", "given", "parameters", "return", "back", "the", "response", "type", "dictionaries", "." ]
python
train
43.285714
saltstack/salt
salt/modules/cabal.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cabal.py#L106-L149
def list_( pkg=None, user=None, installed=False, env=None): ''' List packages matching a search string. pkg Search string for matching package names user The user to run cabal list with installed If True, only return installed packages. env Environment variables to set when invoking cabal. Uses the same ``env`` format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution function CLI example: .. code-block:: bash salt '*' cabal.list salt '*' cabal.list ShellCheck ''' cmd = ['cabal list --simple-output'] if installed: cmd.append('--installed') if pkg: cmd.append('"{0}"'.format(pkg)) result = __salt__['cmd.run_all'](' '.join(cmd), runas=user, env=env) packages = {} for line in result['stdout'].splitlines(): data = line.split() package_name = data[0] package_version = data[1] packages[package_name] = package_version return packages
[ "def", "list_", "(", "pkg", "=", "None", ",", "user", "=", "None", ",", "installed", "=", "False", ",", "env", "=", "None", ")", ":", "cmd", "=", "[", "'cabal list --simple-output'", "]", "if", "installed", ":", "cmd", ".", "append", "(", "'--installed'", ")", "if", "pkg", ":", "cmd", ".", "append", "(", "'\"{0}\"'", ".", "format", "(", "pkg", ")", ")", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "' '", ".", "join", "(", "cmd", ")", ",", "runas", "=", "user", ",", "env", "=", "env", ")", "packages", "=", "{", "}", "for", "line", "in", "result", "[", "'stdout'", "]", ".", "splitlines", "(", ")", ":", "data", "=", "line", ".", "split", "(", ")", "package_name", "=", "data", "[", "0", "]", "package_version", "=", "data", "[", "1", "]", "packages", "[", "package_name", "]", "=", "package_version", "return", "packages" ]
List packages matching a search string. pkg Search string for matching package names user The user to run cabal list with installed If True, only return installed packages. env Environment variables to set when invoking cabal. Uses the same ``env`` format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution function CLI example: .. code-block:: bash salt '*' cabal.list salt '*' cabal.list ShellCheck
[ "List", "packages", "matching", "a", "search", "string", "." ]
python
train
23.227273
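A standalone sketch of the parsing step above, which assumes one 'name version' pair per line of --simple-output; the captured output is invented. Note the dict keeps the last version listed for a package.

.. code-block:: python

    sample_stdout = """ShellCheck 0.4.6
    aeson 1.1.2.0
    aeson 1.2.1.0"""

    packages = {}
    for line in sample_stdout.splitlines():
        name, version = line.split()[:2]
        packages[name.strip()] = version  # a later line for the same package wins

    print(packages)  # {'ShellCheck': '0.4.6', 'aeson': '1.2.1.0'}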
ThreatConnect-Inc/tcex
tcex/tcex_bin_package.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_package.py#L352-L368
def zip_file(self, app_path, app_name, tmp_path): """Zip the App with tcex extension. Args: app_path (str): The path of the current project. app_name (str): The name of the App. tmp_path (str): The temp output path for the zip. """ # zip build directory zip_file = os.path.join(app_path, self.args.outdir, app_name) zip_file_zip = '{}.zip'.format(zip_file) zip_file_tcx = '{}.tcx'.format(zip_file) shutil.make_archive(zip_file, 'zip', tmp_path, app_name) shutil.move(zip_file_zip, zip_file_tcx) self._app_packages.append(zip_file_tcx) # update package data self.package_data['package'].append({'action': 'App Package:', 'output': zip_file_tcx})
[ "def", "zip_file", "(", "self", ",", "app_path", ",", "app_name", ",", "tmp_path", ")", ":", "# zip build directory", "zip_file", "=", "os", ".", "path", ".", "join", "(", "app_path", ",", "self", ".", "args", ".", "outdir", ",", "app_name", ")", "zip_file_zip", "=", "'{}.zip'", ".", "format", "(", "zip_file", ")", "zip_file_tcx", "=", "'{}.tcx'", ".", "format", "(", "zip_file", ")", "shutil", ".", "make_archive", "(", "zip_file", ",", "'zip'", ",", "tmp_path", ",", "app_name", ")", "shutil", ".", "move", "(", "zip_file_zip", ",", "zip_file_tcx", ")", "self", ".", "_app_packages", ".", "append", "(", "zip_file_tcx", ")", "# update package data", "self", ".", "package_data", "[", "'package'", "]", ".", "append", "(", "{", "'action'", ":", "'App Package:'", ",", "'output'", ":", "zip_file_tcx", "}", ")" ]
Zip the App with tcex extension. Args: app_path (str): The path of the current project. app_name (str): The name of the App. tmp_path (str): The temp output path for the zip.
[ "Zip", "the", "App", "with", "tcex", "extension", "." ]
python
train
44.764706
pytorch/text
torchtext/data/field.py
https://github.com/pytorch/text/blob/26bfce6869dc704f1d86792f9a681d453d7e7bb8/torchtext/data/field.py#L694-L723
def numericalize(self, arrs, device=None): """Convert a padded minibatch into a variable tensor. Each item in the minibatch will be numericalized independently and the resulting tensors will be stacked at the first dimension. Arguments: arrs (List[List[str]]): List of tokenized and padded examples. device (str or torch.device): A string or instance of `torch.device` specifying which device the Variables are going to be created on. If left as default, the tensors will be created on cpu. Default: None. """ numericalized = [] self.nesting_field.include_lengths = False if self.include_lengths: arrs, sentence_lengths, word_lengths = arrs for arr in arrs: numericalized_ex = self.nesting_field.numericalize( arr, device=device) numericalized.append(numericalized_ex) padded_batch = torch.stack(numericalized) self.nesting_field.include_lengths = True if self.include_lengths: sentence_lengths = \ torch.tensor(sentence_lengths, dtype=self.dtype, device=device) word_lengths = torch.tensor(word_lengths, dtype=self.dtype, device=device) return (padded_batch, sentence_lengths, word_lengths) return padded_batch
[ "def", "numericalize", "(", "self", ",", "arrs", ",", "device", "=", "None", ")", ":", "numericalized", "=", "[", "]", "self", ".", "nesting_field", ".", "include_lengths", "=", "False", "if", "self", ".", "include_lengths", ":", "arrs", ",", "sentence_lengths", ",", "word_lengths", "=", "arrs", "for", "arr", "in", "arrs", ":", "numericalized_ex", "=", "self", ".", "nesting_field", ".", "numericalize", "(", "arr", ",", "device", "=", "device", ")", "numericalized", ".", "append", "(", "numericalized_ex", ")", "padded_batch", "=", "torch", ".", "stack", "(", "numericalized", ")", "self", ".", "nesting_field", ".", "include_lengths", "=", "True", "if", "self", ".", "include_lengths", ":", "sentence_lengths", "=", "torch", ".", "tensor", "(", "sentence_lengths", ",", "dtype", "=", "self", ".", "dtype", ",", "device", "=", "device", ")", "word_lengths", "=", "torch", ".", "tensor", "(", "word_lengths", ",", "dtype", "=", "self", ".", "dtype", ",", "device", "=", "device", ")", "return", "(", "padded_batch", ",", "sentence_lengths", ",", "word_lengths", ")", "return", "padded_batch" ]
Convert a padded minibatch into a variable tensor. Each item in the minibatch will be numericalized independently and the resulting tensors will be stacked at the first dimension. Arguments: arrs (List[List[str]]): List of tokenized and padded examples. device (str or torch.device): A string or instance of `torch.device` specifying which device the Variables are going to be created on. If left as default, the tensors will be created on cpu. Default: None.
[ "Convert", "a", "padded", "minibatch", "into", "a", "variable", "tensor", "." ]
python
train
44.966667
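A small PyTorch sketch of the stacking step at the heart of this method: each example is already numericalized to an equal-shaped tensor, and the batch is stacked on a new first dimension. The index values are invented.

.. code-block:: python

    import torch

    # three padded examples, each 2 sentences x 4 word ids
    numericalized = [
        torch.tensor([[2, 5, 7, 1], [3, 4, 1, 1]]),
        torch.tensor([[2, 8, 1, 1], [6, 9, 5, 1]]),
        torch.tensor([[4, 4, 2, 1], [1, 1, 1, 1]]),
    ]

    padded_batch = torch.stack(numericalized)  # new batch dimension in front
    print(padded_batch.shape)                  # torch.Size([3, 2, 4])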
kubernetes-client/python
kubernetes/client/apis/authorization_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/authorization_v1_api.py#L369-L391
def create_subject_access_review(self, body, **kwargs): """ create a SubjectAccessReview This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_subject_access_review(body, async_req=True) >>> result = thread.get() :param async_req bool :param V1SubjectAccessReview body: (required) :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :param str pretty: If 'true', then the output is pretty printed. :return: V1SubjectAccessReview If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_subject_access_review_with_http_info(body, **kwargs) else: (data) = self.create_subject_access_review_with_http_info(body, **kwargs) return data
[ "def", "create_subject_access_review", "(", "self", ",", "body", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "create_subject_access_review_with_http_info", "(", "body", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "create_subject_access_review_with_http_info", "(", "body", ",", "*", "*", "kwargs", ")", "return", "data" ]
create a SubjectAccessReview This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_subject_access_review(body, async_req=True) >>> result = thread.get() :param async_req bool :param V1SubjectAccessReview body: (required) :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :param str pretty: If 'true', then the output is pretty printed. :return: V1SubjectAccessReview If the method is called asynchronously, returns the request thread.
[ "create", "a", "SubjectAccessReview", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "create_subject_access_review", "(", "body", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
64.304348
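A hedged end-to-end sketch of calling this operation against a cluster. The spec fields follow the standard authorization.k8s.io models shipped with this client; the user, verb and namespace values are placeholders.

.. code-block:: python

    from kubernetes import client, config

    config.load_kube_config()  # or config.load_incluster_config() inside a pod

    body = client.V1SubjectAccessReview(
        spec=client.V1SubjectAccessReviewSpec(
            user='jane@example.com',  # placeholder principal
            resource_attributes=client.V1ResourceAttributes(
                verb='get', resource='pods', namespace='default'),
        )
    )

    review = client.AuthorizationV1Api().create_subject_access_review(body)
    print(review.status.allowed)  # True if the user may get pods in 'default'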
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/input_readers.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/input_readers.py#L682-L713
def _validate_filters_ndb(cls, filters, model_class): """Validate ndb.Model filters.""" if not filters: return properties = model_class._properties for idx, f in enumerate(filters): prop, ineq, val = f if prop not in properties: raise errors.BadReaderParamsError( "Property %s is not defined for entity type %s", prop, model_class._get_kind()) # Attempt to cast the value to a KeyProperty if appropriate. # This enables filtering against keys. try: if (isinstance(val, basestring) and isinstance(properties[prop], (ndb.KeyProperty, ndb.ComputedProperty))): val = ndb.Key(urlsafe=val) filters[idx] = [prop, ineq, val] except: pass # Validate the value of each filter. We need to know filters have # valid value to carry out splits. try: properties[prop]._do_validate(val) except db.BadValueError, e: raise errors.BadReaderParamsError(e)
[ "def", "_validate_filters_ndb", "(", "cls", ",", "filters", ",", "model_class", ")", ":", "if", "not", "filters", ":", "return", "properties", "=", "model_class", ".", "_properties", "for", "idx", ",", "f", "in", "enumerate", "(", "filters", ")", ":", "prop", ",", "ineq", ",", "val", "=", "f", "if", "prop", "not", "in", "properties", ":", "raise", "errors", ".", "BadReaderParamsError", "(", "\"Property %s is not defined for entity type %s\"", ",", "prop", ",", "model_class", ".", "_get_kind", "(", ")", ")", "# Attempt to cast the value to a KeyProperty if appropriate.", "# This enables filtering against keys.", "try", ":", "if", "(", "isinstance", "(", "val", ",", "basestring", ")", "and", "isinstance", "(", "properties", "[", "prop", "]", ",", "(", "ndb", ".", "KeyProperty", ",", "ndb", ".", "ComputedProperty", ")", ")", ")", ":", "val", "=", "ndb", ".", "Key", "(", "urlsafe", "=", "val", ")", "filters", "[", "idx", "]", "=", "[", "prop", ",", "ineq", ",", "val", "]", "except", ":", "pass", "# Validate the value of each filter. We need to know filters have", "# valid value to carry out splits.", "try", ":", "properties", "[", "prop", "]", ".", "_do_validate", "(", "val", ")", "except", "db", ".", "BadValueError", ",", "e", ":", "raise", "errors", ".", "BadReaderParamsError", "(", "e", ")" ]
Validate ndb.Model filters.
[ "Validate", "ndb", ".", "Model", "filters", "." ]
python
train
31.28125
boto/s3transfer
s3transfer/processpool.py
https://github.com/boto/s3transfer/blob/2aead638c8385d8ae0b1756b2de17e8fad45fffa/s3transfer/processpool.py#L321-L364
def download_file(self, bucket, key, filename, extra_args=None, expected_size=None): """Downloads the object's contents to a file :type bucket: str :param bucket: The name of the bucket to download from :type key: str :param key: The name of the key to download from :type filename: str :param filename: The name of a file to download to. :type extra_args: dict :param extra_args: Extra arguments that may be passed to the client operation :type expected_size: int :param expected_size: The expected size in bytes of the download. If provided, the downloader will not call HeadObject to determine the object's size and use the provided value instead. The size is needed to determine whether to do a multipart download. :rtype: s3transfer.futures.TransferFuture :returns: Transfer future representing the download """ self._start_if_needed() if extra_args is None: extra_args = {} self._validate_all_known_args(extra_args) transfer_id = self._transfer_monitor.notify_new_transfer() download_file_request = DownloadFileRequest( transfer_id=transfer_id, bucket=bucket, key=key, filename=filename, extra_args=extra_args, expected_size=expected_size, ) logger.debug( 'Submitting download file request: %s.', download_file_request) self._download_request_queue.put(download_file_request) call_args = CallArgs( bucket=bucket, key=key, filename=filename, extra_args=extra_args, expected_size=expected_size) future = self._get_transfer_future(transfer_id, call_args) return future
[ "def", "download_file", "(", "self", ",", "bucket", ",", "key", ",", "filename", ",", "extra_args", "=", "None", ",", "expected_size", "=", "None", ")", ":", "self", ".", "_start_if_needed", "(", ")", "if", "extra_args", "is", "None", ":", "extra_args", "=", "{", "}", "self", ".", "_validate_all_known_args", "(", "extra_args", ")", "transfer_id", "=", "self", ".", "_transfer_monitor", ".", "notify_new_transfer", "(", ")", "download_file_request", "=", "DownloadFileRequest", "(", "transfer_id", "=", "transfer_id", ",", "bucket", "=", "bucket", ",", "key", "=", "key", ",", "filename", "=", "filename", ",", "extra_args", "=", "extra_args", ",", "expected_size", "=", "expected_size", ",", ")", "logger", ".", "debug", "(", "'Submitting download file request: %s.'", ",", "download_file_request", ")", "self", ".", "_download_request_queue", ".", "put", "(", "download_file_request", ")", "call_args", "=", "CallArgs", "(", "bucket", "=", "bucket", ",", "key", "=", "key", ",", "filename", "=", "filename", ",", "extra_args", "=", "extra_args", ",", "expected_size", "=", "expected_size", ")", "future", "=", "self", ".", "_get_transfer_future", "(", "transfer_id", ",", "call_args", ")", "return", "future" ]
Downloads the object's contents to a file :type bucket: str :param bucket: The name of the bucket to download from :type key: str :param key: The name of the key to download from :type filename: str :param filename: The name of a file to download to. :type extra_args: dict :param extra_args: Extra arguments that may be passed to the client operation :type expected_size: int :param expected_size: The expected size in bytes of the download. If provided, the downloader will not call HeadObject to determine the object's size and use the provided value instead. The size is needed to determine whether to do a multipart download. :rtype: s3transfer.futures.TransferFuture :returns: Transfer future representing the download
[ "Downloads", "the", "object", "s", "contents", "to", "a", "file" ]
python
test
41
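A hedged usage sketch built around the class this method belongs to; ProcessPoolDownloader supports the context-manager protocol in s3transfer, and the bucket, key and size below are placeholders.

.. code-block:: python

    from s3transfer.processpool import ProcessPoolDownloader

    with ProcessPoolDownloader() as downloader:
        # Each call queues a transfer and returns a TransferFuture immediately.
        future = downloader.download_file(
            'my-bucket', 'data/big-object.bin', '/tmp/big-object.bin',
            expected_size=512 * 1024 * 1024)  # skips the HeadObject round trip
        future.result()  # block until this transfer finishes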
brocade/pynos
pynos/versions/base/yang/brocade_sflow.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/yang/brocade_sflow.py#L22-L34
def sflow_collector_collector_ip_address(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") sflow = ET.SubElement(config, "sflow", xmlns="urn:brocade.com:mgmt:brocade-sflow") collector = ET.SubElement(sflow, "collector") collector_port_number_key = ET.SubElement(collector, "collector-port-number") collector_port_number_key.text = kwargs.pop('collector_port_number') collector_ip_address = ET.SubElement(collector, "collector-ip-address") collector_ip_address.text = kwargs.pop('collector_ip_address') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "sflow_collector_collector_ip_address", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "sflow", "=", "ET", ".", "SubElement", "(", "config", ",", "\"sflow\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-sflow\"", ")", "collector", "=", "ET", ".", "SubElement", "(", "sflow", ",", "\"collector\"", ")", "collector_port_number_key", "=", "ET", ".", "SubElement", "(", "collector", ",", "\"collector-port-number\"", ")", "collector_port_number_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'collector_port_number'", ")", "collector_ip_address", "=", "ET", ".", "SubElement", "(", "collector", ",", "\"collector-ip-address\"", ")", "collector_ip_address", ".", "text", "=", "kwargs", ".", "pop", "(", "'collector_ip_address'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
52
Enteee/pdml2flow
pdml2flow/autovivification.py
https://github.com/Enteee/pdml2flow/blob/bc9efe379b0b2406bfbbbd8e0f678b1f63805c66/pdml2flow/autovivification.py#L36-L55
def compress(self, d=DEFAULT): """Returns a copy of d with compressed leaves.""" if d is DEFAULT: d = self if isinstance(d, list): l = [v for v in (self.compress(v) for v in d)] try: return list(set(l)) except TypeError: # list contains unhashable items ret = [] for i in l: if i not in ret: ret.append(i) return ret elif isinstance(d, type(self)): return type(self)({k: v for k, v in ((k, self.compress(v)) for k, v in d.items())}) elif isinstance(d, dict): return {k: v for k, v in ((k, self.compress(v)) for k, v in d.items())} return d
[ "def", "compress", "(", "self", ",", "d", "=", "DEFAULT", ")", ":", "if", "d", "is", "DEFAULT", ":", "d", "=", "self", "if", "isinstance", "(", "d", ",", "list", ")", ":", "l", "=", "[", "v", "for", "v", "in", "(", "self", ".", "compress", "(", "v", ")", "for", "v", "in", "d", ")", "]", "try", ":", "return", "list", "(", "set", "(", "l", ")", ")", "except", "TypeError", ":", "# list contains not hashables", "ret", "=", "[", "]", "for", "i", "in", "l", ":", "if", "i", "not", "in", "ret", ":", "ret", ".", "append", "(", "i", ")", "return", "ret", "elif", "isinstance", "(", "d", ",", "type", "(", "self", ")", ")", ":", "return", "type", "(", "self", ")", "(", "{", "k", ":", "v", "for", "k", ",", "v", "in", "(", "(", "k", ",", "self", ".", "compress", "(", "v", ")", ")", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ")", "}", ")", "elif", "isinstance", "(", "d", ",", "dict", ")", ":", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "(", "(", "k", ",", "self", ".", "compress", "(", "v", ")", ")", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ")", "}", "return", "d" ]
Returns a copy of d with compressed leaves.
[ "Returns", "a", "copy", "of", "d", "with", "compressed", "leaves", "." ]
python
train
38.05
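A behavioural sketch of compress on nested data, assuming the module's dict subclass is named AutoVivification (the class name is an assumption; the nested data is invented). Duplicate leaves inside lists collapse; dicts are walked recursively, so set-based deduplication leaves list order unspecified.

.. code-block:: python

    from pdml2flow.autovivification import AutoVivification  # assumed class name

    d = AutoVivification({
        'ip': {'src': ['10.0.0.1', '10.0.0.1', '10.0.0.2']},
        'ports': [80, 80, 443],
    })

    c = d.compress()
    print(sorted(c['ip']['src']))  # ['10.0.0.1', '10.0.0.2'], duplicates dropped
    print(sorted(c['ports']))      # [80, 443]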
taskcluster/taskcluster-client.py
taskcluster/authevents.py
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/authevents.py#L128-L150
def roleUpdated(self, *args, **kwargs): """ Role Updated Messages Message that a new role has been updated. This exchange outputs: ``v1/role-message.json#``This exchange takes the following keys: * reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified. """ ref = { 'exchange': 'role-updated', 'name': 'roleUpdated', 'routingKey': [ { 'multipleWords': True, 'name': 'reserved', }, ], 'schema': 'v1/role-message.json#', } return self._makeTopicExchange(ref, *args, **kwargs)
[ "def", "roleUpdated", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ref", "=", "{", "'exchange'", ":", "'role-updated'", ",", "'name'", ":", "'roleUpdated'", ",", "'routingKey'", ":", "[", "{", "'multipleWords'", ":", "True", ",", "'name'", ":", "'reserved'", ",", "}", ",", "]", ",", "'schema'", ":", "'v1/role-message.json#'", ",", "}", "return", "self", ".", "_makeTopicExchange", "(", "ref", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Role Updated Messages Message that a new role has been updated. This exchange outputs: ``v1/role-message.json#``This exchange takes the following keys: * reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
[ "Role", "Updated", "Messages" ]
python
train
33.130435
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/breakpoint.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/breakpoint.py#L2744-L2766
def enable_one_shot_page_breakpoint(self, dwProcessId, address): """ Enables the page breakpoint at the given address for only one shot. @see: L{define_page_breakpoint}, L{has_page_breakpoint}, L{get_page_breakpoint}, L{enable_page_breakpoint}, L{disable_page_breakpoint} L{erase_page_breakpoint}, @type dwProcessId: int @param dwProcessId: Process global ID. @type address: int @param address: Memory address of breakpoint. """ p = self.system.get_process(dwProcessId) bp = self.get_page_breakpoint(dwProcessId, address) if bp.is_running(): self.__del_running_bp_from_all_threads(bp) bp.one_shot(p, None)
[ "def", "enable_one_shot_page_breakpoint", "(", "self", ",", "dwProcessId", ",", "address", ")", ":", "p", "=", "self", ".", "system", ".", "get_process", "(", "dwProcessId", ")", "bp", "=", "self", ".", "get_page_breakpoint", "(", "dwProcessId", ",", "address", ")", "if", "bp", ".", "is_running", "(", ")", ":", "self", ".", "__del_running_bp_from_all_threads", "(", "bp", ")", "bp", ".", "one_shot", "(", "p", ",", "None", ")" ]
Enables the page breakpoint at the given address for only one shot. @see: L{define_page_breakpoint}, L{has_page_breakpoint}, L{get_page_breakpoint}, L{enable_page_breakpoint}, L{disable_page_breakpoint} L{erase_page_breakpoint}, @type dwProcessId: int @param dwProcessId: Process global ID. @type address: int @param address: Memory address of breakpoint.
[ "Enables", "the", "page", "breakpoint", "at", "the", "given", "address", "for", "only", "one", "shot", "." ]
python
train
33.434783
mezz64/pyEmby
pyemby/server.py
https://github.com/mezz64/pyEmby/blob/6bb621e4e25bf1b9b0aba2c38b588e68f8816226/pyemby/server.py#L232-L253
async def api_request(self, url, params): """Make api fetch request.""" request = None try: with async_timeout.timeout(DEFAULT_TIMEOUT, loop=self._event_loop): request = await self._api_session.get( url, params=params) if request.status != 200: _LOGGER.error('Error fetching Emby data: %s', request.status) return None request_json = await request.json() if 'error' in request_json: _LOGGER.error('Error converting Emby data to json: %s: %s', request_json['error']['code'], request_json['error']['message']) return None return request_json except (aiohttp.ClientError, asyncio.TimeoutError, ConnectionRefusedError) as err: _LOGGER.error('Error fetching Emby data: %s', err) return None
[ "async", "def", "api_request", "(", "self", ",", "url", ",", "params", ")", ":", "request", "=", "None", "try", ":", "with", "async_timeout", ".", "timeout", "(", "DEFAULT_TIMEOUT", ",", "loop", "=", "self", ".", "_event_loop", ")", ":", "request", "=", "await", "self", ".", "_api_session", ".", "get", "(", "url", ",", "params", "=", "params", ")", "if", "request", ".", "status", "!=", "200", ":", "_LOGGER", ".", "error", "(", "'Error fetching Emby data: %s'", ",", "request", ".", "status", ")", "return", "None", "request_json", "=", "await", "request", ".", "json", "(", ")", "if", "'error'", "in", "request_json", ":", "_LOGGER", ".", "error", "(", "'Error converting Emby data to json: %s: %s'", ",", "request_json", "[", "'error'", "]", "[", "'code'", "]", ",", "request_json", "[", "'error'", "]", "[", "'message'", "]", ")", "return", "None", "return", "request_json", "except", "(", "aiohttp", ".", "ClientError", ",", "asyncio", ".", "TimeoutError", ",", "ConnectionRefusedError", ")", "as", "err", ":", "_LOGGER", ".", "error", "(", "'Error fetching Emby data: %s'", ",", "err", ")", "return", "None" ]
Make api fetch request.
[ "Make", "api", "fetch", "request", "." ]
python
train
43.5
nok/sklearn-porter
sklearn_porter/estimator/classifier/RandomForestClassifier/__init__.py
https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/estimator/classifier/RandomForestClassifier/__init__.py#L153-L176
def predict(self, temp_type): """ Transpile the predict method. Parameters ---------- :param temp_type : string The kind of export type (embedded, separated, exported). Returns ------- :return : string The transpiled predict method as string. """ # Exported: if temp_type == 'exported': temp = self.temp('exported.class') return temp.format(class_name=self.class_name, method_name=self.method_name, n_features=self.n_features) # Embedded: if temp_type == 'embedded': method = self.create_method_embedded() return self.create_class_embedded(method)
[ "def", "predict", "(", "self", ",", "temp_type", ")", ":", "# Exported:", "if", "temp_type", "==", "'exported'", ":", "temp", "=", "self", ".", "temp", "(", "'exported.class'", ")", "return", "temp", ".", "format", "(", "class_name", "=", "self", ".", "class_name", ",", "method_name", "=", "self", ".", "method_name", ",", "n_features", "=", "self", ".", "n_features", ")", "# Embedded:", "if", "temp_type", "==", "'embedded'", ":", "method", "=", "self", ".", "create_method_embedded", "(", ")", "return", "self", ".", "create_class_embedded", "(", "method", ")" ]
Transpile the predict method. Parameters ---------- :param temp_type : string The kind of export type (embedded, separated, exported). Returns ------- :return : string The transpiled predict method as string.
[ "Transpile", "the", "predict", "method", "." ]
python
train
31.833333
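The method above is internal to the transpiler; a hedged sketch of the public API that drives it. The classifier, training data and embed_data flag follow sklearn-porter's documented usage.

.. code-block:: python

    from sklearn.datasets import load_iris
    from sklearn.ensemble import RandomForestClassifier
    from sklearn_porter import Porter

    X, y = load_iris(return_X_y=True)
    clf = RandomForestClassifier(n_estimators=10).fit(X, y)

    porter = Porter(clf, language='java')
    output = porter.export(embed_data=True)  # exercises the 'embedded' template path
    print(output[:200])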
timothydmorton/VESPA
vespa/populations.py
https://github.com/timothydmorton/VESPA/blob/0446b54d48009f3655cfd1a3957ceea21d3adcaa/vespa/populations.py#L409-L422
def prior(self): """ Model prior for particular model. Product of eclipse probability (``self.prob``), the fraction of scenario that is allowed by the various constraints (``self.selectfrac``), and all additional factors in ``self.priorfactors``. """ prior = self.prob * self.selectfrac for f in self.priorfactors: prior *= self.priorfactors[f] return prior
[ "def", "prior", "(", "self", ")", ":", "prior", "=", "self", ".", "prob", "*", "self", ".", "selectfrac", "for", "f", "in", "self", ".", "priorfactors", ":", "prior", "*=", "self", ".", "priorfactors", "[", "f", "]", "return", "prior" ]
Model prior for particular model. Product of eclipse probability (``self.prob``), the fraction of scenario that is allowed by the various constraints (``self.selectfrac``), and all additional factors in ``self.priorfactors``.
[ "Model", "prior", "for", "particular", "model", "." ]
python
train
31.285714
google/python-adb
adb/fastboot.py
https://github.com/google/python-adb/blob/d9b94b2dda555c14674c19806debb8449c0e9652/adb/fastboot.py#L328-L340
def Flash(self, partition, timeout_ms=0, info_cb=DEFAULT_MESSAGE_CALLBACK):
        """Flashes the last downloaded file to the given partition.

        Args:
          partition: Partition to overwrite with the new image.
          timeout_ms: Optional timeout in milliseconds to wait for it to finish.
          info_cb: See Download. Usually no messages.

        Returns:
          Response to a download request, normally nothing.
        """
        return self._SimpleCommand(b'flash', arg=partition, info_cb=info_cb,
                                   timeout_ms=timeout_ms)
[ "def", "Flash", "(", "self", ",", "partition", ",", "timeout_ms", "=", "0", ",", "info_cb", "=", "DEFAULT_MESSAGE_CALLBACK", ")", ":", "return", "self", ".", "_SimpleCommand", "(", "b'flash'", ",", "arg", "=", "partition", ",", "info_cb", "=", "info_cb", ",", "timeout_ms", "=", "timeout_ms", ")" ]
Flashes the last downloaded file to the given partition.

Args:
  partition: Partition to overwrite with the new image.
  timeout_ms: Optional timeout in milliseconds to wait for it to finish.
  info_cb: See Download. Usually no messages.

Returns:
  Response to a download request, normally nothing.
[ "Flashes", "the", "last", "downloaded", "file", "to", "the", "given", "partition", "." ]
python
train
43.846154
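A sketch of the download-then-flash flow implied by the docstring above ("flashes the last downloaded file"); the `dev` FastbootCommands-style object and the image path are assumptions:

with open('boot.img', 'rb') as img:
    dev.Download(img)                    # stage the image on the device
dev.Flash('boot', timeout_ms=30000)      # write the staged image to the boot partition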
pybel/pybel
src/pybel/struct/filters/node_predicates.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/filters/node_predicates.py#L152-L174
def _node_has_modifier(graph: BELGraph, node: BaseEntity, modifier: str) -> bool:
    """Return true if, over any of a node's edges, it has a given modifier.

    Modifier can be one of:

    - :data:`pybel.constants.ACTIVITY`,
    - :data:`pybel.constants.DEGRADATION`
    - :data:`pybel.constants.TRANSLOCATION`.

    :param modifier: One of :data:`pybel.constants.ACTIVITY`, :data:`pybel.constants.DEGRADATION`, or :data:`pybel.constants.TRANSLOCATION`
    """
    modifier_in_subject = any(
        part_has_modifier(d, SUBJECT, modifier)
        for _, _, d in graph.out_edges(node, data=True)
    )

    modifier_in_object = any(
        part_has_modifier(d, OBJECT, modifier)
        for _, _, d in graph.in_edges(node, data=True)
    )

    return modifier_in_subject or modifier_in_object
[ "def", "_node_has_modifier", "(", "graph", ":", "BELGraph", ",", "node", ":", "BaseEntity", ",", "modifier", ":", "str", ")", "->", "bool", ":", "modifier_in_subject", "=", "any", "(", "part_has_modifier", "(", "d", ",", "SUBJECT", ",", "modifier", ")", "for", "_", ",", "_", ",", "d", "in", "graph", ".", "out_edges", "(", "node", ",", "data", "=", "True", ")", ")", "modifier_in_object", "=", "any", "(", "part_has_modifier", "(", "d", ",", "OBJECT", ",", "modifier", ")", "for", "_", ",", "_", ",", "d", "in", "graph", ".", "in_edges", "(", "node", ",", "data", "=", "True", ")", ")", "return", "modifier_in_subject", "or", "modifier_in_object" ]
Return true if, over any of a node's edges, it has a given modifier.

Modifier can be one of:

- :data:`pybel.constants.ACTIVITY`,
- :data:`pybel.constants.DEGRADATION`
- :data:`pybel.constants.TRANSLOCATION`.

:param modifier: One of :data:`pybel.constants.ACTIVITY`, :data:`pybel.constants.DEGRADATION`, or :data:`pybel.constants.TRANSLOCATION`
[ "Return", "true", "if", "over", "any", "of", "a", "nodes", "edges", "it", "has", "a", "given", "modifier", "." ]
python
train
34.826087
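A hedged example of the predicate above, using the ACTIVITY constant that its own docstring names; the `graph` and `node` values are assumptions:

from pybel.constants import ACTIVITY

# True if any in- or out-edge marks this node's subject/object part
# with a molecular activity modifier
if _node_has_modifier(graph, node, ACTIVITY):
    print('node participates with a molecular activity')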
JosuaKrause/quick_cache
quick_cache.py
https://github.com/JosuaKrause/quick_cache/blob/a6001f2d77247ae278e679a026174c83ff195d5a/quick_cache.py#L316-L319
def has(self):
        """Whether the cache file exists in the file system."""
        self._done = os.path.exists(self._cache_file)
        return self._done or self._out is not None
[ "def", "has", "(", "self", ")", ":", "self", ".", "_done", "=", "os", ".", "path", ".", "exists", "(", "self", ".", "_cache_file", ")", "return", "self", ".", "_done", "or", "self", ".", "_out", "is", "not", "None" ]
Whether the cache file exists in the file system.
[ "Whether", "the", "cache", "file", "exists", "in", "the", "file", "system", "." ]
python
train
45
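A tiny guard sketch for the check above; `cache` as a cache-handle instance and the `read()` step are hypothetical assumptions:

if cache.has():
    value = cache.read()   # hypothetical read of the cached result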
Carbonara-Project/Guanciale
guanciale/idblib.py
https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L924-L936
def makekey(self, *args):
        """ return a binary key for the nodeid, tag and optional value """
        if len(args) > 1:
            args = args[:1] + (args[1].encode('utf-8'),) + args[2:]
        if len(args) == 3 and type(args[-1]) == str:
            # node.tag.string type keys
            return struct.pack(self.keyfmt[:1 + len(args)], b'.', *args[:-1]) + args[-1].encode('utf-8')
        elif len(args) == 3 and type(args[-1]) == type(-1) and args[-1] < 0:
            # negative values -> need lowercase fmt char
            return struct.pack(self.keyfmt[:1 + len(args)] + self.fmt.lower(), b'.', *args)
        else:
            # node.tag.value type keys
            return struct.pack(self.keyfmt[:2 + len(args)], b'.', *args)
[ "def", "makekey", "(", "self", ",", "*", "args", ")", ":", "if", "len", "(", "args", ")", ">", "1", ":", "args", "=", "args", "[", ":", "1", "]", "+", "(", "args", "[", "1", "]", ".", "encode", "(", "'utf-8'", ")", ",", ")", "+", "args", "[", "2", ":", "]", "if", "len", "(", "args", ")", "==", "3", "and", "type", "(", "args", "[", "-", "1", "]", ")", "==", "str", ":", "# node.tag.string type keys\r", "return", "struct", ".", "pack", "(", "self", ".", "keyfmt", "[", ":", "1", "+", "len", "(", "args", ")", "]", ",", "b'.'", ",", "*", "args", "[", ":", "-", "1", "]", ")", "+", "args", "[", "-", "1", "]", ".", "encode", "(", "'utf-8'", ")", "elif", "len", "(", "args", ")", "==", "3", "and", "type", "(", "args", "[", "-", "1", "]", ")", "==", "type", "(", "-", "1", ")", "and", "args", "[", "-", "1", "]", "<", "0", ":", "# negative values -> need lowercase fmt char\r", "return", "struct", ".", "pack", "(", "self", ".", "keyfmt", "[", ":", "1", "+", "len", "(", "args", ")", "]", "+", "self", ".", "fmt", ".", "lower", "(", ")", ",", "b'.'", ",", "*", "args", ")", "else", ":", "# node.tag.value type keys\r", "return", "struct", ".", "pack", "(", "self", ".", "keyfmt", "[", ":", "2", "+", "len", "(", "args", ")", "]", ",", "b'.'", ",", "*", "args", ")" ]
return a binary key for the nodeid, tag and optional value
[ "return", "a", "binary", "key", "for", "the", "nodeid", "tag", "and", "optional", "value" ]
python
train
57.230769
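A sketch of the three key shapes the branches above handle; `btree` and the node id value are assumptions:

k1 = btree.makekey(0xFF000001)                  # node-only key (else branch)
k2 = btree.makekey(0xFF000001, 'N')             # node.tag key (else branch)
k3 = btree.makekey(0xFF000001, 'S', 'label')    # node.tag.string key (str branch)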
cltk/cltk
cltk/corpus/akkadian/cdli_corpus.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/akkadian/cdli_corpus.py#L108-L119
def toc(self):
        """
        Returns a rich list of texts in the catalog.
        """
        output = []
        for key in sorted(self.catalog.keys()):
            edition = self.catalog[key]['edition']
            length = len(self.catalog[key]['transliteration'])
            output.append(
                "Pnum: {key}, Edition: {edition}, length: {length} line(s)".format(
                    key=key, edition=edition, length=length))
        return output
[ "def", "toc", "(", "self", ")", ":", "output", "=", "[", "]", "for", "key", "in", "sorted", "(", "self", ".", "catalog", ".", "keys", "(", ")", ")", ":", "edition", "=", "self", ".", "catalog", "[", "key", "]", "[", "'edition'", "]", "length", "=", "len", "(", "self", ".", "catalog", "[", "key", "]", "[", "'transliteration'", "]", ")", "output", ".", "append", "(", "\"Pnum: {key}, Edition: {edition}, length: {length} line(s)\"", ".", "format", "(", "key", "=", "key", ",", "edition", "=", "edition", ",", "length", "=", "length", ")", ")", "return", "output" ]
Returns a rich list of texts in the catalog.
[ "Returns", "a", "rich", "list", "of", "texts", "in", "the", "catalog", "." ]
python
train
38.083333
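A usage sketch for `toc` above; the `corpus` CdliCorpus-style instance is an assumption:

for line in corpus.toc():
    print(line)   # e.g. "Pnum: <key>, Edition: <edition>, length: <n> line(s)"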
mitsei/dlkit
dlkit/json_/assessment/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/managers.py#L2284-L2307
def get_assessment_lookup_session_for_bank(self, bank_id, proxy):
        """Gets the ``OsidSession`` associated with the assessment lookup service for the given bank.

        arg:    bank_id (osid.id.Id): the ``Id`` of the bank
        arg:    proxy (osid.proxy.Proxy): a proxy
        return: (osid.assessment.AssessmentLookupSession) - ``an
                _assessment_lookup_session``
        raise:  NotFound - ``bank_id`` not found
        raise:  NullArgument - ``bank_id`` or ``proxy`` is ``null``
        raise:  OperationFailed - ``unable to complete request``
        raise:  Unimplemented - ``supports_assessment_lookup()`` or
                ``supports_visible_federation()`` is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_assessment_lookup()`` and
        ``supports_visible_federation()`` are ``true``.*
        """
        if not self.supports_assessment_lookup():
            raise errors.Unimplemented()
        ##
        # Also include check to see if the catalog Id is found otherwise raise errors.NotFound
        ##
        # pylint: disable=no-member
        return sessions.AssessmentLookupSession(bank_id, proxy, self._runtime)
[ "def", "get_assessment_lookup_session_for_bank", "(", "self", ",", "bank_id", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_assessment_lookup", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "##", "# Also include check to see if the catalog Id is found otherwise raise errors.NotFound", "##", "# pylint: disable=no-member", "return", "sessions", ".", "AssessmentLookupSession", "(", "bank_id", ",", "proxy", ",", "self", ".", "_runtime", ")" ]
Gets the ``OsidSession`` associated with the assessment lookup service for the given bank.

arg:    bank_id (osid.id.Id): the ``Id`` of the bank
arg:    proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.AssessmentLookupSession) - ``an
        _assessment_lookup_session``
raise:  NotFound - ``bank_id`` not found
raise:  NullArgument - ``bank_id`` or ``proxy`` is ``null``
raise:  OperationFailed - ``unable to complete request``
raise:  Unimplemented - ``supports_assessment_lookup()`` or
        ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_lookup()`` and
``supports_visible_federation()`` are ``true``.*
[ "Gets", "the", "OsidSession", "associated", "with", "the", "assessment", "lookup", "service", "for", "the", "given", "bank", "." ]
python
train
49.375
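A hedged sketch of the session lookup above; `mgr`, `bank_id`, and `proxy` are assumptions normally supplied by a dlkit runtime, and `errors` is the same errors module the method itself raises from:

try:
    session = mgr.get_assessment_lookup_session_for_bank(bank_id, proxy)
except errors.Unimplemented:
    # raised when supports_assessment_lookup() is false
    session = None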
cloudmesh/cloudmesh-common
cloudmesh/common/util.py
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/util.py#L117-L132
def convert_from_unicode(data):
    """
    converts unicode data to a string

    :param data: the data to convert
    :return:
    """
    # if isinstance(data, basestring):
    if isinstance(data, str):
        return str(data)
    elif isinstance(data, collectionsAbc.Mapping):
        return dict(map(convert_from_unicode, data.items()))
    elif isinstance(data, collectionsAbc.Iterable):
        return type(data)(map(convert_from_unicode, data))
    else:
        return data
[ "def", "convert_from_unicode", "(", "data", ")", ":", "# if isinstance(data, basestring):", "if", "isinstance", "(", "data", ",", "str", ")", ":", "return", "str", "(", "data", ")", "elif", "isinstance", "(", "data", ",", "collectionsAbc", ".", "Mapping", ")", ":", "return", "dict", "(", "map", "(", "convert_from_unicode", ",", "data", ".", "items", "(", ")", ")", ")", "elif", "isinstance", "(", "data", ",", "collectionsAbc", ".", "Iterable", ")", ":", "return", "type", "(", "data", ")", "(", "map", "(", "convert_from_unicode", ",", "data", ")", ")", "else", ":", "return", "data" ]
converts unicode data to a string

:param data: the data to convert
:return:
[ "converts", "unicode", "data", "to", "a", "string", ":", "param", "data", ":", "the", "data", "to", "convert", ":", "return", ":" ]
python
train
29.3125
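A self-contained example of the recursive conversion above; on Python 3, where str is already unicode, the interesting work is the recursion through the container types:

mixed = {u'name': u'gregor', u'tags': [u'cloud', u'mesh']}
plain = convert_from_unicode(mixed)
# {'name': 'gregor', 'tags': ['cloud', 'mesh']} -- same shapes, str leaves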
python-openxml/python-docx
docx/image/image.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/image/image.py#L30-L36
def from_blob(cls, blob):
        """
        Return a new |Image| subclass instance parsed from the image binary
        contained in *blob*.
        """
        stream = BytesIO(blob)
        return cls._from_stream(stream, blob)
[ "def", "from_blob", "(", "cls", ",", "blob", ")", ":", "stream", "=", "BytesIO", "(", "blob", ")", "return", "cls", ".", "_from_stream", "(", "stream", ",", "blob", ")" ]
Return a new |Image| subclass instance parsed from the image binary contained in *blob*.
[ "Return", "a", "new", "|Image|", "subclass", "instance", "parsed", "from", "the", "image", "binary", "contained", "in", "*", "blob", "*", "." ]
python
train
32.142857
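A sketch of the classmethod above; reading the bytes from a local file, and calling it on the Image class from this module, are assumptions:

with open('picture.png', 'rb') as f:
    image = Image.from_blob(f.read())   # parses the image from its raw bytes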
basho/riak-python-client
riak/bucket.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/bucket.py#L694-L710
def get_buckets(self, timeout=None):
        """
        Get the list of buckets under this bucket-type as
        :class:`RiakBucket <riak.bucket.RiakBucket>` instances.

        .. warning:: Do not use this in production, as it requires
           traversing through all keys stored in a cluster.

        .. note:: This request is automatically retried :attr:`retries`
           times if it fails due to network error.

        :param timeout: a timeout value in milliseconds
        :type timeout: int
        :rtype: list of :class:`RiakBucket <riak.bucket.RiakBucket>` instances
        """
        return self._client.get_buckets(bucket_type=self, timeout=timeout)
[ "def", "get_buckets", "(", "self", ",", "timeout", "=", "None", ")", ":", "return", "self", ".", "_client", ".", "get_buckets", "(", "bucket_type", "=", "self", ",", "timeout", "=", "timeout", ")" ]
Get the list of buckets under this bucket-type as
:class:`RiakBucket <riak.bucket.RiakBucket>` instances.

.. warning:: Do not use this in production, as it requires
   traversing through all keys stored in a cluster.

.. note:: This request is automatically retried :attr:`retries`
   times if it fails due to network error.

:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: list of :class:`RiakBucket <riak.bucket.RiakBucket>` instances
[ "Get", "the", "list", "of", "buckets", "under", "this", "bucket", "-", "type", "as", ":", "class", ":", "RiakBucket", "<riak", ".", "bucket", ".", "RiakBucket", ">", "instances", "." ]
python
train
39.529412
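Hedged usage of `get_buckets` above; the client settings and the 'default' bucket-type name are assumptions:

import riak

client = riak.RiakClient()
btype = client.bucket_type('default')       # assumed bucket-type name
buckets = btype.get_buckets(timeout=5000)   # dev/test only, per the warning above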
joeyespo/gitpress
gitpress/themes.py
https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/themes.py#L17-L21
def list_themes(directory=None):
    """Gets a list of the installed themes."""
    repo = require_repo(directory)
    path = os.path.join(repo, themes_dir)
    return os.listdir(path) if os.path.isdir(path) else None
[ "def", "list_themes", "(", "directory", "=", "None", ")", ":", "repo", "=", "require_repo", "(", "directory", ")", "path", "=", "os", ".", "path", ".", "join", "(", "repo", ",", "themes_dir", ")", "return", "os", ".", "listdir", "(", "path", ")", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", "else", "None" ]
Gets a list of the installed themes.
[ "Gets", "a", "list", "of", "the", "installed", "themes", "." ]
python
train
42.6
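A minimal sketch of `list_themes` above; the repository path is an assumption:

themes = list_themes('/path/to/site')   # None when the themes directory is absent
if themes:
    print('\n'.join(themes))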