column            type      values
----------------  --------  ------------------
repo              string    lengths 7-55
path              string    lengths 4-223
url               string    lengths 87-315
code              string    lengths 75-104k
code_tokens       list
docstring         string    lengths 1-46.9k
docstring_tokens  list
language          string    1 distinct value
partition         string    3 distinct values
avg_line_len      float64   7.91-980
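Each record pairs a Python function (repo, path, url, code, code_tokens) with its natural-language docstring (docstring, docstring_tokens), plus the split it belongs to (partition) and the average number of characters per source line (avg_line_len). As a rough sketch of how a split with this schema could be inspected with the Hugging Face datasets library -- the dataset identifier below is a hypothetical placeholder, not the name of any particular published corpus:

# Minimal sketch: load one split of a corpus with the columns listed above
# and look at a single record. "org/code-docstring-corpus" is a placeholder
# identifier used for illustration only.
from datasets import load_dataset

ds = load_dataset("org/code-docstring-corpus", split="train")

row = ds[0]
print(row["repo"], row["path"])           # e.g. mitsei/dlkit  dlkit/records/...
print(row["language"], row["partition"])  # "python"; one of 3 partitions (e.g. "train", "valid")
print(row["avg_line_len"])                # mean characters per line of the function source
print(row["docstring"])                   # natural-language summary
print(row["code"])                        # full function source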
mitsei/dlkit
dlkit/records/repository/basic/media_accessibility.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/repository/basic/media_accessibility.py#L200-L212
def add_media_description(self, media_description):
    """Adds a media_description.

    arg:    media_description (displayText): the new media_description
    raise:  InvalidArgument - ``media_description`` is invalid
    raise:  NoAccess - ``Metadata.isReadOnly()`` is ``true``
    raise:  NullArgument - ``media_description`` is ``null``
    *compliance: mandatory -- This method must be implemented.*

    """
    if self.get_media_descriptions_metadata().is_read_only():
        raise NoAccess()
    self.add_or_replace_value('mediaDescriptions', media_description)
[ "def", "add_media_description", "(", "self", ",", "media_description", ")", ":", "if", "self", ".", "get_media_descriptions_metadata", "(", ")", ".", "is_read_only", "(", ")", ":", "raise", "NoAccess", "(", ")", "self", ".", "add_or_replace_value", "(", "'mediaDescriptions'", ",", "media_description", ")" ]
Adds a media_description. arg: media_description (displayText): the new media_description raise: InvalidArgument - ``media_description`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``media_description`` is ``null`` *compliance: mandatory -- This method must be implemented.*
[ "Adds", "a", "media_description", "." ]
python
train
46.076923
numenta/htmresearch
projects/location_layer/location_module_experiment/grid_2d_location_experiment.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/location_layer/location_module_experiment/grid_2d_location_experiment.py#L275-L324
def inferObjectsWithRandomMovements(self):
  """
  Infer each object without any location input.
  """
  for objectName, objectFeatures in self.objects.iteritems():
    self.reset()

    inferred = False
    prevTouchSequence = None

    for _ in xrange(4):
      while True:
        touchSequence = list(objectFeatures)
        random.shuffle(touchSequence)
        if prevTouchSequence is not None:
          if touchSequence[0] == prevTouchSequence[-1]:
            continue
        break

      for i, feature in enumerate(touchSequence):
        locationOnObject = (feature["top"] + feature["height"]/2,
                            feature["left"] + feature["width"]/2)
        self.move(objectName, locationOnObject)
        featureName = feature["name"]
        featureSDR = self.features[featureName]
        self.sense(featureSDR, learn=False)

        inferred = (
          set(self.objectLayer.getActiveCells()) ==
          set(self.objectRepresentations[objectName]) and
          set(self.inputLayer.getActiveCells()) ==
          set(self.inputRepresentations[(objectName, locationOnObject, featureName)]) and
          set(self.getActiveLocationCells()) ==
          set(self.locationRepresentations[(objectName, locationOnObject)]))

        if inferred:
          break

      prevTouchSequence = touchSequence

      if inferred:
        break
[ "def", "inferObjectsWithRandomMovements", "(", "self", ")", ":", "for", "objectName", ",", "objectFeatures", "in", "self", ".", "objects", ".", "iteritems", "(", ")", ":", "self", ".", "reset", "(", ")", "inferred", "=", "False", "prevTouchSequence", "=", "None", "for", "_", "in", "xrange", "(", "4", ")", ":", "while", "True", ":", "touchSequence", "=", "list", "(", "objectFeatures", ")", "random", ".", "shuffle", "(", "touchSequence", ")", "if", "prevTouchSequence", "is", "not", "None", ":", "if", "touchSequence", "[", "0", "]", "==", "prevTouchSequence", "[", "-", "1", "]", ":", "continue", "break", "for", "i", ",", "feature", "in", "enumerate", "(", "touchSequence", ")", ":", "locationOnObject", "=", "(", "feature", "[", "\"top\"", "]", "+", "feature", "[", "\"height\"", "]", "/", "2", ",", "feature", "[", "\"left\"", "]", "+", "feature", "[", "\"width\"", "]", "/", "2", ")", "self", ".", "move", "(", "objectName", ",", "locationOnObject", ")", "featureName", "=", "feature", "[", "\"name\"", "]", "featureSDR", "=", "self", ".", "features", "[", "featureName", "]", "self", ".", "sense", "(", "featureSDR", ",", "learn", "=", "False", ")", "inferred", "=", "(", "set", "(", "self", ".", "objectLayer", ".", "getActiveCells", "(", ")", ")", "==", "set", "(", "self", ".", "objectRepresentations", "[", "objectName", "]", ")", "and", "set", "(", "self", ".", "inputLayer", ".", "getActiveCells", "(", ")", ")", "==", "set", "(", "self", ".", "inputRepresentations", "[", "(", "objectName", ",", "locationOnObject", ",", "featureName", ")", "]", ")", "and", "set", "(", "self", ".", "getActiveLocationCells", "(", ")", ")", "==", "set", "(", "self", ".", "locationRepresentations", "[", "(", "objectName", ",", "locationOnObject", ")", "]", ")", ")", "if", "inferred", ":", "break", "prevTouchSequence", "=", "touchSequence", "if", "inferred", ":", "break" ]
Infer each object without any location input.
[ "Infer", "each", "object", "without", "any", "location", "input", "." ]
python
train
29.48
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/bson/__init__.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/bson/__init__.py#L731-L743
def _element_to_bson(key, value, check_keys, opts):
    """Encode a single key, value pair."""
    if not isinstance(key, string_type):
        raise InvalidDocument("documents must have only string keys, "
                              "key was %r" % (key,))
    if check_keys:
        if key.startswith("$"):
            raise InvalidDocument("key %r must not start with '$'" % (key,))
        if "." in key:
            raise InvalidDocument("key %r must not contain '.'" % (key,))

    name = _make_name(key)
    return _name_value_to_bson(name, value, check_keys, opts)
[ "def", "_element_to_bson", "(", "key", ",", "value", ",", "check_keys", ",", "opts", ")", ":", "if", "not", "isinstance", "(", "key", ",", "string_type", ")", ":", "raise", "InvalidDocument", "(", "\"documents must have only string keys, \"", "\"key was %r\"", "%", "(", "key", ",", ")", ")", "if", "check_keys", ":", "if", "key", ".", "startswith", "(", "\"$\"", ")", ":", "raise", "InvalidDocument", "(", "\"key %r must not start with '$'\"", "%", "(", "key", ",", ")", ")", "if", "\".\"", "in", "key", ":", "raise", "InvalidDocument", "(", "\"key %r must not contain '.'\"", "%", "(", "key", ",", ")", ")", "name", "=", "_make_name", "(", "key", ")", "return", "_name_value_to_bson", "(", "name", ",", "value", ",", "check_keys", ",", "opts", ")" ]
Encode a single key, value pair.
[ "Encode", "a", "single", "key", "value", "pair", "." ]
python
train
43.230769
spyder-ide/spyder
spyder/plugins/ipythonconsole/widgets/help.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/widgets/help.py#L88-L101
def get_doc(self, objtxt):
    """Get object documentation dictionary"""
    if self._reading:
        return
    wait_loop = QEventLoop()
    self.sig_got_reply.connect(wait_loop.quit)
    self.silent_exec_method("get_ipython().kernel.get_doc('%s')" % objtxt)
    wait_loop.exec_()

    # Remove loop connection and loop
    self.sig_got_reply.disconnect(wait_loop.quit)
    wait_loop = None

    return self._kernel_reply
[ "def", "get_doc", "(", "self", ",", "objtxt", ")", ":", "if", "self", ".", "_reading", ":", "return", "wait_loop", "=", "QEventLoop", "(", ")", "self", ".", "sig_got_reply", ".", "connect", "(", "wait_loop", ".", "quit", ")", "self", ".", "silent_exec_method", "(", "\"get_ipython().kernel.get_doc('%s')\"", "%", "objtxt", ")", "wait_loop", ".", "exec_", "(", ")", "# Remove loop connection and loop", "self", ".", "sig_got_reply", ".", "disconnect", "(", "wait_loop", ".", "quit", ")", "wait_loop", "=", "None", "return", "self", ".", "_kernel_reply" ]
Get object documentation dictionary
[ "Get", "object", "documentation", "dictionary" ]
python
train
32.428571
saltstack/salt
salt/states/pkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L3329-L3349
def mod_watch(name, **kwargs):
    '''
    Install/reinstall a package based on a watch requisite

    .. note::
        This state exists to support special handling of the ``watch``
        :ref:`requisite <requisites>`. It should not be called directly.

        Parameters for this function should be set by the state being triggered.
    '''
    sfun = kwargs.pop('sfun', None)
    mapfun = {'purged': purged,
              'latest': latest,
              'removed': removed,
              'installed': installed}
    if sfun in mapfun:
        return mapfun[sfun](name, **kwargs)
    return {'name': name,
            'changes': {},
            'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun),
            'result': False}
[ "def", "mod_watch", "(", "name", ",", "*", "*", "kwargs", ")", ":", "sfun", "=", "kwargs", ".", "pop", "(", "'sfun'", ",", "None", ")", "mapfun", "=", "{", "'purged'", ":", "purged", ",", "'latest'", ":", "latest", ",", "'removed'", ":", "removed", ",", "'installed'", ":", "installed", "}", "if", "sfun", "in", "mapfun", ":", "return", "mapfun", "[", "sfun", "]", "(", "name", ",", "*", "*", "kwargs", ")", "return", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "'pkg.{0} does not work with the watch requisite'", ".", "format", "(", "sfun", ")", ",", "'result'", ":", "False", "}" ]
Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered.
[ "Install", "/", "reinstall", "a", "package", "based", "on", "a", "watch", "requisite" ]
python
train
34.904762
ns1/ns1-python
ns1/__init__.py
https://github.com/ns1/ns1-python/blob/f3e1d90a3b76a1bd18f855f2c622a8a49d4b585e/ns1/__init__.py#L240-L248
def loadNetworkbyName(self, name, callback=None, errback=None):
    """
    Load an existing Network by name into a high level Network object

    :param str name: Name of an existing Network
    """
    import ns1.ipam
    network = ns1.ipam.Network(self.config, name=name)
    return network.load(callback=callback, errback=errback)
[ "def", "loadNetworkbyName", "(", "self", ",", "name", ",", "callback", "=", "None", ",", "errback", "=", "None", ")", ":", "import", "ns1", ".", "ipam", "network", "=", "ns1", ".", "ipam", ".", "Network", "(", "self", ".", "config", ",", "name", "=", "name", ")", "return", "network", ".", "load", "(", "callback", "=", "callback", ",", "errback", "=", "errback", ")" ]
Load an existing Network by name into a high level Network object :param str name: Name of an existing Network
[ "Load", "an", "existing", "Network", "by", "name", "into", "a", "high", "level", "Network", "object" ]
python
train
39.333333
StackStorm/pybind
pybind/slxos/v17r_2_00/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_2_00/__init__.py#L6613-L6636
def _set_hw_state(self, v, load=False):
  """
  Setter method for hw_state, mapped from YANG variable /hw_state (container)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_hw_state is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_hw_state() directly.

  YANG Description: HW Route Info
  """
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=hw_state.hw_state, is_container='container', presence=False, yang_name="hw-state", rest_name="hw-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-hw', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=True)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """hw_state must be of a type compatible with container""",
        'defined-type': "container",
        'generated-type': """YANGDynClass(base=hw_state.hw_state, is_container='container', presence=False, yang_name="hw-state", rest_name="hw-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-hw', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=True)""",
      })

  self.__hw_state = t
  if hasattr(self, '_set'):
    self._set()
[ "def", "_set_hw_state", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "hw_state", ".", "hw_state", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"hw-state\"", ",", "rest_name", "=", "\"hw-state\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'callpoint'", ":", "u'sysdiag-hw'", ",", "u'cli-suppress-show-path'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-sysdiag-operational'", ",", "defining_module", "=", "'brocade-sysdiag-operational'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"hw_state must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=hw_state.hw_state, is_container='container', presence=False, yang_name=\"hw-state\", rest_name=\"hw-state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-hw', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__hw_state", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for hw_state, mapped from YANG variable /hw_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_hw_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_hw_state() directly. YANG Description: HW Route Info
[ "Setter", "method", "for", "hw_state", "mapped", "from", "YANG", "variable", "/", "hw_state", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_hw_state", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_hw_state", "()", "directly", "." ]
python
train
70.291667
numenta/nupic
src/nupic/algorithms/spatial_pooler.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/spatial_pooler.py#L792-L799
def getPotential(self, columnIndex, potential):
  """
  :param columnIndex: (int) column index to get potential for.
  :param potential: (list) will be overwritten with column potentials. Must
         match the number of inputs.
  """
  assert(columnIndex < self._numColumns)
  potential[:] = self._potentialPools[columnIndex]
[ "def", "getPotential", "(", "self", ",", "columnIndex", ",", "potential", ")", ":", "assert", "(", "columnIndex", "<", "self", ".", "_numColumns", ")", "potential", "[", ":", "]", "=", "self", ".", "_potentialPools", "[", "columnIndex", "]" ]
:param columnIndex: (int) column index to get potential for. :param potential: (list) will be overwritten with column potentials. Must match the number of inputs.
[ ":", "param", "columnIndex", ":", "(", "int", ")", "column", "index", "to", "get", "potential", "for", ".", ":", "param", "potential", ":", "(", "list", ")", "will", "be", "overwritten", "with", "column", "potentials", ".", "Must", "match", "the", "number", "of", "inputs", "." ]
python
valid
41.875
GNS3/gns3-server
gns3server/compute/vpcs/vpcs_vm.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/vpcs/vpcs_vm.py#L152-L166
def name(self, new_name):
    """
    Sets the name of this VPCS VM.

    :param new_name: name
    """

    if self.script_file:
        content = self.startup_script
        content = content.replace(self._name, new_name)
        escaped_name = new_name.replace('\\', '')
        content = re.sub(r"^set pcname .+$", "set pcname " + escaped_name, content, flags=re.MULTILINE)
        self.startup_script = content

    super(VPCSVM, VPCSVM).name.__set__(self, new_name)
[ "def", "name", "(", "self", ",", "new_name", ")", ":", "if", "self", ".", "script_file", ":", "content", "=", "self", ".", "startup_script", "content", "=", "content", ".", "replace", "(", "self", ".", "_name", ",", "new_name", ")", "escaped_name", "=", "new_name", ".", "replace", "(", "'\\\\'", ",", "''", ")", "content", "=", "re", ".", "sub", "(", "r\"^set pcname .+$\"", ",", "\"set pcname \"", "+", "escaped_name", ",", "content", ",", "flags", "=", "re", ".", "MULTILINE", ")", "self", ".", "startup_script", "=", "content", "super", "(", "VPCSVM", ",", "VPCSVM", ")", ".", "name", ".", "__set__", "(", "self", ",", "new_name", ")" ]
Sets the name of this VPCS VM. :param new_name: name
[ "Sets", "the", "name", "of", "this", "VPCS", "VM", "." ]
python
train
33.4
the01/python-floscraper
floscraper/cache.py
https://github.com/the01/python-floscraper/blob/d578cd3d6381070d9a07dade1e10387ae33e9a65/floscraper/cache.py#L170-L189
def put(self, url, html, cache_info=None):
    """
    Put response into cache

    :param url: Url to cache
    :type url: str | unicode
    :param html: HTML content of url
    :type html: str | unicode
    :param cache_info: Cache Info (default: None)
    :type cache_info: floscraper.models.CacheInfo
    :rtype: None
    """
    key = hashlib.md5(url).hexdigest()
    try:
        self._cache_set(key, html)
    except:
        self.exception("Failed to write cache")
        return
    self.update(url, cache_info)
[ "def", "put", "(", "self", ",", "url", ",", "html", ",", "cache_info", "=", "None", ")", ":", "key", "=", "hashlib", ".", "md5", "(", "url", ")", ".", "hexdigest", "(", ")", "try", ":", "self", ".", "_cache_set", "(", "key", ",", "html", ")", "except", ":", "self", ".", "exception", "(", "\"Failed to write cache\"", ")", "return", "self", ".", "update", "(", "url", ",", "cache_info", ")" ]
Put response into cache :param url: Url to cache :type url: str | unicode :param html: HTML content of url :type html: str | unicode :param cache_info: Cache Info (default: None) :type cache_info: floscraper.models.CacheInfo :rtype: None
[ "Put", "response", "into", "cache" ]
python
train
28.5
CS207-Final-Project-Group-10/cs207-FinalProject
solar_system/solar_system.py
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/solar_system/solar_system.py#L143-L193
def simulate_leapfrog(config_func: Callable, accel_func: Callable,
                      t0: date, t1: date, steps_per_day: int):
    """
    Simulate the earth-sun system from t0 to t1 using Leapfrog Integration.

    INPUTS:
    config_func: function taking a date or date range and returning position and velocity of bodies
    accel_func: function taking positions of the bodies and returning their accelerations
    t0: start date of the simulation; a python date
    t1: end date of the simulation (exclusive); a python date
    dt: time step in days.
    num_bodies: the number of celestial bodies in the simulation
    """
    # Length of the simulation (number of steps)
    N: int = (t1 - t0).days * steps_per_day

    # Get the initial conditions
    q0, v0 = config_func(t0)

    # Infer the number of dimensions from the shape of q0
    dims: int = q0.shape[1]

    # The time step in seconds
    dt = float(day2sec) / float(steps_per_day)
    # Square of the time step
    dt2: float = dt * dt

    # Initialize arrays to store computed positions and velocities
    q: np.ndarray = np.zeros((N, dims))
    v: np.ndarray = np.zeros((N, dims))

    # Initialize the first row with the initial conditions from the JPL ephemerides
    q[0, :] = q0
    v[0, :] = v0

    # Initialize an array to store the acceleration at each time step
    a: np.ndarray = np.zeros((N, dims))
    # First row of accelerations
    a[0, :] = accel_func(q[0])

    # Perform leapfrog integration simulation
    # https://en.wikipedia.org/wiki/Leapfrog_integration
    print(f'Performing leapfrog integration with {N} steps...')
    for i in tqdm(range(N-1)):
        # Positions at the next time step
        q[i+1,:] = q[i,:] + v[i,:] * dt + 0.5 * a[i,:] * dt2
        # Accelerations of each body in the system at the next time step
        a[i+1,:] = accel_func(q[i+1])
        # Velocities of each body at the next time step
        v[i+1,:] = v[i,:] + 0.5 * (a[i,:] + a[i+1,:]) * dt

    return q, v
[ "def", "simulate_leapfrog", "(", "config_func", ":", "Callable", ",", "accel_func", ":", "Callable", ",", "t0", ":", "date", ",", "t1", ":", "date", ",", "steps_per_day", ":", "int", ")", ":", "# Length of the simulation (number of steps)", "N", ":", "int", "=", "(", "t1", "-", "t0", ")", ".", "days", "*", "steps_per_day", "# Get the initial conditions", "q0", ",", "v0", "=", "config_func", "(", "t0", ")", "# Infer the number of dimensions from the shape of q0", "dims", ":", "int", "=", "q0", ".", "shape", "[", "1", "]", "# The time step in seconds", "dt", "=", "float", "(", "day2sec", ")", "/", "float", "(", "steps_per_day", ")", "# Square of the time step", "dt2", ":", "float", "=", "dt", "*", "dt", "# Initialize arrays to store computed positions and velocities", "q", ":", "np", ".", "ndarray", "=", "np", ".", "zeros", "(", "(", "N", ",", "dims", ")", ")", "v", ":", "np", ".", "ndarray", "=", "np", ".", "zeros", "(", "(", "N", ",", "dims", ")", ")", "# Initialize the first row with the initial conditions from the JPL ephemerides", "q", "[", "0", ",", ":", "]", "=", "q0", "v", "[", "0", ",", ":", "]", "=", "v0", "# Initialize an array to store the acceleration at each time step", "a", ":", "np", ".", "ndarray", "=", "np", ".", "zeros", "(", "(", "N", ",", "dims", ")", ")", "# First row of accelerations", "a", "[", "0", ",", ":", "]", "=", "accel_func", "(", "q", "[", "0", "]", ")", "# Perform leapfrog integration simulation", "# https://en.wikipedia.org/wiki/Leapfrog_integration", "print", "(", "f'Performing leapfrog integration with {N} steps...'", ")", "for", "i", "in", "tqdm", "(", "range", "(", "N", "-", "1", ")", ")", ":", "# Positions at the next time step", "q", "[", "i", "+", "1", ",", ":", "]", "=", "q", "[", "i", ",", ":", "]", "+", "v", "[", "i", ",", ":", "]", "*", "dt", "+", "0.5", "*", "a", "[", "i", ",", ":", "]", "*", "dt2", "# Accelerations of each body in the system at the next time step", "a", "[", "i", "+", "1", ",", ":", "]", "=", "accel_func", "(", "q", "[", "i", "+", "1", "]", ")", "# Velocities of each body at the next time step", "v", "[", "i", "+", "1", ",", ":", "]", "=", "v", "[", "i", ",", ":", "]", "+", "0.5", "*", "(", "a", "[", "i", ",", ":", "]", "+", "a", "[", "i", "+", "1", ",", ":", "]", ")", "*", "dt", "return", "q", ",", "v" ]
Simulate the earth-sun system from t0 to t1 using Leapfrog Integration. INPUTS: config_func: function taking a date or date range and returning position and velocity of bodies accel_func: function taking positions of the bodies and returning their accelerations t0: start date of the simulation; a python date t1: end date of the simulation (exclusive); a python date dt: time step in days. num_bodies: the number of celestial bodies in the simulation
[ "Simulate", "the", "earth", "-", "sun", "system", "from", "t0", "to", "t1", "using", "Leapfrog", "Integration", ".", "INPUTS", ":", "config_func", ":", "function", "taking", "a", "date", "or", "date", "range", "and", "returning", "position", "and", "velocity", "of", "bodies", "accel_func", ":", "function", "taking", "positions", "of", "the", "bodies", "and", "returning", "their", "accelerations", "t0", ":", "start", "date", "of", "the", "simulation", ";", "a", "python", "date", "t1", ":", "end", "date", "of", "the", "simulation", "(", "exclusive", ")", ";", "a", "python", "date", "dt", ":", "time", "step", "in", "days", ".", "num_bodies", ":", "the", "number", "of", "celestial", "bodies", "in", "the", "simulation" ]
python
train
38.764706
nerdvegas/rez
src/rez/wrapper.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/wrapper.py#L202-L219
def print_about(self):
    """Print an info message about the tool."""
    filepath = os.path.join(self.suite_path, "bin", self.tool_name)
    print "Tool: %s" % self.tool_name
    print "Path: %s" % filepath
    print "Suite: %s" % self.suite_path
    msg = "%s (%r)" % (self.context.load_path, self.context_name)
    print "Context: %s" % msg

    variants = self.context.get_tool_variants(self.tool_name)
    if variants:
        if len(variants) > 1:
            self._print_conflicting(variants)
        else:
            variant = iter(variants).next()
            print "Package: %s" % variant.qualified_package_name
    return 0
[ "def", "print_about", "(", "self", ")", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "self", ".", "suite_path", ",", "\"bin\"", ",", "self", ".", "tool_name", ")", "print", "\"Tool: %s\"", "%", "self", ".", "tool_name", "print", "\"Path: %s\"", "%", "filepath", "print", "\"Suite: %s\"", "%", "self", ".", "suite_path", "msg", "=", "\"%s (%r)\"", "%", "(", "self", ".", "context", ".", "load_path", ",", "self", ".", "context_name", ")", "print", "\"Context: %s\"", "%", "msg", "variants", "=", "self", ".", "context", ".", "get_tool_variants", "(", "self", ".", "tool_name", ")", "if", "variants", ":", "if", "len", "(", "variants", ")", ">", "1", ":", "self", ".", "_print_conflicting", "(", "variants", ")", "else", ":", "variant", "=", "iter", "(", "variants", ")", ".", "next", "(", ")", "print", "\"Package: %s\"", "%", "variant", ".", "qualified_package_name", "return", "0" ]
Print an info message about the tool.
[ "Print", "an", "info", "message", "about", "the", "tool", "." ]
python
train
38.5
quiltdata/quilt
compiler/quilt/tools/command.py
https://github.com/quiltdata/quilt/blob/651853e7e89a8af86e0ff26167e752efa5878c12/compiler/quilt/tools/command.py#L1118-L1142
def search(query, team=None):
    """
    Search for packages
    """
    if team is None:
        team = _find_logged_in_team()

    if team is not None:
        session = _get_session(team)
        response = session.get("%s/api/search/" % get_registry_url(team), params=dict(q=query))
        print("* Packages in team %s" % team)
        packages = response.json()['packages']
        for pkg in packages:
            print(("%s:" % team) + ("%(owner)s/%(name)s" % pkg))
        if len(packages) == 0:
            print("(No results)")

    print("* Packages in public cloud")
    public_session = _get_session(None)
    response = public_session.get("%s/api/search/" % get_registry_url(None), params=dict(q=query))
    packages = response.json()['packages']
    for pkg in packages:
        print("%(owner)s/%(name)s" % pkg)
    if len(packages) == 0:
        print("(No results)")
[ "def", "search", "(", "query", ",", "team", "=", "None", ")", ":", "if", "team", "is", "None", ":", "team", "=", "_find_logged_in_team", "(", ")", "if", "team", "is", "not", "None", ":", "session", "=", "_get_session", "(", "team", ")", "response", "=", "session", ".", "get", "(", "\"%s/api/search/\"", "%", "get_registry_url", "(", "team", ")", ",", "params", "=", "dict", "(", "q", "=", "query", ")", ")", "print", "(", "\"* Packages in team %s\"", "%", "team", ")", "packages", "=", "response", ".", "json", "(", ")", "[", "'packages'", "]", "for", "pkg", "in", "packages", ":", "print", "(", "(", "\"%s:\"", "%", "team", ")", "+", "(", "\"%(owner)s/%(name)s\"", "%", "pkg", ")", ")", "if", "len", "(", "packages", ")", "==", "0", ":", "print", "(", "\"(No results)\"", ")", "print", "(", "\"* Packages in public cloud\"", ")", "public_session", "=", "_get_session", "(", "None", ")", "response", "=", "public_session", ".", "get", "(", "\"%s/api/search/\"", "%", "get_registry_url", "(", "None", ")", ",", "params", "=", "dict", "(", "q", "=", "query", ")", ")", "packages", "=", "response", ".", "json", "(", ")", "[", "'packages'", "]", "for", "pkg", "in", "packages", ":", "print", "(", "\"%(owner)s/%(name)s\"", "%", "pkg", ")", "if", "len", "(", "packages", ")", "==", "0", ":", "print", "(", "\"(No results)\"", ")" ]
Search for packages
[ "Search", "for", "packages" ]
python
train
34.64
twilio/twilio-python
twilio/rest/messaging/v1/session/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/messaging/v1/session/__init__.py#L324-L333
def messages(self):
    """
    Access the messages

    :returns: twilio.rest.messaging.v1.session.message.MessageList
    :rtype: twilio.rest.messaging.v1.session.message.MessageList
    """
    if self._messages is None:
        self._messages = MessageList(self._version, session_sid=self._solution['sid'], )
    return self._messages
[ "def", "messages", "(", "self", ")", ":", "if", "self", ".", "_messages", "is", "None", ":", "self", ".", "_messages", "=", "MessageList", "(", "self", ".", "_version", ",", "session_sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")", "return", "self", ".", "_messages" ]
Access the messages :returns: twilio.rest.messaging.v1.session.message.MessageList :rtype: twilio.rest.messaging.v1.session.message.MessageList
[ "Access", "the", "messages" ]
python
train
36.1
tonyseek/python-base36
base36.py
https://github.com/tonyseek/python-base36/blob/4650988d9730e5cf9ca4a8fd634556f03c6d1c58/base36.py#L17-L35
def dumps(number):
    """Dumps an integer into a base36 string.

    :param number: the 10-based integer.
    :returns: the base36 string.
    """
    if not isinstance(number, integer_types):
        raise TypeError('number must be an integer')

    if number < 0:
        return '-' + dumps(-number)

    value = ''

    while number != 0:
        number, index = divmod(number, len(alphabet))
        value = alphabet[index] + value

    return value or '0'
[ "def", "dumps", "(", "number", ")", ":", "if", "not", "isinstance", "(", "number", ",", "integer_types", ")", ":", "raise", "TypeError", "(", "'number must be an integer'", ")", "if", "number", "<", "0", ":", "return", "'-'", "+", "dumps", "(", "-", "number", ")", "value", "=", "''", "while", "number", "!=", "0", ":", "number", ",", "index", "=", "divmod", "(", "number", ",", "len", "(", "alphabet", ")", ")", "value", "=", "alphabet", "[", "index", "]", "+", "value", "return", "value", "or", "'0'" ]
Dumps an integer into a base36 string. :param number: the 10-based integer. :returns: the base36 string.
[ "Dumps", "an", "integer", "into", "a", "base36", "string", "." ]
python
train
23.315789
tensorpack/tensorpack
examples/DeepQNetwork/expreplay.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/DeepQNetwork/expreplay.py#L242-L255
def reset_stats(self):
    """
    Returns:
        mean, max: two stats of the runners, to be added to backend
    """
    scores = list(itertools.chain.from_iterable([v.total_scores for v in self._runners]))
    for v in self._runners:
        v.total_scores.clear()
    try:
        return np.mean(scores), np.max(scores)
    except Exception:
        logger.exception("Cannot compute total scores in EnvRunner.")
        return None, None
[ "def", "reset_stats", "(", "self", ")", ":", "scores", "=", "list", "(", "itertools", ".", "chain", ".", "from_iterable", "(", "[", "v", ".", "total_scores", "for", "v", "in", "self", ".", "_runners", "]", ")", ")", "for", "v", "in", "self", ".", "_runners", ":", "v", ".", "total_scores", ".", "clear", "(", ")", "try", ":", "return", "np", ".", "mean", "(", "scores", ")", ",", "np", ".", "max", "(", "scores", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "\"Cannot compute total scores in EnvRunner.\"", ")", "return", "None", ",", "None" ]
Returns: mean, max: two stats of the runners, to be added to backend
[ "Returns", ":", "mean", "max", ":", "two", "stats", "of", "the", "runners", "to", "be", "added", "to", "backend" ]
python
train
34.142857
ph4r05/monero-serialize
monero_serialize/xmrrpc.py
https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrrpc.py#L1209-L1244
async def dump_variant(self, elem, elem_type=None, params=None, obj=None):
    """
    Dumps variant type to the writer.
    Supports both wrapped and raw variant.

    :param elem:
    :param elem_type:
    :param params:
    :param obj:
    :return:
    """
    fvalue = None
    if isinstance(elem, x.VariantType) or elem_type.WRAPS_VALUE:
        try:
            self.tracker.push_variant(elem.variant_elem_type)
            fvalue = {
                elem.variant_elem: await self._dump_field(getattr(elem, elem.variant_elem), elem.variant_elem_type, obj=obj)
            }
            self.tracker.pop()

        except Exception as e:
            raise helpers.ArchiveException(e, tracker=self.tracker) from e

    else:
        try:
            fdef = elem_type.find_fdef(elem_type.f_specs(), elem)
            self.tracker.push_variant(fdef[1])
            fvalue = {
                fdef[0]: await self._dump_field(elem, fdef[1], obj=obj)
            }
            self.tracker.pop()

        except Exception as e:
            raise helpers.ArchiveException(e, tracker=self.tracker) from e

    return fvalue
[ "async", "def", "dump_variant", "(", "self", ",", "elem", ",", "elem_type", "=", "None", ",", "params", "=", "None", ",", "obj", "=", "None", ")", ":", "fvalue", "=", "None", "if", "isinstance", "(", "elem", ",", "x", ".", "VariantType", ")", "or", "elem_type", ".", "WRAPS_VALUE", ":", "try", ":", "self", ".", "tracker", ".", "push_variant", "(", "elem", ".", "variant_elem_type", ")", "fvalue", "=", "{", "elem", ".", "variant_elem", ":", "await", "self", ".", "_dump_field", "(", "getattr", "(", "elem", ",", "elem", ".", "variant_elem", ")", ",", "elem", ".", "variant_elem_type", ",", "obj", "=", "obj", ")", "}", "self", ".", "tracker", ".", "pop", "(", ")", "except", "Exception", "as", "e", ":", "raise", "helpers", ".", "ArchiveException", "(", "e", ",", "tracker", "=", "self", ".", "tracker", ")", "from", "e", "else", ":", "try", ":", "fdef", "=", "elem_type", ".", "find_fdef", "(", "elem_type", ".", "f_specs", "(", ")", ",", "elem", ")", "self", ".", "tracker", ".", "push_variant", "(", "fdef", "[", "1", "]", ")", "fvalue", "=", "{", "fdef", "[", "0", "]", ":", "await", "self", ".", "_dump_field", "(", "elem", ",", "fdef", "[", "1", "]", ",", "obj", "=", "obj", ")", "}", "self", ".", "tracker", ".", "pop", "(", ")", "except", "Exception", "as", "e", ":", "raise", "helpers", ".", "ArchiveException", "(", "e", ",", "tracker", "=", "self", ".", "tracker", ")", "from", "e", "return", "fvalue" ]
Dumps variant type to the writer. Supports both wrapped and raw variant. :param elem: :param elem_type: :param params: :param obj: :return:
[ "Dumps", "variant", "type", "to", "the", "writer", ".", "Supports", "both", "wrapped", "and", "raw", "variant", "." ]
python
train
33.472222
log2timeline/plaso
plaso/parsers/pls_recall.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/pls_recall.py#L98-L142
def ParseFileObject(self, parser_mediator, file_object):
  """Parses a PLSRecall.dat file-like object.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    file_object (dfvfs.FileIO): a file-like object.

  Raises:
    UnableToParseFile: when the file cannot be parsed.
  """
  file_offset = 0
  file_size = file_object.get_size()

  record_map = self._GetDataTypeMap('pls_recall_record')

  while file_offset < file_size:
    try:
      pls_record, record_data_size = self._ReadStructureFromFileObject(
          file_object, file_offset, record_map)
    except (ValueError, errors.ParseError) as exception:
      if file_offset == 0:
        raise errors.UnableToParseFile('Unable to parse first record.')

      parser_mediator.ProduceExtractionWarning((
          'unable to parse record at offset: 0x{0:08x} with error: '
          '{1!s}').format(file_offset, exception))
      break

    if file_offset == 0 and not self._VerifyRecord(pls_record):
      raise errors.UnableToParseFile('Verification of first record failed.')

    event_data = PlsRecallEventData()
    event_data.database_name = pls_record.database_name.rstrip('\x00')
    event_data.sequence_number = pls_record.sequence_number
    event_data.offset = file_offset
    event_data.query = pls_record.query.rstrip('\x00')
    event_data.username = pls_record.username.rstrip('\x00')

    date_time = dfdatetime_delphi_date_time.DelphiDateTime(
        timestamp=pls_record.last_written_time)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)

    file_offset += record_data_size
[ "def", "ParseFileObject", "(", "self", ",", "parser_mediator", ",", "file_object", ")", ":", "file_offset", "=", "0", "file_size", "=", "file_object", ".", "get_size", "(", ")", "record_map", "=", "self", ".", "_GetDataTypeMap", "(", "'pls_recall_record'", ")", "while", "file_offset", "<", "file_size", ":", "try", ":", "pls_record", ",", "record_data_size", "=", "self", ".", "_ReadStructureFromFileObject", "(", "file_object", ",", "file_offset", ",", "record_map", ")", "except", "(", "ValueError", ",", "errors", ".", "ParseError", ")", "as", "exception", ":", "if", "file_offset", "==", "0", ":", "raise", "errors", ".", "UnableToParseFile", "(", "'Unable to parse first record.'", ")", "parser_mediator", ".", "ProduceExtractionWarning", "(", "(", "'unable to parse record at offset: 0x{0:08x} with error: '", "'{1!s}'", ")", ".", "format", "(", "file_offset", ",", "exception", ")", ")", "break", "if", "file_offset", "==", "0", "and", "not", "self", ".", "_VerifyRecord", "(", "pls_record", ")", ":", "raise", "errors", ".", "UnableToParseFile", "(", "'Verification of first record failed.'", ")", "event_data", "=", "PlsRecallEventData", "(", ")", "event_data", ".", "database_name", "=", "pls_record", ".", "database_name", ".", "rstrip", "(", "'\\x00'", ")", "event_data", ".", "sequence_number", "=", "pls_record", ".", "sequence_number", "event_data", ".", "offset", "=", "file_offset", "event_data", ".", "query", "=", "pls_record", ".", "query", ".", "rstrip", "(", "'\\x00'", ")", "event_data", ".", "username", "=", "pls_record", ".", "username", ".", "rstrip", "(", "'\\x00'", ")", "date_time", "=", "dfdatetime_delphi_date_time", ".", "DelphiDateTime", "(", "timestamp", "=", "pls_record", ".", "last_written_time", ")", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "date_time", ",", "definitions", ".", "TIME_DESCRIPTION_WRITTEN", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")", "file_offset", "+=", "record_data_size" ]
Parses a PLSRecall.dat file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
[ "Parses", "a", "PLSRecall", ".", "dat", "file", "-", "like", "object", "." ]
python
train
39.688889
mgagne/wafflehaus.iweb
wafflehaus/iweb/glance/image_filter/visible.py
https://github.com/mgagne/wafflehaus.iweb/blob/8ac625582c1180391fe022d1db19f70a2dfb376a/wafflehaus/iweb/glance/image_filter/visible.py#L94-L101
def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    conf = global_conf.copy()
    conf.update(local_conf)

    def visible(app):
        return VisibleFilter(app, conf)
    return visible
[ "def", "filter_factory", "(", "global_conf", ",", "*", "*", "local_conf", ")", ":", "conf", "=", "global_conf", ".", "copy", "(", ")", "conf", ".", "update", "(", "local_conf", ")", "def", "visible", "(", "app", ")", ":", "return", "VisibleFilter", "(", "app", ",", "conf", ")", "return", "visible" ]
Returns a WSGI filter app for use with paste.deploy.
[ "Returns", "a", "WSGI", "filter", "app", "for", "use", "with", "paste", ".", "deploy", "." ]
python
train
30.25
jcrist/skein
examples/echo_server/client.py
https://github.com/jcrist/skein/blob/16f8b1d3b3d9f79f36e2f152e45893339a1793e8/examples/echo_server/client.py#L20-L29
async def echo_all(app, message):
    """Send and recieve a message from all running echo servers"""
    # Loop through all registered server addresses
    for address in app.kv.get_prefix('address.').values():
        # Parse the host and port from the stored address
        host, port = address.decode().split(':')
        port = int(port)

        # Send the message to the echo server
        await tcp_echo_client(message, loop, host, port)
[ "async", "def", "echo_all", "(", "app", ",", "message", ")", ":", "# Loop through all registered server addresses", "for", "address", "in", "app", ".", "kv", ".", "get_prefix", "(", "'address.'", ")", ".", "values", "(", ")", ":", "# Parse the host and port from the stored address", "host", ",", "port", "=", "address", ".", "decode", "(", ")", ".", "split", "(", "':'", ")", "port", "=", "int", "(", "port", ")", "# Send the message to the echo server", "await", "tcp_echo_client", "(", "message", ",", "loop", ",", "host", ",", "port", ")" ]
Send and recieve a message from all running echo servers
[ "Send", "and", "recieve", "a", "message", "from", "all", "running", "echo", "servers" ]
python
train
43.7
mozilla/FoxPuppet
foxpuppet/windows/browser/notifications/base.py
https://github.com/mozilla/FoxPuppet/blob/6575eb4c72fd024c986b254e198c8b4e6f68cddd/foxpuppet/windows/browser/notifications/base.py#L42-L50
def label(self):
    """Provide access to the notification label.

    Returns:
        str: The notification label

    """
    with self.selenium.context(self.selenium.CONTEXT_CHROME):
        return self.root.get_attribute("label")
[ "def", "label", "(", "self", ")", ":", "with", "self", ".", "selenium", ".", "context", "(", "self", ".", "selenium", ".", "CONTEXT_CHROME", ")", ":", "return", "self", ".", "root", ".", "get_attribute", "(", "\"label\"", ")" ]
Provide access to the notification label. Returns: str: The notification label
[ "Provide", "access", "to", "the", "notification", "label", "." ]
python
train
27.777778
pyviz/param
setup.py
https://github.com/pyviz/param/blob/8f0dafa78defa883247b40635f96cc6d5c1b3481/setup.py#L8-L13
def get_setup_version(reponame):
    """Use autover to get up to date version."""
    # importing self into setup.py is unorthodox, but param has no
    # required dependencies outside of python
    from param.version import Version
    return Version.setup_version(os.path.dirname(__file__),reponame,archive_commit="$Format:%h$")
[ "def", "get_setup_version", "(", "reponame", ")", ":", "# importing self into setup.py is unorthodox, but param has no", "# required dependencies outside of python", "from", "param", ".", "version", "import", "Version", "return", "Version", ".", "setup_version", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "reponame", ",", "archive_commit", "=", "\"$Format:%h$\"", ")" ]
Use autover to get up to date version.
[ "Use", "autover", "to", "get", "up", "to", "date", "version", "." ]
python
train
54.166667
EconForge/dolo
trash/dolo/algos/dtcscc/gssa.py
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/trash/dolo/algos/dtcscc/gssa.py#L14-L190
def gssa(model, maxit=100, tol=1e-8, initial_dr=None, verbose=False,
         n_sim=10000, deg=3, damp=0.1, seed=42):
    """
    Sketch of algorithm:

    0. Choose levels for the initial states and the simulation length (n_sim)
    1. Obtain an initial decision rule -- here using first order perturbation
    2. Draw a sequence of innovations epsilon
    3. Iterate on the following steps:
        - Use the epsilons, initial states, and proposed decision rule to
          simulate model forward. Will leave us with time series of states
          and controls
        - Evaluate expectations using quadrature
        - Use direct response to get alternative proposal for controls
        - Regress updated controls on the simulated states to get proposal
          coefficients. New coefficients are convex combination of previous
          coefficients and proposal coefficients. Weights controlled by damp,
          where damp is the weight on the old coefficients. This should be
          fairly low to increase chances of convergence.
        - Check difference between the simulated series of controls and the
          direct response version of controls
    """
    # verify input arguments
    if deg < 0 or deg > 5:
        raise ValueError("deg must be in [1, 5]")

    if damp < 0 or damp > 1:
        raise ValueError("damp must be in [0, 1]")

    t1 = time.time()

    # extract model functions and parameters
    g = model.__original_functions__['transition']
    g_gu = model.__original_gufunctions__['transition']
    h_gu = model.__original_gufunctions__['expectation']
    d_gu = model.__original_gufunctions__['direct_response']
    p = model.calibration['parameters']
    n_s = len(model.symbols["states"])
    n_x = len(model.symbols["controls"])
    n_z = len(model.symbols["expectations"])
    n_eps = len(model.symbols["shocks"])
    s0 = model.calibration["states"]
    x0 = model.calibration["controls"]

    # construct initial decision rule if not supplied
    if initial_dr is None:
        drp = approximate_controls(model)
    else:
        drp = initial_dr

    # set up quadrature weights and nodes
    distrib = model.get_distribution()
    nodes, weights = distrib.discretize()

    # draw sequence of innovations
    np.random.seed(seed)
    distrib = model.get_distribution()
    sigma = distrib.sigma
    epsilon = np.random.multivariate_normal(np.zeros(n_eps), sigma, n_sim)

    # simulate initial decision rule and do initial regression for coefs
    init_sim = simulate(model, drp, horizon=n_sim, return_array=True,
                        forcing_shocks=epsilon)
    s_sim = init_sim[:, 0, 0:n_s]
    x_sim = init_sim[:, 0, n_s:n_s + n_x]
    Phi_sim = complete_polynomial(s_sim.T, deg).T
    coefs = np.ascontiguousarray(lstsq(Phi_sim, x_sim)[0])
    # NOTE: the ascontiguousarray above was needed for numba to compile the
    # `np.dot` in the simulation function in no python mode. Appearantly
    # the array returned from lstsq is not C-contiguous

    # allocate for simulated series of expectations and next period states
    z_sim = np.empty((n_sim, n_z))
    S = np.empty_like(s_sim)
    X = np.empty_like(x_sim)
    H = np.empty_like(z_sim)
    new_x = np.empty_like(x_sim)

    # set initial states and controls
    s_sim[0, :] = s0
    x_sim[0, :] = x0

    Phi_t = np.empty(n_complete(n_s, deg))  # buffer array for simulation

    # create jitted function that will simulate states and controls, using
    # the epsilon shocks from above (define here as closure over all data
    # above).
    @jit(nopython=True)
    def simulate_states_controls(s, x, Phi_t, coefs):
        for t in range(1, n_sim):
            g(s[t - 1, :], x[t - 1, :], epsilon[t, :], p, s[t, :])

            # fill Phi_t with new complete poly version of s[t, :]
            _complete_poly_impl_vec(s[t, :], deg, Phi_t)

            # do inner product to get new controls
            x[t, :] = Phi_t @coefs

    it = 0
    err = 10.0
    err_0 = 10

    if verbose:
        headline = '|{0:^4} | {1:10} | {2:8} | {3:8} |'
        headline = headline.format('N', ' Error', 'Gain', 'Time')
        stars = '-' * len(headline)
        print(stars)
        print(headline)
        print(stars)

        # format string for within loop
        fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |'

    while err > tol and it <= maxit:
        t_start = time.time()

        # simulate with new coefficients
        simulate_states_controls(s_sim, x_sim, Phi_t, coefs)

        # update expectations of z
        # update_expectations(s_sim, x_sim, z_sim, Phi_sim)
        z_sim[:, :] = 0.0
        for i in range(weights.shape[0]):
            e = nodes[i, :]  # extract nodes

            # evaluate future states at each node (stores in S)
            g_gu(s_sim, x_sim, e, p, S)

            # evaluate future controls at each future state
            _complete_poly_impl(S.T, deg, Phi_sim.T)
            np.dot(Phi_sim, coefs, out=X)

            # compute expectation (stores in H)
            h_gu(S, X, p, H)
            z_sim += weights[i] * H

        # get controls on the simulated points from direct_resposne
        # (stores in new_x)
        d_gu(s_sim, z_sim, p, new_x)

        # update basis matrix and do regression of new_x on s_sim to get
        # updated coefficients
        _complete_poly_impl(s_sim.T, deg, Phi_sim.T)
        new_coefs = np.ascontiguousarray(lstsq(Phi_sim, new_x)[0])

        # check whether they differ from the preceding guess
        err = (abs(new_x - x_sim).max())

        # update the series of controls and coefficients
        x_sim[:, :] = new_x
        coefs = (1 - damp) * new_coefs + damp * coefs

        if verbose:
            # update error and print if `verbose`
            err_SA = err / err_0
            err_0 = err
            t_finish = time.time()
            elapsed = t_finish - t_start
            if verbose:
                print(fmt_str.format(it, err, err_SA, elapsed))

        it += 1

    if it == maxit:
        warnings.warn(UserWarning("Maximum number of iterations reached"))

    # compute final fime and do final printout if `verbose`
    t2 = time.time()
    if verbose:
        print(stars)
        print('Elapsed: {} seconds.'.format(t2 - t1))
        print(stars)

    cp = CompletePolynomial(deg, len(s0))
    cp.fit_values(s_sim, x_sim)
    return cp
[ "def", "gssa", "(", "model", ",", "maxit", "=", "100", ",", "tol", "=", "1e-8", ",", "initial_dr", "=", "None", ",", "verbose", "=", "False", ",", "n_sim", "=", "10000", ",", "deg", "=", "3", ",", "damp", "=", "0.1", ",", "seed", "=", "42", ")", ":", "# verify input arguments", "if", "deg", "<", "0", "or", "deg", ">", "5", ":", "raise", "ValueError", "(", "\"deg must be in [1, 5]\"", ")", "if", "damp", "<", "0", "or", "damp", ">", "1", ":", "raise", "ValueError", "(", "\"damp must be in [0, 1]\"", ")", "t1", "=", "time", ".", "time", "(", ")", "# extract model functions and parameters", "g", "=", "model", ".", "__original_functions__", "[", "'transition'", "]", "g_gu", "=", "model", ".", "__original_gufunctions__", "[", "'transition'", "]", "h_gu", "=", "model", ".", "__original_gufunctions__", "[", "'expectation'", "]", "d_gu", "=", "model", ".", "__original_gufunctions__", "[", "'direct_response'", "]", "p", "=", "model", ".", "calibration", "[", "'parameters'", "]", "n_s", "=", "len", "(", "model", ".", "symbols", "[", "\"states\"", "]", ")", "n_x", "=", "len", "(", "model", ".", "symbols", "[", "\"controls\"", "]", ")", "n_z", "=", "len", "(", "model", ".", "symbols", "[", "\"expectations\"", "]", ")", "n_eps", "=", "len", "(", "model", ".", "symbols", "[", "\"shocks\"", "]", ")", "s0", "=", "model", ".", "calibration", "[", "\"states\"", "]", "x0", "=", "model", ".", "calibration", "[", "\"controls\"", "]", "# construct initial decision rule if not supplied", "if", "initial_dr", "is", "None", ":", "drp", "=", "approximate_controls", "(", "model", ")", "else", ":", "drp", "=", "initial_dr", "# set up quadrature weights and nodes", "distrib", "=", "model", ".", "get_distribution", "(", ")", "nodes", ",", "weights", "=", "distrib", ".", "discretize", "(", ")", "# draw sequence of innovations", "np", ".", "random", ".", "seed", "(", "seed", ")", "distrib", "=", "model", ".", "get_distribution", "(", ")", "sigma", "=", "distrib", ".", "sigma", "epsilon", "=", "np", ".", "random", ".", "multivariate_normal", "(", "np", ".", "zeros", "(", "n_eps", ")", ",", "sigma", ",", "n_sim", ")", "# simulate initial decision rule and do initial regression for coefs", "init_sim", "=", "simulate", "(", "model", ",", "drp", ",", "horizon", "=", "n_sim", ",", "return_array", "=", "True", ",", "forcing_shocks", "=", "epsilon", ")", "s_sim", "=", "init_sim", "[", ":", ",", "0", ",", "0", ":", "n_s", "]", "x_sim", "=", "init_sim", "[", ":", ",", "0", ",", "n_s", ":", "n_s", "+", "n_x", "]", "Phi_sim", "=", "complete_polynomial", "(", "s_sim", ".", "T", ",", "deg", ")", ".", "T", "coefs", "=", "np", ".", "ascontiguousarray", "(", "lstsq", "(", "Phi_sim", ",", "x_sim", ")", "[", "0", "]", ")", "# NOTE: the ascontiguousarray above was needed for numba to compile the", "# `np.dot` in the simulation function in no python mode. 
Appearantly", "# the array returned from lstsq is not C-contiguous", "# allocate for simulated series of expectations and next period states", "z_sim", "=", "np", ".", "empty", "(", "(", "n_sim", ",", "n_z", ")", ")", "S", "=", "np", ".", "empty_like", "(", "s_sim", ")", "X", "=", "np", ".", "empty_like", "(", "x_sim", ")", "H", "=", "np", ".", "empty_like", "(", "z_sim", ")", "new_x", "=", "np", ".", "empty_like", "(", "x_sim", ")", "# set initial states and controls", "s_sim", "[", "0", ",", ":", "]", "=", "s0", "x_sim", "[", "0", ",", ":", "]", "=", "x0", "Phi_t", "=", "np", ".", "empty", "(", "n_complete", "(", "n_s", ",", "deg", ")", ")", "# buffer array for simulation", "# create jitted function that will simulate states and controls, using", "# the epsilon shocks from above (define here as closure over all data", "# above).", "@", "jit", "(", "nopython", "=", "True", ")", "def", "simulate_states_controls", "(", "s", ",", "x", ",", "Phi_t", ",", "coefs", ")", ":", "for", "t", "in", "range", "(", "1", ",", "n_sim", ")", ":", "g", "(", "s", "[", "t", "-", "1", ",", ":", "]", ",", "x", "[", "t", "-", "1", ",", ":", "]", ",", "epsilon", "[", "t", ",", ":", "]", ",", "p", ",", "s", "[", "t", ",", ":", "]", ")", "# fill Phi_t with new complete poly version of s[t, :]", "_complete_poly_impl_vec", "(", "s", "[", "t", ",", ":", "]", ",", "deg", ",", "Phi_t", ")", "# do inner product to get new controls", "x", "[", "t", ",", ":", "]", "=", "Phi_t", "@", "coefs", "it", "=", "0", "err", "=", "10.0", "err_0", "=", "10", "if", "verbose", ":", "headline", "=", "'|{0:^4} | {1:10} | {2:8} | {3:8} |'", "headline", "=", "headline", ".", "format", "(", "'N'", ",", "' Error'", ",", "'Gain'", ",", "'Time'", ")", "stars", "=", "'-'", "*", "len", "(", "headline", ")", "print", "(", "stars", ")", "print", "(", "headline", ")", "print", "(", "stars", ")", "# format string for within loop", "fmt_str", "=", "'|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |'", "while", "err", ">", "tol", "and", "it", "<=", "maxit", ":", "t_start", "=", "time", ".", "time", "(", ")", "# simulate with new coefficients", "simulate_states_controls", "(", "s_sim", ",", "x_sim", ",", "Phi_t", ",", "coefs", ")", "# update expectations of z", "# update_expectations(s_sim, x_sim, z_sim, Phi_sim)", "z_sim", "[", ":", ",", ":", "]", "=", "0.0", "for", "i", "in", "range", "(", "weights", ".", "shape", "[", "0", "]", ")", ":", "e", "=", "nodes", "[", "i", ",", ":", "]", "# extract nodes", "# evaluate future states at each node (stores in S)", "g_gu", "(", "s_sim", ",", "x_sim", ",", "e", ",", "p", ",", "S", ")", "# evaluate future controls at each future state", "_complete_poly_impl", "(", "S", ".", "T", ",", "deg", ",", "Phi_sim", ".", "T", ")", "np", ".", "dot", "(", "Phi_sim", ",", "coefs", ",", "out", "=", "X", ")", "# compute expectation (stores in H)", "h_gu", "(", "S", ",", "X", ",", "p", ",", "H", ")", "z_sim", "+=", "weights", "[", "i", "]", "*", "H", "# get controls on the simulated points from direct_resposne", "# (stores in new_x)", "d_gu", "(", "s_sim", ",", "z_sim", ",", "p", ",", "new_x", ")", "# update basis matrix and do regression of new_x on s_sim to get", "# updated coefficients", "_complete_poly_impl", "(", "s_sim", ".", "T", ",", "deg", ",", "Phi_sim", ".", "T", ")", "new_coefs", "=", "np", ".", "ascontiguousarray", "(", "lstsq", "(", "Phi_sim", ",", "new_x", ")", "[", "0", "]", ")", "# check whether they differ from the preceding guess", "err", "=", "(", "abs", "(", "new_x", "-", "x_sim", ")", ".", "max", "(", ")", ")", "# 
update the series of controls and coefficients", "x_sim", "[", ":", ",", ":", "]", "=", "new_x", "coefs", "=", "(", "1", "-", "damp", ")", "*", "new_coefs", "+", "damp", "*", "coefs", "if", "verbose", ":", "# update error and print if `verbose`", "err_SA", "=", "err", "/", "err_0", "err_0", "=", "err", "t_finish", "=", "time", ".", "time", "(", ")", "elapsed", "=", "t_finish", "-", "t_start", "if", "verbose", ":", "print", "(", "fmt_str", ".", "format", "(", "it", ",", "err", ",", "err_SA", ",", "elapsed", ")", ")", "it", "+=", "1", "if", "it", "==", "maxit", ":", "warnings", ".", "warn", "(", "UserWarning", "(", "\"Maximum number of iterations reached\"", ")", ")", "# compute final fime and do final printout if `verbose`", "t2", "=", "time", ".", "time", "(", ")", "if", "verbose", ":", "print", "(", "stars", ")", "print", "(", "'Elapsed: {} seconds.'", ".", "format", "(", "t2", "-", "t1", ")", ")", "print", "(", "stars", ")", "cp", "=", "CompletePolynomial", "(", "deg", ",", "len", "(", "s0", ")", ")", "cp", ".", "fit_values", "(", "s_sim", ",", "x_sim", ")", "return", "cp" ]
Sketch of algorithm: 0. Choose levels for the initial states and the simulation length (n_sim) 1. Obtain an initial decision rule -- here using first order perturbation 2. Draw a sequence of innovations epsilon 3. Iterate on the following steps: - Use the epsilons, initial states, and proposed decision rule to simulate model forward. Will leave us with time series of states and controls - Evaluate expectations using quadrature - Use direct response to get alternative proposal for controls - Regress updated controls on the simulated states to get proposal coefficients. New coefficients are convex combination of previous coefficients and proposal coefficients. Weights controlled by damp, where damp is the weight on the old coefficients. This should be fairly low to increase chances of convergence. - Check difference between the simulated series of controls and the direct response version of controls
[ "Sketch", "of", "algorithm", ":" ]
python
train
35.107345
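The damped fixed-point update sketched in the docstring above (regress the direct-response controls on the simulated states, then mix old and new coefficients) can be isolated as follows. This is a minimal sketch rather than the dolo implementation; Phi, x_new, coefs and damp mirror the token names above, and numpy's lstsq stands in for the solver's regression step.

import numpy as np

def damped_coefficient_update(Phi, x_new, coefs, damp=0.5):
    # regress the updated controls on the polynomial basis of the simulated states
    new_coefs = np.ascontiguousarray(np.linalg.lstsq(Phi, x_new, rcond=None)[0])
    # convex combination: damp is the weight kept on the old coefficients,
    # so a small damp moves quickly toward the new proposal
    return (1 - damp) * new_coefs + damp * coefs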
inasafe/inasafe
safe/gui/tools/batch/batch_dialog.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/batch/batch_dialog.py#L779-L788
def on_scenario_directory_radio_toggled(self, flag): """Autoconnect slot activated when scenario_directory_radio is checked. :param flag: Flag indicating whether the checkbox was toggled on or off. :type flag: bool """ if flag: self.output_directory.setText(self.source_directory.text()) self.output_directory_chooser.setEnabled(not flag)
[ "def", "on_scenario_directory_radio_toggled", "(", "self", ",", "flag", ")", ":", "if", "flag", ":", "self", ".", "output_directory", ".", "setText", "(", "self", ".", "source_directory", ".", "text", "(", ")", ")", "self", ".", "output_directory_chooser", ".", "setEnabled", "(", "not", "flag", ")" ]
Autoconnect slot activated when scenario_directory_radio is checked. :param flag: Flag indicating whether the checkbox was toggled on or off. :type flag: bool
[ "Autoconnect", "slot", "activated", "when", "scenario_directory_radio", "is", "checked", "." ]
python
train
40.2
smarie/python-parsyfiles
parsyfiles/parsing_registries.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L772-L816
def __build_parser_for_fileobject_and_desiredtype(self, obj_on_filesystem: PersistedObject, object_typ: Type[T], logger: Logger = None) -> Parser: """ Builds from the registry, a parser to parse object obj_on_filesystem as an object of type object_type. To do that, it iterates through all registered parsers in the list in reverse order (last inserted first), and checks if they support the provided object format (single or multifile) and type. If several parsers match, it returns a cascadingparser that will try them in order. :param obj_on_filesystem: :param object_typ: :param logger: :return: """ # first remove any non-generic customization object_type = get_base_generic_type(object_typ) # find all matching parsers for this matching, no_type_match_but_ext_match, no_ext_match_but_type_match, no_match = \ self.find_all_matching_parsers(strict=self.is_strict, desired_type=object_type, required_ext=obj_on_filesystem.ext) matching_parsers = matching[0] + matching[1] + matching[2] if len(matching_parsers) == 0: # No match. Do we have a close match ? (correct type, but not correct extension ?) if len(no_ext_match_but_type_match) > 0: raise NoParserFoundForObjectExt.create(obj_on_filesystem, object_type, set([ext_ for ext_set in [p.supported_exts for p in no_ext_match_but_type_match] for ext_ in ext_set])) else: # no, no match at all raise NoParserFoundForObjectType.create(obj_on_filesystem, object_type, set([typ_ for typ_set in [p.supported_types for p in no_type_match_but_ext_match] for typ_ in typ_set])) elif len(matching_parsers) == 1: # return the match directly return matching_parsers[0] else: # return a cascade of all parsers, in reverse order (since last is our preferred one) # print('----- WARNING : Found several parsers able to parse this item. Combining them into a cascade.') return CascadingParser(list(reversed(matching_parsers)))
[ "def", "__build_parser_for_fileobject_and_desiredtype", "(", "self", ",", "obj_on_filesystem", ":", "PersistedObject", ",", "object_typ", ":", "Type", "[", "T", "]", ",", "logger", ":", "Logger", "=", "None", ")", "->", "Parser", ":", "# first remove any non-generic customization", "object_type", "=", "get_base_generic_type", "(", "object_typ", ")", "# find all matching parsers for this", "matching", ",", "no_type_match_but_ext_match", ",", "no_ext_match_but_type_match", ",", "no_match", "=", "self", ".", "find_all_matching_parsers", "(", "strict", "=", "self", ".", "is_strict", ",", "desired_type", "=", "object_type", ",", "required_ext", "=", "obj_on_filesystem", ".", "ext", ")", "matching_parsers", "=", "matching", "[", "0", "]", "+", "matching", "[", "1", "]", "+", "matching", "[", "2", "]", "if", "len", "(", "matching_parsers", ")", "==", "0", ":", "# No match. Do we have a close match ? (correct type, but not correct extension ?)", "if", "len", "(", "no_ext_match_but_type_match", ")", ">", "0", ":", "raise", "NoParserFoundForObjectExt", ".", "create", "(", "obj_on_filesystem", ",", "object_type", ",", "set", "(", "[", "ext_", "for", "ext_set", "in", "[", "p", ".", "supported_exts", "for", "p", "in", "no_ext_match_but_type_match", "]", "for", "ext_", "in", "ext_set", "]", ")", ")", "else", ":", "# no, no match at all", "raise", "NoParserFoundForObjectType", ".", "create", "(", "obj_on_filesystem", ",", "object_type", ",", "set", "(", "[", "typ_", "for", "typ_set", "in", "[", "p", ".", "supported_types", "for", "p", "in", "no_type_match_but_ext_match", "]", "for", "typ_", "in", "typ_set", "]", ")", ")", "elif", "len", "(", "matching_parsers", ")", "==", "1", ":", "# return the match directly", "return", "matching_parsers", "[", "0", "]", "else", ":", "# return a cascade of all parsers, in reverse order (since last is our preferred one)", "# print('----- WARNING : Found several parsers able to parse this item. Combining them into a cascade.')", "return", "CascadingParser", "(", "list", "(", "reversed", "(", "matching_parsers", ")", ")", ")" ]
Builds from the registry, a parser to parse object obj_on_filesystem as an object of type object_type. To do that, it iterates through all registered parsers in the list in reverse order (last inserted first), and checks if they support the provided object format (single or multifile) and type. If several parsers match, it returns a cascadingparser that will try them in order. :param obj_on_filesystem: :param object_typ: :param logger: :return:
[ "Builds", "from", "the", "registry", "a", "parser", "to", "parse", "object", "obj_on_filesystem", "as", "an", "object", "of", "type", "object_type", "." ]
python
train
56.888889
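As a rough illustration of the selection rule described in that docstring (a single match is returned directly, several matches are wrapped in a cascade tried in reverse registration order), assuming a cascading-parser constructor is supplied by the caller; this is not the parsyfiles API itself.

def pick_parser(matching_parsers, make_cascade):
    # the caller handles the empty case with dedicated "no parser found" errors
    if len(matching_parsers) == 1:
        return matching_parsers[0]
    # several candidates: try them in reverse order, so the most recently
    # registered (preferred) parser is attempted first
    return make_cascade(list(reversed(matching_parsers)))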
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/waveforms.py
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/waveforms.py#L463-L493
def _t_of_e(self, a0=None, t_start=None, f0=None, ef=None, t_obs=5.0): """Rearranged versions of Peters equations This function calculates the semi-major axis and eccentricity over time. """ if ef is None: ef = np.ones_like(self.e0)*0.0000001 beta = 64.0/5.0*self.m1*self.m2*(self.m1+self.m2) e_vals = np.asarray([np.linspace(ef[i], self.e0[i], self.num_points) for i in range(len(self.e0))]) integrand = self._find_integrand(e_vals) integral = np.asarray([np.trapz(integrand[:, i:], x=e_vals[:, i:]) for i in range(e_vals.shape[1])]).T if a0 is None and f0 is None: a0 = (19./12.*t_start*beta*1/integral[:, 0])**(1./4.) * self._f_e(e_vals[:, -1]) elif a0 is None: a0 = ((self.m1 + self.m2)/self.f0**2)**(1./3.) c0 = self._c0_func(a0, self.e0) a_vals = c0[:, np.newaxis]*self._f_e(e_vals) delta_t = 12./19*c0[:, np.newaxis]**4/beta[:, np.newaxis]*integral return e_vals, a_vals, delta_t
[ "def", "_t_of_e", "(", "self", ",", "a0", "=", "None", ",", "t_start", "=", "None", ",", "f0", "=", "None", ",", "ef", "=", "None", ",", "t_obs", "=", "5.0", ")", ":", "if", "ef", "is", "None", ":", "ef", "=", "np", ".", "ones_like", "(", "self", ".", "e0", ")", "*", "0.0000001", "beta", "=", "64.0", "/", "5.0", "*", "self", ".", "m1", "*", "self", ".", "m2", "*", "(", "self", ".", "m1", "+", "self", ".", "m2", ")", "e_vals", "=", "np", ".", "asarray", "(", "[", "np", ".", "linspace", "(", "ef", "[", "i", "]", ",", "self", ".", "e0", "[", "i", "]", ",", "self", ".", "num_points", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "e0", ")", ")", "]", ")", "integrand", "=", "self", ".", "_find_integrand", "(", "e_vals", ")", "integral", "=", "np", ".", "asarray", "(", "[", "np", ".", "trapz", "(", "integrand", "[", ":", ",", "i", ":", "]", ",", "x", "=", "e_vals", "[", ":", ",", "i", ":", "]", ")", "for", "i", "in", "range", "(", "e_vals", ".", "shape", "[", "1", "]", ")", "]", ")", ".", "T", "if", "a0", "is", "None", "and", "f0", "is", "None", ":", "a0", "=", "(", "19.", "/", "12.", "*", "t_start", "*", "beta", "*", "1", "/", "integral", "[", ":", ",", "0", "]", ")", "**", "(", "1.", "/", "4.", ")", "*", "self", ".", "_f_e", "(", "e_vals", "[", ":", ",", "-", "1", "]", ")", "elif", "a0", "is", "None", ":", "a0", "=", "(", "(", "self", ".", "m1", "+", "self", ".", "m2", ")", "/", "self", ".", "f0", "**", "2", ")", "**", "(", "1.", "/", "3.", ")", "c0", "=", "self", ".", "_c0_func", "(", "a0", ",", "self", ".", "e0", ")", "a_vals", "=", "c0", "[", ":", ",", "np", ".", "newaxis", "]", "*", "self", ".", "_f_e", "(", "e_vals", ")", "delta_t", "=", "12.", "/", "19", "*", "c0", "[", ":", ",", "np", ".", "newaxis", "]", "**", "4", "/", "beta", "[", ":", ",", "np", ".", "newaxis", "]", "*", "integral", "return", "e_vals", ",", "a_vals", ",", "delta_t" ]
Rearranged versions of Peters equations This function calculates the semi-major axis and eccentricity over time.
[ "Rearranged", "versions", "of", "Peters", "equations" ]
python
train
34.612903
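The record above integrates Peters' equations over a grid of eccentricities. A hedged sketch of the same numpy pattern follows, with integrand_func standing in for the class's _find_integrand and masses assumed to be in geometric units as in the code.

import numpy as np

def peters_beta(m1, m2):
    # beta = 64/5 * m1 * m2 * (m1 + m2), geometric units (G = c = 1)
    return 64.0 / 5.0 * m1 * m2 * (m1 + m2)

def eccentricity_integral(e0, ef, num_points, integrand_func):
    # eccentricity grid from the final to the initial value, then a
    # trapezoidal integral used to map eccentricity onto elapsed time
    e_vals = np.linspace(ef, e0, num_points)
    return np.trapz(integrand_func(e_vals), x=e_vals)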
PMEAL/OpenPNM
openpnm/algorithms/AdvectionDiffusion.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/algorithms/AdvectionDiffusion.py#L101-L127
def set_outflow_BC(self, pores, mode='merge'): r""" Adds outflow boundary condition to the selected pores. Outflow condition simply means that the gradient of the solved quantity does not change, i.e. is 0. """ # Hijack the parse_mode function to verify mode/pores argument mode = self._parse_mode(mode, allowed=['merge', 'overwrite', 'remove'], single=True) pores = self._parse_indices(pores) # Calculating A[i,i] values to ensure the outflow condition network = self.project.network phase = self.project.phases()[self.settings['phase']] throats = network.find_neighbor_throats(pores=pores) C12 = network['throat.conns'][throats] P12 = phase[self.settings['pressure']][C12] gh = phase[self.settings['hydraulic_conductance']][throats] Q12 = -gh * np.diff(P12, axis=1).squeeze() Qp = np.zeros(self.Np) np.add.at(Qp, C12[:, 0], -Q12) np.add.at(Qp, C12[:, 1], Q12) # Store boundary values if ('pore.bc_outflow' not in self.keys()) or (mode == 'overwrite'): self['pore.bc_outflow'] = np.nan self['pore.bc_outflow'][pores] = Qp[pores]
[ "def", "set_outflow_BC", "(", "self", ",", "pores", ",", "mode", "=", "'merge'", ")", ":", "# Hijack the parse_mode function to verify mode/pores argument", "mode", "=", "self", ".", "_parse_mode", "(", "mode", ",", "allowed", "=", "[", "'merge'", ",", "'overwrite'", ",", "'remove'", "]", ",", "single", "=", "True", ")", "pores", "=", "self", ".", "_parse_indices", "(", "pores", ")", "# Calculating A[i,i] values to ensure the outflow condition", "network", "=", "self", ".", "project", ".", "network", "phase", "=", "self", ".", "project", ".", "phases", "(", ")", "[", "self", ".", "settings", "[", "'phase'", "]", "]", "throats", "=", "network", ".", "find_neighbor_throats", "(", "pores", "=", "pores", ")", "C12", "=", "network", "[", "'throat.conns'", "]", "[", "throats", "]", "P12", "=", "phase", "[", "self", ".", "settings", "[", "'pressure'", "]", "]", "[", "C12", "]", "gh", "=", "phase", "[", "self", ".", "settings", "[", "'hydraulic_conductance'", "]", "]", "[", "throats", "]", "Q12", "=", "-", "gh", "*", "np", ".", "diff", "(", "P12", ",", "axis", "=", "1", ")", ".", "squeeze", "(", ")", "Qp", "=", "np", ".", "zeros", "(", "self", ".", "Np", ")", "np", ".", "add", ".", "at", "(", "Qp", ",", "C12", "[", ":", ",", "0", "]", ",", "-", "Q12", ")", "np", ".", "add", ".", "at", "(", "Qp", ",", "C12", "[", ":", ",", "1", "]", ",", "Q12", ")", "# Store boundary values", "if", "(", "'pore.bc_outflow'", "not", "in", "self", ".", "keys", "(", ")", ")", "or", "(", "mode", "==", "'overwrite'", ")", ":", "self", "[", "'pore.bc_outflow'", "]", "=", "np", ".", "nan", "self", "[", "'pore.bc_outflow'", "]", "[", "pores", "]", "=", "Qp", "[", "pores", "]" ]
r""" Adds outflow boundary condition to the selected pores. Outflow condition simply means that the gradient of the solved quantity does not change, i.e. is 0.
[ "r", "Adds", "outflow", "boundary", "condition", "to", "the", "selected", "pores", "." ]
python
train
45.37037
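The accumulation step used above (net hydraulic flow into each pore from throat pressure differences) is a reusable numpy pattern; here it is with plain arrays instead of OpenPNM objects, purely as an illustration.

import numpy as np

def net_pore_flow(conns, pressures, gh, num_pores):
    # pressure at both ends of every throat, shape (Nt, 2)
    P12 = pressures[conns]
    # throat flow rates from the pressure difference and hydraulic conductance
    Q12 = -gh * np.diff(P12, axis=1).squeeze()
    # accumulate signed flows onto the pores at either end of each throat;
    # np.add.at handles repeated pore indices correctly
    Qp = np.zeros(num_pores)
    np.add.at(Qp, conns[:, 0], -Q12)
    np.add.at(Qp, conns[:, 1], Q12)
    return Qp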
tensorflow/probability
tensorflow_probability/python/internal/tensorshape_util.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/tensorshape_util.py#L144-L160
def dims(x): """Returns a list of dimension sizes, or `None` if `rank` is unknown. For more details, see `help(tf.TensorShape.dims)`. Args: x: object representing a shape; convertible to `tf.TensorShape`. Returns: shape_as_list: list of sizes or `None` values representing each dimensions size if known. A size is `tf.Dimension` if input is a `tf.TensorShape` and an `int` otherwise. """ if isinstance(x, tf.TensorShape): return x.dims r = tf.TensorShape(x).dims return None if r is None else list(map(tf.compat.dimension_value, r))
[ "def", "dims", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "tf", ".", "TensorShape", ")", ":", "return", "x", ".", "dims", "r", "=", "tf", ".", "TensorShape", "(", "x", ")", ".", "dims", "return", "None", "if", "r", "is", "None", "else", "list", "(", "map", "(", "tf", ".", "compat", ".", "dimension_value", ",", "r", ")", ")" ]
Returns a list of dimension sizes, or `None` if `rank` is unknown. For more details, see `help(tf.TensorShape.dims)`. Args: x: object representing a shape; convertible to `tf.TensorShape`. Returns: shape_as_list: list of sizes or `None` values representing each dimensions size if known. A size is `tf.Dimension` if input is a `tf.TensorShape` and an `int` otherwise.
[ "Returns", "a", "list", "of", "dimension", "sizes", "or", "None", "if", "rank", "is", "unknown", "." ]
python
test
32.882353
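A short usage sketch of the dims helper above, with the expected results taken from its docstring (Dimension objects for a tf.TensorShape input, plain ints or None otherwise).

import tensorflow as tf

dims(tf.TensorShape([2, None, 3]))  # [Dimension(2), Dimension(None), Dimension(3)]
dims([2, None, 3])                  # [2, None, 3]
dims(tf.TensorShape(None))          # None, since the rank is unknown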
specialunderwear/django-easymode
easymode/i18n/gettext.py
https://github.com/specialunderwear/django-easymode/blob/92f674b91fb8c54d6e379e2664e2000872d9c95e/easymode/i18n/gettext.py#L191-L205
def msgmerge(self, locale_file, po_string): """ Runs msgmerge on a locale_file and po_string """ cmd = "msgmerge -q %s -" % locale_file p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (msg, err) = p.communicate(input=po_string) if err: # dont raise exception, some stuff in stderr are just warmings logging.warning("%s \nfile: %s\npostring: %s" % (err, locale_file, po_string)) return msg
[ "def", "msgmerge", "(", "self", ",", "locale_file", ",", "po_string", ")", ":", "cmd", "=", "\"msgmerge -q %s -\"", "%", "locale_file", "p", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "(", "msg", ",", "err", ")", "=", "p", ".", "communicate", "(", "input", "=", "po_string", ")", "if", "err", ":", "# dont raise exception, some stuff in stderr are just warmings", "logging", ".", "warning", "(", "\"%s \\nfile: %s\\npostring: %s\"", "%", "(", "err", ",", "locale_file", ",", "po_string", ")", ")", "return", "msg" ]
Runs msgmerge on a locale_file and po_string
[ "Runs", "msgmerge", "on", "a", "locale_file", "and", "po_string" ]
python
train
35.866667
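The msgmerge call above pipes a PO string into the external gettext binary over stdin. A standalone sketch of the same pattern, assuming msgmerge is installed; the argument-list form avoids shell quoting issues.

import subprocess

def run_msgmerge(locale_file, po_string):
    # the trailing '-' tells msgmerge to read the reference catalog from stdin
    p = subprocess.Popen(["msgmerge", "-q", locale_file, "-"],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         text=True)
    merged, err = p.communicate(input=po_string)
    return merged, err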
acutesoftware/AIKIF
aikif/toolbox/game_board_utils.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/game_board_utils.py#L25-L45
def build_board_checkers(): """ builds a checkers starting board Printing Grid 0 B 0 B 0 B 0 B B 0 B 0 B 0 B 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 W 0 W 0 W 0 W W 0 W 0 W 0 W 0 """ grd = Grid(8,8, ["B","W"]) for c in range(4): grd.set_tile(0,(c*2) - 1, "B") grd.set_tile(1,(c*2) - 0, "B") grd.set_tile(6,(c*2) + 1, "W") grd.set_tile(7,(c*2) - 0, "W") print(grd) return grd
[ "def", "build_board_checkers", "(", ")", ":", "grd", "=", "Grid", "(", "8", ",", "8", ",", "[", "\"B\"", ",", "\"W\"", "]", ")", "for", "c", "in", "range", "(", "4", ")", ":", "grd", ".", "set_tile", "(", "0", ",", "(", "c", "*", "2", ")", "-", "1", ",", "\"B\"", ")", "grd", ".", "set_tile", "(", "1", ",", "(", "c", "*", "2", ")", "-", "0", ",", "\"B\"", ")", "grd", ".", "set_tile", "(", "6", ",", "(", "c", "*", "2", ")", "+", "1", ",", "\"W\"", ")", "grd", ".", "set_tile", "(", "7", ",", "(", "c", "*", "2", ")", "-", "0", ",", "\"W\"", ")", "print", "(", "grd", ")", "return", "grd" ]
builds a checkers starting board Printing Grid 0 B 0 B 0 B 0 B B 0 B 0 B 0 B 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 W 0 W 0 W 0 W W 0 W 0 W 0 W 0
[ "builds", "a", "checkers", "starting", "board", "Printing", "Grid", "0", "B", "0", "B", "0", "B", "0", "B", "B", "0", "B", "0", "B", "0", "B", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "W", "0", "W", "0", "W", "0", "W", "W", "0", "W", "0", "W", "0", "W", "0" ]
python
train
33.714286
tensorflow/tensorboard
tensorboard/compat/tensorflow_stub/tensor_shape.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/compat/tensorflow_stub/tensor_shape.py#L649-L676
def merge_with(self, other): """Returns a `TensorShape` combining the information in `self` and `other`. The dimensions in `self` and `other` are merged elementwise, according to the rules defined for `Dimension.merge_with()`. Args: other: Another `TensorShape`. Returns: A `TensorShape` containing the combined information of `self` and `other`. Raises: ValueError: If `self` and `other` are not convertible. """ other = as_shape(other) if self._dims is None: return other else: try: self.assert_same_rank(other) new_dims = [] for i, dim in enumerate(self._dims): new_dims.append(dim.merge_with(other[i])) return TensorShape(new_dims) except ValueError: raise ValueError("Shapes %s and %s are not convertible" % (self, other))
[ "def", "merge_with", "(", "self", ",", "other", ")", ":", "other", "=", "as_shape", "(", "other", ")", "if", "self", ".", "_dims", "is", "None", ":", "return", "other", "else", ":", "try", ":", "self", ".", "assert_same_rank", "(", "other", ")", "new_dims", "=", "[", "]", "for", "i", ",", "dim", "in", "enumerate", "(", "self", ".", "_dims", ")", ":", "new_dims", ".", "append", "(", "dim", ".", "merge_with", "(", "other", "[", "i", "]", ")", ")", "return", "TensorShape", "(", "new_dims", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Shapes %s and %s are not convertible\"", "%", "(", "self", ",", "other", ")", ")" ]
Returns a `TensorShape` combining the information in `self` and `other`. The dimensions in `self` and `other` are merged elementwise, according to the rules defined for `Dimension.merge_with()`. Args: other: Another `TensorShape`. Returns: A `TensorShape` containing the combined information of `self` and `other`. Raises: ValueError: If `self` and `other` are not convertible.
[ "Returns", "a", "TensorShape", "combining", "the", "information", "in", "self", "and", "other", "." ]
python
train
34.285714
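A usage sketch of the merging rule above, assuming the TensorShape class from the stub module shown in the path; each dimension pair must agree or one side must be unknown.

from tensorboard.compat.tensorflow_stub.tensor_shape import TensorShape

a = TensorShape([None, 2])
a.merge_with(TensorShape([3, None]))  # TensorShape([3, 2])
a.merge_with([4, 3])                  # ValueError: shapes are not convertible (2 vs 3)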
mLewisLogic/foursquare
foursquare/__init__.py
https://github.com/mLewisLogic/foursquare/blob/420f3b588b9af154688ec82649f24a70f96c1665/foursquare/__init__.py#L768-L774
def _post(url, headers={}, data=None, files=None): """Tries to POST data to an endpoint""" try: response = requests.post(url, headers=headers, data=data, files=files, verify=VERIFY_SSL) return _process_response(response) except requests.exceptions.RequestException as e: _log_and_raise_exception('Error connecting with foursquare API', e)
[ "def", "_post", "(", "url", ",", "headers", "=", "{", "}", ",", "data", "=", "None", ",", "files", "=", "None", ")", ":", "try", ":", "response", "=", "requests", ".", "post", "(", "url", ",", "headers", "=", "headers", ",", "data", "=", "data", ",", "files", "=", "files", ",", "verify", "=", "VERIFY_SSL", ")", "return", "_process_response", "(", "response", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "_log_and_raise_exception", "(", "'Error connecting with foursquare API'", ",", "e", ")" ]
Tries to POST data to an endpoint
[ "Tries", "to", "POST", "data", "to", "an", "endpoint" ]
python
train
52.571429
EventTeam/beliefs
src/beliefs/beliefstate.py
https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L351-L370
def is_entailed_by(self, other): """ Given two beliefstates, returns True iff the calling instance implies the other beliefstate, meaning it contains at least the same structure (for all structures) and all values (for all defined values). Inverse of `entails`. Note: this only compares the items in the DictCell, not `pos`, `environment_variables` or `deferred_effects`. """ for (s_key, s_val) in self: if s_key in other: if not hasattr(other[s_key], 'implies'): raise Exception("Cell for %s is missing implies()" % s_key) if not other[s_key].implies(s_val): return False else: return False return True
[ "def", "is_entailed_by", "(", "self", ",", "other", ")", ":", "for", "(", "s_key", ",", "s_val", ")", "in", "self", ":", "if", "s_key", "in", "other", ":", "if", "not", "hasattr", "(", "other", "[", "s_key", "]", ",", "'implies'", ")", ":", "raise", "Exception", "(", "\"Cell for %s is missing implies()\"", "%", "s_key", ")", "if", "not", "other", "[", "s_key", "]", ".", "implies", "(", "s_val", ")", ":", "return", "False", "else", ":", "return", "False", "return", "True" ]
Given two beliefstates, returns True iff the calling instance implies the other beliefstate, meaning it contains at least the same structure (for all structures) and all values (for all defined values). Inverse of `entails`. Note: this only compares the items in the DictCell, not `pos`, `environment_variables` or `deferred_effects`.
[ "Given", "two", "beliefstates", "returns", "True", "iff", "the", "calling", "instance", "implies", "the", "other", "beliefstate", "meaning", "it", "contains", "at", "least", "the", "same", "structure", "(", "for", "all", "structures", ")", "and", "all", "values", "(", "for", "all", "defined", "values", ")", ".", "Inverse", "of", "entails", "." ]
python
train
39.3
arcturial/clickatell-python
clickatell/rest/__init__.py
https://github.com/arcturial/clickatell-python/blob/4a554c28edaf2e5d0d9e81b4c9415241bfd61d00/clickatell/rest/__init__.py#L18-L29
def request(self, action, data={}, headers={}, method='GET'): """ Append the REST headers to every request """ headers = { "Authorization": "Bearer " + self.token, "Content-Type": "application/json", "X-Version": "1", "Accept": "application/json" } return Transport.request(self, action, data, headers, method)
[ "def", "request", "(", "self", ",", "action", ",", "data", "=", "{", "}", ",", "headers", "=", "{", "}", ",", "method", "=", "'GET'", ")", ":", "headers", "=", "{", "\"Authorization\"", ":", "\"Bearer \"", "+", "self", ".", "token", ",", "\"Content-Type\"", ":", "\"application/json\"", ",", "\"X-Version\"", ":", "\"1\"", ",", "\"Accept\"", ":", "\"application/json\"", "}", "return", "Transport", ".", "request", "(", "self", ",", "action", ",", "data", ",", "headers", ",", "method", ")" ]
Append the REST headers to every request
[ "Append", "the", "REST", "headers", "to", "every", "request" ]
python
train
33
mintchaos/django_inlines
django_inlines/templatetags/inlines.py
https://github.com/mintchaos/django_inlines/blob/1912e508d04884713a6c44a068c21fbd217d478a/django_inlines/templatetags/inlines.py#L40-L80
def process_inlines(parser, token): """ Searches through the provided content and applies inlines where ever they are found. Syntax:: {% process_inlines entry.body [in template_dir] [as varname] } Examples:: {% process_inlines entry.body %} {% process_inlines entry.body as body %} {% process_inlines entry.body in 'inlines/sidebar' %} {% process_inlines entry.body in 'inlines/sidebar' as body %} """ args = token.split_contents() if not len(args) in (2, 4, 6): raise template.TemplateSyntaxError("%r tag requires either 1, 3 or 5 arguments." % args[0]) var_name = args[1] ALLOWED_ARGS = ['as', 'in'] kwargs = { 'template_directory': None } if len(args) > 2: tuples = zip(*[args[2:][i::2] for i in range(2)]) for k,v in tuples: if not k in ALLOWED_ARGS: raise template.TemplateSyntaxError("%r tag options arguments must be one of %s." % (args[0], ', '.join(ALLOWED_ARGS))) if k == 'in': kwargs['template_directory'] = v if k == 'as': kwargs['asvar'] = v return InlinesNode(var_name, **kwargs)
[ "def", "process_inlines", "(", "parser", ",", "token", ")", ":", "args", "=", "token", ".", "split_contents", "(", ")", "if", "not", "len", "(", "args", ")", "in", "(", "2", ",", "4", ",", "6", ")", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"%r tag requires either 1, 3 or 5 arguments.\"", "%", "args", "[", "0", "]", ")", "var_name", "=", "args", "[", "1", "]", "ALLOWED_ARGS", "=", "[", "'as'", ",", "'in'", "]", "kwargs", "=", "{", "'template_directory'", ":", "None", "}", "if", "len", "(", "args", ")", ">", "2", ":", "tuples", "=", "zip", "(", "*", "[", "args", "[", "2", ":", "]", "[", "i", ":", ":", "2", "]", "for", "i", "in", "range", "(", "2", ")", "]", ")", "for", "k", ",", "v", "in", "tuples", ":", "if", "not", "k", "in", "ALLOWED_ARGS", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"%r tag options arguments must be one of %s.\"", "%", "(", "args", "[", "0", "]", ",", "', '", ".", "join", "(", "ALLOWED_ARGS", ")", ")", ")", "if", "k", "==", "'in'", ":", "kwargs", "[", "'template_directory'", "]", "=", "v", "if", "k", "==", "'as'", ":", "kwargs", "[", "'asvar'", "]", "=", "v", "return", "InlinesNode", "(", "var_name", ",", "*", "*", "kwargs", ")" ]
Searches through the provided content and applies inlines where ever they are found. Syntax:: {% process_inlines entry.body [in template_dir] [as varname] } Examples:: {% process_inlines entry.body %} {% process_inlines entry.body as body %} {% process_inlines entry.body in 'inlines/sidebar' %} {% process_inlines entry.body in 'inlines/sidebar' as body %}
[ "Searches", "through", "the", "provided", "content", "and", "applies", "inlines", "where", "ever", "they", "are", "found", "." ]
python
train
28.439024
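The tag above pairs its optional keywords with their values using a small zip idiom; here is that idiom in isolation, with an illustrative argument list.

args = ["process_inlines", "entry.body", "in", "inlines/sidebar", "as", "body"]
# args[2:][0::2] picks the keywords, args[2:][1::2] picks the values
pairs = list(zip(*[args[2:][i::2] for i in range(2)]))
# pairs == [("in", "inlines/sidebar"), ("as", "body")]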
Ex-Mente/auxi.0
auxi/modelling/process/materials/thermo.py
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/modelling/process/materials/thermo.py#L924-L931
def amount(self): """ Determine the sum of mole amounts of all the compounds. :returns: Amount. [kmol] """ return sum(self.get_compound_amount(c) for c in self.material.compounds)
[ "def", "amount", "(", "self", ")", ":", "return", "sum", "(", "self", ".", "get_compound_amount", "(", "c", ")", "for", "c", "in", "self", ".", "material", ".", "compounds", ")" ]
Determine the sum of mole amounts of all the compounds. :returns: Amount. [kmol]
[ "Determine", "the", "sum", "of", "mole", "amounts", "of", "all", "the", "compounds", "." ]
python
valid
26.75
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py#L630-L644
def overlay_gateway_sflow_sflow_vlan_action(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels") name_key = ET.SubElement(overlay_gateway, "name") name_key.text = kwargs.pop('name') sflow = ET.SubElement(overlay_gateway, "sflow") sflow_profile_name_key = ET.SubElement(sflow, "sflow-profile-name") sflow_profile_name_key.text = kwargs.pop('sflow_profile_name') sflow_vlan_action = ET.SubElement(sflow, "sflow-vlan-action") sflow_vlan_action.text = kwargs.pop('sflow_vlan_action') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "overlay_gateway_sflow_sflow_vlan_action", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "overlay_gateway", "=", "ET", ".", "SubElement", "(", "config", ",", "\"overlay-gateway\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-tunnels\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "overlay_gateway", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "sflow", "=", "ET", ".", "SubElement", "(", "overlay_gateway", ",", "\"sflow\"", ")", "sflow_profile_name_key", "=", "ET", ".", "SubElement", "(", "sflow", ",", "\"sflow-profile-name\"", ")", "sflow_profile_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'sflow_profile_name'", ")", "sflow_vlan_action", "=", "ET", ".", "SubElement", "(", "sflow", ",", "\"sflow-vlan-action\"", ")", "sflow_vlan_action", ".", "text", "=", "kwargs", ".", "pop", "(", "'sflow_vlan_action'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
51.333333
za-creature/gulpless
gulpless/__init__.py
https://github.com/za-creature/gulpless/blob/fd73907dbe86880086719816bb042233f85121f6/gulpless/__init__.py#L11-L70
def main(): """Entry point for command line usage.""" import colorama import argparse import logging import sys import os parser = argparse.ArgumentParser(prog="gulpless", description="Simple build system.") parser.add_argument("-v", "--version", action="version", version="%(prog)s 0.7.6") parser.add_argument("-d", "--directory", action="store", default=os.getcwd(), help="Look for `build.py` in this folder (defaults to " "the current directory)") parser.add_argument("mode", action="store", choices=["build", "interactive"], default="interactive", metavar="mode", nargs="?", help="If `interactive` (the default), will wait for " "filesystem events and attempt to keep the input " "and output folders in sync. If `build`, it will " "attempt to build all updated files, then exit.") args = parser.parse_args() os.chdir(args.directory) sys.path.append(os.getcwd()) if os.environ.get("TERM") == "cygwin": # colorama doesn't play well with git bash del os.environ["TERM"] colorama.init() os.environ["TERM"] = "cygwin" else: colorama.init() try: old, sys.dont_write_bytecode = sys.dont_write_bytecode, True import build except ImportError: sys.exit("No `build.py` found in current folder.") finally: sys.dont_write_bytecode = old try: logging.basicConfig(level=build.LOGGING, format="%(message)s") except AttributeError: logging.basicConfig(level=logging.INFO, format="%(message)s") reactor = Reactor(build.SRC, build.DEST) for handler in build.HANDLERS: reactor.add_handler(handler) reactor.run(args.mode == "build")
[ "def", "main", "(", ")", ":", "import", "colorama", "import", "argparse", "import", "logging", "import", "sys", "import", "os", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "\"gulpless\"", ",", "description", "=", "\"Simple build system.\"", ")", "parser", ".", "add_argument", "(", "\"-v\"", ",", "\"--version\"", ",", "action", "=", "\"version\"", ",", "version", "=", "\"%(prog)s 0.7.6\"", ")", "parser", ".", "add_argument", "(", "\"-d\"", ",", "\"--directory\"", ",", "action", "=", "\"store\"", ",", "default", "=", "os", ".", "getcwd", "(", ")", ",", "help", "=", "\"Look for `build.py` in this folder (defaults to \"", "\"the current directory)\"", ")", "parser", ".", "add_argument", "(", "\"mode\"", ",", "action", "=", "\"store\"", ",", "choices", "=", "[", "\"build\"", ",", "\"interactive\"", "]", ",", "default", "=", "\"interactive\"", ",", "metavar", "=", "\"mode\"", ",", "nargs", "=", "\"?\"", ",", "help", "=", "\"If `interactive` (the default), will wait for \"", "\"filesystem events and attempt to keep the input \"", "\"and output folders in sync. If `build`, it will \"", "\"attempt to build all updated files, then exit.\"", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "os", ".", "chdir", "(", "args", ".", "directory", ")", "sys", ".", "path", ".", "append", "(", "os", ".", "getcwd", "(", ")", ")", "if", "os", ".", "environ", ".", "get", "(", "\"TERM\"", ")", "==", "\"cygwin\"", ":", "# colorama doesn't play well with git bash", "del", "os", ".", "environ", "[", "\"TERM\"", "]", "colorama", ".", "init", "(", ")", "os", ".", "environ", "[", "\"TERM\"", "]", "=", "\"cygwin\"", "else", ":", "colorama", ".", "init", "(", ")", "try", ":", "old", ",", "sys", ".", "dont_write_bytecode", "=", "sys", ".", "dont_write_bytecode", ",", "True", "import", "build", "except", "ImportError", ":", "sys", ".", "exit", "(", "\"No `build.py` found in current folder.\"", ")", "finally", ":", "sys", ".", "dont_write_bytecode", "=", "old", "try", ":", "logging", ".", "basicConfig", "(", "level", "=", "build", ".", "LOGGING", ",", "format", "=", "\"%(message)s\"", ")", "except", "AttributeError", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ",", "format", "=", "\"%(message)s\"", ")", "reactor", "=", "Reactor", "(", "build", ".", "SRC", ",", "build", ".", "DEST", ")", "for", "handler", "in", "build", ".", "HANDLERS", ":", "reactor", ".", "add_handler", "(", "handler", ")", "reactor", ".", "run", "(", "args", ".", "mode", "==", "\"build\"", ")" ]
Entry point for command line usage.
[ "Entry", "point", "for", "command", "line", "usage", "." ]
python
train
35.383333
saltstack/salt
salt/modules/aix_group.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aix_group.py#L186-L200
def members(name, members_list, root=None): ''' Replaces members of the group with a provided list. CLI Example: salt '*' group.members foo 'user1,user2,user3,...' Replaces a membership list for a local group 'foo'. foo:x:1234:user1,user2,user3,... ''' cmd = 'chgrpmem -m = {0} {1}'.format(members_list, name) retcode = __salt__['cmd.retcode'](cmd, python_shell=False) return not retcode
[ "def", "members", "(", "name", ",", "members_list", ",", "root", "=", "None", ")", ":", "cmd", "=", "'chgrpmem -m = {0} {1}'", ".", "format", "(", "members_list", ",", "name", ")", "retcode", "=", "__salt__", "[", "'cmd.retcode'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "return", "not", "retcode" ]
Replaces members of the group with a provided list. CLI Example: salt '*' group.members foo 'user1,user2,user3,...' Replaces a membership list for a local group 'foo'. foo:x:1234:user1,user2,user3,...
[ "Replaces", "members", "of", "the", "group", "with", "a", "provided", "list", "." ]
python
train
28.333333
SheffieldML/GPy
GPy/core/sparse_gp.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/core/sparse_gp.py#L121-L131
def to_dict(self, save_data=True): """ Convert the object into a json serializable dictionary. :param boolean save_data: if true, it adds the training data self.X and self.Y to the dictionary :return dict: json serializable dictionary containing the needed information to instantiate the object """ input_dict = super(SparseGP, self).to_dict(save_data) input_dict["class"] = "GPy.core.SparseGP" input_dict["Z"] = self.Z.tolist() return input_dict
[ "def", "to_dict", "(", "self", ",", "save_data", "=", "True", ")", ":", "input_dict", "=", "super", "(", "SparseGP", ",", "self", ")", ".", "to_dict", "(", "save_data", ")", "input_dict", "[", "\"class\"", "]", "=", "\"GPy.core.SparseGP\"", "input_dict", "[", "\"Z\"", "]", "=", "self", ".", "Z", ".", "tolist", "(", ")", "return", "input_dict" ]
Convert the object into a json serializable dictionary. :param boolean save_data: if true, it adds the training data self.X and self.Y to the dictionary :return dict: json serializable dictionary containing the needed information to instantiate the object
[ "Convert", "the", "object", "into", "a", "json", "serializable", "dictionary", "." ]
python
train
46.272727
arcticfoxnv/slackminion
slackminion/plugins/core/acl.py
https://github.com/arcticfoxnv/slackminion/blob/62ea77aba5ac5ba582793e578a379a76f7d26cdb/slackminion/plugins/core/acl.py#L112-L125
def acl_show(self, msg, args): """Show current allow and deny blocks for the given acl.""" name = args[0] if len(args) > 0 else None if name is None: return "%s: The following ACLs are defined: %s" % (msg.user, ', '.join(self._acl.keys())) if name not in self._acl: return "Sorry, couldn't find an acl named '%s'" % name return '\n'.join([ "%s: ACL '%s' is defined as follows:" % (msg.user, name), "allow: %s" % ', '.join(self._acl[name]['allow']), "deny: %s" % ', '.join(self._acl[name]['deny']) ])
[ "def", "acl_show", "(", "self", ",", "msg", ",", "args", ")", ":", "name", "=", "args", "[", "0", "]", "if", "len", "(", "args", ")", ">", "0", "else", "None", "if", "name", "is", "None", ":", "return", "\"%s: The following ACLs are defined: %s\"", "%", "(", "msg", ".", "user", ",", "', '", ".", "join", "(", "self", ".", "_acl", ".", "keys", "(", ")", ")", ")", "if", "name", "not", "in", "self", ".", "_acl", ":", "return", "\"Sorry, couldn't find an acl named '%s'\"", "%", "name", "return", "'\\n'", ".", "join", "(", "[", "\"%s: ACL '%s' is defined as follows:\"", "%", "(", "msg", ".", "user", ",", "name", ")", ",", "\"allow: %s\"", "%", "', '", ".", "join", "(", "self", ".", "_acl", "[", "name", "]", "[", "'allow'", "]", ")", ",", "\"deny: %s\"", "%", "', '", ".", "join", "(", "self", ".", "_acl", "[", "name", "]", "[", "'deny'", "]", ")", "]", ")" ]
Show current allow and deny blocks for the given acl.
[ "Show", "current", "allow", "and", "deny", "blocks", "for", "the", "given", "acl", "." ]
python
valid
42.571429
saltstack/salt
salt/roster/cloud.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/roster/cloud.py#L97-L113
def extract_ipv4(roster_order, ipv4): ''' Extract the preferred IP address from the ipv4 grain ''' for ip_type in roster_order: for ip_ in ipv4: if ':' in ip_: continue if not salt.utils.validate.net.ipv4_addr(ip_): continue if ip_type == 'local' and ip_.startswith('127.'): return ip_ elif ip_type == 'private' and not salt.utils.cloud.is_public_ip(ip_): return ip_ elif ip_type == 'public' and salt.utils.cloud.is_public_ip(ip_): return ip_ return None
[ "def", "extract_ipv4", "(", "roster_order", ",", "ipv4", ")", ":", "for", "ip_type", "in", "roster_order", ":", "for", "ip_", "in", "ipv4", ":", "if", "':'", "in", "ip_", ":", "continue", "if", "not", "salt", ".", "utils", ".", "validate", ".", "net", ".", "ipv4_addr", "(", "ip_", ")", ":", "continue", "if", "ip_type", "==", "'local'", "and", "ip_", ".", "startswith", "(", "'127.'", ")", ":", "return", "ip_", "elif", "ip_type", "==", "'private'", "and", "not", "salt", ".", "utils", ".", "cloud", ".", "is_public_ip", "(", "ip_", ")", ":", "return", "ip_", "elif", "ip_type", "==", "'public'", "and", "salt", ".", "utils", ".", "cloud", ".", "is_public_ip", "(", "ip_", ")", ":", "return", "ip_", "return", "None" ]
Extract the preferred IP address from the ipv4 grain
[ "Extract", "the", "preferred", "IP", "address", "from", "the", "ipv4", "grain" ]
python
train
35.647059
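A usage sketch of the roster helper above; the addresses are illustrative and the expected results assume salt classifies the RFC 1918 address as private.

roster_order = ("public", "private", "local")
ipv4 = ["10.0.0.5", "203.0.113.7"]
# preferred types are tried in order, so the public address wins here:
#   extract_ipv4(roster_order, ipv4)                    -> "203.0.113.7"
# flipping the order prefers the private address instead:
#   extract_ipv4(("private", "public", "local"), ipv4)  -> "10.0.0.5"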
eaton-lab/toytree
toytree/Drawing.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/Drawing.py#L314-L349
def assign_tip_labels_and_colors(self): "assign tip labels based on user provided kwargs" # COLOR # tip color overrides tipstyle.fill if self.style.tip_labels_colors: #if self.style.tip_labels_style.fill: # self.style.tip_labels_style.fill = None if self.ttree._fixed_order: if isinstance(self.style.tip_labels_colors, (list, np.ndarray)): cols = np.array(self.style.tip_labels_colors) orde = cols[self.ttree._fixed_idx] self.style.tip_labels_colors = list(orde) # LABELS # False == hide tip labels if self.style.tip_labels is False: self.style.tip_labels_style["-toyplot-anchor-shift"] = "0px" self.tip_labels = ["" for i in self.ttree.get_tip_labels()] # LABELS # user entered something... else: # if user did not change label-offset then shift it here if not self.style.tip_labels_style["-toyplot-anchor-shift"]: self.style.tip_labels_style["-toyplot-anchor-shift"] = "15px" # if user entered list in get_tip_labels order reverse it for plot if isinstance(self.style.tip_labels, list): self.tip_labels = self.style.tip_labels # True assigns tip labels from tree else: if self.ttree._fixed_order: self.tip_labels = self.ttree._fixed_order else: self.tip_labels = self.ttree.get_tip_labels()
[ "def", "assign_tip_labels_and_colors", "(", "self", ")", ":", "# COLOR", "# tip color overrides tipstyle.fill", "if", "self", ".", "style", ".", "tip_labels_colors", ":", "#if self.style.tip_labels_style.fill:", "# self.style.tip_labels_style.fill = None", "if", "self", ".", "ttree", ".", "_fixed_order", ":", "if", "isinstance", "(", "self", ".", "style", ".", "tip_labels_colors", ",", "(", "list", ",", "np", ".", "ndarray", ")", ")", ":", "cols", "=", "np", ".", "array", "(", "self", ".", "style", ".", "tip_labels_colors", ")", "orde", "=", "cols", "[", "self", ".", "ttree", ".", "_fixed_idx", "]", "self", ".", "style", ".", "tip_labels_colors", "=", "list", "(", "orde", ")", "# LABELS", "# False == hide tip labels", "if", "self", ".", "style", ".", "tip_labels", "is", "False", ":", "self", ".", "style", ".", "tip_labels_style", "[", "\"-toyplot-anchor-shift\"", "]", "=", "\"0px\"", "self", ".", "tip_labels", "=", "[", "\"\"", "for", "i", "in", "self", ".", "ttree", ".", "get_tip_labels", "(", ")", "]", "# LABELS", "# user entered something...", "else", ":", "# if user did not change label-offset then shift it here", "if", "not", "self", ".", "style", ".", "tip_labels_style", "[", "\"-toyplot-anchor-shift\"", "]", ":", "self", ".", "style", ".", "tip_labels_style", "[", "\"-toyplot-anchor-shift\"", "]", "=", "\"15px\"", "# if user entered list in get_tip_labels order reverse it for plot", "if", "isinstance", "(", "self", ".", "style", ".", "tip_labels", ",", "list", ")", ":", "self", ".", "tip_labels", "=", "self", ".", "style", ".", "tip_labels", "# True assigns tip labels from tree", "else", ":", "if", "self", ".", "ttree", ".", "_fixed_order", ":", "self", ".", "tip_labels", "=", "self", ".", "ttree", ".", "_fixed_order", "else", ":", "self", ".", "tip_labels", "=", "self", ".", "ttree", ".", "get_tip_labels", "(", ")" ]
assign tip labels based on user provided kwargs
[ "assign", "tip", "labels", "based", "on", "user", "provided", "kwargs" ]
python
train
44.25
saltstack/salt
salt/log/handlers/sentry_mod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/log/handlers/sentry_mod.py#L120-L235
def setup_handlers(): ''' sets up the sentry handler ''' __grains__ = salt.loader.grains(__opts__) __salt__ = salt.loader.minion_mods(__opts__) if 'sentry_handler' not in __opts__: log.debug('No \'sentry_handler\' key was found in the configuration') return False options = {} dsn = get_config_value('dsn') if dsn is not None: try: # support raven ver 5.5.0 from raven.transport import TransportRegistry, default_transports from raven.utils.urlparse import urlparse transport_registry = TransportRegistry(default_transports) url = urlparse(dsn) if not transport_registry.supported_scheme(url.scheme): raise ValueError('Unsupported Sentry DSN scheme: {0}'.format(url.scheme)) except ValueError as exc: log.info( 'Raven failed to parse the configuration provided DSN: %s', exc ) if not dsn: for key in ('project', 'servers', 'public_key', 'secret_key'): config_value = get_config_value(key) if config_value is None and key not in options: log.debug( 'The required \'sentry_handler\' configuration key, ' '\'%s\', is not properly configured. Not configuring ' 'the sentry logging handler.', key ) return elif config_value is None: continue options[key] = config_value # site: An optional, arbitrary string to identify this client installation. options.update({ # site: An optional, arbitrary string to identify this client # installation 'site': get_config_value('site'), # name: This will override the server_name value for this installation. # Defaults to socket.gethostname() 'name': get_config_value('name'), # exclude_paths: Extending this allow you to ignore module prefixes # when sentry attempts to discover which function an error comes from 'exclude_paths': get_config_value('exclude_paths', ()), # include_paths: For example, in Django this defaults to your list of # INSTALLED_APPS, and is used for drilling down where an exception is # located 'include_paths': get_config_value('include_paths', ()), # list_max_length: The maximum number of items a list-like container # should store. 'list_max_length': get_config_value('list_max_length'), # string_max_length: The maximum characters of a string that should be # stored. 'string_max_length': get_config_value('string_max_length'), # auto_log_stacks: Should Raven automatically log frame stacks # (including locals) all calls as it would for exceptions. 'auto_log_stacks': get_config_value('auto_log_stacks'), # timeout: If supported, the timeout value for sending messages to # remote. 'timeout': get_config_value('timeout', 1), # processors: A list of processors to apply to events before sending # them to the Sentry server. Useful for sending additional global state # data or sanitizing data that you want to keep off of the server. 
'processors': get_config_value('processors'), # dsn: Ensure the DSN is passed into the client 'dsn': dsn }) client = raven.Client(**options) context = get_config_value('context') context_dict = {} if context is not None: for tag in context: try: tag_value = __grains__[tag] except KeyError: log.debug('Sentry tag \'%s\' not found in grains.', tag) continue if tag_value: context_dict[tag] = tag_value if context_dict: client.context.merge({'tags': context_dict}) try: handler = SentryHandler(client) exclude_patterns = get_config_value('exclude_patterns', None) if exclude_patterns: filter_regexes = [re.compile(pattern) for pattern in exclude_patterns] class FilterExcludedMessages(object): @staticmethod def filter(record): m = record.getMessage() return not any(regex.search(m) for regex in filter_regexes) handler.addFilter(FilterExcludedMessages()) handler.setLevel(LOG_LEVELS[get_config_value('log_level', 'error')]) return handler except ValueError as exc: log.debug('Failed to setup the sentry logging handler', exc_info=True)
[ "def", "setup_handlers", "(", ")", ":", "__grains__", "=", "salt", ".", "loader", ".", "grains", "(", "__opts__", ")", "__salt__", "=", "salt", ".", "loader", ".", "minion_mods", "(", "__opts__", ")", "if", "'sentry_handler'", "not", "in", "__opts__", ":", "log", ".", "debug", "(", "'No \\'sentry_handler\\' key was found in the configuration'", ")", "return", "False", "options", "=", "{", "}", "dsn", "=", "get_config_value", "(", "'dsn'", ")", "if", "dsn", "is", "not", "None", ":", "try", ":", "# support raven ver 5.5.0", "from", "raven", ".", "transport", "import", "TransportRegistry", ",", "default_transports", "from", "raven", ".", "utils", ".", "urlparse", "import", "urlparse", "transport_registry", "=", "TransportRegistry", "(", "default_transports", ")", "url", "=", "urlparse", "(", "dsn", ")", "if", "not", "transport_registry", ".", "supported_scheme", "(", "url", ".", "scheme", ")", ":", "raise", "ValueError", "(", "'Unsupported Sentry DSN scheme: {0}'", ".", "format", "(", "url", ".", "scheme", ")", ")", "except", "ValueError", "as", "exc", ":", "log", ".", "info", "(", "'Raven failed to parse the configuration provided DSN: %s'", ",", "exc", ")", "if", "not", "dsn", ":", "for", "key", "in", "(", "'project'", ",", "'servers'", ",", "'public_key'", ",", "'secret_key'", ")", ":", "config_value", "=", "get_config_value", "(", "key", ")", "if", "config_value", "is", "None", "and", "key", "not", "in", "options", ":", "log", ".", "debug", "(", "'The required \\'sentry_handler\\' configuration key, '", "'\\'%s\\', is not properly configured. Not configuring '", "'the sentry logging handler.'", ",", "key", ")", "return", "elif", "config_value", "is", "None", ":", "continue", "options", "[", "key", "]", "=", "config_value", "# site: An optional, arbitrary string to identify this client installation.", "options", ".", "update", "(", "{", "# site: An optional, arbitrary string to identify this client", "# installation", "'site'", ":", "get_config_value", "(", "'site'", ")", ",", "# name: This will override the server_name value for this installation.", "# Defaults to socket.gethostname()", "'name'", ":", "get_config_value", "(", "'name'", ")", ",", "# exclude_paths: Extending this allow you to ignore module prefixes", "# when sentry attempts to discover which function an error comes from", "'exclude_paths'", ":", "get_config_value", "(", "'exclude_paths'", ",", "(", ")", ")", ",", "# include_paths: For example, in Django this defaults to your list of", "# INSTALLED_APPS, and is used for drilling down where an exception is", "# located", "'include_paths'", ":", "get_config_value", "(", "'include_paths'", ",", "(", ")", ")", ",", "# list_max_length: The maximum number of items a list-like container", "# should store.", "'list_max_length'", ":", "get_config_value", "(", "'list_max_length'", ")", ",", "# string_max_length: The maximum characters of a string that should be", "# stored.", "'string_max_length'", ":", "get_config_value", "(", "'string_max_length'", ")", ",", "# auto_log_stacks: Should Raven automatically log frame stacks", "# (including locals) all calls as it would for exceptions.", "'auto_log_stacks'", ":", "get_config_value", "(", "'auto_log_stacks'", ")", ",", "# timeout: If supported, the timeout value for sending messages to", "# remote.", "'timeout'", ":", "get_config_value", "(", "'timeout'", ",", "1", ")", ",", "# processors: A list of processors to apply to events before sending", "# them to the Sentry server. 
Useful for sending additional global state", "# data or sanitizing data that you want to keep off of the server.", "'processors'", ":", "get_config_value", "(", "'processors'", ")", ",", "# dsn: Ensure the DSN is passed into the client", "'dsn'", ":", "dsn", "}", ")", "client", "=", "raven", ".", "Client", "(", "*", "*", "options", ")", "context", "=", "get_config_value", "(", "'context'", ")", "context_dict", "=", "{", "}", "if", "context", "is", "not", "None", ":", "for", "tag", "in", "context", ":", "try", ":", "tag_value", "=", "__grains__", "[", "tag", "]", "except", "KeyError", ":", "log", ".", "debug", "(", "'Sentry tag \\'%s\\' not found in grains.'", ",", "tag", ")", "continue", "if", "tag_value", ":", "context_dict", "[", "tag", "]", "=", "tag_value", "if", "context_dict", ":", "client", ".", "context", ".", "merge", "(", "{", "'tags'", ":", "context_dict", "}", ")", "try", ":", "handler", "=", "SentryHandler", "(", "client", ")", "exclude_patterns", "=", "get_config_value", "(", "'exclude_patterns'", ",", "None", ")", "if", "exclude_patterns", ":", "filter_regexes", "=", "[", "re", ".", "compile", "(", "pattern", ")", "for", "pattern", "in", "exclude_patterns", "]", "class", "FilterExcludedMessages", "(", "object", ")", ":", "@", "staticmethod", "def", "filter", "(", "record", ")", ":", "m", "=", "record", ".", "getMessage", "(", ")", "return", "not", "any", "(", "regex", ".", "search", "(", "m", ")", "for", "regex", "in", "filter_regexes", ")", "handler", ".", "addFilter", "(", "FilterExcludedMessages", "(", ")", ")", "handler", ".", "setLevel", "(", "LOG_LEVELS", "[", "get_config_value", "(", "'log_level'", ",", "'error'", ")", "]", ")", "return", "handler", "except", "ValueError", "as", "exc", ":", "log", ".", "debug", "(", "'Failed to setup the sentry logging handler'", ",", "exc_info", "=", "True", ")" ]
sets up the sentry handler
[ "sets", "up", "the", "sentry", "handler" ]
python
train
39.491379
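The exclude_patterns handling above uses a small logging-filter idiom that works with any handler, not just Raven's; isolated, it looks like this.

import logging
import re

def add_exclude_filter(handler, exclude_patterns):
    filter_regexes = [re.compile(pattern) for pattern in exclude_patterns]

    class FilterExcludedMessages(object):
        @staticmethod
        def filter(record):
            # drop any record whose rendered message matches one of the patterns
            m = record.getMessage()
            return not any(regex.search(m) for regex in filter_regexes)

    handler.addFilter(FilterExcludedMessages())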
pyrogram/pyrogram
pyrogram/client/methods/contacts/delete_contacts.py
https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/methods/contacts/delete_contacts.py#L27-L59
def delete_contacts( self, ids: List[int] ): """Use this method to delete contacts from your Telegram address book. Args: ids (List of ``int``): A list of unique identifiers for the target users. Can be an ID (int), a username (string) or phone number (string). Returns: True on success. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. """ contacts = [] for i in ids: try: input_user = self.resolve_peer(i) except PeerIdInvalid: continue else: if isinstance(input_user, types.InputPeerUser): contacts.append(input_user) return self.send( functions.contacts.DeleteContacts( id=contacts ) )
[ "def", "delete_contacts", "(", "self", ",", "ids", ":", "List", "[", "int", "]", ")", ":", "contacts", "=", "[", "]", "for", "i", "in", "ids", ":", "try", ":", "input_user", "=", "self", ".", "resolve_peer", "(", "i", ")", "except", "PeerIdInvalid", ":", "continue", "else", ":", "if", "isinstance", "(", "input_user", ",", "types", ".", "InputPeerUser", ")", ":", "contacts", ".", "append", "(", "input_user", ")", "return", "self", ".", "send", "(", "functions", ".", "contacts", ".", "DeleteContacts", "(", "id", "=", "contacts", ")", ")" ]
Use this method to delete contacts from your Telegram address book. Args: ids (List of ``int``): A list of unique identifiers for the target users. Can be an ID (int), a username (string) or phone number (string). Returns: True on success. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
[ "Use", "this", "method", "to", "delete", "contacts", "from", "your", "Telegram", "address", "book", "." ]
python
train
27.151515
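A hedged usage sketch of the method above; the session name, IDs and phone number are illustrative, and peers that cannot be resolved are skipped as in the implementation.

from pyrogram import Client

app = Client("my_account")  # illustrative session name
app.start()
# IDs (int), usernames (str) and phone numbers (str) may be mixed
app.delete_contacts([123456789, "some_username", "+10000000000"])
app.stop()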
pazz/alot
alot/crypto.py
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/crypto.py#L138-L155
def detached_signature_for(plaintext_str, keys): """ Signs the given plaintext string and returns the detached signature. A detached signature in GPG speak is a separate blob of data containing a signature for the specified plaintext. :param bytes plaintext_str: bytestring to sign :param keys: list of one or more key to sign with. :type keys: list[gpg.gpgme._gpgme_key] :returns: A list of signature and the signed blob of data :rtype: tuple[list[gpg.results.NewSignature], str] """ ctx = gpg.core.Context(armor=True) ctx.signers = keys (sigblob, sign_result) = ctx.sign(plaintext_str, mode=gpg.constants.SIG_MODE_DETACH) return sign_result.signatures, sigblob
[ "def", "detached_signature_for", "(", "plaintext_str", ",", "keys", ")", ":", "ctx", "=", "gpg", ".", "core", ".", "Context", "(", "armor", "=", "True", ")", "ctx", ".", "signers", "=", "keys", "(", "sigblob", ",", "sign_result", ")", "=", "ctx", ".", "sign", "(", "plaintext_str", ",", "mode", "=", "gpg", ".", "constants", ".", "SIG_MODE_DETACH", ")", "return", "sign_result", ".", "signatures", ",", "sigblob" ]
Signs the given plaintext string and returns the detached signature. A detached signature in GPG speak is a separate blob of data containing a signature for the specified plaintext. :param bytes plaintext_str: bytestring to sign :param keys: list of one or more key to sign with. :type keys: list[gpg.gpgme._gpgme_key] :returns: A list of signature and the signed blob of data :rtype: tuple[list[gpg.results.NewSignature], str]
[ "Signs", "the", "given", "plaintext", "string", "and", "returns", "the", "detached", "signature", "." ]
python
train
41.166667
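A sketch of driving the helper above, assuming the gpg bindings can look up a secret signing key by pattern; the address is illustrative.

import gpg

ctx = gpg.core.Context()
keys = list(ctx.keylist("someone@example.org", secret=True))
signatures, sigblob = detached_signature_for(b"mail body to sign", keys)
# 'signatures' describes the new signatures; 'sigblob' is the ASCII-armored
# detached signature, suitable for a separate MIME part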
HPAC/matchpy
matchpy/expressions/functions.py
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L115-L139
def rename_variables(expression: Expression, renaming: Dict[str, str]) -> Expression: """Rename the variables in the expression according to the given dictionary. Args: expression: The expression in which the variables are renamed. renaming: The renaming dictionary. Maps old variable names to new ones. Variable names not occuring in the dictionary are left unchanged. Returns: The expression with renamed variables. """ if isinstance(expression, Operation): if hasattr(expression, 'variable_name'): variable_name = renaming.get(expression.variable_name, expression.variable_name) return create_operation_expression( expression, [rename_variables(o, renaming) for o in op_iter(expression)], variable_name=variable_name ) operands = [rename_variables(o, renaming) for o in op_iter(expression)] return create_operation_expression(expression, operands) elif isinstance(expression, Expression): expression = expression.__copy__() expression.variable_name = renaming.get(expression.variable_name, expression.variable_name) return expression
[ "def", "rename_variables", "(", "expression", ":", "Expression", ",", "renaming", ":", "Dict", "[", "str", ",", "str", "]", ")", "->", "Expression", ":", "if", "isinstance", "(", "expression", ",", "Operation", ")", ":", "if", "hasattr", "(", "expression", ",", "'variable_name'", ")", ":", "variable_name", "=", "renaming", ".", "get", "(", "expression", ".", "variable_name", ",", "expression", ".", "variable_name", ")", "return", "create_operation_expression", "(", "expression", ",", "[", "rename_variables", "(", "o", ",", "renaming", ")", "for", "o", "in", "op_iter", "(", "expression", ")", "]", ",", "variable_name", "=", "variable_name", ")", "operands", "=", "[", "rename_variables", "(", "o", ",", "renaming", ")", "for", "o", "in", "op_iter", "(", "expression", ")", "]", "return", "create_operation_expression", "(", "expression", ",", "operands", ")", "elif", "isinstance", "(", "expression", ",", "Expression", ")", ":", "expression", "=", "expression", ".", "__copy__", "(", ")", "expression", ".", "variable_name", "=", "renaming", ".", "get", "(", "expression", ".", "variable_name", ",", "expression", ".", "variable_name", ")", "return", "expression" ]
Rename the variables in the expression according to the given dictionary. Args: expression: The expression in which the variables are renamed. renaming: The renaming dictionary. Maps old variable names to new ones. Variable names not occuring in the dictionary are left unchanged. Returns: The expression with renamed variables.
[ "Rename", "the", "variables", "in", "the", "expression", "according", "to", "the", "given", "dictionary", "." ]
python
train
47.76
wbond/asn1crypto
asn1crypto/keys.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/keys.py#L1042-L1063
def unwrap(self):
        """
        Unwraps an RSA public key into an RSAPublicKey object. Does not support
        DSA or EC public keys since they do not have an unwrapped form.

        :return:
            An RSAPublicKey object
        """

        if self.algorithm == 'rsa':
            return self['public_key'].parsed

        key_type = self.algorithm.upper()
        a_an = 'an' if key_type == 'EC' else 'a'
        raise ValueError(unwrap(
            '''
            Only RSA public keys may be unwrapped - this key is %s %s public
            key
            ''',
            a_an,
            key_type
        ))
[ "def", "unwrap", "(", "self", ")", ":", "if", "self", ".", "algorithm", "==", "'rsa'", ":", "return", "self", "[", "'public_key'", "]", ".", "parsed", "key_type", "=", "self", ".", "algorithm", ".", "upper", "(", ")", "a_an", "=", "'an'", "if", "key_type", "==", "'EC'", "else", "'a'", "raise", "ValueError", "(", "unwrap", "(", "'''\n Only RSA public keys may be unwrapped - this key is %s %s public\n key\n '''", ",", "a_an", ",", "key_type", ")", ")" ]
Unwraps an RSA public key into an RSAPublicKey object. Does not support DSA or EC public keys since they do not have an unwrapped form. :return: An RSAPublicKey object
[ "Unwraps", "an", "RSA", "public", "key", "into", "an", "RSAPublicKey", "object", ".", "Does", "not", "support", "DSA", "or", "EC", "public", "keys", "since", "they", "do", "not", "have", "an", "unwrapped", "form", "." ]
python
train
27.636364
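Usage sketch (not part of the original record): the DER file path is a placeholder; loading via PublicKeyInfo.load is the usual asn1crypto entry point for this class.

from asn1crypto import keys

with open('rsa_public_key.der', 'rb') as f:   # hypothetical input file
    pub_info = keys.PublicKeyInfo.load(f.read())

if pub_info.algorithm == 'rsa':
    rsa_key = pub_info.unwrap()               # RSAPublicKey object
    print(rsa_key['modulus'].native.bit_length(), 'bit modulus')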
tkf/rash
rash/indexer.py
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/indexer.py#L117-L125
def index_all(self):
        """
        Index all records under :attr:`record_path`.
        """
        self.logger.debug('Start indexing all records under: %s',
                          self.record_path)
        with self.db.connection():
            for json_path in sorted(self.find_record_files()):
                self.index_record(json_path)
[ "def", "index_all", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "'Start indexing all records under: %s'", ",", "self", ".", "record_path", ")", "with", "self", ".", "db", ".", "connection", "(", ")", ":", "for", "json_path", "in", "sorted", "(", "self", ".", "find_record_files", "(", ")", ")", ":", "self", ".", "index_record", "(", "json_path", ")" ]
Index all records under :attr:`record_path`.
[ "Index", "all", "records", "under", ":", "attr", ":", "record_path", "." ]
python
train
38
Erotemic/utool
utool/util_class.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_class.py#L413-L426
def decorate_class_method(func, classkey=None, skipmain=False): """ Will inject all decorated function as methods of classkey classkey is some identifying string, tuple, or object func can also be a tuple """ #import utool as ut global __CLASSTYPE_ATTRIBUTES__ assert classkey is not None, 'must specify classkey' #if not (skipmain and ut.get_caller_modname() == '__main__'): __CLASSTYPE_ATTRIBUTES__[classkey].append(func) return func
[ "def", "decorate_class_method", "(", "func", ",", "classkey", "=", "None", ",", "skipmain", "=", "False", ")", ":", "#import utool as ut", "global", "__CLASSTYPE_ATTRIBUTES__", "assert", "classkey", "is", "not", "None", ",", "'must specify classkey'", "#if not (skipmain and ut.get_caller_modname() == '__main__'):", "__CLASSTYPE_ATTRIBUTES__", "[", "classkey", "]", ".", "append", "(", "func", ")", "return", "func" ]
Will inject all decorated function as methods of classkey classkey is some identifying string, tuple, or object func can also be a tuple
[ "Will", "inject", "all", "decorated", "function", "as", "methods", "of", "classkey" ]
python
train
33.428571
Overboard/httpfind
httpfind/httpfind.py
https://github.com/Overboard/httpfind/blob/2c372daa66dcc7158e8bb179b29d8001d473bc4a/httpfind/httpfind.py#L157-L181
def cli(): """ Command line interface """ ch = logging.StreamHandler() ch.setFormatter(logging.Formatter( '%(asctime)s.%(msecs)03d %(levelname)s: %(message)s', datefmt="%Y-%m-%d %H:%M:%S" )) logger.addHandler(ch) import argparse parser = argparse.ArgumentParser(description="Search 'network' for hosts with a \ response to 'path' that matches 'filter'") parser.add_argument('network', help='IP address with optional mask, e.g. 192.168.0.0/24') parser.add_argument('-p', '--path', help='URL path at host, e.g. index.html', default='') parser.add_argument('-f', '--filter', help='Regular expression pattern for filter', dest='pattern', default='') parser.add_argument('-l', '--log', help='Enable logging', action='store_true') args = parser.parse_args() print('Scanning, please wait ...') result = survey(**vars(args)) print('Found {} match{}{}{} on {}'.format(len(result), 'es' if len(result)!=1 else '', ' for ' if args.pattern else '', args.pattern, args.network)) for x in result: print(x.hostname)
[ "def", "cli", "(", ")", ":", "ch", "=", "logging", ".", "StreamHandler", "(", ")", "ch", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "'%(asctime)s.%(msecs)03d %(levelname)s: %(message)s'", ",", "datefmt", "=", "\"%Y-%m-%d %H:%M:%S\"", ")", ")", "logger", ".", "addHandler", "(", "ch", ")", "import", "argparse", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Search 'network' for hosts with a \\\n response to 'path' that matches 'filter'\"", ")", "parser", ".", "add_argument", "(", "'network'", ",", "help", "=", "'IP address with optional mask, e.g. 192.168.0.0/24'", ")", "parser", ".", "add_argument", "(", "'-p'", ",", "'--path'", ",", "help", "=", "'URL path at host, e.g. index.html'", ",", "default", "=", "''", ")", "parser", ".", "add_argument", "(", "'-f'", ",", "'--filter'", ",", "help", "=", "'Regular expression pattern for filter'", ",", "dest", "=", "'pattern'", ",", "default", "=", "''", ")", "parser", ".", "add_argument", "(", "'-l'", ",", "'--log'", ",", "help", "=", "'Enable logging'", ",", "action", "=", "'store_true'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "print", "(", "'Scanning, please wait ...'", ")", "result", "=", "survey", "(", "*", "*", "vars", "(", "args", ")", ")", "print", "(", "'Found {} match{}{}{} on {}'", ".", "format", "(", "len", "(", "result", ")", ",", "'es'", "if", "len", "(", "result", ")", "!=", "1", "else", "''", ",", "' for '", "if", "args", ".", "pattern", "else", "''", ",", "args", ".", "pattern", ",", "args", ".", "network", ")", ")", "for", "x", "in", "result", ":", "print", "(", "x", ".", "hostname", ")" ]
Command line interface
[ "Command", "line", "interface" ]
python
train
43.72
calmjs/calmjs.parse
src/calmjs/parse/parsers/es5.py
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/parsers/es5.py#L342-L351
def p_element_list(self, p): """element_list : elision_opt assignment_expr | element_list COMMA elision_opt assignment_expr """ if len(p) == 3: p[0] = p[1] + [p[2]] else: p[1].extend(p[3]) p[1].append(p[4]) p[0] = p[1]
[ "def", "p_element_list", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "3", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "[", "p", "[", "2", "]", "]", "else", ":", "p", "[", "1", "]", ".", "extend", "(", "p", "[", "3", "]", ")", "p", "[", "1", "]", ".", "append", "(", "p", "[", "4", "]", ")", "p", "[", "0", "]", "=", "p", "[", "1", "]" ]
element_list : elision_opt assignment_expr | element_list COMMA elision_opt assignment_expr
[ "element_list", ":", "elision_opt", "assignment_expr", "|", "element_list", "COMMA", "elision_opt", "assignment_expr" ]
python
train
31.3
fedora-infra/fmn.rules
fmn/rules/pagure.py
https://github.com/fedora-infra/fmn.rules/blob/f9ec790619fcc8b41803077c4dec094e5127fc24/fmn/rules/pagure.py#L45-L68
def pagure_specific_project_tag_filter(config, message, tags=None, *args, **kw): """ Particular pagure project tags Adding this rule allows you to get notifications for one or more `pagure.io <https://pagure.io>`_ projects having the specified tags. Specify multiple tags by separating them with a comma ','. """ if not pagure_catchall(config, message): return False tags = tags.split(',') if tags else [] tags = [tag.strip() for tag in tags if tag and tag.strip()] project_tags = set() project_tags.update(message.get('project', {}).get('tags', [])) project_tags.update( message.get('pullrequest', {}).get('project', {}).get('tags', [])) project_tags.update( message.get('commit', {}).get('repo', {}).get('tags', [])) valid = len(project_tags.intersection(set(tags))) > 0 return valid
[ "def", "pagure_specific_project_tag_filter", "(", "config", ",", "message", ",", "tags", "=", "None", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "if", "not", "pagure_catchall", "(", "config", ",", "message", ")", ":", "return", "False", "tags", "=", "tags", ".", "split", "(", "','", ")", "if", "tags", "else", "[", "]", "tags", "=", "[", "tag", ".", "strip", "(", ")", "for", "tag", "in", "tags", "if", "tag", "and", "tag", ".", "strip", "(", ")", "]", "project_tags", "=", "set", "(", ")", "project_tags", ".", "update", "(", "message", ".", "get", "(", "'project'", ",", "{", "}", ")", ".", "get", "(", "'tags'", ",", "[", "]", ")", ")", "project_tags", ".", "update", "(", "message", ".", "get", "(", "'pullrequest'", ",", "{", "}", ")", ".", "get", "(", "'project'", ",", "{", "}", ")", ".", "get", "(", "'tags'", ",", "[", "]", ")", ")", "project_tags", ".", "update", "(", "message", ".", "get", "(", "'commit'", ",", "{", "}", ")", ".", "get", "(", "'repo'", ",", "{", "}", ")", ".", "get", "(", "'tags'", ",", "[", "]", ")", ")", "valid", "=", "len", "(", "project_tags", ".", "intersection", "(", "set", "(", "tags", ")", ")", ")", ">", "0", "return", "valid" ]
Particular pagure project tags Adding this rule allows you to get notifications for one or more `pagure.io <https://pagure.io>`_ projects having the specified tags. Specify multiple tags by separating them with a comma ','.
[ "Particular", "pagure", "project", "tags" ]
python
train
35.5
numenta/nupic
examples/network/network_api_demo.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/examples/network/network_api_demo.py#L84-L94
def createEncoder():
  """Create the encoder instance for our test and return it."""
  consumption_encoder = ScalarEncoder(21, 0.0, 100.0, n=50, name="consumption",
                                      clipInput=True)
  time_encoder = DateEncoder(timeOfDay=(21, 9.5), name="timestamp_timeOfDay")

  encoder = MultiEncoder()
  encoder.addEncoder("consumption", consumption_encoder)
  encoder.addEncoder("timestamp", time_encoder)

  return encoder
[ "def", "createEncoder", "(", ")", ":", "consumption_encoder", "=", "ScalarEncoder", "(", "21", ",", "0.0", ",", "100.0", ",", "n", "=", "50", ",", "name", "=", "\"consumption\"", ",", "clipInput", "=", "True", ")", "time_encoder", "=", "DateEncoder", "(", "timeOfDay", "=", "(", "21", ",", "9.5", ")", ",", "name", "=", "\"timestamp_timeOfDay\"", ")", "encoder", "=", "MultiEncoder", "(", ")", "encoder", ".", "addEncoder", "(", "\"consumption\"", ",", "consumption_encoder", ")", "encoder", ".", "addEncoder", "(", "\"timestamp\"", ",", "time_encoder", ")", "return", "encoder" ]
Create the encoder instance for our test and return it.
[ "Create", "the", "encoder", "instance", "for", "our", "test", "and", "return", "it", "." ]
python
valid
36.818182
CityOfZion/neo-python
neo/Implementations/Wallets/peewee/UserWallet.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Implementations/Wallets/peewee/UserWallet.py#L83-L97
def Create(path, password, generate_default_key=True):
        """
        Create a new user wallet.

        Args:
            path (str): A path indicating where to create or open the wallet e.g. "/Wallets/mywallet".
            password (str): a 10 characters minimum password to secure the wallet with.

        Returns:
            UserWallet: a UserWallet instance.
        """
        wallet = UserWallet(path=path, passwordKey=password, create=True)
        if generate_default_key:
            wallet.CreateKey()
        return wallet
[ "def", "Create", "(", "path", ",", "password", ",", "generate_default_key", "=", "True", ")", ":", "wallet", "=", "UserWallet", "(", "path", "=", "path", ",", "passwordKey", "=", "password", ",", "create", "=", "True", ")", "if", "generate_default_key", ":", "wallet", ".", "CreateKey", "(", ")", "return", "wallet" ]
Create a new user wallet. Args: path (str): A path indicating where to create or open the wallet e.g. "/Wallets/mywallet". password (str): a 10 characters minimum password to secure the wallet with. Returns: UserWallet: a UserWallet instance.
[ "Create", "a", "new", "user", "wallet", "." ]
python
train
35.333333
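Usage sketch (not part of the original record): the wallet path and password are placeholders, and the surrounding neo-python environment (protocol settings, database backend) is assumed to be configured already; only calls named in the record are used.

from neo.Implementations.Wallets.peewee.UserWallet import UserWallet

wallet = UserWallet.Create(path='/tmp/mywallet.db3',     # placeholder location
                           password='0123456789abc')     # >= 10 characters, per the docstring
extra_key = wallet.CreateKey()                           # add another key pair if needed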
SheffieldML/GPy
GPy/kern/src/add.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/kern/src/add.py#L46-L57
def to_dict(self):
        """
        Convert the object into a json serializable dictionary.

        Note: It uses the private method _save_to_input_dict of the parent.

        :return dict: json serializable dictionary containing the needed information to instantiate the object
        """

        input_dict = super(Add, self)._save_to_input_dict()
        input_dict["class"] = str("GPy.kern.Add")
        return input_dict
[ "def", "to_dict", "(", "self", ")", ":", "input_dict", "=", "super", "(", "Add", ",", "self", ")", ".", "_save_to_input_dict", "(", ")", "input_dict", "[", "\"class\"", "]", "=", "str", "(", "\"GPy.kern.Add\"", ")", "return", "input_dict" ]
Convert the object into a json serializable dictionary. Note: It uses the private method _save_to_input_dict of the parent. :return dict: json serializable dictionary containing the needed information to instantiate the object
[ "Convert", "the", "object", "into", "a", "json", "serializable", "dictionary", "." ]
python
train
35.083333
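Usage sketch (not part of the original record): adding two kernels with + is the usual way to obtain a GPy.kern.Add instance; the particular kernels chosen here are arbitrary.

import GPy

# adding two kernels yields a GPy.kern.Add instance
kern = GPy.kern.RBF(input_dim=1) + GPy.kern.RBF(input_dim=1, lengthscale=2.0)
spec = kern.to_dict()
print(spec['class'])   # -> 'GPy.kern.Add'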
Rediker-Software/doac
doac/utils.py
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/utils.py#L1-L9
def prune_old_authorization_codes():
    """
    Removes all unused and expired authorization codes from the database.
    """

    from .compat import now
    from .models import AuthorizationCode

    AuthorizationCode.objects.with_expiration_before(now()).delete()
[ "def", "prune_old_authorization_codes", "(", ")", ":", "from", ".", "compat", "import", "now", "from", ".", "models", "import", "AuthorizationCode", "AuthorizationCode", ".", "objects", ".", "with_expiration_before", "(", "now", "(", ")", ")", ".", "delete", "(", ")" ]
Removes all unused and expired authorization codes from the database.
[ "Removes", "all", "unused", "and", "expired", "authorization", "codes", "from", "the", "database", "." ]
python
train
29.222222
vertexproject/synapse
synapse/cortex.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/cortex.py#L487-L494
async def storm(self, text, opts=None):
        '''
        Evaluate a storm query and yield result messages.
        Yields:
            ((str,dict)): Storm messages.
        '''
        async for mesg in self.cell.streamstorm(text, opts, user=self.user):
            yield mesg
[ "async", "def", "storm", "(", "self", ",", "text", ",", "opts", "=", "None", ")", ":", "async", "for", "mesg", "in", "self", ".", "cell", ".", "streamstorm", "(", "text", ",", "opts", ",", "user", "=", "self", ".", "user", ")", ":", "yield", "mesg" ]
Evaluate a storm query and yield result messages. Yields: ((str,dict)): Storm messages.
[ "Evaluate", "a", "storm", "query", "and", "yield", "result", "messages", ".", "Yields", ":", "((", "str", "dict", "))", ":", "Storm", "messages", "." ]
python
train
34
alphagov/notifications-python-client
notifications_python_client/authentication.py
https://github.com/alphagov/notifications-python-client/blob/b397aed212acf15b1b1e049da2654d9a230f72d2/notifications_python_client/authentication.py#L59-L77
def get_token_issuer(token):
    """
    Issuer of a token is the identifier used to recover the secret
    Need to extract this from token to ensure we can proceed to the signature validation stage
    Does not check validity of the token
    :param token: signed JWT token
    :return issuer: iss field of the JWT token
    :raises TokenIssuerError: if iss field not present
    :raises TokenDecodeError: if token does not conform to JWT spec
    """
    try:
        unverified = decode_token(token)

        if 'iss' not in unverified:
            raise TokenIssuerError

        return unverified.get('iss')
    except jwt.DecodeError:
        raise TokenDecodeError
[ "def", "get_token_issuer", "(", "token", ")", ":", "try", ":", "unverified", "=", "decode_token", "(", "token", ")", "if", "'iss'", "not", "in", "unverified", ":", "raise", "TokenIssuerError", "return", "unverified", ".", "get", "(", "'iss'", ")", "except", "jwt", ".", "DecodeError", ":", "raise", "TokenDecodeError" ]
Issuer of a token is the identifier used to recover the secret Need to extract this from token to ensure we can proceed to the signature validation stage Does not check validity of the token :param token: signed JWT token :return issuer: iss field of the JWT token :raises TokenIssuerError: if iss field not present :raises TokenDecodeError: if token does not conform to JWT spec
[ "Issuer", "of", "a", "token", "is", "the", "identifier", "used", "to", "recover", "the", "secret", "Need", "to", "extract", "this", "from", "token", "to", "ensure", "we", "can", "proceed", "to", "the", "signature", "validation", "stage", "Does", "not", "check", "validity", "of", "the", "token", ":", "param", "token", ":", "signed", "JWT", "token", ":", "return", "issuer", ":", "iss", "field", "of", "the", "JWT", "token", ":", "raises", "TokenIssuerError", ":", "if", "iss", "field", "not", "present", ":", "raises", "TokenDecodeError", ":", "if", "token", "does", "not", "conform", "to", "JWT", "spec" ]
python
train
34.368421
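Usage sketch (not part of the original record): the token string is a placeholder, and the import location of the two error classes is an assumption; the error handling mirrors what the docstring describes.

from notifications_python_client.authentication import get_token_issuer
# error classes assumed to live in the package's errors module
from notifications_python_client.errors import TokenDecodeError, TokenIssuerError

incoming = '<signed JWT taken from an Authorization header>'   # placeholder
try:
    service_id = get_token_issuer(incoming)
except TokenIssuerError:
    service_id = None   # token carried no 'iss' claim
except TokenDecodeError:
    service_id = None   # not a well-formed JWT at all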
empirical-org/Quill-NLP-Tools-and-Datasets
utils/qfragment/qfragment/sva_rb2.py
https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/qfragment/sva_rb2.py#L273-L308
def get_verb_phrases(sentence_doc): """ Returns an object like, [(1), (5,6,7)] where this means 2 verb phrases. a single verb at index 1, another verb phrase 5,6,7. - Adverbs are not included. - Infinitive phrases (and verb phrases that are subsets of infinitive phrases) are not included """ pattern = r'<VERB>*<ADV>*<VERB>+' # r'<VERB>?<ADV>*<VERB>+' is suggested by textacy site verb_phrases = textacy.extract.pos_regex_matches(sentence_doc, pattern) result = [] # [(1), (5,6,7)] => 2 verb phrases. a single verb at index 1, another verb phrase 5,6,7 for vp in verb_phrases: word_numbers = [] # return the index of 'could have been happily eating' from 'She could have been happily eating chowder' first_word = vp.start x = first_word if len(vp) > 1: for verb_or_adverb in vp: # filter out adverbs if not verb_or_adverb.pos_ == 'ADV': word_numbers.append(x) x += 1 else: word_numbers.append(first_word) # filter out infinitive phrases if ( (word_numbers[0] - 1) < 0) or (sentence_doc[word_numbers[0] - 1].text.lower() != 'to'): result.append(word_numbers) return result
[ "def", "get_verb_phrases", "(", "sentence_doc", ")", ":", "pattern", "=", "r'<VERB>*<ADV>*<VERB>+'", "# r'<VERB>?<ADV>*<VERB>+' is suggested by textacy site", "verb_phrases", "=", "textacy", ".", "extract", ".", "pos_regex_matches", "(", "sentence_doc", ",", "pattern", ")", "result", "=", "[", "]", "# [(1), (5,6,7)] => 2 verb phrases. a single verb at index 1, another verb phrase 5,6,7", "for", "vp", "in", "verb_phrases", ":", "word_numbers", "=", "[", "]", "# return the index of 'could have been happily eating' from 'She could have been happily eating chowder'", "first_word", "=", "vp", ".", "start", "x", "=", "first_word", "if", "len", "(", "vp", ")", ">", "1", ":", "for", "verb_or_adverb", "in", "vp", ":", "# filter out adverbs", "if", "not", "verb_or_adverb", ".", "pos_", "==", "'ADV'", ":", "word_numbers", ".", "append", "(", "x", ")", "x", "+=", "1", "else", ":", "word_numbers", ".", "append", "(", "first_word", ")", "# filter out infinitive phrases", "if", "(", "(", "word_numbers", "[", "0", "]", "-", "1", ")", "<", "0", ")", "or", "(", "sentence_doc", "[", "word_numbers", "[", "0", "]", "-", "1", "]", ".", "text", ".", "lower", "(", ")", "!=", "'to'", ")", ":", "result", ".", "append", "(", "word_numbers", ")", "return", "result" ]
Returns an object like, [(1), (5,6,7)] where this means 2 verb phrases. a single verb at index 1, another verb phrase 5,6,7. - Adverbs are not included. - Infinitive phrases (and verb phrases that are subsets of infinitive phrases) are not included
[ "Returns", "an", "object", "like", "[", "(", "1", ")", "(", "5", "6", "7", ")", "]", "where", "this", "means", "2", "verb", "phrases", ".", "a", "single", "verb", "at", "index", "1", "another", "verb", "phrase", "5", "6", "7", ".", "-", "Adverbs", "are", "not", "included", ".", "-", "Infinitive", "phrases", "(", "and", "verb", "phrases", "that", "are", "subsets", "of", "infinitive", "phrases", ")", "are", "not", "included" ]
python
train
36.25
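Usage sketch (not part of the original record): it drives the helper with the sentence its own comments use; the spaCy model name is an assumption for a textacy-era environment, and the expected indices follow from the docstring (the adverb 'happily' at position 4 is skipped).

import spacy

nlp = spacy.load('en_core_web_sm')                       # assumed model name
doc = nlp(u'She could have been happily eating chowder.')
print(get_verb_phrases(doc))                             # expected: [[1, 2, 3, 5]]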
klen/pylama
pylama/lint/pylama_pydocstyle.py
https://github.com/klen/pylama/blob/f436ccc6b55b33381a295ded753e467953cf4379/pylama/lint/pylama_pydocstyle.py#L20-L37
def run(path, code=None, params=None, **meta): """pydocstyle code checking. :return list: List of errors. """ if 'ignore_decorators' in params: ignore_decorators = params['ignore_decorators'] else: ignore_decorators = None check_source_args = (code, path, ignore_decorators) if THIRD_ARG else (code, path) return [{ 'lnum': e.line, # Remove colon after error code ("D403: ..." => "D403 ..."). 'text': (e.message[0:4] + e.message[5:] if e.message[4] == ':' else e.message), 'type': 'D', 'number': e.code } for e in PyDocChecker().check_source(*check_source_args)]
[ "def", "run", "(", "path", ",", "code", "=", "None", ",", "params", "=", "None", ",", "*", "*", "meta", ")", ":", "if", "'ignore_decorators'", "in", "params", ":", "ignore_decorators", "=", "params", "[", "'ignore_decorators'", "]", "else", ":", "ignore_decorators", "=", "None", "check_source_args", "=", "(", "code", ",", "path", ",", "ignore_decorators", ")", "if", "THIRD_ARG", "else", "(", "code", ",", "path", ")", "return", "[", "{", "'lnum'", ":", "e", ".", "line", ",", "# Remove colon after error code (\"D403: ...\" => \"D403 ...\").", "'text'", ":", "(", "e", ".", "message", "[", "0", ":", "4", "]", "+", "e", ".", "message", "[", "5", ":", "]", "if", "e", ".", "message", "[", "4", "]", "==", "':'", "else", "e", ".", "message", ")", ",", "'type'", ":", "'D'", ",", "'number'", ":", "e", ".", "code", "}", "for", "e", "in", "PyDocChecker", "(", ")", ".", "check_source", "(", "*", "check_source_args", ")", "]" ]
pydocstyle code checking. :return list: List of errors.
[ "pydocstyle", "code", "checking", "." ]
python
train
39.722222
inasafe/inasafe
safe/definitions/utilities.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/definitions/utilities.py#L63-L77
def hazards_for_layer(layer_geometry_key):
    """Get hazard categories form layer_geometry_key.

    :param layer_geometry_key: The geometry id
    :type layer_geometry_key: str

    :returns: List of hazard
    :rtype: list
    """
    result = []
    for hazard in hazard_all:
        if layer_geometry_key in hazard.get('allowed_geometries'):
            result.append(hazard)

    return sorted(result, key=lambda k: k['key'])
[ "def", "hazards_for_layer", "(", "layer_geometry_key", ")", ":", "result", "=", "[", "]", "for", "hazard", "in", "hazard_all", ":", "if", "layer_geometry_key", "in", "hazard", ".", "get", "(", "'allowed_geometries'", ")", ":", "result", ".", "append", "(", "hazard", ")", "return", "sorted", "(", "result", ",", "key", "=", "lambda", "k", ":", "k", "[", "'key'", "]", ")" ]
Get hazard categories form layer_geometry_key. :param layer_geometry_key: The geometry id :type layer_geometry_key: str :returns: List of hazard :rtype: list
[ "Get", "hazard", "categories", "form", "layer_geometry_key", "." ]
python
train
27.8
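Usage sketch (not part of the original record): 'polygon' is assumed to be one of the geometry keys used elsewhere in safe.definitions; any other supported key would work the same way.

# list the hazard definitions that can be used with polygon layers
for hazard in hazards_for_layer('polygon'):
    print(hazard['key'])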
theno/utlz
fabfile.py
https://github.com/theno/utlz/blob/bf7d2b53f3e0d35c6f8ded81f3f774a74fcd3389/fabfile.py#L10-L19
def flo(string):
    '''Return the string given by param formatted with the callers locals.'''
    callers_locals = {}
    frame = inspect.currentframe()
    try:
        outerframe = frame.f_back
        callers_locals = outerframe.f_locals
    finally:
        del frame
    return string.format(**callers_locals)
[ "def", "flo", "(", "string", ")", ":", "callers_locals", "=", "{", "}", "frame", "=", "inspect", ".", "currentframe", "(", ")", "try", ":", "outerframe", "=", "frame", ".", "f_back", "callers_locals", "=", "outerframe", ".", "f_locals", "finally", ":", "del", "frame", "return", "string", ".", "format", "(", "*", "*", "callers_locals", ")" ]
Return the string given by param formatted with the callers locals.
[ "Return", "the", "string", "given", "by", "param", "formatted", "with", "the", "callers", "locals", "." ]
python
train
30.6
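Usage sketch (not part of the original record): because flo() reads the caller's locals, the variable only has to exist in the calling scope.

name = 'World'
greeting = flo('Hello, {name}!')   # -> 'Hello, World!'
print(greeting)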
openego/eTraGo
etrago/tools/utilities.py
https://github.com/openego/eTraGo/blob/2a8fc6d4368d0e9abe6fe0d0c39baf66ea0126b9/etrago/tools/utilities.py#L311-L395
def clip_foreign(network): """ Delete all components and timelines located outside of Germany. Add transborder flows divided by country of origin as network.foreign_trade. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA Returns ------- network : :class:`pypsa.Network Overall container of PyPSA """ # get foreign buses by country foreign_buses = network.buses[network.buses.country_code != 'DE'] network.buses = network.buses.drop( network.buses.loc[foreign_buses.index].index) # identify transborder lines (one bus foreign, one bus not) and the country # it is coming from """transborder_lines = pd.DataFrame(index=network.lines[ ((network.lines['bus0'].isin(network.buses.index) == False) & (network.lines['bus1'].isin(network.buses.index) == True)) | ((network.lines['bus0'].isin(network.buses.index) == True) & (network.lines['bus1'].isin(network.buses.index) == False))].index) transborder_lines['bus0'] = network.lines['bus0'] transborder_lines['bus1'] = network.lines['bus1'] transborder_lines['country'] = "" for i in range(0, len(transborder_lines)): if transborder_lines.iloc[i, 0] in foreign_buses.index: transborder_lines['country'][i] = foreign_buses[str( transborder_lines.iloc[i, 0])] else: transborder_lines['country'][i] = foreign_buses[str( transborder_lines.iloc[i, 1])] # identify amount of flows per line and group to get flow per country transborder_flows = network.lines_t.p0[transborder_lines.index] for i in transborder_flows.columns: if network.lines.loc[str(i)]['bus1'] in foreign_buses.index: transborder_flows.loc[:, str( i)] = transborder_flows.loc[:, str(i)]*-1 network.foreign_trade = transborder_flows.\ groupby(transborder_lines['country'], axis=1).sum()""" # drop foreign components network.lines = network.lines.drop(network.lines[ (network.lines['bus0'].isin(network.buses.index) == False) | (network.lines['bus1'].isin(network.buses.index) == False)].index) network.links = network.links.drop(network.links[ (network.links['bus0'].isin(network.buses.index) == False) | (network.links['bus1'].isin(network.buses.index) == False)].index) network.transformers = network.transformers.drop(network.transformers[ (network.transformers['bus0'].isin(network.buses.index) == False) | (network.transformers['bus1'].isin(network. buses.index) == False)].index) network.generators = network.generators.drop(network.generators[ (network.generators['bus'].isin(network.buses.index) == False)].index) network.loads = network.loads.drop(network.loads[ (network.loads['bus'].isin(network.buses.index) == False)].index) network.storage_units = network.storage_units.drop(network.storage_units[ (network.storage_units['bus'].isin(network. buses.index) == False)].index) components = ['loads', 'generators', 'lines', 'buses', 'transformers', 'links'] for g in components: # loads_t h = g + '_t' nw = getattr(network, h) # network.loads_t for i in nw.keys(): # network.loads_t.p cols = [j for j in getattr( nw, i).columns if j not in getattr(network, g).index] for k in cols: del getattr(nw, i)[k] return network
[ "def", "clip_foreign", "(", "network", ")", ":", "# get foreign buses by country", "foreign_buses", "=", "network", ".", "buses", "[", "network", ".", "buses", ".", "country_code", "!=", "'DE'", "]", "network", ".", "buses", "=", "network", ".", "buses", ".", "drop", "(", "network", ".", "buses", ".", "loc", "[", "foreign_buses", ".", "index", "]", ".", "index", ")", "# identify transborder lines (one bus foreign, one bus not) and the country", "# it is coming from", "\"\"\"transborder_lines = pd.DataFrame(index=network.lines[\n ((network.lines['bus0'].isin(network.buses.index) == False) &\n (network.lines['bus1'].isin(network.buses.index) == True)) |\n ((network.lines['bus0'].isin(network.buses.index) == True) &\n (network.lines['bus1'].isin(network.buses.index) == False))].index)\n transborder_lines['bus0'] = network.lines['bus0']\n transborder_lines['bus1'] = network.lines['bus1']\n transborder_lines['country'] = \"\"\n for i in range(0, len(transborder_lines)):\n if transborder_lines.iloc[i, 0] in foreign_buses.index:\n transborder_lines['country'][i] = foreign_buses[str(\n transborder_lines.iloc[i, 0])]\n else:\n transborder_lines['country'][i] = foreign_buses[str(\n transborder_lines.iloc[i, 1])]\n\n # identify amount of flows per line and group to get flow per country\n transborder_flows = network.lines_t.p0[transborder_lines.index]\n for i in transborder_flows.columns:\n if network.lines.loc[str(i)]['bus1'] in foreign_buses.index:\n transborder_flows.loc[:, str(\n i)] = transborder_flows.loc[:, str(i)]*-1\n\n network.foreign_trade = transborder_flows.\\\n groupby(transborder_lines['country'], axis=1).sum()\"\"\"", "# drop foreign components", "network", ".", "lines", "=", "network", ".", "lines", ".", "drop", "(", "network", ".", "lines", "[", "(", "network", ".", "lines", "[", "'bus0'", "]", ".", "isin", "(", "network", ".", "buses", ".", "index", ")", "==", "False", ")", "|", "(", "network", ".", "lines", "[", "'bus1'", "]", ".", "isin", "(", "network", ".", "buses", ".", "index", ")", "==", "False", ")", "]", ".", "index", ")", "network", ".", "links", "=", "network", ".", "links", ".", "drop", "(", "network", ".", "links", "[", "(", "network", ".", "links", "[", "'bus0'", "]", ".", "isin", "(", "network", ".", "buses", ".", "index", ")", "==", "False", ")", "|", "(", "network", ".", "links", "[", "'bus1'", "]", ".", "isin", "(", "network", ".", "buses", ".", "index", ")", "==", "False", ")", "]", ".", "index", ")", "network", ".", "transformers", "=", "network", ".", "transformers", ".", "drop", "(", "network", ".", "transformers", "[", "(", "network", ".", "transformers", "[", "'bus0'", "]", ".", "isin", "(", "network", ".", "buses", ".", "index", ")", "==", "False", ")", "|", "(", "network", ".", "transformers", "[", "'bus1'", "]", ".", "isin", "(", "network", ".", "buses", ".", "index", ")", "==", "False", ")", "]", ".", "index", ")", "network", ".", "generators", "=", "network", ".", "generators", ".", "drop", "(", "network", ".", "generators", "[", "(", "network", ".", "generators", "[", "'bus'", "]", ".", "isin", "(", "network", ".", "buses", ".", "index", ")", "==", "False", ")", "]", ".", "index", ")", "network", ".", "loads", "=", "network", ".", "loads", ".", "drop", "(", "network", ".", "loads", "[", "(", "network", ".", "loads", "[", "'bus'", "]", ".", "isin", "(", "network", ".", "buses", ".", "index", ")", "==", "False", ")", "]", ".", "index", ")", "network", ".", "storage_units", "=", "network", ".", "storage_units", ".", "drop", "(", "network", ".", 
"storage_units", "[", "(", "network", ".", "storage_units", "[", "'bus'", "]", ".", "isin", "(", "network", ".", "buses", ".", "index", ")", "==", "False", ")", "]", ".", "index", ")", "components", "=", "[", "'loads'", ",", "'generators'", ",", "'lines'", ",", "'buses'", ",", "'transformers'", ",", "'links'", "]", "for", "g", "in", "components", ":", "# loads_t", "h", "=", "g", "+", "'_t'", "nw", "=", "getattr", "(", "network", ",", "h", ")", "# network.loads_t", "for", "i", "in", "nw", ".", "keys", "(", ")", ":", "# network.loads_t.p", "cols", "=", "[", "j", "for", "j", "in", "getattr", "(", "nw", ",", "i", ")", ".", "columns", "if", "j", "not", "in", "getattr", "(", "network", ",", "g", ")", ".", "index", "]", "for", "k", "in", "cols", ":", "del", "getattr", "(", "nw", ",", "i", ")", "[", "k", "]", "return", "network" ]
Delete all components and timelines located outside of Germany. Add transborder flows divided by country of origin as network.foreign_trade. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA Returns ------- network : :class:`pypsa.Network Overall container of PyPSA
[ "Delete", "all", "components", "and", "timelines", "located", "outside", "of", "Germany", ".", "Add", "transborder", "flows", "divided", "by", "country", "of", "origin", "as", "network", ".", "foreign_trade", "." ]
python
train
41.929412
jilljenn/tryalgo
tryalgo/laser_mirrors.py
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/laser_mirrors.py#L57-L80
def solve(succ, orien, i, direc): """Can a laser leaving mirror i in direction direc reach exit ? :param i: mirror index :param direc: direction leaving mirror i :param orient: orient[i]=orientation of mirror i :param succ: succ[i][direc]=succ mirror reached when leaving i in direction direc """ assert orien[i] is not None j = succ[i][direc] if j is None: # basic case return False if j == len(orien) - 1: return True if orien[j] is None: # try both orientations for x in [0, 1]: orien[j] = x if solve(succ, orien, j, reflex[direc][x]): return True orien[j] = None return False else: return solve(succ, orien, j, reflex[direc][orien[j]])
[ "def", "solve", "(", "succ", ",", "orien", ",", "i", ",", "direc", ")", ":", "assert", "orien", "[", "i", "]", "is", "not", "None", "j", "=", "succ", "[", "i", "]", "[", "direc", "]", "if", "j", "is", "None", ":", "# basic case", "return", "False", "if", "j", "==", "len", "(", "orien", ")", "-", "1", ":", "return", "True", "if", "orien", "[", "j", "]", "is", "None", ":", "# try both orientations", "for", "x", "in", "[", "0", ",", "1", "]", ":", "orien", "[", "j", "]", "=", "x", "if", "solve", "(", "succ", ",", "orien", ",", "j", ",", "reflex", "[", "direc", "]", "[", "x", "]", ")", ":", "return", "True", "orien", "[", "j", "]", "=", "None", "return", "False", "else", ":", "return", "solve", "(", "succ", ",", "orien", ",", "j", ",", "reflex", "[", "direc", "]", "[", "orien", "[", "j", "]", "]", ")" ]
Can a laser leaving mirror i in direction direc reach exit ? :param i: mirror index :param direc: direction leaving mirror i :param orient: orient[i]=orientation of mirror i :param succ: succ[i][direc]=succ mirror reached when leaving i in direction direc
[ "Can", "a", "laser", "leaving", "mirror", "i", "in", "direction", "direc", "reach", "exit", "?" ]
python
train
32.541667
Stewori/pytypes
pytypes/type_util.py
https://github.com/Stewori/pytypes/blob/b814d38709e84c0e0825caf8b721c20eb5a8ab3b/pytypes/type_util.py#L1965-L1991
def generator_checker_py2(gen, gen_type, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): """Builds a typechecking wrapper around a Python 2 style generator object. """ initialized = False sn = None while True: a = gen.send(sn) if initialized or not a is None: if not gen_type.__args__[0] is Any and \ not _isinstance(a, gen_type.__args__[0], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): tpa = deep_type(a) msg = _make_generator_error_message(tpa, gen, gen_type.__args__[0], 'has incompatible yield type') _raise_typecheck_error(msg, True, a, tpa, gen_type.__args__[0]) # raise pytypes.ReturnTypeError(_make_generator_error_message(tpa, gen, # gen_type.__args__[0], 'has incompatible yield type')) initialized = True sn = yield a if not gen_type.__args__[1] is Any and \ not _isinstance(sn, gen_type.__args__[1], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): tpsn = deep_type(sn) msg = _make_generator_error_message(tpsn, gen, gen_type.__args__[1], 'has incompatible send type') _raise_typecheck_error(msg, False, sn, tpsn, gen_type.__args__[1])
[ "def", "generator_checker_py2", "(", "gen", ",", "gen_type", ",", "bound_Generic", ",", "bound_typevars", ",", "bound_typevars_readonly", ",", "follow_fwd_refs", ",", "_recursion_check", ")", ":", "initialized", "=", "False", "sn", "=", "None", "while", "True", ":", "a", "=", "gen", ".", "send", "(", "sn", ")", "if", "initialized", "or", "not", "a", "is", "None", ":", "if", "not", "gen_type", ".", "__args__", "[", "0", "]", "is", "Any", "and", "not", "_isinstance", "(", "a", ",", "gen_type", ".", "__args__", "[", "0", "]", ",", "bound_Generic", ",", "bound_typevars", ",", "bound_typevars_readonly", ",", "follow_fwd_refs", ",", "_recursion_check", ")", ":", "tpa", "=", "deep_type", "(", "a", ")", "msg", "=", "_make_generator_error_message", "(", "tpa", ",", "gen", ",", "gen_type", ".", "__args__", "[", "0", "]", ",", "'has incompatible yield type'", ")", "_raise_typecheck_error", "(", "msg", ",", "True", ",", "a", ",", "tpa", ",", "gen_type", ".", "__args__", "[", "0", "]", ")", "# \t\t\t\traise pytypes.ReturnTypeError(_make_generator_error_message(tpa, gen,", "# \t\t\t\t\t\tgen_type.__args__[0], 'has incompatible yield type'))", "initialized", "=", "True", "sn", "=", "yield", "a", "if", "not", "gen_type", ".", "__args__", "[", "1", "]", "is", "Any", "and", "not", "_isinstance", "(", "sn", ",", "gen_type", ".", "__args__", "[", "1", "]", ",", "bound_Generic", ",", "bound_typevars", ",", "bound_typevars_readonly", ",", "follow_fwd_refs", ",", "_recursion_check", ")", ":", "tpsn", "=", "deep_type", "(", "sn", ")", "msg", "=", "_make_generator_error_message", "(", "tpsn", ",", "gen", ",", "gen_type", ".", "__args__", "[", "1", "]", ",", "'has incompatible send type'", ")", "_raise_typecheck_error", "(", "msg", ",", "False", ",", "sn", ",", "tpsn", ",", "gen_type", ".", "__args__", "[", "1", "]", ")" ]
Builds a typechecking wrapper around a Python 2 style generator object.
[ "Builds", "a", "typechecking", "wrapper", "around", "a", "Python", "2", "style", "generator", "object", "." ]
python
train
54.296296
pantsbuild/pants
src/python/pants/core_tasks/deferred_sources_mapper.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/core_tasks/deferred_sources_mapper.py#L53-L90
def process_remote_sources(self): """Create synthetic targets with populated sources from remote_sources targets.""" unpacked_sources = self.context.products.get_data(UnpackedArchives) remote_sources_targets = self.context.targets(predicate=lambda t: isinstance(t, RemoteSources)) if not remote_sources_targets: return snapshot_specs = [] filespecs = [] unpack_dirs = [] for target in remote_sources_targets: unpacked_archive = unpacked_sources[target.sources_target] sources = unpacked_archive.found_files rel_unpack_dir = unpacked_archive.rel_unpack_dir self.context.log.debug('target: {}, rel_unpack_dir: {}, sources: {}' .format(target, rel_unpack_dir, sources)) sources_in_dir = tuple(os.path.join(rel_unpack_dir, source) for source in sources) snapshot_specs.append(PathGlobsAndRoot( PathGlobs(sources_in_dir), get_buildroot(), )) filespecs.append({'globs': sources_in_dir}) unpack_dirs.append(rel_unpack_dir) snapshots = self.context._scheduler.capture_snapshots(tuple(snapshot_specs)) for target, snapshot, filespec, rel_unpack_dir in \ zip(remote_sources_targets, snapshots, filespecs, unpack_dirs): synthetic_target = self.context.add_new_target( address=Address(os.path.relpath(self.workdir, get_buildroot()), target.id), target_type=target.destination_target_type, dependencies=target.dependencies, sources=EagerFilesetWithSpec(rel_unpack_dir, filespec, snapshot), derived_from=target, **target.destination_target_args ) self.context.log.debug('synthetic_target: {}'.format(synthetic_target)) for dependent in self.context.build_graph.dependents_of(target.address): self.context.build_graph.inject_dependency(dependent, synthetic_target.address)
[ "def", "process_remote_sources", "(", "self", ")", ":", "unpacked_sources", "=", "self", ".", "context", ".", "products", ".", "get_data", "(", "UnpackedArchives", ")", "remote_sources_targets", "=", "self", ".", "context", ".", "targets", "(", "predicate", "=", "lambda", "t", ":", "isinstance", "(", "t", ",", "RemoteSources", ")", ")", "if", "not", "remote_sources_targets", ":", "return", "snapshot_specs", "=", "[", "]", "filespecs", "=", "[", "]", "unpack_dirs", "=", "[", "]", "for", "target", "in", "remote_sources_targets", ":", "unpacked_archive", "=", "unpacked_sources", "[", "target", ".", "sources_target", "]", "sources", "=", "unpacked_archive", ".", "found_files", "rel_unpack_dir", "=", "unpacked_archive", ".", "rel_unpack_dir", "self", ".", "context", ".", "log", ".", "debug", "(", "'target: {}, rel_unpack_dir: {}, sources: {}'", ".", "format", "(", "target", ",", "rel_unpack_dir", ",", "sources", ")", ")", "sources_in_dir", "=", "tuple", "(", "os", ".", "path", ".", "join", "(", "rel_unpack_dir", ",", "source", ")", "for", "source", "in", "sources", ")", "snapshot_specs", ".", "append", "(", "PathGlobsAndRoot", "(", "PathGlobs", "(", "sources_in_dir", ")", ",", "get_buildroot", "(", ")", ",", ")", ")", "filespecs", ".", "append", "(", "{", "'globs'", ":", "sources_in_dir", "}", ")", "unpack_dirs", ".", "append", "(", "rel_unpack_dir", ")", "snapshots", "=", "self", ".", "context", ".", "_scheduler", ".", "capture_snapshots", "(", "tuple", "(", "snapshot_specs", ")", ")", "for", "target", ",", "snapshot", ",", "filespec", ",", "rel_unpack_dir", "in", "zip", "(", "remote_sources_targets", ",", "snapshots", ",", "filespecs", ",", "unpack_dirs", ")", ":", "synthetic_target", "=", "self", ".", "context", ".", "add_new_target", "(", "address", "=", "Address", "(", "os", ".", "path", ".", "relpath", "(", "self", ".", "workdir", ",", "get_buildroot", "(", ")", ")", ",", "target", ".", "id", ")", ",", "target_type", "=", "target", ".", "destination_target_type", ",", "dependencies", "=", "target", ".", "dependencies", ",", "sources", "=", "EagerFilesetWithSpec", "(", "rel_unpack_dir", ",", "filespec", ",", "snapshot", ")", ",", "derived_from", "=", "target", ",", "*", "*", "target", ".", "destination_target_args", ")", "self", ".", "context", ".", "log", ".", "debug", "(", "'synthetic_target: {}'", ".", "format", "(", "synthetic_target", ")", ")", "for", "dependent", "in", "self", ".", "context", ".", "build_graph", ".", "dependents_of", "(", "target", ".", "address", ")", ":", "self", ".", "context", ".", "build_graph", ".", "inject_dependency", "(", "dependent", ",", "synthetic_target", ".", "address", ")" ]
Create synthetic targets with populated sources from remote_sources targets.
[ "Create", "synthetic", "targets", "with", "populated", "sources", "from", "remote_sources", "targets", "." ]
python
train
48.763158
saltstack/salt
salt/modules/glusterfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/glusterfs.py#L545-L589
def add_volume_bricks(name, bricks): ''' Add brick(s) to an existing volume name Volume name bricks List of bricks to add to the volume CLI Example: .. code-block:: bash salt '*' glusterfs.add_volume_bricks <volume> <bricks> ''' volinfo = info() if name not in volinfo: log.error('Volume %s does not exist, cannot add bricks', name) return False new_bricks = [] cmd = 'volume add-brick {0}'.format(name) if isinstance(bricks, six.string_types): bricks = [bricks] volume_bricks = [x['path'] for x in volinfo[name]['bricks'].values()] for brick in bricks: if brick in volume_bricks: log.debug( 'Brick %s already in volume %s...excluding from command', brick, name) else: new_bricks.append(brick) if new_bricks: for brick in new_bricks: cmd += ' {0}'.format(brick) return _gluster(cmd) return True
[ "def", "add_volume_bricks", "(", "name", ",", "bricks", ")", ":", "volinfo", "=", "info", "(", ")", "if", "name", "not", "in", "volinfo", ":", "log", ".", "error", "(", "'Volume %s does not exist, cannot add bricks'", ",", "name", ")", "return", "False", "new_bricks", "=", "[", "]", "cmd", "=", "'volume add-brick {0}'", ".", "format", "(", "name", ")", "if", "isinstance", "(", "bricks", ",", "six", ".", "string_types", ")", ":", "bricks", "=", "[", "bricks", "]", "volume_bricks", "=", "[", "x", "[", "'path'", "]", "for", "x", "in", "volinfo", "[", "name", "]", "[", "'bricks'", "]", ".", "values", "(", ")", "]", "for", "brick", "in", "bricks", ":", "if", "brick", "in", "volume_bricks", ":", "log", ".", "debug", "(", "'Brick %s already in volume %s...excluding from command'", ",", "brick", ",", "name", ")", "else", ":", "new_bricks", ".", "append", "(", "brick", ")", "if", "new_bricks", ":", "for", "brick", "in", "new_bricks", ":", "cmd", "+=", "' {0}'", ".", "format", "(", "brick", ")", "return", "_gluster", "(", "cmd", ")", "return", "True" ]
Add brick(s) to an existing volume name Volume name bricks List of bricks to add to the volume CLI Example: .. code-block:: bash salt '*' glusterfs.add_volume_bricks <volume> <bricks>
[ "Add", "brick", "(", "s", ")", "to", "an", "existing", "volume" ]
python
train
22
woolfson-group/isambard
isambard/ampal/protein.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/protein.py#L80-L116
def flat_list_to_polymer(atom_list, atom_group_s=4): """Takes a flat list of atomic coordinates and converts it to a `Polymer`. Parameters ---------- atom_list : [Atom] Flat list of coordinates. atom_group_s : int, optional Size of atom groups. Returns ------- polymer : Polypeptide `Polymer` object containing atom coords converted `Monomers`. Raises ------ ValueError Raised if `atom_group_s` != 4 or 5 """ atom_labels = ['N', 'CA', 'C', 'O', 'CB'] atom_elements = ['N', 'C', 'C', 'O', 'C'] atoms_coords = [atom_list[x:x + atom_group_s] for x in range(0, len(atom_list), atom_group_s)] atoms = [[Atom(x[0], x[1]) for x in zip(y, atom_elements)] for y in atoms_coords] if atom_group_s == 5: monomers = [Residue(OrderedDict(zip(atom_labels, x)), 'ALA') for x in atoms] elif atom_group_s == 4: monomers = [Residue(OrderedDict(zip(atom_labels, x)), 'GLY') for x in atoms] else: raise ValueError( 'Parameter atom_group_s must be 4 or 5 so atoms can be labeled correctly.') polymer = Polypeptide(monomers=monomers) return polymer
[ "def", "flat_list_to_polymer", "(", "atom_list", ",", "atom_group_s", "=", "4", ")", ":", "atom_labels", "=", "[", "'N'", ",", "'CA'", ",", "'C'", ",", "'O'", ",", "'CB'", "]", "atom_elements", "=", "[", "'N'", ",", "'C'", ",", "'C'", ",", "'O'", ",", "'C'", "]", "atoms_coords", "=", "[", "atom_list", "[", "x", ":", "x", "+", "atom_group_s", "]", "for", "x", "in", "range", "(", "0", ",", "len", "(", "atom_list", ")", ",", "atom_group_s", ")", "]", "atoms", "=", "[", "[", "Atom", "(", "x", "[", "0", "]", ",", "x", "[", "1", "]", ")", "for", "x", "in", "zip", "(", "y", ",", "atom_elements", ")", "]", "for", "y", "in", "atoms_coords", "]", "if", "atom_group_s", "==", "5", ":", "monomers", "=", "[", "Residue", "(", "OrderedDict", "(", "zip", "(", "atom_labels", ",", "x", ")", ")", ",", "'ALA'", ")", "for", "x", "in", "atoms", "]", "elif", "atom_group_s", "==", "4", ":", "monomers", "=", "[", "Residue", "(", "OrderedDict", "(", "zip", "(", "atom_labels", ",", "x", ")", ")", ",", "'GLY'", ")", "for", "x", "in", "atoms", "]", "else", ":", "raise", "ValueError", "(", "'Parameter atom_group_s must be 4 or 5 so atoms can be labeled correctly.'", ")", "polymer", "=", "Polypeptide", "(", "monomers", "=", "monomers", ")", "return", "polymer" ]
Takes a flat list of atomic coordinates and converts it to a `Polymer`. Parameters ---------- atom_list : [Atom] Flat list of coordinates. atom_group_s : int, optional Size of atom groups. Returns ------- polymer : Polypeptide `Polymer` object containing atom coords converted `Monomers`. Raises ------ ValueError Raised if `atom_group_s` != 4 or 5
[ "Takes", "a", "flat", "list", "of", "atomic", "coordinates", "and", "converts", "it", "to", "a", "Polymer", "." ]
python
train
32.891892
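Usage sketch (not part of the original record): the coordinates are made-up points, grouped five per residue so the resulting monomers come out as 'ALA'.

# ten dummy coordinate triples -> two five-atom (N, CA, C, O, CB) residues
coords = [(0.0, 0.0, float(i)) for i in range(10)]
polypeptide = flat_list_to_polymer(coords, atom_group_s=5)   # Polypeptide with two 'ALA' residues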
pydata/xarray
xarray/backends/zarr.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/backends/zarr.py#L356-L533
def open_zarr(store, group=None, synchronizer=None, chunks='auto', decode_cf=True, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables=None, consolidated=False, overwrite_encoded_chunks=False, **kwargs): """Load and decode a dataset from a Zarr store. .. note:: Experimental The Zarr backend is new and experimental. Please report any unexpected behavior via github issues. The `store` object should be a valid store for a Zarr group. `store` variables must contain dimension metadata encoded in the `_ARRAY_DIMENSIONS` attribute. Parameters ---------- store : MutableMapping or str A MutableMapping where a Zarr Group has been stored or a path to a directory in file system where a Zarr DirectoryStore has been stored. synchronizer : object, optional Array synchronizer provided to zarr group : str, obtional Group path. (a.k.a. `path` in zarr terminology.) chunks : int or dict or tuple or {None, 'auto'}, optional Chunk sizes along each dimension, e.g., ``5`` or ``{'x': 5, 'y': 5}``. If `chunks='auto'`, dask chunks are created based on the variable's zarr chunks. If `chunks=None`, zarr array data will lazily convert to numpy arrays upon access. This accepts all the chunk specifications as Dask does. overwrite_encoded_chunks: bool, optional Whether to drop the zarr chunks encoded for each variable when a dataset is loaded with specified chunk sizes (default: False) decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. mask_and_scale : bool, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. decode_times : bool, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, leave them encoded as numbers. concat_characters : bool, optional If True, concatenate along the last dimension of character arrays to form string arrays. Dimensions will only be concatenated over (and removed) if they have no corresponding variable and if they are only used as the last dimension of character arrays. decode_coords : bool, optional If True, decode the 'coordinates' attribute to identify coordinates in the resulting dataset. drop_variables : string or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. consolidated : bool, optional Whether to open the store using zarr's consolidated metadata capability. Only works for stores that have already been consolidated. Returns ------- dataset : Dataset The newly created dataset. See Also -------- open_dataset References ---------- http://zarr.readthedocs.io/ """ if 'auto_chunk' in kwargs: auto_chunk = kwargs.pop('auto_chunk') if auto_chunk: chunks = 'auto' # maintain backwards compatibility else: chunks = None warnings.warn("auto_chunk is deprecated. 
Use chunks='auto' instead.", FutureWarning, stacklevel=2) if kwargs: raise TypeError("open_zarr() got unexpected keyword arguments " + ",".join(kwargs.keys())) if not isinstance(chunks, (int, dict)): if chunks != 'auto' and chunks is not None: raise ValueError("chunks must be an int, dict, 'auto', or None. " "Instead found %s. " % chunks) if not decode_cf: mask_and_scale = False decode_times = False concat_characters = False decode_coords = False def maybe_decode_store(store, lock=False): ds = conventions.decode_cf( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables) # TODO: this is where we would apply caching return ds # Zarr supports a wide range of access modes, but for now xarray either # reads or writes from a store, never both. For open_zarr, we only read mode = 'r' zarr_store = ZarrStore.open_group(store, mode=mode, synchronizer=synchronizer, group=group, consolidated=consolidated) ds = maybe_decode_store(zarr_store) # auto chunking needs to be here and not in ZarrStore because variable # chunks do not survive decode_cf # return trivial case if not chunks: return ds # adapted from Dataset.Chunk() if isinstance(chunks, int): chunks = dict.fromkeys(ds.dims, chunks) if isinstance(chunks, tuple) and len(chunks) == len(ds.dims): chunks = dict(zip(ds.dims, chunks)) def get_chunk(name, var, chunks): chunk_spec = dict(zip(var.dims, var.encoding.get('chunks'))) # Coordinate labels aren't chunked if var.ndim == 1 and var.dims[0] == name: return chunk_spec if chunks == 'auto': return chunk_spec for dim in var.dims: if dim in chunks: spec = chunks[dim] if isinstance(spec, int): spec = (spec,) if isinstance(spec, (tuple, list)) and chunk_spec[dim]: if any(s % chunk_spec[dim] for s in spec): warnings.warn("Specified Dask chunks %r would " "separate Zarr chunk shape %r for " "dimension %r. This significantly " "degrades performance. Consider " "rechunking after loading instead." % (chunks[dim], chunk_spec[dim], dim), stacklevel=2) chunk_spec[dim] = chunks[dim] return chunk_spec def maybe_chunk(name, var, chunks): from dask.base import tokenize chunk_spec = get_chunk(name, var, chunks) if (var.ndim > 0) and (chunk_spec is not None): # does this cause any data to be read? token2 = tokenize(name, var._data) name2 = 'zarr-%s' % token2 var = var.chunk(chunk_spec, name=name2, lock=None) if overwrite_encoded_chunks and var.chunks is not None: var.encoding['chunks'] = tuple(x[0] for x in var.chunks) return var else: return var variables = OrderedDict([(k, maybe_chunk(k, v, chunks)) for k, v in ds.variables.items()]) return ds._replace_vars_and_dims(variables)
[ "def", "open_zarr", "(", "store", ",", "group", "=", "None", ",", "synchronizer", "=", "None", ",", "chunks", "=", "'auto'", ",", "decode_cf", "=", "True", ",", "mask_and_scale", "=", "True", ",", "decode_times", "=", "True", ",", "concat_characters", "=", "True", ",", "decode_coords", "=", "True", ",", "drop_variables", "=", "None", ",", "consolidated", "=", "False", ",", "overwrite_encoded_chunks", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "'auto_chunk'", "in", "kwargs", ":", "auto_chunk", "=", "kwargs", ".", "pop", "(", "'auto_chunk'", ")", "if", "auto_chunk", ":", "chunks", "=", "'auto'", "# maintain backwards compatibility", "else", ":", "chunks", "=", "None", "warnings", ".", "warn", "(", "\"auto_chunk is deprecated. Use chunks='auto' instead.\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "if", "kwargs", ":", "raise", "TypeError", "(", "\"open_zarr() got unexpected keyword arguments \"", "+", "\",\"", ".", "join", "(", "kwargs", ".", "keys", "(", ")", ")", ")", "if", "not", "isinstance", "(", "chunks", ",", "(", "int", ",", "dict", ")", ")", ":", "if", "chunks", "!=", "'auto'", "and", "chunks", "is", "not", "None", ":", "raise", "ValueError", "(", "\"chunks must be an int, dict, 'auto', or None. \"", "\"Instead found %s. \"", "%", "chunks", ")", "if", "not", "decode_cf", ":", "mask_and_scale", "=", "False", "decode_times", "=", "False", "concat_characters", "=", "False", "decode_coords", "=", "False", "def", "maybe_decode_store", "(", "store", ",", "lock", "=", "False", ")", ":", "ds", "=", "conventions", ".", "decode_cf", "(", "store", ",", "mask_and_scale", "=", "mask_and_scale", ",", "decode_times", "=", "decode_times", ",", "concat_characters", "=", "concat_characters", ",", "decode_coords", "=", "decode_coords", ",", "drop_variables", "=", "drop_variables", ")", "# TODO: this is where we would apply caching", "return", "ds", "# Zarr supports a wide range of access modes, but for now xarray either", "# reads or writes from a store, never both. 
For open_zarr, we only read", "mode", "=", "'r'", "zarr_store", "=", "ZarrStore", ".", "open_group", "(", "store", ",", "mode", "=", "mode", ",", "synchronizer", "=", "synchronizer", ",", "group", "=", "group", ",", "consolidated", "=", "consolidated", ")", "ds", "=", "maybe_decode_store", "(", "zarr_store", ")", "# auto chunking needs to be here and not in ZarrStore because variable", "# chunks do not survive decode_cf", "# return trivial case", "if", "not", "chunks", ":", "return", "ds", "# adapted from Dataset.Chunk()", "if", "isinstance", "(", "chunks", ",", "int", ")", ":", "chunks", "=", "dict", ".", "fromkeys", "(", "ds", ".", "dims", ",", "chunks", ")", "if", "isinstance", "(", "chunks", ",", "tuple", ")", "and", "len", "(", "chunks", ")", "==", "len", "(", "ds", ".", "dims", ")", ":", "chunks", "=", "dict", "(", "zip", "(", "ds", ".", "dims", ",", "chunks", ")", ")", "def", "get_chunk", "(", "name", ",", "var", ",", "chunks", ")", ":", "chunk_spec", "=", "dict", "(", "zip", "(", "var", ".", "dims", ",", "var", ".", "encoding", ".", "get", "(", "'chunks'", ")", ")", ")", "# Coordinate labels aren't chunked", "if", "var", ".", "ndim", "==", "1", "and", "var", ".", "dims", "[", "0", "]", "==", "name", ":", "return", "chunk_spec", "if", "chunks", "==", "'auto'", ":", "return", "chunk_spec", "for", "dim", "in", "var", ".", "dims", ":", "if", "dim", "in", "chunks", ":", "spec", "=", "chunks", "[", "dim", "]", "if", "isinstance", "(", "spec", ",", "int", ")", ":", "spec", "=", "(", "spec", ",", ")", "if", "isinstance", "(", "spec", ",", "(", "tuple", ",", "list", ")", ")", "and", "chunk_spec", "[", "dim", "]", ":", "if", "any", "(", "s", "%", "chunk_spec", "[", "dim", "]", "for", "s", "in", "spec", ")", ":", "warnings", ".", "warn", "(", "\"Specified Dask chunks %r would \"", "\"separate Zarr chunk shape %r for \"", "\"dimension %r. This significantly \"", "\"degrades performance. Consider \"", "\"rechunking after loading instead.\"", "%", "(", "chunks", "[", "dim", "]", ",", "chunk_spec", "[", "dim", "]", ",", "dim", ")", ",", "stacklevel", "=", "2", ")", "chunk_spec", "[", "dim", "]", "=", "chunks", "[", "dim", "]", "return", "chunk_spec", "def", "maybe_chunk", "(", "name", ",", "var", ",", "chunks", ")", ":", "from", "dask", ".", "base", "import", "tokenize", "chunk_spec", "=", "get_chunk", "(", "name", ",", "var", ",", "chunks", ")", "if", "(", "var", ".", "ndim", ">", "0", ")", "and", "(", "chunk_spec", "is", "not", "None", ")", ":", "# does this cause any data to be read?", "token2", "=", "tokenize", "(", "name", ",", "var", ".", "_data", ")", "name2", "=", "'zarr-%s'", "%", "token2", "var", "=", "var", ".", "chunk", "(", "chunk_spec", ",", "name", "=", "name2", ",", "lock", "=", "None", ")", "if", "overwrite_encoded_chunks", "and", "var", ".", "chunks", "is", "not", "None", ":", "var", ".", "encoding", "[", "'chunks'", "]", "=", "tuple", "(", "x", "[", "0", "]", "for", "x", "in", "var", ".", "chunks", ")", "return", "var", "else", ":", "return", "var", "variables", "=", "OrderedDict", "(", "[", "(", "k", ",", "maybe_chunk", "(", "k", ",", "v", ",", "chunks", ")", ")", "for", "k", ",", "v", "in", "ds", ".", "variables", ".", "items", "(", ")", "]", ")", "return", "ds", ".", "_replace_vars_and_dims", "(", "variables", ")" ]
Load and decode a dataset from a Zarr store. .. note:: Experimental The Zarr backend is new and experimental. Please report any unexpected behavior via github issues. The `store` object should be a valid store for a Zarr group. `store` variables must contain dimension metadata encoded in the `_ARRAY_DIMENSIONS` attribute. Parameters ---------- store : MutableMapping or str A MutableMapping where a Zarr Group has been stored or a path to a directory in file system where a Zarr DirectoryStore has been stored. synchronizer : object, optional Array synchronizer provided to zarr group : str, obtional Group path. (a.k.a. `path` in zarr terminology.) chunks : int or dict or tuple or {None, 'auto'}, optional Chunk sizes along each dimension, e.g., ``5`` or ``{'x': 5, 'y': 5}``. If `chunks='auto'`, dask chunks are created based on the variable's zarr chunks. If `chunks=None`, zarr array data will lazily convert to numpy arrays upon access. This accepts all the chunk specifications as Dask does. overwrite_encoded_chunks: bool, optional Whether to drop the zarr chunks encoded for each variable when a dataset is loaded with specified chunk sizes (default: False) decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. mask_and_scale : bool, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. decode_times : bool, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, leave them encoded as numbers. concat_characters : bool, optional If True, concatenate along the last dimension of character arrays to form string arrays. Dimensions will only be concatenated over (and removed) if they have no corresponding variable and if they are only used as the last dimension of character arrays. decode_coords : bool, optional If True, decode the 'coordinates' attribute to identify coordinates in the resulting dataset. drop_variables : string or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. consolidated : bool, optional Whether to open the store using zarr's consolidated metadata capability. Only works for stores that have already been consolidated. Returns ------- dataset : Dataset The newly created dataset. See Also -------- open_dataset References ---------- http://zarr.readthedocs.io/
[ "Load", "and", "decode", "a", "dataset", "from", "a", "Zarr", "store", "." ]
python
train
41.224719
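The chunking behaviour described in the open_zarr docstring above is easiest to see from a short usage sketch. This is illustrative only: the store path and the "time" dimension are hypothetical, and it assumes xarray is installed with the zarr and dask backends.

import xarray as xr

# Default: chunks='auto' derives dask chunks from each variable's encoded zarr chunks.
ds = xr.open_zarr("example_store.zarr")

# Explicit per-dimension chunks; a warning is emitted if they would split the
# underlying zarr chunks, since that degrades read performance.
ds_big = xr.open_zarr("example_store.zarr", chunks={"time": 1000})

# chunks=None bypasses dask and lazily converts zarr arrays to numpy on access.
ds_np = xr.open_zarr("example_store.zarr", chunks=None)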
BenjaminSchubert/NitPycker
nitpycker/runner.py
https://github.com/BenjaminSchubert/NitPycker/blob/3ac2b3bf06f1d704b4853167a967311b0465a76f/nitpycker/runner.py#L113-L135
def module_can_run_parallel(test_module: unittest.TestSuite) -> bool: """ Checks if a given module of tests can be run in parallel or not :param test_module: the module to run :return: True if the module can be run on parallel, False otherwise """ for test_class in test_module: # if the test is already failed, we just don't filter it # and let the test runner deal with it later. if hasattr(unittest.loader, '_FailedTest'): # import failure in python 3.4.5+ # noinspection PyProtectedMember if isinstance(test_class, unittest.loader._FailedTest): continue if not isinstance(test_class, collections.Iterable): # likely an import failure in python 3.4.4- # before python 3.4.5, test import failures were not serializable. # We are unable to be sure that this is a module import failure, but it very likely is # if this is the case, we'll just run this locally and see raise TestClassNotIterable() for test_case in test_class: return not getattr(sys.modules[test_case.__module__], "__no_parallel__", False)
[ "def", "module_can_run_parallel", "(", "test_module", ":", "unittest", ".", "TestSuite", ")", "->", "bool", ":", "for", "test_class", "in", "test_module", ":", "# if the test is already failed, we just don't filter it", "# and let the test runner deal with it later.", "if", "hasattr", "(", "unittest", ".", "loader", ",", "'_FailedTest'", ")", ":", "# import failure in python 3.4.5+", "# noinspection PyProtectedMember", "if", "isinstance", "(", "test_class", ",", "unittest", ".", "loader", ".", "_FailedTest", ")", ":", "continue", "if", "not", "isinstance", "(", "test_class", ",", "collections", ".", "Iterable", ")", ":", "# likely an import failure in python 3.4.4-", "# before python 3.4.5, test import failures were not serializable.", "# We are unable to be sure that this is a module import failure, but it very likely is", "# if this is the case, we'll just run this locally and see", "raise", "TestClassNotIterable", "(", ")", "for", "test_case", "in", "test_class", ":", "return", "not", "getattr", "(", "sys", ".", "modules", "[", "test_case", ".", "__module__", "]", ",", "\"__no_parallel__\"", ",", "False", ")" ]
Checks if a given module of tests can be run in parallel or not :param test_module: the module to run :return: True if the module can be run in parallel, False otherwise
[ "Checks", "if", "a", "given", "module", "of", "tests", "can", "be", "run", "in", "parallel", "or", "not" ]
python
train
53.391304
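A sketch of how module_can_run_parallel might be driven from test discovery; the test package name is hypothetical and it assumes TestClassNotIterable is importable from the same module. A module opts out of parallel execution by defining __no_parallel__ = True at module level.

import unittest

from nitpycker.runner import TestClassNotIterable, module_can_run_parallel

suite = unittest.defaultTestLoader.discover("tests")   # hypothetical test package

for module_suite in suite:   # one sub-suite per discovered module
    try:
        parallel_ok = module_can_run_parallel(module_suite)
    except TestClassNotIterable:
        parallel_ok = False   # pre-3.4.5 import failure: safest to run serially
    print(parallel_ok)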
dropbox/stone
stone/backends/js_types.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backends/js_types.py#L172-L249
def _generate_struct(self, struct_type, extra_parameters=None, nameOverride=None): """ Emits a JSDoc @typedef for a struct. """ extra_parameters = extra_parameters if extra_parameters is not None else [] self._emit_jsdoc_header(struct_type.doc) self.emit( ' * @typedef {Object} %s' % ( nameOverride if nameOverride else fmt_type_name(struct_type) ) ) # Some structs can explicitly list their subtypes. These structs # have a .tag field that indicate which subtype they are. if struct_type.is_member_of_enumerated_subtypes_tree(): if struct_type.has_enumerated_subtypes(): # This struct is the parent to multiple subtypes. # Determine all of the possible values of the .tag # property. tag_values = [] for tags, _ in struct_type.get_all_subtypes_with_tags(): for tag in tags: tag_values.append('"%s"' % tag) jsdoc_tag_union = fmt_jsdoc_union(tag_values) txt = '@property {%s} .tag - Tag identifying the subtype variant.' % \ jsdoc_tag_union self.emit_wrapped_text(txt) else: # This struct is a particular subtype. Find the applicable # .tag value from the parent type, which may be an # arbitrary number of steps up the inheritance hierarchy. parent = struct_type.parent_type while not parent.has_enumerated_subtypes(): parent = parent.parent_type # parent now contains the closest parent type in the # inheritance hierarchy that has enumerated subtypes. # Determine which subtype this is. for subtype in parent.get_enumerated_subtypes(): if subtype.data_type == struct_type: txt = '@property {\'%s\'} [.tag] - Tag identifying ' \ 'this subtype variant. This field is only ' \ 'present when needed to discriminate ' \ 'between multiple possible subtypes.' % \ subtype.name self.emit_wrapped_text(txt) break for param_name, param_type, param_docstring in extra_parameters: param_docstring = ' - %s' % param_docstring if param_docstring else '' self.emit_wrapped_text( '@property {%s} %s%s' % ( param_type, param_name, param_docstring, ), prefix=' * ', ) # NOTE: JSDoc @typedef does not support inheritance. Using @class would be inappropriate, # since these are not nominal types backed by a constructor. Thus, we emit all_fields, # which includes fields on parent types. for field in struct_type.all_fields: field_doc = ' - ' + field.doc if field.doc else '' field_type, nullable, _ = unwrap(field.data_type) field_js_type = fmt_type(field_type) # Translate nullable types into optional properties. field_name = '[' + field.name + ']' if nullable else field.name self.emit_wrapped_text( '@property {%s} %s%s' % ( field_js_type, field_name, self.process_doc(field_doc, self._docf), ), prefix=' * ', ) self.emit(' */')
[ "def", "_generate_struct", "(", "self", ",", "struct_type", ",", "extra_parameters", "=", "None", ",", "nameOverride", "=", "None", ")", ":", "extra_parameters", "=", "extra_parameters", "if", "extra_parameters", "is", "not", "None", "else", "[", "]", "self", ".", "_emit_jsdoc_header", "(", "struct_type", ".", "doc", ")", "self", ".", "emit", "(", "' * @typedef {Object} %s'", "%", "(", "nameOverride", "if", "nameOverride", "else", "fmt_type_name", "(", "struct_type", ")", ")", ")", "# Some structs can explicitly list their subtypes. These structs", "# have a .tag field that indicate which subtype they are.", "if", "struct_type", ".", "is_member_of_enumerated_subtypes_tree", "(", ")", ":", "if", "struct_type", ".", "has_enumerated_subtypes", "(", ")", ":", "# This struct is the parent to multiple subtypes.", "# Determine all of the possible values of the .tag", "# property.", "tag_values", "=", "[", "]", "for", "tags", ",", "_", "in", "struct_type", ".", "get_all_subtypes_with_tags", "(", ")", ":", "for", "tag", "in", "tags", ":", "tag_values", ".", "append", "(", "'\"%s\"'", "%", "tag", ")", "jsdoc_tag_union", "=", "fmt_jsdoc_union", "(", "tag_values", ")", "txt", "=", "'@property {%s} .tag - Tag identifying the subtype variant.'", "%", "jsdoc_tag_union", "self", ".", "emit_wrapped_text", "(", "txt", ")", "else", ":", "# This struct is a particular subtype. Find the applicable", "# .tag value from the parent type, which may be an", "# arbitrary number of steps up the inheritance hierarchy.", "parent", "=", "struct_type", ".", "parent_type", "while", "not", "parent", ".", "has_enumerated_subtypes", "(", ")", ":", "parent", "=", "parent", ".", "parent_type", "# parent now contains the closest parent type in the", "# inheritance hierarchy that has enumerated subtypes.", "# Determine which subtype this is.", "for", "subtype", "in", "parent", ".", "get_enumerated_subtypes", "(", ")", ":", "if", "subtype", ".", "data_type", "==", "struct_type", ":", "txt", "=", "'@property {\\'%s\\'} [.tag] - Tag identifying '", "'this subtype variant. This field is only '", "'present when needed to discriminate '", "'between multiple possible subtypes.'", "%", "subtype", ".", "name", "self", ".", "emit_wrapped_text", "(", "txt", ")", "break", "for", "param_name", ",", "param_type", ",", "param_docstring", "in", "extra_parameters", ":", "param_docstring", "=", "' - %s'", "%", "param_docstring", "if", "param_docstring", "else", "''", "self", ".", "emit_wrapped_text", "(", "'@property {%s} %s%s'", "%", "(", "param_type", ",", "param_name", ",", "param_docstring", ",", ")", ",", "prefix", "=", "' * '", ",", ")", "# NOTE: JSDoc @typedef does not support inheritance. Using @class would be inappropriate,", "# since these are not nominal types backed by a constructor. 
Thus, we emit all_fields,", "# which includes fields on parent types.", "for", "field", "in", "struct_type", ".", "all_fields", ":", "field_doc", "=", "' - '", "+", "field", ".", "doc", "if", "field", ".", "doc", "else", "''", "field_type", ",", "nullable", ",", "_", "=", "unwrap", "(", "field", ".", "data_type", ")", "field_js_type", "=", "fmt_type", "(", "field_type", ")", "# Translate nullable types into optional properties.", "field_name", "=", "'['", "+", "field", ".", "name", "+", "']'", "if", "nullable", "else", "field", ".", "name", "self", ".", "emit_wrapped_text", "(", "'@property {%s} %s%s'", "%", "(", "field_js_type", ",", "field_name", ",", "self", ".", "process_doc", "(", "field_doc", ",", "self", ".", "_docf", ")", ",", ")", ",", "prefix", "=", "' * '", ",", ")", "self", ".", "emit", "(", "' */'", ")" ]
Emits a JSDoc @typedef for a struct.
[ "Emits", "a", "JSDoc" ]
python
train
46.551282
glormph/msstitch
src/app/actions/prottable/precursorarea.py
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/prottable/precursorarea.py#L40-L52
def add_ms1_quant_from_top3_mzidtsv(proteins, psms, headerfields, protcol): """Collects PSMs with the highes precursor quant values, adds sum of the top 3 of these to a protein table""" if not protcol: protcol = mzidtsvdata.HEADER_MASTER_PROT top_ms1_psms = generate_top_psms(psms, protcol) for protein in proteins: prot_acc = protein[prottabledata.HEADER_PROTEIN] prec_area = calculate_protein_precursor_quant(top_ms1_psms, prot_acc) outprotein = {k: v for k, v in protein.items()} outprotein[headerfields['precursorquant'][ prottabledata.HEADER_AREA][None]] = str(prec_area) yield outprotein
[ "def", "add_ms1_quant_from_top3_mzidtsv", "(", "proteins", ",", "psms", ",", "headerfields", ",", "protcol", ")", ":", "if", "not", "protcol", ":", "protcol", "=", "mzidtsvdata", ".", "HEADER_MASTER_PROT", "top_ms1_psms", "=", "generate_top_psms", "(", "psms", ",", "protcol", ")", "for", "protein", "in", "proteins", ":", "prot_acc", "=", "protein", "[", "prottabledata", ".", "HEADER_PROTEIN", "]", "prec_area", "=", "calculate_protein_precursor_quant", "(", "top_ms1_psms", ",", "prot_acc", ")", "outprotein", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "protein", ".", "items", "(", ")", "}", "outprotein", "[", "headerfields", "[", "'precursorquant'", "]", "[", "prottabledata", ".", "HEADER_AREA", "]", "[", "None", "]", "]", "=", "str", "(", "prec_area", ")", "yield", "outprotein" ]
Collects PSMs with the highest precursor quant values, adds sum of the top 3 of these to a protein table
[ "Collects", "PSMs", "with", "the", "highes", "precursor", "quant", "values", "adds", "sum", "of", "the", "top", "3", "of", "these", "to", "a", "protein", "table" ]
python
train
50.846154
dslackw/slpkg
slpkg/checks.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/checks.py#L119-L122
def print_status(self, repo): """Print status """ print(" {0}{1}{2}".format(repo, " " * (19 - len(repo)), self.st))
[ "def", "print_status", "(", "self", ",", "repo", ")", ":", "print", "(", "\" {0}{1}{2}\"", ".", "format", "(", "repo", ",", "\" \"", "*", "(", "19", "-", "len", "(", "repo", ")", ")", ",", "self", ".", "st", ")", ")" ]
Print status
[ "Print", "status" ]
python
train
34.25
undertheseanlp/languageflow
languageflow/reader/tagged_corpus.py
https://github.com/undertheseanlp/languageflow/blob/1436e0bf72803e02ccf727f41e8fc85ba167d9fe/languageflow/reader/tagged_corpus.py#L70-L94
def analyze(self, output_folder=".", auto_remove=False): """ :type auto_remove: boolean :param boolean auto_remove: auto remove previous files in analyze folder """ if auto_remove: try: shutil.rmtree(output_folder) except: pass try: mkdir(output_folder) except: pass tokens = [token for sublist in self.sentences for token in sublist] df = pd.DataFrame(tokens) log = u"" log += u"Sentences : {}\n".format(len(self.sentences)) n = df.shape[1] log += self._analyze_first_token(df, 0, output_folder) for i in range(1, n): log += self._analyze_field(df, i, output_folder) print(log) stat_file = join(output_folder, "stats.txt") write(stat_file, log)
[ "def", "analyze", "(", "self", ",", "output_folder", "=", "\".\"", ",", "auto_remove", "=", "False", ")", ":", "if", "auto_remove", ":", "try", ":", "shutil", ".", "rmtree", "(", "output_folder", ")", "except", ":", "pass", "try", ":", "mkdir", "(", "output_folder", ")", "except", ":", "pass", "tokens", "=", "[", "token", "for", "sublist", "in", "self", ".", "sentences", "for", "token", "in", "sublist", "]", "df", "=", "pd", ".", "DataFrame", "(", "tokens", ")", "log", "=", "u\"\"", "log", "+=", "u\"Sentences : {}\\n\"", ".", "format", "(", "len", "(", "self", ".", "sentences", ")", ")", "n", "=", "df", ".", "shape", "[", "1", "]", "log", "+=", "self", ".", "_analyze_first_token", "(", "df", ",", "0", ",", "output_folder", ")", "for", "i", "in", "range", "(", "1", ",", "n", ")", ":", "log", "+=", "self", ".", "_analyze_field", "(", "df", ",", "i", ",", "output_folder", ")", "print", "(", "log", ")", "stat_file", "=", "join", "(", "output_folder", ",", "\"stats.txt\"", ")", "write", "(", "stat_file", ",", "log", ")" ]
:type auto_remove: boolean :param boolean auto_remove: auto remove previous files in analyze folder
[ ":", "type", "auto_remove", ":", "boolean", ":", "param", "boolean", "auto_remove", ":", "auto", "remove", "previous", "files", "in", "analyze", "folder" ]
python
valid
34.08
fabioz/PyDev.Debugger
pydev_ipython/inputhook.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydev_ipython/inputhook.py#L396-L414
def enable_gtk3(self, app=None): """Enable event loop integration with Gtk3 (gir bindings). Parameters ---------- app : ignored Ignored, it's only a placeholder to keep the call signature of all gui activation methods consistent, which simplifies the logic of supporting magics. Notes ----- This methods sets the PyOS_InputHook for Gtk3, which allows the Gtk3 to integrate with terminal based applications like IPython. """ from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3 self.set_inputhook(create_inputhook_gtk3(self._stdin_file)) self._current_gui = GUI_GTK
[ "def", "enable_gtk3", "(", "self", ",", "app", "=", "None", ")", ":", "from", "pydev_ipython", ".", "inputhookgtk3", "import", "create_inputhook_gtk3", "self", ".", "set_inputhook", "(", "create_inputhook_gtk3", "(", "self", ".", "_stdin_file", ")", ")", "self", ".", "_current_gui", "=", "GUI_GTK" ]
Enable event loop integration with Gtk3 (gir bindings). Parameters ---------- app : ignored Ignored, it's only a placeholder to keep the call signature of all gui activation methods consistent, which simplifies the logic of supporting magics. Notes ----- This method sets the PyOS_InputHook for Gtk3, which allows Gtk3 to integrate with terminal based applications like IPython.
[ "Enable", "event", "loop", "integration", "with", "Gtk3", "(", "gir", "bindings", ")", "." ]
python
train
36.526316
honzajavorek/redis-collections
redis_collections/base.py
https://github.com/honzajavorek/redis-collections/blob/07ca8efe88fb128f7dc7319dfa6a26cd39b3776b/redis_collections/base.py#L116-L125
def _clear(self, pipe=None): """Helper for clear operations. :param pipe: Redis pipe in case update is performed as a part of transaction. :type pipe: :class:`redis.client.StrictPipeline` or :class:`redis.client.StrictRedis` """ redis = self.redis if pipe is None else pipe redis.delete(self.key)
[ "def", "_clear", "(", "self", ",", "pipe", "=", "None", ")", ":", "redis", "=", "self", ".", "redis", "if", "pipe", "is", "None", "else", "pipe", "redis", ".", "delete", "(", "self", ".", "key", ")" ]
Helper for clear operations. :param pipe: Redis pipe in case update is performed as a part of transaction. :type pipe: :class:`redis.client.StrictPipeline` or :class:`redis.client.StrictRedis`
[ "Helper", "for", "clear", "operations", "." ]
python
train
37.7
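A sketch of the two call styles the pipe parameter of _clear allows, using a hypothetical list-backed collection; exact constructor signatures may differ between redis-collections versions.

import redis
from redis_collections import List

r = redis.StrictRedis()
col = List([1, 2, 3], redis=r)

# Standalone: issues DELETE on the collection's key immediately.
col._clear()

# Inside a transaction: the DELETE is only queued on the pipeline and runs
# atomically together with the commands that follow it.
with r.pipeline() as pipe:
    col._clear(pipe=pipe)
    pipe.rpush(col.key, b"fresh")
    pipe.execute()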
aequitas/python-rflink
rflink/protocol.py
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L165-L188
def send_command_ack(self, device_id, action): """Send command, wait for gateway to repond with acknowledgment.""" # serialize commands yield from self._ready_to_send.acquire() acknowledgement = None try: self._command_ack.clear() self.send_command(device_id, action) log.debug('waiting for acknowledgement') try: yield from asyncio.wait_for(self._command_ack.wait(), TIMEOUT.seconds, loop=self.loop) log.debug('packet acknowledged') except concurrent.futures._base.TimeoutError: acknowledgement = {'ok': False, 'message': 'timeout'} log.warning('acknowledge timeout') else: acknowledgement = self._last_ack.get('ok', False) finally: # allow next command self._ready_to_send.release() return acknowledgement
[ "def", "send_command_ack", "(", "self", ",", "device_id", ",", "action", ")", ":", "# serialize commands", "yield", "from", "self", ".", "_ready_to_send", ".", "acquire", "(", ")", "acknowledgement", "=", "None", "try", ":", "self", ".", "_command_ack", ".", "clear", "(", ")", "self", ".", "send_command", "(", "device_id", ",", "action", ")", "log", ".", "debug", "(", "'waiting for acknowledgement'", ")", "try", ":", "yield", "from", "asyncio", ".", "wait_for", "(", "self", ".", "_command_ack", ".", "wait", "(", ")", ",", "TIMEOUT", ".", "seconds", ",", "loop", "=", "self", ".", "loop", ")", "log", ".", "debug", "(", "'packet acknowledged'", ")", "except", "concurrent", ".", "futures", ".", "_base", ".", "TimeoutError", ":", "acknowledgement", "=", "{", "'ok'", ":", "False", ",", "'message'", ":", "'timeout'", "}", "log", ".", "warning", "(", "'acknowledge timeout'", ")", "else", ":", "acknowledgement", "=", "self", ".", "_last_ack", ".", "get", "(", "'ok'", ",", "False", ")", "finally", ":", "# allow next command", "self", ".", "_ready_to_send", ".", "release", "(", ")", "return", "acknowledgement" ]
Send command, wait for gateway to respond with acknowledgment.
[ "Send", "command", "wait", "for", "gateway", "to", "repond", "with", "acknowledgment", "." ]
python
train
40.083333
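Since send_command_ack is a generator-based coroutine that serializes commands and waits for the gateway's acknowledgement, it is called from another coroutine; the device id and the way the protocol instance is obtained are hypothetical in this sketch, which mirrors the library's own yield-from style.

import asyncio

@asyncio.coroutine
def switch_on(protocol):
    # protocol: a connected rflink protocol instance exposing send_command_ack
    ack = yield from protocol.send_command_ack('newkaku_00000001_1', 'on')
    if isinstance(ack, dict) and not ack.get('ok', True):
        print('gateway did not acknowledge:', ack.get('message'))
    elif ack is False:
        print('gateway rejected the command')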
dj-stripe/dj-stripe
djstripe/models/base.py
https://github.com/dj-stripe/dj-stripe/blob/a5308a3808cd6e2baba49482f7a699f3a8992518/djstripe/models/base.py#L539-L560
def _stripe_object_to_refunds(cls, target_cls, data, charge): """ Retrieves Refunds for a charge :param target_cls: The target class to instantiate per invoice item. :type target_cls: ``Refund`` :param data: The data dictionary received from the Stripe API. :type data: dict :param charge: The charge object that refunds are for. :type invoice: ``djstripe.models.Refund`` :return: """ refunds = data.get("refunds") if not refunds: return [] refund_objs = [] for refund_data in refunds.get("data", []): item, _ = target_cls._get_or_create_from_stripe_object(refund_data, refetch=False) refund_objs.append(item) return refund_objs
[ "def", "_stripe_object_to_refunds", "(", "cls", ",", "target_cls", ",", "data", ",", "charge", ")", ":", "refunds", "=", "data", ".", "get", "(", "\"refunds\"", ")", "if", "not", "refunds", ":", "return", "[", "]", "refund_objs", "=", "[", "]", "for", "refund_data", "in", "refunds", ".", "get", "(", "\"data\"", ",", "[", "]", ")", ":", "item", ",", "_", "=", "target_cls", ".", "_get_or_create_from_stripe_object", "(", "refund_data", ",", "refetch", "=", "False", ")", "refund_objs", ".", "append", "(", "item", ")", "return", "refund_objs" ]
Retrieves Refunds for a charge :param target_cls: The target class to instantiate per invoice item. :type target_cls: ``Refund`` :param data: The data dictionary received from the Stripe API. :type data: dict :param charge: The charge object that refunds are for. :type invoice: ``djstripe.models.Refund`` :return:
[ "Retrieves", "Refunds", "for", "a", "charge", ":", "param", "target_cls", ":", "The", "target", "class", "to", "instantiate", "per", "invoice", "item", ".", ":", "type", "target_cls", ":", "Refund", ":", "param", "data", ":", "The", "data", "dictionary", "received", "from", "the", "Stripe", "API", ".", ":", "type", "data", ":", "dict", ":", "param", "charge", ":", "The", "charge", "object", "that", "refunds", "are", "for", ".", ":", "type", "invoice", ":", "djstripe", ".", "models", ".", "Refund", ":", "return", ":" ]
python
train
29.5
datosgobar/pydatajson
pydatajson/validation.py
https://github.com/datosgobar/pydatajson/blob/3141082ffbaa295e2deaf6ffbbc5a59f5859960e/pydatajson/validation.py#L84-L107
def is_valid_catalog(catalog, validator=None): """Valida que un archivo `data.json` cumpla con el schema definido. Chequea que el data.json tiene todos los campos obligatorios y que tanto los campos obligatorios como los opcionales siguen la estructura definida en el schema. Args: catalog (str o dict): Catálogo (dict, JSON o XLSX) a ser validado. Returns: bool: True si el data.json cumple con el schema, sino False. """ catalog = readers.read_catalog(catalog) if not validator: if hasattr(catalog, "validator"): validator = catalog.validator else: validator = create_validator() jsonschema_res = validator.is_valid(catalog) custom_errors = iter_custom_errors(catalog) return jsonschema_res and len(list(custom_errors)) == 0
[ "def", "is_valid_catalog", "(", "catalog", ",", "validator", "=", "None", ")", ":", "catalog", "=", "readers", ".", "read_catalog", "(", "catalog", ")", "if", "not", "validator", ":", "if", "hasattr", "(", "catalog", ",", "\"validator\"", ")", ":", "validator", "=", "catalog", ".", "validator", "else", ":", "validator", "=", "create_validator", "(", ")", "jsonschema_res", "=", "validator", ".", "is_valid", "(", "catalog", ")", "custom_errors", "=", "iter_custom_errors", "(", "catalog", ")", "return", "jsonschema_res", "and", "len", "(", "list", "(", "custom_errors", ")", ")", "==", "0" ]
Valida que un archivo `data.json` cumpla con el schema definido. Chequea que el data.json tiene todos los campos obligatorios y que tanto los campos obligatorios como los opcionales siguen la estructura definida en el schema. Args: catalog (str o dict): Catálogo (dict, JSON o XLSX) a ser validado. Returns: bool: True si el data.json cumple con el schema, sino False.
[ "Valida", "que", "un", "archivo", "data", ".", "json", "cumpla", "con", "el", "schema", "definido", "." ]
python
train
33.833333
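Typical use of is_valid_catalog is a quick boolean check before processing a catalog; the file name is hypothetical and the function equally accepts an already parsed dict.

from pydatajson.validation import is_valid_catalog

if is_valid_catalog("data.json"):
    print("catalog complies with the schema")
else:
    print("catalog has validation errors")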
jmcgeheeiv/pyfakefs
pyfakefs/fake_filesystem.py
https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/fake_filesystem.py#L4903-L4917
def _write_wrapper(self, name): """Wrap write() to adapt return value for Python 2. Returns: Wrapper which is described below. """ io_attr = getattr(self._io, name) def write_wrapper(*args, **kwargs): """Wrap all write calls to the stream object.""" ret_value = io_attr(*args, **kwargs) if not IS_PY2: return ret_value return write_wrapper
[ "def", "_write_wrapper", "(", "self", ",", "name", ")", ":", "io_attr", "=", "getattr", "(", "self", ".", "_io", ",", "name", ")", "def", "write_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Wrap all write calls to the stream object.\"\"\"", "ret_value", "=", "io_attr", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "not", "IS_PY2", ":", "return", "ret_value", "return", "write_wrapper" ]
Wrap write() to adapt return value for Python 2. Returns: Wrapper which is described below.
[ "Wrap", "write", "()", "to", "adapt", "return", "value", "for", "Python", "2", "." ]
python
train
29.333333
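The wrapper above exists because Python 2 file objects return None from write() while io streams report the number of characters written. Below is a standalone illustration of the same pattern; it is not pyfakefs API, just the idea.

import io
import sys

IS_PY2 = sys.version_info[0] == 2

def make_write_wrapper(stream):
    io_attr = stream.write

    def write_wrapper(*args, **kwargs):
        ret_value = io_attr(*args, **kwargs)
        if not IS_PY2:
            return ret_value   # Python 3 semantics: report characters written
        # Python 2 file.write() returns None, so the value is swallowed.

    return write_wrapper

buf = io.StringIO()
write = make_write_wrapper(buf)
print(write(u"hello"))   # 5 on Python 3, None under Python 2 semantics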
NicolasLM/spinach
spinach/job.py
https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/job.py#L152-L197
def advance_job_status(namespace: str, job: Job, duration: float, err: Optional[Exception]): """Advance the status of a job depending on its execution. This function is called after a job has been executed. It calculates its next status and calls the appropriate signals. """ duration = human_duration(duration) if not err: job.status = JobStatus.SUCCEEDED logger.info('Finished execution of %s in %s', job, duration) return if job.should_retry: job.status = JobStatus.NOT_SET job.retries += 1 if isinstance(err, RetryException) and err.at is not None: job.at = err.at else: job.at = (datetime.now(timezone.utc) + exponential_backoff(job.retries)) signals.job_schedule_retry.send(namespace, job=job, err=err) log_args = ( job.retries, job.max_retries + 1, job, duration, human_duration( (job.at - datetime.now(tz=timezone.utc)).total_seconds() ) ) if isinstance(err, RetryException): logger.info('Retry requested during execution %d/%d of %s ' 'after %s, retry in %s', *log_args) else: logger.warning('Error during execution %d/%d of %s after %s, ' 'retry in %s', *log_args) return job.status = JobStatus.FAILED signals.job_failed.send(namespace, job=job, err=err) logger.error( 'Error during execution %d/%d of %s after %s', job.max_retries + 1, job.max_retries + 1, job, duration, exc_info=err )
[ "def", "advance_job_status", "(", "namespace", ":", "str", ",", "job", ":", "Job", ",", "duration", ":", "float", ",", "err", ":", "Optional", "[", "Exception", "]", ")", ":", "duration", "=", "human_duration", "(", "duration", ")", "if", "not", "err", ":", "job", ".", "status", "=", "JobStatus", ".", "SUCCEEDED", "logger", ".", "info", "(", "'Finished execution of %s in %s'", ",", "job", ",", "duration", ")", "return", "if", "job", ".", "should_retry", ":", "job", ".", "status", "=", "JobStatus", ".", "NOT_SET", "job", ".", "retries", "+=", "1", "if", "isinstance", "(", "err", ",", "RetryException", ")", "and", "err", ".", "at", "is", "not", "None", ":", "job", ".", "at", "=", "err", ".", "at", "else", ":", "job", ".", "at", "=", "(", "datetime", ".", "now", "(", "timezone", ".", "utc", ")", "+", "exponential_backoff", "(", "job", ".", "retries", ")", ")", "signals", ".", "job_schedule_retry", ".", "send", "(", "namespace", ",", "job", "=", "job", ",", "err", "=", "err", ")", "log_args", "=", "(", "job", ".", "retries", ",", "job", ".", "max_retries", "+", "1", ",", "job", ",", "duration", ",", "human_duration", "(", "(", "job", ".", "at", "-", "datetime", ".", "now", "(", "tz", "=", "timezone", ".", "utc", ")", ")", ".", "total_seconds", "(", ")", ")", ")", "if", "isinstance", "(", "err", ",", "RetryException", ")", ":", "logger", ".", "info", "(", "'Retry requested during execution %d/%d of %s '", "'after %s, retry in %s'", ",", "*", "log_args", ")", "else", ":", "logger", ".", "warning", "(", "'Error during execution %d/%d of %s after %s, '", "'retry in %s'", ",", "*", "log_args", ")", "return", "job", ".", "status", "=", "JobStatus", ".", "FAILED", "signals", ".", "job_failed", ".", "send", "(", "namespace", ",", "job", "=", "job", ",", "err", "=", "err", ")", "logger", ".", "error", "(", "'Error during execution %d/%d of %s after %s'", ",", "job", ".", "max_retries", "+", "1", ",", "job", ".", "max_retries", "+", "1", ",", "job", ",", "duration", ",", "exc_info", "=", "err", ")" ]
Advance the status of a job depending on its execution. This function is called after a job has been executed. It calculates its next status and calls the appropriate signals.
[ "Advance", "the", "status", "of", "a", "job", "depending", "on", "its", "execution", "." ]
python
train
35.347826
pylp/pylp
pylp/utils/pipes.py
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/utils/pipes.py#L11-L15
def pipes(stream, *transformers): """Pipe several transformers end to end.""" for transformer in transformers: stream = stream.pipe(transformer) return stream
[ "def", "pipes", "(", "stream", ",", "*", "transformers", ")", ":", "for", "transformer", "in", "transformers", ":", "stream", "=", "stream", ".", "pipe", "(", "transformer", ")", "return", "stream" ]
Pipe several transformers end to end.
[ "Pipe", "several", "transformers", "end", "to", "end", "." ]
python
train
34.8
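pipes(stream, a, b) is shorthand for stream.pipe(a).pipe(b). A runnable toy example with a stand-in stream class (not part of pylp) makes the chaining visible.

from pylp.utils.pipes import pipes

class FakeStream:
    def __init__(self, label="src"):
        self.label = label

    def pipe(self, transformer):
        return FakeStream("%s|%s" % (self.label, transformer))

out = pipes(FakeStream(), "uglify", "concat")
print(out.label)   # src|uglify|concat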
saltstack/salt
salt/queues/pgjsonb_queue.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/queues/pgjsonb_queue.py#L211-L228
def delete(queue, items): ''' Delete an item or items from a queue ''' with _conn(commit=True) as cur: if isinstance(items, dict): cmd = str("""DELETE FROM {0} WHERE data = '{1}'""").format( # future lint: disable=blacklisted-function queue, salt.utils.json.dumps(items)) log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True if isinstance(items, list): items = [(salt.utils.json.dumps(el),) for el in items] cmd = 'DELETE FROM {0} WHERE data = %s'.format(queue) log.debug('SQL Query: %s', cmd) cur.executemany(cmd, items) return True
[ "def", "delete", "(", "queue", ",", "items", ")", ":", "with", "_conn", "(", "commit", "=", "True", ")", "as", "cur", ":", "if", "isinstance", "(", "items", ",", "dict", ")", ":", "cmd", "=", "str", "(", "\"\"\"DELETE FROM {0} WHERE data = '{1}'\"\"\"", ")", ".", "format", "(", "# future lint: disable=blacklisted-function", "queue", ",", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "items", ")", ")", "log", ".", "debug", "(", "'SQL Query: %s'", ",", "cmd", ")", "cur", ".", "execute", "(", "cmd", ")", "return", "True", "if", "isinstance", "(", "items", ",", "list", ")", ":", "items", "=", "[", "(", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "el", ")", ",", ")", "for", "el", "in", "items", "]", "cmd", "=", "'DELETE FROM {0} WHERE data = %s'", ".", "format", "(", "queue", ")", "log", ".", "debug", "(", "'SQL Query: %s'", ",", "cmd", ")", "cur", ".", "executemany", "(", "cmd", ",", "items", ")", "return", "True" ]
Delete an item or items from a queue
[ "Delete", "an", "item", "or", "items", "from", "a", "queue" ]
python
train
38.277778
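A sketch of direct use of the queue delete function; in practice it is reached through Salt's queue runner, and this assumes the master configuration already carries the PostgreSQL connection settings this module reads. The queue name and items are hypothetical.

from salt.queues import pgjsonb_queue

# Single item: matched and deleted by its JSON representation.
pgjsonb_queue.delete('myqueue', {'job': 'refresh', 'target': 'web01'})

# List of items: deleted with a single executemany() round trip.
pgjsonb_queue.delete('myqueue', [{'job': 'a'}, {'job': 'b'}])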
openspending/babbage
babbage/cube.py
https://github.com/openspending/babbage/blob/9e03efe62e0be0cceabafd4de2a09cb8ec794b92/babbage/cube.py#L60-L117
def aggregate(self, aggregates=None, drilldowns=None, cuts=None, order=None, page=None, page_size=None, page_max=None): """Main aggregation function. This is used to compute a given set of aggregates, grouped by a given set of drilldown dimensions (i.e. dividers). The query can also be filtered and sorted. """ def prep(cuts, drilldowns=False, aggregates=False, columns=None): q = select(columns) bindings = [] cuts, q, bindings = Cuts(self).apply(q, bindings, cuts) attributes = None if drilldowns is not False: attributes, q, bindings = Drilldowns(self).apply( q, bindings, drilldowns ) if aggregates is not False: aggregates, q, bindings = Aggregates(self).apply( q, bindings, aggregates ) q = self.restrict_joins(q, bindings) return q, bindings, attributes, aggregates, cuts # Count count = count_results(self, prep(cuts, drilldowns=drilldowns, columns=[1])[0]) # Summary summary = first_result(self, prep(cuts, aggregates=aggregates)[0].limit(1)) # Results q, bindings, attributes, aggregates, cuts = \ prep(cuts, drilldowns=drilldowns, aggregates=aggregates) page, q = Pagination(self).apply(q, page, page_size, page_max) ordering, q, bindings = Ordering(self).apply(q, bindings, order) q = self.restrict_joins(q, bindings) cells = list(generate_results(self, q)) return { 'total_cell_count': count, 'cells': cells, 'summary': summary, 'cell': cuts, 'aggregates': aggregates, 'attributes': attributes, 'order': ordering, 'page': page['page'], 'page_size': page['page_size'] }
[ "def", "aggregate", "(", "self", ",", "aggregates", "=", "None", ",", "drilldowns", "=", "None", ",", "cuts", "=", "None", ",", "order", "=", "None", ",", "page", "=", "None", ",", "page_size", "=", "None", ",", "page_max", "=", "None", ")", ":", "def", "prep", "(", "cuts", ",", "drilldowns", "=", "False", ",", "aggregates", "=", "False", ",", "columns", "=", "None", ")", ":", "q", "=", "select", "(", "columns", ")", "bindings", "=", "[", "]", "cuts", ",", "q", ",", "bindings", "=", "Cuts", "(", "self", ")", ".", "apply", "(", "q", ",", "bindings", ",", "cuts", ")", "attributes", "=", "None", "if", "drilldowns", "is", "not", "False", ":", "attributes", ",", "q", ",", "bindings", "=", "Drilldowns", "(", "self", ")", ".", "apply", "(", "q", ",", "bindings", ",", "drilldowns", ")", "if", "aggregates", "is", "not", "False", ":", "aggregates", ",", "q", ",", "bindings", "=", "Aggregates", "(", "self", ")", ".", "apply", "(", "q", ",", "bindings", ",", "aggregates", ")", "q", "=", "self", ".", "restrict_joins", "(", "q", ",", "bindings", ")", "return", "q", ",", "bindings", ",", "attributes", ",", "aggregates", ",", "cuts", "# Count", "count", "=", "count_results", "(", "self", ",", "prep", "(", "cuts", ",", "drilldowns", "=", "drilldowns", ",", "columns", "=", "[", "1", "]", ")", "[", "0", "]", ")", "# Summary", "summary", "=", "first_result", "(", "self", ",", "prep", "(", "cuts", ",", "aggregates", "=", "aggregates", ")", "[", "0", "]", ".", "limit", "(", "1", ")", ")", "# Results", "q", ",", "bindings", ",", "attributes", ",", "aggregates", ",", "cuts", "=", "prep", "(", "cuts", ",", "drilldowns", "=", "drilldowns", ",", "aggregates", "=", "aggregates", ")", "page", ",", "q", "=", "Pagination", "(", "self", ")", ".", "apply", "(", "q", ",", "page", ",", "page_size", ",", "page_max", ")", "ordering", ",", "q", ",", "bindings", "=", "Ordering", "(", "self", ")", ".", "apply", "(", "q", ",", "bindings", ",", "order", ")", "q", "=", "self", ".", "restrict_joins", "(", "q", ",", "bindings", ")", "cells", "=", "list", "(", "generate_results", "(", "self", ",", "q", ")", ")", "return", "{", "'total_cell_count'", ":", "count", ",", "'cells'", ":", "cells", ",", "'summary'", ":", "summary", ",", "'cell'", ":", "cuts", ",", "'aggregates'", ":", "aggregates", ",", "'attributes'", ":", "attributes", ",", "'order'", ":", "ordering", ",", "'page'", ":", "page", "[", "'page'", "]", ",", "'page_size'", ":", "page", "[", "'page_size'", "]", "}" ]
Main aggregation function. This is used to compute a given set of aggregates, grouped by a given set of drilldown dimensions (i.e. dividers). The query can also be filtered and sorted.
[ "Main", "aggregation", "function", ".", "This", "is", "used", "to", "compute", "a", "given", "set", "of", "aggregates", "grouped", "by", "a", "given", "set", "of", "drilldown", "dimensions", "(", "i", ".", "e", ".", "dividers", ")", ".", "The", "query", "can", "also", "be", "filtered", "and", "sorted", "." ]
python
train
36.275862
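A usage sketch of aggregate() against a hypothetical cube with an amount measure and a year dimension; cube is assumed to be an existing babbage.cube.Cube instance, and the ref strings follow babbage's measure.aggregate / dimension:value conventions.

# cube: an existing babbage.cube.Cube bound to an SQL engine and model
result = cube.aggregate(
    aggregates=['amount.sum'],
    drilldowns=['year'],
    cuts=['year:2017'],
    order='amount.sum:desc',
    page=1,
    page_size=50,
)

print(result['summary'])            # totals for the filtered cell
print(result['total_cell_count'])   # number of drilldown rows overall
for cell in result['cells']:        # one dict per year in this page
    print(cell)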
wummel/linkchecker
linkcheck/checker/httpurl.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/httpurl.py#L105-L116
def add_size_info (self): """Get size of URL content from HTTP header.""" if self.headers and "Content-Length" in self.headers and \ "Transfer-Encoding" not in self.headers: # Note that content-encoding causes size differences since # the content data is always decoded. try: self.size = int(self.getheader("Content-Length")) except (ValueError, OverflowError): pass else: self.size = -1
[ "def", "add_size_info", "(", "self", ")", ":", "if", "self", ".", "headers", "and", "\"Content-Length\"", "in", "self", ".", "headers", "and", "\"Transfer-Encoding\"", "not", "in", "self", ".", "headers", ":", "# Note that content-encoding causes size differences since", "# the content data is always decoded.", "try", ":", "self", ".", "size", "=", "int", "(", "self", ".", "getheader", "(", "\"Content-Length\"", ")", ")", "except", "(", "ValueError", ",", "OverflowError", ")", ":", "pass", "else", ":", "self", ".", "size", "=", "-", "1" ]
Get size of URL content from HTTP header.
[ "Get", "size", "of", "URL", "content", "from", "HTTP", "header", "." ]
python
train
41.916667
dfm/george
george/modeling.py
https://github.com/dfm/george/blob/44819680036387625ee89f81c55104f3c1600759/george/modeling.py#L176-L188
def get_parameter_dict(self, include_frozen=False): """ Get an ordered dictionary of the parameters Args: include_frozen (Optional[bool]): Should the frozen parameters be included in the returned value? (default: ``False``) """ return OrderedDict(zip( self.get_parameter_names(include_frozen=include_frozen), self.get_parameter_vector(include_frozen=include_frozen), ))
[ "def", "get_parameter_dict", "(", "self", ",", "include_frozen", "=", "False", ")", ":", "return", "OrderedDict", "(", "zip", "(", "self", ".", "get_parameter_names", "(", "include_frozen", "=", "include_frozen", ")", ",", "self", ".", "get_parameter_vector", "(", "include_frozen", "=", "include_frozen", ")", ",", ")", ")" ]
Get an ordered dictionary of the parameters Args: include_frozen (Optional[bool]): Should the frozen parameters be included in the returned value? (default: ``False``)
[ "Get", "an", "ordered", "dictionary", "of", "the", "parameters" ]
python
train
35.307692
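A short example of get_parameter_dict with a composite george kernel (kernels are Models, so they expose the same helpers); the parameter names shown in the comments are indicative and can differ between george versions.

from george import kernels

kernel = 2.0 * kernels.ExpSquaredKernel(metric=1.0)
print(kernel.get_parameter_dict())
# e.g. OrderedDict([('k1:log_constant', 0.693...), ('k2:metric:log_M_0_0', 0.0)])

# Frozen parameters are skipped unless explicitly included.
kernel.freeze_parameter('k1:log_constant')
print(kernel.get_parameter_dict())                     # only the metric parameter
print(kernel.get_parameter_dict(include_frozen=True))  # both again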
cokelaer/spectrum
src/spectrum/mtm.py
https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/src/spectrum/mtm.py#L359-L418
def _other_dpss_method(N, NW, Kmax): """Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1] for a given frequency-spacing multiple NW and sequence length N. See dpss function that is the official version. This version is indepedant of the C code and relies on Scipy function. However, it is slower by a factor 3 Tridiagonal form of DPSS calculation from: """ # here we want to set up an optimization problem to find a sequence # whose energy is maximally concentrated within band [-W,W]. # Thus, the measure lambda(T,W) is the ratio between the energy within # that band, and the total energy. This leads to the eigen-system # (A - (l1)I)v = 0, where the eigenvector corresponding to the largest # eigenvalue is the sequence with maximally concentrated energy. The # collection of eigenvectors of this system are called Slepian sequences, # or discrete prolate spheroidal sequences (DPSS). Only the first K, # K = 2NW/dt orders of DPSS will exhibit good spectral concentration # [see http://en.wikipedia.org/wiki/Spectral_concentration_problem] # Here I set up an alternative symmetric tri-diagonal eigenvalue problem # such that # (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1) # the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1] # and the first off-diangonal = t(N-t)/2, t=[1,2,...,N-1] # [see Percival and Walden, 1993] from scipy import linalg as la Kmax = int(Kmax) W = float(NW)/N ab = np.zeros((2,N), 'd') nidx = np.arange(N) ab[0,1:] = nidx[1:]*(N-nidx[1:])/2. ab[1] = ((N-1-2*nidx)/2.)**2 * np.cos(2*np.pi*W) # only calculate the highest Kmax-1 eigenvectors l,v = la.eig_banded(ab, select='i', select_range=(N-Kmax, N-1)) dpss = v.transpose()[::-1] # By convention (Percival and Walden, 1993 pg 379) # * symmetric tapers (k=0,2,4,...) should have a positive average. # * antisymmetric tapers should begin with a positive lobe fix_symmetric = (dpss[0::2].sum(axis=1) < 0) for i, f in enumerate(fix_symmetric): if f: dpss[2*i] *= -1 fix_skew = (dpss[1::2,1] < 0) for i, f in enumerate(fix_skew): if f: dpss[2*i+1] *= -1 # Now find the eigenvalues of the original # Use the autocovariance sequence technique from Percival and Walden, 1993 # pg 390 # XXX : why debias false? it's all messed up o.w., even with means # on the order of 1e-2 acvs = _autocov(dpss, debias=False) * N r = 4*W*np.sinc(2*W*nidx) r[0] = 2*W eigvals = np.dot(acvs, r) return dpss, eigvals
[ "def", "_other_dpss_method", "(", "N", ",", "NW", ",", "Kmax", ")", ":", "# here we want to set up an optimization problem to find a sequence", "# whose energy is maximally concentrated within band [-W,W].", "# Thus, the measure lambda(T,W) is the ratio between the energy within", "# that band, and the total energy. This leads to the eigen-system", "# (A - (l1)I)v = 0, where the eigenvector corresponding to the largest", "# eigenvalue is the sequence with maximally concentrated energy. The", "# collection of eigenvectors of this system are called Slepian sequences,", "# or discrete prolate spheroidal sequences (DPSS). Only the first K,", "# K = 2NW/dt orders of DPSS will exhibit good spectral concentration", "# [see http://en.wikipedia.org/wiki/Spectral_concentration_problem]", "# Here I set up an alternative symmetric tri-diagonal eigenvalue problem", "# such that", "# (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)", "# the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]", "# and the first off-diangonal = t(N-t)/2, t=[1,2,...,N-1]", "# [see Percival and Walden, 1993]", "from", "scipy", "import", "linalg", "as", "la", "Kmax", "=", "int", "(", "Kmax", ")", "W", "=", "float", "(", "NW", ")", "/", "N", "ab", "=", "np", ".", "zeros", "(", "(", "2", ",", "N", ")", ",", "'d'", ")", "nidx", "=", "np", ".", "arange", "(", "N", ")", "ab", "[", "0", ",", "1", ":", "]", "=", "nidx", "[", "1", ":", "]", "*", "(", "N", "-", "nidx", "[", "1", ":", "]", ")", "/", "2.", "ab", "[", "1", "]", "=", "(", "(", "N", "-", "1", "-", "2", "*", "nidx", ")", "/", "2.", ")", "**", "2", "*", "np", ".", "cos", "(", "2", "*", "np", ".", "pi", "*", "W", ")", "# only calculate the highest Kmax-1 eigenvectors", "l", ",", "v", "=", "la", ".", "eig_banded", "(", "ab", ",", "select", "=", "'i'", ",", "select_range", "=", "(", "N", "-", "Kmax", ",", "N", "-", "1", ")", ")", "dpss", "=", "v", ".", "transpose", "(", ")", "[", ":", ":", "-", "1", "]", "# By convention (Percival and Walden, 1993 pg 379)", "# * symmetric tapers (k=0,2,4,...) should have a positive average.", "# * antisymmetric tapers should begin with a positive lobe", "fix_symmetric", "=", "(", "dpss", "[", "0", ":", ":", "2", "]", ".", "sum", "(", "axis", "=", "1", ")", "<", "0", ")", "for", "i", ",", "f", "in", "enumerate", "(", "fix_symmetric", ")", ":", "if", "f", ":", "dpss", "[", "2", "*", "i", "]", "*=", "-", "1", "fix_skew", "=", "(", "dpss", "[", "1", ":", ":", "2", ",", "1", "]", "<", "0", ")", "for", "i", ",", "f", "in", "enumerate", "(", "fix_skew", ")", ":", "if", "f", ":", "dpss", "[", "2", "*", "i", "+", "1", "]", "*=", "-", "1", "# Now find the eigenvalues of the original", "# Use the autocovariance sequence technique from Percival and Walden, 1993", "# pg 390", "# XXX : why debias false? it's all messed up o.w., even with means", "# on the order of 1e-2", "acvs", "=", "_autocov", "(", "dpss", ",", "debias", "=", "False", ")", "*", "N", "r", "=", "4", "*", "W", "*", "np", ".", "sinc", "(", "2", "*", "W", "*", "nidx", ")", "r", "[", "0", "]", "=", "2", "*", "W", "eigvals", "=", "np", ".", "dot", "(", "acvs", ",", "r", ")", "return", "dpss", ",", "eigvals" ]
Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1] for a given frequency-spacing multiple NW and sequence length N. See the dpss function, which is the official version. This version is independent of the C code and relies on a Scipy function. However, it is slower by a factor of 3 Tridiagonal form of DPSS calculation from:
[ "Returns", "the", "Discrete", "Prolate", "Spheroidal", "Sequences", "of", "orders", "[", "0", "Kmax", "-", "1", "]", "for", "a", "given", "frequency", "-", "spacing", "multiple", "NW", "and", "sequence", "length", "N", "." ]
python
valid
43.366667
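A quick check of the values _other_dpss_method returns; N, NW and Kmax are illustrative, and the public dpss function in the same module remains the recommended entry point (this variant is the slower SciPy-only cross-check).

from spectrum.mtm import _other_dpss_method

tapers, eigvals = _other_dpss_method(N=512, NW=2.5, Kmax=4)
print(tapers.shape)   # (4, 512): one Slepian taper per row
print(eigvals)        # energy concentrations, close to 1 for the first ~2*NW tapers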
quikmile/trellio
trellio/host.py
https://github.com/quikmile/trellio/blob/e8b050077562acf32805fcbb9c0c162248a23c62/trellio/host.py#L130-L138
def attach_http_service(cls, http_service: HTTPService): """ Attaches a service for hosting :param http_service: A HTTPService instance """ if cls._http_service is None: cls._http_service = http_service cls._set_bus(http_service) else: warnings.warn('HTTP service is already attached')
[ "def", "attach_http_service", "(", "cls", ",", "http_service", ":", "HTTPService", ")", ":", "if", "cls", ".", "_http_service", "is", "None", ":", "cls", ".", "_http_service", "=", "http_service", "cls", ".", "_set_bus", "(", "http_service", ")", "else", ":", "warnings", ".", "warn", "(", "'HTTP service is already attached'", ")" ]
Attaches a service for hosting :param http_service: A HTTPService instance
[ "Attaches", "a", "service", "for", "hosting", ":", "param", "http_service", ":", "A", "HTTPService", "instance" ]
python
train
39.222222
cackharot/suds-py3
suds/reader.py
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/reader.py#L134-L157
def open(self, url): """ Open a WSDL at the specified I{url}. First, the WSDL attempted to be retrieved from the I{object cache}. After unpickled from the cache, the I{options} attribute is restored. If not found, it is downloaded and instantiated using the I{fn} constructor and added to the cache for the next open(). @param url: A WSDL url. @type url: str. @return: The WSDL object. @rtype: I{Definitions} """ cache = self.cache() id = self.mangle(url, 'wsdl') d = cache.get(id) if d is None: d = self.fn(url, self.options) cache.put(id, d) else: d.options = self.options for imp in d.imports: imp.imported.options = self.options return d
[ "def", "open", "(", "self", ",", "url", ")", ":", "cache", "=", "self", ".", "cache", "(", ")", "id", "=", "self", ".", "mangle", "(", "url", ",", "'wsdl'", ")", "d", "=", "cache", ".", "get", "(", "id", ")", "if", "d", "is", "None", ":", "d", "=", "self", ".", "fn", "(", "url", ",", "self", ".", "options", ")", "cache", ".", "put", "(", "id", ",", "d", ")", "else", ":", "d", ".", "options", "=", "self", ".", "options", "for", "imp", "in", "d", ".", "imports", ":", "imp", ".", "imported", ".", "options", "=", "self", ".", "options", "return", "d" ]
Open a WSDL at the specified I{url}. First, the WSDL attempted to be retrieved from the I{object cache}. After unpickled from the cache, the I{options} attribute is restored. If not found, it is downloaded and instantiated using the I{fn} constructor and added to the cache for the next open(). @param url: A WSDL url. @type url: str. @return: The WSDL object. @rtype: I{Definitions}
[ "Open", "a", "WSDL", "at", "the", "specified", "I", "{", "url", "}", ".", "First", "the", "WSDL", "attempted", "to", "be", "retrieved", "from", "the", "I", "{", "object", "cache", "}", ".", "After", "unpickled", "from", "the", "cache", "the", "I", "{", "options", "}", "attribute", "is", "restored", ".", "If", "not", "found", "it", "is", "downloaded", "and", "instantiated", "using", "the", "I", "{", "fn", "}", "constructor", "and", "added", "to", "the", "cache", "for", "the", "next", "open", "()", "." ]
python
train
34.458333
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L2169-L2258
def parse_sections(self, offset): """Fetch the PE file sections. The sections will be readily available in the "sections" attribute. Its attributes will contain all the section information plus "data" a buffer containing the section's data. The "Characteristics" member will be processed and attributes representing the section characteristics (with the 'IMAGE_SCN_' string trimmed from the constant's names) will be added to the section instance. Refer to the SectionStructure class for additional info. """ self.sections = [] for i in xrange(self.FILE_HEADER.NumberOfSections): section = SectionStructure( self.__IMAGE_SECTION_HEADER_format__, pe=self ) if not section: break section_offset = offset + section.sizeof() * i section.set_file_offset(section_offset) section.__unpack__(self.__data__[section_offset : section_offset + section.sizeof()]) self.__structures__.append(section) if section.SizeOfRawData > len(self.__data__): self.__warnings.append( ('Error parsing section %d. ' % i) + 'SizeOfRawData is larger than file.') if adjust_FileAlignment( section.PointerToRawData, self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__): self.__warnings.append( ('Error parsing section %d. ' % i) + 'PointerToRawData points beyond the end of the file.') if section.Misc_VirtualSize > 0x10000000: self.__warnings.append( ('Suspicious value found parsing section %d. ' % i) + 'VirtualSize is extremely large > 256MiB.') if adjust_SectionAlignment( section.VirtualAddress, self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) > 0x10000000: self.__warnings.append( ('Suspicious value found parsing section %d. ' % i) + 'VirtualAddress is beyond 0x10000000.') # # Some packer used a non-aligned PointerToRawData in the sections, # which causes several common tools not to load the section data # properly as they blindly read from the indicated offset. # It seems that Windows will round the offset down to the largest # offset multiple of FileAlignment which is smaller than # PointerToRawData. The following code will do the same. # #alignment = self.OPTIONAL_HEADER.FileAlignment #self.update_section_data(section) if ( self.OPTIONAL_HEADER.FileAlignment != 0 and ( section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0): self.__warnings.append( ('Error parsing section %d. ' % i) + 'Suspicious value for FileAlignment in the Optional Header. ' + 'Normally the PointerToRawData entry of the sections\' structures ' + 'is a multiple of FileAlignment, this might imply the file ' + 'is trying to confuse tools which parse this incorrectly') section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_') # Set the section's flags according the the Characteristics member set_flags(section, section.Characteristics, section_flags) if ( section.__dict__.get('IMAGE_SCN_MEM_WRITE', False) and section.__dict__.get('IMAGE_SCN_MEM_EXECUTE', False) ): self.__warnings.append( ('Suspicious flags set for section %d. ' % i) + 'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set. ' + 'This might indicate a packed executable.') self.sections.append(section) if self.FILE_HEADER.NumberOfSections > 0 and self.sections: return offset + self.sections[0].sizeof()*self.FILE_HEADER.NumberOfSections else: return offset
[ "def", "parse_sections", "(", "self", ",", "offset", ")", ":", "self", ".", "sections", "=", "[", "]", "for", "i", "in", "xrange", "(", "self", ".", "FILE_HEADER", ".", "NumberOfSections", ")", ":", "section", "=", "SectionStructure", "(", "self", ".", "__IMAGE_SECTION_HEADER_format__", ",", "pe", "=", "self", ")", "if", "not", "section", ":", "break", "section_offset", "=", "offset", "+", "section", ".", "sizeof", "(", ")", "*", "i", "section", ".", "set_file_offset", "(", "section_offset", ")", "section", ".", "__unpack__", "(", "self", ".", "__data__", "[", "section_offset", ":", "section_offset", "+", "section", ".", "sizeof", "(", ")", "]", ")", "self", ".", "__structures__", ".", "append", "(", "section", ")", "if", "section", ".", "SizeOfRawData", ">", "len", "(", "self", ".", "__data__", ")", ":", "self", ".", "__warnings", ".", "append", "(", "(", "'Error parsing section %d. '", "%", "i", ")", "+", "'SizeOfRawData is larger than file.'", ")", "if", "adjust_FileAlignment", "(", "section", ".", "PointerToRawData", ",", "self", ".", "OPTIONAL_HEADER", ".", "FileAlignment", ")", ">", "len", "(", "self", ".", "__data__", ")", ":", "self", ".", "__warnings", ".", "append", "(", "(", "'Error parsing section %d. '", "%", "i", ")", "+", "'PointerToRawData points beyond the end of the file.'", ")", "if", "section", ".", "Misc_VirtualSize", ">", "0x10000000", ":", "self", ".", "__warnings", ".", "append", "(", "(", "'Suspicious value found parsing section %d. '", "%", "i", ")", "+", "'VirtualSize is extremely large > 256MiB.'", ")", "if", "adjust_SectionAlignment", "(", "section", ".", "VirtualAddress", ",", "self", ".", "OPTIONAL_HEADER", ".", "SectionAlignment", ",", "self", ".", "OPTIONAL_HEADER", ".", "FileAlignment", ")", ">", "0x10000000", ":", "self", ".", "__warnings", ".", "append", "(", "(", "'Suspicious value found parsing section %d. '", "%", "i", ")", "+", "'VirtualAddress is beyond 0x10000000.'", ")", "#", "# Some packer used a non-aligned PointerToRawData in the sections,", "# which causes several common tools not to load the section data", "# properly as they blindly read from the indicated offset.", "# It seems that Windows will round the offset down to the largest", "# offset multiple of FileAlignment which is smaller than", "# PointerToRawData. The following code will do the same.", "#", "#alignment = self.OPTIONAL_HEADER.FileAlignment", "#self.update_section_data(section)", "if", "(", "self", ".", "OPTIONAL_HEADER", ".", "FileAlignment", "!=", "0", "and", "(", "section", ".", "PointerToRawData", "%", "self", ".", "OPTIONAL_HEADER", ".", "FileAlignment", ")", "!=", "0", ")", ":", "self", ".", "__warnings", ".", "append", "(", "(", "'Error parsing section %d. '", "%", "i", ")", "+", "'Suspicious value for FileAlignment in the Optional Header. 
'", "+", "'Normally the PointerToRawData entry of the sections\\' structures '", "+", "'is a multiple of FileAlignment, this might imply the file '", "+", "'is trying to confuse tools which parse this incorrectly'", ")", "section_flags", "=", "retrieve_flags", "(", "SECTION_CHARACTERISTICS", ",", "'IMAGE_SCN_'", ")", "# Set the section's flags according the the Characteristics member", "set_flags", "(", "section", ",", "section", ".", "Characteristics", ",", "section_flags", ")", "if", "(", "section", ".", "__dict__", ".", "get", "(", "'IMAGE_SCN_MEM_WRITE'", ",", "False", ")", "and", "section", ".", "__dict__", ".", "get", "(", "'IMAGE_SCN_MEM_EXECUTE'", ",", "False", ")", ")", ":", "self", ".", "__warnings", ".", "append", "(", "(", "'Suspicious flags set for section %d. '", "%", "i", ")", "+", "'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set. '", "+", "'This might indicate a packed executable.'", ")", "self", ".", "sections", ".", "append", "(", "section", ")", "if", "self", ".", "FILE_HEADER", ".", "NumberOfSections", ">", "0", "and", "self", ".", "sections", ":", "return", "offset", "+", "self", ".", "sections", "[", "0", "]", ".", "sizeof", "(", ")", "*", "self", ".", "FILE_HEADER", ".", "NumberOfSections", "else", ":", "return", "offset" ]
Fetch the PE file sections. The sections will be readily available in the "sections" attribute. Its attributes will contain all the section information plus "data" a buffer containing the section's data. The "Characteristics" member will be processed and attributes representing the section characteristics (with the 'IMAGE_SCN_' string trimmed from the constant's names) will be added to the section instance. Refer to the SectionStructure class for additional info.
[ "Fetch", "the", "PE", "file", "sections", ".", "The", "sections", "will", "be", "readily", "available", "in", "the", "sections", "attribute", ".", "Its", "attributes", "will", "contain", "all", "the", "section", "information", "plus", "data", "a", "buffer", "containing", "the", "section", "s", "data", ".", "The", "Characteristics", "member", "will", "be", "processed", "and", "attributes", "representing", "the", "section", "characteristics", "(", "with", "the", "IMAGE_SCN_", "string", "trimmed", "from", "the", "constant", "s", "names", ")", "will", "be", "added", "to", "the", "section", "instance", ".", "Refer", "to", "the", "SectionStructure", "class", "for", "additional", "info", "." ]
python
train
48.422222
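parse_sections is an internal step of PE parsing; a minimal sketch of how its results surface through the public pefile API (the input path is an assumption, not part of the record):

import pefile

pe = pefile.PE("sample.exe")  # hypothetical input; parsing also populates pe.sections
for section in pe.sections:
    # flags derived from Characteristics (e.g. IMAGE_SCN_MEM_EXECUTE) are attached as attributes
    print(section.Name.rstrip(b"\x00"), hex(section.VirtualAddress), section.SizeOfRawData)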
glottobank/python-newick
src/newick.py
https://github.com/glottobank/python-newick/blob/e8d4d1e4610f271d0f0e5cb86c0e0360b43bd702/src/newick.py#L209-L222
def visit(self, visitor, predicate=None, **kw):
        """
        Apply a function to matching nodes in the (sub)tree rooted at self.

        :param visitor: A callable accepting a Node object as single argument.
        :param predicate: A callable accepting a Node object as single argument and \
        returning a boolean signaling whether Node matches; if `None` all nodes match.
        :param kw: Additional keyword arguments are passed through to self.walk.
        """
        predicate = predicate or bool
        for n in self.walk(**kw):
            if predicate(n):
                visitor(n)
[ "def", "visit", "(", "self", ",", "visitor", ",", "predicate", "=", "None", ",", "*", "*", "kw", ")", ":", "predicate", "=", "predicate", "or", "bool", "for", "n", "in", "self", ".", "walk", "(", "*", "*", "kw", ")", ":", "if", "predicate", "(", "n", ")", ":", "visitor", "(", "n", ")" ]
Apply a function to matching nodes in the (sub)tree rooted at self.

        :param visitor: A callable accepting a Node object as single argument.
        :param predicate: A callable accepting a Node object as single argument and \
        returning a boolean signaling whether Node matches; if `None` all nodes match.
        :param kw: Additional keyword arguments are passed through to self.walk.
[ "Apply", "a", "function", "to", "matching", "nodes", "in", "the", "(", "sub", ")", "tree", "rooted", "at", "self", "." ]
python
test
42.642857
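A short usage sketch for visit, assuming the newick package's loads parser; the tree string and collected names are illustrative:

from newick import loads

tree = loads("(A,(B,C)D)E;")[0]  # loads returns a list of root nodes
names = []
tree.visit(lambda n: names.append(n.name),
           predicate=lambda n: n.name is not None)
print(names)  # node names collected in walk order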
apache/incubator-mxnet
example/reinforcement-learning/dqn/utils.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/reinforcement-learning/dqn/utils.py#L133-L154
def sample_categorical(prob, rng): """Sample from independent categorical distributions Each batch is an independent categorical distribution. Parameters ---------- prob : numpy.ndarray Probability of the categorical distribution. Shape --> (batch_num, category_num) rng : numpy.random.RandomState Returns ------- ret : numpy.ndarray Sampling result. Shape --> (batch_num,) """ ret = numpy.empty(prob.shape[0], dtype=numpy.float32) for ind in range(prob.shape[0]): ret[ind] = numpy.searchsorted(numpy.cumsum(prob[ind]), rng.rand()).clip(min=0.0, max=prob.shape[ 1] - 0.5) return ret
[ "def", "sample_categorical", "(", "prob", ",", "rng", ")", ":", "ret", "=", "numpy", ".", "empty", "(", "prob", ".", "shape", "[", "0", "]", ",", "dtype", "=", "numpy", ".", "float32", ")", "for", "ind", "in", "range", "(", "prob", ".", "shape", "[", "0", "]", ")", ":", "ret", "[", "ind", "]", "=", "numpy", ".", "searchsorted", "(", "numpy", ".", "cumsum", "(", "prob", "[", "ind", "]", ")", ",", "rng", ".", "rand", "(", ")", ")", ".", "clip", "(", "min", "=", "0.0", ",", "max", "=", "prob", ".", "shape", "[", "1", "]", "-", "0.5", ")", "return", "ret" ]
Sample from independent categorical distributions Each batch is an independent categorical distribution. Parameters ---------- prob : numpy.ndarray Probability of the categorical distribution. Shape --> (batch_num, category_num) rng : numpy.random.RandomState Returns ------- ret : numpy.ndarray Sampling result. Shape --> (batch_num,)
[ "Sample", "from", "independent", "categorical", "distributions" ]
python
train
36.545455
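A quick sketch of calling sample_categorical as defined above; the probability rows are illustrative:

import numpy

rng = numpy.random.RandomState(0)
prob = numpy.array([[0.2, 0.5, 0.3],
                    [0.9, 0.05, 0.05]])
draws = sample_categorical(prob, rng)  # one sampled category index (stored as float32) per row
print(draws.shape)  # (2,)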
Erotemic/utool
utool/util_arg.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_arg.py#L48-L62
def get_module_verbosity_flags(*labels):
    """ checks for standard flags for enabling module specific verbosity """
    verbose_prefix_list = ['--verbose-', '--verb', '--verb-']
    veryverbose_prefix_list = ['--veryverbose-', '--veryverb', '--veryverb-']
    verbose_flags = tuple(
        [prefix + lbl for prefix, lbl in itertools.product(verbose_prefix_list, labels)])
    veryverbose_flags = tuple(
        [prefix + lbl for prefix, lbl in itertools.product(veryverbose_prefix_list, labels)])
    veryverbose_module = get_argflag(veryverbose_flags) or VERYVERBOSE
    verbose_module = (get_argflag(verbose_flags) or veryverbose_module or VERBOSE)
    if veryverbose_module:
        verbose_module = 2
    return verbose_module, veryverbose_module
[ "def", "get_module_verbosity_flags", "(", "*", "labels", ")", ":", "verbose_prefix_list", "=", "[", "'--verbose-'", ",", "'--verb'", ",", "'--verb-'", "]", "veryverbose_prefix_list", "=", "[", "'--veryverbose-'", ",", "'--veryverb'", ",", "'--veryverb-'", "]", "verbose_flags", "=", "tuple", "(", "[", "prefix", "+", "lbl", "for", "prefix", ",", "lbl", "in", "itertools", ".", "product", "(", "verbose_prefix_list", ",", "labels", ")", "]", ")", "veryverbose_flags", "=", "tuple", "(", "[", "prefix", "+", "lbl", "for", "prefix", ",", "lbl", "in", "itertools", ".", "product", "(", "veryverbose_prefix_list", ",", "labels", ")", "]", ")", "veryverbose_module", "=", "get_argflag", "(", "veryverbose_flags", ")", "or", "VERYVERBOSE", "verbose_module", "=", "(", "get_argflag", "(", "verbose_flags", ")", "or", "veryverbose_module", "or", "VERBOSE", ")", "if", "veryverbose_module", ":", "verbose_module", "=", "2", "return", "verbose_module", ",", "veryverbose_module" ]
checks for standard flags for enabling module specific verbosity
[ "checks", "for", "standard", "flags", "for", "enableing", "module", "specific", "verbosity" ]
python
train
50.533333
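A hedged usage sketch; the 'sql' label is illustrative and the helper is assumed to be re-exported at the utool top level:

import utool as ut

# respects flags such as --verb-sql, --verbose-sql or --veryverb-sql on the command line
VERBOSE_SQL, VERYVERBOSE_SQL = ut.get_module_verbosity_flags('sql')
if VERBOSE_SQL:
    print('sql-specific verbosity enabled')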
tcalmant/ipopo
pelix/remote/beans.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/remote/beans.py#L655-L693
def from_export(cls, endpoint): # type: (ExportEndpoint) -> EndpointDescription """ Converts an ExportEndpoint bean to an EndpointDescription :param endpoint: An ExportEndpoint bean :return: An EndpointDescription bean """ assert isinstance(endpoint, ExportEndpoint) # Service properties properties = endpoint.get_properties() # Set import keys properties[pelix.remote.PROP_ENDPOINT_ID] = endpoint.uid properties[pelix.remote.PROP_IMPORTED_CONFIGS] = endpoint.configurations properties[ pelix.remote.PROP_EXPORTED_INTERFACES ] = endpoint.specifications # Remove export keys for key in ( pelix.remote.PROP_EXPORTED_CONFIGS, pelix.remote.PROP_EXPORTED_INTERFACES, pelix.remote.PROP_EXPORTED_INTENTS, pelix.remote.PROP_EXPORTED_INTENTS_EXTRA, ): try: del properties[key] except KeyError: pass # Other information properties[pelix.remote.PROP_ENDPOINT_NAME] = endpoint.name properties[ pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID ] = endpoint.framework return EndpointDescription(None, properties)
[ "def", "from_export", "(", "cls", ",", "endpoint", ")", ":", "# type: (ExportEndpoint) -> EndpointDescription", "assert", "isinstance", "(", "endpoint", ",", "ExportEndpoint", ")", "# Service properties", "properties", "=", "endpoint", ".", "get_properties", "(", ")", "# Set import keys", "properties", "[", "pelix", ".", "remote", ".", "PROP_ENDPOINT_ID", "]", "=", "endpoint", ".", "uid", "properties", "[", "pelix", ".", "remote", ".", "PROP_IMPORTED_CONFIGS", "]", "=", "endpoint", ".", "configurations", "properties", "[", "pelix", ".", "remote", ".", "PROP_EXPORTED_INTERFACES", "]", "=", "endpoint", ".", "specifications", "# Remove export keys", "for", "key", "in", "(", "pelix", ".", "remote", ".", "PROP_EXPORTED_CONFIGS", ",", "pelix", ".", "remote", ".", "PROP_EXPORTED_INTERFACES", ",", "pelix", ".", "remote", ".", "PROP_EXPORTED_INTENTS", ",", "pelix", ".", "remote", ".", "PROP_EXPORTED_INTENTS_EXTRA", ",", ")", ":", "try", ":", "del", "properties", "[", "key", "]", "except", "KeyError", ":", "pass", "# Other information", "properties", "[", "pelix", ".", "remote", ".", "PROP_ENDPOINT_NAME", "]", "=", "endpoint", ".", "name", "properties", "[", "pelix", ".", "remote", ".", "PROP_ENDPOINT_FRAMEWORK_UUID", "]", "=", "endpoint", ".", "framework", "return", "EndpointDescription", "(", "None", ",", "properties", ")" ]
Converts an ExportEndpoint bean to an EndpointDescription :param endpoint: An ExportEndpoint bean :return: An EndpointDescription bean
[ "Converts", "an", "ExportEndpoint", "bean", "to", "an", "EndpointDescription" ]
python
train
32.384615
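A minimal sketch of the conversion; export_endpoint is assumed to be an existing ExportEndpoint built elsewhere by the framework:

from pelix.remote.beans import EndpointDescription

description = EndpointDescription.from_export(export_endpoint)
# the result carries import-side properties (endpoint id, name, framework UUID, configurations)
print(description)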
ubccr/pinky
pinky/perception/aromaticity.py
https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/perception/aromaticity.py#L237-L264
def addHydrogens(molecule, usedPyroles=None): """(molecule) -> add implicit hydrogens to a molecule. If the atom has specified valences and the atom must be charged then a Valence Error is raised""" for atom in molecule.atoms: # if the atom has an explicit hcount, we can't set the # hcount if atom.has_explicit_hcount: atom.hcount = atom.explicit_hcount continue if atom.valences: for valence in atom.valences: hcount = max(0, int(valence - atom.sumBondOrders() + atom.charge)) if hcount >= 0: break else: if usedPyroles and not usedPyroles.has_key(atom.handle): #print atom.symbol, atom.valences, atom.hcount, atom.charge,\ # atom.sumBondOrders() #print [x.bondtype for x in atom.bonds] #print molecule.cansmiles() raise PinkyError("Valence error in atom %s"%molecule.atoms.index(atom)) pass #hcount = int(hcount) atom.hcount = hcount return molecule
[ "def", "addHydrogens", "(", "molecule", ",", "usedPyroles", "=", "None", ")", ":", "for", "atom", "in", "molecule", ".", "atoms", ":", "# if the atom has an explicit hcount, we can't set the", "# hcount", "if", "atom", ".", "has_explicit_hcount", ":", "atom", ".", "hcount", "=", "atom", ".", "explicit_hcount", "continue", "if", "atom", ".", "valences", ":", "for", "valence", "in", "atom", ".", "valences", ":", "hcount", "=", "max", "(", "0", ",", "int", "(", "valence", "-", "atom", ".", "sumBondOrders", "(", ")", "+", "atom", ".", "charge", ")", ")", "if", "hcount", ">=", "0", ":", "break", "else", ":", "if", "usedPyroles", "and", "not", "usedPyroles", ".", "has_key", "(", "atom", ".", "handle", ")", ":", "#print atom.symbol, atom.valences, atom.hcount, atom.charge,\\", "# atom.sumBondOrders()", "#print [x.bondtype for x in atom.bonds]", "#print molecule.cansmiles()", "raise", "PinkyError", "(", "\"Valence error in atom %s\"", "%", "molecule", ".", "atoms", ".", "index", "(", "atom", ")", ")", "pass", "#hcount = int(hcount)", "atom", ".", "hcount", "=", "hcount", "return", "molecule" ]
(molecule) -> add implicit hydrogens to a molecule. If the atom has specified valences and the atom must be charged then a Valence Error is raised
[ "(", "molecule", ")", "-", ">", "add", "implicit", "hydrogens", "to", "a", "molecule", ".", "If", "the", "atom", "has", "specified", "valences", "and", "the", "atom", "must", "be", "charged", "then", "a", "Valence", "Error", "is", "raised" ]
python
train
41.428571
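A hedged sketch of calling addHydrogens; 'molecule' is assumed to be an already-parsed pinky molecule object, since the parsing API is not part of this record:

molecule = addHydrogens(molecule)  # fills in implicit hydrogen counts and returns the molecule
for atom in molecule.atoms:
    print(atom.symbol, atom.hcount)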
StackStorm/pybind
pybind/slxos/v17r_1_01a/mpls_config/router/mpls/mpls_cmds_holder/policy/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/mpls_config/router/mpls/mpls_cmds_holder/policy/__init__.py#L349-L370
def _set_load_interval(self, v, load=False): """ Setter method for load_interval, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/load_interval (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_load_interval is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_load_interval() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'30..300']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(300), is_leaf=True, yang_name="load-interval", rest_name="load-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Polling interval for MPLS LSP traffic statistics', u'hidden': u'full', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """load_interval must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'30..300']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(300), is_leaf=True, yang_name="load-interval", rest_name="load-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Polling interval for MPLS LSP traffic statistics', u'hidden': u'full', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""", }) self.__load_interval = t if hasattr(self, '_set'): self._set()
[ "def", "_set_load_interval", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "RestrictedClassType", "(", "base_type", "=", "RestrictedClassType", "(", "base_type", "=", "long", ",", "restriction_dict", "=", "{", "'range'", ":", "[", "'0..4294967295'", "]", "}", ",", "int_size", "=", "32", ")", ",", "restriction_dict", "=", "{", "'range'", ":", "[", "u'30..300'", "]", "}", ")", ",", "default", "=", "RestrictedClassType", "(", "base_type", "=", "long", ",", "restriction_dict", "=", "{", "'range'", ":", "[", "'0..4294967295'", "]", "}", ",", "int_size", "=", "32", ")", "(", "300", ")", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"load-interval\"", ",", "rest_name", "=", "\"load-interval\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'cli-full-command'", ":", "None", ",", "u'info'", ":", "u'Polling interval for MPLS LSP traffic statistics'", ",", "u'hidden'", ":", "u'full'", ",", "u'cli-full-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-mpls'", ",", "defining_module", "=", "'brocade-mpls'", ",", "yang_type", "=", "'uint32'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"load_interval must be of a type compatible with uint32\"\"\"", ",", "'defined-type'", ":", "\"uint32\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'30..300']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(300), is_leaf=True, yang_name=\"load-interval\", rest_name=\"load-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Polling interval for MPLS LSP traffic statistics', u'hidden': u'full', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__load_interval", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for load_interval, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/load_interval (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_load_interval is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_load_interval() directly.
[ "Setter", "method", "for", "load_interval", "mapped", "from", "YANG", "variable", "/", "mpls_config", "/", "router", "/", "mpls", "/", "mpls_cmds_holder", "/", "policy", "/", "load_interval", "(", "uint32", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_load_interval", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_load_interval", "()", "directly", "." ]
python
train
103.818182
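Going only by the docstring above, a hedged sketch of populating the leaf; 'policy' stands for an instance of the generated class, and the attribute name mirrors the yang_name:

# attribute assignment is expected to dispatch to _set_load_interval();
# values outside the YANG range 30..300 should raise ValueError
policy.load_interval = 120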
SkyLothar/shcmd
shcmd/cmd.py
https://github.com/SkyLothar/shcmd/blob/d8cad6311a4da7ef09f3419c86b58e30388b7ee3/shcmd/cmd.py#L31-L50
def cd_to(path, mkdir=False):
    """make a generator like cd, but use it as a function decorator

    Usage::

        >>> @cd_to("/")
        ... def say_where():
        ...     print(os.getcwd())
        ...
        >>> say_where()
        /
    """
    def cd_to_decorator(func):
        @functools.wraps(func)
        def _cd_and_exec(*args, **kwargs):
            with cd(path, mkdir):
                return func(*args, **kwargs)
        return _cd_and_exec
    return cd_to_decorator
[ "def", "cd_to", "(", "path", ",", "mkdir", "=", "False", ")", ":", "def", "cd_to_decorator", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "_cd_and_exec", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "cd", "(", "path", ",", "mkdir", ")", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_cd_and_exec", "return", "cd_to_decorator" ]
make a generator like cd, but use it as a function decorator

    Usage::

        >>> @cd_to("/")
        ... def say_where():
        ...     print(os.getcwd())
        ...
        >>> say_where()
        /
[ "make", "a", "generator", "like", "cd", "but", "use", "it", "for", "function" ]
python
train
23.15
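Beyond the doctest already in the docstring, a sketch of the mkdir flag; the path is illustrative and mkdir=True is assumed to create the directory when it is missing:

import os

@cd_to("/tmp/build_area", mkdir=True)
def build():
    print("working in", os.getcwd())

build()  # runs inside the target directory, then the previous cwd is restored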
emory-libraries/eulfedora
eulfedora/api.py
https://github.com/emory-libraries/eulfedora/blob/161826f3fdcdab4007f6fa7dfd9f1ecabc4bcbe4/eulfedora/api.py#L1067-L1075
def sparql_query(self, query, flush=None, limit=None):
        """
        Run a Sparql query.

        :param query: sparql query string
        :rtype: list of dictionaries
        """
        return self.find_statements(query, language='sparql', type='tuples',
                                    flush=flush, limit=limit)
[ "def", "sparql_query", "(", "self", ",", "query", ",", "flush", "=", "None", ",", "limit", "=", "None", ")", ":", "return", "self", ".", "find_statements", "(", "query", ",", "language", "=", "'sparql'", ",", "type", "=", "'tuples'", ",", "flush", "=", "flush", ",", "limit", "=", "limit", ")" ]
Run a Sparql query.

        :param query: sparql query string
        :rtype: list of dictionaries
[ "Run", "a", "Sparql", "query", "." ]
python
train
32.333333
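A hedged sketch of calling sparql_query; 'risearch' stands in for an instance of the class that defines it, and the query itself is illustrative:

query = ("SELECT ?obj WHERE { ?obj <info:fedora/fedora-system:def/model#hasModel> "
         "<info:fedora/demo:collection-model> }")
for row in risearch.sparql_query(query, limit=10):
    print(row)  # each row is a dict keyed by the selected variable names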
materialsproject/pymatgen
pymatgen/analysis/structure_matcher.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/structure_matcher.py#L614-L646
def _preprocess(self, struct1, struct2, niggli=True): """ Rescales, finds the reduced structures (primitive and niggli), and finds fu, the supercell size to make struct1 comparable to s2 """ struct1 = struct1.copy() struct2 = struct2.copy() if niggli: struct1 = struct1.get_reduced_structure(reduction_algo="niggli") struct2 = struct2.get_reduced_structure(reduction_algo="niggli") # primitive cell transformation if self._primitive_cell: struct1 = struct1.get_primitive_structure() struct2 = struct2.get_primitive_structure() if self._supercell: fu, s1_supercell = self._get_supercell_size(struct1, struct2) else: fu, s1_supercell = 1, True mult = fu if s1_supercell else 1/fu # rescale lattice to same volume if self._scale: ratio = (struct2.volume / (struct1.volume * mult)) ** (1 / 6) nl1 = Lattice(struct1.lattice.matrix * ratio) struct1.lattice = nl1 nl2 = Lattice(struct2.lattice.matrix / ratio) struct2.lattice = nl2 return struct1, struct2, fu, s1_supercell
[ "def", "_preprocess", "(", "self", ",", "struct1", ",", "struct2", ",", "niggli", "=", "True", ")", ":", "struct1", "=", "struct1", ".", "copy", "(", ")", "struct2", "=", "struct2", ".", "copy", "(", ")", "if", "niggli", ":", "struct1", "=", "struct1", ".", "get_reduced_structure", "(", "reduction_algo", "=", "\"niggli\"", ")", "struct2", "=", "struct2", ".", "get_reduced_structure", "(", "reduction_algo", "=", "\"niggli\"", ")", "# primitive cell transformation", "if", "self", ".", "_primitive_cell", ":", "struct1", "=", "struct1", ".", "get_primitive_structure", "(", ")", "struct2", "=", "struct2", ".", "get_primitive_structure", "(", ")", "if", "self", ".", "_supercell", ":", "fu", ",", "s1_supercell", "=", "self", ".", "_get_supercell_size", "(", "struct1", ",", "struct2", ")", "else", ":", "fu", ",", "s1_supercell", "=", "1", ",", "True", "mult", "=", "fu", "if", "s1_supercell", "else", "1", "/", "fu", "# rescale lattice to same volume", "if", "self", ".", "_scale", ":", "ratio", "=", "(", "struct2", ".", "volume", "/", "(", "struct1", ".", "volume", "*", "mult", ")", ")", "**", "(", "1", "/", "6", ")", "nl1", "=", "Lattice", "(", "struct1", ".", "lattice", ".", "matrix", "*", "ratio", ")", "struct1", ".", "lattice", "=", "nl1", "nl2", "=", "Lattice", "(", "struct2", ".", "lattice", ".", "matrix", "/", "ratio", ")", "struct2", ".", "lattice", "=", "nl2", "return", "struct1", ",", "struct2", ",", "fu", ",", "s1_supercell" ]
Rescales, finds the reduced structures (primitive and niggli), and finds fu, the supercell size to make struct1 comparable to s2
[ "Rescales", "finds", "the", "reduced", "structures", "(", "primitive", "and", "niggli", ")", "and", "finds", "fu", "the", "supercell", "size", "to", "make", "struct1", "comparable", "to", "s2" ]
python
train
36.333333
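_preprocess is private and runs inside the public matching calls; a hedged end-to-end sketch with illustrative structures:

from pymatgen import Lattice, Structure
from pymatgen.analysis.structure_matcher import StructureMatcher

s1 = Structure(Lattice.cubic(4.00), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
s2 = Structure(Lattice.cubic(4.10), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])

sm = StructureMatcher(primitive_cell=True, scale=True)  # both options are handled in _preprocess
print(sm.fit(s1, s2))  # expected True once volumes are rescaled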